[llvm] efc7bbb - [AMDGPU] Make v2bf16 BUILD_VECTOR legal (#92022)

via llvm-commits llvm-commits at lists.llvm.org
Mon May 13 14:53:30 PDT 2024


Author: Stanislav Mekhanoshin
Date: 2024-05-13T14:53:26-07:00
New Revision: efc7bbb917428393f543b09eecddf6e4bb5fce08

URL: https://github.com/llvm/llvm-project/commit/efc7bbb917428393f543b09eecddf6e4bb5fce08
DIFF: https://github.com/llvm/llvm-project/commit/efc7bbb917428393f543b09eecddf6e4bb5fce08.diff

LOG: [AMDGPU] Make v2bf16 BUILD_VECTOR legal (#92022)

There is nothing specific here and it is not different from i16 or f16.

Added: 
    llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2bf16.ll

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIInstructions.td
    llvm/test/CodeGen/AMDGPU/bf16-conversions.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 0a3a56e9b3a0b..8645f560d997d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -233,9 +233,6 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
     // sources.
     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
-
-    setOperationAction(ISD::BUILD_VECTOR, MVT::v2bf16, Promote);
-    AddPromotedToType(ISD::BUILD_VECTOR, MVT::v2bf16, MVT::v2i16);
   }
 
   setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
@@ -744,9 +741,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
     setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND},
                        MVT::v8i32, Expand);
 
-    if (!Subtarget->hasVOP3PInsts())
-      setOperationAction(ISD::BUILD_VECTOR,
-                         {MVT::v2i16, MVT::v2f16, MVT::v2bf16}, Custom);
+    setOperationAction(ISD::BUILD_VECTOR, {MVT::v2i16, MVT::v2f16, MVT::v2bf16},
+                       Subtarget->hasVOP3PInsts() ? Legal : Custom);
 
     setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
     // This isn't really legal, but this avoids the legalizer unrolling it (and

diff  --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index f9e811f54d05e..e7aeaa017306c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -3166,7 +3166,7 @@ def : GCNPat <
   (v2f16 (V_AND_B32_e64 (i32 (V_MOV_B32_e32 (i32 0xffff))), VGPR_32:$src1))
 >;
 
-foreach vecTy = [v2i16, v2f16] in {
+foreach vecTy = [v2i16, v2f16, v2bf16] in {
 
 defvar Ty = vecTy.ElementType;
 
@@ -3212,7 +3212,7 @@ def : GCNPat <
 >;
 
 
-foreach vecTy = [v2i16, v2f16] in {
+foreach vecTy = [v2i16, v2f16, v2bf16] in {
 
 defvar Ty = vecTy.ElementType;
 defvar immzeroTy = !if(!eq(Ty, i16), immzero, fpimmzero);

diff  --git a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
index 7108f3d65768c..1c9f35dd45fee 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
@@ -55,7 +55,8 @@ define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_s(<2 x float> inreg %src) {
 ; GCN-NEXT:    s_add_i32 s5, s2, 0x7fff
 ; GCN-NEXT:    v_cmp_u_f32_e64 s[2:3], s1, s1
 ; GCN-NEXT:    s_and_b64 s[2:3], s[2:3], exec
-; GCN-NEXT:    s_cselect_b32 s2, s4, s5
+; GCN-NEXT:    s_cselect_b32 s1, s4, s5
+; GCN-NEXT:    s_lshr_b32 s2, s1, 16
 ; GCN-NEXT:    s_bfe_u32 s1, s0, 0x10010
 ; GCN-NEXT:    s_add_i32 s1, s1, s0
 ; GCN-NEXT:    s_or_b32 s3, s0, 0x400000
@@ -63,7 +64,8 @@ define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_s(<2 x float> inreg %src) {
 ; GCN-NEXT:    v_cmp_u_f32_e64 s[0:1], s0, s0
 ; GCN-NEXT:    s_and_b64 s[0:1], s[0:1], exec
 ; GCN-NEXT:    s_cselect_b32 s0, s3, s4
-; GCN-NEXT:    s_pack_hh_b32_b16 s0, s0, s2
+; GCN-NEXT:    s_lshr_b32 s0, s0, 16
+; GCN-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
 ; GCN-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-NEXT:    ; return to shader part epilog
   %res = fptrunc <2 x float> %src to <2 x bfloat>

diff  --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2bf16.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2bf16.ll
new file mode 100644
index 0000000000000..c9b01eb5a9725
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2bf16.ll
@@ -0,0 +1,1690 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=tahiti < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=tonga < %s | FileCheck -check-prefix=VI %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX900 %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 < %s | FileCheck -check-prefix=GFX940 %s
+
+define amdgpu_kernel void @s_insertelement_v2bf16_0(ptr addrspace(1) %out, ptr addrspace(4) %vec.ptr) #0 {
+; SI-LABEL: s_insertelement_v2bf16_0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s4, s[2:3], 0x0
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s4, s4, 0xffff0000
+; SI-NEXT:    s_or_b32 s4, s4, 0x40a0
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_insertelement_v2bf16_0:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s2, s[2:3], 0x0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s0, s2, 0xffff0000
+; VI-NEXT:    s_or_b32 s0, s0, 0x40a0
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: s_insertelement_v2bf16_0:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    v_mov_b32_e32 v0, 0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    s_load_dword s2, s[2:3], 0x0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    s_lshr_b32 s2, s2, 16
+; GFX900-NEXT:    s_pack_ll_b32_b16 s2, 0x40a0, s2
+; GFX900-NEXT:    v_mov_b32_e32 v1, s2
+; GFX900-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: s_insertelement_v2bf16_0:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NEXT:    v_mov_b32_e32 v0, 0
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    s_load_dword s2, s[2:3], 0x0
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    s_lshr_b32 s2, s2, 16
+; GFX940-NEXT:    s_pack_ll_b32_b16 s2, 0x40a0, s2
+; GFX940-NEXT:    v_mov_b32_e32 v1, s2
+; GFX940-NEXT:    global_store_dword v0, v1, s[0:1] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+  %vec = load <2 x bfloat>, ptr addrspace(4) %vec.ptr
+  %vecins = insertelement <2 x bfloat> %vec, bfloat 5.000000e+00, i32 0
+  store <2 x bfloat> %vecins, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @s_insertelement_v2bf16_1(ptr addrspace(1) %out, ptr addrspace(4) %vec.ptr) #0 {
+; SI-LABEL: s_insertelement_v2bf16_1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_load_dword s4, s[2:3], 0x0
+; SI-NEXT:    s_mov_b32 s3, 0x100f000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_and_b32 s4, s4, 0xffff
+; SI-NEXT:    s_or_b32 s4, s4, 0x40a00000
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_insertelement_v2bf16_1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_load_dword s2, s[2:3], 0x0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    s_and_b32 s0, s2, 0xffff
+; VI-NEXT:    s_or_b32 s0, s0, 0x40a00000
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: s_insertelement_v2bf16_1:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    v_mov_b32_e32 v0, 0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    s_load_dword s2, s[2:3], 0x0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    s_pack_ll_b32_b16 s2, s2, 0x40a0
+; GFX900-NEXT:    v_mov_b32_e32 v1, s2
+; GFX900-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: s_insertelement_v2bf16_1:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NEXT:    v_mov_b32_e32 v0, 0
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    s_load_dword s2, s[2:3], 0x0
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    s_pack_ll_b32_b16 s2, s2, 0x40a0
+; GFX940-NEXT:    v_mov_b32_e32 v1, s2
+; GFX940-NEXT:    global_store_dword v0, v1, s[0:1] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+  %vec = load <2 x bfloat>, ptr addrspace(4) %vec.ptr
+  %vecins = insertelement <2 x bfloat> %vec, bfloat 5.000000e+00, i32 1
+  store <2 x bfloat> %vecins, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @v_insertelement_v2bf16_0(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; SI-LABEL: v_insertelement_v2bf16_0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; SI-NEXT:    v_or_b32_e32 v2, 0x40a0, v2
+; SI-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v2bf16_0:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v2, 0xffff0000, v3
+; VI-NEXT:    v_or_b32_e32 v2, 0x40a0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v2bf16_0:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX900-NEXT:    v_mov_b32_e32 v2, 0x40a0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dword v1, v0, s[2:3]
+; GFX900-NEXT:    s_mov_b32 s2, 0xffff
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_bfi_b32 v1, s2, v2, v1
+; GFX900-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v2bf16_0:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX940-NEXT:    v_mov_b32_e32 v2, 0x40a0
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dword v1, v0, s[2:3]
+; GFX940-NEXT:    s_mov_b32 s2, 0xffff
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_bfi_b32 v1, s2, v2, v1
+; GFX940-NEXT:    global_store_dword v0, v1, s[0:1] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <2 x bfloat>, ptr addrspace(1) %in.gep
+  %vecins = insertelement <2 x bfloat> %vec, bfloat 5.000000e+00, i32 0
+  store <2 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+define amdgpu_kernel void @v_insertelement_v2bf16_0_inlineimm(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; SI-LABEL: v_insertelement_v2bf16_0_inlineimm:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; SI-NEXT:    v_or_b32_e32 v2, 53, v2
+; SI-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v2bf16_0_inlineimm:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_and_b32_e32 v2, 0xffff0000, v3
+; VI-NEXT:    v_or_b32_e32 v2, 53, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v2bf16_0_inlineimm:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dword v1, v0, s[2:3]
+; GFX900-NEXT:    s_mov_b32 s2, 0xffff
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_bfi_b32 v1, s2, 53, v1
+; GFX900-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v2bf16_0_inlineimm:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dword v1, v0, s[2:3]
+; GFX940-NEXT:    s_mov_b32 s2, 0xffff
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_bfi_b32 v1, s2, 53, v1
+; GFX940-NEXT:    global_store_dword v0, v1, s[0:1] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <2 x bfloat>, ptr addrspace(1) %in.gep
+  %vecins = insertelement <2 x bfloat> %vec, bfloat 0xR0035, i32 0
+  store <2 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+define amdgpu_kernel void @v_insertelement_v2bf16_1(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; SI-LABEL: v_insertelement_v2bf16_1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT:    v_or_b32_e32 v2, 0x40a00000, v2
+; SI-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v2bf16_1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v2, 0x40a00000
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v2bf16_1:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX900-NEXT:    v_mov_b32_e32 v2, 0x5040100
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dword v1, v0, s[2:3]
+; GFX900-NEXT:    s_movk_i32 s2, 0x40a0
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_perm_b32 v1, s2, v1, v2
+; GFX900-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v2bf16_1:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX940-NEXT:    v_mov_b32_e32 v2, 0x5040100
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dword v1, v0, s[2:3]
+; GFX940-NEXT:    s_movk_i32 s2, 0x40a0
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_perm_b32 v1, s2, v1, v2
+; GFX940-NEXT:    global_store_dword v0, v1, s[0:1] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <2 x bfloat>, ptr addrspace(1) %in.gep
+  %vecins = insertelement <2 x bfloat> %vec, bfloat 5.000000e+00, i32 1
+  store <2 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+define amdgpu_kernel void @v_insertelement_v2bf16_1_inlineimm(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; SI-LABEL: v_insertelement_v2bf16_1_inlineimm:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT:    v_or_b32_e32 v2, 0x230000, v2
+; SI-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v2bf16_1_inlineimm:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v2, 0x230000
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v2bf16_1_inlineimm:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX900-NEXT:    v_mov_b32_e32 v2, 0x5040100
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dword v1, v0, s[2:3]
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_perm_b32 v1, 35, v1, v2
+; GFX900-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v2bf16_1_inlineimm:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX940-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX940-NEXT:    v_mov_b32_e32 v2, 0x5040100
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dword v1, v0, s[2:3]
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_perm_b32 v1, 35, v1, v2
+; GFX940-NEXT:    global_store_dword v0, v1, s[0:1] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <2 x bfloat>, ptr addrspace(1) %in.gep
+  %vecins = insertelement <2 x bfloat> %vec, bfloat 0xR0023, i32 1
+  store <2 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+define amdgpu_kernel void @v_insertelement_v2bf16_dynamic_vgpr(ptr addrspace(1) %out, ptr addrspace(1) %in, ptr addrspace(1) %idx.ptr) #0 {
+; SI-LABEL: v_insertelement_v2bf16_dynamic_vgpr:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s11, 0x100f000
+; SI-NEXT:    s_mov_b32 s10, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_mov_b64 s[6:7], s[10:11]
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[8:11], 0 addr64
+; SI-NEXT:    s_mov_b32 s4, 0x12341234
+; SI-NEXT:    s_mov_b64 s[2:3], s[10:11]
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 4, v2
+; SI-NEXT:    v_lshl_b32_e32 v2, 0xffff, v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_bfi_b32 v2, v2, s4, v3
+; SI-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v2bf16_dynamic_vgpr:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x10
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1]
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1]
+; VI-NEXT:    s_mov_b32 s2, 0xffff
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_mov_b32 s0, 0x12341234
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_waitcnt vmcnt(1)
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 4, v4
+; VI-NEXT:    v_lshlrev_b32_e64 v2, v2, s2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_bfi_b32 v2, v2, s0, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v2bf16_dynamic_vgpr:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dword v1, v0, s[6:7]
+; GFX900-NEXT:    global_load_dword v2, v0, s[2:3]
+; GFX900-NEXT:    s_mov_b32 s2, 0xffff
+; GFX900-NEXT:    s_waitcnt vmcnt(1)
+; GFX900-NEXT:    v_lshlrev_b32_e32 v1, 4, v1
+; GFX900-NEXT:    v_lshlrev_b32_e64 v1, v1, s2
+; GFX900-NEXT:    s_mov_b32 s2, 0x12341234
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_bfi_b32 v1, v1, s2, v2
+; GFX900-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v2bf16_dynamic_vgpr:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x10
+; GFX940-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX940-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; GFX940-NEXT:    s_mov_b32 s0, 0xffff
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dword v1, v0, s[2:3]
+; GFX940-NEXT:    global_load_dword v2, v0, s[6:7]
+; GFX940-NEXT:    s_waitcnt vmcnt(1)
+; GFX940-NEXT:    v_lshlrev_b32_e32 v1, 4, v1
+; GFX940-NEXT:    v_lshlrev_b32_e64 v1, v1, s0
+; GFX940-NEXT:    s_mov_b32 s0, 0x12341234
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_bfi_b32 v1, v1, s0, v2
+; GFX940-NEXT:    global_store_dword v0, v1, s[4:5] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %idx.gep = getelementptr inbounds i32, ptr addrspace(1) %idx.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %idx = load i32, ptr addrspace(1) %idx.gep
+  %vec = load <2 x bfloat>, ptr addrspace(1) %in.gep
+  %vecins = insertelement <2 x bfloat> %vec, bfloat 0xR1234, i32 %idx
+  store <2 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+define amdgpu_kernel void @v_insertelement_v4bf16_0(ptr addrspace(1) %out, ptr addrspace(1) %in, [8 x i32], i32 %val) #0 {
+; SI-LABEL: v_insertelement_v4bf16_0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_load_dword s8, s[4:5], 0xc
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b32 s4, 0xffff
+; SI-NEXT:    v_mov_b32_e32 v4, s8
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_bfi_b32 v2, s4, v4, v2
+; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v4bf16_0:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x30
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v4, 0x3020504
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_perm_b32 v0, s4, v0, v4
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v4bf16_0:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    s_load_dword s6, s[4:5], 0x30
+; GFX900-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dwordx2 v[0:1], v2, s[2:3]
+; GFX900-NEXT:    s_mov_b32 s2, 0xffff
+; GFX900-NEXT:    v_mov_b32_e32 v3, s6
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_bfi_b32 v0, s2, v3, v0
+; GFX900-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v4bf16_0:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX940-NEXT:    s_load_dword s2, s[0:1], 0x30
+; GFX940-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX940-NEXT:    s_mov_b32 s0, 0xffff
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dwordx2 v[0:1], v2, s[6:7]
+; GFX940-NEXT:    v_mov_b32_e32 v3, s2
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_bfi_b32 v0, s0, v3, v0
+; GFX940-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <4 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <4 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <4 x bfloat>, ptr addrspace(1) %in.gep
+  %val.trunc = trunc i32 %val to i16
+  %val.cvt = bitcast i16 %val.trunc to bfloat
+  %vecins = insertelement <4 x bfloat> %vec, bfloat %val.cvt, i32 0
+  store <4 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+define amdgpu_kernel void @v_insertelement_v4bf16_1(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %val) #0 {
+; SI-LABEL: v_insertelement_v4bf16_1:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_load_dword s8, s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_lshl_b32 s4, s8, 16
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT:    v_or_b32_e32 v2, s4, v2
+; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v4bf16_1:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x10
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v4, 0x1000504
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_perm_b32 v0, v0, s4, v4
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v4bf16_1:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX900-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX900-NEXT:    v_mov_b32_e32 v3, 0x5040100
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dwordx2 v[0:1], v2, s[2:3]
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_perm_b32 v0, s6, v0, v3
+; GFX900-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v4bf16_1:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX940-NEXT:    s_load_dword s2, s[0:1], 0x10
+; GFX940-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX940-NEXT:    v_mov_b32_e32 v3, 0x5040100
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dwordx2 v[0:1], v2, s[6:7]
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_perm_b32 v0, s2, v0, v3
+; GFX940-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <4 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <4 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <4 x bfloat>, ptr addrspace(1) %in.gep
+  %val.trunc = trunc i32 %val to i16
+  %val.cvt = bitcast i16 %val.trunc to bfloat
+  %vecins = insertelement <4 x bfloat> %vec, bfloat %val.cvt, i32 1
+  store <4 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+; Inserts a bfloat (low 16 bits of %val) at constant index 2 of a per-thread
+; <4 x bfloat>. Element 2 is the low half of the second dword, so every target
+; merges it with v_bfi_b32 under a 0xffff mask (VI instead uses v_perm_b32).
+; The [8 x i32] padding argument pushes %val to kernarg offset 0x30 (0xc dwords).
+define amdgpu_kernel void @v_insertelement_v4bf16_2(ptr addrspace(1) %out, ptr addrspace(1) %in, [8 x i32], i32 %val) #0 {
+; SI-LABEL: v_insertelement_v4bf16_2:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_load_dword s8, s[4:5], 0xc
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b32 s4, 0xffff
+; SI-NEXT:    v_mov_b32_e32 v4, s8
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_bfi_b32 v3, s4, v4, v3
+; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v4bf16_2:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x30
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v4, 0x3020504
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_perm_b32 v1, s4, v1, v4
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v4bf16_2:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    s_load_dword s6, s[4:5], 0x30
+; GFX900-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dwordx2 v[0:1], v2, s[2:3]
+; GFX900-NEXT:    s_mov_b32 s2, 0xffff
+; GFX900-NEXT:    v_mov_b32_e32 v3, s6
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_bfi_b32 v1, s2, v3, v1
+; GFX900-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v4bf16_2:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX940-NEXT:    s_load_dword s2, s[0:1], 0x10
+; GFX940-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX940-NEXT:    s_mov_b32 s0, 0xffff
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dwordx2 v[0:1], v2, s[6:7]
+; GFX940-NEXT:    v_mov_b32_e32 v3, s2
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_bfi_b32 v1, s0, v3, v1
+; GFX940-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+; The IR below is the test input; the FileCheck lines above pin the expected
+; ISA for each run line.
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <4 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <4 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <4 x bfloat>, ptr addrspace(1) %in.gep
+  %val.trunc = trunc i32 %val to i16
+  %val.cvt = bitcast i16 %val.trunc to bfloat
+  %vecins = insertelement <4 x bfloat> %vec, bfloat %val.cvt, i32 2
+  store <4 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+; Inserts a bfloat at constant index 3 of a per-thread <4 x bfloat>. Element 3
+; is the high half of the second dword: SI masks and ORs a shifted scalar,
+; while VI/GFX9 use a single v_perm_b32 to splice the 16-bit lane.
+define amdgpu_kernel void @v_insertelement_v4bf16_3(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %val) #0 {
+; SI-LABEL: v_insertelement_v4bf16_3:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_load_dword s8, s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_lshl_b32 s4, s8, 16
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v3, 0xffff, v3
+; SI-NEXT:    v_or_b32_e32 v3, s4, v3
+; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v4bf16_3:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x10
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v4, 0x1000504
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_perm_b32 v1, v1, s4, v4
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v4bf16_3:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX900-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX900-NEXT:    v_mov_b32_e32 v3, 0x5040100
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dwordx2 v[0:1], v2, s[2:3]
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_perm_b32 v1, s6, v1, v3
+; GFX900-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v4bf16_3:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX940-NEXT:    s_load_dword s2, s[0:1], 0x10
+; GFX940-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX940-NEXT:    v_mov_b32_e32 v3, 0x5040100
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dwordx2 v[0:1], v2, s[6:7]
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_perm_b32 v1, s2, v1, v3
+; GFX940-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+; The IR below is the test input; the FileCheck lines above pin the expected
+; ISA for each run line.
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <4 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <4 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <4 x bfloat>, ptr addrspace(1) %in.gep
+  %val.trunc = trunc i32 %val to i16
+  %val.cvt = bitcast i16 %val.trunc to bfloat
+  %vecins = insertelement <4 x bfloat> %vec, bfloat %val.cvt, i32 3
+  store <4 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+; Inserts a bfloat at a uniform (SGPR) runtime index %idxval of a <4 x bfloat>.
+; Codegen splats the 16-bit value into both halves of a dword, builds a 64-bit
+; 0xffff mask shifted left by idx*16 (s_lshl_b64), and applies one v_bfi_b32
+; per dword so only the selected 16-bit lane is replaced.
+define amdgpu_kernel void @v_insertelement_v4bf16_dynamic_sgpr(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %val, i32 %idxval) #0 {
+; SI-LABEL: v_insertelement_v4bf16_dynamic_sgpr:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_lshl_b32 s4, s8, 16
+; SI-NEXT:    s_and_b32 s5, s8, 0xffff
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_lshl_b32 s6, s9, 4
+; SI-NEXT:    s_or_b32 s7, s5, s4
+; SI-NEXT:    s_lshl_b64 s[4:5], 0xffff, s6
+; SI-NEXT:    v_mov_b32_e32 v4, s7
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_bfi_b32 v3, s5, v4, v3
+; SI-NEXT:    v_bfi_b32 v2, s4, v5, v2
+; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v4bf16_dynamic_sgpr:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x10
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    s_lshl_b32 s1, s4, 16
+; VI-NEXT:    s_and_b32 s2, s4, 0xffff
+; VI-NEXT:    s_lshl_b32 s3, s5, 4
+; VI-NEXT:    s_or_b32 s2, s2, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    s_lshl_b64 s[0:1], 0xffff, s3
+; VI-NEXT:    v_mov_b32_e32 v4, s2
+; VI-NEXT:    v_mov_b32_e32 v5, s2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_bfi_b32 v1, s1, v4, v1
+; VI-NEXT:    v_bfi_b32 v0, s0, v5, v0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v4bf16_dynamic_sgpr:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GFX900-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dwordx2 v[0:1], v2, s[2:3]
+; GFX900-NEXT:    s_lshl_b32 s2, s7, 4
+; GFX900-NEXT:    s_pack_ll_b32_b16 s4, s6, s6
+; GFX900-NEXT:    s_lshl_b64 s[2:3], 0xffff, s2
+; GFX900-NEXT:    v_mov_b32_e32 v3, s4
+; GFX900-NEXT:    v_mov_b32_e32 v4, s4
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_bfi_b32 v1, s3, v3, v1
+; GFX900-NEXT:    v_bfi_b32 v0, s2, v4, v0
+; GFX900-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v4bf16_dynamic_sgpr:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX940-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x10
+; GFX940-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dwordx2 v[0:1], v2, s[6:7]
+; GFX940-NEXT:    s_lshl_b32 s0, s3, 4
+; GFX940-NEXT:    s_pack_ll_b32_b16 s2, s2, s2
+; GFX940-NEXT:    s_lshl_b64 s[0:1], 0xffff, s0
+; GFX940-NEXT:    v_mov_b32_e32 v3, s2
+; GFX940-NEXT:    v_mov_b32_e32 v4, s2
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_bfi_b32 v1, s1, v3, v1
+; GFX940-NEXT:    v_bfi_b32 v0, s0, v4, v0
+; GFX940-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+; The IR below is the test input; the FileCheck lines above pin the expected
+; ISA for each run line.
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <4 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <4 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <4 x bfloat>, ptr addrspace(1) %in.gep
+  %val.trunc = trunc i32 %val to i16
+  %val.cvt = bitcast i16 %val.trunc to bfloat
+  %vecins = insertelement <4 x bfloat> %vec, bfloat %val.cvt, i32 %idxval
+  store <4 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+; Inserts a bfloat at constant index 3 of a <8 x bfloat> (one dwordx4).
+; Only the second dword (v1) actually changes; the GFX9 runs also emit
+; v_bfi_b32 with identical sources on the untouched dwords (no-op merges
+; left over from legalization).
+define amdgpu_kernel void @v_insertelement_v8bf16_3(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %val) {
+; SI-LABEL: v_insertelement_v8bf16_3:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_load_dword s8, s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v5, 0
+; SI-NEXT:    buffer_load_dwordx4 v[0:3], v[4:5], s[4:7], 0 addr64
+; SI-NEXT:    s_lshl_b32 s4, s8, 16
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    v_or_b32_e32 v1, s4, v1
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], v[4:5], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v8bf16_3:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x10
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s0, v4
+; VI-NEXT:    s_lshl_b32 s0, s4, 16
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v6, s0
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_or_b32_sdwa v1, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v8bf16_3:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX900-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
+; GFX900-NEXT:    v_mov_b32_e32 v5, 0x5040100
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dwordx4 v[0:3], v4, s[2:3]
+; GFX900-NEXT:    s_mov_b32 s2, 0xffff
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_bfi_b32 v3, s2, v3, v3
+; GFX900-NEXT:    v_bfi_b32 v2, s2, v2, v2
+; GFX900-NEXT:    v_bfi_b32 v0, s2, v0, v0
+; GFX900-NEXT:    v_perm_b32 v1, s6, v1, v5
+; GFX900-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v8bf16_3:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX940-NEXT:    s_load_dword s2, s[0:1], 0x10
+; GFX940-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
+; GFX940-NEXT:    s_mov_b32 s0, 0xffff
+; GFX940-NEXT:    v_mov_b32_e32 v5, 0x5040100
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dwordx4 v[0:3], v4, s[6:7]
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_bfi_b32 v3, s0, v3, v3
+; GFX940-NEXT:    v_bfi_b32 v2, s0, v2, v2
+; GFX940-NEXT:    v_bfi_b32 v0, s0, v0, v0
+; GFX940-NEXT:    v_perm_b32 v1, s2, v1, v5
+; GFX940-NEXT:    global_store_dwordx4 v4, v[0:3], s[4:5] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+; The IR below is the test input; the FileCheck lines above pin the expected
+; ISA for each run line.
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <8 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <8 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <8 x bfloat>, ptr addrspace(1) %in.gep
+  %val.trunc = trunc i32 %val to i16
+  %val.cvt = bitcast i16 %val.trunc to bfloat
+  %vecins = insertelement <8 x bfloat> %vec, bfloat %val.cvt, i32 3
+  store <8 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+; Inserts a bfloat at a uniform runtime index %n of a <8 x bfloat>. Unlike the
+; <4 x bfloat> dynamic case, codegen here expands to one s_cmp_eq_u32 per
+; element (indices 0..7) with v_cndmask_b32 selects, then repacks each dword
+; pair (v_perm_b32 on GFX9, and/shl/or or sdwa on SI/VI).
+define amdgpu_kernel void @v_insertelement_v8bf16_dynamic(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %val, i32 %n) {
+; SI-LABEL: v_insertelement_v8bf16_dynamic:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v5, 0
+; SI-NEXT:    buffer_load_dwordx4 v[0:3], v[4:5], s[4:7], 0 addr64
+; SI-NEXT:    s_cmp_eq_u32 s9, 6
+; SI-NEXT:    v_mov_b32_e32 v6, s8
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s9, 7
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cndmask_b32_e32 v7, v3, v6, vcc
+; SI-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s9, 4
+; SI-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s9, 5
+; SI-NEXT:    v_lshrrev_b32_e32 v8, 16, v2
+; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s9, 2
+; SI-NEXT:    v_and_b32_e32 v7, 0xffff, v7
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT:    v_cndmask_b32_e32 v8, v8, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s9, 3
+; SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v1
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT:    v_or_b32_e32 v3, v7, v3
+; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v8
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s9, 0
+; SI-NEXT:    v_or_b32_e32 v2, v2, v7
+; SI-NEXT:    v_cndmask_b32_e32 v7, v9, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s9, 1
+; SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v0
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    v_cndmask_b32_e32 v6, v10, v6, vcc
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    v_lshlrev_b32_e32 v7, 16, v7
+; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v6, 16, v6
+; SI-NEXT:    v_or_b32_e32 v1, v1, v7
+; SI-NEXT:    v_or_b32_e32 v0, v0, v6
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], v[4:5], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v8bf16_dynamic:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x10
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s0, v4
+; VI-NEXT:    s_cmp_eq_u32 s5, 6
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    v_mov_b32_e32 v6, s4
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 7
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_cndmask_b32_e32 v7, v3, v6, vcc
+; VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 4
+; VI-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 5
+; VI-NEXT:    v_lshrrev_b32_e32 v8, 16, v2
+; VI-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 2
+; VI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; VI-NEXT:    v_cndmask_b32_e32 v8, v8, v6, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 3
+; VI-NEXT:    v_lshrrev_b32_e32 v9, 16, v1
+; VI-NEXT:    v_or_b32_sdwa v3, v7, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_lshlrev_b32_e32 v7, 16, v8
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v6, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 0
+; VI-NEXT:    v_or_b32_sdwa v2, v2, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_cndmask_b32_e32 v7, v9, v6, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 1
+; VI-NEXT:    v_lshrrev_b32_e32 v10, 16, v0
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    v_cndmask_b32_e32 v6, v10, v6, vcc
+; VI-NEXT:    v_lshlrev_b32_e32 v7, 16, v7
+; VI-NEXT:    v_lshlrev_b32_e32 v6, 16, v6
+; VI-NEXT:    v_or_b32_sdwa v1, v1, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v8bf16_dynamic:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GFX900-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dwordx4 v[0:3], v4, s[2:3]
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 6
+; GFX900-NEXT:    v_mov_b32_e32 v5, s6
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 7
+; GFX900-NEXT:    s_mov_b32 s2, 0x5040100
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_cndmask_b32_e32 v6, v3, v5, vcc
+; GFX900-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 4
+; GFX900-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 5
+; GFX900-NEXT:    v_lshrrev_b32_e32 v7, 16, v2
+; GFX900-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 2
+; GFX900-NEXT:    v_perm_b32 v3, v3, v6, s2
+; GFX900-NEXT:    v_cndmask_b32_e32 v6, v7, v5, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 3
+; GFX900-NEXT:    v_lshrrev_b32_e32 v8, 16, v1
+; GFX900-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 0
+; GFX900-NEXT:    v_perm_b32 v2, v6, v2, s2
+; GFX900-NEXT:    v_cndmask_b32_e32 v6, v8, v5, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 1
+; GFX900-NEXT:    v_lshrrev_b32_e32 v9, 16, v0
+; GFX900-NEXT:    v_cndmask_b32_e32 v0, v0, v5, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    v_cndmask_b32_e32 v5, v9, v5, vcc
+; GFX900-NEXT:    v_perm_b32 v1, v6, v1, s2
+; GFX900-NEXT:    v_perm_b32 v0, v5, v0, s2
+; GFX900-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v8bf16_dynamic:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX940-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x10
+; GFX940-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
+; GFX940-NEXT:    s_mov_b32 s0, 0x5040100
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dwordx4 v[0:3], v4, s[6:7]
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 6
+; GFX940-NEXT:    v_mov_b32_e32 v5, s2
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 7
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_cndmask_b32_e32 v6, v3, v5, vcc
+; GFX940-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 4
+; GFX940-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 5
+; GFX940-NEXT:    v_lshrrev_b32_e32 v7, 16, v2
+; GFX940-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 2
+; GFX940-NEXT:    v_perm_b32 v3, v3, v6, s0
+; GFX940-NEXT:    v_cndmask_b32_e32 v6, v7, v5, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 3
+; GFX940-NEXT:    v_lshrrev_b32_e32 v8, 16, v1
+; GFX940-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 0
+; GFX940-NEXT:    v_perm_b32 v2, v6, v2, s0
+; GFX940-NEXT:    v_cndmask_b32_e32 v6, v8, v5, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 1
+; GFX940-NEXT:    v_lshrrev_b32_e32 v9, 16, v0
+; GFX940-NEXT:    v_cndmask_b32_e32 v0, v0, v5, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    v_cndmask_b32_e32 v5, v9, v5, vcc
+; GFX940-NEXT:    v_perm_b32 v1, v6, v1, s0
+; GFX940-NEXT:    v_perm_b32 v0, v5, v0, s0
+; GFX940-NEXT:    global_store_dwordx4 v4, v[0:3], s[4:5] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+; The IR below is the test input; the FileCheck lines above pin the expected
+; ISA for each run line.
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <8 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <8 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <8 x bfloat>, ptr addrspace(1) %in.gep
+  %val.trunc = trunc i32 %val to i16
+  %val.cvt = bitcast i16 %val.trunc to bfloat
+  %vecins = insertelement <8 x bfloat> %vec, bfloat %val.cvt, i32 %n
+  store <8 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+; Inserts a bfloat at constant index 3 of a <16 x bfloat> (two dwordx4
+; loads/stores, 32 bytes per thread). Only the second dword of the low half
+; changes; the high half is loaded and stored back untouched.
+define amdgpu_kernel void @v_insertelement_v16bf16_3(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %val) {
+; SI-LABEL: v_insertelement_v16bf16_3:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_load_dword s8, s[4:5], 0x4
+; SI-NEXT:    s_mov_b32 s7, 0x100f000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v9, 0
+; SI-NEXT:    buffer_load_dwordx4 v[0:3], v[8:9], s[4:7], 0 addr64
+; SI-NEXT:    buffer_load_dwordx4 v[4:7], v[8:9], s[4:7], 0 addr64 offset:16
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_lshl_b32 s4, s8, 16
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    v_or_b32_e32 v1, s4, v1
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], v[8:9], s[0:3], 0 addr64 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], v[8:9], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v16bf16_3:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x10
+; VI-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v8
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_add_u32_e32 v4, vcc, 16, v0
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; VI-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; VI-NEXT:    v_mov_b32_e32 v9, s1
+; VI-NEXT:    v_add_u32_e32 v8, vcc, s0, v8
+; VI-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; VI-NEXT:    s_lshl_b32 s1, s4, 16
+; VI-NEXT:    v_add_u32_e32 v10, vcc, 16, v8
+; VI-NEXT:    v_mov_b32_e32 v12, s1
+; VI-NEXT:    v_addc_u32_e32 v11, vcc, 0, v9, vcc
+; VI-NEXT:    s_waitcnt vmcnt(1)
+; VI-NEXT:    v_or_b32_sdwa v1, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
+; VI-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v16bf16_3:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX900-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
+; GFX900-NEXT:    v_mov_b32_e32 v9, 0x5040100
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dwordx4 v[0:3], v8, s[2:3]
+; GFX900-NEXT:    global_load_dwordx4 v[4:7], v8, s[2:3] offset:16
+; GFX900-NEXT:    s_waitcnt vmcnt(1)
+; GFX900-NEXT:    v_perm_b32 v1, s6, v1, v9
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    global_store_dwordx4 v8, v[4:7], s[0:1] offset:16
+; GFX900-NEXT:    global_store_dwordx4 v8, v[0:3], s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v16bf16_3:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX940-NEXT:    s_load_dword s2, s[0:1], 0x10
+; GFX940-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
+; GFX940-NEXT:    v_mov_b32_e32 v9, 0x5040100
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dwordx4 v[0:3], v8, s[6:7]
+; GFX940-NEXT:    global_load_dwordx4 v[4:7], v8, s[6:7] offset:16
+; GFX940-NEXT:    s_waitcnt vmcnt(1)
+; GFX940-NEXT:    v_perm_b32 v1, s2, v1, v9
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    global_store_dwordx4 v8, v[4:7], s[4:5] offset:16 sc0 sc1
+; GFX940-NEXT:    global_store_dwordx4 v8, v[0:3], s[4:5] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+; The IR below is the test input; the FileCheck lines above pin the expected
+; ISA for each run line.
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <16 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <16 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <16 x bfloat>, ptr addrspace(1) %in.gep
+  %val.trunc = trunc i32 %val to i16
+  %val.cvt = bitcast i16 %val.trunc to bfloat
+  %vecins = insertelement <16 x bfloat> %vec, bfloat %val.cvt, i32 3
+  store <16 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+define amdgpu_kernel void @v_insertelement_v16bf16_dynamic(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %val, i32 %n) {
+; SI-LABEL: v_insertelement_v16bf16_dynamic:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; SI-NEXT:    s_mov_b32 s11, 0x100f000
+; SI-NEXT:    s_mov_b32 s10, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 5, v0
+; SI-NEXT:    v_mov_b32_e32 v5, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
+; SI-NEXT:    buffer_load_dwordx4 v[7:10], v[4:5], s[8:11], 0 addr64
+; SI-NEXT:    buffer_load_dwordx4 v[0:3], v[4:5], s[8:11], 0 addr64 offset:16
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x4
+; SI-NEXT:    s_mov_b64 s[2:3], s[10:11]
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_cmp_eq_u32 s5, 6
+; SI-NEXT:    v_mov_b32_e32 v6, s4
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 7
+; SI-NEXT:    s_waitcnt vmcnt(1)
+; SI-NEXT:    v_cndmask_b32_e32 v11, v10, v6, vcc
+; SI-NEXT:    v_lshrrev_b32_e32 v10, 16, v10
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 4
+; SI-NEXT:    v_cndmask_b32_e32 v10, v10, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 5
+; SI-NEXT:    v_lshrrev_b32_e32 v12, 16, v9
+; SI-NEXT:    v_cndmask_b32_e32 v9, v9, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 2
+; SI-NEXT:    v_and_b32_e32 v11, 0xffff, v11
+; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v10
+; SI-NEXT:    v_cndmask_b32_e32 v12, v12, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 3
+; SI-NEXT:    v_lshrrev_b32_e32 v13, 16, v8
+; SI-NEXT:    v_and_b32_e32 v9, 0xffff, v9
+; SI-NEXT:    v_or_b32_e32 v10, v11, v10
+; SI-NEXT:    v_lshlrev_b32_e32 v11, 16, v12
+; SI-NEXT:    v_cndmask_b32_e32 v8, v8, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 0
+; SI-NEXT:    v_or_b32_e32 v9, v9, v11
+; SI-NEXT:    v_cndmask_b32_e32 v11, v13, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 1
+; SI-NEXT:    v_lshrrev_b32_e32 v14, 16, v7
+; SI-NEXT:    v_and_b32_e32 v8, 0xffff, v8
+; SI-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
+; SI-NEXT:    v_cndmask_b32_e32 v7, v7, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 14
+; SI-NEXT:    v_or_b32_e32 v8, v8, v11
+; SI-NEXT:    v_cndmask_b32_e32 v11, v14, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 15
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshrrev_b32_e32 v15, 16, v3
+; SI-NEXT:    v_and_b32_e32 v7, 0xffff, v7
+; SI-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
+; SI-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 12
+; SI-NEXT:    v_or_b32_e32 v7, v7, v11
+; SI-NEXT:    v_cndmask_b32_e32 v11, v15, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 13
+; SI-NEXT:    v_lshrrev_b32_e32 v16, 16, v2
+; SI-NEXT:    v_and_b32_e32 v3, 0xffff, v3
+; SI-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
+; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 10
+; SI-NEXT:    v_or_b32_e32 v3, v3, v11
+; SI-NEXT:    v_cndmask_b32_e32 v11, v16, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 11
+; SI-NEXT:    v_lshrrev_b32_e32 v17, 16, v1
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
+; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 8
+; SI-NEXT:    v_or_b32_e32 v2, v2, v11
+; SI-NEXT:    v_cndmask_b32_e32 v11, v17, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    s_cmp_eq_u32 s5, 9
+; SI-NEXT:    v_lshrrev_b32_e32 v18, 16, v0
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    v_cndmask_b32_e32 v6, v18, v6, vcc
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT:    v_lshlrev_b32_e32 v11, 16, v11
+; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v6, 16, v6
+; SI-NEXT:    v_or_b32_e32 v1, v1, v11
+; SI-NEXT:    v_or_b32_e32 v0, v0, v6
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], v[4:5], s[0:3], 0 addr64 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[7:10], v[4:5], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_insertelement_v16bf16_dynamic:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x10
+; VI-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s3
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s2, v8
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v0, vcc
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 16, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; VI-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; VI-NEXT:    v_mov_b32_e32 v9, s1
+; VI-NEXT:    v_add_u32_e32 v8, vcc, s0, v8
+; VI-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; VI-NEXT:    v_add_u32_e32 v10, vcc, 16, v8
+; VI-NEXT:    s_cmp_eq_u32 s5, 14
+; VI-NEXT:    v_addc_u32_e32 v11, vcc, 0, v9, vcc
+; VI-NEXT:    v_mov_b32_e32 v12, s4
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 15
+; VI-NEXT:    s_waitcnt vmcnt(1)
+; VI-NEXT:    v_cndmask_b32_e32 v13, v3, v12, vcc
+; VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 12
+; VI-NEXT:    v_cndmask_b32_e32 v3, v3, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 13
+; VI-NEXT:    v_lshrrev_b32_e32 v14, 16, v2
+; VI-NEXT:    v_cndmask_b32_e32 v2, v2, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 10
+; VI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; VI-NEXT:    v_cndmask_b32_e32 v14, v14, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 11
+; VI-NEXT:    v_lshrrev_b32_e32 v15, 16, v1
+; VI-NEXT:    v_or_b32_sdwa v3, v13, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_lshlrev_b32_e32 v13, 16, v14
+; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 8
+; VI-NEXT:    v_or_b32_sdwa v2, v2, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_cndmask_b32_e32 v13, v15, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 9
+; VI-NEXT:    v_lshrrev_b32_e32 v16, 16, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v13, 16, v13
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 6
+; VI-NEXT:    v_or_b32_sdwa v1, v1, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_cndmask_b32_e32 v13, v16, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 7
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshrrev_b32_e32 v17, 16, v7
+; VI-NEXT:    v_lshlrev_b32_e32 v13, 16, v13
+; VI-NEXT:    v_cndmask_b32_e32 v7, v7, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 4
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_cndmask_b32_e32 v13, v17, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 5
+; VI-NEXT:    v_lshrrev_b32_e32 v18, 16, v6
+; VI-NEXT:    v_lshlrev_b32_e32 v13, 16, v13
+; VI-NEXT:    v_cndmask_b32_e32 v6, v6, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 2
+; VI-NEXT:    v_or_b32_sdwa v7, v7, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_cndmask_b32_e32 v13, v18, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 3
+; VI-NEXT:    v_lshrrev_b32_e32 v19, 16, v5
+; VI-NEXT:    v_lshlrev_b32_e32 v13, 16, v13
+; VI-NEXT:    v_cndmask_b32_e32 v5, v5, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 0
+; VI-NEXT:    v_or_b32_sdwa v6, v6, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_cndmask_b32_e32 v13, v19, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s5, 1
+; VI-NEXT:    v_lshrrev_b32_e32 v20, 16, v4
+; VI-NEXT:    v_cndmask_b32_e32 v4, v4, v12, vcc
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    v_cndmask_b32_e32 v12, v20, v12, vcc
+; VI-NEXT:    v_lshlrev_b32_e32 v13, 16, v13
+; VI-NEXT:    v_lshlrev_b32_e32 v12, 16, v12
+; VI-NEXT:    v_or_b32_sdwa v5, v5, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v4, v4, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
+; VI-NEXT:    flat_store_dwordx4 v[10:11], v[0:3]
+; VI-NEXT:    s_endpgm
+;
+; GFX900-LABEL: v_insertelement_v16bf16_dynamic:
+; GFX900:       ; %bb.0:
+; GFX900-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX900-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GFX900-NEXT:    v_lshlrev_b32_e32 v0, 5, v0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    global_load_dwordx4 v[1:4], v0, s[2:3]
+; GFX900-NEXT:    global_load_dwordx4 v[5:8], v0, s[2:3] offset:16
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 6
+; GFX900-NEXT:    v_mov_b32_e32 v9, s6
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 7
+; GFX900-NEXT:    s_mov_b32 s2, 0x5040100
+; GFX900-NEXT:    s_waitcnt vmcnt(1)
+; GFX900-NEXT:    v_cndmask_b32_e32 v10, v4, v9, vcc
+; GFX900-NEXT:    v_lshrrev_b32_e32 v4, 16, v4
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 4
+; GFX900-NEXT:    v_cndmask_b32_e32 v4, v4, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 5
+; GFX900-NEXT:    v_lshrrev_b32_e32 v11, 16, v3
+; GFX900-NEXT:    v_cndmask_b32_e32 v3, v3, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 2
+; GFX900-NEXT:    v_perm_b32 v4, v4, v10, s2
+; GFX900-NEXT:    v_cndmask_b32_e32 v10, v11, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 3
+; GFX900-NEXT:    v_lshrrev_b32_e32 v12, 16, v2
+; GFX900-NEXT:    v_cndmask_b32_e32 v2, v2, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 0
+; GFX900-NEXT:    v_perm_b32 v3, v10, v3, s2
+; GFX900-NEXT:    v_cndmask_b32_e32 v10, v12, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 1
+; GFX900-NEXT:    v_lshrrev_b32_e32 v13, 16, v1
+; GFX900-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 14
+; GFX900-NEXT:    v_perm_b32 v2, v10, v2, s2
+; GFX900-NEXT:    v_cndmask_b32_e32 v10, v13, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 15
+; GFX900-NEXT:    s_waitcnt vmcnt(0)
+; GFX900-NEXT:    v_lshrrev_b32_e32 v14, 16, v8
+; GFX900-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 12
+; GFX900-NEXT:    v_perm_b32 v1, v10, v1, s2
+; GFX900-NEXT:    v_cndmask_b32_e32 v10, v14, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 13
+; GFX900-NEXT:    v_lshrrev_b32_e32 v15, 16, v7
+; GFX900-NEXT:    v_cndmask_b32_e32 v7, v7, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 10
+; GFX900-NEXT:    v_perm_b32 v8, v10, v8, s2
+; GFX900-NEXT:    v_cndmask_b32_e32 v10, v15, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 11
+; GFX900-NEXT:    v_lshrrev_b32_e32 v16, 16, v6
+; GFX900-NEXT:    v_cndmask_b32_e32 v6, v6, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 8
+; GFX900-NEXT:    v_perm_b32 v7, v10, v7, s2
+; GFX900-NEXT:    v_cndmask_b32_e32 v10, v16, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    s_cmp_eq_u32 s7, 9
+; GFX900-NEXT:    v_lshrrev_b32_e32 v17, 16, v5
+; GFX900-NEXT:    v_cndmask_b32_e32 v5, v5, v9, vcc
+; GFX900-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX900-NEXT:    v_cndmask_b32_e32 v9, v17, v9, vcc
+; GFX900-NEXT:    v_perm_b32 v6, v10, v6, s2
+; GFX900-NEXT:    v_perm_b32 v5, v9, v5, s2
+; GFX900-NEXT:    global_store_dwordx4 v0, v[5:8], s[0:1] offset:16
+; GFX900-NEXT:    global_store_dwordx4 v0, v[1:4], s[0:1]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX940-LABEL: v_insertelement_v16bf16_dynamic:
+; GFX940:       ; %bb.0:
+; GFX940-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x0
+; GFX940-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x10
+; GFX940-NEXT:    v_lshlrev_b32_e32 v8, 5, v0
+; GFX940-NEXT:    s_mov_b32 s0, 0x5040100
+; GFX940-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX940-NEXT:    global_load_dwordx4 v[0:3], v8, s[6:7]
+; GFX940-NEXT:    global_load_dwordx4 v[4:7], v8, s[6:7] offset:16
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 6
+; GFX940-NEXT:    v_mov_b32_e32 v9, s2
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 7
+; GFX940-NEXT:    s_waitcnt vmcnt(1)
+; GFX940-NEXT:    v_cndmask_b32_e32 v10, v3, v9, vcc
+; GFX940-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 4
+; GFX940-NEXT:    v_cndmask_b32_e32 v3, v3, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 5
+; GFX940-NEXT:    v_lshrrev_b32_e32 v11, 16, v2
+; GFX940-NEXT:    v_cndmask_b32_e32 v2, v2, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 2
+; GFX940-NEXT:    v_perm_b32 v3, v3, v10, s0
+; GFX940-NEXT:    v_cndmask_b32_e32 v10, v11, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 3
+; GFX940-NEXT:    v_lshrrev_b32_e32 v12, 16, v1
+; GFX940-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 0
+; GFX940-NEXT:    v_perm_b32 v2, v10, v2, s0
+; GFX940-NEXT:    v_cndmask_b32_e32 v10, v12, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 1
+; GFX940-NEXT:    v_lshrrev_b32_e32 v13, 16, v0
+; GFX940-NEXT:    v_cndmask_b32_e32 v0, v0, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 14
+; GFX940-NEXT:    v_perm_b32 v1, v10, v1, s0
+; GFX940-NEXT:    v_cndmask_b32_e32 v10, v13, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 15
+; GFX940-NEXT:    s_waitcnt vmcnt(0)
+; GFX940-NEXT:    v_lshrrev_b32_e32 v14, 16, v7
+; GFX940-NEXT:    v_cndmask_b32_e32 v7, v7, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 12
+; GFX940-NEXT:    v_perm_b32 v0, v10, v0, s0
+; GFX940-NEXT:    v_cndmask_b32_e32 v10, v14, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 13
+; GFX940-NEXT:    v_lshrrev_b32_e32 v15, 16, v6
+; GFX940-NEXT:    v_cndmask_b32_e32 v6, v6, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 10
+; GFX940-NEXT:    v_perm_b32 v7, v10, v7, s0
+; GFX940-NEXT:    v_cndmask_b32_e32 v10, v15, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 11
+; GFX940-NEXT:    v_lshrrev_b32_e32 v16, 16, v5
+; GFX940-NEXT:    v_cndmask_b32_e32 v5, v5, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 8
+; GFX940-NEXT:    v_perm_b32 v6, v10, v6, s0
+; GFX940-NEXT:    v_cndmask_b32_e32 v10, v16, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    s_cmp_eq_u32 s3, 9
+; GFX940-NEXT:    v_lshrrev_b32_e32 v17, 16, v4
+; GFX940-NEXT:    v_cndmask_b32_e32 v4, v4, v9, vcc
+; GFX940-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX940-NEXT:    v_cndmask_b32_e32 v9, v17, v9, vcc
+; GFX940-NEXT:    v_perm_b32 v5, v10, v5, s0
+; GFX940-NEXT:    v_perm_b32 v4, v9, v4, s0
+; GFX940-NEXT:    global_store_dwordx4 v8, v[4:7], s[4:5] offset:16 sc0 sc1
+; GFX940-NEXT:    global_store_dwordx4 v8, v[0:3], s[4:5] sc0 sc1
+; GFX940-NEXT:    s_endpgm
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <16 x bfloat>, ptr addrspace(1) %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <16 x bfloat>, ptr addrspace(1) %out, i64 %tid.ext
+  %vec = load <16 x bfloat>, ptr addrspace(1) %in.gep
+  %val.trunc = trunc i32 %val to i16
+  %val.cvt = bitcast i16 %val.trunc to bfloat
+  %vecins = insertelement <16 x bfloat> %vec, bfloat %val.cvt, i32 %n
+  store <16 x bfloat> %vecins, ptr addrspace(1) %out.gep
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }


        


More information about the llvm-commits mailing list