[llvm] r340295 - [AMDGPU] Support idot2 pattern.
Farhana Aleen via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 21 09:21:15 PDT 2018
Author: faaleen
Date: Tue Aug 21 09:21:15 2018
New Revision: 340295
URL: http://llvm.org/viewvc/llvm-project?rev=340295&view=rev
Log:
[AMDGPU] Support idot2 pattern.
Summary: Transform add (mul ((i32)S0.x, (i32)S1.x),
add (mul ((i32)S0.y, (i32)S1.y), (i32)S3)) => i/udot2((v2i16)S0, (v2i16)S1, (i32)S3)
Author: FarhanaAleen
Reviewed By: arsenm
Subscribers: llvm-commits, AMDGPU
Differential Revision: https://reviews.llvm.org/D50024
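
For reference, a minimal sketch of the IR shape this targets (hypothetical function name; it mirrors the udot2 test added below, which is expected to select v_dot2_u32_u16 when compiled with llc -mtriple=amdgcn -mcpu=gfx906):

  define amdgpu_kernel void @udot2_sketch(<2 x i16> %a, <2 x i16> %b,
                                          i32 %acc, i32 addrspace(1)* %out) {
  entry:
    ; Unsigned 16-bit lanes widened to i32, multiplied pairwise, and
    ; accumulated into %acc -- the shape UDot2Pat matches.
    %a0 = extractelement <2 x i16> %a, i64 0
    %a1 = extractelement <2 x i16> %a, i64 1
    %b0 = extractelement <2 x i16> %b, i64 0
    %b1 = extractelement <2 x i16> %b, i64 1
    %a0.ext = zext i16 %a0 to i32
    %a1.ext = zext i16 %a1 to i32
    %b0.ext = zext i16 %b0 to i32
    %b1.ext = zext i16 %b1 to i32
    %mul.lo = mul i32 %b0.ext, %a0.ext
    %mul.hi = mul i32 %b1.ext, %a1.ext
    %add = add i32 %mul.hi, %acc
    %dot = add i32 %add, %mul.lo
    store i32 %dot, i32 addrspace(1)* %out
    ret void
  }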
Added:
llvm/trunk/test/CodeGen/AMDGPU/idot2.ll
Modified:
llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td
llvm/trunk/lib/Target/AMDGPU/VOP3PInstructions.td
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td?rev=340295&r1=340294&r2=340295&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td Tue Aug 21 09:21:15 2018
@@ -167,6 +167,9 @@ def shl_oneuse : HasOneUseBinOp<shl>;
def select_oneuse : HasOneUseTernaryOp<select>;
+def AMDGPUmul_u24_oneuse : HasOneUseBinOp<AMDGPUmul_u24>;
+def AMDGPUmul_i24_oneuse : HasOneUseBinOp<AMDGPUmul_i24>;
+
def srl_16 : PatFrag<
(ops node:$src0), (srl_oneuse node:$src0, (i32 16))
>;
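
The new *_oneuse multiply fragments (together with add_oneuse in the patterns below) keep the combine limited to nodes whose only user is the dot-product chain; when a multiply or the inner add is reused elsewhere, folding it into v_dot2 would not remove it, so the pattern is not meant to fire. A minimal sketch of such a case (hypothetical function name, mirroring the udot2_MultipleUses_mul1 test added below, which keeps the v_mad_u32_u24 sequence even on gfx906):

  define amdgpu_kernel void @udot2_mul_reused(<2 x i16> %a, <2 x i16> %b,
                                              i32 %acc, i32 addrspace(1)* %out) {
  entry:
    %a0 = extractelement <2 x i16> %a, i64 0
    %a1 = extractelement <2 x i16> %a, i64 1
    %b0 = extractelement <2 x i16> %b, i64 0
    %b1 = extractelement <2 x i16> %b, i64 1
    %a0.ext = zext i16 %a0 to i32
    %a1.ext = zext i16 %a1 to i32
    %b0.ext = zext i16 %b0 to i32
    %b1.ext = zext i16 %b1 to i32
    %mul.lo = mul i32 %b0.ext, %a0.ext
    %mul.hi = mul i32 %b1.ext, %a1.ext
    ; %mul.lo has two uses, so it fails the one-use check and no v_dot2 is formed.
    %acc2 = add i32 %mul.lo, %acc
    %add = add i32 %mul.hi, %acc2
    %dot = add i32 %add, %mul.lo
    store i32 %dot, i32 addrspace(1)* %out
    ret void
  }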
Modified: llvm/trunk/lib/Target/AMDGPU/VOP3PInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/VOP3PInstructions.td?rev=340295&r1=340294&r2=340295&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOP3PInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/VOP3PInstructions.td Tue Aug 21 09:21:15 2018
@@ -165,6 +165,23 @@ def V_FMA_MIXHI_F16 : VOP3_VOP3PInst<"v_
defm : MadFmaMixPats<fma, V_FMA_MIX_F32, V_FMA_MIXLO_F16, V_FMA_MIXHI_F16>;
}
+class UDot2Pat<Instruction Inst> : GCNPat <
+ (add (add_oneuse (AMDGPUmul_u24_oneuse (srl i32:$src0, (i32 16)),
+ (srl i32:$src1, (i32 16))), i32:$src2),
+ (AMDGPUmul_u24_oneuse (and i32:$src0, (i32 65535)),
+ (and i32:$src1, (i32 65535)))
+ ),
+ (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))
+>;
+
+class SDot2Pat<Instruction Inst> : GCNPat <
+ (add (add_oneuse (AMDGPUmul_i24_oneuse (sra i32:$src0, (i32 16)),
+ (sra i32:$src1, (i32 16))), i32:$src2),
+ (AMDGPUmul_i24_oneuse (sext_inreg i32:$src0, i16),
+ (sext_inreg i32:$src1, i16))),
+ (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))
+>;
+
let SubtargetPredicate = HasDLInsts in {
def V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16", VOP3_Profile<VOP_F32_V2F16_V2F16_F32>>;
@@ -192,6 +209,9 @@ defm : DotPats<int_amdgcn_udot4, V_DOT4_
defm : DotPats<int_amdgcn_sdot8, V_DOT8_I32_I4>;
defm : DotPats<int_amdgcn_udot8, V_DOT8_U32_U4>;
+def : UDot2Pat<V_DOT2_U32_U16>;
+def : SDot2Pat<V_DOT2_I32_I16>;
+
} // End SubtargetPredicate = HasDLInsts
multiclass VOP3P_Real_vi<bits<10> op> {
Added: llvm/trunk/test/CodeGen/AMDGPU/idot2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/idot2.ll?rev=340295&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/idot2.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/idot2.ll Tue Aug 21 09:21:15 2018
@@ -0,0 +1,1850 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck --check-prefixes=GFX7 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck --check-prefixes=GFX89 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck --check-prefixes=GFX89 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN-DL %s
+
+; add(mul(S0.x, S1.x),
+;     add(mul(S0.y, S1.y), S3)) -> v_dot2_{I|U}32_{I|U}16(S0, S1, S3)
+
+define amdgpu_kernel void @udot2(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: udot2:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshr_b32 s7, s4, 16
+; GFX7-NEXT: s_lshr_b32 s9, s5, 16
+; GFX7-NEXT: s_and_b32 s4, s4, s8
+; GFX7-NEXT: v_mov_b32_e32 v0, s7
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_u32_u24 v0, s9, v0, v1
+; GFX7-NEXT: s_and_b32 s5, s5, s8
+; GFX7-NEXT: v_mov_b32_e32 v1, s4
+; GFX7-NEXT: v_mad_u32_u24 v0, s5, v1, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: udot2:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_mov_b32 s2, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s4, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s6, s3, s2
+; GFX89-NEXT: s_lshr_b32 s3, s3, 16
+; GFX89-NEXT: s_and_b32 s2, s4, s2
+; GFX89-NEXT: s_lshr_b32 s4, s4, 16
+; GFX89-NEXT: v_mov_b32_e32 v0, s5
+; GFX89-NEXT: v_mov_b32_e32 v1, s3
+; GFX89-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v1, s6
+; GFX89-NEXT: v_mad_u32_u24 v2, s2, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: udot2:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s3, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[0:1], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s2
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s4
+; GCN-DL-NEXT: v_dot2_u32_u16 v2, s3, v2, v3
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul nuw i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul nuw i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %s3
+ %add6 = add i32 %add, %mul1
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+; TODO: Support this pattern
+; add(S3,
+;     add(mul(S0.x, S1.x), mul(S0.y, S1.y))) -> v_dot2_{I|U}32_{I|U}16(S0, S1, S3)
+define amdgpu_kernel void @udot2_MulMul(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: udot2_MulMul:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshr_b32 s7, s4, 16
+; GFX7-NEXT: s_and_b32 s4, s4, s8
+; GFX7-NEXT: s_lshr_b32 s9, s5, 16
+; GFX7-NEXT: s_and_b32 s5, s5, s8
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mul_u32_u24_e32 v0, s5, v0
+; GFX7-NEXT: v_mov_b32_e32 v1, s7
+; GFX7-NEXT: v_mad_u32_u24 v0, s9, v1, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, s6, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: udot2_MulMul:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_mov_b32 s2, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s4, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s6, s3, s2
+; GFX89-NEXT: s_and_b32 s2, s4, s2
+; GFX89-NEXT: v_mov_b32_e32 v0, s6
+; GFX89-NEXT: s_lshr_b32 s3, s3, 16
+; GFX89-NEXT: s_lshr_b32 s4, s4, 16
+; GFX89-NEXT: v_mov_b32_e32 v1, s3
+; GFX89-NEXT: v_mul_u32_u24_e32 v0, s2, v0
+; GFX89-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GFX89-NEXT: v_add_u32_e32 v2
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: udot2_MulMul:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_mov_b32 s2, 0xffff
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s3, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s5, s[0:1], 0x0
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_and_b32 s6, s3, s2
+; GCN-DL-NEXT: s_and_b32 s2, s4, s2
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s6
+; GCN-DL-NEXT: s_lshr_b32 s3, s3, 16
+; GCN-DL-NEXT: s_lshr_b32 s4, s4, 16
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s3
+; GCN-DL-NEXT: v_mul_u32_u24_e32 v0, s2, v0
+; GCN-DL-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GCN-DL-NEXT: v_add_u32_e32 v2, s5, v0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul nuw i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul nuw i32 %conv4, %conv3
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %mul1
+ %add6 = add i32 %add, %s3
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @idot2(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: idot2:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_sext_i32_i16 s7, s4
+; GFX7-NEXT: s_ashr_i32 s4, s4, 16
+; GFX7-NEXT: s_sext_i32_i16 s8, s5
+; GFX7-NEXT: s_ashr_i32 s5, s5, 16
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_i32_i24 v0, s5, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v1, s7
+; GFX7-NEXT: v_mad_i32_i24 v0, s8, v1, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: idot2:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s2, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s3, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_sext_i32_i16 s0, s2
+; GFX89-NEXT: s_ashr_i32 s2, s2, 16
+; GFX89-NEXT: s_sext_i32_i16 s1, s3
+; GFX89-NEXT: s_ashr_i32 s3, s3, 16
+; GFX89-NEXT: v_mov_b32_e32 v2, s4
+; GFX89-NEXT: v_mov_b32_e32 v3, s2
+; GFX89-NEXT: v_mad_i32_i24 v2, s3, v3, v2
+; GFX89-NEXT: v_mov_b32_e32 v3, s0
+; GFX89-NEXT: v_mad_i32_i24 v2, s1, v3, v2
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: idot2:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s3, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[0:1], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s2
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s4
+; GCN-DL-NEXT: v_dot2_i32_i16 v2, s3, v2, v3
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = sext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = sext i16 %s2.elt1 to i32
+ %mul1 = mul nuw i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = sext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = sext i16 %s2.elt2 to i32
+ %mul2 = mul nuw i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %s3
+ %add6 = add i32 %add, %mul1
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @idot2_MixedTypedMul(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: idot2_MixedTypedMul:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshr_b32 s7, s4, 16
+; GFX7-NEXT: s_lshr_b32 s8, s5, 16
+; GFX7-NEXT: s_sext_i32_i16 s4, s4
+; GFX7-NEXT: v_mov_b32_e32 v0, s7
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_u32_u24 v0, s8, v0, v1
+; GFX7-NEXT: s_sext_i32_i16 s5, s5
+; GFX7-NEXT: v_mov_b32_e32 v1, s4
+; GFX7-NEXT: v_mad_i32_i24 v0, s5, v1, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: idot2_MixedTypedMul:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s2, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s3, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_sext_i32_i16 s0, s2
+; GFX89-NEXT: s_lshr_b32 s2, s2, 16
+; GFX89-NEXT: s_sext_i32_i16 s1, s3
+; GFX89-NEXT: s_lshr_b32 s3, s3, 16
+; GFX89-NEXT: v_mov_b32_e32 v2, s4
+; GFX89-NEXT: v_mov_b32_e32 v3, s2
+; GFX89-NEXT: v_mad_u32_u24 v2, s3, v3, v2
+; GFX89-NEXT: v_mov_b32_e32 v3, s0
+; GFX89-NEXT: v_mad_i32_i24 v2, s1, v3, v2
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: idot2_MixedTypedMul:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s3, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[0:1], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_sext_i32_i16 s0, s2
+; GCN-DL-NEXT: s_lshr_b32 s2, s2, 16
+; GCN-DL-NEXT: s_sext_i32_i16 s1, s3
+; GCN-DL-NEXT: s_lshr_b32 s3, s3, 16
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s4
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s2
+; GCN-DL-NEXT: v_mad_u32_u24 v2, s3, v3, v2
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s0
+; GCN-DL-NEXT: v_mad_i32_i24 v2, s1, v3, v2
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = sext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = sext i16 %s2.elt1 to i32
+ %mul1 = mul nuw i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul nuw i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %s3
+ %add6 = add i32 %add, %mul1
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @udot2_alt_AddOperands(<2 x i16> addrspace(1)* %src1,
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul nuw i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul nuw i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %s3, %mul2
+ %add6 = add i32 %mul1, %add
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @idot2_MixedExt(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: udot2_alt_AddOperands:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshr_b32 s7, s4, 16
+; GFX7-NEXT: s_lshr_b32 s9, s5, 16
+; GFX7-NEXT: s_and_b32 s4, s4, s8
+; GFX7-NEXT: v_mov_b32_e32 v0, s7
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_u32_u24 v0, s9, v0, v1
+; GFX7-NEXT: s_and_b32 s5, s5, s8
+; GFX7-NEXT: v_mov_b32_e32 v1, s4
+; GFX7-NEXT: v_mad_u32_u24 v0, s5, v1, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: udot2_alt_AddOperands:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_mov_b32 s2, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s4, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s6, s3, s2
+; GFX89-NEXT: s_lshr_b32 s3, s3, 16
+; GFX89-NEXT: s_and_b32 s2, s4, s2
+; GFX89-NEXT: s_lshr_b32 s4, s4, 16
+; GFX89-NEXT: v_mov_b32_e32 v0, s5
+; GFX89-NEXT: v_mov_b32_e32 v1, s3
+; GFX89-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v1, s6
+; GFX89-NEXT: v_mad_u32_u24 v2, s2, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: udot2_alt_AddOperands:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s3, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[0:1], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s2
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s4
+; GCN-DL-NEXT: v_dot2_u32_u16 v2, s3, v2, v3
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = sext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul nuw i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = sext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = sext i16 %s2.elt2 to i32
+ %mul2 = mul nuw i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %s3
+ %add6 = add i32 %add, %mul1
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @notudot2_SameVec(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: notudot2_SameVec:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX7-NEXT: s_lshr_b32 s5, s5, 16
+; GFX7-NEXT: v_mov_b32_e32 v0, s6
+; GFX7-NEXT: v_mad_u32_u24 v0, s5, s5, v0
+; GFX7-NEXT: v_mad_u32_u24 v0, s4, s4, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: notudot2_SameVec:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s2, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s3, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s0, s2, 0xffff
+; GFX89-NEXT: s_lshr_b32 s1, s3, 16
+; GFX89-NEXT: v_mov_b32_e32 v2, s4
+; GFX89-NEXT: v_mad_u32_u24 v2, s1, s1, v2
+; GFX89-NEXT: v_mad_u32_u24 v2, s0, s0, v2
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: notudot2_SameVec:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s3, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[0:1], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_and_b32 s0, s2, 0xffff
+; GCN-DL-NEXT: s_lshr_b32 s1, s3, 16
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s4
+; GCN-DL-NEXT: v_mad_u32_u24 v2, s1, s1, v2
+; GCN-DL-NEXT: v_mad_u32_u24 v2, s0, s0, v2
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %s3
+ %add6 = add i32 %add, %mul1
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @udot2_v4i16(<4 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: udot2_v4i16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_and_b32 s7, s4, s8
+; GFX7-NEXT: s_lshr_b32 s4, s4, 16
+; GFX7-NEXT: s_and_b32 s8, s5, s8
+; GFX7-NEXT: s_lshr_b32 s5, s5, 16
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_u32_u24 v0, s5, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v1, s7
+; GFX7-NEXT: v_mad_u32_u24 v0, s8, v1, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: udot2_v4i16:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_mov_b32 s2, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s4, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s6, s3, s2
+; GFX89-NEXT: s_lshr_b32 s3, s3, 16
+; GFX89-NEXT: s_and_b32 s2, s4, s2
+; GFX89-NEXT: s_lshr_b32 s4, s4, 16
+; GFX89-NEXT: v_mov_b32_e32 v0, s5
+; GFX89-NEXT: v_mov_b32_e32 v1, s3
+; GFX89-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v1, s6
+; GFX89-NEXT: v_mad_u32_u24 v2, s2, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: udot2_v4i16:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s3, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[0:1], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s2
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s4
+; GCN-DL-NEXT: v_dot2_u32_u16 v2, s3, v2, v3
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <4 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <4 x i16>, <4 x i16> addrspace(1)* %src1
+ %vec2 = load <4 x i16>, <4 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <4 x i16> %vec1, i64 0
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <4 x i16> %vec2, i64 0
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <4 x i16> %vec1, i64 1
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <4 x i16> %vec2, i64 1
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %s3
+ %add6 = add i32 %add, %mul1
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @udot2_v4i16_Hi(<4 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: udot2_v4i16_Hi:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x1
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x1
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_and_b32 s7, s4, s8
+; GFX7-NEXT: s_lshr_b32 s4, s4, 16
+; GFX7-NEXT: s_and_b32 s8, s5, s8
+; GFX7-NEXT: s_lshr_b32 s5, s5, 16
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_u32_u24 v0, s5, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v1, s7
+; GFX7-NEXT: v_mad_u32_u24 v0, s8, v1, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: udot2_v4i16_Hi:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_mov_b32 s2, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s3, s[4:5], 0x4
+; GFX89-NEXT: s_load_dword s4, s[6:7], 0x4
+; GFX89-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s6, s3, s2
+; GFX89-NEXT: s_lshr_b32 s3, s3, 16
+; GFX89-NEXT: s_and_b32 s2, s4, s2
+; GFX89-NEXT: s_lshr_b32 s4, s4, 16
+; GFX89-NEXT: v_mov_b32_e32 v0, s5
+; GFX89-NEXT: v_mov_b32_e32 v1, s3
+; GFX89-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v1, s6
+; GFX89-NEXT: v_mad_u32_u24 v2, s2, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: udot2_v4i16_Hi:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[4:5], 0x4
+; GCN-DL-NEXT: s_load_dword s3, s[6:7], 0x4
+; GCN-DL-NEXT: s_load_dword s4, s[0:1], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s2
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s4
+; GCN-DL-NEXT: v_dot2_u32_u16 v2, s3, v2, v3
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <4 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <4 x i16>, <4 x i16> addrspace(1)* %src1
+ %vec2 = load <4 x i16>, <4 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <4 x i16> %vec1, i64 2
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <4 x i16> %vec2, i64 2
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <4 x i16> %vec1, i64 3
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <4 x i16> %vec2, i64 3
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %s3
+ %add6 = add i32 %add, %mul1
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @notudot2_v4i16_Even(<4 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: notudot2_v4i16_Even:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s9, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_and_b32 s5, s5, s8
+; GFX7-NEXT: s_and_b32 s4, s4, s8
+; GFX7-NEXT: s_and_b32 s7, s7, s8
+; GFX7-NEXT: v_mov_b32_e32 v0, s5
+; GFX7-NEXT: v_mov_b32_e32 v1, s9
+; GFX7-NEXT: v_mad_u32_u24 v0, s7, v0, v1
+; GFX7-NEXT: s_and_b32 s6, s6, s8
+; GFX7-NEXT: v_mov_b32_e32 v1, s4
+; GFX7-NEXT: v_mad_u32_u24 v0, s6, v1, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: notudot2_v4i16_Even:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_mov_b32 s8, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GFX89-NEXT: s_load_dwordx2 s[4:5], s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s3, s3, s8
+; GFX89-NEXT: s_and_b32 s2, s2, s8
+; GFX89-NEXT: s_and_b32 s5, s5, s8
+; GFX89-NEXT: v_mov_b32_e32 v0, s6
+; GFX89-NEXT: v_mov_b32_e32 v1, s3
+; GFX89-NEXT: v_mad_u32_u24 v0, s5, v1, v0
+; GFX89-NEXT: s_and_b32 s4, s4, s8
+; GFX89-NEXT: v_mov_b32_e32 v1, s2
+; GFX89-NEXT: v_mad_u32_u24 v2, s4, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: notudot2_v4i16_Even:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_mov_b32 s8, 0xffff
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dwordx2 s[4:5], s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s6, s[0:1], 0x0
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_and_b32 s3, s3, s8
+; GCN-DL-NEXT: s_and_b32 s2, s2, s8
+; GCN-DL-NEXT: s_and_b32 s5, s5, s8
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s6
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s3
+; GCN-DL-NEXT: v_mad_u32_u24 v0, s5, v1, v0
+; GCN-DL-NEXT: s_and_b32 s4, s4, s8
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s2
+; GCN-DL-NEXT: v_mad_u32_u24 v2, s4, v1, v0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <4 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <4 x i16>, <4 x i16> addrspace(1)* %src1
+ %vec2 = load <4 x i16>, <4 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <4 x i16> %vec1, i64 0
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <4 x i16> %vec2, i64 0
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <4 x i16> %vec1, i64 2
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <4 x i16> %vec2, i64 2
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %s3
+ %add6 = add i32 %add, %mul1
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @notudot2_v4i16_Middle(<4 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: notudot2_v4i16_Middle:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s9, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_and_b32 s5, s5, s8
+; GFX7-NEXT: s_lshr_b32 s4, s4, 16
+; GFX7-NEXT: s_and_b32 s7, s7, s8
+; GFX7-NEXT: v_mov_b32_e32 v0, s5
+; GFX7-NEXT: v_mov_b32_e32 v1, s9
+; GFX7-NEXT: v_mad_u32_u24 v0, s7, v0, v1
+; GFX7-NEXT: s_lshr_b32 s6, s6, 16
+; GFX7-NEXT: v_mov_b32_e32 v1, s4
+; GFX7-NEXT: v_mad_u32_u24 v0, s6, v1, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: notudot2_v4i16_Middle:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_mov_b32 s8, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GFX89-NEXT: s_load_dwordx2 s[4:5], s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s3, s3, s8
+; GFX89-NEXT: s_lshr_b32 s2, s2, 16
+; GFX89-NEXT: s_and_b32 s5, s5, s8
+; GFX89-NEXT: v_mov_b32_e32 v0, s6
+; GFX89-NEXT: v_mov_b32_e32 v1, s3
+; GFX89-NEXT: v_mad_u32_u24 v0, s5, v1, v0
+; GFX89-NEXT: s_lshr_b32 s4, s4, 16
+; GFX89-NEXT: v_mov_b32_e32 v1, s2
+; GFX89-NEXT: v_mad_u32_u24 v2, s4, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: notudot2_v4i16_Middle:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_mov_b32 s8, 0xffff
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dwordx2 s[4:5], s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s6, s[0:1], 0x0
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_and_b32 s3, s3, s8
+; GCN-DL-NEXT: s_lshr_b32 s2, s2, 16
+; GCN-DL-NEXT: s_and_b32 s5, s5, s8
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s6
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s3
+; GCN-DL-NEXT: v_mad_u32_u24 v0, s5, v1, v0
+; GCN-DL-NEXT: s_lshr_b32 s4, s4, 16
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s2
+; GCN-DL-NEXT: v_mad_u32_u24 v2, s4, v1, v0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <4 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <4 x i16>, <4 x i16> addrspace(1)* %src1
+ %vec2 = load <4 x i16>, <4 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <4 x i16> %vec1, i64 1
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <4 x i16> %vec2, i64 1
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <4 x i16> %vec1, i64 2
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <4 x i16> %vec2, i64 2
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %s3
+ %add6 = add i32 %add, %mul1
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @notudot2_DiffIndex(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: notudot2_DiffIndex:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshr_b32 s7, s4, 16
+; GFX7-NEXT: s_lshr_b32 s9, s5, 16
+; GFX7-NEXT: s_and_b32 s4, s4, s8
+; GFX7-NEXT: s_and_b32 s5, s5, s8
+; GFX7-NEXT: v_mov_b32_e32 v0, s7
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_u32_u24 v0, s5, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v1, s4
+; GFX7-NEXT: v_mad_u32_u24 v0, s9, v1, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: notudot2_DiffIndex:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_mov_b32 s2, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s4, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s6, s3, s2
+; GFX89-NEXT: s_lshr_b32 s3, s3, 16
+; GFX89-NEXT: s_and_b32 s2, s4, s2
+; GFX89-NEXT: v_mov_b32_e32 v0, s5
+; GFX89-NEXT: v_mov_b32_e32 v1, s3
+; GFX89-NEXT: v_mad_u32_u24 v0, s2, v1, v0
+; GFX89-NEXT: s_lshr_b32 s7, s4, 16
+; GFX89-NEXT: v_mov_b32_e32 v1, s6
+; GFX89-NEXT: v_mad_u32_u24 v2, s7, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: notudot2_DiffIndex:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_mov_b32 s2, 0xffff
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s3, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s5, s[0:1], 0x0
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_and_b32 s6, s3, s2
+; GCN-DL-NEXT: s_lshr_b32 s3, s3, 16
+; GCN-DL-NEXT: s_and_b32 s2, s4, s2
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s5
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s3
+; GCN-DL-NEXT: v_mad_u32_u24 v0, s2, v1, v0
+; GCN-DL-NEXT: s_lshr_b32 s7, s4, 16
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s6
+; GCN-DL-NEXT: v_mad_u32_u24 v2, s7, v1, v0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 1
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 0
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %s3
+ %add6 = add i32 %add, %mul1
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @udot2_MultipleUses_add1(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: udot2_MultipleUses_add1:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshr_b32 s7, s4, 16
+; GFX7-NEXT: s_lshr_b32 s9, s5, 16
+; GFX7-NEXT: s_and_b32 s4, s4, s8
+; GFX7-NEXT: v_mov_b32_e32 v0, s7
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_u32_u24 v0, s9, v0, v1
+; GFX7-NEXT: s_and_b32 s5, s5, s8
+; GFX7-NEXT: v_mov_b32_e32 v1, s4
+; GFX7-NEXT: v_mad_u32_u24 v1, s5, v1, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: udot2_MultipleUses_add1:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_mov_b32 s2, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s4, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s6, s3, s2
+; GFX89-NEXT: s_lshr_b32 s3, s3, 16
+; GFX89-NEXT: s_and_b32 s2, s4, s2
+; GFX89-NEXT: s_lshr_b32 s4, s4, 16
+; GFX89-NEXT: v_mov_b32_e32 v0, s5
+; GFX89-NEXT: v_mov_b32_e32 v1, s3
+; GFX89-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v1, s6
+; GFX89-NEXT: v_mad_u32_u24 v1, s2, v1, v0
+; GFX89-NEXT: v_add_u32_e32 v2
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: udot2_MultipleUses_add1:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_mov_b32 s2, 0xffff
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s3, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s5, s[0:1], 0x0
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_and_b32 s6, s3, s2
+; GCN-DL-NEXT: s_lshr_b32 s3, s3, 16
+; GCN-DL-NEXT: s_and_b32 s2, s4, s2
+; GCN-DL-NEXT: s_lshr_b32 s4, s4, 16
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s5
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s3
+; GCN-DL-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s6
+; GCN-DL-NEXT: v_mad_u32_u24 v1, s2, v1, v0
+; GCN-DL-NEXT: v_add_u32_e32 v2, v1, v0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add1 = add i32 %mul2, %s3
+ %add2 = add i32 %add1, %mul1
+
+ %res = add i32 %add2, %add1
+ store i32 %res, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @idot2_MultipleUses_add1(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: idot2_MultipleUses_add1:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_sext_i32_i16 s7, s4
+; GFX7-NEXT: s_ashr_i32 s4, s4, 16
+; GFX7-NEXT: s_sext_i32_i16 s8, s5
+; GFX7-NEXT: s_ashr_i32 s5, s5, 16
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_i32_i24 v0, s5, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v1, s7
+; GFX7-NEXT: v_mad_i32_i24 v1, s8, v1, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: idot2_MultipleUses_add1:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s2, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s3, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_sext_i32_i16 s0, s2
+; GFX89-NEXT: s_ashr_i32 s2, s2, 16
+; GFX89-NEXT: s_sext_i32_i16 s1, s3
+; GFX89-NEXT: s_ashr_i32 s3, s3, 16
+; GFX89-NEXT: v_mov_b32_e32 v2, s4
+; GFX89-NEXT: v_mov_b32_e32 v3, s2
+; GFX89-NEXT: v_mad_i32_i24 v2, s3, v3, v2
+; GFX89-NEXT: v_mov_b32_e32 v3, s0
+; GFX89-NEXT: v_mad_i32_i24 v3, s1, v3, v2
+; GFX89-NEXT: v_add_u32_e32 v2
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: idot2_MultipleUses_add1:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s3, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[0:1], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_sext_i32_i16 s0, s2
+; GCN-DL-NEXT: s_ashr_i32 s2, s2, 16
+; GCN-DL-NEXT: s_sext_i32_i16 s1, s3
+; GCN-DL-NEXT: s_ashr_i32 s3, s3, 16
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s4
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s2
+; GCN-DL-NEXT: v_mad_i32_i24 v2, s3, v3, v2
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s0
+; GCN-DL-NEXT: v_mad_i32_i24 v3, s1, v3, v2
+; GCN-DL-NEXT: v_add_u32_e32 v2, v3, v2
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = sext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = sext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = sext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = sext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add1 = add i32 %mul2, %s3
+ %add2 = add i32 %add1, %mul1
+
+ %res = add i32 %add2, %add1
+ store i32 %res, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @udot2_MultipleUses_mul1(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: udot2_MultipleUses_mul1:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshr_b32 s7, s4, 16
+; GFX7-NEXT: s_and_b32 s4, s4, s8
+; GFX7-NEXT: s_lshr_b32 s9, s5, 16
+; GFX7-NEXT: s_and_b32 s5, s5, s8
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_u32_u24 v1, s5, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v2, s7
+; GFX7-NEXT: v_mad_u32_u24 v1, s9, v2, v1
+; GFX7-NEXT: v_mad_u32_u24 v0, s5, v0, v1
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: udot2_MultipleUses_mul1:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_mov_b32 s2, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s4, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s6, s3, s2
+; GFX89-NEXT: s_and_b32 s2, s4, s2
+; GFX89-NEXT: s_lshr_b32 s3, s3, 16
+; GFX89-NEXT: v_mov_b32_e32 v0, s5
+; GFX89-NEXT: v_mov_b32_e32 v1, s6
+; GFX89-NEXT: s_lshr_b32 s4, s4, 16
+; GFX89-NEXT: v_mad_u32_u24 v0, s2, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v2, s3
+; GFX89-NEXT: v_mad_u32_u24 v0, s4, v2, v0
+; GFX89-NEXT: v_mad_u32_u24 v2, s2, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: udot2_MultipleUses_mul1:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_mov_b32 s2, 0xffff
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s3, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s5, s[0:1], 0x0
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_and_b32 s6, s3, s2
+; GCN-DL-NEXT: s_and_b32 s2, s4, s2
+; GCN-DL-NEXT: s_lshr_b32 s3, s3, 16
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s5
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s6
+; GCN-DL-NEXT: s_lshr_b32 s4, s4, 16
+; GCN-DL-NEXT: v_mad_u32_u24 v0, s2, v1, v0
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s3
+; GCN-DL-NEXT: v_mad_u32_u24 v0, s4, v2, v0
+; GCN-DL-NEXT: v_mad_u32_u24 v2, s2, v1, v0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add0 = add i32 %mul1, %s3
+
+ %add1 = add i32 %mul2, %add0
+ %add2 = add i32 %add1, %mul1
+
+ store i32 %add2, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @idot2_MultipleUses_mul1(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: idot2_MultipleUses_mul1:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_sext_i32_i16 s7, s4
+; GFX7-NEXT: s_sext_i32_i16 s8, s5
+; GFX7-NEXT: s_ashr_i32 s4, s4, 16
+; GFX7-NEXT: v_mov_b32_e32 v0, s7
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: s_ashr_i32 s5, s5, 16
+; GFX7-NEXT: v_mad_i32_i24 v1, s8, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v2, s4
+; GFX7-NEXT: v_mad_i32_i24 v1, s5, v2, v1
+; GFX7-NEXT: v_mad_i32_i24 v0, s8, v0, v1
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: idot2_MultipleUses_mul1:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s2, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s3, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_sext_i32_i16 s0, s2
+; GFX89-NEXT: s_sext_i32_i16 s1, s3
+; GFX89-NEXT: s_ashr_i32 s2, s2, 16
+; GFX89-NEXT: v_mov_b32_e32 v2, s4
+; GFX89-NEXT: v_mov_b32_e32 v3, s0
+; GFX89-NEXT: s_ashr_i32 s3, s3, 16
+; GFX89-NEXT: v_mad_i32_i24 v2, s1, v3, v2
+; GFX89-NEXT: v_mov_b32_e32 v4, s2
+; GFX89-NEXT: v_mad_i32_i24 v2, s3, v4, v2
+; GFX89-NEXT: v_mad_i32_i24 v2, s1, v3, v2
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: idot2_MultipleUses_mul1:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s3, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[0:1], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_sext_i32_i16 s0, s2
+; GCN-DL-NEXT: s_sext_i32_i16 s1, s3
+; GCN-DL-NEXT: s_ashr_i32 s2, s2, 16
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s4
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s0
+; GCN-DL-NEXT: s_ashr_i32 s3, s3, 16
+; GCN-DL-NEXT: v_mad_i32_i24 v2, s1, v3, v2
+; GCN-DL-NEXT: v_mov_b32_e32 v4, s2
+; GCN-DL-NEXT: v_mad_i32_i24 v2, s3, v4, v2
+; GCN-DL-NEXT: v_mad_i32_i24 v2, s1, v3, v2
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = sext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = sext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = sext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = sext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add0 = add i32 %mul1, %s3
+
+ %add1 = add i32 %mul2, %add0
+ %add2 = add i32 %add1, %mul1
+
+ store i32 %add2, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @udot2_MultipleUses_mul2(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: udot2_MultipleUses_mul2:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshr_b32 s7, s4, 16
+; GFX7-NEXT: s_lshr_b32 s9, s5, 16
+; GFX7-NEXT: v_mov_b32_e32 v0, s7
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_u32_u24 v1, s9, v0, v1
+; GFX7-NEXT: s_and_b32 s4, s4, s8
+; GFX7-NEXT: v_mad_u32_u24 v0, s9, v0, v1
+; GFX7-NEXT: s_and_b32 s5, s5, s8
+; GFX7-NEXT: v_mov_b32_e32 v1, s4
+; GFX7-NEXT: v_mad_u32_u24 v0, s5, v1, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: udot2_MultipleUses_mul2:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_mov_b32 s2, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s4, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s6, s3, s2
+; GFX89-NEXT: s_lshr_b32 s3, s3, 16
+; GFX89-NEXT: s_and_b32 s2, s4, s2
+; GFX89-NEXT: s_lshr_b32 s4, s4, 16
+; GFX89-NEXT: v_mov_b32_e32 v0, s5
+; GFX89-NEXT: v_mov_b32_e32 v1, s3
+; GFX89-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GFX89-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v1, s6
+; GFX89-NEXT: v_mad_u32_u24 v2, s2, v1, v0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: udot2_MultipleUses_mul2:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_mov_b32 s2, 0xffff
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s3, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s5, s[0:1], 0x0
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_and_b32 s6, s3, s2
+; GCN-DL-NEXT: s_lshr_b32 s3, s3, 16
+; GCN-DL-NEXT: s_and_b32 s2, s4, s2
+; GCN-DL-NEXT: s_lshr_b32 s4, s4, 16
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s5
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s3
+; GCN-DL-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GCN-DL-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s6
+; GCN-DL-NEXT: v_mad_u32_u24 v2, s2, v1, v0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = zext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = zext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = zext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = zext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add0 = add i32 %mul2, %s3
+
+ %add1 = add i32 %mul2, %add0
+ %add2 = add i32 %add1, %mul1
+
+ store i32 %add2, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @idot2_MultipleUses_mul2(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: idot2_MultipleUses_mul2:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_load_dword s6, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_sext_i32_i16 s7, s4
+; GFX7-NEXT: s_ashr_i32 s4, s4, 16
+; GFX7-NEXT: s_sext_i32_i16 s8, s5
+; GFX7-NEXT: s_ashr_i32 s5, s5, 16
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s6
+; GFX7-NEXT: v_mad_i32_i24 v1, s5, v0, v1
+; GFX7-NEXT: v_mad_i32_i24 v0, s5, v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v1, s7
+; GFX7-NEXT: v_mad_i32_i24 v0, s8, v1, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: idot2_MultipleUses_mul2:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s2, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s3, s[6:7], 0x0
+; GFX89-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_sext_i32_i16 s0, s2
+; GFX89-NEXT: s_ashr_i32 s2, s2, 16
+; GFX89-NEXT: s_sext_i32_i16 s1, s3
+; GFX89-NEXT: s_ashr_i32 s3, s3, 16
+; GFX89-NEXT: v_mov_b32_e32 v2, s4
+; GFX89-NEXT: v_mov_b32_e32 v3, s2
+; GFX89-NEXT: v_mad_i32_i24 v2, s3, v3, v2
+; GFX89-NEXT: v_mad_i32_i24 v2, s3, v3, v2
+; GFX89-NEXT: v_mov_b32_e32 v3, s0
+; GFX89-NEXT: v_mad_i32_i24 v2, s1, v3, v2
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: idot2_MultipleUses_mul2:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s3, s[6:7], 0x0
+; GCN-DL-NEXT: s_load_dword s4, s[0:1], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_sext_i32_i16 s0, s2
+; GCN-DL-NEXT: s_ashr_i32 s2, s2, 16
+; GCN-DL-NEXT: s_sext_i32_i16 s1, s3
+; GCN-DL-NEXT: s_ashr_i32 s3, s3, 16
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s4
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s2
+; GCN-DL-NEXT: v_mad_i32_i24 v2, s3, v3, v2
+; GCN-DL-NEXT: v_mad_i32_i24 v2, s3, v3, v2
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s0
+; GCN-DL-NEXT: v_mad_i32_i24 v2, s1, v3, v2
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %vec2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i16> %vec1, i64 0
+ %conv = sext i16 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i16> %vec2, i64 0
+ %conv2 = sext i16 %s2.elt1 to i32
+ %mul1 = mul i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i16> %vec1, i64 1
+ %conv3 = sext i16 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i16> %vec2, i64 1
+ %conv4 = sext i16 %s2.elt2 to i32
+ %mul2 = mul i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add0 = add i32 %mul2, %s3
+
+ %add1 = add i32 %mul2, %add0
+ %add2 = add i32 %add1, %mul1
+
+ store i32 %add2, i32 addrspace(1)* %dst, align 4
+ ret void
+}
+
+define amdgpu_kernel void @udot2_acc16(<2 x i16> addrspace(1)* %src1,
+; GFX7-LABEL: udot2_acc16:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_mov_b32 s8, 0xffff
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[4:5], 0x0
+; GFX7-NEXT: buffer_load_ushort v0, off, s[0:3], 0
+; GFX7-NEXT: s_load_dword s5, s[6:7], 0x0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshr_b32 s6, s4, 16
+; GFX7-NEXT: s_and_b32 s4, s4, s8
+; GFX7-NEXT: s_lshr_b32 s7, s5, 16
+; GFX7-NEXT: v_mov_b32_e32 v1, s7
+; GFX7-NEXT: s_and_b32 s5, s5, s8
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mad_u32_u24 v0, s6, v1, v0
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: v_mad_u32_u24 v0, s4, v1, v0
+; GFX7-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: udot2_acc16:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: {{flat|global}}_load_ushort v2, v[0:1]
+; GFX89-NEXT: s_load_dword s1, s[4:5], 0x0
+; GFX89-NEXT: s_load_dword s2, s[6:7], 0x0
+; GFX89-NEXT: s_mov_b32 s0, 0xffff
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_and_b32 s3, s1, s0
+; GFX89-NEXT: s_and_b32 s0, s2, s0
+; GFX89-NEXT: s_lshr_b32 s2, s2, 16
+; GFX89-NEXT: s_lshr_b32 s1, s1, 16
+; GFX89-NEXT: v_mov_b32_e32 v3, s2
+; GFX89-NEXT: s_waitcnt vmcnt(0)
+; GFX89-NEXT: v_mad_u32_u24 v2, s1, v3, v2
+; GFX89-NEXT: v_mov_b32_e32 v3, s0
+; GFX89-NEXT: v_mad_u32_u24 v2, s3, v3, v2
+; GFX89-NEXT: {{flat|global}}_store_short v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: udot2_acc16:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[4:5], 0x0
+; GCN-DL-NEXT: s_load_dword s3, s[6:7], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: global_load_ushort v2, v[0:1], off
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s3
+; GCN-DL-NEXT: s_waitcnt vmcnt(0)
+; GCN-DL-NEXT: v_dot2_u32_u16 v2, s2, v3, v2
+; GCN-DL-NEXT: global_store_short v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i16> addrspace(1)* %src2,
+ i16 addrspace(1)* nocapture %dst) {
+entry:
+ %v1 = load <2 x i16>, <2 x i16> addrspace(1)* %src1
+ %v2 = load <2 x i16>, <2 x i16> addrspace(1)* %src2
+
+ %v1e1 = extractelement <2 x i16> %v1, i64 0
+ %v2e1 = extractelement <2 x i16> %v2, i64 0
+ %mul1 = mul i16 %v1e1, %v2e1
+
+ %v1e2 = extractelement <2 x i16> %v1, i64 1
+ %v2e2 = extractelement <2 x i16> %v2, i64 1
+ %mul2 = mul i16 %v1e2, %v2e2
+
+ %s2 = load i16, i16 addrspace(1)* %dst, align 2
+ %add1 = add i16 %mul2, %s2
+ %add2 = add i16 %add1, %mul1
+ store i16 %add2, i16 addrspace(1)* %dst, align 2
+ ret void
+}
+
+
+define amdgpu_kernel void @notsdot2_sext8(<2 x i8> addrspace(1)* %src1,
+; GFX7-LABEL: notsdot2_sext8:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s10, s2
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_mov_b32 s8, s6
+; GFX7-NEXT: s_mov_b32 s9, s7
+; GFX7-NEXT: s_mov_b32 s11, s3
+; GFX7-NEXT: s_mov_b32 s6, s2
+; GFX7-NEXT: s_mov_b32 s7, s3
+; GFX7-NEXT: buffer_load_ushort v0, off, s[4:7], 0
+; GFX7-NEXT: buffer_load_ushort v1, off, s[8:11], 0
+; GFX7-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_bfe_i32 v2, v0, 0, 8
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_bfe_i32 v3, v1, 0, 8
+; GFX7-NEXT: v_bfe_i32 v0, v0, 8, 8
+; GFX7-NEXT: v_bfe_i32 v1, v1, 8, 8
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mad_i32_i24 v0, v1, v0, s4
+; GFX7-NEXT: v_mad_i32_i24 v0, v3, v2, v0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX89-LABEL: notsdot2_sext8:
+; GFX89: ; %bb.0: ; %entry
+; GFX89-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX89-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX89-NEXT: s_waitcnt lgkmcnt(0)
+; GFX89-NEXT: s_load_dword s2, s[0:1], 0x0
+; GFX89-NEXT: v_mov_b32_e32 v0, s6
+; GFX89-NEXT: v_mov_b32_e32 v1, s7
+; GFX89-NEXT: v_mov_b32_e32 v2, s4
+; GFX89-NEXT: v_mov_b32_e32 v3, s5
+; GFX89-NEXT: {{flat|global}}_load_ushort v2, v[2:3]
+; GFX89-NEXT: {{flat|global}}_load_ushort v3, v[0:1]
+; GFX89-NEXT: v_mov_b32_e32 v0, s0
+; GFX89-NEXT: v_mov_b32_e32 v1, s1
+; GFX89-NEXT: s_waitcnt vmcnt(1)
+; GFX89-NEXT: v_lshrrev_b16_e32 v4, 8, v2
+; GFX89-NEXT: s_waitcnt vmcnt(0)
+; GFX89-NEXT: v_bfe_i32 v5, v3, 0, 8
+; GFX89-NEXT: v_lshrrev_b16_e32 v3, 8, v3
+; GFX89-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GFX89-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX89-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GFX89: v_mad_i32_i24 v3, v3, v4, s2
+; GFX89: v_mad_i32_i24 v2, v5, v2, v3
+; GFX89-NEXT: {{flat|global}}_store_dword v[0:1], v2
+; GFX89-NEXT: s_endpgm
+;
+; GCN-DL-LABEL: notsdot2_sext8:
+; GCN-DL: ; %bb.0: ; %entry
+; GCN-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: s_load_dword s2, s[0:1], 0x0
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s6
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s7
+; GCN-DL-NEXT: v_mov_b32_e32 v2, s4
+; GCN-DL-NEXT: v_mov_b32_e32 v3, s5
+; GCN-DL-NEXT: global_load_ushort v2, v[2:3], off
+; GCN-DL-NEXT: global_load_ushort v3, v[0:1], off
+; GCN-DL-NEXT: v_mov_b32_e32 v0, s0
+; GCN-DL-NEXT: v_mov_b32_e32 v1, s1
+; GCN-DL-NEXT: s_waitcnt vmcnt(1)
+; GCN-DL-NEXT: v_lshrrev_b16_e32 v4, 8, v2
+; GCN-DL-NEXT: s_waitcnt vmcnt(0)
+; GCN-DL-NEXT: v_bfe_i32 v5, v3, 0, 8
+; GCN-DL-NEXT: v_lshrrev_b16_e32 v3, 8, v3
+; GCN-DL-NEXT: v_bfe_i32 v4, v4, 0, 8
+; GCN-DL-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GCN-DL-NEXT: v_bfe_i32 v2, v2, 0, 8
+; GCN-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-DL-NEXT: v_mad_i32_i24 v3, v3, v4, s2
+; GCN-DL-NEXT: v_mad_i32_i24 v2, v5, v2, v3
+; GCN-DL-NEXT: global_store_dword v[0:1], v2, off
+; GCN-DL-NEXT: s_endpgm
+ <2 x i8> addrspace(1)* %src2,
+ i32 addrspace(1)* nocapture %dst) {
+entry:
+ %vec1 = load <2 x i8>, <2 x i8> addrspace(1)* %src1
+ %vec2 = load <2 x i8>, <2 x i8> addrspace(1)* %src2
+
+ %s1.elt1 = extractelement <2 x i8> %vec1, i64 0
+ %conv = sext i8 %s1.elt1 to i32
+ %s2.elt1 = extractelement <2 x i8> %vec2, i64 0
+ %conv2 = sext i8 %s2.elt1 to i32
+ %mul1 = mul nuw i32 %conv2, %conv
+
+ %s1.elt2 = extractelement <2 x i8> %vec1, i64 1
+ %conv3 = sext i8 %s1.elt2 to i32
+ %s2.elt2 = extractelement <2 x i8> %vec2, i64 1
+ %conv4 = sext i8 %s2.elt2 to i32
+ %mul2 = mul nuw i32 %conv4, %conv3
+
+ %s3 = load i32, i32 addrspace(1)* %dst, align 4
+ %add = add i32 %mul2, %s3
+ %add6 = add i32 %add, %mul1
+ store i32 %add6, i32 addrspace(1)* %dst, align 4
+ ret void
+}