[llvm] fadea8c - [AMDGPU] ds_read2/ds_write2 gfx1250 tests. NFC (#159824)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 19 14:27:34 PDT 2025
Author: Stanislav Mekhanoshin
Date: 2025-09-19T14:27:30-07:00
New Revision: fadea8cd1da608fb3c378ab7edc53b7fb2e02c3e
URL: https://github.com/llvm/llvm-project/commit/fadea8cd1da608fb3c378ab7edc53b7fb2e02c3e
DIFF: https://github.com/llvm/llvm-project/commit/fadea8cd1da608fb3c378ab7edc53b7fb2e02c3e.diff
LOG: [AMDGPU] ds_read2/ds_write2 gfx1250 tests. NFC (#159824)
Added:
llvm/test/CodeGen/AMDGPU/ds_read2-gfx1250.ll
Modified:
llvm/test/CodeGen/AMDGPU/ds_write2.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/ds_read2-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/ds_read2-gfx1250.ll
new file mode 100644
index 0000000000000..23d2b18f5311b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/ds_read2-gfx1250.ll
@@ -0,0 +1,881 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1250 -mattr=+load-store-opt,+unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250,GFX1250-UNALIGNED %s
+
+; FIXME: We don't get cases where the address was an SGPR because we
+; get a copy to the address register for each one.
+
+@lds = addrspace(3) global [512 x float] poison, align 4
+@lds.f64 = addrspace(3) global [512 x double] poison, align 8
+
+define amdgpu_kernel void @simple_read2_f32(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: simple_read2_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v2, 0xffc, v0
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v2 offset1:8
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %x.i
+ %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
+ %add.x = add nsw i32 %x.i, 8
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %add.x
+ %val1 = load float, ptr addrspace(3) %arrayidx1, align 4
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+define amdgpu_kernel void @simple_read2_f32_max_offset(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: simple_read2_f32_max_offset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v2, 0xffc, v0
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v2 offset1:255
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %x.i
+ %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
+ %add.x = add nsw i32 %x.i, 255
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %add.x
+ %val1 = load float, ptr addrspace(3) %arrayidx1, align 4
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+define amdgpu_kernel void @simple_read2_f32_too_far(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: simple_read2_f32_too_far:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: ds_load_b32 v1, v0
+; GFX1250-NEXT: ds_load_b32 v2, v0 offset:1028
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v1, v1, v2
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %x.i
+ %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
+ %add.x = add nsw i32 %x.i, 257
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %add.x
+ %val1 = load float, ptr addrspace(3) %arrayidx1, align 4
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+define amdgpu_kernel void @simple_read2_f32_x2(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: simple_read2_f32_x2:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v4, 0xffc, v0
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v4 offset1:8
+; GFX1250-NEXT: ds_load_2addr_b32 v[2:3], v4 offset0:11 offset1:27
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_dual_add_f32 v0, v0, v1 :: v_dual_add_f32 v1, v2, v3
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v4, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %idx.0 = add nsw i32 %tid.x, 0
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.0
+ %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
+
+ %idx.1 = add nsw i32 %tid.x, 8
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.1
+ %val1 = load float, ptr addrspace(3) %arrayidx1, align 4
+ %sum.0 = fadd float %val0, %val1
+
+ %idx.2 = add nsw i32 %tid.x, 11
+ %arrayidx2 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.2
+ %val2 = load float, ptr addrspace(3) %arrayidx2, align 4
+
+ %idx.3 = add nsw i32 %tid.x, 27
+ %arrayidx3 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.3
+ %val3 = load float, ptr addrspace(3) %arrayidx3, align 4
+ %sum.1 = fadd float %val2, %val3
+
+ %sum = fadd float %sum.0, %sum.1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %idx.0
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+; Make sure there is an instruction between the two sets of reads.
+define amdgpu_kernel void @simple_read2_f32_x2_barrier(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: simple_read2_f32_x2_barrier:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v4, 0xffc, v0
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v4 offset1:8
+; GFX1250-NEXT: s_barrier_signal -1
+; GFX1250-NEXT: s_barrier_wait -1
+; GFX1250-NEXT: ds_load_2addr_b32 v[2:3], v4 offset0:11 offset1:27
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_dual_add_f32 v0, v0, v1 :: v_dual_add_f32 v1, v2, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v4, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %idx.0 = add nsw i32 %tid.x, 0
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.0
+ %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
+
+ %idx.1 = add nsw i32 %tid.x, 8
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.1
+ %val1 = load float, ptr addrspace(3) %arrayidx1, align 4
+ %sum.0 = fadd float %val0, %val1
+
+ call void @llvm.amdgcn.s.barrier() #2
+
+ %idx.2 = add nsw i32 %tid.x, 11
+ %arrayidx2 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.2
+ %val2 = load float, ptr addrspace(3) %arrayidx2, align 4
+
+ %idx.3 = add nsw i32 %tid.x, 27
+ %arrayidx3 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.3
+ %val3 = load float, ptr addrspace(3) %arrayidx3, align 4
+ %sum.1 = fadd float %val2, %val3
+
+ %sum = fadd float %sum.0, %sum.1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %idx.0
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+; For some reason adding something to the base address for the first
+; element results in only folding the inner pair.
+define amdgpu_kernel void @simple_read2_f32_x2_nonzero_base(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: simple_read2_f32_x2_nonzero_base:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v4, 0xffc, v0
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v4 offset0:2 offset1:8
+; GFX1250-NEXT: ds_load_2addr_b32 v[2:3], v4 offset0:11 offset1:27
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_dual_add_f32 v0, v0, v1 :: v_dual_add_f32 v1, v2, v3
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v4, v0, s[0:1] offset:8
+; GFX1250-NEXT: s_endpgm
+ %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.0
+ %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
+
+ %idx.1 = add nsw i32 %tid.x, 8
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.1
+ %val1 = load float, ptr addrspace(3) %arrayidx1, align 4
+ %sum.0 = fadd float %val0, %val1
+
+ %idx.2 = add nsw i32 %tid.x, 11
+ %arrayidx2 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.2
+ %val2 = load float, ptr addrspace(3) %arrayidx2, align 4
+
+ %idx.3 = add nsw i32 %tid.x, 27
+ %arrayidx3 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %idx.3
+ %val3 = load float, ptr addrspace(3) %arrayidx3, align 4
+ %sum.1 = fadd float %val2, %val3
+
+ %sum = fadd float %sum.0, %sum.1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %idx.0
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+; Be careful of vectors of pointers. We don't know if the 2 pointers
+; in the vectors are really the same base, so this is not safe to
+; merge.
+; Base pointers come from different subregister of same super
+; register. We can't safely merge this.
+define amdgpu_kernel void @read2_ptr_is_subreg_arg_f32(ptr addrspace(1) %out, <2 x ptr addrspace(3)> %lds.ptr) #0 {
+; GFX1250-LABEL: read2_ptr_is_subreg_arg_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3
+; GFX1250-NEXT: ds_load_b32 v1, v1 offset:32
+; GFX1250-NEXT: ds_load_b32 v2, v2
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v1, v1, v2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %index.0 = insertelement <2 x i32> poison, i32 %x.i, i32 0
+ %index.1 = insertelement <2 x i32> %index.0, i32 8, i32 0
+ %gep = getelementptr inbounds float, <2 x ptr addrspace(3)> %lds.ptr, <2 x i32> %index.1
+ %gep.0 = extractelement <2 x ptr addrspace(3)> %gep, i32 0
+ %gep.1 = extractelement <2 x ptr addrspace(3)> %gep, i32 1
+ %val0 = load float, ptr addrspace(3) %gep.0, align 4
+ %val1 = load float, ptr addrspace(3) %gep.1, align 4
+ %add.x = add nsw i32 %x.i, 8
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+; Apply a constant scalar offset after the pointer vector extract. We
+; are rejecting merges that have the same, constant 0 offset, so make
+; sure we are really rejecting it because of the different
+; subregisters.
+define amdgpu_kernel void @read2_ptr_is_subreg_arg_offset_f32(ptr addrspace(1) %out, <2 x ptr addrspace(3)> %lds.ptr) #0 {
+; GFX1250-LABEL: read2_ptr_is_subreg_arg_offset_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3
+; GFX1250-NEXT: ds_load_b32 v1, v1 offset:32
+; GFX1250-NEXT: ds_load_b32 v2, v2 offset:32
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v1, v1, v2
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %index.0 = insertelement <2 x i32> poison, i32 %x.i, i32 0
+ %index.1 = insertelement <2 x i32> %index.0, i32 8, i32 0
+ %gep = getelementptr inbounds float, <2 x ptr addrspace(3)> %lds.ptr, <2 x i32> %index.1
+ %gep.0 = extractelement <2 x ptr addrspace(3)> %gep, i32 0
+ %gep.1 = extractelement <2 x ptr addrspace(3)> %gep, i32 1
+
+ ; Apply an additional offset after the vector that will be more obviously folded.
+ %gep.1.offset = getelementptr float, ptr addrspace(3) %gep.1, i32 8
+
+ %val0 = load float, ptr addrspace(3) %gep.0, align 4
+ %val1 = load float, ptr addrspace(3) %gep.1.offset, align 4
+ %add.x = add nsw i32 %x.i, 8
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+define amdgpu_kernel void @read2_ptr_is_subreg_f32(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: read2_ptr_is_subreg_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v2, 0xffc, v0
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v2 offset1:8
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %ptr.0 = insertelement <2 x ptr addrspace(3)> poison, ptr addrspace(3) @lds, i32 0
+ %ptr.1 = insertelement <2 x ptr addrspace(3)> %ptr.0, ptr addrspace(3) @lds, i32 1
+ %x.i.v.0 = insertelement <2 x i32> poison, i32 %x.i, i32 0
+ %x.i.v.1 = insertelement <2 x i32> %x.i.v.0, i32 %x.i, i32 1
+ %idx = add <2 x i32> %x.i.v.1, <i32 0, i32 8>
+ %gep = getelementptr inbounds [512 x float], <2 x ptr addrspace(3)> %ptr.1, <2 x i32> <i32 0, i32 0>, <2 x i32> %idx
+ %gep.0 = extractelement <2 x ptr addrspace(3)> %gep, i32 0
+ %gep.1 = extractelement <2 x ptr addrspace(3)> %gep, i32 1
+ %val0 = load float, ptr addrspace(3) %gep.0, align 4
+ %val1 = load float, ptr addrspace(3) %gep.1, align 4
+ %add.x = add nsw i32 %x.i, 8
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+define amdgpu_kernel void @simple_read2_f32_volatile_0(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: simple_read2_f32_volatile_0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: ds_load_b32 v1, v0
+; GFX1250-NEXT: ds_load_b32 v2, v0 offset:32
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v1, v1, v2
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %x.i
+ %val0 = load volatile float, ptr addrspace(3) %arrayidx0, align 4
+ %add.x = add nsw i32 %x.i, 8
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %add.x
+ %val1 = load float, ptr addrspace(3) %arrayidx1, align 4
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+define amdgpu_kernel void @simple_read2_f32_volatile_1(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: simple_read2_f32_volatile_1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: ds_load_b32 v1, v0
+; GFX1250-NEXT: ds_load_b32 v2, v0 offset:32
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v1, v1, v2
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %x.i
+ %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
+ %add.x = add nsw i32 %x.i, 8
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %add.x
+ %val1 = load volatile float, ptr addrspace(3) %arrayidx1, align 4
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+; Can't fold since not correctly aligned.
+define amdgpu_kernel void @unaligned_read2_f32(ptr addrspace(1) %out, ptr addrspace(3) %lds) #0 {
+; GFX1250-LABEL: unaligned_read2_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v2, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v0, s2, v2
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v0 offset1:8
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds float, ptr addrspace(3) %lds, i32 %x.i
+ %val0 = load float, ptr addrspace(3) %arrayidx0, align 1
+ %add.x = add nsw i32 %x.i, 8
+ %arrayidx1 = getelementptr inbounds float, ptr addrspace(3) %lds, i32 %add.x
+ %val1 = load float, ptr addrspace(3) %arrayidx1, align 1
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+define amdgpu_kernel void @unaligned_offset_read2_f32(ptr addrspace(1) %out, ptr addrspace(3) %lds) #0 {
+; GFX1250-LABEL: unaligned_offset_read2_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v2, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v0, s2, v2
+; GFX1250-NEXT: ds_load_b64 v[0:1], v0 offset:5
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %base = getelementptr inbounds float, ptr addrspace(3) %lds, i32 %x.i
+ %addr0.i8 = getelementptr inbounds i8, ptr addrspace(3) %base, i32 5
+ %val0 = load float, ptr addrspace(3) %addr0.i8, align 1
+ %addr1.i8 = getelementptr inbounds i8, ptr addrspace(3) %base, i32 9
+ %val1 = load float, ptr addrspace(3) %addr1.i8, align 1
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+define amdgpu_kernel void @misaligned_2_simple_read2_f32(ptr addrspace(1) %out, ptr addrspace(3) %lds) #0 {
+; GFX1250-LABEL: misaligned_2_simple_read2_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v2, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v0, s2, v2
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v0 offset1:8
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds float, ptr addrspace(3) %lds, i32 %x.i
+ %val0 = load float, ptr addrspace(3) %arrayidx0, align 2
+ %add.x = add nsw i32 %x.i, 8
+ %arrayidx1 = getelementptr inbounds float, ptr addrspace(3) %lds, i32 %add.x
+ %val1 = load float, ptr addrspace(3) %arrayidx1, align 2
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+define amdgpu_kernel void @simple_read2_f64(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: simple_read2_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x1ff8, v0
+; GFX1250-NEXT: ds_load_2addr_b64 v[0:3], v4 offset1:8
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds [512 x double], ptr addrspace(3) @lds.f64, i32 0, i32 %x.i
+ %val0 = load double, ptr addrspace(3) %arrayidx0, align 8
+ %add.x = add nsw i32 %x.i, 8
+ %arrayidx1 = getelementptr inbounds [512 x double], ptr addrspace(3) @lds.f64, i32 0, i32 %add.x
+ %val1 = load double, ptr addrspace(3) %arrayidx1, align 8
+ %sum = fadd double %val0, %val1
+ %out.gep = getelementptr inbounds double, ptr addrspace(1) %out, i32 %x.i
+ store double %sum, ptr addrspace(1) %out.gep, align 8
+ ret void
+}
+
+define amdgpu_kernel void @simple_read2_f64_max_offset(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: simple_read2_f64_max_offset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x1ff8, v0
+; GFX1250-NEXT: ds_load_2addr_b64 v[0:3], v4 offset1:255
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds [512 x double], ptr addrspace(3) @lds.f64, i32 0, i32 %x.i
+ %val0 = load double, ptr addrspace(3) %arrayidx0, align 8
+ %add.x = add nsw i32 %x.i, 255
+ %arrayidx1 = getelementptr inbounds [512 x double], ptr addrspace(3) @lds.f64, i32 0, i32 %add.x
+ %val1 = load double, ptr addrspace(3) %arrayidx1, align 8
+ %sum = fadd double %val0, %val1
+ %out.gep = getelementptr inbounds double, ptr addrspace(1) %out, i32 %x.i
+ store double %sum, ptr addrspace(1) %out.gep, align 8
+ ret void
+}
+
+define amdgpu_kernel void @simple_read2_f64_too_far(ptr addrspace(1) %out) #0 {
+; GFX1250-LABEL: simple_read2_f64_too_far:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x1ff8, v0
+; GFX1250-NEXT: ds_load_b64 v[0:1], v4
+; GFX1250-NEXT: ds_load_b64 v[2:3], v4 offset:2056
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds [512 x double], ptr addrspace(3) @lds.f64, i32 0, i32 %x.i
+ %val0 = load double, ptr addrspace(3) %arrayidx0, align 8
+ %add.x = add nsw i32 %x.i, 257
+ %arrayidx1 = getelementptr inbounds [512 x double], ptr addrspace(3) @lds.f64, i32 0, i32 %add.x
+ %val1 = load double, ptr addrspace(3) %arrayidx1, align 8
+ %sum = fadd double %val0, %val1
+ %out.gep = getelementptr inbounds double, ptr addrspace(1) %out, i32 %x.i
+ store double %sum, ptr addrspace(1) %out.gep, align 8
+ ret void
+}
+
+; Alignment only 4
+define amdgpu_kernel void @misaligned_read2_f64(ptr addrspace(1) %out, ptr addrspace(3) %lds) #0 {
+; GFX1250-LABEL: misaligned_read2_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x1ff8, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v2, s2, v4
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v2 offset1:1
+; GFX1250-NEXT: ds_load_2addr_b32 v[2:3], v2 offset0:14 offset1:15
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds double, ptr addrspace(3) %lds, i32 %x.i
+ %val0 = load double, ptr addrspace(3) %arrayidx0, align 4
+ %add.x = add nsw i32 %x.i, 7
+ %arrayidx1 = getelementptr inbounds double, ptr addrspace(3) %lds, i32 %add.x
+ %val1 = load double, ptr addrspace(3) %arrayidx1, align 4
+ %sum = fadd double %val0, %val1
+ %out.gep = getelementptr inbounds double, ptr addrspace(1) %out, i32 %x.i
+ store double %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+@foo = addrspace(3) global [4 x i32] poison, align 4
+
+define amdgpu_kernel void @load_constant_adjacent_offsets(ptr addrspace(1) %out) {
+; GFX1250-LABEL: load_constant_adjacent_offsets:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: ds_load_b64 v[0:1], v2
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %val0 = load i32, ptr addrspace(3) @foo, align 4
+ %val1 = load i32, ptr addrspace(3) getelementptr inbounds ([4 x i32], ptr addrspace(3) @foo, i32 0, i32 1), align 4
+ %sum = add i32 %val0, %val1
+ store i32 %sum, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_kernel void @load_constant_disjoint_offsets(ptr addrspace(1) %out) {
+; GFX1250-LABEL: load_constant_disjoint_offsets:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v2 offset1:2
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %val0 = load i32, ptr addrspace(3) @foo, align 4
+ %val1 = load i32, ptr addrspace(3) getelementptr inbounds ([4 x i32], ptr addrspace(3) @foo, i32 0, i32 2), align 4
+ %sum = add i32 %val0, %val1
+ store i32 %sum, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+@bar = addrspace(3) global [4 x i64] poison, align 4
+
+define amdgpu_kernel void @load_misaligned64_constant_offsets(ptr addrspace(1) %out) {
+; GFX1250-LABEL: load_misaligned64_constant_offsets:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mov_b32_e32 v4, 0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: ds_load_b128 v[0:3], v4
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %val0 = load i64, ptr addrspace(3) @bar, align 4
+ %val1 = load i64, ptr addrspace(3) getelementptr inbounds ([4 x i64], ptr addrspace(3) @bar, i32 0, i32 1), align 4
+ %sum = add i64 %val0, %val1
+ store i64 %sum, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+@bar.large = addrspace(3) global [4096 x i64] poison, align 4
+
+define amdgpu_kernel void @load_misaligned64_constant_large_offsets(ptr addrspace(1) %out) {
+; GFX1250-LABEL: load_misaligned64_constant_large_offsets:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mov_b32_e32 v4, 0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: ds_load_b64 v[0:1], v4 offset:16384
+; GFX1250-NEXT: ds_load_b64 v[2:3], v4 offset:32760
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %val0 = load i64, ptr addrspace(3) getelementptr inbounds ([4096 x i64], ptr addrspace(3) @bar.large, i32 0, i32 2048), align 4
+ %val1 = load i64, ptr addrspace(3) getelementptr inbounds ([4096 x i64], ptr addrspace(3) @bar.large, i32 0, i32 4095), align 4
+ %sum = add i64 %val0, %val1
+ store i64 %sum, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+@sgemm.lA = internal unnamed_addr addrspace(3) global [264 x float] poison, align 4
+@sgemm.lB = internal unnamed_addr addrspace(3) global [776 x float] poison, align 4
+
+define amdgpu_kernel void @sgemm_inner_loop_read2_sequence(ptr addrspace(1) %C, i32 %lda, i32 %ldb) #0 {
+; GFX1250-LABEL: sgemm_inner_loop_read2_sequence:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_bfe_u32 s0, ttmp6, 0x4000c
+; GFX1250-NEXT: s_and_b32 s1, ttmp6, 15
+; GFX1250-NEXT: s_add_co_i32 s0, s0, 1
+; GFX1250-NEXT: s_getreg_b32 s2, hwreg(HW_REG_IB_STS2, 6, 4)
+; GFX1250-NEXT: s_mul_i32 s0, ttmp9, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_add_co_i32 s1, s1, s0
+; GFX1250-NEXT: s_cmp_eq_u32 s2, 0
+; GFX1250-NEXT: s_cselect_b32 s0, ttmp9, s1
+; GFX1250-NEXT: s_lshl_b32 s0, s0, 2
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_add_co_i32 s1, s0, 0xc20
+; GFX1250-NEXT: s_addk_co_i32 s0, 0xc60
+; GFX1250-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v4, s0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: ds_load_2addr_b32 v[2:3], v1 offset1:1
+; GFX1250-NEXT: ds_load_2addr_b32 v[4:5], v4 offset1:1
+; GFX1250-NEXT: s_wait_dscnt 0x1
+; GFX1250-NEXT: v_dual_lshrrev_b32 v0, 8, v0 :: v_dual_add_f32 v2, v2, v3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v8, 0xffc, v0
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v8 offset1:1
+; GFX1250-NEXT: ds_load_2addr_b32 v[6:7], v8 offset0:32 offset1:33
+; GFX1250-NEXT: s_wait_dscnt 0x2
+; GFX1250-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX1250-NEXT: v_add_f32_e32 v4, v2, v5
+; GFX1250-NEXT: ds_load_2addr_b32 v[2:3], v8 offset0:64 offset1:65
+; GFX1250-NEXT: s_wait_dscnt 0x2
+; GFX1250-NEXT: v_add_f32_e32 v0, v4, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_add_f32 v0, v0, v1 :: v_dual_mov_b32 v1, 0
+; GFX1250-NEXT: s_wait_dscnt 0x1
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v7
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %x.i = tail call i32 @llvm.amdgcn.workgroup.id.x() #1
+ %y.i = tail call i32 @llvm.amdgcn.workitem.id.y() #1
+ %arrayidx44 = getelementptr inbounds [264 x float], ptr addrspace(3) @sgemm.lA, i32 0, i32 %x.i
+ %tmp16 = load float, ptr addrspace(3) %arrayidx44, align 4
+ %add47 = add nsw i32 %x.i, 1
+ %arrayidx48 = getelementptr inbounds [264 x float], ptr addrspace(3) @sgemm.lA, i32 0, i32 %add47
+ %tmp17 = load float, ptr addrspace(3) %arrayidx48, align 4
+ %add51 = add nsw i32 %x.i, 16
+ %arrayidx52 = getelementptr inbounds [264 x float], ptr addrspace(3) @sgemm.lA, i32 0, i32 %add51
+ %tmp18 = load float, ptr addrspace(3) %arrayidx52, align 4
+ %add55 = add nsw i32 %x.i, 17
+ %arrayidx56 = getelementptr inbounds [264 x float], ptr addrspace(3) @sgemm.lA, i32 0, i32 %add55
+ %tmp19 = load float, ptr addrspace(3) %arrayidx56, align 4
+ %arrayidx60 = getelementptr inbounds [776 x float], ptr addrspace(3) @sgemm.lB, i32 0, i32 %y.i
+ %tmp20 = load float, ptr addrspace(3) %arrayidx60, align 4
+ %add63 = add nsw i32 %y.i, 1
+ %arrayidx64 = getelementptr inbounds [776 x float], ptr addrspace(3) @sgemm.lB, i32 0, i32 %add63
+ %tmp21 = load float, ptr addrspace(3) %arrayidx64, align 4
+ %add67 = add nsw i32 %y.i, 32
+ %arrayidx68 = getelementptr inbounds [776 x float], ptr addrspace(3) @sgemm.lB, i32 0, i32 %add67
+ %tmp22 = load float, ptr addrspace(3) %arrayidx68, align 4
+ %add71 = add nsw i32 %y.i, 33
+ %arrayidx72 = getelementptr inbounds [776 x float], ptr addrspace(3) @sgemm.lB, i32 0, i32 %add71
+ %tmp23 = load float, ptr addrspace(3) %arrayidx72, align 4
+ %add75 = add nsw i32 %y.i, 64
+ %arrayidx76 = getelementptr inbounds [776 x float], ptr addrspace(3) @sgemm.lB, i32 0, i32 %add75
+ %tmp24 = load float, ptr addrspace(3) %arrayidx76, align 4
+ %add79 = add nsw i32 %y.i, 65
+ %arrayidx80 = getelementptr inbounds [776 x float], ptr addrspace(3) @sgemm.lB, i32 0, i32 %add79
+ %tmp25 = load float, ptr addrspace(3) %arrayidx80, align 4
+ %sum.0 = fadd float %tmp16, %tmp17
+ %sum.1 = fadd float %sum.0, %tmp18
+ %sum.2 = fadd float %sum.1, %tmp19
+ %sum.3 = fadd float %sum.2, %tmp20
+ %sum.4 = fadd float %sum.3, %tmp21
+ %sum.5 = fadd float %sum.4, %tmp22
+ %sum.6 = fadd float %sum.5, %tmp23
+ %sum.7 = fadd float %sum.6, %tmp24
+ %sum.8 = fadd float %sum.7, %tmp25
+ store float %sum.8, ptr addrspace(1) %C, align 4
+ ret void
+}
+
+define amdgpu_kernel void @misaligned_read2_v2i32(ptr addrspace(1) %out, ptr addrspace(3) %in) #0 {
+; GFX1250-LABEL: misaligned_read2_v2i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s2
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v0 offset1:1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %load = load <2 x i32>, ptr addrspace(3) %in, align 4
+ store <2 x i32> %load, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_kernel void @misaligned_read2_i64(ptr addrspace(1) %out, ptr addrspace(3) %in) #0 {
+; GFX1250-LABEL: misaligned_read2_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s2
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v0 offset1:1
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+ %load = load i64, ptr addrspace(3) %in, align 4
+ store i64 %load, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_kernel void @ds_read_diff_base_interleaving(
+; GFX1250-LABEL: ds_read_diff_base_interleaving:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8
+; GFX1250-NEXT: v_dual_lshrrev_b32 v1, 6, v0 :: v_dual_lshlrev_b32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_and_b32_e32 v1, 0x3ff0, v1
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_add_nc_u32 v2, s0, v1 :: v_dual_add_nc_u32 v3, s1, v0
+; GFX1250-NEXT: v_dual_add_nc_u32 v4, s2, v1 :: v_dual_add_nc_u32 v6, s3, v0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: ds_load_2addr_b32 v[0:1], v2 offset1:1
+; GFX1250-NEXT: ds_load_2addr_b32 v[2:3], v3 offset1:4
+; GFX1250-NEXT: ds_load_2addr_b32 v[4:5], v4 offset1:1
+; GFX1250-NEXT: ds_load_2addr_b32 v[6:7], v6 offset1:4
+; GFX1250-NEXT: s_wait_dscnt 0x2
+; GFX1250-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_mul_f32 v1, v1, v3
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_mul_f32 v2, v4, v6 :: v_dual_add_f32 v0, 2.0, v0
+; GFX1250-NEXT: v_dual_sub_f32 v0, v0, v2 :: v_dual_mov_b32 v2, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_sub_f32 v0, v0, v1 :: v_dual_mul_f32 v1, v5, v7
+; GFX1250-NEXT: v_sub_f32_e32 v0, v0, v1
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] offset:40
+; GFX1250-NEXT: s_endpgm
+ ptr addrspace(1) nocapture %arg,
+ ptr addrspace(3) %arg1,
+ ptr addrspace(3) %arg2,
+ ptr addrspace(3) %arg3,
+ ptr addrspace(3) %arg4) #1 {
+bb:
+ %tmp = getelementptr float, ptr addrspace(1) %arg, i64 10
+ %tmp5 = tail call i32 @llvm.amdgcn.workitem.id.x() #2
+ %tmp6 = tail call i32 @llvm.amdgcn.workitem.id.y() #2
+ %tmp7 = getelementptr [4 x [4 x float]], ptr addrspace(3) %arg1, i32 0, i32 %tmp6, i32 0
+ %tmp8 = getelementptr [4 x [4 x float]], ptr addrspace(3) %arg2, i32 0, i32 0, i32 %tmp5
+ %tmp9 = getelementptr [4 x [4 x float]], ptr addrspace(3) %arg3, i32 0, i32 %tmp6, i32 0
+ %tmp10 = getelementptr [4 x [4 x float]], ptr addrspace(3) %arg4, i32 0, i32 0, i32 %tmp5
+ %tmp11 = getelementptr [4 x [4 x float]], ptr addrspace(3) %arg1, i32 0, i32 %tmp6, i32 1
+ %tmp12 = getelementptr [4 x [4 x float]], ptr addrspace(3) %arg2, i32 0, i32 1, i32 %tmp5
+ %tmp13 = getelementptr [4 x [4 x float]], ptr addrspace(3) %arg3, i32 0, i32 %tmp6, i32 1
+ %tmp14 = getelementptr [4 x [4 x float]], ptr addrspace(3) %arg4, i32 0, i32 1, i32 %tmp5
+ %tmp15 = load float, ptr addrspace(3) %tmp7
+ %tmp16 = load float, ptr addrspace(3) %tmp8
+ %tmp17 = fmul float %tmp15, %tmp16
+ %tmp18 = fadd float 2.000000e+00, %tmp17
+ %tmp19 = load float, ptr addrspace(3) %tmp9
+ %tmp20 = load float, ptr addrspace(3) %tmp10
+ %tmp21 = fmul float %tmp19, %tmp20
+ %tmp22 = fsub float %tmp18, %tmp21
+ %tmp23 = load float, ptr addrspace(3) %tmp11
+ %tmp24 = load float, ptr addrspace(3) %tmp12
+ %tmp25 = fmul float %tmp23, %tmp24
+ %tmp26 = fsub float %tmp22, %tmp25
+ %tmp27 = load float, ptr addrspace(3) %tmp13
+ %tmp28 = load float, ptr addrspace(3) %tmp14
+ %tmp29 = fmul float %tmp27, %tmp28
+ %tmp30 = fsub float %tmp26, %tmp29
+ store float %tmp30, ptr addrspace(1) %tmp
+ ret void
+}
+
+define amdgpu_kernel void @ds_read_call_read(ptr addrspace(1) %out, ptr addrspace(3) %arg) {
+; GFX1250-LABEL: ds_read_call_read:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[36:38], s[4:5], 0x0
+; GFX1250-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX1250-NEXT: v_dual_mov_b32 v42, 0 :: v_dual_mov_b32 v31, v0
+; GFX1250-NEXT: s_mov_b64 s[10:11], s[6:7]
+; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[4:5], 12
+; GFX1250-NEXT: s_mov_b64 s[12:13], void_func_void@abs64
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1250-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1250-NEXT: s_mov_b32 s32, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_lshl_add_u32 v40, v1, 2, s38
+; GFX1250-NEXT: ds_load_b32 v41, v40
+; GFX1250-NEXT: s_swap_pc_i64 s[30:31], s[12:13]
+; GFX1250-NEXT: ds_load_b32 v0, v40 offset:4
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v0, v41, v0
+; GFX1250-NEXT: global_store_b32 v42, v0, s[36:37]
+; GFX1250-NEXT: s_endpgm
+ %x = call i32 @llvm.amdgcn.workitem.id.x()
+ %arrayidx0 = getelementptr i32, ptr addrspace(3) %arg, i32 %x
+ %arrayidx1 = getelementptr i32, ptr addrspace(3) %arrayidx0, i32 1
+ %v0 = load i32, ptr addrspace(3) %arrayidx0, align 4
+ call void @void_func_void()
+ %v1 = load i32, ptr addrspace(3) %arrayidx1, align 4
+ %r = add i32 %v0, %v1
+ store i32 %r, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+@v2i32_align1 = internal addrspace(3) global [100 x <2 x i32>] poison, align 1
+
+define amdgpu_kernel void @read2_v2i32_align1_odd_offset(ptr addrspace(1) %out) {
+; GFX1250-LABEL: read2_v2i32_align1_odd_offset:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1250-NEXT: ds_load_b64 v[0:1], v2 offset:65
+; GFX1250-NEXT: s_wait_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: s_endpgm
+entry:
+ %load = load <2 x i32>, ptr addrspace(3) getelementptr (i8, ptr addrspace(3) @v2i32_align1, i32 65), align 1
+ store <2 x i32> %load, ptr addrspace(1) %out
+ ret void
+}
+
+declare void @void_func_void() #3
+
+declare i32 @llvm.amdgcn.workgroup.id.x() #1
+declare i32 @llvm.amdgcn.workgroup.id.y() #1
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare i32 @llvm.amdgcn.workitem.id.y() #1
+
+declare void @llvm.amdgcn.s.barrier() #2
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone speculatable }
+attributes #2 = { convergent nounwind }
+attributes #3 = { nounwind noinline }
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX1250-UNALIGNED: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/ds_write2.ll b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
index 91bd837b2938c..f82bb59eb7906 100644
--- a/llvm/test/CodeGen/AMDGPU/ds_write2.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=bonaire -mattr=+load-store-opt < %s | FileCheck -enable-var-scope --check-prefix=CI %s
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 -mattr=+load-store-opt,-unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefixes=GFX9,GFX9-ALIGNED %s
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 -mattr=+load-store-opt,+unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefixes=GFX9,GFX9-UNALIGNED %s
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1250 -mattr=+load-store-opt,+unaligned-access-mode < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250,GFX1250-UNALIGNED %s
@lds = addrspace(3) global [512 x float] poison, align 4
@lds.f64 = addrspace(3) global [512 x double] poison, align 8
@@ -30,6 +31,18 @@ define amdgpu_kernel void @simple_write2_one_val_f32(ptr addrspace(1) %C, ptr ad
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write2_b32 v0, v1, v1 offset1:8
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_one_val_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1]
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v1, v1 offset1:8
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr float, ptr addrspace(1) %in, i32 %x.i
%val = load float, ptr addrspace(1) %in.gep, align 4
@@ -69,6 +82,20 @@ define amdgpu_kernel void @simple_write2_two_val_f32(ptr addrspace(1) %C, ptr ad
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write2_b32 v0, v1, v2 offset1:8
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_two_val_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_load_b32 v2, v0, s[0:1] offset:4 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v1, v2 offset1:8
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %x.i
%in.gep.1 = getelementptr float, ptr addrspace(1) %in.gep.0, i32 1
@@ -115,6 +142,21 @@ define amdgpu_kernel void @simple_write2_two_val_f32_volatile_0(ptr addrspace(1)
; GFX9-NEXT: ds_write_b32 v0, v1
; GFX9-NEXT: ds_write_b32 v0, v2 offset:32
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_two_val_f32_volatile_0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_load_b32 v2, v0, s[2:3] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_b32 v0, v1
+; GFX1250-NEXT: ds_store_b32 v0, v2 offset:32
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, ptr addrspace(1) %in0, i32 %x.i
%in1.gep = getelementptr float, ptr addrspace(1) %in1, i32 %x.i
@@ -161,6 +203,21 @@ define amdgpu_kernel void @simple_write2_two_val_f32_volatile_1(ptr addrspace(1)
; GFX9-NEXT: ds_write_b32 v0, v1
; GFX9-NEXT: ds_write_b32 v0, v2 offset:32
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_two_val_f32_volatile_1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_load_b32 v2, v0, s[2:3] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_b32 v0, v1
+; GFX1250-NEXT: ds_store_b32 v0, v2 offset:32
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, ptr addrspace(1) %in0, i32 %x.i
%in1.gep = getelementptr float, ptr addrspace(1) %in1, i32 %x.i
@@ -211,6 +268,19 @@ define amdgpu_kernel void @simple_write2_two_val_subreg2_mixed_f32(ptr addrspace
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write2_b32 v0, v1, v3 offset1:8
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_two_val_subreg2_mixed_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_load_b64 v[2:3], v4, s[0:1] offset:8 scale_offset scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 2, v4
+; GFX1250-NEXT: ds_store_2addr_b32 v1, v0, v3 offset1:8
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep.0 = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %x.i
%in.gep.1 = getelementptr <2 x float>, ptr addrspace(1) %in.gep.0, i32 1
@@ -252,6 +322,18 @@ define amdgpu_kernel void @simple_write2_two_val_subreg2_f32(ptr addrspace(1) %C
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write2_b32 v0, v1, v2 offset1:8
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_two_val_subreg2_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: v_lshlrev_b32_e32 v2, 2, v2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_2addr_b32 v2, v0, v1 offset1:8
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %x.i
%val = load <2 x float>, ptr addrspace(1) %in.gep, align 8
@@ -291,6 +373,17 @@ define amdgpu_kernel void @simple_write2_two_val_subreg4_f32(ptr addrspace(1) %C
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write2_b32 v0, v1, v4 offset1:8
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_two_val_subreg4_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b128 v[0:3], v4, s[0:1] scale_offset
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 2, v4
+; GFX1250-NEXT: ds_store_2addr_b32 v1, v0, v3 offset1:8
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr <4 x float>, ptr addrspace(1) %in, i32 %x.i
%val = load <4 x float>, ptr addrspace(1) %in.gep, align 16
@@ -332,6 +425,20 @@ define amdgpu_kernel void @simple_write2_two_val_max_offset_f32(ptr addrspace(1)
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write2_b32 v0, v1, v2 offset1:255
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_two_val_max_offset_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_load_b32 v2, v0, s[0:1] offset:4 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v1, v2 offset1:255
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %x.i
%in.gep.1 = getelementptr float, ptr addrspace(1) %in.gep.0, i32 1
@@ -378,6 +485,22 @@ define amdgpu_kernel void @simple_write2_two_val_too_far_f32(ptr addrspace(1) %C
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write_b32 v0, v2 offset:1028
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_two_val_too_far_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1]
+; GFX1250-NEXT: global_load_b32 v2, v0, s[2:3]
+; GFX1250-NEXT: s_wait_loadcnt 0x1
+; GFX1250-NEXT: ds_store_b32 v0, v1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_b32 v0, v2 offset:1028
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, ptr addrspace(1) %in0, i32 %x.i
%in1.gep = getelementptr float, ptr addrspace(1) %in1, i32 %x.i
@@ -422,6 +545,21 @@ define amdgpu_kernel void @simple_write2_two_val_f32_x2(ptr addrspace(1) %C, ptr
; GFX9-NEXT: ds_write2_b32 v0, v1, v2 offset1:8
; GFX9-NEXT: ds_write2_b32 v0, v1, v2 offset0:11 offset1:27
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_two_val_f32_x2:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1]
+; GFX1250-NEXT: global_load_b32 v2, v0, s[2:3]
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v1, v2 offset1:8
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v1, v2 offset0:11 offset1:27
+; GFX1250-NEXT: s_endpgm
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, ptr addrspace(1) %in0, i32 %tid.x
%in1.gep = getelementptr float, ptr addrspace(1) %in1, i32 %tid.x
@@ -478,6 +616,21 @@ define amdgpu_kernel void @simple_write2_two_val_f32_x2_nonzero_base(ptr addrspa
; GFX9-NEXT: ds_write2_b32 v0, v1, v2 offset0:3 offset1:8
; GFX9-NEXT: ds_write2_b32 v0, v1, v2 offset0:11 offset1:27
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_two_val_f32_x2_nonzero_base:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1]
+; GFX1250-NEXT: global_load_b32 v2, v0, s[2:3]
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v1, v2 offset0:3 offset1:8
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v1, v2 offset0:11 offset1:27
+; GFX1250-NEXT: s_endpgm
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, ptr addrspace(1) %in0, i32 %tid.x
%in1.gep = getelementptr float, ptr addrspace(1) %in1, i32 %tid.x
@@ -542,6 +695,23 @@ define amdgpu_kernel void @write2_ptr_subreg_arg_two_val_f32(ptr addrspace(1) %C
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write_b32 v3, v2 offset:32
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: write2_ptr_subreg_arg_two_val_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x18
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1] scale_offset
+; GFX1250-NEXT: global_load_b32 v0, v0, s[2:3] scale_offset
+; GFX1250-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1250-NEXT: s_wait_loadcnt 0x1
+; GFX1250-NEXT: ds_store_b32 v2, v1 offset:32
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_b32 v3, v0 offset:32
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, ptr addrspace(1) %in0, i32 %x.i
%in1.gep = getelementptr float, ptr addrspace(1) %in1, i32 %x.i
@@ -587,6 +757,18 @@ define amdgpu_kernel void @simple_write2_one_val_f64(ptr addrspace(1) %C, ptr ad
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write2_b64 v2, v[0:1], v[0:1] offset1:8
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_one_val_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v2, 0x1ff8, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1]
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_2addr_b64 v2, v[0:1], v[0:1] offset1:8
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr double, ptr addrspace(1) %in, i32 %x.i
%val = load double, ptr addrspace(1) %in.gep, align 8
@@ -628,6 +810,21 @@ define amdgpu_kernel void @misaligned_simple_write2_one_val_f64(ptr addrspace(1)
; GFX9-NEXT: ds_write2_b32 v2, v0, v1 offset1:1
; GFX9-NEXT: ds_write2_b32 v2, v0, v1 offset0:14 offset1:15
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: misaligned_simple_write2_one_val_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v2, 0x1ff8, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1]
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v2, s2, v2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_2addr_b32 v2, v0, v1 offset1:1
+; GFX1250-NEXT: ds_store_2addr_b32 v2, v0, v1 offset0:14 offset1:15
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr double, ptr addrspace(1) %in, i32 %x.i
%val = load double, ptr addrspace(1) %in.gep, align 8
@@ -712,6 +909,21 @@ define amdgpu_kernel void @unaligned_offset_simple_write2_one_val_f64(ptr addrsp
; GFX9-UNALIGNED-NEXT: ds_write_b64 v2, v[0:1] offset:5
; GFX9-UNALIGNED-NEXT: ds_write_b64 v2, v[0:1] offset:9
; GFX9-UNALIGNED-NEXT: s_endpgm
+;
+; GFX1250-LABEL: unaligned_offset_simple_write2_one_val_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v2, 0x1ff8, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1]
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v2, s2, v2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_b64 v2, v[0:1] offset:5
+; GFX1250-NEXT: ds_store_b64 v2, v[0:1] offset:9
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr double, ptr addrspace(1) %in, i32 %x.i
%val = load double, ptr addrspace(1) %in.gep, align 8
@@ -751,6 +963,20 @@ define amdgpu_kernel void @simple_write2_two_val_f64(ptr addrspace(1) %C, ptr ad
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write2_b64 v4, v[0:1], v[2:3] offset1:8
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_two_val_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x1ff8, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: global_load_b64 v[2:3], v4, s[0:1] offset:8 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_store_2addr_b64 v4, v[0:1], v[2:3] offset1:8
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep.0 = getelementptr double, ptr addrspace(1) %in, i32 %x.i
%in.gep.1 = getelementptr double, ptr addrspace(1) %in.gep.0, i32 1
@@ -783,6 +1009,13 @@ define amdgpu_kernel void @store_constant_adjacent_offsets() {
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: ds_write_b64 v2, v[0:1]
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: store_constant_adjacent_offsets:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], lit64(0x7b0000007b)
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: ds_store_b64 v2, v[0:1]
+; GFX1250-NEXT: s_endpgm
store i32 123, ptr addrspace(3) @foo, align 4
store i32 123, ptr addrspace(3) getelementptr inbounds ([4 x i32], ptr addrspace(3) @foo, i32 0, i32 1), align 4
ret void
@@ -803,6 +1036,12 @@ define amdgpu_kernel void @store_constant_disjoint_offsets() {
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: ds_write2_b32 v1, v0, v0 offset1:2
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: store_constant_disjoint_offsets:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_dual_mov_b32 v0, 0x7b :: v_dual_mov_b32 v1, 0
+; GFX1250-NEXT: ds_store_2addr_b32 v1, v0, v0 offset1:2
+; GFX1250-NEXT: s_endpgm
store i32 123, ptr addrspace(3) @foo, align 4
store i32 123, ptr addrspace(3) getelementptr inbounds ([4 x i32], ptr addrspace(3) @foo, i32 0, i32 2), align 4
ret void
@@ -829,6 +1068,14 @@ define amdgpu_kernel void @store_misaligned64_constant_offsets() {
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: ds_write_b128 v1, v[0:3]
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: store_misaligned64_constant_offsets:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_dual_mov_b32 v0, 0x7b :: v_dual_mov_b32 v1, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX1250-NEXT: ds_store_b128 v1, v[0:3]
+; GFX1250-NEXT: s_endpgm
store i64 123, ptr addrspace(3) @bar, align 4
store i64 123, ptr addrspace(3) getelementptr inbounds ([4 x i64], ptr addrspace(3) @bar, i32 0, i32 1), align 4
ret void
@@ -857,6 +1104,14 @@ define amdgpu_kernel void @store_misaligned64_constant_large_offsets() {
; GFX9-NEXT: ds_write_b64 v2, v[0:1] offset:16384
; GFX9-NEXT: ds_write_b64 v2, v[0:1] offset:32760
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: store_misaligned64_constant_large_offsets:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 0x7b
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: ds_store_b64 v2, v[0:1] offset:16384
+; GFX1250-NEXT: ds_store_b64 v2, v[0:1] offset:32760
+; GFX1250-NEXT: s_endpgm
store i64 123, ptr addrspace(3) getelementptr inbounds ([4096 x i64], ptr addrspace(3) @bar.large, i32 0, i32 2048), align 4
store i64 123, ptr addrspace(3) getelementptr inbounds ([4096 x i64], ptr addrspace(3) @bar.large, i32 0, i32 4095), align 4
ret void
@@ -908,6 +1163,38 @@ define amdgpu_kernel void @write2_sgemm_sequence(ptr addrspace(1) %C, i32 %lda,
; GFX9-NEXT: ds_write2_b32 v0, v3, v4 offset0:32 offset1:33
; GFX9-NEXT: ds_write2_b32 v0, v3, v4 offset0:64 offset1:65
; GFX9-NEXT: s_endpgm
+;
+; GFX1250-LABEL: write2_sgemm_sequence:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x10
+; GFX1250-NEXT: s_and_b32 s2, ttmp6, 15
+; GFX1250-NEXT: s_getreg_b32 s3, hwreg(HW_REG_IB_STS2, 6, 4)
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_load_b32 s0, s[0:1], 0x0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_bfe_u32 s1, ttmp6, 0x4000c
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_add_co_i32 s1, s1, 1
+; GFX1250-NEXT: s_mul_i32 s1, ttmp9, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_add_co_i32 s2, s2, s1
+; GFX1250-NEXT: s_cmp_eq_u32 s3, 0
+; GFX1250-NEXT: s_cselect_b32 s1, ttmp9, s2
+; GFX1250-NEXT: s_lshl_b32 s1, s1, 2
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_add_co_i32 s2, s1, 0xc20
+; GFX1250-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_lshrrev_b32 v0, 8, v0
+; GFX1250-NEXT: s_addk_co_i32 s1, 0xc60
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, s1 :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT: v_mov_b32_e32 v3, s0
+; GFX1250-NEXT: v_and_b32_e32 v0, 0xffc, v0
+; GFX1250-NEXT: ds_store_2addr_b32 v1, v2, v3 offset1:1
+; GFX1250-NEXT: ds_store_2addr_b32 v4, v2, v3 offset1:1
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v2, v3 offset1:1
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v2, v3 offset0:32 offset1:33
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v2, v3 offset0:64 offset1:65
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workgroup.id.x() #1
%y.i = tail call i32 @llvm.amdgcn.workitem.id.y() #1
%val = load float, ptr addrspace(1) %in
@@ -992,6 +1279,23 @@ define amdgpu_kernel void @simple_write2_v4f32_superreg_align4(ptr addrspace(3)
; GFX9-UNALIGNED-NEXT: ds_write2_b32 v0, v1, v2 offset0:2 offset1:3
; GFX9-UNALIGNED-NEXT: ds_write2_b32 v0, v3, v4 offset1:1
; GFX9-UNALIGNED-NEXT: s_endpgm
+;
+; GFX1250-LABEL: simple_write2_v4f32_superreg_align4:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x0
+; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_lshl_add_u32 v0, v0, 4, s4
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3
+; GFX1250-NEXT: v_dual_mov_b32 v3, s0 :: v_dual_mov_b32 v4, s1
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v1, v2 offset0:2 offset1:3
+; GFX1250-NEXT: ds_store_2addr_b32 v0, v3, v4 offset1:1
+; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in.gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %in
%val0 = load <4 x float>, ptr addrspace(1) %in.gep, align 4
@@ -1043,6 +1347,13 @@ define amdgpu_kernel void @write2_v2i32_align1_odd_offset() {
; GFX9-UNALIGNED-NEXT: v_mov_b32_e32 v2, 0
; GFX9-UNALIGNED-NEXT: ds_write_b64 v2, v[0:1] offset:65
; GFX9-UNALIGNED-NEXT: s_endpgm
+;
+; GFX1250-LABEL: write2_v2i32_align1_odd_offset:
+; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], lit64(0x1c80000007b)
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-NEXT: ds_store_b64 v2, v[0:1] offset:65
+; GFX1250-NEXT: s_endpgm
entry:
store <2 x i32> <i32 123, i32 456>, ptr addrspace(3) getelementptr (i8, ptr addrspace(3) @v2i32_align1, i32 65), align 1
ret void
@@ -1056,3 +1367,5 @@ declare i32 @llvm.amdgcn.workitem.id.y() #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }
attributes #2 = { convergent nounwind }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX1250-UNALIGNED: {{.*}}
More information about the llvm-commits
mailing list