[llvm] [AMDGPU][GISel] Add 64bit pattern to emit `v_lshl_add_u64` (PR #124763)

Alan Li via llvm-commits llvm-commits@lists.llvm.org
Thu Mar 6 20:04:32 PST 2025


================
@@ -1,108 +1,258 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs -global-isel=1 < %s | FileCheck -check-prefix=GI %s
 
 define i64 @lshl_add_u64_v1v(i64 %v, i64 %a) {
 ; GCN-LABEL: lshl_add_u64_v1v:
-; GCN: v_lshl_add_u64 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 1, v[{{[0-9:]+}}]
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 1, v[2:3]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: lshl_add_u64_v1v:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 1, v[2:3]
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %shl = shl i64 %v, 1
   %add = add i64 %shl, %a
   ret i64 %add
 }
 
 define i64 @lshl_add_u64_v4v(i64 %v, i64 %a) {
 ; GCN-LABEL: lshl_add_u64_v4v:
-; GCN: v_lshl_add_u64 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 4, v[{{[0-9:]+}}]
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 4, v[2:3]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: lshl_add_u64_v4v:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 4, v[2:3]
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %shl = shl i64 %v, 4
   %add = add i64 %shl, %a
   ret i64 %add
 }
 
 define i64 @lshl_add_u64_v5v(i64 %v, i64 %a) {
 ; GCN-LABEL: lshl_add_u64_v5v:
-; GCN:      v_lshlrev_b64
-; GCN-NEXT: v_lshl_add_u64 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 0, v[{{[0-9:]+}}]
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 5, v[2:3]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: lshl_add_u64_v5v:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshlrev_b64 v[0:1], 5, v[0:1]
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %shl = shl i64 %v, 5
   %add = add i64 %shl, %a
   ret i64 %add
 }
 
 define i64 @lshl_add_u64_vvv(i64 %v, i64 %s, i64 %a) {
 ; GCN-LABEL: lshl_add_u64_vvv:
-; GCN:      v_lshlrev_b64
-; GCN-NEXT: v_lshl_add_u64 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 0, v[{{[0-9:]+}}]
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], v2, v[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: lshl_add_u64_vvv:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 0, v[4:5]
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %shl = shl i64 %v, %s
   %add = add i64 %shl, %a
   ret i64 %add
 }
 
-define amdgpu_kernel void @lshl_add_u64_s2v(i64 %v) {
+define i64 @lshl_add_u64_s2v(i64 %v, i64 %a) {
 ; GCN-LABEL: lshl_add_u64_s2v:
-; GCN: v_lshl_add_u64 v[{{[0-9:]+}}], s[{{[0-9:]+}}], 2, v[{{[0-9:]+}}]
-  %a = load i64, ptr undef
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 2, v[2:3]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: lshl_add_u64_s2v:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 2, v[2:3]
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %shl = shl i64 %v, 2
   %add = add i64 %shl, %a
-  store i64 %add, ptr undef
-  ret void
+  ret i64 %add
 }
 
-define amdgpu_kernel void @lshl_add_u64_v2s(i64 %a) {
+define i64 @lshl_add_u64_v2s(i64 %a, i64 %v) {
 ; GCN-LABEL: lshl_add_u64_v2s:
-; GCN: v_lshl_add_u64 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 2, s[{{[0-9:]+}}]
-  %v = load i64, ptr undef
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[2:3], 2, v[0:1]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: lshl_add_u64_v2s:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[2:3], 2, v[0:1]
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %shl = shl i64 %v, 2
   %add = add i64 %shl, %a
-  store i64 %add, ptr undef
-  ret void
+  ret i64 %add
 }
 
-define amdgpu_kernel void @lshl_add_u64_s2s(i64 %v, i64 %a) {
+define i64 @lshl_add_u64_s2s(i64 %v, i64 %a) {
 ; GCN-LABEL: lshl_add_u64_s2s:
-; GCN:    s_lshl_b64
-; GCN:    s_add_u32
-; GCN:    s_addc_u32
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 2, v[2:3]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: lshl_add_u64_s2s:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 2, v[2:3]
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %shl = shl i64 %v, 2
   %add = add i64 %shl, %a
-  store i64 %add, ptr undef
-  ret void
+  ret i64 %add
 }
 
 define i64 @add_u64_vv(i64 %v, i64 %a) {
 ; GCN-LABEL: add_u64_vv:
-; GCN: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: add_u64_vv:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %add = add i64 %v, %a
   ret i64 %add
 }
 
-define amdgpu_kernel void @add_u64_sv(i64 %v) {
+define i64 @add_u64_sv(i64 %v, i64 %a) {
 ; GCN-LABEL: add_u64_sv:
-; GCN: v_lshl_add_u64 v[0:1], s[0:1], 0, v[0:1]
-  %a = load i64, ptr undef
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: add_u64_sv:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %add = add i64 %v, %a
-  store i64 %add, ptr undef
-  ret void
+  ret i64 %add
 }
 
-define amdgpu_kernel void @add_u64_vs(i64 %a) {
+define i64 @add_u64_vs(i64 %a, i64 %v) {
 ; GCN-LABEL: add_u64_vs:
-; GCN: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
-  %v = load i64, ptr undef
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[2:3], 0, v[0:1]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: add_u64_vs:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[2:3], 0, v[0:1]
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %add = add i64 %v, %a
-  store i64 %add, ptr undef
-  ret void
+  ret i64 %add
 }
 
-define amdgpu_kernel void @add_u64_ss(i64 %v, i64 %a) {
+define i64 @add_u64_ss(i64 %v, i64 %a) {
 ; GCN-LABEL: add_u64_ss:
-; GCN: s_add_u32
-; GCN: s_addc_u32 s1, s1, s3
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: add_u64_ss:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %add = add i64 %v, %a
-  store i64 %add, ptr undef
-  ret void
+  ret i64 %add
 }
 
 define i32 @lshl_add_u64_gep(ptr %p, i64 %a) {
 ; GCN-LABEL: lshl_add_u64_gep:
-; GCN: v_lshl_add_u64 v[0:1], v[2:3], 2, v[0:1]
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[2:3], 2, v[0:1]
+; GCN-NEXT:    flat_load_dword v0, v[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: lshl_add_u64_gep:
+; GI:       ; %bb.0:
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[2:3], 2, v[0:1]
+; GI-NEXT:    flat_load_dword v0, v[0:1]
+; GI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GI-NEXT:    s_setpc_b64 s[30:31]
   %gep = getelementptr inbounds i32, ptr %p, i64 %a
   %v = load i32, ptr %gep
   ret i32 %v
 }
+
+ at arr = global [10 x [10 x i64]] zeroinitializer
+define i64 @lshl_add_u64_gep_shift(i64 %row, i64 %col) {
+; GCN-LABEL: lshl_add_u64_gep_shift:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_getpc_b64 s[0:1]
+; GCN-NEXT:    s_add_u32 s0, s0, arr@gotpcrel32@lo+4
+; GCN-NEXT:    s_addc_u32 s1, s1, arr@gotpcrel32@hi+12
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GCN-NEXT:    s_movk_i32 s2, 0x50
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b64_e32 v[4:5], s[0:1]
+; GCN-NEXT:    v_mad_u64_u32 v[4:5], s[0:1], v0, s2, v[4:5]
+; GCN-NEXT:    v_mov_b32_e32 v0, v5
+; GCN-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v1, s2, v[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v5, v0
+; GCN-NEXT:    v_lshl_add_u64 v[0:1], v[2:3], 2, v[4:5]
+; GCN-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GI-LABEL: lshl_add_u64_gep_shift:
+; GI:       ; %bb.0: ; %entry
+; GI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GI-NEXT:    s_getpc_b64 s[0:1]
+; GI-NEXT:    s_add_u32 s0, s0, arr@gotpcrel32@lo+4
+; GI-NEXT:    s_addc_u32 s1, s1, arr@gotpcrel32@hi+12
+; GI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GI-NEXT:    v_mov_b32_e32 v6, 0x50
+; GI-NEXT:    v_mad_u64_u32 v[4:5], s[2:3], v0, v6, 0
+; GI-NEXT:    v_mad_u64_u32 v[0:1], s[2:3], v1, v6, 0
+; GI-NEXT:    v_add_u32_e32 v5, v5, v0
+; GI-NEXT:    s_waitcnt lgkmcnt(0)
+; GI-NEXT:    v_mov_b64_e32 v[0:1], s[0:1]
+; GI-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v4
+; GI-NEXT:    s_nop 1
+; GI-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GI-NEXT:    v_lshl_add_u64 v[0:1], v[2:3], 2, v[0:1]
+; GI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GI-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %base = getelementptr [10 x [10 x i64]], ptr @arr, i64 0, i64 %row, i64 0
+  %shifted_col = shl i64 %col, 2 ; multiply by sizeof(i64) (shift left by 2)
+  %ptr = getelementptr i8, ptr %base, i64 %shifted_col
+  %val = load i64, ptr %ptr
+  ret i64 %val
+}
----------------
lialan wrote:

Added SGPR input tests, and vector pointer tests.

https://github.com/llvm/llvm-project/pull/124763


More information about the llvm-commits mailing list