[llvm] [AMDGPU] Create hi-half of 64-bit ashr with mov of -1 (PR #146569)
Shilei Tian via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 1 13:18:21 PDT 2025
================
@@ -0,0 +1,102 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s
+
+; Test that negative 64-bit values arithmetically shifted right by [32, 63] bits
+; have their hi-half result created by moving an all-ones constant.
+
+; FIXME: Range metadata is invalidated when i64 types are legalized to v2i32 types.
+; We could call performSraCombine before legalization, but other optimizations only work
+; with 64-bit sra.
+define i64 @scalar_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: scalar_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v4, v[2:3]
+; CHECK-NEXT: ; kill: killed $vgpr0 killed $vgpr1
+; CHECK-NEXT: ; kill: killed $vgpr2 killed $vgpr3
+; CHECK-NEXT: v_ashrrev_i32_e32 v1, 31, v5
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, v4, v5
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load i64, ptr %arg0.ptr, !range !0, !noundef !{}
+ %shift.amt = load i64, ptr %arg1.ptr, !range !1, !noundef !{}
+ %ashr = ashr i64 %val, %shift.amt
+ ret i64 %ashr
+}
+
+define <2 x i64> @v2_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: v2_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[0:1]
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: v_mov_b32_e32 v1, -1
+; CHECK-NEXT: v_mov_b32_e32 v3, -1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, v8, v5
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, v10, v7
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load <2 x i64>, ptr %arg0.ptr, !range !2, !noundef !{}
+ %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !3, !noundef !{}
+ %ashr = ashr <2 x i64> %val, %shift.amt
+ ret <2 x i64> %ashr
+}
+
+define <3 x i64> @v3_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: v3_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v4, v[0:1] offset:20
+; CHECK-NEXT: flat_load_dword v6, v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: v_mov_b32_e32 v1, -1
+; CHECK-NEXT: v_mov_b32_e32 v3, -1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v4, v6, v4
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, v8, v5
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, v10, v7
+; CHECK-NEXT: v_mov_b32_e32 v5, -1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load <3 x i64>, ptr %arg0.ptr, !range !4, !noundef !{}
+ %shift.amt = load <3 x i64>, ptr %arg1.ptr, !range !5, !noundef !{}
+ %ashr = ashr <3 x i64> %val, %shift.amt
+ ret <3 x i64> %ashr
+}
+
+define <4 x i64> @v4_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: v4_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[0:1]
+; CHECK-NEXT: flat_load_dwordx4 v[11:14], v[0:1] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[15:18], v[2:3] offset:16
+; CHECK-NEXT: v_mov_b32_e32 v1, -1
+; CHECK-NEXT: v_mov_b32_e32 v3, -1
+; CHECK-NEXT: v_mov_b32_e32 v5, -1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v7, -1
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, v4, v8
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, v6, v10
+; CHECK-NEXT: v_ashrrev_i32_e32 v4, v15, v12
+; CHECK-NEXT: v_ashrrev_i32_e32 v6, v17, v14
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load <4 x i64>, ptr %arg0.ptr, !range !6, !noundef !{}
+ %shift.amt = load <4 x i64>, ptr %arg1.ptr, !range !7, !noundef !{}
+ %ashr = ashr <4 x i64> %val, %shift.amt
+ ret <4 x i64> %ashr
+}
+
+!0 = !{i64 -6000000000, i64 0}
+!1 = !{i64 32, i64 64}
----------------
shiltian wrote:
Can you add a negative test showing that when the shift amount is not in the range `[32,64)`, we don't perform this optimization?
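
For example, a minimal sketch of such a case (the function name and the exact shift-amount range here are only illustrative; the CHECK lines would be regenerated with update_llc_test_checks.py). Because the shift amount can be below 32, the hi half of the result is not guaranteed to be all-ones, so the combine should not fire:

```llvm
; Negative test: shift amount may be < 32, so the hi half is not
; necessarily all-ones and no v_mov_b32 -1 should be emitted for it.
define i64 @scalar_ashr_small_amount(ptr %arg0.ptr, ptr %arg1.ptr) {
  %val = load i64, ptr %arg0.ptr, !range !0, !noundef !{}
  %shift.amt = load i64, ptr %arg1.ptr, !range !1, !noundef !{}
  %ashr = ashr i64 %val, %shift.amt
  ret i64 %ashr
}

!0 = !{i64 -6000000000, i64 0} ; value known to be negative
!1 = !{i64 0, i64 32}          ; shift amount may be less than 32
```

A case where the value is not known to be negative might also be worth covering.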
https://github.com/llvm/llvm-project/pull/146569