[llvm] [AMDGPU] Convert more 64-bit lshr to 32-bit if shift amt>=32 (PR #138204)

via llvm-commits llvm-commits at lists.llvm.org
Fri May 2 11:12:19 PDT 2025


================
@@ -0,0 +1,561 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+;; Test reduction of:
+;;
+;;   DST = lshr i64 X, Y
+;;
+;; where Y is known to be in the range [32, 63], to:
+;;
+;;   DST = [srl i32 hi(X), (Y & 0x1F), 0]
+;;
+;; i.e. the low 32 bits of DST are a 32-bit shift of the high half of X,
+;; and the high 32 bits of DST are zero.
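+;;
+;; As an illustrative sketch (the value names here are hypothetical, not
+;; taken from the tests below), with Y known to be in [32, 63]:
+;;
+;;   %srl = lshr i64 %x, %y
+;;
+;; is equivalent to:
+;;
+;;   %x.hi   = lshr i64 %x, 32
+;;   %hi32   = trunc i64 %x.hi to i32    ; high half of %x
+;;   %amt    = trunc i64 %y to i32
+;;   %amt.lo = and i32 %amt, 31          ; Y & 0x1F
+;;   %lo     = lshr i32 %hi32, %amt.lo   ; 32-bit shift
+;;   %srl    = zext i32 %lo to i64       ; high 32 bits are zero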
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range with metadata
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define i64 @srl_metadata(i64 %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: srl_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v0, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshrrev_b32_e32 v0, v0, v1
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load i64, ptr %arg1.ptr, !range !0, !noundef !{}
+  %srl = lshr i64 %arg0, %shift.amt
+  ret i64 %srl
+}
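+; In the reduced form above, the load fetches only the low 32 bits of the
+; shift amount, v_lshrrev_b32 computes dst = src1 >> src0 and so shifts the
+; high half of the argument (v1), and v_mov_b32 zeroes the high 32 bits of
+; the result.  (The hardware uses only the low 5 bits of a 32-bit shift
+; amount, which implements the Y & 0x1F.)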
+
+; For an exact shift the shifted-out bits matter, so the reduction must not be done.
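+; (Per the LangRef, `lshr exact` yields poison if any shifted-out bit is
+; nonzero, so the low 32 bits of the source are still significant here.)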
+define i64 @srl_exact_metadata(i64 %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: srl_exact_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v2, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshrrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load i64, ptr %arg1.ptr, !range !0, !noundef !{}
+  %srl = lshr exact i64 %arg0, %shift.amt
+  ret i64 %srl
+}
+
+define i64 @srl_metadata_two_ranges(i64 %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: srl_metadata_two_ranges:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v0, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshrrev_b32_e32 v0, v0, v1
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load i64, ptr %arg1.ptr, !range !1, !noundef !{}
+  %srl = lshr i64 %arg0, %shift.amt
+  ret i64 %srl
+}
+
+; The known minimum shift amount, 31, is below 32, so the reduction must not be done.
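+; For example, with X = 1 << 32 and Y = 31 the correct result is 2, but the
+; reduced form would compute hi(X) >> (31 & 31) = 1 >> 31 = 0.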
+define i64 @srl_metadata_out_of_range(i64 %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: srl_metadata_out_of_range:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v2, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshrrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load i64, ptr %arg1.ptr, !range !2, !noundef !{}
+  %srl = lshr i64 %arg0, %shift.amt
+  ret i64 %srl
+}
+
+; The range bounds cannot simply be truncated to i32 when the load is
+; narrowed to i32, so the reduction must not be done.  The bounds were
+; chosen so that, if they were (incorrectly) truncated to i32, the known
+; minimum would be 32 and the srl would be erroneously reduced.
+define i64 @srl_metadata_cant_be_narrowed_to_i32(i64 %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: srl_metadata_cant_be_narrowed_to_i32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v2, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshrrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load i64, ptr %arg1.ptr, !range !3, !noundef !{}
+  %srl = lshr i64 %arg0, %shift.amt
+  ret i64 %srl
+}
+
+define <2 x i64> @srl_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: srl_v2_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshrrev_b32_e32 v0, v4, v1
+; CHECK-NEXT:    v_lshrrev_b32_e32 v2, v6, v3
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    v_mov_b32_e32 v3, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
+  %srl = lshr <2 x i64> %arg0, %shift.amt
+  ret <2 x i64> %srl
+}
+
+; For an exact shift the shifted-out bits matter, so the reduction must not be done.
+define <2 x i64> @srl_exact_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: srl_exact_v2_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshrrev_b64 v[0:1], v4, v[0:1]
+; CHECK-NEXT:    v_lshrrev_b64 v[2:3], v6, v[2:3]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
+  %srl = lshr exact <2 x i64> %arg0, %shift.amt
+  ret <2 x i64> %srl
+}
+
+define <3 x i64> @srl_v3_metadata(<3 x i64> %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: srl_v3_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v0, v[6:7] offset:16
+; CHECK-NEXT:    flat_load_dwordx4 v[8:11], v[6:7]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshrrev_b32_e32 v4, v0, v5
+; CHECK-NEXT:    v_lshrrev_b32_e32 v0, v8, v1
+; CHECK-NEXT:    v_lshrrev_b32_e32 v2, v10, v3
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    v_mov_b32_e32 v3, 0
+; CHECK-NEXT:    v_mov_b32_e32 v5, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <3 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
+  %srl = lshr <3 x i64> %arg0, %shift.amt
+  ret <3 x i64> %srl
+}
+
+define <4 x i64> @srl_v4_metadata(<4 x i64> %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: srl_v4_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[10:13], v[8:9]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[13:16], v[8:9] offset:16
+; CHECK-NEXT:    ; kill: killed $vgpr8 killed $vgpr9
+; CHECK-NEXT:    v_lshrrev_b32_e32 v0, v10, v1
+; CHECK-NEXT:    v_lshrrev_b32_e32 v2, v12, v3
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshrrev_b32_e32 v4, v13, v5
+; CHECK-NEXT:    v_lshrrev_b32_e32 v6, v15, v7
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    v_mov_b32_e32 v3, 0
+; CHECK-NEXT:    v_mov_b32_e32 v5, 0
+; CHECK-NEXT:    v_mov_b32_e32 v7, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <4 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
+  %srl = lshr <4 x i64> %arg0, %shift.amt
+  ret <4 x i64> %srl
+}
+
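+; The !range metadata below encodes half-open [lo, hi) intervals for the
+; loaded shift amount:
+;   !0 -> [32, 64)
+;   !1 -> [32, 38) u [42, 48)
+;   !2 -> [31, 38) u [42, 48)                  (known minimum 31)
+;   !3 -> [32, 38) u [2147483680, 2147483681)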
+!0 = !{i64 32, i64 64}
+!1 = !{i64 32, i64 38, i64 42, i64 48}
+!2 = !{i64 31, i64 38, i64 42, i64 48}
+!3 = !{i64 32, i64 38, i64 2147483680, i64 2147483681}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range with an "or X, 16"
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; These cases must not be reduced because the known minimum shift amount, 16,
+; is not in the [32, 63] range.
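+; For example, %shift_amt = 0 gives a shift amount of 16, and
+; (lshr i64 X, 16) keeps bits from both halves of X, which a single 32-bit
+; shift of the high half cannot produce.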
+
+define i64 @srl_or16(i64 %arg0, i64 %shift_amt) {
+; CHECK-LABEL: srl_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v2, 16, v2
+; CHECK-NEXT:    v_lshrrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or i64 %shift_amt, 16
+  %srl = lshr i64 %arg0, %or
+  ret i64 %srl
+}
+
+define <2 x i64> @srl_v2_or16(<2 x i64> %arg0, <2 x i64> %shift_amt) {
+; CHECK-LABEL: srl_v2_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v5, 16, v6
+; CHECK-NEXT:    v_or_b32_e32 v4, 16, v4
+; CHECK-NEXT:    v_lshrrev_b64 v[0:1], v4, v[0:1]
+; CHECK-NEXT:    v_lshrrev_b64 v[2:3], v5, v[2:3]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i64> %shift_amt, splat (i64 16)
+  %srl = lshr <2 x i64> %arg0, %or
+  ret <2 x i64> %srl
+}
+
+define <3 x i64> @srl_v3_or16(<3 x i64> %arg0, <3 x i64> %shift_amt) {
+; CHECK-LABEL: srl_v3_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v7, 16, v10
+; CHECK-NEXT:    v_or_b32_e32 v8, 16, v8
+; CHECK-NEXT:    v_or_b32_e32 v6, 16, v6
+; CHECK-NEXT:    v_lshrrev_b64 v[0:1], v6, v[0:1]
+; CHECK-NEXT:    v_lshrrev_b64 v[2:3], v8, v[2:3]
+; CHECK-NEXT:    v_lshrrev_b64 v[4:5], v7, v[4:5]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <3 x i64> %shift_amt, splat (i64 16)
+  %srl = lshr <3 x i64> %arg0, %or
+  ret <3 x i64> %srl
+}
+
+define <4 x i64> @srl_v4_or16(<4 x i64> %arg0, <4 x i64> %shift_amt) {
+; CHECK-LABEL: srl_v4_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v9, 16, v14
+; CHECK-NEXT:    v_or_b32_e32 v11, 16, v12
+; CHECK-NEXT:    v_or_b32_e32 v10, 16, v10
+; CHECK-NEXT:    v_or_b32_e32 v8, 16, v8
+; CHECK-NEXT:    v_lshrrev_b64 v[0:1], v8, v[0:1]
+; CHECK-NEXT:    v_lshrrev_b64 v[2:3], v10, v[2:3]
+; CHECK-NEXT:    v_lshrrev_b64 v[4:5], v11, v[4:5]
+; CHECK-NEXT:    v_lshrrev_b64 v[6:7], v9, v[6:7]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <4 x i64> %shift_amt, splat (i64 16)
+  %srl = lshr <4 x i64> %arg0, %or
+  ret <4 x i64> %srl
+}
+
+; Test SGPR (inreg) operands.
+
+define i64 @srl_or16_sgpr(i64 inreg %arg0, i64 inreg %shift_amt) {
----------------
LU-JOHN wrote:

Pre-commit https://github.com/llvm/llvm-project/pull/138281 includes tests with an SGPR return.

https://github.com/llvm/llvm-project/pull/138204

