[llvm] Reduce shl64 to shl32 if shift range is [63-32] (PR #125574)

via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 7 07:33:25 PST 2025


================
@@ -0,0 +1,1318 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+;; Test reduction of:
+;;
+;;   DST = shl i64 X, Y
+;;
+;; where Y is in the range [32, 63] to:
+;;
+;;   DST = [0, shl i32 X, (Y & 0x1F)]
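+;;
+;; Worked example (illustrative only, not checked by FileCheck): for Y = 37,
+;;
+;;   shl i64 X, 37  ==>  lo32 = 0, hi32 = shl i32 (trunc X), 5
+;;
+;; since 37 & 0x1F == 5 and every bit of the low half is shifted past bit 31.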
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range with metadata
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; FIXME: This case should be reduced, but SelectionDAG::computeKnownBits() cannot
+;        determine the minimum from metadata in this case.  Match current results
+;        for now.
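+;        (The !range metadata !0 = !{i64 32, i64 64} used below denotes the
+;        half-open interval [32, 64), i.e. shift amounts 32-63 inclusive.)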
+
+define i64 @shl_metadata(i64 noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v2, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load i64, ptr %arg1.ptr, !range !0
+  %shl = shl i64 %arg0, %shift.amt
+  ret i64 %shl
+}
+
+define <2 x i64> @shl_v2_metadata(<2 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v2_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v4, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v6, v[2:3]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <2 x i64> %arg0, %shift.amt
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_metadata(<3 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v3_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v12, v[6:7] offset:16
+; CHECK-NEXT:    flat_load_dwordx4 v[8:11], v[6:7]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v12, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v8, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v10, v[2:3]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <3 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <3 x i64> %arg0, %shift.amt
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_metadata(<4 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v4_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[10:13], v[8:9]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[13:16], v[8:9] offset:16
+; CHECK-NEXT:    ; kill: killed $vgpr8 killed $vgpr9
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v10, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v12, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v13, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v15, v[6:7]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <4 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <4 x i64> %arg0, %shift.amt
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_metadata(<5 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v5_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v19, v[10:11] offset:32
+; CHECK-NEXT:    flat_load_dwordx4 v[12:15], v[10:11]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[15:18], v[10:11] offset:16
+; CHECK-NEXT:    ; kill: killed $vgpr10 killed $vgpr11
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v19, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v12, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v14, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v15, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v17, v[6:7]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <5 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <5 x i64> %arg0, %shift.amt
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_metadata(<8 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v8_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[18:21], v[16:17]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v18, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v20, v[2:3]
+; CHECK-NEXT:    flat_load_dwordx4 v[18:21], v[16:17] offset:16
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v18, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v20, v[6:7]
+; CHECK-NEXT:    flat_load_dwordx4 v[18:21], v[16:17] offset:32
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v18, v[8:9]
+; CHECK-NEXT:    flat_load_dwordx4 v[16:19], v[16:17] offset:48
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], v20, v[10:11]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], v16, v[12:13]
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v18, v[14:15]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <8 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <8 x i64> %arg0, %shift.amt
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_metadata(<16 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v16_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:8
+; CHECK-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:4
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT:    s_waitcnt vmcnt(1)
+; CHECK-NEXT:    flat_load_dwordx4 v[32:35], v[48:49]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[35:38], v[48:49] offset:16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v32, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v34, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v35, v[4:5]
+; CHECK-NEXT:    flat_load_dwordx4 v[32:35], v[48:49] offset:32
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v37, v[6:7]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[35:38], v[48:49] offset:48
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v32, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], v34, v[10:11]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], v35, v[12:13]
+; CHECK-NEXT:    flat_load_dwordx4 v[32:35], v[48:49] offset:64
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v37, v[14:15]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[35:38], v[48:49] offset:80
+; CHECK-NEXT:    v_lshlrev_b64 v[16:17], v32, v[16:17]
+; CHECK-NEXT:    v_lshlrev_b64 v[18:19], v34, v[18:19]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[20:21], v35, v[20:21]
+; CHECK-NEXT:    flat_load_dwordx4 v[32:35], v[48:49] offset:96
+; CHECK-NEXT:    v_lshlrev_b64 v[22:23], v37, v[22:23]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[35:38], v[48:49] offset:112
+; CHECK-NEXT:    v_lshlrev_b64 v[24:25], v32, v[24:25]
+; CHECK-NEXT:    v_lshlrev_b64 v[26:27], v34, v[26:27]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[28:29], v35, v[28:29]
+; CHECK-NEXT:    v_lshlrev_b64 v[30:31], v37, v[30:31]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <16 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <16 x i64> %arg0, %shift.amt
+  ret <16 x i64> %shl
+}
+
+!0 = !{i64 32, i64 64}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range with an "or X, 16"
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; These cases must not be reduced because the known minimum shift amount, 16,
+; is outside the required [32, 63] range.
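+;
+; For example, %shift_amt = 0 yields a shift of 16, and "shl i64 X, 16" keeps
+; nonzero low 32 bits whenever bits [15:0] of X are nonzero.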
+
+define i64 @shl_or16(i64 noundef %arg0, i64 %shift_amt) {
+; CHECK-LABEL: shl_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v2, 16, v2
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or i64 %shift_amt, 16
+  %shl = shl i64 %arg0, %or
+  ret i64 %shl
+}
+
+define <2 x i64> @shl_v2_or16(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v2_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v5, 16, v6
+; CHECK-NEXT:    v_or_b32_e32 v4, 16, v4
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v4, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v5, v[2:3]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <2 x i64> %arg0, %or
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or16(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v3_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v7, 16, v10
+; CHECK-NEXT:    v_or_b32_e32 v8, 16, v8
+; CHECK-NEXT:    v_or_b32_e32 v6, 16, v6
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v6, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v8, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v7, v[4:5]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <3 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <3 x i64> %arg0, %or
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or16(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v4_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v9, 16, v14
+; CHECK-NEXT:    v_or_b32_e32 v11, 16, v12
+; CHECK-NEXT:    v_or_b32_e32 v10, 16, v10
+; CHECK-NEXT:    v_or_b32_e32 v8, 16, v8
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v8, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v10, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v11, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v9, v[6:7]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <4 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <4 x i64> %arg0, %or
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or16(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v5_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v11, 16, v18
+; CHECK-NEXT:    v_or_b32_e32 v13, 16, v16
+; CHECK-NEXT:    v_or_b32_e32 v14, 16, v14
+; CHECK-NEXT:    v_or_b32_e32 v12, 16, v12
+; CHECK-NEXT:    v_or_b32_e32 v10, 16, v10
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v10, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v12, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v14, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v13, v[6:7]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v11, v[8:9]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <5 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <5 x i64> %arg0, %or
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or16(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v8_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v16, v[0:1]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v18
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v16, v[2:3]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v20
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v16, v[4:5]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v22
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v16, v[6:7]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v24
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v16, v[8:9]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v26
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], v16, v[10:11]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v28
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], v16, v[12:13]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v30
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v16, v[14:15]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <8 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <8 x i64> %arg0, %or
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or16(<16 x i64> noundef %arg0, <16 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v16_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:4
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v31, v[0:1]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:12
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v31, v[2:3]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:20
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v31, v[4:5]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:28
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v31, v[6:7]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:36
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v31, v[8:9]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:44
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], v31, v[10:11]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:52
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], v31, v[12:13]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:60
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v31, v[14:15]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:68
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[16:17], v31, v[16:17]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:76
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[18:19], v31, v[18:19]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:84
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[20:21], v31, v[20:21]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:92
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[22:23], v31, v[22:23]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:100
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[24:25], v31, v[24:25]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:108
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[26:27], v31, v[26:27]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:116
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[28:29], v31, v[28:29]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:124
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v32, 16, v32
+; CHECK-NEXT:    v_lshlrev_b64 v[30:31], v32, v[30:31]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <16 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <16 x i64> %arg0, %or
+  ret <16 x i64> %shl
+}
+
+; Test inreg variants, where the shift amounts are passed in SGPRs.
+
+define i64 @shl_or16_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
+; CHECK-LABEL: shl_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s4, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or i64 %shift_amt, 16
+  %shl = shl i64 %arg0, %or
+  ret i64 %shl
+}
+
+define <2 x i64> @shl_v2_or16_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v2_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s18, 16
+; CHECK-NEXT:    s_or_b32 s5, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s5, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s4, v[2:3]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <2 x i64> %arg0, %or
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or16_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v3_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s20, 16
+; CHECK-NEXT:    s_or_b32 s5, s18, 16
+; CHECK-NEXT:    s_or_b32 s6, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s6, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s5, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], s4, v[4:5]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <3 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <3 x i64> %arg0, %or
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or16_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v4_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s22, 16
+; CHECK-NEXT:    s_or_b32 s5, s20, 16
+; CHECK-NEXT:    s_or_b32 s6, s18, 16
+; CHECK-NEXT:    s_or_b32 s7, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s7, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s6, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], s5, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], s4, v[6:7]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <4 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <4 x i64> %arg0, %or
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or16_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v5_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s24, 16
+; CHECK-NEXT:    s_or_b32 s5, s22, 16
+; CHECK-NEXT:    s_or_b32 s6, s20, 16
+; CHECK-NEXT:    s_or_b32 s7, s18, 16
+; CHECK-NEXT:    s_or_b32 s8, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s8, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s7, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], s6, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], s5, v[6:7]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], s4, v[8:9]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <5 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <5 x i64> %arg0, %or
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or16_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v8_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v16
+; CHECK-NEXT:    s_or_b32 s4, s28, 16
+; CHECK-NEXT:    s_or_b32 s5, s26, 16
+; CHECK-NEXT:    s_or_b32 s6, s24, 16
+; CHECK-NEXT:    s_or_b32 s7, s22, 16
+; CHECK-NEXT:    s_or_b32 s8, s20, 16
+; CHECK-NEXT:    s_or_b32 s9, s18, 16
+; CHECK-NEXT:    s_or_b32 s10, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s10, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s9, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], s8, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], s7, v[6:7]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], s6, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], s5, v[10:11]
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], s4, v[12:13]
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v16, v[14:15]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <8 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <8 x i64> %arg0, %or
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or16_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v16_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:4
+; CHECK-NEXT:    s_or_b32 s4, s28, 16
+; CHECK-NEXT:    s_or_b32 s5, s26, 16
+; CHECK-NEXT:    s_or_b32 s6, s24, 16
+; CHECK-NEXT:    s_or_b32 s7, s22, 16
+; CHECK-NEXT:    s_or_b32 s8, s20, 16
+; CHECK-NEXT:    s_or_b32 s9, s18, 16
+; CHECK-NEXT:    s_or_b32 s10, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s10, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s9, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], s8, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], s7, v[6:7]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], s6, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], s5, v[10:11]
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], s4, v[12:13]
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v31, v[14:15]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:12
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[16:17], v31, v[16:17]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:20
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[18:19], v31, v[18:19]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:28
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[20:21], v31, v[20:21]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:36
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[22:23], v31, v[22:23]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:44
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[24:25], v31, v[24:25]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:52
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[26:27], v31, v[26:27]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:60
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[28:29], v31, v[28:29]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:68
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v32, 16, v32
+; CHECK-NEXT:    v_lshlrev_b64 v[30:31], v32, v[30:31]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <16 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <16 x i64> %arg0, %or
+  ret <16 x i64> %shl
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range with an "or X, 32"
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; These cases are reduced because computeKnownBits() can prove a minimum
+; shift amount of 32 from the OR with 32.
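+;
+; With "or %shift_amt, 32" bit 5 of the amount is always set, so the amount
+; is at least 32; amounts of 64 or more would make the shl poison, so the
+; effective range is [32, 63].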
+
+define i64 @shl_or32(i64 noundef %arg0, i64 %shift_amt) {
+; CHECK-LABEL: shl_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v2, v0
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or i64 %shift_amt, 32
+  %shl = shl i64 %arg0, %or
+  ret i64 %shl
+}
+
+define <2 x i64> @shl_v2_or32(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v2_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v4, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v6, v2
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <2 x i64> %arg0, %or
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or32(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v3_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v6, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v8, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v10, v4
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <3 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <3 x i64> %arg0, %or
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or32(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v4_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v8, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v10, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v12, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v14, v6
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <4 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <4 x i64> %arg0, %or
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or32(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v5_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v10, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v12, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v14, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v16, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v18, v8
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <5 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <5 x i64> %arg0, %or
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or32(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v8_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v20, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v22, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v24, v8
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, v26, v10
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, v28, v12
+; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v30, v14
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    v_mov_b32_e32 v10, 0
+; CHECK-NEXT:    v_mov_b32_e32 v12, 0
+; CHECK-NEXT:    v_mov_b32_e32 v14, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <8 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <8 x i64> %arg0, %or
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or32(<16 x i64> noundef %arg0, <16 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v16_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4
+; CHECK-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:12
+; CHECK-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:20
+; CHECK-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:28
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:36
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v3, v2
+; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:44
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v5, v4
+; CHECK-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:52
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v7, v6
+; CHECK-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:60
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v0, v8
+; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:68
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, v2, v10
+; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:76
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, v4, v12
+; CHECK-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:84
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v6, v14
+; CHECK-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:92
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    v_mov_b32_e32 v10, 0
+; CHECK-NEXT:    v_mov_b32_e32 v12, 0
+; CHECK-NEXT:    v_mov_b32_e32 v14, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v17, v0, v16
+; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:100
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v19, v2, v18
+; CHECK-NEXT:    v_mov_b32_e32 v16, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(2)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v21, v4, v20
+; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:116
+; CHECK-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:108
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v23, v6, v22
+; CHECK-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:124
+; CHECK-NEXT:    v_mov_b32_e32 v18, 0
+; CHECK-NEXT:    v_mov_b32_e32 v20, 0
+; CHECK-NEXT:    v_mov_b32_e32 v22, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v25, v0, v24
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v24, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(2)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v29, v2, v28
+; CHECK-NEXT:    s_waitcnt vmcnt(1)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v27, v4, v26
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v31, v6, v30
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_mov_b32_e32 v26, 0
+; CHECK-NEXT:    v_mov_b32_e32 v28, 0
+; CHECK-NEXT:    v_mov_b32_e32 v30, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <16 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <16 x i64> %arg0, %or
+  ret <16 x i64> %shl
+}
+
+; Test inreg variants, where the shift amounts are passed in SGPRs.
+
+define i64 @shl_or32_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
+; CHECK-LABEL: shl_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or i64 %shift_amt, 32
+  %shl = shl i64 %arg0, %or
+  ret i64 %shl
+}
+
+define <2 x i64> @shl_v2_or32_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v2_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <2 x i64> %arg0, %or
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or32_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v3_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <3 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <3 x i64> %arg0, %or
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or32_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v4_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s22, v6
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <4 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <4 x i64> %arg0, %or
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or32_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v5_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s22, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s24, v8
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <5 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <5 x i64> %arg0, %or
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or32_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v8_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s22, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s24, v8
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, s26, v10
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, s28, v12
+; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v16, v14
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    v_mov_b32_e32 v10, 0
+; CHECK-NEXT:    v_mov_b32_e32 v12, 0
+; CHECK-NEXT:    v_mov_b32_e32 v14, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <8 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <8 x i64> %arg0, %or
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or32_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v16_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4
+; CHECK-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:12
+; CHECK-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:20
+; CHECK-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:28
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s24, v8
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, s26, v10
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, s28, v12
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    v_mov_b32_e32 v10, 0
+; CHECK-NEXT:    v_mov_b32_e32 v12, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v1, v14
+; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:36
+; CHECK-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:44
+; CHECK-NEXT:    s_waitcnt vmcnt(4)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v17, v3, v16
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v19, v5, v18
+; CHECK-NEXT:    s_waitcnt vmcnt(2)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v21, v7, v20
+; CHECK-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:68
+; CHECK-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:60
+; CHECK-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:52
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s22, v6
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(4)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v23, v1, v22
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v25, v14, v24
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v14, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(1)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v29, v16, v28
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v27, v18, v26
+; CHECK-NEXT:    v_lshlrev_b32_e32 v31, v20, v30
+; CHECK-NEXT:    v_mov_b32_e32 v16, 0
+; CHECK-NEXT:    v_mov_b32_e32 v18, 0
+; CHECK-NEXT:    v_mov_b32_e32 v20, 0
+; CHECK-NEXT:    v_mov_b32_e32 v22, 0
+; CHECK-NEXT:    v_mov_b32_e32 v24, 0
+; CHECK-NEXT:    v_mov_b32_e32 v26, 0
+; CHECK-NEXT:    v_mov_b32_e32 v28, 0
+; CHECK-NEXT:    v_mov_b32_e32 v30, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <16 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <16 x i64> %arg0, %or
+  ret <16 x i64> %shl
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range from max/min
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; FIXME: This case should be reduced too, but computeKnownBits() cannot
+;        determine the range.  Match current results for now.
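+;        Here umax pins the amount to at least 32 and umin to at most 63, so
+;        the clamped amount is provably in [32, 63].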
+
+define i64 @shl_maxmin(i64 noundef %arg0, i64 noundef %arg1) {
+; CHECK-LABEL: shl_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v2, 32, v2, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v2, 63, v2, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call i64 @llvm.umax.i64(i64 %arg1, i64 32)
+  %min = call i64 @llvm.umin.i64(i64 %max,  i64 63)
+  %shl = shl i64 %arg0, %min
+  ret i64 %shl
+}
+
+define <2 x i64> @shl_v2_maxmin(<2 x i64> noundef %arg0, <2 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v2_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v4, 32, v4, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v6, 32, v6, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v6, 63, v6, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v6, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v4, 63, v4, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v4, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call <2 x i64> @llvm.umax.i64(<2 x i64> %arg1, <2 x i64> splat (i64 32))
+  %min = call <2 x i64> @llvm.umin.i64(<2 x i64> %max,  <2 x i64> splat (i64 63))
+  %shl = shl <2 x i64> %arg0, %min
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_maxmin(<3 x i64> noundef %arg0, <3 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v3_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v6, 32, v6, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[8:9]
+; CHECK-NEXT:    v_cndmask_b32_e32 v9, 0, v9, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v8, 32, v8, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[10:11]
+; CHECK-NEXT:    v_cndmask_b32_e32 v11, 0, v11, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 32, v10, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[10:11]
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 63, v10, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v10, v[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v8, 63, v8, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[6:7]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v8, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v6, 63, v6, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v6, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call <3 x i64> @llvm.umax.i64(<3 x i64> %arg1, <3 x i64> splat (i64 32))
+  %min = call <3 x i64> @llvm.umin.i64(<3 x i64> %max,  <3 x i64> splat (i64 63))
+  %shl = shl <3 x i64> %arg0, %min
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_maxmin(<4 x i64> noundef %arg0, <4 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v4_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[8:9]
+; CHECK-NEXT:    v_cndmask_b32_e32 v9, 0, v9, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v8, 32, v8, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[10:11]
+; CHECK-NEXT:    v_cndmask_b32_e32 v11, 0, v11, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 32, v10, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[12:13]
+; CHECK-NEXT:    v_cndmask_b32_e32 v13, 0, v13, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v12, 32, v12, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[14:15]
+; CHECK-NEXT:    v_cndmask_b32_e32 v15, 0, v15, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v14, 32, v14, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[14:15]
+; CHECK-NEXT:    v_cndmask_b32_e32 v14, 63, v14, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[12:13]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v14, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v12, 63, v12, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[10:11]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v12, v[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 63, v10, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v10, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v8, 63, v8, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v8, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call <4 x i64> @llvm.umax.i64(<4 x i64> %arg1, <4 x i64> splat (i64 32))
+  %min = call <4 x i64> @llvm.umin.i64(<4 x i64> %max,  <4 x i64> splat (i64 63))
+  %shl = shl <4 x i64> %arg0, %min
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_maxmin(<5 x i64> noundef %arg0, <5 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v5_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[10:11]
+; CHECK-NEXT:    v_cndmask_b32_e32 v11, 0, v11, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 32, v10, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[12:13]
+; CHECK-NEXT:    v_cndmask_b32_e32 v13, 0, v13, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v12, 32, v12, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[14:15]
+; CHECK-NEXT:    v_cndmask_b32_e32 v15, 0, v15, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v14, 32, v14, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[16:17]
+; CHECK-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 32, v16, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[18:19]
+; CHECK-NEXT:    v_cndmask_b32_e32 v19, 0, v19, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v18, 32, v18, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[18:19]
+; CHECK-NEXT:    v_cndmask_b32_e32 v18, 63, v18, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[16:17]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v18, v[8:9]
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 63, v16, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[14:15]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v16, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v14, 63, v14, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[12:13]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v14, v[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v12, 63, v12, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[10:11]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v12, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 63, v10, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v10, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call <5 x i64> @llvm.umax.i64(<5 x i64> %arg1, <5 x i64> splat (i64 32))
+  %min = call <5 x i64> @llvm.umin.i64(<5 x i64> %max,  <5 x i64> splat (i64 63))
+  %shl = shl <5 x i64> %arg0, %min
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_maxmin(<8 x i64> noundef %arg0, <8 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v8_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[16:17]
+; CHECK-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 32, v16, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[18:19]
+; CHECK-NEXT:    v_cndmask_b32_e32 v19, 0, v19, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v18, 32, v18, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[20:21]
+; CHECK-NEXT:    v_cndmask_b32_e32 v21, 0, v21, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v20, 32, v20, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[22:23]
+; CHECK-NEXT:    v_cndmask_b32_e32 v23, 0, v23, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v22, 32, v22, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[24:25]
+; CHECK-NEXT:    v_cndmask_b32_e32 v25, 0, v25, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v24, 32, v24, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[26:27]
+; CHECK-NEXT:    v_cndmask_b32_e32 v27, 0, v27, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v26, 32, v26, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[28:29]
+; CHECK-NEXT:    v_cndmask_b32_e32 v29, 0, v29, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v28, 32, v28, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[28:29]
+; CHECK-NEXT:    v_cndmask_b32_e32 v28, 63, v28, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[26:27]
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], v28, v[12:13]
+; CHECK-NEXT:    v_cndmask_b32_e32 v26, 63, v26, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[24:25]
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], v26, v[10:11]
+; CHECK-NEXT:    v_cndmask_b32_e32 v24, 63, v24, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[22:23]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v24, v[8:9]
+; CHECK-NEXT:    v_cndmask_b32_e32 v22, 63, v22, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[20:21]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v22, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v20, 63, v20, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[18:19]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v20, v[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v18, 63, v18, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[16:17]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v18, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 63, v16, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v16, v[0:1]
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[30:31]
+; CHECK-NEXT:    v_cndmask_b32_e32 v17, 0, v31, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 32, v30, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[16:17]
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 63, v16, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v16, v[14:15]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call <8 x i64> @llvm.umax.i64(<8 x i64> %arg1, <8 x i64> splat (i64 32))
+  %min = call <8 x i64> @llvm.umin.i64(<8 x i64> %max,  <8 x i64> splat (i64 63))
+  %shl = shl <8 x i64> %arg0, %min
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_maxmin(<16 x i64> noundef %arg0, <16 x i64> noundef %arg1) {
----------------
LU-JOHN wrote:

Removed

https://github.com/llvm/llvm-project/pull/125574

