[llvm] Reduce shl64 to shl32 if shift range is [63-32] (PR #125574)

via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 6 13:54:17 PST 2025


https://github.com/LU-JOHN updated https://github.com/llvm/llvm-project/pull/125574

From bcf4934e5401052ea97a95b34e8fcc654b1c47d0 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Wed, 5 Feb 2025 11:01:30 -0600
Subject: [PATCH 1/4] Reduce shl64 to shl32 if shift range is [63-32]

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 32 ++++++---
 llvm/test/CodeGen/AMDGPU/shl64_reduce.ll      | 67 +++++++++++++++++++
 2 files changed, 91 insertions(+), 8 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/shl64_reduce.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 792e17eeedab141..d7c004e1308c7f6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4040,19 +4040,35 @@ SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
   EVT VT = N->getValueType(0);
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+  ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
+  SDLoc SL(N);
+  SelectionDAG &DAG = DCI.DAG;
 
-  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
-  if (!RHS)
+  if (!CRHS) {
+    // shl i64 X, Y -> [0, shl i32 X, (Y - 32)]
+    if (VT == MVT::i64) {
+      KnownBits Known = DAG.computeKnownBits(RHS);
+      if (Known.getMinValue().getZExtValue() >= 32) {
+        SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, RHS);
+        const SDValue C32 = DAG.getConstant(32, SL, MVT::i32);
+        SDValue ShiftAmt =
+            DAG.getNode(ISD::SUB, SL, MVT::i32, truncShiftAmt, C32);
+        SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
+        SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
+        const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
+        SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
+        return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
+      }
+    }
     return SDValue();
+  }
 
-  SDValue LHS = N->getOperand(0);
-  unsigned RHSVal = RHS->getZExtValue();
+  unsigned RHSVal = CRHS->getZExtValue();
   if (!RHSVal)
     return LHS;
 
-  SDLoc SL(N);
-  SelectionDAG &DAG = DCI.DAG;
-
   switch (LHS->getOpcode()) {
   default:
     break;
@@ -4078,7 +4094,7 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
     if (LZ < RHSVal)
       break;
     EVT XVT = X.getValueType();
-    SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
+    SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(CRHS, 0));
     return DAG.getZExtOrTrunc(Shl, SL, VT);
   }
   }
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
new file mode 100644
index 000000000000000..d18dc2bf3731011
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -0,0 +1,67 @@
+;; Test reduction of:
+;;
+;;   DST = shl i64 X, Y
+;;
+;; where Y is in the range [63-32] to:
+;;
+;;   DST = [0, shl i32 X, (Y - 32)]
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck %s
+
+; FIXME: This case should be reduced, but SelectionDAG::computeKnownBits() cannot
+;        determine the minimum from metadata in this case.  Match current results
+;        for now.
+define i64 @shl_metadata(i64 noundef %arg0, ptr %arg1.ptr) {
+  %shift.amt = load i64, ptr %arg1.ptr, !range !0
+  %shl = shl i64 %arg0, %shift.amt
+  ret i64 %shl
+
+; CHECK: .globl  shl_metadata
+; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
+}
+
+!0 = !{i64 32, i64 64}
+
+; This case is reduced because computeKnownBits() can calculates a minimum of 32
+; based on the OR with 32.
+define i64 @shl_or32(i64 noundef %arg0, ptr %arg1.ptr) {
+  %shift.amt = load i64, ptr %arg1.ptr
+  %or = or i64 %shift.amt, 32
+  %shl = shl i64 %arg0, %or
+  ret i64 %shl
+
+; CHECK: .globl  shl_or32
+; CHECK: v_or_b32_e32 v1, 32, v1
+; CHECK: v_subrev_i32_e32 v1, vcc, 32, v1
+; CHECK: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK: v_mov_b32_e32 v0, 0
+}
+
+; This case must not be reduced because the known minimum, 16, is not in range.
+define i64 @shl_or16(i64 noundef %arg0, ptr %arg1.ptr) {
+  %shift.amt = load i64, ptr %arg1.ptr
+  %or = or i64 %shift.amt, 16
+  %shl = shl i64 %arg0, %or
+  ret i64 %shl
+
+; CHECK: .globl  shl_or16
+; CHECK: v_or_b32_e32 v2, 16, v2
+; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
+}
+
+; FIXME: This case should be reduced too, but computeKnownBits() cannot
+;        determine the range.  Match current results for now.
+define i64 @shl_maxmin(i64 noundef %arg0, i64 noundef %arg1) {
+  %max = call i64 @llvm.umax.i64(i64 %arg1, i64 32)
+  %min = call i64 @llvm.umin.i64(i64 %max,  i64 63)
+  %shl = shl i64 %arg0, %min
+  ret i64 %shl
+
+; CHECK: .globl  shl_maxmin
+; CHECK: v_cmp_lt_u64_e32 vcc, 32, v[2:3]
+; CHECK: v_cndmask_b32_e32 v3, 0, v3, vcc
+; CHECK: v_cndmask_b32_e32 v2, 32, v2, vcc
+; CHECK: v_cmp_gt_u64_e32 vcc, 63, v[2:3]
+; CHECK: v_cndmask_b32_e32 v2, 63, v2, vcc
+; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
+}

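For anyone skimming the patch, here is a rough before/after sketch in LLVM IR of the reduction the combine performs. It is only illustrative: the function and value names are made up, it assumes the shift amount has a known minimum of at least 32 (forced here by the "or ..., 32", as in the shl_or32 test), and it assumes a little-endian target such as AMDGPU, so element 1 of the <2 x i32> holds the high 32 bits of the i64 result.

; Before: a 64-bit shift whose amount is provably at least 32.
define i64 @shl64_wide(i64 %x, i64 %y) {
  %amt = or i64 %y, 32              ; known minimum of %amt is 32
  %shl = shl i64 %x, %amt
  ret i64 %shl
}

; After (conceptually): the low 32 result bits are known zero, so only a
; 32-bit shift of the low half of %x by (%amt - 32) is needed; the pair
; {0, shifted} is then reassembled into an i64.
define i64 @shl64_reduced(i64 %x, i64 %y) {
  %amt   = or i64 %y, 32
  %amt32 = trunc i64 %amt to i32
  %sub   = sub i32 %amt32, 32
  %lo    = trunc i64 %x to i32
  %hi    = shl i32 %lo, %sub
  %vec   = insertelement <2 x i32> zeroinitializer, i32 %hi, i64 1
  %res   = bitcast <2 x i32> %vec to i64
  ret i64 %res
}

This mirrors what performShlCombine now emits: a v2i32 build_vector {0, NewShift} bitcast back to i64.
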
From dafda9e2430cb4108608019ac884d97b5b5671eb Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 6 Feb 2025 09:06:16 -0600
Subject: [PATCH 2/4] Use explicit cpu and update_llc_test_checks

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 61 +++++++++++++++---------
 1 file changed, 38 insertions(+), 23 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index d18dc2bf3731011..26cd04082cf4292 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ;; Test reduction of:
 ;;
 ;;   DST = shl i64 X, Y
@@ -6,62 +7,76 @@
 ;;
 ;;   DST = [0, shl i32 X, (Y - 32)]
 
-; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
 
 ; FIXME: This case should be reduced, but SelectionDAG::computeKnownBits() cannot
 ;        determine the minimum from metadata in this case.  Match current results
 ;        for now.
 define i64 @shl_metadata(i64 noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v2, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shift.amt = load i64, ptr %arg1.ptr, !range !0
   %shl = shl i64 %arg0, %shift.amt
   ret i64 %shl
-
-; CHECK: .globl  shl_metadata
-; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
 }
 
 !0 = !{i64 32, i64 64}
 
-; This case is reduced because computeKnownBits() can calculates a minimum of 32
+; This case is reduced because computeKnownBits() can calculate a minimum of 32
 ; based on the OR with 32.
 define i64 @shl_or32(i64 noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v1, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v1
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shift.amt = load i64, ptr %arg1.ptr
   %or = or i64 %shift.amt, 32
   %shl = shl i64 %arg0, %or
   ret i64 %shl
-
-; CHECK: .globl  shl_or32
-; CHECK: v_or_b32_e32 v1, 32, v1
-; CHECK: v_subrev_i32_e32 v1, vcc, 32, v1
-; CHECK: v_lshlrev_b32_e32 v1, v1, v0
-; CHECK: v_mov_b32_e32 v0, 0
 }
 
 ; This case must not be reduced because the known minimum, 16, is not in range.
 define i64 @shl_or16(i64 noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v2, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v2, 16, v2
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shift.amt = load i64, ptr %arg1.ptr
   %or = or i64 %shift.amt, 16
   %shl = shl i64 %arg0, %or
   ret i64 %shl
-
-; CHECK: .globl  shl_or16
-; CHECK: v_or_b32_e32 v2, 16, v2
-; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
 }
 
 ; FIXME: This case should be reduced too, but computeKnownBits() cannot
 ;        determine the range.  Match current results for now.
 define i64 @shl_maxmin(i64 noundef %arg0, i64 noundef %arg1) {
+; CHECK-LABEL: shl_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v2, 32, v2, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v2, 63, v2, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %max = call i64 @llvm.umax.i64(i64 %arg1, i64 32)
   %min = call i64 @llvm.umin.i64(i64 %max,  i64 63)
   %shl = shl i64 %arg0, %min
   ret i64 %shl
-
-; CHECK: .globl  shl_maxmin
-; CHECK: v_cmp_lt_u64_e32 vcc, 32, v[2:3]
-; CHECK: v_cndmask_b32_e32 v3, 0, v3, vcc
-; CHECK: v_cndmask_b32_e32 v2, 32, v2, vcc
-; CHECK: v_cmp_gt_u64_e32 vcc, 63, v[2:3]
-; CHECK: v_cndmask_b32_e32 v2, 63, v2, vcc
-; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
 }

From 5a0b7f5f4d7feded3bb1fe2a59510b7bd451b3cd Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 6 Feb 2025 14:01:49 -0600
Subject: [PATCH 3/4] Test vector and inreg variations

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 1421 +++++++++++++++++++++-
 1 file changed, 1405 insertions(+), 16 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index 26cd04082cf4292..a49694d8da5a839 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -9,9 +9,14 @@
 
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range with metadata
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
 ; FIXME: This case should be reduced, but SelectionDAG::computeKnownBits() cannot
 ;        determine the minimum from metadata in this case.  Match current results
 ;        for now.
+
 define i64 @shl_metadata(i64 noundef %arg0, ptr %arg1.ptr) {
 ; CHECK-LABEL: shl_metadata:
 ; CHECK:       ; %bb.0:
@@ -25,45 +30,1020 @@ define i64 @shl_metadata(i64 noundef %arg0, ptr %arg1.ptr) {
   ret i64 %shl
 }
 
+define <2 x i64> @shl_v2_metadata(<2 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v2_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v4, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v6, v[2:3]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <2 x i64> %arg0, %shift.amt
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_metadata(<3 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v3_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v12, v[6:7] offset:16
+; CHECK-NEXT:    flat_load_dwordx4 v[8:11], v[6:7]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v12, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v8, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v10, v[2:3]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <3 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <3 x i64> %arg0, %shift.amt
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_metadata(<4 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v4_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[10:13], v[8:9]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[13:16], v[8:9] offset:16
+; CHECK-NEXT:    ; kill: killed $vgpr8 killed $vgpr9
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v10, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v12, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v13, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v15, v[6:7]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <4 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <4 x i64> %arg0, %shift.amt
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_metadata(<5 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v5_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v19, v[10:11] offset:32
+; CHECK-NEXT:    flat_load_dwordx4 v[12:15], v[10:11]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[15:18], v[10:11] offset:16
+; CHECK-NEXT:    ; kill: killed $vgpr10 killed $vgpr11
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v19, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v12, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v14, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v15, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v17, v[6:7]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <5 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <5 x i64> %arg0, %shift.amt
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_metadata(<8 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v8_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[18:21], v[16:17]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v18, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v20, v[2:3]
+; CHECK-NEXT:    flat_load_dwordx4 v[18:21], v[16:17] offset:16
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v18, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v20, v[6:7]
+; CHECK-NEXT:    flat_load_dwordx4 v[18:21], v[16:17] offset:32
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v18, v[8:9]
+; CHECK-NEXT:    flat_load_dwordx4 v[16:19], v[16:17] offset:48
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], v20, v[10:11]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], v16, v[12:13]
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v18, v[14:15]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <8 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <8 x i64> %arg0, %shift.amt
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_metadata(<16 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v16_metadata:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:8
+; CHECK-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:4
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT:    s_waitcnt vmcnt(1)
+; CHECK-NEXT:    flat_load_dwordx4 v[32:35], v[48:49]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[35:38], v[48:49] offset:16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v32, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v34, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v35, v[4:5]
+; CHECK-NEXT:    flat_load_dwordx4 v[32:35], v[48:49] offset:32
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v37, v[6:7]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[35:38], v[48:49] offset:48
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v32, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], v34, v[10:11]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], v35, v[12:13]
+; CHECK-NEXT:    flat_load_dwordx4 v[32:35], v[48:49] offset:64
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v37, v[14:15]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[35:38], v[48:49] offset:80
+; CHECK-NEXT:    v_lshlrev_b64 v[16:17], v32, v[16:17]
+; CHECK-NEXT:    v_lshlrev_b64 v[18:19], v34, v[18:19]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[20:21], v35, v[20:21]
+; CHECK-NEXT:    flat_load_dwordx4 v[32:35], v[48:49] offset:96
+; CHECK-NEXT:    v_lshlrev_b64 v[22:23], v37, v[22:23]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[35:38], v[48:49] offset:112
+; CHECK-NEXT:    v_lshlrev_b64 v[24:25], v32, v[24:25]
+; CHECK-NEXT:    v_lshlrev_b64 v[26:27], v34, v[26:27]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[28:29], v35, v[28:29]
+; CHECK-NEXT:    v_lshlrev_b64 v[30:31], v37, v[30:31]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %shift.amt = load <16 x i64>, ptr %arg1.ptr, !range !0
+  %shl = shl <16 x i64> %arg0, %shift.amt
+  ret <16 x i64> %shl
+}
+
 !0 = !{i64 32, i64 64}
 
-; This case is reduced because computeKnownBits() can calculate a minimum of 32
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range with an "or X, 16"
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; These cases must not be reduced because the known minimum, 16, is not in range.
+
+define i64 @shl_or16(i64 noundef %arg0, i64 %shift_amt) {
+; CHECK-LABEL: shl_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v2, 16, v2
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or i64 %shift_amt, 16
+  %shl = shl i64 %arg0, %or
+  ret i64 %shl
+}
+
+define <2 x i64> @shl_v2_or16(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v2_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v5, 16, v6
+; CHECK-NEXT:    v_or_b32_e32 v4, 16, v4
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v4, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v5, v[2:3]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <2 x i64> %arg0, %or
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or16(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v3_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v7, 16, v10
+; CHECK-NEXT:    v_or_b32_e32 v8, 16, v8
+; CHECK-NEXT:    v_or_b32_e32 v6, 16, v6
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v6, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v8, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v7, v[4:5]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <3 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <3 x i64> %arg0, %or
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or16(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v4_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v9, 16, v14
+; CHECK-NEXT:    v_or_b32_e32 v11, 16, v12
+; CHECK-NEXT:    v_or_b32_e32 v10, 16, v10
+; CHECK-NEXT:    v_or_b32_e32 v8, 16, v8
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v8, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v10, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v11, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v9, v[6:7]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <4 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <4 x i64> %arg0, %or
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or16(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v5_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v11, 16, v18
+; CHECK-NEXT:    v_or_b32_e32 v13, 16, v16
+; CHECK-NEXT:    v_or_b32_e32 v14, 16, v14
+; CHECK-NEXT:    v_or_b32_e32 v12, 16, v12
+; CHECK-NEXT:    v_or_b32_e32 v10, 16, v10
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v10, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v12, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v14, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v13, v[6:7]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v11, v[8:9]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <5 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <5 x i64> %arg0, %or
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or16(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v8_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v16, v[0:1]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v18
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v16, v[2:3]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v20
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v16, v[4:5]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v22
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v16, v[6:7]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v24
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v16, v[8:9]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v26
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], v16, v[10:11]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v28
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], v16, v[12:13]
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v30
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v16, v[14:15]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <8 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <8 x i64> %arg0, %or
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or16(<16 x i64> noundef %arg0, <16 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v16_or16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:4
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v31, v[0:1]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:12
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v31, v[2:3]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:20
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v31, v[4:5]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:28
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v31, v[6:7]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:36
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v31, v[8:9]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:44
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], v31, v[10:11]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:52
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], v31, v[12:13]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:60
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v31, v[14:15]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:68
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[16:17], v31, v[16:17]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:76
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[18:19], v31, v[18:19]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:84
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[20:21], v31, v[20:21]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:92
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[22:23], v31, v[22:23]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:100
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[24:25], v31, v[24:25]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:108
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[26:27], v31, v[26:27]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:116
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[28:29], v31, v[28:29]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:124
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v32, 16, v32
+; CHECK-NEXT:    v_lshlrev_b64 v[30:31], v32, v[30:31]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <16 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <16 x i64> %arg0, %or
+  ret <16 x i64> %shl
+}
+
+; test inreg
+
+define i64 @shl_or16_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
+; CHECK-LABEL: shl_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s4, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or i64 %shift_amt, 16
+  %shl = shl i64 %arg0, %or
+  ret i64 %shl
+}
+
+define <2 x i64> @shl_v2_or16_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v2_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s18, 16
+; CHECK-NEXT:    s_or_b32 s5, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s5, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s4, v[2:3]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <2 x i64> %arg0, %or
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or16_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v3_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s20, 16
+; CHECK-NEXT:    s_or_b32 s5, s18, 16
+; CHECK-NEXT:    s_or_b32 s6, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s6, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s5, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], s4, v[4:5]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <3 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <3 x i64> %arg0, %or
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or16_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v4_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s22, 16
+; CHECK-NEXT:    s_or_b32 s5, s20, 16
+; CHECK-NEXT:    s_or_b32 s6, s18, 16
+; CHECK-NEXT:    s_or_b32 s7, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s7, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s6, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], s5, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], s4, v[6:7]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <4 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <4 x i64> %arg0, %or
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or16_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v5_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s24, 16
+; CHECK-NEXT:    s_or_b32 s5, s22, 16
+; CHECK-NEXT:    s_or_b32 s6, s20, 16
+; CHECK-NEXT:    s_or_b32 s7, s18, 16
+; CHECK-NEXT:    s_or_b32 s8, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s8, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s7, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], s6, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], s5, v[6:7]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], s4, v[8:9]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <5 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <5 x i64> %arg0, %or
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or16_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v8_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v16, 16, v16
+; CHECK-NEXT:    s_or_b32 s4, s28, 16
+; CHECK-NEXT:    s_or_b32 s5, s26, 16
+; CHECK-NEXT:    s_or_b32 s6, s24, 16
+; CHECK-NEXT:    s_or_b32 s7, s22, 16
+; CHECK-NEXT:    s_or_b32 s8, s20, 16
+; CHECK-NEXT:    s_or_b32 s9, s18, 16
+; CHECK-NEXT:    s_or_b32 s10, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s10, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s9, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], s8, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], s7, v[6:7]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], s6, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], s5, v[10:11]
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], s4, v[12:13]
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v16, v[14:15]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <8 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <8 x i64> %arg0, %or
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or16_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v16_or16_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:4
+; CHECK-NEXT:    s_or_b32 s4, s28, 16
+; CHECK-NEXT:    s_or_b32 s5, s26, 16
+; CHECK-NEXT:    s_or_b32 s6, s24, 16
+; CHECK-NEXT:    s_or_b32 s7, s22, 16
+; CHECK-NEXT:    s_or_b32 s8, s20, 16
+; CHECK-NEXT:    s_or_b32 s9, s18, 16
+; CHECK-NEXT:    s_or_b32 s10, s16, 16
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], s10, v[0:1]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], s9, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], s8, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], s7, v[6:7]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], s6, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], s5, v[10:11]
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], s4, v[12:13]
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v31, v[14:15]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:12
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[16:17], v31, v[16:17]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:20
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[18:19], v31, v[18:19]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:28
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[20:21], v31, v[20:21]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:36
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[22:23], v31, v[22:23]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:44
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[24:25], v31, v[24:25]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:52
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[26:27], v31, v[26:27]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:60
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT:    v_lshlrev_b64 v[28:29], v31, v[28:29]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:68
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v32, 16, v32
+; CHECK-NEXT:    v_lshlrev_b64 v[30:31], v32, v[30:31]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <16 x i64> %shift_amt, splat (i64 16)
+  %shl = shl <16 x i64> %arg0, %or
+  ret <16 x i64> %shl
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range with an "or X, 32"
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; These cases are reduced because computeKnownBits() can calculate a minimum of 32
 ; based on the OR with 32.
-define i64 @shl_or32(i64 noundef %arg0, ptr %arg1.ptr) {
+
+define i64 @shl_or32(i64 noundef %arg0, i64 %shift_amt) {
 ; CHECK-LABEL: shl_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    flat_load_dword v1, v[2:3]
-; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v1
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v2
 ; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
 ; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
-  %shift.amt = load i64, ptr %arg1.ptr
-  %or = or i64 %shift.amt, 32
+  %or = or i64 %shift_amt, 32
   %shl = shl i64 %arg0, %or
   ret i64 %shl
 }
 
-; This case must not be reduced because the known minimum, 16, is not in range.
-define i64 @shl_or16(i64 noundef %arg0, ptr %arg1.ptr) {
-; CHECK-LABEL: shl_or16:
+define <2 x i64> @shl_v2_or32(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v2_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    flat_load_dword v2, v[2:3]
-; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v2, 16, v2
-; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v4
+; CHECK-NEXT:    v_or_b32_e32 v3, 32, v6
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v3
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <2 x i64> %arg0, %or
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or32(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v3_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v6
+; CHECK-NEXT:    v_or_b32_e32 v3, 32, v8
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_or_b32_e32 v5, 32, v10
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v3
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v5
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <3 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <3 x i64> %arg0, %or
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or32(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v4_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v8
+; CHECK-NEXT:    v_or_b32_e32 v3, 32, v10
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_or_b32_e32 v5, 32, v12
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v3
+; CHECK-NEXT:    v_or_b32_e32 v7, 32, v14
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v5
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v7
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v0, v6
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <4 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <4 x i64> %arg0, %or
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or32(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v5_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v10
+; CHECK-NEXT:    v_or_b32_e32 v3, 32, v12
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_or_b32_e32 v5, 32, v14
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v3
+; CHECK-NEXT:    v_or_b32_e32 v7, 32, v16
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v5
+; CHECK-NEXT:    v_or_b32_e32 v9, 32, v18
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v7
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v0, v6
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v0, v8
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <5 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <5 x i64> %arg0, %or
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or32(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v8_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v16
+; CHECK-NEXT:    v_or_b32_e32 v3, 32, v18
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_or_b32_e32 v5, 32, v20
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v3
+; CHECK-NEXT:    v_or_b32_e32 v7, 32, v22
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v5
+; CHECK-NEXT:    v_or_b32_e32 v9, 32, v24
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v7
+; CHECK-NEXT:    v_or_b32_e32 v11, 32, v26
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v0, v6
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v9
+; CHECK-NEXT:    v_or_b32_e32 v13, 32, v28
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v0, v8
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v11
+; CHECK-NEXT:    v_or_b32_e32 v15, 32, v30
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, v0, v10
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v13
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, v0, v12
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v15
+; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v0, v14
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    v_mov_b32_e32 v10, 0
+; CHECK-NEXT:    v_mov_b32_e32 v12, 0
+; CHECK-NEXT:    v_mov_b32_e32 v14, 0
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
-  %shift.amt = load i64, ptr %arg1.ptr
-  %or = or i64 %shift.amt, 16
+  %or = or <8 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <8 x i64> %arg0, %or
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or32(<16 x i64> noundef %arg0, <16 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v16_or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4
+; CHECK-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:12
+; CHECK-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:20
+; CHECK-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:28
+; CHECK-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:60
+; CHECK-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:52
+; CHECK-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:44
+; CHECK-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:36
+; CHECK-NEXT:    s_waitcnt vmcnt(7)
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v1
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT:    s_waitcnt vmcnt(6)
+; CHECK-NEXT:    v_or_b32_e32 v0, 32, v3
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT:    s_waitcnt vmcnt(5)
+; CHECK-NEXT:    v_or_b32_e32 v0, 32, v5
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT:    s_waitcnt vmcnt(4)
+; CHECK-NEXT:    v_or_b32_e32 v0, 32, v7
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v0, v6
+; CHECK-NEXT:    s_waitcnt vmcnt(2)
+; CHECK-NEXT:    v_or_b32_e32 v2, 32, v11
+; CHECK-NEXT:    s_waitcnt vmcnt(1)
+; CHECK-NEXT:    v_or_b32_e32 v4, 32, v13
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v6, 32, v15
+; CHECK-NEXT:    v_subrev_u32_e32 v6, 32, v6
+; CHECK-NEXT:    v_subrev_u32_e32 v4, 32, v4
+; CHECK-NEXT:    v_subrev_u32_e32 v2, 32, v2
+; CHECK-NEXT:    v_or_b32_e32 v0, 32, v9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v6, v8
+; CHECK-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:68
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, v4, v10
+; CHECK-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:76
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, v2, v12
+; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:84
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v0, v14
+; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:92
+; CHECK-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:124
+; CHECK-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:116
+; CHECK-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:108
+; CHECK-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:100
+; CHECK-NEXT:    s_waitcnt vmcnt(7)
+; CHECK-NEXT:    v_or_b32_e32 v6, 32, v6
+; CHECK-NEXT:    v_subrev_u32_e32 v6, 32, v6
+; CHECK-NEXT:    s_waitcnt vmcnt(6)
+; CHECK-NEXT:    v_or_b32_e32 v4, 32, v4
+; CHECK-NEXT:    s_waitcnt vmcnt(4)
+; CHECK-NEXT:    v_or_b32_e32 v0, 32, v0
+; CHECK-NEXT:    v_or_b32_e32 v2, 32, v2
+; CHECK-NEXT:    v_subrev_u32_e32 v4, 32, v4
+; CHECK-NEXT:    v_subrev_u32_e32 v2, 32, v2
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v17, v6, v16
+; CHECK-NEXT:    v_lshlrev_b32_e32 v19, v4, v18
+; CHECK-NEXT:    v_lshlrev_b32_e32 v21, v2, v20
+; CHECK-NEXT:    v_lshlrev_b32_e32 v23, v0, v22
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_or_b32_e32 v0, 32, v8
+; CHECK-NEXT:    s_waitcnt vmcnt(2)
+; CHECK-NEXT:    v_or_b32_e32 v2, 32, v10
+; CHECK-NEXT:    s_waitcnt vmcnt(1)
+; CHECK-NEXT:    v_or_b32_e32 v4, 32, v12
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v6, 32, v14
+; CHECK-NEXT:    v_subrev_u32_e32 v6, 32, v6
+; CHECK-NEXT:    v_subrev_u32_e32 v4, 32, v4
+; CHECK-NEXT:    v_subrev_u32_e32 v2, 32, v2
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v25, v6, v24
+; CHECK-NEXT:    v_lshlrev_b32_e32 v27, v4, v26
+; CHECK-NEXT:    v_lshlrev_b32_e32 v29, v2, v28
+; CHECK-NEXT:    v_lshlrev_b32_e32 v31, v0, v30
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    v_mov_b32_e32 v10, 0
+; CHECK-NEXT:    v_mov_b32_e32 v12, 0
+; CHECK-NEXT:    v_mov_b32_e32 v14, 0
+; CHECK-NEXT:    v_mov_b32_e32 v16, 0
+; CHECK-NEXT:    v_mov_b32_e32 v18, 0
+; CHECK-NEXT:    v_mov_b32_e32 v20, 0
+; CHECK-NEXT:    v_mov_b32_e32 v22, 0
+; CHECK-NEXT:    v_mov_b32_e32 v24, 0
+; CHECK-NEXT:    v_mov_b32_e32 v26, 0
+; CHECK-NEXT:    v_mov_b32_e32 v28, 0
+; CHECK-NEXT:    v_mov_b32_e32 v30, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <16 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <16 x i64> %arg0, %or
+  ret <16 x i64> %shl
+}
+
+; test inreg
+
+define i64 @shl_or32_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
+; CHECK-LABEL: shl_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s16, 32
+; CHECK-NEXT:    s_sub_i32 s4, s4, 32
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s4, v0
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or i64 %shift_amt, 32
   %shl = shl i64 %arg0, %or
   ret i64 %shl
 }
 
+define <2 x i64> @shl_v2_or32_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v2_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s18, 32
+; CHECK-NEXT:    s_or_b32 s5, s16, 32
+; CHECK-NEXT:    s_sub_i32 s5, s5, 32
+; CHECK-NEXT:    s_sub_i32 s4, s4, 32
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s5, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s4, v2
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <2 x i64> %arg0, %or
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or32_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v3_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s20, 32
+; CHECK-NEXT:    s_or_b32 s5, s18, 32
+; CHECK-NEXT:    s_or_b32 s6, s16, 32
+; CHECK-NEXT:    s_sub_i32 s6, s6, 32
+; CHECK-NEXT:    s_sub_i32 s5, s5, 32
+; CHECK-NEXT:    s_sub_i32 s4, s4, 32
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s6, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s5, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s4, v4
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <3 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <3 x i64> %arg0, %or
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or32_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v4_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s22, 32
+; CHECK-NEXT:    s_or_b32 s5, s20, 32
+; CHECK-NEXT:    s_or_b32 s6, s18, 32
+; CHECK-NEXT:    s_or_b32 s7, s16, 32
+; CHECK-NEXT:    s_sub_i32 s7, s7, 32
+; CHECK-NEXT:    s_sub_i32 s6, s6, 32
+; CHECK-NEXT:    s_sub_i32 s5, s5, 32
+; CHECK-NEXT:    s_sub_i32 s4, s4, 32
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s7, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s6, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s5, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s4, v6
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <4 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <4 x i64> %arg0, %or
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or32_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v5_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s4, s24, 32
+; CHECK-NEXT:    s_or_b32 s5, s22, 32
+; CHECK-NEXT:    s_or_b32 s6, s20, 32
+; CHECK-NEXT:    s_or_b32 s7, s18, 32
+; CHECK-NEXT:    s_or_b32 s8, s16, 32
+; CHECK-NEXT:    s_sub_i32 s8, s8, 32
+; CHECK-NEXT:    s_sub_i32 s7, s7, 32
+; CHECK-NEXT:    s_sub_i32 s6, s6, 32
+; CHECK-NEXT:    s_sub_i32 s5, s5, 32
+; CHECK-NEXT:    s_sub_i32 s4, s4, 32
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s8, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s7, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s6, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s5, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s4, v8
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <5 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <5 x i64> %arg0, %or
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or32_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v8_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_b32 s10, s16, 32
+; CHECK-NEXT:    v_or_b32_e32 v15, 32, v16
+; CHECK-NEXT:    s_or_b32 s4, s28, 32
+; CHECK-NEXT:    s_or_b32 s5, s26, 32
+; CHECK-NEXT:    s_or_b32 s6, s24, 32
+; CHECK-NEXT:    s_or_b32 s7, s22, 32
+; CHECK-NEXT:    s_or_b32 s8, s20, 32
+; CHECK-NEXT:    s_or_b32 s9, s18, 32
+; CHECK-NEXT:    s_sub_i32 s10, s10, 32
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s10, v0
+; CHECK-NEXT:    s_sub_i32 s9, s9, 32
+; CHECK-NEXT:    s_sub_i32 s8, s8, 32
+; CHECK-NEXT:    s_sub_i32 s7, s7, 32
+; CHECK-NEXT:    s_sub_i32 s6, s6, 32
+; CHECK-NEXT:    s_sub_i32 s5, s5, 32
+; CHECK-NEXT:    s_sub_i32 s4, s4, 32
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v15
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s9, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s8, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s7, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s6, v8
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, s5, v10
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, s4, v12
+; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v0, v14
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    v_mov_b32_e32 v10, 0
+; CHECK-NEXT:    v_mov_b32_e32 v12, 0
+; CHECK-NEXT:    v_mov_b32_e32 v14, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <8 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <8 x i64> %arg0, %or
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or32_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v16_or32_inreg:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4
+; CHECK-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:12
+; CHECK-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:20
+; CHECK-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:28
+; CHECK-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:36
+; CHECK-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:44
+; CHECK-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:52
+; CHECK-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:60
+; CHECK-NEXT:    s_or_b32 s7, s22, 32
+; CHECK-NEXT:    s_or_b32 s8, s20, 32
+; CHECK-NEXT:    s_or_b32 s9, s18, 32
+; CHECK-NEXT:    s_or_b32 s10, s16, 32
+; CHECK-NEXT:    s_sub_i32 s10, s10, 32
+; CHECK-NEXT:    s_sub_i32 s9, s9, 32
+; CHECK-NEXT:    s_sub_i32 s8, s8, 32
+; CHECK-NEXT:    s_sub_i32 s7, s7, 32
+; CHECK-NEXT:    s_or_b32 s4, s28, 32
+; CHECK-NEXT:    s_or_b32 s5, s26, 32
+; CHECK-NEXT:    s_or_b32 s6, s24, 32
+; CHECK-NEXT:    s_sub_i32 s6, s6, 32
+; CHECK-NEXT:    s_sub_i32 s5, s5, 32
+; CHECK-NEXT:    s_sub_i32 s4, s4, 32
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, s5, v10
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, s4, v12
+; CHECK-NEXT:    v_mov_b32_e32 v10, 0
+; CHECK-NEXT:    v_mov_b32_e32 v12, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(7)
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v1
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v1, v14
+; CHECK-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:68
+; CHECK-NEXT:    s_waitcnt vmcnt(7)
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v3
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v17, v1, v16
+; CHECK-NEXT:    s_waitcnt vmcnt(6)
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v5
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v19, v1, v18
+; CHECK-NEXT:    s_waitcnt vmcnt(5)
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v7
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v21, v1, v20
+; CHECK-NEXT:    s_waitcnt vmcnt(4)
+; CHECK-NEXT:    v_or_b32_e32 v1, 32, v9
+; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v23, v1, v22
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s10, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s9, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s8, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s7, v6
+; CHECK-NEXT:    s_waitcnt vmcnt(1)
+; CHECK-NEXT:    v_or_b32_e32 v2, 32, v29
+; CHECK-NEXT:    v_or_b32_e32 v4, 32, v27
+; CHECK-NEXT:    v_or_b32_e32 v6, 32, v25
+; CHECK-NEXT:    v_subrev_u32_e32 v6, 32, v6
+; CHECK-NEXT:    v_subrev_u32_e32 v4, 32, v4
+; CHECK-NEXT:    v_subrev_u32_e32 v2, 32, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s6, v8
+; CHECK-NEXT:    v_lshlrev_b32_e32 v25, v6, v24
+; CHECK-NEXT:    v_lshlrev_b32_e32 v27, v4, v26
+; CHECK-NEXT:    v_lshlrev_b32_e32 v29, v2, v28
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    v_mov_b32_e32 v16, 0
+; CHECK-NEXT:    v_mov_b32_e32 v18, 0
+; CHECK-NEXT:    v_mov_b32_e32 v20, 0
+; CHECK-NEXT:    v_mov_b32_e32 v22, 0
+; CHECK-NEXT:    v_mov_b32_e32 v24, 0
+; CHECK-NEXT:    v_mov_b32_e32 v26, 0
+; CHECK-NEXT:    v_mov_b32_e32 v28, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v0, 32, v14
+; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v31, v0, v30
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v14, 0
+; CHECK-NEXT:    v_mov_b32_e32 v30, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <16 x i64> %shift_amt, splat (i64 32)
+  %shl = shl <16 x i64> %arg0, %or
+  ret <16 x i64> %shl
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range from max/min
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
 ; FIXME: This case should be reduced too, but computeKnownBits() cannot
 ;        determine the range.  Match current results for now.
+
 define i64 @shl_maxmin(i64 noundef %arg0, i64 noundef %arg1) {
 ; CHECK-LABEL: shl_maxmin:
 ; CHECK:       ; %bb.0:
@@ -80,3 +1060,412 @@ define i64 @shl_maxmin(i64 noundef %arg0, i64 noundef %arg1) {
   %shl = shl i64 %arg0, %min
   ret i64 %shl
 }
+
+define <2 x i64> @shl_v2_maxmin(<2 x i64> noundef %arg0, <2 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v2_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v4, 32, v4, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v6, 32, v6, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v6, 63, v6, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v6, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v4, 63, v4, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v4, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call <2 x i64> @llvm.umax.i64(<2 x i64> %arg1, <2 x i64> splat (i64 32))
+  %min = call <2 x i64> @llvm.umin.i64(<2 x i64> %max,  <2 x i64> splat (i64 63))
+  %shl = shl <2 x i64> %arg0, %min
+  ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_maxmin(<3 x i64> noundef %arg0, <3 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v3_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v6, 32, v6, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[8:9]
+; CHECK-NEXT:    v_cndmask_b32_e32 v9, 0, v9, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v8, 32, v8, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[10:11]
+; CHECK-NEXT:    v_cndmask_b32_e32 v11, 0, v11, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 32, v10, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[10:11]
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 63, v10, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v10, v[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v8, 63, v8, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[6:7]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v8, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v6, 63, v6, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v6, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call <3 x i64> @llvm.umax.i64(<3 x i64> %arg1, <3 x i64> splat (i64 32))
+  %min = call <3 x i64> @llvm.umin.i64(<3 x i64> %max,  <3 x i64> splat (i64 63))
+  %shl = shl <3 x i64> %arg0, %min
+  ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_maxmin(<4 x i64> noundef %arg0, <4 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v4_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[8:9]
+; CHECK-NEXT:    v_cndmask_b32_e32 v9, 0, v9, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v8, 32, v8, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[10:11]
+; CHECK-NEXT:    v_cndmask_b32_e32 v11, 0, v11, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 32, v10, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[12:13]
+; CHECK-NEXT:    v_cndmask_b32_e32 v13, 0, v13, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v12, 32, v12, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[14:15]
+; CHECK-NEXT:    v_cndmask_b32_e32 v15, 0, v15, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v14, 32, v14, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[14:15]
+; CHECK-NEXT:    v_cndmask_b32_e32 v14, 63, v14, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[12:13]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v14, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v12, 63, v12, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[10:11]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v12, v[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 63, v10, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v10, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v8, 63, v8, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v8, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call <4 x i64> @llvm.umax.i64(<4 x i64> %arg1, <4 x i64> splat (i64 32))
+  %min = call <4 x i64> @llvm.umin.i64(<4 x i64> %max,  <4 x i64> splat (i64 63))
+  %shl = shl <4 x i64> %arg0, %min
+  ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_maxmin(<5 x i64> noundef %arg0, <5 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v5_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[10:11]
+; CHECK-NEXT:    v_cndmask_b32_e32 v11, 0, v11, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 32, v10, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[12:13]
+; CHECK-NEXT:    v_cndmask_b32_e32 v13, 0, v13, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v12, 32, v12, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[14:15]
+; CHECK-NEXT:    v_cndmask_b32_e32 v15, 0, v15, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v14, 32, v14, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[16:17]
+; CHECK-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 32, v16, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[18:19]
+; CHECK-NEXT:    v_cndmask_b32_e32 v19, 0, v19, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v18, 32, v18, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[18:19]
+; CHECK-NEXT:    v_cndmask_b32_e32 v18, 63, v18, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[16:17]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v18, v[8:9]
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 63, v16, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[14:15]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v16, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v14, 63, v14, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[12:13]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v14, v[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v12, 63, v12, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[10:11]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v12, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v10, 63, v10, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v10, v[0:1]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call <5 x i64> @llvm.umax.i64(<5 x i64> %arg1, <5 x i64> splat (i64 32))
+  %min = call <5 x i64> @llvm.umin.i64(<5 x i64> %max,  <5 x i64> splat (i64 63))
+  %shl = shl <5 x i64> %arg0, %min
+  ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_maxmin(<8 x i64> noundef %arg0, <8 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v8_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[16:17]
+; CHECK-NEXT:    v_cndmask_b32_e32 v17, 0, v17, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 32, v16, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[18:19]
+; CHECK-NEXT:    v_cndmask_b32_e32 v19, 0, v19, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v18, 32, v18, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[20:21]
+; CHECK-NEXT:    v_cndmask_b32_e32 v21, 0, v21, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v20, 32, v20, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[22:23]
+; CHECK-NEXT:    v_cndmask_b32_e32 v23, 0, v23, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v22, 32, v22, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[24:25]
+; CHECK-NEXT:    v_cndmask_b32_e32 v25, 0, v25, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v24, 32, v24, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[26:27]
+; CHECK-NEXT:    v_cndmask_b32_e32 v27, 0, v27, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v26, 32, v26, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[28:29]
+; CHECK-NEXT:    v_cndmask_b32_e32 v29, 0, v29, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v28, 32, v28, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[28:29]
+; CHECK-NEXT:    v_cndmask_b32_e32 v28, 63, v28, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[26:27]
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], v28, v[12:13]
+; CHECK-NEXT:    v_cndmask_b32_e32 v26, 63, v26, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[24:25]
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], v26, v[10:11]
+; CHECK-NEXT:    v_cndmask_b32_e32 v24, 63, v24, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[22:23]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v24, v[8:9]
+; CHECK-NEXT:    v_cndmask_b32_e32 v22, 63, v22, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[20:21]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v22, v[6:7]
+; CHECK-NEXT:    v_cndmask_b32_e32 v20, 63, v20, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[18:19]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v20, v[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v18, 63, v18, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[16:17]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v18, v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 63, v16, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v16, v[0:1]
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[30:31]
+; CHECK-NEXT:    v_cndmask_b32_e32 v17, 0, v31, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 32, v30, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[16:17]
+; CHECK-NEXT:    v_cndmask_b32_e32 v16, 63, v16, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v16, v[14:15]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call <8 x i64> @llvm.umax.i64(<8 x i64> %arg1, <8 x i64> splat (i64 32))
+  %min = call <8 x i64> @llvm.umin.i64(<8 x i64> %max,  <8 x i64> splat (i64 63))
+  %shl = shl <8 x i64> %arg0, %min
+  ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_maxmin(<16 x i64> noundef %arg0, <16 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v16_maxmin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v41, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v42, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v43, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v44, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v45, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v47, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v56, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v57, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v58, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v59, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v60, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v62, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v63, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_store_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; CHECK-NEXT:    s_nop 0
+; CHECK-NEXT:    buffer_store_dword v29, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; CHECK-NEXT:    buffer_load_dword v32, off, s[0:3], s32 offset:8
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32 offset:4
+; CHECK-NEXT:    buffer_load_dword v35, off, s[0:3], s32 offset:16
+; CHECK-NEXT:    buffer_load_dword v34, off, s[0:3], s32 offset:12
+; CHECK-NEXT:    buffer_load_dword v37, off, s[0:3], s32 offset:24
+; CHECK-NEXT:    buffer_load_dword v36, off, s[0:3], s32 offset:20
+; CHECK-NEXT:    buffer_load_dword v39, off, s[0:3], s32 offset:32
+; CHECK-NEXT:    buffer_load_dword v38, off, s[0:3], s32 offset:28
+; CHECK-NEXT:    buffer_load_dword v48, off, s[0:3], s32 offset:36
+; CHECK-NEXT:    buffer_load_dword v49, off, s[0:3], s32 offset:40
+; CHECK-NEXT:    buffer_load_dword v51, off, s[0:3], s32 offset:48
+; CHECK-NEXT:    buffer_load_dword v50, off, s[0:3], s32 offset:44
+; CHECK-NEXT:    buffer_load_dword v53, off, s[0:3], s32 offset:56
+; CHECK-NEXT:    buffer_load_dword v52, off, s[0:3], s32 offset:52
+; CHECK-NEXT:    buffer_load_dword v55, off, s[0:3], s32 offset:64
+; CHECK-NEXT:    buffer_load_dword v54, off, s[0:3], s32 offset:60
+; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:68
+; CHECK-NEXT:    buffer_load_dword v41, off, s[0:3], s32 offset:72
+; CHECK-NEXT:    buffer_load_dword v43, off, s[0:3], s32 offset:80
+; CHECK-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:76
+; CHECK-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:88
+; CHECK-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:84
+; CHECK-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:96
+; CHECK-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:92
+; CHECK-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:100
+; CHECK-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:104
+; CHECK-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:112
+; CHECK-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:108
+; CHECK-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:120
+; CHECK-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:116
+; CHECK-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:128
+; CHECK-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:124
+; CHECK-NEXT:    v_mov_b32_e32 v29, v27
+; CHECK-NEXT:    v_mov_b32_e32 v28, v26
+; CHECK-NEXT:    v_mov_b32_e32 v27, v25
+; CHECK-NEXT:    v_mov_b32_e32 v26, v24
+; CHECK-NEXT:    v_mov_b32_e32 v25, v23
+; CHECK-NEXT:    v_mov_b32_e32 v24, v22
+; CHECK-NEXT:    v_mov_b32_e32 v23, v21
+; CHECK-NEXT:    v_mov_b32_e32 v22, v20
+; CHECK-NEXT:    v_mov_b32_e32 v21, v19
+; CHECK-NEXT:    v_mov_b32_e32 v20, v18
+; CHECK-NEXT:    v_mov_b32_e32 v19, v17
+; CHECK-NEXT:    v_mov_b32_e32 v18, v16
+; CHECK-NEXT:    v_mov_b32_e32 v17, v15
+; CHECK-NEXT:    v_mov_b32_e32 v16, v14
+; CHECK-NEXT:    v_mov_b32_e32 v15, v13
+; CHECK-NEXT:    v_mov_b32_e32 v14, v12
+; CHECK-NEXT:    v_mov_b32_e32 v13, v11
+; CHECK-NEXT:    v_mov_b32_e32 v12, v10
+; CHECK-NEXT:    v_mov_b32_e32 v11, v9
+; CHECK-NEXT:    v_mov_b32_e32 v10, v8
+; CHECK-NEXT:    v_mov_b32_e32 v9, v7
+; CHECK-NEXT:    v_mov_b32_e32 v8, v6
+; CHECK-NEXT:    v_mov_b32_e32 v7, v5
+; CHECK-NEXT:    v_mov_b32_e32 v6, v4
+; CHECK-NEXT:    v_mov_b32_e32 v5, v3
+; CHECK-NEXT:    v_mov_b32_e32 v4, v2
+; CHECK-NEXT:    v_mov_b32_e32 v3, v1
+; CHECK-NEXT:    v_mov_b32_e32 v2, v0
+; CHECK-NEXT:    s_waitcnt vmcnt(30)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[31:32]
+; CHECK-NEXT:    v_cndmask_b32_e32 v33, 0, v32, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v32, 32, v31, vcc
+; CHECK-NEXT:    s_waitcnt vmcnt(26)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[36:37]
+; CHECK-NEXT:    v_cmp_lt_u64_e64 s[4:5], 32, v[34:35]
+; CHECK-NEXT:    v_cndmask_b32_e32 v37, 0, v37, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v36, 32, v36, vcc
+; CHECK-NEXT:    s_waitcnt vmcnt(22)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[48:49]
+; CHECK-NEXT:    v_cndmask_b32_e64 v35, 0, v35, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v49, 0, v49, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v48, 32, v48, vcc
+; CHECK-NEXT:    s_waitcnt vmcnt(18)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[52:53]
+; CHECK-NEXT:    v_cndmask_b32_e64 v34, 32, v34, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v53, 0, v53, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v52, 32, v52, vcc
+; CHECK-NEXT:    s_waitcnt vmcnt(14)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[40:41]
+; CHECK-NEXT:    v_cmp_lt_u64_e64 s[4:5], 32, v[38:39]
+; CHECK-NEXT:    v_cndmask_b32_e32 v41, 0, v41, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v40, 32, v40, vcc
+; CHECK-NEXT:    s_waitcnt vmcnt(12)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[42:43]
+; CHECK-NEXT:    v_cndmask_b32_e64 v39, 0, v39, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v43, 0, v43, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v42, 32, v42, vcc
+; CHECK-NEXT:    s_waitcnt vmcnt(10)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[44:45]
+; CHECK-NEXT:    v_cndmask_b32_e64 v38, 32, v38, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v45, 0, v45, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v44, 32, v44, vcc
+; CHECK-NEXT:    s_waitcnt vmcnt(8)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[46:47]
+; CHECK-NEXT:    v_cmp_lt_u64_e64 s[4:5], 32, v[50:51]
+; CHECK-NEXT:    v_cndmask_b32_e32 v47, 0, v47, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v46, 32, v46, vcc
+; CHECK-NEXT:    s_waitcnt vmcnt(6)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[56:57]
+; CHECK-NEXT:    v_cndmask_b32_e64 v51, 0, v51, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v1, 0, v57, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, 32, v56, vcc
+; CHECK-NEXT:    s_waitcnt vmcnt(4)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[58:59]
+; CHECK-NEXT:    v_cndmask_b32_e64 v50, 32, v50, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v59, 0, v59, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v58, 32, v58, vcc
+; CHECK-NEXT:    s_waitcnt vmcnt(2)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[60:61]
+; CHECK-NEXT:    v_cmp_lt_u64_e64 s[4:5], 32, v[54:55]
+; CHECK-NEXT:    v_cndmask_b32_e32 v61, 0, v61, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v60, 32, v60, vcc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, 32, v[62:63]
+; CHECK-NEXT:    v_cndmask_b32_e64 v55, 0, v55, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v57, 0, v63, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v56, 32, v62, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[56:57]
+; CHECK-NEXT:    v_cndmask_b32_e64 v54, 32, v54, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e32 v56, 63, v56, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[60:61]
+; CHECK-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT:    v_cndmask_b32_e32 v57, 63, v60, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[58:59]
+; CHECK-NEXT:    v_cndmask_b32_e32 v58, 63, v58, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[0:1]
+; CHECK-NEXT:    v_cndmask_b32_e32 v59, 63, v0, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[46:47]
+; CHECK-NEXT:    v_cndmask_b32_e32 v46, 63, v46, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[44:45]
+; CHECK-NEXT:    v_cndmask_b32_e32 v44, 63, v44, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[42:43]
+; CHECK-NEXT:    v_cndmask_b32_e32 v42, 63, v42, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[40:41]
+; CHECK-NEXT:    v_cndmask_b32_e32 v40, 63, v40, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[54:55]
+; CHECK-NEXT:    v_cndmask_b32_e32 v54, 63, v54, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[52:53]
+; CHECK-NEXT:    v_cndmask_b32_e32 v52, 63, v52, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[50:51]
+; CHECK-NEXT:    v_cndmask_b32_e32 v50, 63, v50, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[48:49]
+; CHECK-NEXT:    v_cndmask_b32_e32 v48, 63, v48, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[38:39]
+; CHECK-NEXT:    v_cndmask_b32_e32 v38, 63, v38, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[36:37]
+; CHECK-NEXT:    v_cndmask_b32_e32 v36, 63, v36, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[34:35]
+; CHECK-NEXT:    v_cndmask_b32_e32 v34, 63, v34, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, 63, v[32:33]
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, 63, v32, vcc
+; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v0, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v34, v[4:5]
+; CHECK-NEXT:    v_lshlrev_b64 v[4:5], v36, v[6:7]
+; CHECK-NEXT:    v_lshlrev_b64 v[6:7], v38, v[8:9]
+; CHECK-NEXT:    v_lshlrev_b64 v[8:9], v48, v[10:11]
+; CHECK-NEXT:    v_lshlrev_b64 v[10:11], v50, v[12:13]
+; CHECK-NEXT:    v_lshlrev_b64 v[12:13], v52, v[14:15]
+; CHECK-NEXT:    v_lshlrev_b64 v[14:15], v54, v[16:17]
+; CHECK-NEXT:    v_lshlrev_b64 v[16:17], v40, v[18:19]
+; CHECK-NEXT:    v_lshlrev_b64 v[18:19], v42, v[20:21]
+; CHECK-NEXT:    v_lshlrev_b64 v[20:21], v44, v[22:23]
+; CHECK-NEXT:    v_lshlrev_b64 v[22:23], v46, v[24:25]
+; CHECK-NEXT:    v_lshlrev_b64 v[24:25], v59, v[26:27]
+; CHECK-NEXT:    v_lshlrev_b64 v[26:27], v58, v[28:29]
+; CHECK-NEXT:    buffer_load_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; CHECK-NEXT:    s_waitcnt vmcnt(2)
+; CHECK-NEXT:    v_lshlrev_b64 v[30:31], v56, v[30:31]
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b64 v[28:29], v57, v[28:29]
+; CHECK-NEXT:    buffer_load_dword v63, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v62, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v60, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v59, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v58, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v57, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v56, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v47, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v45, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v44, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v43, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v42, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v41, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %max = call <16 x i64> @llvm.umax.i64(<16 x i64> %arg1, <16 x i64> splat (i64 32))
+  %min = call <16 x i64> @llvm.umin.i64(<16 x i64> %max,  <16 x i64> splat (i64 63))
+  %shl = shl <16 x i64> %arg0, %min
+  ret <16 x i64> %shl
+}

>From 862c784132611675e09703a24b1956255dd5b1c3 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 6 Feb 2025 15:29:13 -0600
Subject: [PATCH 4/4] Use & instead of sub to adjust shift

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |  10 +-
 llvm/test/CodeGen/AMDGPU/shl64_reduce.ll      | 399 ++++++------------
 2 files changed, 128 insertions(+), 281 deletions(-)
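
A quick standalone sanity check of the subject-line claim (this sketch is not part of the patch; the loop bounds and main() wrapper are just illustrative): for a shift amount y whose known range is [32, 63], masking the truncated amount with 31 gives the same value as subtracting 32, so the combine can emit an AND instead of a SUB. That presumably is why the updated CHECK lines below drop the adjustment entirely: the 32-bit shift only uses the low 5 bits of its amount, so the AND folds away.

  // Hypothetical check, not part of the patch.
  #include <cassert>
  #include <cstdint>

  int main() {
    for (uint32_t y = 32; y <= 63; ++y)
      assert((y & 31u) == (y - 32u)); // AND by 31 == SUB 32 on [32, 63]
    return 0;
  }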

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d7c004e1308c7f6..33e6b64cddcd708 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4047,16 +4047,16 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
   SelectionDAG &DAG = DCI.DAG;
 
   if (!CRHS) {
-    // shl i64 X, Y -> [0, shl i32 X, (Y - 32)]
+    // shl i64 X, Y -> [0, shl i32 X, (Y & 0x1F)]
     if (VT == MVT::i64) {
       KnownBits Known = DAG.computeKnownBits(RHS);
       if (Known.getMinValue().getZExtValue() >= 32) {
         SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, RHS);
-        const SDValue C32 = DAG.getConstant(32, SL, MVT::i32);
-        SDValue ShiftAmt =
-            DAG.getNode(ISD::SUB, SL, MVT::i32, truncShiftAmt, C32);
+        const SDValue C31 = DAG.getConstant(31, SL, MVT::i32);
+        SDValue MaskedShiftAmt =
+            DAG.getNode(ISD::AND, SL, MVT::i32, truncShiftAmt, C31);
         SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
-        SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
+        SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, MaskedShiftAmt);
         const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
         SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
         return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index a49694d8da5a839..c3d11d632df8d8e 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -5,7 +5,7 @@
 ;;
 ;; where Y is in the range [63-32] to:
 ;;
-;;   DST = [0, shl i32 X, (Y - 32)]
+;;   DST = [0, shl i32 X, (Y & 0x1F)]
 
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
 
@@ -541,9 +541,7 @@ define i64 @shl_or32(i64 noundef %arg0, i64 %shift_amt) {
 ; CHECK-LABEL: shl_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v2
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v2, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %or = or i64 %shift_amt, 32
@@ -555,12 +553,8 @@ define <2 x i64> @shl_v2_or32(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
 ; CHECK-LABEL: shl_v2_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v4
-; CHECK-NEXT:    v_or_b32_e32 v3, 32, v6
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v3
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v4, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v6, v2
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
@@ -573,15 +567,9 @@ define <3 x i64> @shl_v3_or32(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
 ; CHECK-LABEL: shl_v3_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v6
-; CHECK-NEXT:    v_or_b32_e32 v3, 32, v8
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT:    v_or_b32_e32 v5, 32, v10
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v3
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v5
-; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v6, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v8, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v10, v4
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v4, 0
@@ -595,18 +583,10 @@ define <4 x i64> @shl_v4_or32(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
 ; CHECK-LABEL: shl_v4_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v8
-; CHECK-NEXT:    v_or_b32_e32 v3, 32, v10
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT:    v_or_b32_e32 v5, 32, v12
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v3
-; CHECK-NEXT:    v_or_b32_e32 v7, 32, v14
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v5
-; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v0, v4
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v7
-; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v0, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v8, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v10, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v12, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v14, v6
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v4, 0
@@ -621,21 +601,11 @@ define <5 x i64> @shl_v5_or32(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
 ; CHECK-LABEL: shl_v5_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v10
-; CHECK-NEXT:    v_or_b32_e32 v3, 32, v12
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT:    v_or_b32_e32 v5, 32, v14
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v3
-; CHECK-NEXT:    v_or_b32_e32 v7, 32, v16
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v5
-; CHECK-NEXT:    v_or_b32_e32 v9, 32, v18
-; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v0, v4
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v7
-; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v0, v6
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v9
-; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v0, v8
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v10, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v12, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v14, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v16, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v18, v8
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v4, 0
@@ -651,30 +621,14 @@ define <8 x i64> @shl_v8_or32(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
 ; CHECK-LABEL: shl_v8_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v16
-; CHECK-NEXT:    v_or_b32_e32 v3, 32, v18
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT:    v_or_b32_e32 v5, 32, v20
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v3
-; CHECK-NEXT:    v_or_b32_e32 v7, 32, v22
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v5
-; CHECK-NEXT:    v_or_b32_e32 v9, 32, v24
-; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v0, v4
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v7
-; CHECK-NEXT:    v_or_b32_e32 v11, 32, v26
-; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v0, v6
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v9
-; CHECK-NEXT:    v_or_b32_e32 v13, 32, v28
-; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v0, v8
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v11
-; CHECK-NEXT:    v_or_b32_e32 v15, 32, v30
-; CHECK-NEXT:    v_lshlrev_b32_e32 v11, v0, v10
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v13
-; CHECK-NEXT:    v_lshlrev_b32_e32 v13, v0, v12
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v15
-; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v0, v14
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v20, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v22, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v24, v8
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, v26, v10
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, v28, v12
+; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v30, v14
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v4, 0
@@ -697,93 +651,63 @@ define <16 x i64> @shl_v16_or32(<16 x i64> noundef %arg0, <16 x i64> %shift_amt)
 ; CHECK-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:12
 ; CHECK-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:20
 ; CHECK-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:28
-; CHECK-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:60
-; CHECK-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:52
-; CHECK-NEXT:    buffer_load_dword v13, off, s[0:3], s32 offset:44
-; CHECK-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:36
-; CHECK-NEXT:    s_waitcnt vmcnt(7)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v1
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
 ; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT:    s_waitcnt vmcnt(6)
-; CHECK-NEXT:    v_or_b32_e32 v0, 32, v3
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v0, v2
-; CHECK-NEXT:    s_waitcnt vmcnt(5)
-; CHECK-NEXT:    v_or_b32_e32 v0, 32, v5
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v0, v4
-; CHECK-NEXT:    s_waitcnt vmcnt(4)
-; CHECK-NEXT:    v_or_b32_e32 v0, 32, v7
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v0, v6
-; CHECK-NEXT:    s_waitcnt vmcnt(2)
-; CHECK-NEXT:    v_or_b32_e32 v2, 32, v11
-; CHECK-NEXT:    s_waitcnt vmcnt(1)
-; CHECK-NEXT:    v_or_b32_e32 v4, 32, v13
-; CHECK-NEXT:    s_waitcnt vmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v6, 32, v15
-; CHECK-NEXT:    v_subrev_u32_e32 v6, 32, v6
-; CHECK-NEXT:    v_subrev_u32_e32 v4, 32, v4
-; CHECK-NEXT:    v_subrev_u32_e32 v2, 32, v2
-; CHECK-NEXT:    v_or_b32_e32 v0, 32, v9
-; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v6, v8
-; CHECK-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:68
-; CHECK-NEXT:    v_lshlrev_b32_e32 v11, v4, v10
-; CHECK-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:76
-; CHECK-NEXT:    v_lshlrev_b32_e32 v13, v2, v12
-; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:84
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v0, v14
-; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:92
-; CHECK-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:124
-; CHECK-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:116
-; CHECK-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:108
-; CHECK-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:100
-; CHECK-NEXT:    s_waitcnt vmcnt(7)
-; CHECK-NEXT:    v_or_b32_e32 v6, 32, v6
-; CHECK-NEXT:    v_subrev_u32_e32 v6, 32, v6
-; CHECK-NEXT:    s_waitcnt vmcnt(6)
-; CHECK-NEXT:    v_or_b32_e32 v4, 32, v4
-; CHECK-NEXT:    s_waitcnt vmcnt(4)
-; CHECK-NEXT:    v_or_b32_e32 v0, 32, v0
-; CHECK-NEXT:    v_or_b32_e32 v2, 32, v2
-; CHECK-NEXT:    v_subrev_u32_e32 v4, 32, v4
-; CHECK-NEXT:    v_subrev_u32_e32 v2, 32, v2
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v17, v6, v16
-; CHECK-NEXT:    v_lshlrev_b32_e32 v19, v4, v18
-; CHECK-NEXT:    v_lshlrev_b32_e32 v21, v2, v20
-; CHECK-NEXT:    v_lshlrev_b32_e32 v23, v0, v22
+; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:36
 ; CHECK-NEXT:    s_waitcnt vmcnt(3)
-; CHECK-NEXT:    v_or_b32_e32 v0, 32, v8
-; CHECK-NEXT:    s_waitcnt vmcnt(2)
-; CHECK-NEXT:    v_or_b32_e32 v2, 32, v10
-; CHECK-NEXT:    s_waitcnt vmcnt(1)
-; CHECK-NEXT:    v_or_b32_e32 v4, 32, v12
-; CHECK-NEXT:    s_waitcnt vmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v6, 32, v14
-; CHECK-NEXT:    v_subrev_u32_e32 v6, 32, v6
-; CHECK-NEXT:    v_subrev_u32_e32 v4, 32, v4
-; CHECK-NEXT:    v_subrev_u32_e32 v2, 32, v2
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v25, v6, v24
-; CHECK-NEXT:    v_lshlrev_b32_e32 v27, v4, v26
-; CHECK-NEXT:    v_lshlrev_b32_e32 v29, v2, v28
-; CHECK-NEXT:    v_lshlrev_b32_e32 v31, v0, v30
-; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    v_mov_b32_e32 v2, 0
-; CHECK-NEXT:    v_mov_b32_e32 v4, 0
-; CHECK-NEXT:    v_mov_b32_e32 v6, 0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v3, v2
+; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:44
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, v5, v4
+; CHECK-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:52
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, v7, v6
+; CHECK-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:60
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, v0, v8
+; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:68
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, v2, v10
+; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:76
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, v4, v12
+; CHECK-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:84
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v6, v14
+; CHECK-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:92
 ; CHECK-NEXT:    v_mov_b32_e32 v8, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v10, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v12, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v14, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v17, v0, v16
+; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:100
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v19, v2, v18
 ; CHECK-NEXT:    v_mov_b32_e32 v16, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(2)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v21, v4, v20
+; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:116
+; CHECK-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:108
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v23, v6, v22
+; CHECK-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:124
 ; CHECK-NEXT:    v_mov_b32_e32 v18, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v20, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v22, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v25, v0, v24
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v24, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(2)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v29, v2, v28
+; CHECK-NEXT:    s_waitcnt vmcnt(1)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v27, v4, v26
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v31, v6, v30
+; CHECK-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v26, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v28, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v30, 0
@@ -799,9 +723,7 @@ define i64 @shl_or32_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
 ; CHECK-LABEL: shl_or32_inreg:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_or_b32 s4, s16, 32
-; CHECK-NEXT:    s_sub_i32 s4, s4, 32
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s4, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %or = or i64 %shift_amt, 32
@@ -813,12 +735,8 @@ define <2 x i64> @shl_v2_or32_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %sh
 ; CHECK-LABEL: shl_v2_or32_inreg:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_or_b32 s4, s18, 32
-; CHECK-NEXT:    s_or_b32 s5, s16, 32
-; CHECK-NEXT:    s_sub_i32 s5, s5, 32
-; CHECK-NEXT:    s_sub_i32 s4, s4, 32
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s5, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s4, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
@@ -831,15 +749,9 @@ define <3 x i64> @shl_v3_or32_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %sh
 ; CHECK-LABEL: shl_v3_or32_inreg:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_or_b32 s4, s20, 32
-; CHECK-NEXT:    s_or_b32 s5, s18, 32
-; CHECK-NEXT:    s_or_b32 s6, s16, 32
-; CHECK-NEXT:    s_sub_i32 s6, s6, 32
-; CHECK-NEXT:    s_sub_i32 s5, s5, 32
-; CHECK-NEXT:    s_sub_i32 s4, s4, 32
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s6, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s5, v2
-; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s4, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s20, v4
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v4, 0
@@ -853,18 +765,10 @@ define <4 x i64> @shl_v4_or32_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %sh
 ; CHECK-LABEL: shl_v4_or32_inreg:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_or_b32 s4, s22, 32
-; CHECK-NEXT:    s_or_b32 s5, s20, 32
-; CHECK-NEXT:    s_or_b32 s6, s18, 32
-; CHECK-NEXT:    s_or_b32 s7, s16, 32
-; CHECK-NEXT:    s_sub_i32 s7, s7, 32
-; CHECK-NEXT:    s_sub_i32 s6, s6, 32
-; CHECK-NEXT:    s_sub_i32 s5, s5, 32
-; CHECK-NEXT:    s_sub_i32 s4, s4, 32
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s7, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s6, v2
-; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s5, v4
-; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s4, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s22, v6
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v4, 0
@@ -879,21 +783,11 @@ define <5 x i64> @shl_v5_or32_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %sh
 ; CHECK-LABEL: shl_v5_or32_inreg:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_or_b32 s4, s24, 32
-; CHECK-NEXT:    s_or_b32 s5, s22, 32
-; CHECK-NEXT:    s_or_b32 s6, s20, 32
-; CHECK-NEXT:    s_or_b32 s7, s18, 32
-; CHECK-NEXT:    s_or_b32 s8, s16, 32
-; CHECK-NEXT:    s_sub_i32 s8, s8, 32
-; CHECK-NEXT:    s_sub_i32 s7, s7, 32
-; CHECK-NEXT:    s_sub_i32 s6, s6, 32
-; CHECK-NEXT:    s_sub_i32 s5, s5, 32
-; CHECK-NEXT:    s_sub_i32 s4, s4, 32
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s8, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s7, v2
-; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s6, v4
-; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s5, v6
-; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s4, v8
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s22, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s24, v8
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v4, 0
@@ -909,30 +803,14 @@ define <8 x i64> @shl_v8_or32_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %sh
 ; CHECK-LABEL: shl_v8_or32_inreg:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_or_b32 s10, s16, 32
-; CHECK-NEXT:    v_or_b32_e32 v15, 32, v16
-; CHECK-NEXT:    s_or_b32 s4, s28, 32
-; CHECK-NEXT:    s_or_b32 s5, s26, 32
-; CHECK-NEXT:    s_or_b32 s6, s24, 32
-; CHECK-NEXT:    s_or_b32 s7, s22, 32
-; CHECK-NEXT:    s_or_b32 s8, s20, 32
-; CHECK-NEXT:    s_or_b32 s9, s18, 32
-; CHECK-NEXT:    s_sub_i32 s10, s10, 32
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s10, v0
-; CHECK-NEXT:    s_sub_i32 s9, s9, 32
-; CHECK-NEXT:    s_sub_i32 s8, s8, 32
-; CHECK-NEXT:    s_sub_i32 s7, s7, 32
-; CHECK-NEXT:    s_sub_i32 s6, s6, 32
-; CHECK-NEXT:    s_sub_i32 s5, s5, 32
-; CHECK-NEXT:    s_sub_i32 s4, s4, 32
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v15
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s9, v2
-; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s8, v4
-; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s7, v6
-; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s6, v8
-; CHECK-NEXT:    v_lshlrev_b32_e32 v11, s5, v10
-; CHECK-NEXT:    v_lshlrev_b32_e32 v13, s4, v12
-; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v0, v14
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s22, v6
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s24, v8
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, s26, v10
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, s28, v12
+; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v16, v14
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v4, 0
@@ -955,68 +833,43 @@ define <16 x i64> @shl_v16_or32_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg
 ; CHECK-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:12
 ; CHECK-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:20
 ; CHECK-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:28
-; CHECK-NEXT:    buffer_load_dword v9, off, s[0:3], s32 offset:36
-; CHECK-NEXT:    buffer_load_dword v25, off, s[0:3], s32 offset:44
-; CHECK-NEXT:    buffer_load_dword v27, off, s[0:3], s32 offset:52
-; CHECK-NEXT:    buffer_load_dword v29, off, s[0:3], s32 offset:60
-; CHECK-NEXT:    s_or_b32 s7, s22, 32
-; CHECK-NEXT:    s_or_b32 s8, s20, 32
-; CHECK-NEXT:    s_or_b32 s9, s18, 32
-; CHECK-NEXT:    s_or_b32 s10, s16, 32
-; CHECK-NEXT:    s_sub_i32 s10, s10, 32
-; CHECK-NEXT:    s_sub_i32 s9, s9, 32
-; CHECK-NEXT:    s_sub_i32 s8, s8, 32
-; CHECK-NEXT:    s_sub_i32 s7, s7, 32
-; CHECK-NEXT:    s_or_b32 s4, s28, 32
-; CHECK-NEXT:    s_or_b32 s5, s26, 32
-; CHECK-NEXT:    s_or_b32 s6, s24, 32
-; CHECK-NEXT:    s_sub_i32 s6, s6, 32
-; CHECK-NEXT:    s_sub_i32 s5, s5, 32
-; CHECK-NEXT:    s_sub_i32 s4, s4, 32
-; CHECK-NEXT:    v_lshlrev_b32_e32 v11, s5, v10
-; CHECK-NEXT:    v_lshlrev_b32_e32 v13, s4, v12
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s24, v8
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, s26, v10
+; CHECK-NEXT:    v_lshlrev_b32_e32 v13, s28, v12
+; CHECK-NEXT:    v_mov_b32_e32 v8, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v10, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v12, 0
-; CHECK-NEXT:    s_waitcnt vmcnt(7)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v1
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
 ; CHECK-NEXT:    v_lshlrev_b32_e32 v15, v1, v14
-; CHECK-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:68
-; CHECK-NEXT:    s_waitcnt vmcnt(7)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v3
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT:    v_lshlrev_b32_e32 v17, v1, v16
-; CHECK-NEXT:    s_waitcnt vmcnt(6)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v5
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT:    v_lshlrev_b32_e32 v19, v1, v18
-; CHECK-NEXT:    s_waitcnt vmcnt(5)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v7
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT:    v_lshlrev_b32_e32 v21, v1, v20
+; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:36
+; CHECK-NEXT:    buffer_load_dword v14, off, s[0:3], s32 offset:44
 ; CHECK-NEXT:    s_waitcnt vmcnt(4)
-; CHECK-NEXT:    v_or_b32_e32 v1, 32, v9
-; CHECK-NEXT:    v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT:    v_lshlrev_b32_e32 v23, v1, v22
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s10, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s9, v2
-; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s8, v4
-; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s7, v6
-; CHECK-NEXT:    s_waitcnt vmcnt(1)
-; CHECK-NEXT:    v_or_b32_e32 v2, 32, v29
-; CHECK-NEXT:    v_or_b32_e32 v4, 32, v27
-; CHECK-NEXT:    v_or_b32_e32 v6, 32, v25
-; CHECK-NEXT:    v_subrev_u32_e32 v6, 32, v6
-; CHECK-NEXT:    v_subrev_u32_e32 v4, 32, v4
-; CHECK-NEXT:    v_subrev_u32_e32 v2, 32, v2
-; CHECK-NEXT:    v_lshlrev_b32_e32 v9, s6, v8
-; CHECK-NEXT:    v_lshlrev_b32_e32 v25, v6, v24
-; CHECK-NEXT:    v_lshlrev_b32_e32 v27, v4, v26
-; CHECK-NEXT:    v_lshlrev_b32_e32 v29, v2, v28
+; CHECK-NEXT:    v_lshlrev_b32_e32 v17, v3, v16
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v19, v5, v18
+; CHECK-NEXT:    s_waitcnt vmcnt(2)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v21, v7, v20
+; CHECK-NEXT:    buffer_load_dword v20, off, s[0:3], s32 offset:68
+; CHECK-NEXT:    buffer_load_dword v16, off, s[0:3], s32 offset:60
+; CHECK-NEXT:    buffer_load_dword v18, off, s[0:3], s32 offset:52
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT:    v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v7, s22, v6
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v4, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v6, 0
-; CHECK-NEXT:    v_mov_b32_e32 v8, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(4)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v23, v1, v22
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT:    s_waitcnt vmcnt(3)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v25, v14, v24
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v14, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(1)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v29, v16, v28
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_lshlrev_b32_e32 v27, v18, v26
+; CHECK-NEXT:    v_lshlrev_b32_e32 v31, v20, v30
 ; CHECK-NEXT:    v_mov_b32_e32 v16, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v18, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v20, 0
@@ -1024,12 +877,6 @@ define <16 x i64> @shl_v16_or32_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg
 ; CHECK-NEXT:    v_mov_b32_e32 v24, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v26, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v28, 0
-; CHECK-NEXT:    s_waitcnt vmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v0, 32, v14
-; CHECK-NEXT:    v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v31, v0, v30
-; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    v_mov_b32_e32 v14, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v30, 0
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %or = or <16 x i64> %shift_amt, splat (i64 32)
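
To summarize the transformation the test updates above exercise, here is a hypothetical standalone model (not part of the patch; the helper name, test values, and the use of unsigned wraparound in place of the DAG's poison rules are all assumptions): for Y known to lie in [32, 63], shl i64 X, Y is rewritten as a v2i32 build_vector {0, shl i32 (trunc X), (trunc Y) & 31} bitcast back to i64.

  // Hypothetical model of the combine; not part of the patch.
  #include <cassert>
  #include <cstdint>

  static uint64_t shl64_reduced(uint64_t x, uint64_t y) {
    uint32_t lo = static_cast<uint32_t>(x);        // trunc X to i32
    uint32_t amt = static_cast<uint32_t>(y) & 31u; // trunc Y, mask into [0, 31]
    // build_vector {0, lo << amt} : v2i32, bitcast to i64
    // (element 0 holds the low 32 bits on little-endian AMDGPU).
    return static_cast<uint64_t>(lo << amt) << 32;
  }

  int main() {
    const uint64_t xs[] = {0x0123456789abcdefULL, ~0ULL, 1ULL};
    for (uint64_t x : xs)
      for (uint64_t y = 32; y <= 63; ++y)
        assert((x << y) == shl64_reduced(x, y)); // matches a plain 64-bit shl
    return 0;
  }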
