[llvm] Reduce shl64 to shl32 if shift range is [63-32] (PR #125574)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 12 08:02:03 PST 2025
LU-JOHN (https://github.com/LU-JOHN) updated https://github.com/llvm/llvm-project/pull/125574
From bcf4934e5401052ea97a95b34e8fcc654b1c47d0 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Wed, 5 Feb 2025 11:01:30 -0600
Subject: [PATCH 01/12] Reduce shl64 to shl32 if shift range is [63-32]
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 32 ++++++---
llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 67 +++++++++++++++++++
2 files changed, 91 insertions(+), 8 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
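For reference, here is a minimal LLVM IR sketch of the rewrite this combine performs. It is illustrative only: the combine itself runs on SelectionDAG nodes, the function and value names below are made up, and the <2 x i32>-to-i64 layout relies on AMDGPU being little endian (element 0 is the low word). When the shift amount Y is known to lie in [32, 63], X << Y always has a zero low word, and its high word is just a 32-bit shift of the low word of X by Y - 32:

  ; 64-bit shift whose amount is known to be in [32, 63] ...
  define i64 @shl64_wide(i64 %x, i64 %amt) {
    %shl = shl i64 %x, %amt
    ret i64 %shl
  }

  ; ... behaves like a single 32-bit shift placed in the high word.
  define i64 @shl64_narrow(i64 %x, i64 %amt) {
    %amt32   = trunc i64 %amt to i32
    %amt.sub = sub i32 %amt32, 32             ; Y - 32
    %x.lo    = trunc i64 %x to i32
    %hi      = shl i32 %x.lo, %amt.sub        ; only a 32-bit shift is needed
    %vec     = insertelement <2 x i32> <i32 0, i32 poison>, i32 %hi, i64 1
    %res     = bitcast <2 x i32> %vec to i64  ; element 0 is the zero low word
    ret i64 %res
  }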
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 792e17eeedab1..d7c004e1308c7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4040,19 +4040,35 @@ SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
EVT VT = N->getValueType(0);
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
+ SDLoc SL(N);
+ SelectionDAG &DAG = DCI.DAG;
- ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
- if (!RHS)
+ if (!CRHS) {
+ // shl i64 X, Y -> [0, shl i32 X, (Y - 32)]
+ if (VT == MVT::i64) {
+ KnownBits Known = DAG.computeKnownBits(RHS);
+ if (Known.getMinValue().getZExtValue() >= 32) {
+ SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, RHS);
+ const SDValue C32 = DAG.getConstant(32, SL, MVT::i32);
+ SDValue ShiftAmt =
+ DAG.getNode(ISD::SUB, SL, MVT::i32, truncShiftAmt, C32);
+ SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
+ SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
+ const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
+ SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
+ return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
+ }
+ }
return SDValue();
+ }
- SDValue LHS = N->getOperand(0);
- unsigned RHSVal = RHS->getZExtValue();
+ unsigned RHSVal = CRHS->getZExtValue();
if (!RHSVal)
return LHS;
- SDLoc SL(N);
- SelectionDAG &DAG = DCI.DAG;
-
switch (LHS->getOpcode()) {
default:
break;
@@ -4078,7 +4094,7 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
if (LZ < RHSVal)
break;
EVT XVT = X.getValueType();
- SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
+ SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(CRHS, 0));
return DAG.getZExtOrTrunc(Shl, SL, VT);
}
}
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
new file mode 100644
index 0000000000000..d18dc2bf37310
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -0,0 +1,67 @@
+;; Test reduction of:
+;;
+;; DST = shl i64 X, Y
+;;
+;; where Y is in the range [63-32] to:
+;;
+;; DST = [0, shl i32 X, (Y - 32)]
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck %s
+
+; FIXME: This case should be reduced, but SelectionDAG::computeKnownBits() cannot
+; determine the minimum from metadata in this case. Match current results
+; for now.
+define i64 @shl_metadata(i64 noundef %arg0, ptr %arg1.ptr) {
+ %shift.amt = load i64, ptr %arg1.ptr, !range !0
+ %shl = shl i64 %arg0, %shift.amt
+ ret i64 %shl
+
+; CHECK: .globl shl_metadata
+; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
+}
+
+!0 = !{i64 32, i64 64}
+
+; This case is reduced because computeKnownBits() can calculates a minimum of 32
+; based on the OR with 32.
+define i64 @shl_or32(i64 noundef %arg0, ptr %arg1.ptr) {
+ %shift.amt = load i64, ptr %arg1.ptr
+ %or = or i64 %shift.amt, 32
+ %shl = shl i64 %arg0, %or
+ ret i64 %shl
+
+; CHECK: .globl shl_or32
+; CHECK: v_or_b32_e32 v1, 32, v1
+; CHECK: v_subrev_i32_e32 v1, vcc, 32, v1
+; CHECK: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK: v_mov_b32_e32 v0, 0
+}
+
+; This case must not be reduced because the known minimum, 16, is not in range.
+define i64 @shl_or16(i64 noundef %arg0, ptr %arg1.ptr) {
+ %shift.amt = load i64, ptr %arg1.ptr
+ %or = or i64 %shift.amt, 16
+ %shl = shl i64 %arg0, %or
+ ret i64 %shl
+
+; CHECK: .globl shl_or16
+; CHECK: v_or_b32_e32 v2, 16, v2
+; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
+}
+
+; FIXME: This case should be reduced too, but computeKnownBits() cannot
+; determine the range. Match current results for now.
+define i64 @shl_maxmin(i64 noundef %arg0, i64 noundef %arg1) {
+ %max = call i64 @llvm.umax.i64(i64 %arg1, i64 32)
+ %min = call i64 @llvm.umin.i64(i64 %max, i64 63)
+ %shl = shl i64 %arg0, %min
+ ret i64 %shl
+
+; CHECK: .globl shl_maxmin
+; CHECK: v_cmp_lt_u64_e32 vcc, 32, v[2:3]
+; CHECK: v_cndmask_b32_e32 v3, 0, v3, vcc
+; CHECK: v_cndmask_b32_e32 v2, 32, v2, vcc
+; CHECK: v_cmp_gt_u64_e32 vcc, 63, v[2:3]
+; CHECK: v_cndmask_b32_e32 v2, 63, v2, vcc
+; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
+}
From dafda9e2430cb4108608019ac884d97b5b5671eb Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 6 Feb 2025 09:06:16 -0600
Subject: [PATCH 02/12] Use explicit cpu and update_llc_test_checks
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 61 +++++++++++++++---------
1 file changed, 38 insertions(+), 23 deletions(-)
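As the NOTE line added at the top of the test indicates, the assertions in this revision were generated with update_llc_test_checks.py. For anyone re-generating them locally, a typical invocation from the repository root looks something like the following (the build/ directory is just an assumed local build path):

  llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
      llvm/test/CodeGen/AMDGPU/shl64_reduce.ll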
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index d18dc2bf37310..26cd04082cf42 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
;; Test reduction of:
;;
;; DST = shl i64 X, Y
@@ -6,62 +7,76 @@
;;
;; DST = [0, shl i32 X, (Y - 32)]
-; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
; FIXME: This case should be reduced, but SelectionDAG::computeKnownBits() cannot
; determine the minimum from metadata in this case. Match current results
; for now.
define i64 @shl_metadata(i64 noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
%shift.amt = load i64, ptr %arg1.ptr, !range !0
%shl = shl i64 %arg0, %shift.amt
ret i64 %shl
-
-; CHECK: .globl shl_metadata
-; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
}
!0 = !{i64 32, i64 64}
-; This case is reduced because computeKnownBits() can calculates a minimum of 32
+; This case is reduced because computeKnownBits() can calculate a minimum of 32
; based on the OR with 32.
define i64 @shl_or32(i64 noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_or32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v1, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v1
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
%shift.amt = load i64, ptr %arg1.ptr
%or = or i64 %shift.amt, 32
%shl = shl i64 %arg0, %or
ret i64 %shl
-
-; CHECK: .globl shl_or32
-; CHECK: v_or_b32_e32 v1, 32, v1
-; CHECK: v_subrev_i32_e32 v1, vcc, 32, v1
-; CHECK: v_lshlrev_b32_e32 v1, v1, v0
-; CHECK: v_mov_b32_e32 v0, 0
}
; This case must not be reduced because the known minimum, 16, is not in range.
define i64 @shl_or16(i64 noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_or16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v2, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v2, 16, v2
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
%shift.amt = load i64, ptr %arg1.ptr
%or = or i64 %shift.amt, 16
%shl = shl i64 %arg0, %or
ret i64 %shl
-
-; CHECK: .globl shl_or16
-; CHECK: v_or_b32_e32 v2, 16, v2
-; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
}
; FIXME: This case should be reduced too, but computeKnownBits() cannot
; determine the range. Match current results for now.
define i64 @shl_maxmin(i64 noundef %arg0, i64 noundef %arg1) {
+; CHECK-LABEL: shl_maxmin:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v2, 32, v2, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v2, 63, v2, vcc
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
%max = call i64 @llvm.umax.i64(i64 %arg1, i64 32)
%min = call i64 @llvm.umin.i64(i64 %max, i64 63)
%shl = shl i64 %arg0, %min
ret i64 %shl
-
-; CHECK: .globl shl_maxmin
-; CHECK: v_cmp_lt_u64_e32 vcc, 32, v[2:3]
-; CHECK: v_cndmask_b32_e32 v3, 0, v3, vcc
-; CHECK: v_cndmask_b32_e32 v2, 32, v2, vcc
-; CHECK: v_cmp_gt_u64_e32 vcc, 63, v[2:3]
-; CHECK: v_cndmask_b32_e32 v2, 63, v2, vcc
-; CHECK: v_lshl_b64 v[0:1], v[0:1], v2
}
From 5a0b7f5f4d7feded3bb1fe2a59510b7bd451b3cd Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 6 Feb 2025 14:01:49 -0600
Subject: [PATCH 03/12] Test vector and inreg variations
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 1421 +++++++++++++++++++++-
1 file changed, 1405 insertions(+), 16 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index 26cd04082cf42..a49694d8da5a8 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -9,9 +9,14 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range with metadata
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
; FIXME: This case should be reduced, but SelectionDAG::computeKnownBits() cannot
; determine the minimum from metadata in this case. Match current results
; for now.
+
define i64 @shl_metadata(i64 noundef %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_metadata:
; CHECK: ; %bb.0:
@@ -25,45 +30,1020 @@ define i64 @shl_metadata(i64 noundef %arg0, ptr %arg1.ptr) {
ret i64 %shl
}
+define <2 x i64> @shl_v2_metadata(<2 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v2_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v6, v[2:3]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0
+ %shl = shl <2 x i64> %arg0, %shift.amt
+ ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_metadata(<3 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v3_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v12, v[6:7] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[6:7]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v12, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v8, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v10, v[2:3]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %shift.amt = load <3 x i64>, ptr %arg1.ptr, !range !0
+ %shl = shl <3 x i64> %arg0, %shift.amt
+ ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_metadata(<4 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v4_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[10:13], v[8:9]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[13:16], v[8:9] offset:16
+; CHECK-NEXT: ; kill: killed $vgpr8 killed $vgpr9
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v10, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v12, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v13, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v15, v[6:7]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %shift.amt = load <4 x i64>, ptr %arg1.ptr, !range !0
+ %shl = shl <4 x i64> %arg0, %shift.amt
+ ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_metadata(<5 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v5_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v19, v[10:11] offset:32
+; CHECK-NEXT: flat_load_dwordx4 v[12:15], v[10:11]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[15:18], v[10:11] offset:16
+; CHECK-NEXT: ; kill: killed $vgpr10 killed $vgpr11
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], v19, v[8:9]
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v12, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v14, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v15, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v17, v[6:7]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %shift.amt = load <5 x i64>, ptr %arg1.ptr, !range !0
+ %shl = shl <5 x i64> %arg0, %shift.amt
+ ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_metadata(<8 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v8_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[18:21], v[16:17]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v18, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v20, v[2:3]
+; CHECK-NEXT: flat_load_dwordx4 v[18:21], v[16:17] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v18, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v20, v[6:7]
+; CHECK-NEXT: flat_load_dwordx4 v[18:21], v[16:17] offset:32
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], v18, v[8:9]
+; CHECK-NEXT: flat_load_dwordx4 v[16:19], v[16:17] offset:48
+; CHECK-NEXT: v_lshlrev_b64 v[10:11], v20, v[10:11]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[12:13], v16, v[12:13]
+; CHECK-NEXT: v_lshlrev_b64 v[14:15], v18, v[14:15]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %shift.amt = load <8 x i64>, ptr %arg1.ptr, !range !0
+ %shl = shl <8 x i64> %arg0, %shift.amt
+ ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_metadata(<16 x i64> noundef %arg0, ptr %arg1.ptr) {
+; CHECK-LABEL: shl_v16_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:8
+; CHECK-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:4
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: flat_load_dwordx4 v[32:35], v[48:49]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[35:38], v[48:49] offset:16
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v32, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v34, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v35, v[4:5]
+; CHECK-NEXT: flat_load_dwordx4 v[32:35], v[48:49] offset:32
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v37, v[6:7]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[35:38], v[48:49] offset:48
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], v32, v[8:9]
+; CHECK-NEXT: v_lshlrev_b64 v[10:11], v34, v[10:11]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[12:13], v35, v[12:13]
+; CHECK-NEXT: flat_load_dwordx4 v[32:35], v[48:49] offset:64
+; CHECK-NEXT: v_lshlrev_b64 v[14:15], v37, v[14:15]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[35:38], v[48:49] offset:80
+; CHECK-NEXT: v_lshlrev_b64 v[16:17], v32, v[16:17]
+; CHECK-NEXT: v_lshlrev_b64 v[18:19], v34, v[18:19]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[20:21], v35, v[20:21]
+; CHECK-NEXT: flat_load_dwordx4 v[32:35], v[48:49] offset:96
+; CHECK-NEXT: v_lshlrev_b64 v[22:23], v37, v[22:23]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[35:38], v[48:49] offset:112
+; CHECK-NEXT: v_lshlrev_b64 v[24:25], v32, v[24:25]
+; CHECK-NEXT: v_lshlrev_b64 v[26:27], v34, v[26:27]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[28:29], v35, v[28:29]
+; CHECK-NEXT: v_lshlrev_b64 v[30:31], v37, v[30:31]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %shift.amt = load <16 x i64>, ptr %arg1.ptr, !range !0
+ %shl = shl <16 x i64> %arg0, %shift.amt
+ ret <16 x i64> %shl
+}
+
!0 = !{i64 32, i64 64}
-; This case is reduced because computeKnownBits() can calculate a minimum of 32
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range with an "or X, 16"
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; These cases must not be reduced because the known minimum, 16, is not in range.
+
+define i64 @shl_or16(i64 noundef %arg0, i64 %shift_amt) {
+; CHECK-LABEL: shl_or16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v2, 16, v2
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or i64 %shift_amt, 16
+ %shl = shl i64 %arg0, %or
+ ret i64 %shl
+}
+
+define <2 x i64> @shl_v2_or16(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v2_or16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v5, 16, v6
+; CHECK-NEXT: v_or_b32_e32 v4, 16, v4
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v5, v[2:3]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <2 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <2 x i64> %arg0, %or
+ ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or16(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v3_or16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v7, 16, v10
+; CHECK-NEXT: v_or_b32_e32 v8, 16, v8
+; CHECK-NEXT: v_or_b32_e32 v6, 16, v6
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v6, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v8, v[2:3]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v7, v[4:5]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <3 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <3 x i64> %arg0, %or
+ ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or16(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v4_or16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v9, 16, v14
+; CHECK-NEXT: v_or_b32_e32 v11, 16, v12
+; CHECK-NEXT: v_or_b32_e32 v10, 16, v10
+; CHECK-NEXT: v_or_b32_e32 v8, 16, v8
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v8, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v10, v[2:3]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v11, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v9, v[6:7]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <4 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <4 x i64> %arg0, %or
+ ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or16(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v5_or16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v11, 16, v18
+; CHECK-NEXT: v_or_b32_e32 v13, 16, v16
+; CHECK-NEXT: v_or_b32_e32 v14, 16, v14
+; CHECK-NEXT: v_or_b32_e32 v12, 16, v12
+; CHECK-NEXT: v_or_b32_e32 v10, 16, v10
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v10, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v12, v[2:3]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v14, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v13, v[6:7]
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], v11, v[8:9]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <5 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <5 x i64> %arg0, %or
+ ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or16(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v8_or16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v16, 16, v16
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v16, v[0:1]
+; CHECK-NEXT: v_or_b32_e32 v16, 16, v18
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v16, v[2:3]
+; CHECK-NEXT: v_or_b32_e32 v16, 16, v20
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v16, v[4:5]
+; CHECK-NEXT: v_or_b32_e32 v16, 16, v22
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v16, v[6:7]
+; CHECK-NEXT: v_or_b32_e32 v16, 16, v24
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], v16, v[8:9]
+; CHECK-NEXT: v_or_b32_e32 v16, 16, v26
+; CHECK-NEXT: v_lshlrev_b64 v[10:11], v16, v[10:11]
+; CHECK-NEXT: v_or_b32_e32 v16, 16, v28
+; CHECK-NEXT: v_lshlrev_b64 v[12:13], v16, v[12:13]
+; CHECK-NEXT: v_or_b32_e32 v16, 16, v30
+; CHECK-NEXT: v_lshlrev_b64 v[14:15], v16, v[14:15]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <8 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <8 x i64> %arg0, %or
+ ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or16(<16 x i64> noundef %arg0, <16 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v16_or16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v31, v[0:1]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v31, v[2:3]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v31, v[4:5]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v31, v[6:7]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:36
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], v31, v[8:9]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:44
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[10:11], v31, v[10:11]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:52
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[12:13], v31, v[12:13]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[14:15], v31, v[14:15]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:68
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[16:17], v31, v[16:17]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:76
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[18:19], v31, v[18:19]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:84
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[20:21], v31, v[20:21]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:92
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[22:23], v31, v[22:23]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:100
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[24:25], v31, v[24:25]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:108
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[26:27], v31, v[26:27]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:116
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[28:29], v31, v[28:29]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v32, 16, v32
+; CHECK-NEXT: v_lshlrev_b64 v[30:31], v32, v[30:31]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <16 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <16 x i64> %arg0, %or
+ ret <16 x i64> %shl
+}
+
+; test inreg
+
+define i64 @shl_or16_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
+; CHECK-LABEL: shl_or16_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_or_b32 s4, s16, 16
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], s4, v[0:1]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or i64 %shift_amt, 16
+ %shl = shl i64 %arg0, %or
+ ret i64 %shl
+}
+
+define <2 x i64> @shl_v2_or16_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v2_or16_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_or_b32 s4, s18, 16
+; CHECK-NEXT: s_or_b32 s5, s16, 16
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], s5, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <2 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <2 x i64> %arg0, %or
+ ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or16_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v3_or16_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_or_b32 s4, s20, 16
+; CHECK-NEXT: s_or_b32 s5, s18, 16
+; CHECK-NEXT: s_or_b32 s6, s16, 16
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], s6, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], s5, v[2:3]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], s4, v[4:5]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <3 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <3 x i64> %arg0, %or
+ ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or16_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v4_or16_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_or_b32 s4, s22, 16
+; CHECK-NEXT: s_or_b32 s5, s20, 16
+; CHECK-NEXT: s_or_b32 s6, s18, 16
+; CHECK-NEXT: s_or_b32 s7, s16, 16
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], s7, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], s6, v[2:3]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], s5, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], s4, v[6:7]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <4 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <4 x i64> %arg0, %or
+ ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or16_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v5_or16_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_or_b32 s4, s24, 16
+; CHECK-NEXT: s_or_b32 s5, s22, 16
+; CHECK-NEXT: s_or_b32 s6, s20, 16
+; CHECK-NEXT: s_or_b32 s7, s18, 16
+; CHECK-NEXT: s_or_b32 s8, s16, 16
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], s8, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], s7, v[2:3]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], s6, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], s5, v[6:7]
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], s4, v[8:9]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <5 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <5 x i64> %arg0, %or
+ ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or16_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v8_or16_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v16, 16, v16
+; CHECK-NEXT: s_or_b32 s4, s28, 16
+; CHECK-NEXT: s_or_b32 s5, s26, 16
+; CHECK-NEXT: s_or_b32 s6, s24, 16
+; CHECK-NEXT: s_or_b32 s7, s22, 16
+; CHECK-NEXT: s_or_b32 s8, s20, 16
+; CHECK-NEXT: s_or_b32 s9, s18, 16
+; CHECK-NEXT: s_or_b32 s10, s16, 16
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], s10, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], s9, v[2:3]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], s8, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], s7, v[6:7]
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], s6, v[8:9]
+; CHECK-NEXT: v_lshlrev_b64 v[10:11], s5, v[10:11]
+; CHECK-NEXT: v_lshlrev_b64 v[12:13], s4, v[12:13]
+; CHECK-NEXT: v_lshlrev_b64 v[14:15], v16, v[14:15]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <8 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <8 x i64> %arg0, %or
+ ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or16_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v16_or16_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
+; CHECK-NEXT: s_or_b32 s4, s28, 16
+; CHECK-NEXT: s_or_b32 s5, s26, 16
+; CHECK-NEXT: s_or_b32 s6, s24, 16
+; CHECK-NEXT: s_or_b32 s7, s22, 16
+; CHECK-NEXT: s_or_b32 s8, s20, 16
+; CHECK-NEXT: s_or_b32 s9, s18, 16
+; CHECK-NEXT: s_or_b32 s10, s16, 16
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], s10, v[0:1]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], s9, v[2:3]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], s8, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], s7, v[6:7]
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], s6, v[8:9]
+; CHECK-NEXT: v_lshlrev_b64 v[10:11], s5, v[10:11]
+; CHECK-NEXT: v_lshlrev_b64 v[12:13], s4, v[12:13]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[14:15], v31, v[14:15]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[16:17], v31, v[16:17]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:20
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[18:19], v31, v[18:19]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[20:21], v31, v[20:21]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:36
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[22:23], v31, v[22:23]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:44
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[24:25], v31, v[24:25]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:52
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[26:27], v31, v[26:27]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
+; CHECK-NEXT: v_lshlrev_b64 v[28:29], v31, v[28:29]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v32, 16, v32
+; CHECK-NEXT: v_lshlrev_b64 v[30:31], v32, v[30:31]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <16 x i64> %shift_amt, splat (i64 16)
+ %shl = shl <16 x i64> %arg0, %or
+ ret <16 x i64> %shl
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range with an "or X, 32"
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; These cases are reduced because computeKnownBits() can calculate a minimum of 32
; based on the OR with 32.
-define i64 @shl_or32(i64 noundef %arg0, ptr %arg1.ptr) {
+
+define i64 @shl_or32(i64 noundef %arg0, i64 %shift_amt) {
; CHECK-LABEL: shl_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_load_dword v1, v[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v1
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v2
; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
- %shift.amt = load i64, ptr %arg1.ptr
- %or = or i64 %shift.amt, 32
+ %or = or i64 %shift_amt, 32
%shl = shl i64 %arg0, %or
ret i64 %shl
}
-; This case must not be reduced because the known minimum, 16, is not in range.
-define i64 @shl_or16(i64 noundef %arg0, ptr %arg1.ptr) {
-; CHECK-LABEL: shl_or16:
+define <2 x i64> @shl_v2_or32(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v2_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_load_dword v2, v[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v2, 16, v2
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v4
+; CHECK-NEXT: v_or_b32_e32 v3, 32, v6
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v3
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <2 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <2 x i64> %arg0, %or
+ ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or32(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v3_or32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v6
+; CHECK-NEXT: v_or_b32_e32 v3, 32, v8
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_or_b32_e32 v5, 32, v10
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v3
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v5
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <3 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <3 x i64> %arg0, %or
+ ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or32(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v4_or32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v8
+; CHECK-NEXT: v_or_b32_e32 v3, 32, v10
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_or_b32_e32 v5, 32, v12
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v3
+; CHECK-NEXT: v_or_b32_e32 v7, 32, v14
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v5
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v7
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, v0, v6
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v6, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <4 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <4 x i64> %arg0, %or
+ ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or32(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v5_or32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v10
+; CHECK-NEXT: v_or_b32_e32 v3, 32, v12
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_or_b32_e32 v5, 32, v14
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v3
+; CHECK-NEXT: v_or_b32_e32 v7, 32, v16
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v5
+; CHECK-NEXT: v_or_b32_e32 v9, 32, v18
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v7
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, v0, v6
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v9
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, v0, v8
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v6, 0
+; CHECK-NEXT: v_mov_b32_e32 v8, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <5 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <5 x i64> %arg0, %or
+ ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or32(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v8_or32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v16
+; CHECK-NEXT: v_or_b32_e32 v3, 32, v18
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_or_b32_e32 v5, 32, v20
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v3
+; CHECK-NEXT: v_or_b32_e32 v7, 32, v22
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v5
+; CHECK-NEXT: v_or_b32_e32 v9, 32, v24
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v7
+; CHECK-NEXT: v_or_b32_e32 v11, 32, v26
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, v0, v6
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v9
+; CHECK-NEXT: v_or_b32_e32 v13, 32, v28
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, v0, v8
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v11
+; CHECK-NEXT: v_or_b32_e32 v15, 32, v30
+; CHECK-NEXT: v_lshlrev_b32_e32 v11, v0, v10
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v13
+; CHECK-NEXT: v_lshlrev_b32_e32 v13, v0, v12
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v15
+; CHECK-NEXT: v_lshlrev_b32_e32 v15, v0, v14
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v6, 0
+; CHECK-NEXT: v_mov_b32_e32 v8, 0
+; CHECK-NEXT: v_mov_b32_e32 v10, 0
+; CHECK-NEXT: v_mov_b32_e32 v12, 0
+; CHECK-NEXT: v_mov_b32_e32 v14, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
- %shift.amt = load i64, ptr %arg1.ptr
- %or = or i64 %shift.amt, 16
+ %or = or <8 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <8 x i64> %arg0, %or
+ ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or32(<16 x i64> noundef %arg0, <16 x i64> %shift_amt) {
+; CHECK-LABEL: shl_v16_or32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4
+; CHECK-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:12
+; CHECK-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:20
+; CHECK-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:28
+; CHECK-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:60
+; CHECK-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:52
+; CHECK-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:44
+; CHECK-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:36
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v1
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_or_b32_e32 v0, 32, v3
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: v_or_b32_e32 v0, 32, v5
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_or_b32_e32 v0, 32, v7
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, v0, v6
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_or_b32_e32 v2, 32, v11
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: v_or_b32_e32 v4, 32, v13
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v6, 32, v15
+; CHECK-NEXT: v_subrev_u32_e32 v6, 32, v6
+; CHECK-NEXT: v_subrev_u32_e32 v4, 32, v4
+; CHECK-NEXT: v_subrev_u32_e32 v2, 32, v2
+; CHECK-NEXT: v_or_b32_e32 v0, 32, v9
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, v6, v8
+; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:68
+; CHECK-NEXT: v_lshlrev_b32_e32 v11, v4, v10
+; CHECK-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:76
+; CHECK-NEXT: v_lshlrev_b32_e32 v13, v2, v12
+; CHECK-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v15, v0, v14
+; CHECK-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:92
+; CHECK-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:124
+; CHECK-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:116
+; CHECK-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:108
+; CHECK-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:100
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: v_or_b32_e32 v6, 32, v6
+; CHECK-NEXT: v_subrev_u32_e32 v6, 32, v6
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_or_b32_e32 v4, 32, v4
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_or_b32_e32 v0, 32, v0
+; CHECK-NEXT: v_or_b32_e32 v2, 32, v2
+; CHECK-NEXT: v_subrev_u32_e32 v4, 32, v4
+; CHECK-NEXT: v_subrev_u32_e32 v2, 32, v2
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v17, v6, v16
+; CHECK-NEXT: v_lshlrev_b32_e32 v19, v4, v18
+; CHECK-NEXT: v_lshlrev_b32_e32 v21, v2, v20
+; CHECK-NEXT: v_lshlrev_b32_e32 v23, v0, v22
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_or_b32_e32 v0, 32, v8
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_or_b32_e32 v2, 32, v10
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: v_or_b32_e32 v4, 32, v12
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v6, 32, v14
+; CHECK-NEXT: v_subrev_u32_e32 v6, 32, v6
+; CHECK-NEXT: v_subrev_u32_e32 v4, 32, v4
+; CHECK-NEXT: v_subrev_u32_e32 v2, 32, v2
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v25, v6, v24
+; CHECK-NEXT: v_lshlrev_b32_e32 v27, v4, v26
+; CHECK-NEXT: v_lshlrev_b32_e32 v29, v2, v28
+; CHECK-NEXT: v_lshlrev_b32_e32 v31, v0, v30
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v6, 0
+; CHECK-NEXT: v_mov_b32_e32 v8, 0
+; CHECK-NEXT: v_mov_b32_e32 v10, 0
+; CHECK-NEXT: v_mov_b32_e32 v12, 0
+; CHECK-NEXT: v_mov_b32_e32 v14, 0
+; CHECK-NEXT: v_mov_b32_e32 v16, 0
+; CHECK-NEXT: v_mov_b32_e32 v18, 0
+; CHECK-NEXT: v_mov_b32_e32 v20, 0
+; CHECK-NEXT: v_mov_b32_e32 v22, 0
+; CHECK-NEXT: v_mov_b32_e32 v24, 0
+; CHECK-NEXT: v_mov_b32_e32 v26, 0
+; CHECK-NEXT: v_mov_b32_e32 v28, 0
+; CHECK-NEXT: v_mov_b32_e32 v30, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <16 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <16 x i64> %arg0, %or
+ ret <16 x i64> %shl
+}
+
+; test inreg
+
+define i64 @shl_or32_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
+; CHECK-LABEL: shl_or32_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_or_b32 s4, s16, 32
+; CHECK-NEXT: s_sub_i32 s4, s4, 32
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s4, v0
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or i64 %shift_amt, 32
%shl = shl i64 %arg0, %or
ret i64 %shl
}
+define <2 x i64> @shl_v2_or32_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v2_or32_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_or_b32 s4, s18, 32
+; CHECK-NEXT: s_or_b32 s5, s16, 32
+; CHECK-NEXT: s_sub_i32 s5, s5, 32
+; CHECK-NEXT: s_sub_i32 s4, s4, 32
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s5, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s4, v2
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <2 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <2 x i64> %arg0, %or
+ ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_or32_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v3_or32_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_or_b32 s4, s20, 32
+; CHECK-NEXT: s_or_b32 s5, s18, 32
+; CHECK-NEXT: s_or_b32 s6, s16, 32
+; CHECK-NEXT: s_sub_i32 s6, s6, 32
+; CHECK-NEXT: s_sub_i32 s5, s5, 32
+; CHECK-NEXT: s_sub_i32 s4, s4, 32
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s6, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s5, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, s4, v4
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <3 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <3 x i64> %arg0, %or
+ ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_or32_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v4_or32_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_or_b32 s4, s22, 32
+; CHECK-NEXT: s_or_b32 s5, s20, 32
+; CHECK-NEXT: s_or_b32 s6, s18, 32
+; CHECK-NEXT: s_or_b32 s7, s16, 32
+; CHECK-NEXT: s_sub_i32 s7, s7, 32
+; CHECK-NEXT: s_sub_i32 s6, s6, 32
+; CHECK-NEXT: s_sub_i32 s5, s5, 32
+; CHECK-NEXT: s_sub_i32 s4, s4, 32
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s7, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s6, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, s5, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, s4, v6
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v6, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <4 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <4 x i64> %arg0, %or
+ ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_or32_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v5_or32_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_or_b32 s4, s24, 32
+; CHECK-NEXT: s_or_b32 s5, s22, 32
+; CHECK-NEXT: s_or_b32 s6, s20, 32
+; CHECK-NEXT: s_or_b32 s7, s18, 32
+; CHECK-NEXT: s_or_b32 s8, s16, 32
+; CHECK-NEXT: s_sub_i32 s8, s8, 32
+; CHECK-NEXT: s_sub_i32 s7, s7, 32
+; CHECK-NEXT: s_sub_i32 s6, s6, 32
+; CHECK-NEXT: s_sub_i32 s5, s5, 32
+; CHECK-NEXT: s_sub_i32 s4, s4, 32
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s8, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s7, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, s6, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, s5, v6
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, s4, v8
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v6, 0
+; CHECK-NEXT: v_mov_b32_e32 v8, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <5 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <5 x i64> %arg0, %or
+ ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_or32_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v8_or32_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_or_b32 s10, s16, 32
+; CHECK-NEXT: v_or_b32_e32 v15, 32, v16
+; CHECK-NEXT: s_or_b32 s4, s28, 32
+; CHECK-NEXT: s_or_b32 s5, s26, 32
+; CHECK-NEXT: s_or_b32 s6, s24, 32
+; CHECK-NEXT: s_or_b32 s7, s22, 32
+; CHECK-NEXT: s_or_b32 s8, s20, 32
+; CHECK-NEXT: s_or_b32 s9, s18, 32
+; CHECK-NEXT: s_sub_i32 s10, s10, 32
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s10, v0
+; CHECK-NEXT: s_sub_i32 s9, s9, 32
+; CHECK-NEXT: s_sub_i32 s8, s8, 32
+; CHECK-NEXT: s_sub_i32 s7, s7, 32
+; CHECK-NEXT: s_sub_i32 s6, s6, 32
+; CHECK-NEXT: s_sub_i32 s5, s5, 32
+; CHECK-NEXT: s_sub_i32 s4, s4, 32
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v15
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s9, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, s8, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, s7, v6
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, s6, v8
+; CHECK-NEXT: v_lshlrev_b32_e32 v11, s5, v10
+; CHECK-NEXT: v_lshlrev_b32_e32 v13, s4, v12
+; CHECK-NEXT: v_lshlrev_b32_e32 v15, v0, v14
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v6, 0
+; CHECK-NEXT: v_mov_b32_e32 v8, 0
+; CHECK-NEXT: v_mov_b32_e32 v10, 0
+; CHECK-NEXT: v_mov_b32_e32 v12, 0
+; CHECK-NEXT: v_mov_b32_e32 v14, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <8 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <8 x i64> %arg0, %or
+ ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_or32_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v16_or32_inreg:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4
+; CHECK-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:12
+; CHECK-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:20
+; CHECK-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:28
+; CHECK-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:36
+; CHECK-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:44
+; CHECK-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:52
+; CHECK-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:60
+; CHECK-NEXT: s_or_b32 s7, s22, 32
+; CHECK-NEXT: s_or_b32 s8, s20, 32
+; CHECK-NEXT: s_or_b32 s9, s18, 32
+; CHECK-NEXT: s_or_b32 s10, s16, 32
+; CHECK-NEXT: s_sub_i32 s10, s10, 32
+; CHECK-NEXT: s_sub_i32 s9, s9, 32
+; CHECK-NEXT: s_sub_i32 s8, s8, 32
+; CHECK-NEXT: s_sub_i32 s7, s7, 32
+; CHECK-NEXT: s_or_b32 s4, s28, 32
+; CHECK-NEXT: s_or_b32 s5, s26, 32
+; CHECK-NEXT: s_or_b32 s6, s24, 32
+; CHECK-NEXT: s_sub_i32 s6, s6, 32
+; CHECK-NEXT: s_sub_i32 s5, s5, 32
+; CHECK-NEXT: s_sub_i32 s4, s4, 32
+; CHECK-NEXT: v_lshlrev_b32_e32 v11, s5, v10
+; CHECK-NEXT: v_lshlrev_b32_e32 v13, s4, v12
+; CHECK-NEXT: v_mov_b32_e32 v10, 0
+; CHECK-NEXT: v_mov_b32_e32 v12, 0
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v1
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v15, v1, v14
+; CHECK-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:68
+; CHECK-NEXT: s_waitcnt vmcnt(7)
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v3
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v17, v1, v16
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v5
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v19, v1, v18
+; CHECK-NEXT: s_waitcnt vmcnt(5)
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v7
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v21, v1, v20
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_or_b32_e32 v1, 32, v9
+; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v23, v1, v22
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s10, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s9, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, s8, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, s7, v6
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: v_or_b32_e32 v2, 32, v29
+; CHECK-NEXT: v_or_b32_e32 v4, 32, v27
+; CHECK-NEXT: v_or_b32_e32 v6, 32, v25
+; CHECK-NEXT: v_subrev_u32_e32 v6, 32, v6
+; CHECK-NEXT: v_subrev_u32_e32 v4, 32, v4
+; CHECK-NEXT: v_subrev_u32_e32 v2, 32, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, s6, v8
+; CHECK-NEXT: v_lshlrev_b32_e32 v25, v6, v24
+; CHECK-NEXT: v_lshlrev_b32_e32 v27, v4, v26
+; CHECK-NEXT: v_lshlrev_b32_e32 v29, v2, v28
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v6, 0
+; CHECK-NEXT: v_mov_b32_e32 v8, 0
+; CHECK-NEXT: v_mov_b32_e32 v16, 0
+; CHECK-NEXT: v_mov_b32_e32 v18, 0
+; CHECK-NEXT: v_mov_b32_e32 v20, 0
+; CHECK-NEXT: v_mov_b32_e32 v22, 0
+; CHECK-NEXT: v_mov_b32_e32 v24, 0
+; CHECK-NEXT: v_mov_b32_e32 v26, 0
+; CHECK-NEXT: v_mov_b32_e32 v28, 0
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v0, 32, v14
+; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v31, v0, v30
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v14, 0
+; CHECK-NEXT: v_mov_b32_e32 v30, 0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %or = or <16 x i64> %shift_amt, splat (i64 32)
+ %shl = shl <16 x i64> %arg0, %or
+ ret <16 x i64> %shl
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Test range from max/min
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
; FIXME: This case should be reduced too, but computeKnownBits() cannot
; determine the range. Match current results for now.
+
define i64 @shl_maxmin(i64 noundef %arg0, i64 noundef %arg1) {
; CHECK-LABEL: shl_maxmin:
; CHECK: ; %bb.0:
@@ -80,3 +1060,412 @@ define i64 @shl_maxmin(i64 noundef %arg0, i64 noundef %arg1) {
%shl = shl i64 %arg0, %min
ret i64 %shl
}
+
+define <2 x i64> @shl_v2_maxmin(<2 x i64> noundef %arg0, <2 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v2_maxmin:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v4, 32, v4, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v7, 0, v7, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v6, 32, v6, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, 63, v6, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v6, v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v4, 63, v4, vcc
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %max = call <2 x i64> @llvm.umax.i64(<2 x i64> %arg1, <2 x i64> splat (i64 32))
+ %min = call <2 x i64> @llvm.umin.i64(<2 x i64> %max, <2 x i64> splat (i64 63))
+ %shl = shl <2 x i64> %arg0, %min
+ ret <2 x i64> %shl
+}
+
+define <3 x i64> @shl_v3_maxmin(<3 x i64> noundef %arg0, <3 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v3_maxmin:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v7, 0, v7, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v6, 32, v6, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[8:9]
+; CHECK-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v8, 32, v8, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[10:11]
+; CHECK-NEXT: v_cndmask_b32_e32 v11, 0, v11, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v10, 32, v10, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[10:11]
+; CHECK-NEXT: v_cndmask_b32_e32 v10, 63, v10, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[8:9]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v10, v[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v8, 63, v8, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[6:7]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v8, v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v6, 63, v6, vcc
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v6, v[0:1]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %max = call <3 x i64> @llvm.umax.i64(<3 x i64> %arg1, <3 x i64> splat (i64 32))
+ %min = call <3 x i64> @llvm.umin.i64(<3 x i64> %max, <3 x i64> splat (i64 63))
+ %shl = shl <3 x i64> %arg0, %min
+ ret <3 x i64> %shl
+}
+
+define <4 x i64> @shl_v4_maxmin(<4 x i64> noundef %arg0, <4 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v4_maxmin:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[8:9]
+; CHECK-NEXT: v_cndmask_b32_e32 v9, 0, v9, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v8, 32, v8, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[10:11]
+; CHECK-NEXT: v_cndmask_b32_e32 v11, 0, v11, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v10, 32, v10, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[12:13]
+; CHECK-NEXT: v_cndmask_b32_e32 v13, 0, v13, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v12, 32, v12, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[14:15]
+; CHECK-NEXT: v_cndmask_b32_e32 v15, 0, v15, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v14, 32, v14, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[14:15]
+; CHECK-NEXT: v_cndmask_b32_e32 v14, 63, v14, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[12:13]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v14, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v12, 63, v12, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[10:11]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v12, v[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v10, 63, v10, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[8:9]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v10, v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v8, 63, v8, vcc
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v8, v[0:1]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %max = call <4 x i64> @llvm.umax.i64(<4 x i64> %arg1, <4 x i64> splat (i64 32))
+ %min = call <4 x i64> @llvm.umin.i64(<4 x i64> %max, <4 x i64> splat (i64 63))
+ %shl = shl <4 x i64> %arg0, %min
+ ret <4 x i64> %shl
+}
+
+define <5 x i64> @shl_v5_maxmin(<5 x i64> noundef %arg0, <5 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v5_maxmin:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[10:11]
+; CHECK-NEXT: v_cndmask_b32_e32 v11, 0, v11, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v10, 32, v10, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[12:13]
+; CHECK-NEXT: v_cndmask_b32_e32 v13, 0, v13, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v12, 32, v12, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[14:15]
+; CHECK-NEXT: v_cndmask_b32_e32 v15, 0, v15, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v14, 32, v14, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[16:17]
+; CHECK-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v16, 32, v16, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[18:19]
+; CHECK-NEXT: v_cndmask_b32_e32 v19, 0, v19, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v18, 32, v18, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[18:19]
+; CHECK-NEXT: v_cndmask_b32_e32 v18, 63, v18, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[16:17]
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], v18, v[8:9]
+; CHECK-NEXT: v_cndmask_b32_e32 v16, 63, v16, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[14:15]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v16, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v14, 63, v14, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[12:13]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v14, v[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v12, 63, v12, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[10:11]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v12, v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v10, 63, v10, vcc
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v10, v[0:1]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %max = call <5 x i64> @llvm.umax.i64(<5 x i64> %arg1, <5 x i64> splat (i64 32))
+ %min = call <5 x i64> @llvm.umin.i64(<5 x i64> %max, <5 x i64> splat (i64 63))
+ %shl = shl <5 x i64> %arg0, %min
+ ret <5 x i64> %shl
+}
+
+define <8 x i64> @shl_v8_maxmin(<8 x i64> noundef %arg0, <8 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v8_maxmin:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[16:17]
+; CHECK-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v16, 32, v16, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[18:19]
+; CHECK-NEXT: v_cndmask_b32_e32 v19, 0, v19, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v18, 32, v18, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[20:21]
+; CHECK-NEXT: v_cndmask_b32_e32 v21, 0, v21, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v20, 32, v20, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[22:23]
+; CHECK-NEXT: v_cndmask_b32_e32 v23, 0, v23, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v22, 32, v22, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[24:25]
+; CHECK-NEXT: v_cndmask_b32_e32 v25, 0, v25, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v24, 32, v24, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[26:27]
+; CHECK-NEXT: v_cndmask_b32_e32 v27, 0, v27, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v26, 32, v26, vcc
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[28:29]
+; CHECK-NEXT: v_cndmask_b32_e32 v29, 0, v29, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v28, 32, v28, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[28:29]
+; CHECK-NEXT: v_cndmask_b32_e32 v28, 63, v28, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[26:27]
+; CHECK-NEXT: v_lshlrev_b64 v[12:13], v28, v[12:13]
+; CHECK-NEXT: v_cndmask_b32_e32 v26, 63, v26, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[24:25]
+; CHECK-NEXT: v_lshlrev_b64 v[10:11], v26, v[10:11]
+; CHECK-NEXT: v_cndmask_b32_e32 v24, 63, v24, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[22:23]
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], v24, v[8:9]
+; CHECK-NEXT: v_cndmask_b32_e32 v22, 63, v22, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[20:21]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v22, v[6:7]
+; CHECK-NEXT: v_cndmask_b32_e32 v20, 63, v20, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[18:19]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v20, v[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v18, 63, v18, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[16:17]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v18, v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e32 v16, 63, v16, vcc
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v16, v[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[30:31]
+; CHECK-NEXT: v_cndmask_b32_e32 v17, 0, v31, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v16, 32, v30, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[16:17]
+; CHECK-NEXT: v_cndmask_b32_e32 v16, 63, v16, vcc
+; CHECK-NEXT: v_lshlrev_b64 v[14:15], v16, v[14:15]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %max = call <8 x i64> @llvm.umax.i64(<8 x i64> %arg1, <8 x i64> splat (i64 32))
+ %min = call <8 x i64> @llvm.umin.i64(<8 x i64> %max, <8 x i64> splat (i64 63))
+ %shl = shl <8 x i64> %arg0, %min
+ ret <8 x i64> %shl
+}
+
+define <16 x i64> @shl_v16_maxmin(<16 x i64> noundef %arg0, <16 x i64> noundef %arg1) {
+; CHECK-LABEL: shl_v16_maxmin:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
+; CHECK-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:16
+; CHECK-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:12
+; CHECK-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:24
+; CHECK-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:20
+; CHECK-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:32
+; CHECK-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:28
+; CHECK-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:36
+; CHECK-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:40
+; CHECK-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:48
+; CHECK-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:44
+; CHECK-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56
+; CHECK-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:52
+; CHECK-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:64
+; CHECK-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60
+; CHECK-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:68
+; CHECK-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:72
+; CHECK-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:80
+; CHECK-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:76
+; CHECK-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:88
+; CHECK-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:84
+; CHECK-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:96
+; CHECK-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:92
+; CHECK-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:100
+; CHECK-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:104
+; CHECK-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:112
+; CHECK-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:108
+; CHECK-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:120
+; CHECK-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:116
+; CHECK-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:128
+; CHECK-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:124
+; CHECK-NEXT: v_mov_b32_e32 v29, v27
+; CHECK-NEXT: v_mov_b32_e32 v28, v26
+; CHECK-NEXT: v_mov_b32_e32 v27, v25
+; CHECK-NEXT: v_mov_b32_e32 v26, v24
+; CHECK-NEXT: v_mov_b32_e32 v25, v23
+; CHECK-NEXT: v_mov_b32_e32 v24, v22
+; CHECK-NEXT: v_mov_b32_e32 v23, v21
+; CHECK-NEXT: v_mov_b32_e32 v22, v20
+; CHECK-NEXT: v_mov_b32_e32 v21, v19
+; CHECK-NEXT: v_mov_b32_e32 v20, v18
+; CHECK-NEXT: v_mov_b32_e32 v19, v17
+; CHECK-NEXT: v_mov_b32_e32 v18, v16
+; CHECK-NEXT: v_mov_b32_e32 v17, v15
+; CHECK-NEXT: v_mov_b32_e32 v16, v14
+; CHECK-NEXT: v_mov_b32_e32 v15, v13
+; CHECK-NEXT: v_mov_b32_e32 v14, v12
+; CHECK-NEXT: v_mov_b32_e32 v13, v11
+; CHECK-NEXT: v_mov_b32_e32 v12, v10
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_mov_b32_e32 v9, v7
+; CHECK-NEXT: v_mov_b32_e32 v8, v6
+; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v5, v3
+; CHECK-NEXT: v_mov_b32_e32 v4, v2
+; CHECK-NEXT: v_mov_b32_e32 v3, v1
+; CHECK-NEXT: v_mov_b32_e32 v2, v0
+; CHECK-NEXT: s_waitcnt vmcnt(30)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[31:32]
+; CHECK-NEXT: v_cndmask_b32_e32 v33, 0, v32, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v32, 32, v31, vcc
+; CHECK-NEXT: s_waitcnt vmcnt(26)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[36:37]
+; CHECK-NEXT: v_cmp_lt_u64_e64 s[4:5], 32, v[34:35]
+; CHECK-NEXT: v_cndmask_b32_e32 v37, 0, v37, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v36, 32, v36, vcc
+; CHECK-NEXT: s_waitcnt vmcnt(22)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[48:49]
+; CHECK-NEXT: v_cndmask_b32_e64 v35, 0, v35, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v49, 0, v49, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v48, 32, v48, vcc
+; CHECK-NEXT: s_waitcnt vmcnt(18)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[52:53]
+; CHECK-NEXT: v_cndmask_b32_e64 v34, 32, v34, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v53, 0, v53, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v52, 32, v52, vcc
+; CHECK-NEXT: s_waitcnt vmcnt(14)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[40:41]
+; CHECK-NEXT: v_cmp_lt_u64_e64 s[4:5], 32, v[38:39]
+; CHECK-NEXT: v_cndmask_b32_e32 v41, 0, v41, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v40, 32, v40, vcc
+; CHECK-NEXT: s_waitcnt vmcnt(12)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[42:43]
+; CHECK-NEXT: v_cndmask_b32_e64 v39, 0, v39, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v43, 0, v43, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v42, 32, v42, vcc
+; CHECK-NEXT: s_waitcnt vmcnt(10)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[44:45]
+; CHECK-NEXT: v_cndmask_b32_e64 v38, 32, v38, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v45, 0, v45, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v44, 32, v44, vcc
+; CHECK-NEXT: s_waitcnt vmcnt(8)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[46:47]
+; CHECK-NEXT: v_cmp_lt_u64_e64 s[4:5], 32, v[50:51]
+; CHECK-NEXT: v_cndmask_b32_e32 v47, 0, v47, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v46, 32, v46, vcc
+; CHECK-NEXT: s_waitcnt vmcnt(6)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[56:57]
+; CHECK-NEXT: v_cndmask_b32_e64 v51, 0, v51, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v1, 0, v57, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v0, 32, v56, vcc
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[58:59]
+; CHECK-NEXT: v_cndmask_b32_e64 v50, 32, v50, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v59, 0, v59, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v58, 32, v58, vcc
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[60:61]
+; CHECK-NEXT: v_cmp_lt_u64_e64 s[4:5], 32, v[54:55]
+; CHECK-NEXT: v_cndmask_b32_e32 v61, 0, v61, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v60, 32, v60, vcc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[62:63]
+; CHECK-NEXT: v_cndmask_b32_e64 v55, 0, v55, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v57, 0, v63, vcc
+; CHECK-NEXT: v_cndmask_b32_e32 v56, 32, v62, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[56:57]
+; CHECK-NEXT: v_cndmask_b32_e64 v54, 32, v54, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e32 v56, 63, v56, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[60:61]
+; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; CHECK-NEXT: v_cndmask_b32_e32 v57, 63, v60, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[58:59]
+; CHECK-NEXT: v_cndmask_b32_e32 v58, 63, v58, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[0:1]
+; CHECK-NEXT: v_cndmask_b32_e32 v59, 63, v0, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[46:47]
+; CHECK-NEXT: v_cndmask_b32_e32 v46, 63, v46, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[44:45]
+; CHECK-NEXT: v_cndmask_b32_e32 v44, 63, v44, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[42:43]
+; CHECK-NEXT: v_cndmask_b32_e32 v42, 63, v42, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[40:41]
+; CHECK-NEXT: v_cndmask_b32_e32 v40, 63, v40, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[54:55]
+; CHECK-NEXT: v_cndmask_b32_e32 v54, 63, v54, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[52:53]
+; CHECK-NEXT: v_cndmask_b32_e32 v52, 63, v52, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[50:51]
+; CHECK-NEXT: v_cndmask_b32_e32 v50, 63, v50, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[48:49]
+; CHECK-NEXT: v_cndmask_b32_e32 v48, 63, v48, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[38:39]
+; CHECK-NEXT: v_cndmask_b32_e32 v38, 63, v38, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[36:37]
+; CHECK-NEXT: v_cndmask_b32_e32 v36, 63, v36, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[34:35]
+; CHECK-NEXT: v_cndmask_b32_e32 v34, 63, v34, vcc
+; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[32:33]
+; CHECK-NEXT: v_cndmask_b32_e32 v0, 63, v32, vcc
+; CHECK-NEXT: v_lshlrev_b64 v[0:1], v0, v[2:3]
+; CHECK-NEXT: v_lshlrev_b64 v[2:3], v34, v[4:5]
+; CHECK-NEXT: v_lshlrev_b64 v[4:5], v36, v[6:7]
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v38, v[8:9]
+; CHECK-NEXT: v_lshlrev_b64 v[8:9], v48, v[10:11]
+; CHECK-NEXT: v_lshlrev_b64 v[10:11], v50, v[12:13]
+; CHECK-NEXT: v_lshlrev_b64 v[12:13], v52, v[14:15]
+; CHECK-NEXT: v_lshlrev_b64 v[14:15], v54, v[16:17]
+; CHECK-NEXT: v_lshlrev_b64 v[16:17], v40, v[18:19]
+; CHECK-NEXT: v_lshlrev_b64 v[18:19], v42, v[20:21]
+; CHECK-NEXT: v_lshlrev_b64 v[20:21], v44, v[22:23]
+; CHECK-NEXT: v_lshlrev_b64 v[22:23], v46, v[24:25]
+; CHECK-NEXT: v_lshlrev_b64 v[24:25], v59, v[26:27]
+; CHECK-NEXT: v_lshlrev_b64 v[26:27], v58, v[28:29]
+; CHECK-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshlrev_b64 v[30:31], v56, v[30:31]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshlrev_b64 v[28:29], v57, v[28:29]
+; CHECK-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %max = call <16 x i64> @llvm.umax.i64(<16 x i64> %arg1, <16 x i64> splat (i64 32))
+ %min = call <16 x i64> @llvm.umin.i64(<16 x i64> %max, <16 x i64> splat (i64 63))
+ %shl = shl <16 x i64> %arg0, %min
+ ret <16 x i64> %shl
+}
>From 862c784132611675e09703a24b1956255dd5b1c3 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 6 Feb 2025 15:29:13 -0600
Subject: [PATCH 04/12] Use & instead of sub to adjust shift
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 10 +-
llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 399 ++++++------------
2 files changed, 128 insertions(+), 281 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d7c004e1308c7..33e6b64cddcd7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4047,16 +4047,16 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
SelectionDAG &DAG = DCI.DAG;
if (!CRHS) {
- // shl i64 X, Y -> [0, shl i32 X, (Y - 32)]
+ // shl i64 X, Y -> [0, shl i32 X, (Y & 0x1F)]
if (VT == MVT::i64) {
KnownBits Known = DAG.computeKnownBits(RHS);
if (Known.getMinValue().getZExtValue() >= 32) {
SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, RHS);
- const SDValue C32 = DAG.getConstant(32, SL, MVT::i32);
- SDValue ShiftAmt =
- DAG.getNode(ISD::SUB, SL, MVT::i32, truncShiftAmt, C32);
+ const SDValue C31 = DAG.getConstant(31, SL, MVT::i32);
+ SDValue MaskedShiftAmt =
+ DAG.getNode(ISD::AND, SL, MVT::i32, truncShiftAmt, C31);
SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
- SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
+ SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, MaskedShiftAmt);
const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index a49694d8da5a8..c3d11d632df8d 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -5,7 +5,7 @@
;;
;; where Y is in the range [63-32] to:
;;
-;; DST = [0, shl i32 X, (Y - 32)]
+;; DST = [0, shl i32 X, (Y & 0x1F)]
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
@@ -541,9 +541,7 @@ define i64 @shl_or32(i64 noundef %arg0, i64 %shift_amt) {
; CHECK-LABEL: shl_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v2
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v2, v0
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or i64 %shift_amt, 32
@@ -555,12 +553,8 @@ define <2 x i64> @shl_v2_or32(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
; CHECK-LABEL: shl_v2_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v4
-; CHECK-NEXT: v_or_b32_e32 v3, 32, v6
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v3
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v4, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v6, v2
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -573,15 +567,9 @@ define <3 x i64> @shl_v3_or32(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
; CHECK-LABEL: shl_v3_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v6
-; CHECK-NEXT: v_or_b32_e32 v3, 32, v8
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT: v_or_b32_e32 v5, 32, v10
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v3
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v5
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, v0, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v6, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v8, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, v10, v4
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v4, 0
@@ -595,18 +583,10 @@ define <4 x i64> @shl_v4_or32(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
; CHECK-LABEL: shl_v4_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v8
-; CHECK-NEXT: v_or_b32_e32 v3, 32, v10
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT: v_or_b32_e32 v5, 32, v12
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v3
-; CHECK-NEXT: v_or_b32_e32 v7, 32, v14
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v5
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, v0, v4
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v7
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, v0, v6
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v8, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v10, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, v12, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, v14, v6
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v4, 0
@@ -621,21 +601,11 @@ define <5 x i64> @shl_v5_or32(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
; CHECK-LABEL: shl_v5_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v10
-; CHECK-NEXT: v_or_b32_e32 v3, 32, v12
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT: v_or_b32_e32 v5, 32, v14
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v3
-; CHECK-NEXT: v_or_b32_e32 v7, 32, v16
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v5
-; CHECK-NEXT: v_or_b32_e32 v9, 32, v18
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, v0, v4
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v7
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, v0, v6
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v9
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, v0, v8
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v10, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v12, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, v14, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, v16, v6
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, v18, v8
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v4, 0
@@ -651,30 +621,14 @@ define <8 x i64> @shl_v8_or32(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
; CHECK-LABEL: shl_v8_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v16
-; CHECK-NEXT: v_or_b32_e32 v3, 32, v18
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT: v_or_b32_e32 v5, 32, v20
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v3
-; CHECK-NEXT: v_or_b32_e32 v7, 32, v22
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v5
-; CHECK-NEXT: v_or_b32_e32 v9, 32, v24
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, v0, v4
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v7
-; CHECK-NEXT: v_or_b32_e32 v11, 32, v26
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, v0, v6
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v9
-; CHECK-NEXT: v_or_b32_e32 v13, 32, v28
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, v0, v8
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v11
-; CHECK-NEXT: v_or_b32_e32 v15, 32, v30
-; CHECK-NEXT: v_lshlrev_b32_e32 v11, v0, v10
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v13
-; CHECK-NEXT: v_lshlrev_b32_e32 v13, v0, v12
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v15
-; CHECK-NEXT: v_lshlrev_b32_e32 v15, v0, v14
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, v16, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v18, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, v20, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, v22, v6
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, v24, v8
+; CHECK-NEXT: v_lshlrev_b32_e32 v11, v26, v10
+; CHECK-NEXT: v_lshlrev_b32_e32 v13, v28, v12
+; CHECK-NEXT: v_lshlrev_b32_e32 v15, v30, v14
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v4, 0
@@ -697,93 +651,63 @@ define <16 x i64> @shl_v16_or32(<16 x i64> noundef %arg0, <16 x i64> %shift_amt)
; CHECK-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:12
; CHECK-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:20
; CHECK-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:28
-; CHECK-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:60
-; CHECK-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:52
-; CHECK-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:44
-; CHECK-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:36
-; CHECK-NEXT: s_waitcnt vmcnt(7)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v1
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: s_waitcnt vmcnt(3)
; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT: s_waitcnt vmcnt(6)
-; CHECK-NEXT: v_or_b32_e32 v0, 32, v3
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, v0, v2
-; CHECK-NEXT: s_waitcnt vmcnt(5)
-; CHECK-NEXT: v_or_b32_e32 v0, 32, v5
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, v0, v4
-; CHECK-NEXT: s_waitcnt vmcnt(4)
-; CHECK-NEXT: v_or_b32_e32 v0, 32, v7
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, v0, v6
-; CHECK-NEXT: s_waitcnt vmcnt(2)
-; CHECK-NEXT: v_or_b32_e32 v2, 32, v11
-; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: v_or_b32_e32 v4, 32, v13
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v6, 32, v15
-; CHECK-NEXT: v_subrev_u32_e32 v6, 32, v6
-; CHECK-NEXT: v_subrev_u32_e32 v4, 32, v4
-; CHECK-NEXT: v_subrev_u32_e32 v2, 32, v2
-; CHECK-NEXT: v_or_b32_e32 v0, 32, v9
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, v6, v8
-; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:68
-; CHECK-NEXT: v_lshlrev_b32_e32 v11, v4, v10
-; CHECK-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:76
-; CHECK-NEXT: v_lshlrev_b32_e32 v13, v2, v12
-; CHECK-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v15, v0, v14
-; CHECK-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:92
-; CHECK-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:124
-; CHECK-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:116
-; CHECK-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:108
-; CHECK-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:100
-; CHECK-NEXT: s_waitcnt vmcnt(7)
-; CHECK-NEXT: v_or_b32_e32 v6, 32, v6
-; CHECK-NEXT: v_subrev_u32_e32 v6, 32, v6
-; CHECK-NEXT: s_waitcnt vmcnt(6)
-; CHECK-NEXT: v_or_b32_e32 v4, 32, v4
-; CHECK-NEXT: s_waitcnt vmcnt(4)
-; CHECK-NEXT: v_or_b32_e32 v0, 32, v0
-; CHECK-NEXT: v_or_b32_e32 v2, 32, v2
-; CHECK-NEXT: v_subrev_u32_e32 v4, 32, v4
-; CHECK-NEXT: v_subrev_u32_e32 v2, 32, v2
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v17, v6, v16
-; CHECK-NEXT: v_lshlrev_b32_e32 v19, v4, v18
-; CHECK-NEXT: v_lshlrev_b32_e32 v21, v2, v20
-; CHECK-NEXT: v_lshlrev_b32_e32 v23, v0, v22
+; CHECK-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_or_b32_e32 v0, 32, v8
-; CHECK-NEXT: s_waitcnt vmcnt(2)
-; CHECK-NEXT: v_or_b32_e32 v2, 32, v10
-; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: v_or_b32_e32 v4, 32, v12
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v6, 32, v14
-; CHECK-NEXT: v_subrev_u32_e32 v6, 32, v6
-; CHECK-NEXT: v_subrev_u32_e32 v4, 32, v4
-; CHECK-NEXT: v_subrev_u32_e32 v2, 32, v2
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v25, v6, v24
-; CHECK-NEXT: v_lshlrev_b32_e32 v27, v4, v26
-; CHECK-NEXT: v_lshlrev_b32_e32 v29, v2, v28
-; CHECK-NEXT: v_lshlrev_b32_e32 v31, v0, v30
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
-; CHECK-NEXT: v_mov_b32_e32 v6, 0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, v3, v2
+; CHECK-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:44
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, v5, v4
+; CHECK-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:52
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, v7, v6
+; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:60
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, v0, v8
+; CHECK-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:68
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v11, v2, v10
+; CHECK-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v13, v4, v12
+; CHECK-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:84
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v15, v6, v14
+; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:92
; CHECK-NEXT: v_mov_b32_e32 v8, 0
; CHECK-NEXT: v_mov_b32_e32 v10, 0
; CHECK-NEXT: v_mov_b32_e32 v12, 0
; CHECK-NEXT: v_mov_b32_e32 v14, 0
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v17, v0, v16
+; CHECK-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:100
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v19, v2, v18
; CHECK-NEXT: v_mov_b32_e32 v16, 0
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshlrev_b32_e32 v21, v4, v20
+; CHECK-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116
+; CHECK-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:108
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v23, v6, v22
+; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:124
; CHECK-NEXT: v_mov_b32_e32 v18, 0
; CHECK-NEXT: v_mov_b32_e32 v20, 0
; CHECK-NEXT: v_mov_b32_e32 v22, 0
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v25, v0, v24
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v24, 0
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshlrev_b32_e32 v29, v2, v28
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: v_lshlrev_b32_e32 v27, v4, v26
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshlrev_b32_e32 v31, v6, v30
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v6, 0
; CHECK-NEXT: v_mov_b32_e32 v26, 0
; CHECK-NEXT: v_mov_b32_e32 v28, 0
; CHECK-NEXT: v_mov_b32_e32 v30, 0
@@ -799,9 +723,7 @@ define i64 @shl_or32_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
; CHECK-LABEL: shl_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_or_b32 s4, s16, 32
-; CHECK-NEXT: s_sub_i32 s4, s4, 32
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s4, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or i64 %shift_amt, 32
@@ -813,12 +735,8 @@ define <2 x i64> @shl_v2_or32_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %sh
; CHECK-LABEL: shl_v2_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_or_b32 s4, s18, 32
-; CHECK-NEXT: s_or_b32 s5, s16, 32
-; CHECK-NEXT: s_sub_i32 s5, s5, 32
-; CHECK-NEXT: s_sub_i32 s4, s4, 32
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s5, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s4, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -831,15 +749,9 @@ define <3 x i64> @shl_v3_or32_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %sh
; CHECK-LABEL: shl_v3_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_or_b32 s4, s20, 32
-; CHECK-NEXT: s_or_b32 s5, s18, 32
-; CHECK-NEXT: s_or_b32 s6, s16, 32
-; CHECK-NEXT: s_sub_i32 s6, s6, 32
-; CHECK-NEXT: s_sub_i32 s5, s5, 32
-; CHECK-NEXT: s_sub_i32 s4, s4, 32
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s6, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s5, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, s4, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, s20, v4
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v4, 0
@@ -853,18 +765,10 @@ define <4 x i64> @shl_v4_or32_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %sh
; CHECK-LABEL: shl_v4_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_or_b32 s4, s22, 32
-; CHECK-NEXT: s_or_b32 s5, s20, 32
-; CHECK-NEXT: s_or_b32 s6, s18, 32
-; CHECK-NEXT: s_or_b32 s7, s16, 32
-; CHECK-NEXT: s_sub_i32 s7, s7, 32
-; CHECK-NEXT: s_sub_i32 s6, s6, 32
-; CHECK-NEXT: s_sub_i32 s5, s5, 32
-; CHECK-NEXT: s_sub_i32 s4, s4, 32
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s7, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s6, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, s5, v4
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, s4, v6
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, s22, v6
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v4, 0
@@ -879,21 +783,11 @@ define <5 x i64> @shl_v5_or32_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %sh
; CHECK-LABEL: shl_v5_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_or_b32 s4, s24, 32
-; CHECK-NEXT: s_or_b32 s5, s22, 32
-; CHECK-NEXT: s_or_b32 s6, s20, 32
-; CHECK-NEXT: s_or_b32 s7, s18, 32
-; CHECK-NEXT: s_or_b32 s8, s16, 32
-; CHECK-NEXT: s_sub_i32 s8, s8, 32
-; CHECK-NEXT: s_sub_i32 s7, s7, 32
-; CHECK-NEXT: s_sub_i32 s6, s6, 32
-; CHECK-NEXT: s_sub_i32 s5, s5, 32
-; CHECK-NEXT: s_sub_i32 s4, s4, 32
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s8, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s7, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, s6, v4
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, s5, v6
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, s4, v8
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, s22, v6
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, s24, v8
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v4, 0
@@ -909,30 +803,14 @@ define <8 x i64> @shl_v8_or32_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %sh
; CHECK-LABEL: shl_v8_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_or_b32 s10, s16, 32
-; CHECK-NEXT: v_or_b32_e32 v15, 32, v16
-; CHECK-NEXT: s_or_b32 s4, s28, 32
-; CHECK-NEXT: s_or_b32 s5, s26, 32
-; CHECK-NEXT: s_or_b32 s6, s24, 32
-; CHECK-NEXT: s_or_b32 s7, s22, 32
-; CHECK-NEXT: s_or_b32 s8, s20, 32
-; CHECK-NEXT: s_or_b32 s9, s18, 32
-; CHECK-NEXT: s_sub_i32 s10, s10, 32
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s10, v0
-; CHECK-NEXT: s_sub_i32 s9, s9, 32
-; CHECK-NEXT: s_sub_i32 s8, s8, 32
-; CHECK-NEXT: s_sub_i32 s7, s7, 32
-; CHECK-NEXT: s_sub_i32 s6, s6, 32
-; CHECK-NEXT: s_sub_i32 s5, s5, 32
-; CHECK-NEXT: s_sub_i32 s4, s4, 32
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v15
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s9, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, s8, v4
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, s7, v6
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, s6, v8
-; CHECK-NEXT: v_lshlrev_b32_e32 v11, s5, v10
-; CHECK-NEXT: v_lshlrev_b32_e32 v13, s4, v12
-; CHECK-NEXT: v_lshlrev_b32_e32 v15, v0, v14
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, s22, v6
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, s24, v8
+; CHECK-NEXT: v_lshlrev_b32_e32 v11, s26, v10
+; CHECK-NEXT: v_lshlrev_b32_e32 v13, s28, v12
+; CHECK-NEXT: v_lshlrev_b32_e32 v15, v16, v14
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v4, 0
@@ -955,68 +833,43 @@ define <16 x i64> @shl_v16_or32_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg
; CHECK-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:12
; CHECK-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:20
; CHECK-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:28
-; CHECK-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:36
-; CHECK-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:44
-; CHECK-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:52
-; CHECK-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:60
-; CHECK-NEXT: s_or_b32 s7, s22, 32
-; CHECK-NEXT: s_or_b32 s8, s20, 32
-; CHECK-NEXT: s_or_b32 s9, s18, 32
-; CHECK-NEXT: s_or_b32 s10, s16, 32
-; CHECK-NEXT: s_sub_i32 s10, s10, 32
-; CHECK-NEXT: s_sub_i32 s9, s9, 32
-; CHECK-NEXT: s_sub_i32 s8, s8, 32
-; CHECK-NEXT: s_sub_i32 s7, s7, 32
-; CHECK-NEXT: s_or_b32 s4, s28, 32
-; CHECK-NEXT: s_or_b32 s5, s26, 32
-; CHECK-NEXT: s_or_b32 s6, s24, 32
-; CHECK-NEXT: s_sub_i32 s6, s6, 32
-; CHECK-NEXT: s_sub_i32 s5, s5, 32
-; CHECK-NEXT: s_sub_i32 s4, s4, 32
-; CHECK-NEXT: v_lshlrev_b32_e32 v11, s5, v10
-; CHECK-NEXT: v_lshlrev_b32_e32 v13, s4, v12
+; CHECK-NEXT: v_lshlrev_b32_e32 v9, s24, v8
+; CHECK-NEXT: v_lshlrev_b32_e32 v11, s26, v10
+; CHECK-NEXT: v_lshlrev_b32_e32 v13, s28, v12
+; CHECK-NEXT: v_mov_b32_e32 v8, 0
; CHECK-NEXT: v_mov_b32_e32 v10, 0
; CHECK-NEXT: v_mov_b32_e32 v12, 0
-; CHECK-NEXT: s_waitcnt vmcnt(7)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v1
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
+; CHECK-NEXT: s_waitcnt vmcnt(3)
; CHECK-NEXT: v_lshlrev_b32_e32 v15, v1, v14
-; CHECK-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:68
-; CHECK-NEXT: s_waitcnt vmcnt(7)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v3
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT: v_lshlrev_b32_e32 v17, v1, v16
-; CHECK-NEXT: s_waitcnt vmcnt(6)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v5
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT: v_lshlrev_b32_e32 v19, v1, v18
-; CHECK-NEXT: s_waitcnt vmcnt(5)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v7
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT: v_lshlrev_b32_e32 v21, v1, v20
+; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:36
+; CHECK-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:44
; CHECK-NEXT: s_waitcnt vmcnt(4)
-; CHECK-NEXT: v_or_b32_e32 v1, 32, v9
-; CHECK-NEXT: v_subrev_u32_e32 v1, 32, v1
-; CHECK-NEXT: v_lshlrev_b32_e32 v23, v1, v22
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s10, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s9, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, s8, v4
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, s7, v6
-; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: v_or_b32_e32 v2, 32, v29
-; CHECK-NEXT: v_or_b32_e32 v4, 32, v27
-; CHECK-NEXT: v_or_b32_e32 v6, 32, v25
-; CHECK-NEXT: v_subrev_u32_e32 v6, 32, v6
-; CHECK-NEXT: v_subrev_u32_e32 v4, 32, v4
-; CHECK-NEXT: v_subrev_u32_e32 v2, 32, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, s6, v8
-; CHECK-NEXT: v_lshlrev_b32_e32 v25, v6, v24
-; CHECK-NEXT: v_lshlrev_b32_e32 v27, v4, v26
-; CHECK-NEXT: v_lshlrev_b32_e32 v29, v2, v28
+; CHECK-NEXT: v_lshlrev_b32_e32 v17, v3, v16
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v19, v5, v18
+; CHECK-NEXT: s_waitcnt vmcnt(2)
+; CHECK-NEXT: v_lshlrev_b32_e32 v21, v7, v20
+; CHECK-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:68
+; CHECK-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:60
+; CHECK-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:52
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, s22, v6
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v4, 0
; CHECK-NEXT: v_mov_b32_e32 v6, 0
-; CHECK-NEXT: v_mov_b32_e32 v8, 0
+; CHECK-NEXT: s_waitcnt vmcnt(4)
+; CHECK-NEXT: v_lshlrev_b32_e32 v23, v1, v22
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT: s_waitcnt vmcnt(3)
+; CHECK-NEXT: v_lshlrev_b32_e32 v25, v14, v24
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v14, 0
+; CHECK-NEXT: s_waitcnt vmcnt(1)
+; CHECK-NEXT: v_lshlrev_b32_e32 v29, v16, v28
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshlrev_b32_e32 v27, v18, v26
+; CHECK-NEXT: v_lshlrev_b32_e32 v31, v20, v30
; CHECK-NEXT: v_mov_b32_e32 v16, 0
; CHECK-NEXT: v_mov_b32_e32 v18, 0
; CHECK-NEXT: v_mov_b32_e32 v20, 0
@@ -1024,12 +877,6 @@ define <16 x i64> @shl_v16_or32_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg
; CHECK-NEXT: v_mov_b32_e32 v24, 0
; CHECK-NEXT: v_mov_b32_e32 v26, 0
; CHECK-NEXT: v_mov_b32_e32 v28, 0
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v0, 32, v14
-; CHECK-NEXT: v_subrev_u32_e32 v0, 32, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v31, v0, v30
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: v_mov_b32_e32 v14, 0
; CHECK-NEXT: v_mov_b32_e32 v30, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <16 x i64> %shift_amt, splat (i64 32)
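For context on patch 04: once the shift amount is known to lie in [32, 63], subtracting 32 and masking with 31 produce the same 32-bit shift amount, so the combine can emit an AND instead of a SUB. A minimal standalone sketch of that identity, independent of the LLVM sources:

  #include <cassert>
  #include <cstdint>

  // For every in-range amount [32, 63], the two ways of forming the
  // 32-bit shift amount agree: (amt - 32) == (amt & 31).
  int main() {
    for (uint64_t Amt = 32; Amt <= 63; ++Amt)
      assert((Amt - 32) == (Amt & 31));
    return 0;
  }

The payoff is visible in the updated checks above: the or/subrev sequence disappears and the tests match a bare shift (e.g. v_lshlrev_b32_e32 v1, v2, v0), since the masked form folds away where the subtract did not.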
>From 40e6f589f1df8b6d3f71ae8a197742cc77ca8738 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 7 Feb 2025 08:38:33 -0600
Subject: [PATCH 05/12] Remove noundef from shift data parameter
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 84 ++++++++++++------------
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index c3d11d632df8d..99062b1f9ae79 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -17,7 +17,7 @@
; determine the minimum from metadata in this case. Match current results
; for now.
-define i64 @shl_metadata(i64 noundef %arg0, ptr %arg1.ptr) {
+define i64 @shl_metadata(i64 %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_metadata:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -30,7 +30,7 @@ define i64 @shl_metadata(i64 noundef %arg0, ptr %arg1.ptr) {
ret i64 %shl
}
-define <2 x i64> @shl_v2_metadata(<2 x i64> noundef %arg0, ptr %arg1.ptr) {
+define <2 x i64> @shl_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_v2_metadata:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -44,7 +44,7 @@ define <2 x i64> @shl_v2_metadata(<2 x i64> noundef %arg0, ptr %arg1.ptr) {
ret <2 x i64> %shl
}
-define <3 x i64> @shl_v3_metadata(<3 x i64> noundef %arg0, ptr %arg1.ptr) {
+define <3 x i64> @shl_v3_metadata(<3 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_v3_metadata:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -60,7 +60,7 @@ define <3 x i64> @shl_v3_metadata(<3 x i64> noundef %arg0, ptr %arg1.ptr) {
ret <3 x i64> %shl
}
-define <4 x i64> @shl_v4_metadata(<4 x i64> noundef %arg0, ptr %arg1.ptr) {
+define <4 x i64> @shl_v4_metadata(<4 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_v4_metadata:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -79,7 +79,7 @@ define <4 x i64> @shl_v4_metadata(<4 x i64> noundef %arg0, ptr %arg1.ptr) {
ret <4 x i64> %shl
}
-define <5 x i64> @shl_v5_metadata(<5 x i64> noundef %arg0, ptr %arg1.ptr) {
+define <5 x i64> @shl_v5_metadata(<5 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_v5_metadata:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -100,7 +100,7 @@ define <5 x i64> @shl_v5_metadata(<5 x i64> noundef %arg0, ptr %arg1.ptr) {
ret <5 x i64> %shl
}
-define <8 x i64> @shl_v8_metadata(<8 x i64> noundef %arg0, ptr %arg1.ptr) {
+define <8 x i64> @shl_v8_metadata(<8 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_v8_metadata:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -126,7 +126,7 @@ define <8 x i64> @shl_v8_metadata(<8 x i64> noundef %arg0, ptr %arg1.ptr) {
ret <8 x i64> %shl
}
-define <16 x i64> @shl_v16_metadata(<16 x i64> noundef %arg0, ptr %arg1.ptr) {
+define <16 x i64> @shl_v16_metadata(<16 x i64> %arg0, ptr %arg1.ptr) {
; CHECK-LABEL: shl_v16_metadata:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -180,7 +180,7 @@ define <16 x i64> @shl_v16_metadata(<16 x i64> noundef %arg0, ptr %arg1.ptr) {
; These cases must not be reduced because the known minimum, 16, is not in range.
-define i64 @shl_or16(i64 noundef %arg0, i64 %shift_amt) {
+define i64 @shl_or16(i64 %arg0, i64 %shift_amt) {
; CHECK-LABEL: shl_or16:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -192,7 +192,7 @@ define i64 @shl_or16(i64 noundef %arg0, i64 %shift_amt) {
ret i64 %shl
}
-define <2 x i64> @shl_v2_or16(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
+define <2 x i64> @shl_v2_or16(<2 x i64> %arg0, <2 x i64> %shift_amt) {
; CHECK-LABEL: shl_v2_or16:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -206,7 +206,7 @@ define <2 x i64> @shl_v2_or16(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
ret <2 x i64> %shl
}
-define <3 x i64> @shl_v3_or16(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
+define <3 x i64> @shl_v3_or16(<3 x i64> %arg0, <3 x i64> %shift_amt) {
; CHECK-LABEL: shl_v3_or16:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -222,7 +222,7 @@ define <3 x i64> @shl_v3_or16(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
ret <3 x i64> %shl
}
-define <4 x i64> @shl_v4_or16(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
+define <4 x i64> @shl_v4_or16(<4 x i64> %arg0, <4 x i64> %shift_amt) {
; CHECK-LABEL: shl_v4_or16:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -240,7 +240,7 @@ define <4 x i64> @shl_v4_or16(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
ret <4 x i64> %shl
}
-define <5 x i64> @shl_v5_or16(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
+define <5 x i64> @shl_v5_or16(<5 x i64> %arg0, <5 x i64> %shift_amt) {
; CHECK-LABEL: shl_v5_or16:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -260,7 +260,7 @@ define <5 x i64> @shl_v5_or16(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
ret <5 x i64> %shl
}
-define <8 x i64> @shl_v8_or16(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
+define <8 x i64> @shl_v8_or16(<8 x i64> %arg0, <8 x i64> %shift_amt) {
; CHECK-LABEL: shl_v8_or16:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -286,7 +286,7 @@ define <8 x i64> @shl_v8_or16(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
ret <8 x i64> %shl
}
-define <16 x i64> @shl_v16_or16(<16 x i64> noundef %arg0, <16 x i64> %shift_amt) {
+define <16 x i64> @shl_v16_or16(<16 x i64> %arg0, <16 x i64> %shift_amt) {
; CHECK-LABEL: shl_v16_or16:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -363,7 +363,7 @@ define <16 x i64> @shl_v16_or16(<16 x i64> noundef %arg0, <16 x i64> %shift_amt)
; test inreg
-define i64 @shl_or16_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
+define i64 @shl_or16_inreg(i64 %arg0, i64 inreg %shift_amt) {
; CHECK-LABEL: shl_or16_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -375,7 +375,7 @@ define i64 @shl_or16_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
ret i64 %shl
}
-define <2 x i64> @shl_v2_or16_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %shift_amt) {
+define <2 x i64> @shl_v2_or16_inreg(<2 x i64> %arg0, <2 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v2_or16_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -389,7 +389,7 @@ define <2 x i64> @shl_v2_or16_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %sh
ret <2 x i64> %shl
}
-define <3 x i64> @shl_v3_or16_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %shift_amt) {
+define <3 x i64> @shl_v3_or16_inreg(<3 x i64> %arg0, <3 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v3_or16_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -405,7 +405,7 @@ define <3 x i64> @shl_v3_or16_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %sh
ret <3 x i64> %shl
}
-define <4 x i64> @shl_v4_or16_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %shift_amt) {
+define <4 x i64> @shl_v4_or16_inreg(<4 x i64> %arg0, <4 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v4_or16_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -423,7 +423,7 @@ define <4 x i64> @shl_v4_or16_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %sh
ret <4 x i64> %shl
}
-define <5 x i64> @shl_v5_or16_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %shift_amt) {
+define <5 x i64> @shl_v5_or16_inreg(<5 x i64> %arg0, <5 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v5_or16_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -443,7 +443,7 @@ define <5 x i64> @shl_v5_or16_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %sh
ret <5 x i64> %shl
}
-define <8 x i64> @shl_v8_or16_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %shift_amt) {
+define <8 x i64> @shl_v8_or16_inreg(<8 x i64> %arg0, <8 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v8_or16_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -469,7 +469,7 @@ define <8 x i64> @shl_v8_or16_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %sh
ret <8 x i64> %shl
}
-define <16 x i64> @shl_v16_or16_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg %shift_amt) {
+define <16 x i64> @shl_v16_or16_inreg(<16 x i64> %arg0, <16 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v16_or16_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -537,7 +537,7 @@ define <16 x i64> @shl_v16_or16_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg
; These cases are reduced because computeKnownBits() can calculate a minimum of 32
; based on the OR with 32.
-define i64 @shl_or32(i64 noundef %arg0, i64 %shift_amt) {
+define i64 @shl_or32(i64 %arg0, i64 %shift_amt) {
; CHECK-LABEL: shl_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -549,7 +549,7 @@ define i64 @shl_or32(i64 noundef %arg0, i64 %shift_amt) {
ret i64 %shl
}
-define <2 x i64> @shl_v2_or32(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
+define <2 x i64> @shl_v2_or32(<2 x i64> %arg0, <2 x i64> %shift_amt) {
; CHECK-LABEL: shl_v2_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -563,7 +563,7 @@ define <2 x i64> @shl_v2_or32(<2 x i64> noundef %arg0, <2 x i64> %shift_amt) {
ret <2 x i64> %shl
}
-define <3 x i64> @shl_v3_or32(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
+define <3 x i64> @shl_v3_or32(<3 x i64> %arg0, <3 x i64> %shift_amt) {
; CHECK-LABEL: shl_v3_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -579,7 +579,7 @@ define <3 x i64> @shl_v3_or32(<3 x i64> noundef %arg0, <3 x i64> %shift_amt) {
ret <3 x i64> %shl
}
-define <4 x i64> @shl_v4_or32(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
+define <4 x i64> @shl_v4_or32(<4 x i64> %arg0, <4 x i64> %shift_amt) {
; CHECK-LABEL: shl_v4_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -597,7 +597,7 @@ define <4 x i64> @shl_v4_or32(<4 x i64> noundef %arg0, <4 x i64> %shift_amt) {
ret <4 x i64> %shl
}
-define <5 x i64> @shl_v5_or32(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
+define <5 x i64> @shl_v5_or32(<5 x i64> %arg0, <5 x i64> %shift_amt) {
; CHECK-LABEL: shl_v5_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -617,7 +617,7 @@ define <5 x i64> @shl_v5_or32(<5 x i64> noundef %arg0, <5 x i64> %shift_amt) {
ret <5 x i64> %shl
}
-define <8 x i64> @shl_v8_or32(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
+define <8 x i64> @shl_v8_or32(<8 x i64> %arg0, <8 x i64> %shift_amt) {
; CHECK-LABEL: shl_v8_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -643,7 +643,7 @@ define <8 x i64> @shl_v8_or32(<8 x i64> noundef %arg0, <8 x i64> %shift_amt) {
ret <8 x i64> %shl
}
-define <16 x i64> @shl_v16_or32(<16 x i64> noundef %arg0, <16 x i64> %shift_amt) {
+define <16 x i64> @shl_v16_or32(<16 x i64> %arg0, <16 x i64> %shift_amt) {
; CHECK-LABEL: shl_v16_or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -719,7 +719,7 @@ define <16 x i64> @shl_v16_or32(<16 x i64> noundef %arg0, <16 x i64> %shift_amt)
; test inreg
-define i64 @shl_or32_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
+define i64 @shl_or32_inreg(i64 %arg0, i64 inreg %shift_amt) {
; CHECK-LABEL: shl_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -731,7 +731,7 @@ define i64 @shl_or32_inreg(i64 noundef %arg0, i64 inreg %shift_amt) {
ret i64 %shl
}
-define <2 x i64> @shl_v2_or32_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %shift_amt) {
+define <2 x i64> @shl_v2_or32_inreg(<2 x i64> %arg0, <2 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v2_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -745,7 +745,7 @@ define <2 x i64> @shl_v2_or32_inreg(<2 x i64> noundef %arg0, <2 x i64> inreg %sh
ret <2 x i64> %shl
}
-define <3 x i64> @shl_v3_or32_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %shift_amt) {
+define <3 x i64> @shl_v3_or32_inreg(<3 x i64> %arg0, <3 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v3_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -761,7 +761,7 @@ define <3 x i64> @shl_v3_or32_inreg(<3 x i64> noundef %arg0, <3 x i64> inreg %sh
ret <3 x i64> %shl
}
-define <4 x i64> @shl_v4_or32_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %shift_amt) {
+define <4 x i64> @shl_v4_or32_inreg(<4 x i64> %arg0, <4 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v4_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -779,7 +779,7 @@ define <4 x i64> @shl_v4_or32_inreg(<4 x i64> noundef %arg0, <4 x i64> inreg %sh
ret <4 x i64> %shl
}
-define <5 x i64> @shl_v5_or32_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %shift_amt) {
+define <5 x i64> @shl_v5_or32_inreg(<5 x i64> %arg0, <5 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v5_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -799,7 +799,7 @@ define <5 x i64> @shl_v5_or32_inreg(<5 x i64> noundef %arg0, <5 x i64> inreg %sh
ret <5 x i64> %shl
}
-define <8 x i64> @shl_v8_or32_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %shift_amt) {
+define <8 x i64> @shl_v8_or32_inreg(<8 x i64> %arg0, <8 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v8_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -825,7 +825,7 @@ define <8 x i64> @shl_v8_or32_inreg(<8 x i64> noundef %arg0, <8 x i64> inreg %sh
ret <8 x i64> %shl
}
-define <16 x i64> @shl_v16_or32_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg %shift_amt) {
+define <16 x i64> @shl_v16_or32_inreg(<16 x i64> %arg0, <16 x i64> inreg %shift_amt) {
; CHECK-LABEL: shl_v16_or32_inreg:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -891,7 +891,7 @@ define <16 x i64> @shl_v16_or32_inreg(<16 x i64> noundef %arg0, <16 x i64> inreg
; FIXME: This case should be reduced too, but computeKnownBits() cannot
; determine the range. Match current results for now.
-define i64 @shl_maxmin(i64 noundef %arg0, i64 noundef %arg1) {
+define i64 @shl_maxmin(i64 %arg0, i64 noundef %arg1) {
; CHECK-LABEL: shl_maxmin:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -908,7 +908,7 @@ define i64 @shl_maxmin(i64 noundef %arg0, i64 noundef %arg1) {
ret i64 %shl
}
-define <2 x i64> @shl_v2_maxmin(<2 x i64> noundef %arg0, <2 x i64> noundef %arg1) {
+define <2 x i64> @shl_v2_maxmin(<2 x i64> %arg0, <2 x i64> noundef %arg1) {
; CHECK-LABEL: shl_v2_maxmin:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -931,7 +931,7 @@ define <2 x i64> @shl_v2_maxmin(<2 x i64> noundef %arg0, <2 x i64> noundef %arg1
ret <2 x i64> %shl
}
-define <3 x i64> @shl_v3_maxmin(<3 x i64> noundef %arg0, <3 x i64> noundef %arg1) {
+define <3 x i64> @shl_v3_maxmin(<3 x i64> %arg0, <3 x i64> noundef %arg1) {
; CHECK-LABEL: shl_v3_maxmin:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -960,7 +960,7 @@ define <3 x i64> @shl_v3_maxmin(<3 x i64> noundef %arg0, <3 x i64> noundef %arg1
ret <3 x i64> %shl
}
-define <4 x i64> @shl_v4_maxmin(<4 x i64> noundef %arg0, <4 x i64> noundef %arg1) {
+define <4 x i64> @shl_v4_maxmin(<4 x i64> %arg0, <4 x i64> noundef %arg1) {
; CHECK-LABEL: shl_v4_maxmin:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -995,7 +995,7 @@ define <4 x i64> @shl_v4_maxmin(<4 x i64> noundef %arg0, <4 x i64> noundef %arg1
ret <4 x i64> %shl
}
-define <5 x i64> @shl_v5_maxmin(<5 x i64> noundef %arg0, <5 x i64> noundef %arg1) {
+define <5 x i64> @shl_v5_maxmin(<5 x i64> %arg0, <5 x i64> noundef %arg1) {
; CHECK-LABEL: shl_v5_maxmin:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1036,7 +1036,7 @@ define <5 x i64> @shl_v5_maxmin(<5 x i64> noundef %arg0, <5 x i64> noundef %arg1
ret <5 x i64> %shl
}
-define <8 x i64> @shl_v8_maxmin(<8 x i64> noundef %arg0, <8 x i64> noundef %arg1) {
+define <8 x i64> @shl_v8_maxmin(<8 x i64> %arg0, <8 x i64> noundef %arg1) {
; CHECK-LABEL: shl_v8_maxmin:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1097,7 +1097,7 @@ define <8 x i64> @shl_v8_maxmin(<8 x i64> noundef %arg0, <8 x i64> noundef %arg1
ret <8 x i64> %shl
}
-define <16 x i64> @shl_v16_maxmin(<16 x i64> noundef %arg0, <16 x i64> noundef %arg1) {
+define <16 x i64> @shl_v16_maxmin(<16 x i64> %arg0, <16 x i64> noundef %arg1) {
; CHECK-LABEL: shl_v16_maxmin:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
>From 4c6b76588527f94cc44e0e7f887ed53f1c0b5ed5 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 7 Feb 2025 09:25:12 -0600
Subject: [PATCH 06/12] Avoid hardcoding types and explicit constants
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 21 +++++++++++--------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 33e6b64cddcd7..bbcc8ef503efc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4050,16 +4050,19 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
// shl i64 X, Y -> [0, shl i32 X, (Y & 0x1F)]
if (VT == MVT::i64) {
KnownBits Known = DAG.computeKnownBits(RHS);
- if (Known.getMinValue().getZExtValue() >= 32) {
- SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, RHS);
- const SDValue C31 = DAG.getConstant(31, SL, MVT::i32);
+ EVT TargetType = VT.getHalfSizedIntegerVT(*DAG.getContext());
+ EVT TargetVecPairType = EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
+
+ if (Known.getMinValue().getZExtValue() >= TargetType.getSizeInBits()) {
+ SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, TargetType, RHS);
+ const SDValue ShiftMask = DAG.getConstant(TargetType.getSizeInBits() - 1, SL, TargetType);
SDValue MaskedShiftAmt =
- DAG.getNode(ISD::AND, SL, MVT::i32, truncShiftAmt, C31);
- SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
- SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, MaskedShiftAmt);
- const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
- SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
- return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
+ DAG.getNode(ISD::AND, SL, TargetType, truncShiftAmt, ShiftMask);
+ SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, LHS);
+ SDValue NewShift = DAG.getNode(ISD::SHL, SL, TargetType, Lo, MaskedShiftAmt);
+ const SDValue Zero = DAG.getConstant(0, SL, TargetType);
+ SDValue Vec = DAG.getBuildVector(TargetVecPairType, SL, {Zero, NewShift});
+ return DAG.getNode(ISD::BITCAST, SL, VT, Vec);
}
}
return SDValue();
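As a sanity check of the identity this combine relies on, the arithmetic can be verified with a small standalone C++ program (a sketch for illustration only, not part of the patch; the helper name and test values are made up): for any shift amount in [32, 63], the 64-bit shift result has a zero low word, and its high word is the low word of the source shifted left by the masked amount; on that range Y & 0x1F is the same as Y - 32.

#include <cassert>
#include <cstdint>

// Reduced form: truncate the source, do a 32-bit shift by the masked
// amount, and rebuild the 64-bit result as [0, hi].
static uint64_t shl64_reduced(uint64_t X, unsigned Amt) {
  uint32_t Lo = static_cast<uint32_t>(X);
  uint32_t Hi = Lo << (Amt & 31); // Amt & 31 == Amt - 32 for Amt in [32, 63]
  return static_cast<uint64_t>(Hi) << 32;
}

int main() {
  for (unsigned Amt = 32; Amt <= 63; ++Amt)
    for (uint64_t X : {0ULL, 1ULL, 0x0123456789abcdefULL, ~0ULL})
      assert((X << Amt) == shl64_reduced(X, Amt));
  return 0;
}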
>From 72cd29ae35abebdb719ceee6082a2f4efc80a5d5 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 7 Feb 2025 09:32:34 -0600
Subject: [PATCH 07/12] Removed testing for 5/8/16-element vectors
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 868 -----------------------
1 file changed, 868 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index 99062b1f9ae79..d666654bff7dc 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -79,99 +79,6 @@ define <4 x i64> @shl_v4_metadata(<4 x i64> %arg0, ptr %arg1.ptr) {
ret <4 x i64> %shl
}
-define <5 x i64> @shl_v5_metadata(<5 x i64> %arg0, ptr %arg1.ptr) {
-; CHECK-LABEL: shl_v5_metadata:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_load_dword v19, v[10:11] offset:32
-; CHECK-NEXT: flat_load_dwordx4 v[12:15], v[10:11]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_load_dwordx4 v[15:18], v[10:11] offset:16
-; CHECK-NEXT: ; kill: killed $vgpr10 killed $vgpr11
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], v19, v[8:9]
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], v12, v[0:1]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], v14, v[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], v15, v[4:5]
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], v17, v[6:7]
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %shift.amt = load <5 x i64>, ptr %arg1.ptr, !range !0
- %shl = shl <5 x i64> %arg0, %shift.amt
- ret <5 x i64> %shl
-}
-
-define <8 x i64> @shl_v8_metadata(<8 x i64> %arg0, ptr %arg1.ptr) {
-; CHECK-LABEL: shl_v8_metadata:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_load_dwordx4 v[18:21], v[16:17]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], v18, v[0:1]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], v20, v[2:3]
-; CHECK-NEXT: flat_load_dwordx4 v[18:21], v[16:17] offset:16
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], v18, v[4:5]
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], v20, v[6:7]
-; CHECK-NEXT: flat_load_dwordx4 v[18:21], v[16:17] offset:32
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], v18, v[8:9]
-; CHECK-NEXT: flat_load_dwordx4 v[16:19], v[16:17] offset:48
-; CHECK-NEXT: v_lshlrev_b64 v[10:11], v20, v[10:11]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b64 v[12:13], v16, v[12:13]
-; CHECK-NEXT: v_lshlrev_b64 v[14:15], v18, v[14:15]
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %shift.amt = load <8 x i64>, ptr %arg1.ptr, !range !0
- %shl = shl <8 x i64> %arg0, %shift.amt
- ret <8 x i64> %shl
-}
-
-define <16 x i64> @shl_v16_metadata(<16 x i64> %arg0, ptr %arg1.ptr) {
-; CHECK-LABEL: shl_v16_metadata:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:8
-; CHECK-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:4
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: flat_load_dwordx4 v[32:35], v[48:49]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_load_dwordx4 v[35:38], v[48:49] offset:16
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], v32, v[0:1]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], v34, v[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], v35, v[4:5]
-; CHECK-NEXT: flat_load_dwordx4 v[32:35], v[48:49] offset:32
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], v37, v[6:7]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_load_dwordx4 v[35:38], v[48:49] offset:48
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], v32, v[8:9]
-; CHECK-NEXT: v_lshlrev_b64 v[10:11], v34, v[10:11]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b64 v[12:13], v35, v[12:13]
-; CHECK-NEXT: flat_load_dwordx4 v[32:35], v[48:49] offset:64
-; CHECK-NEXT: v_lshlrev_b64 v[14:15], v37, v[14:15]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_load_dwordx4 v[35:38], v[48:49] offset:80
-; CHECK-NEXT: v_lshlrev_b64 v[16:17], v32, v[16:17]
-; CHECK-NEXT: v_lshlrev_b64 v[18:19], v34, v[18:19]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b64 v[20:21], v35, v[20:21]
-; CHECK-NEXT: flat_load_dwordx4 v[32:35], v[48:49] offset:96
-; CHECK-NEXT: v_lshlrev_b64 v[22:23], v37, v[22:23]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_load_dwordx4 v[35:38], v[48:49] offset:112
-; CHECK-NEXT: v_lshlrev_b64 v[24:25], v32, v[24:25]
-; CHECK-NEXT: v_lshlrev_b64 v[26:27], v34, v[26:27]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b64 v[28:29], v35, v[28:29]
-; CHECK-NEXT: v_lshlrev_b64 v[30:31], v37, v[30:31]
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %shift.amt = load <16 x i64>, ptr %arg1.ptr, !range !0
- %shl = shl <16 x i64> %arg0, %shift.amt
- ret <16 x i64> %shl
-}
-
!0 = !{i64 32, i64 64}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -240,127 +147,6 @@ define <4 x i64> @shl_v4_or16(<4 x i64> %arg0, <4 x i64> %shift_amt) {
ret <4 x i64> %shl
}
-define <5 x i64> @shl_v5_or16(<5 x i64> %arg0, <5 x i64> %shift_amt) {
-; CHECK-LABEL: shl_v5_or16:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v11, 16, v18
-; CHECK-NEXT: v_or_b32_e32 v13, 16, v16
-; CHECK-NEXT: v_or_b32_e32 v14, 16, v14
-; CHECK-NEXT: v_or_b32_e32 v12, 16, v12
-; CHECK-NEXT: v_or_b32_e32 v10, 16, v10
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], v10, v[0:1]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], v12, v[2:3]
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], v14, v[4:5]
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], v13, v[6:7]
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], v11, v[8:9]
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <5 x i64> %shift_amt, splat (i64 16)
- %shl = shl <5 x i64> %arg0, %or
- ret <5 x i64> %shl
-}
-
-define <8 x i64> @shl_v8_or16(<8 x i64> %arg0, <8 x i64> %shift_amt) {
-; CHECK-LABEL: shl_v8_or16:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v16, 16, v16
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], v16, v[0:1]
-; CHECK-NEXT: v_or_b32_e32 v16, 16, v18
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], v16, v[2:3]
-; CHECK-NEXT: v_or_b32_e32 v16, 16, v20
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], v16, v[4:5]
-; CHECK-NEXT: v_or_b32_e32 v16, 16, v22
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], v16, v[6:7]
-; CHECK-NEXT: v_or_b32_e32 v16, 16, v24
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], v16, v[8:9]
-; CHECK-NEXT: v_or_b32_e32 v16, 16, v26
-; CHECK-NEXT: v_lshlrev_b64 v[10:11], v16, v[10:11]
-; CHECK-NEXT: v_or_b32_e32 v16, 16, v28
-; CHECK-NEXT: v_lshlrev_b64 v[12:13], v16, v[12:13]
-; CHECK-NEXT: v_or_b32_e32 v16, 16, v30
-; CHECK-NEXT: v_lshlrev_b64 v[14:15], v16, v[14:15]
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <8 x i64> %shift_amt, splat (i64 16)
- %shl = shl <8 x i64> %arg0, %or
- ret <8 x i64> %shl
-}
-
-define <16 x i64> @shl_v16_or16(<16 x i64> %arg0, <16 x i64> %shift_amt) {
-; CHECK-LABEL: shl_v16_or16:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], v31, v[0:1]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:12
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], v31, v[2:3]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:20
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], v31, v[4:5]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:28
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], v31, v[6:7]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:36
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], v31, v[8:9]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:44
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[10:11], v31, v[10:11]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:52
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[12:13], v31, v[12:13]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[14:15], v31, v[14:15]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:68
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[16:17], v31, v[16:17]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:76
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[18:19], v31, v[18:19]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:84
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[20:21], v31, v[20:21]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:92
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[22:23], v31, v[22:23]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:100
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[24:25], v31, v[24:25]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:108
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[26:27], v31, v[26:27]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:116
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[28:29], v31, v[28:29]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; CHECK-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v32, 16, v32
-; CHECK-NEXT: v_lshlrev_b64 v[30:31], v32, v[30:31]
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <16 x i64> %shift_amt, splat (i64 16)
- %shl = shl <16 x i64> %arg0, %or
- ret <16 x i64> %shl
-}
-
; test inreg
define i64 @shl_or16_inreg(i64 %arg0, i64 inreg %shift_amt) {
@@ -423,113 +209,6 @@ define <4 x i64> @shl_v4_or16_inreg(<4 x i64> %arg0, <4 x i64> inreg %shift_amt)
ret <4 x i64> %shl
}
-define <5 x i64> @shl_v5_or16_inreg(<5 x i64> %arg0, <5 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v5_or16_inreg:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_or_b32 s4, s24, 16
-; CHECK-NEXT: s_or_b32 s5, s22, 16
-; CHECK-NEXT: s_or_b32 s6, s20, 16
-; CHECK-NEXT: s_or_b32 s7, s18, 16
-; CHECK-NEXT: s_or_b32 s8, s16, 16
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], s8, v[0:1]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], s7, v[2:3]
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], s6, v[4:5]
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], s5, v[6:7]
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], s4, v[8:9]
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <5 x i64> %shift_amt, splat (i64 16)
- %shl = shl <5 x i64> %arg0, %or
- ret <5 x i64> %shl
-}
-
-define <8 x i64> @shl_v8_or16_inreg(<8 x i64> %arg0, <8 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v8_or16_inreg:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v16, 16, v16
-; CHECK-NEXT: s_or_b32 s4, s28, 16
-; CHECK-NEXT: s_or_b32 s5, s26, 16
-; CHECK-NEXT: s_or_b32 s6, s24, 16
-; CHECK-NEXT: s_or_b32 s7, s22, 16
-; CHECK-NEXT: s_or_b32 s8, s20, 16
-; CHECK-NEXT: s_or_b32 s9, s18, 16
-; CHECK-NEXT: s_or_b32 s10, s16, 16
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], s10, v[0:1]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], s9, v[2:3]
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], s8, v[4:5]
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], s7, v[6:7]
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], s6, v[8:9]
-; CHECK-NEXT: v_lshlrev_b64 v[10:11], s5, v[10:11]
-; CHECK-NEXT: v_lshlrev_b64 v[12:13], s4, v[12:13]
-; CHECK-NEXT: v_lshlrev_b64 v[14:15], v16, v[14:15]
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <8 x i64> %shift_amt, splat (i64 16)
- %shl = shl <8 x i64> %arg0, %or
- ret <8 x i64> %shl
-}
-
-define <16 x i64> @shl_v16_or16_inreg(<16 x i64> %arg0, <16 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v16_or16_inreg:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
-; CHECK-NEXT: s_or_b32 s4, s28, 16
-; CHECK-NEXT: s_or_b32 s5, s26, 16
-; CHECK-NEXT: s_or_b32 s6, s24, 16
-; CHECK-NEXT: s_or_b32 s7, s22, 16
-; CHECK-NEXT: s_or_b32 s8, s20, 16
-; CHECK-NEXT: s_or_b32 s9, s18, 16
-; CHECK-NEXT: s_or_b32 s10, s16, 16
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], s10, v[0:1]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], s9, v[2:3]
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], s8, v[4:5]
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], s7, v[6:7]
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], s6, v[8:9]
-; CHECK-NEXT: v_lshlrev_b64 v[10:11], s5, v[10:11]
-; CHECK-NEXT: v_lshlrev_b64 v[12:13], s4, v[12:13]
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[14:15], v31, v[14:15]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:12
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[16:17], v31, v[16:17]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:20
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[18:19], v31, v[18:19]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:28
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[20:21], v31, v[20:21]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:36
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[22:23], v31, v[22:23]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:44
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[24:25], v31, v[24:25]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:52
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[26:27], v31, v[26:27]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v31, 16, v31
-; CHECK-NEXT: v_lshlrev_b64 v[28:29], v31, v[28:29]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; CHECK-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_or_b32_e32 v32, 16, v32
-; CHECK-NEXT: v_lshlrev_b64 v[30:31], v32, v[30:31]
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <16 x i64> %shift_amt, splat (i64 16)
- %shl = shl <16 x i64> %arg0, %or
- ret <16 x i64> %shl
-}
-
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Test range with an "or X, 32"
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -597,126 +276,6 @@ define <4 x i64> @shl_v4_or32(<4 x i64> %arg0, <4 x i64> %shift_amt) {
ret <4 x i64> %shl
}
-define <5 x i64> @shl_v5_or32(<5 x i64> %arg0, <5 x i64> %shift_amt) {
-; CHECK-LABEL: shl_v5_or32:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, v10, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, v12, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, v14, v4
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, v16, v6
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, v18, v8
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
-; CHECK-NEXT: v_mov_b32_e32 v6, 0
-; CHECK-NEXT: v_mov_b32_e32 v8, 0
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <5 x i64> %shift_amt, splat (i64 32)
- %shl = shl <5 x i64> %arg0, %or
- ret <5 x i64> %shl
-}
-
-define <8 x i64> @shl_v8_or32(<8 x i64> %arg0, <8 x i64> %shift_amt) {
-; CHECK-LABEL: shl_v8_or32:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, v16, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, v18, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, v20, v4
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, v22, v6
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, v24, v8
-; CHECK-NEXT: v_lshlrev_b32_e32 v11, v26, v10
-; CHECK-NEXT: v_lshlrev_b32_e32 v13, v28, v12
-; CHECK-NEXT: v_lshlrev_b32_e32 v15, v30, v14
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
-; CHECK-NEXT: v_mov_b32_e32 v6, 0
-; CHECK-NEXT: v_mov_b32_e32 v8, 0
-; CHECK-NEXT: v_mov_b32_e32 v10, 0
-; CHECK-NEXT: v_mov_b32_e32 v12, 0
-; CHECK-NEXT: v_mov_b32_e32 v14, 0
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <8 x i64> %shift_amt, splat (i64 32)
- %shl = shl <8 x i64> %arg0, %or
- ret <8 x i64> %shl
-}
-
-define <16 x i64> @shl_v16_or32(<16 x i64> %arg0, <16 x i64> %shift_amt) {
-; CHECK-LABEL: shl_v16_or32:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4
-; CHECK-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:12
-; CHECK-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:20
-; CHECK-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:28
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, v1, v0
-; CHECK-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:36
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, v3, v2
-; CHECK-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:44
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, v5, v4
-; CHECK-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:52
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, v7, v6
-; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:60
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, v0, v8
-; CHECK-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:68
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v11, v2, v10
-; CHECK-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v13, v4, v12
-; CHECK-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:84
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v15, v6, v14
-; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:92
-; CHECK-NEXT: v_mov_b32_e32 v8, 0
-; CHECK-NEXT: v_mov_b32_e32 v10, 0
-; CHECK-NEXT: v_mov_b32_e32 v12, 0
-; CHECK-NEXT: v_mov_b32_e32 v14, 0
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v17, v0, v16
-; CHECK-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:100
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v19, v2, v18
-; CHECK-NEXT: v_mov_b32_e32 v16, 0
-; CHECK-NEXT: s_waitcnt vmcnt(2)
-; CHECK-NEXT: v_lshlrev_b32_e32 v21, v4, v20
-; CHECK-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116
-; CHECK-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:108
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v23, v6, v22
-; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:124
-; CHECK-NEXT: v_mov_b32_e32 v18, 0
-; CHECK-NEXT: v_mov_b32_e32 v20, 0
-; CHECK-NEXT: v_mov_b32_e32 v22, 0
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v25, v0, v24
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: v_mov_b32_e32 v24, 0
-; CHECK-NEXT: s_waitcnt vmcnt(2)
-; CHECK-NEXT: v_lshlrev_b32_e32 v29, v2, v28
-; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: v_lshlrev_b32_e32 v27, v4, v26
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_lshlrev_b32_e32 v31, v6, v30
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
-; CHECK-NEXT: v_mov_b32_e32 v6, 0
-; CHECK-NEXT: v_mov_b32_e32 v26, 0
-; CHECK-NEXT: v_mov_b32_e32 v28, 0
-; CHECK-NEXT: v_mov_b32_e32 v30, 0
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <16 x i64> %shift_amt, splat (i64 32)
- %shl = shl <16 x i64> %arg0, %or
- ret <16 x i64> %shl
-}
-
; test inreg
define i64 @shl_or32_inreg(i64 %arg0, i64 inreg %shift_amt) {
@@ -779,111 +338,6 @@ define <4 x i64> @shl_v4_or32_inreg(<4 x i64> %arg0, <4 x i64> inreg %shift_amt)
ret <4 x i64> %shl
}
-define <5 x i64> @shl_v5_or32_inreg(<5 x i64> %arg0, <5 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v5_or32_inreg:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, s20, v4
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, s22, v6
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, s24, v8
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
-; CHECK-NEXT: v_mov_b32_e32 v6, 0
-; CHECK-NEXT: v_mov_b32_e32 v8, 0
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <5 x i64> %shift_amt, splat (i64 32)
- %shl = shl <5 x i64> %arg0, %or
- ret <5 x i64> %shl
-}
-
-define <8 x i64> @shl_v8_or32_inreg(<8 x i64> %arg0, <8 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v8_or32_inreg:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, s20, v4
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, s22, v6
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, s24, v8
-; CHECK-NEXT: v_lshlrev_b32_e32 v11, s26, v10
-; CHECK-NEXT: v_lshlrev_b32_e32 v13, s28, v12
-; CHECK-NEXT: v_lshlrev_b32_e32 v15, v16, v14
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
-; CHECK-NEXT: v_mov_b32_e32 v6, 0
-; CHECK-NEXT: v_mov_b32_e32 v8, 0
-; CHECK-NEXT: v_mov_b32_e32 v10, 0
-; CHECK-NEXT: v_mov_b32_e32 v12, 0
-; CHECK-NEXT: v_mov_b32_e32 v14, 0
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <8 x i64> %shift_amt, splat (i64 32)
- %shl = shl <8 x i64> %arg0, %or
- ret <8 x i64> %shl
-}
-
-define <16 x i64> @shl_v16_or32_inreg(<16 x i64> %arg0, <16 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v16_or32_inreg:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4
-; CHECK-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:12
-; CHECK-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:20
-; CHECK-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:28
-; CHECK-NEXT: v_lshlrev_b32_e32 v9, s24, v8
-; CHECK-NEXT: v_lshlrev_b32_e32 v11, s26, v10
-; CHECK-NEXT: v_lshlrev_b32_e32 v13, s28, v12
-; CHECK-NEXT: v_mov_b32_e32 v8, 0
-; CHECK-NEXT: v_mov_b32_e32 v10, 0
-; CHECK-NEXT: v_mov_b32_e32 v12, 0
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v15, v1, v14
-; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:36
-; CHECK-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:44
-; CHECK-NEXT: s_waitcnt vmcnt(4)
-; CHECK-NEXT: v_lshlrev_b32_e32 v17, v3, v16
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v19, v5, v18
-; CHECK-NEXT: s_waitcnt vmcnt(2)
-; CHECK-NEXT: v_lshlrev_b32_e32 v21, v7, v20
-; CHECK-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:68
-; CHECK-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:60
-; CHECK-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:52
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, s20, v4
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, s22, v6
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_mov_b32_e32 v4, 0
-; CHECK-NEXT: v_mov_b32_e32 v6, 0
-; CHECK-NEXT: s_waitcnt vmcnt(4)
-; CHECK-NEXT: v_lshlrev_b32_e32 v23, v1, v22
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
-; CHECK-NEXT: s_waitcnt vmcnt(3)
-; CHECK-NEXT: v_lshlrev_b32_e32 v25, v14, v24
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: v_mov_b32_e32 v14, 0
-; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: v_lshlrev_b32_e32 v29, v16, v28
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_lshlrev_b32_e32 v27, v18, v26
-; CHECK-NEXT: v_lshlrev_b32_e32 v31, v20, v30
-; CHECK-NEXT: v_mov_b32_e32 v16, 0
-; CHECK-NEXT: v_mov_b32_e32 v18, 0
-; CHECK-NEXT: v_mov_b32_e32 v20, 0
-; CHECK-NEXT: v_mov_b32_e32 v22, 0
-; CHECK-NEXT: v_mov_b32_e32 v24, 0
-; CHECK-NEXT: v_mov_b32_e32 v26, 0
-; CHECK-NEXT: v_mov_b32_e32 v28, 0
-; CHECK-NEXT: v_mov_b32_e32 v30, 0
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %or = or <16 x i64> %shift_amt, splat (i64 32)
- %shl = shl <16 x i64> %arg0, %or
- ret <16 x i64> %shl
-}
-
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Test range from max/min
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -994,325 +448,3 @@ define <4 x i64> @shl_v4_maxmin(<4 x i64> %arg0, <4 x i64> noundef %arg1) {
%shl = shl <4 x i64> %arg0, %min
ret <4 x i64> %shl
}
-
-define <5 x i64> @shl_v5_maxmin(<5 x i64> %arg0, <5 x i64> noundef %arg1) {
-; CHECK-LABEL: shl_v5_maxmin:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[10:11]
-; CHECK-NEXT: v_cndmask_b32_e32 v11, 0, v11, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v10, 32, v10, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[12:13]
-; CHECK-NEXT: v_cndmask_b32_e32 v13, 0, v13, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v12, 32, v12, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[14:15]
-; CHECK-NEXT: v_cndmask_b32_e32 v15, 0, v15, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v14, 32, v14, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[16:17]
-; CHECK-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v16, 32, v16, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[18:19]
-; CHECK-NEXT: v_cndmask_b32_e32 v19, 0, v19, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v18, 32, v18, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[18:19]
-; CHECK-NEXT: v_cndmask_b32_e32 v18, 63, v18, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[16:17]
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], v18, v[8:9]
-; CHECK-NEXT: v_cndmask_b32_e32 v16, 63, v16, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[14:15]
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], v16, v[6:7]
-; CHECK-NEXT: v_cndmask_b32_e32 v14, 63, v14, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[12:13]
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], v14, v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v12, 63, v12, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[10:11]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], v12, v[2:3]
-; CHECK-NEXT: v_cndmask_b32_e32 v10, 63, v10, vcc
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], v10, v[0:1]
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %max = call <5 x i64> @llvm.umax.i64(<5 x i64> %arg1, <5 x i64> splat (i64 32))
- %min = call <5 x i64> @llvm.umin.i64(<5 x i64> %max, <5 x i64> splat (i64 63))
- %shl = shl <5 x i64> %arg0, %min
- ret <5 x i64> %shl
-}
-
-define <8 x i64> @shl_v8_maxmin(<8 x i64> %arg0, <8 x i64> noundef %arg1) {
-; CHECK-LABEL: shl_v8_maxmin:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[16:17]
-; CHECK-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v16, 32, v16, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[18:19]
-; CHECK-NEXT: v_cndmask_b32_e32 v19, 0, v19, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v18, 32, v18, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[20:21]
-; CHECK-NEXT: v_cndmask_b32_e32 v21, 0, v21, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v20, 32, v20, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[22:23]
-; CHECK-NEXT: v_cndmask_b32_e32 v23, 0, v23, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v22, 32, v22, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[24:25]
-; CHECK-NEXT: v_cndmask_b32_e32 v25, 0, v25, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v24, 32, v24, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[26:27]
-; CHECK-NEXT: v_cndmask_b32_e32 v27, 0, v27, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v26, 32, v26, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[28:29]
-; CHECK-NEXT: v_cndmask_b32_e32 v29, 0, v29, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v28, 32, v28, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[28:29]
-; CHECK-NEXT: v_cndmask_b32_e32 v28, 63, v28, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[26:27]
-; CHECK-NEXT: v_lshlrev_b64 v[12:13], v28, v[12:13]
-; CHECK-NEXT: v_cndmask_b32_e32 v26, 63, v26, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[24:25]
-; CHECK-NEXT: v_lshlrev_b64 v[10:11], v26, v[10:11]
-; CHECK-NEXT: v_cndmask_b32_e32 v24, 63, v24, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[22:23]
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], v24, v[8:9]
-; CHECK-NEXT: v_cndmask_b32_e32 v22, 63, v22, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[20:21]
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], v22, v[6:7]
-; CHECK-NEXT: v_cndmask_b32_e32 v20, 63, v20, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[18:19]
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], v20, v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v18, 63, v18, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[16:17]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], v18, v[2:3]
-; CHECK-NEXT: v_cndmask_b32_e32 v16, 63, v16, vcc
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], v16, v[0:1]
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[30:31]
-; CHECK-NEXT: v_cndmask_b32_e32 v17, 0, v31, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v16, 32, v30, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[16:17]
-; CHECK-NEXT: v_cndmask_b32_e32 v16, 63, v16, vcc
-; CHECK-NEXT: v_lshlrev_b64 v[14:15], v16, v[14:15]
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %max = call <8 x i64> @llvm.umax.i64(<8 x i64> %arg1, <8 x i64> splat (i64 32))
- %min = call <8 x i64> @llvm.umin.i64(<8 x i64> %max, <8 x i64> splat (i64 63))
- %shl = shl <8 x i64> %arg0, %min
- ret <8 x i64> %shl
-}
-
-define <16 x i64> @shl_v16_maxmin(<16 x i64> %arg0, <16 x i64> noundef %arg1) {
-; CHECK-LABEL: shl_v16_maxmin:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
-; CHECK-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:16
-; CHECK-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:12
-; CHECK-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:24
-; CHECK-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:20
-; CHECK-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:32
-; CHECK-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:28
-; CHECK-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:36
-; CHECK-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:40
-; CHECK-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:48
-; CHECK-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:44
-; CHECK-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56
-; CHECK-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:52
-; CHECK-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:64
-; CHECK-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60
-; CHECK-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:68
-; CHECK-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:72
-; CHECK-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:80
-; CHECK-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:76
-; CHECK-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:88
-; CHECK-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:84
-; CHECK-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:96
-; CHECK-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:92
-; CHECK-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:100
-; CHECK-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:104
-; CHECK-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:112
-; CHECK-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:108
-; CHECK-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:120
-; CHECK-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:116
-; CHECK-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:128
-; CHECK-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:124
-; CHECK-NEXT: v_mov_b32_e32 v29, v27
-; CHECK-NEXT: v_mov_b32_e32 v28, v26
-; CHECK-NEXT: v_mov_b32_e32 v27, v25
-; CHECK-NEXT: v_mov_b32_e32 v26, v24
-; CHECK-NEXT: v_mov_b32_e32 v25, v23
-; CHECK-NEXT: v_mov_b32_e32 v24, v22
-; CHECK-NEXT: v_mov_b32_e32 v23, v21
-; CHECK-NEXT: v_mov_b32_e32 v22, v20
-; CHECK-NEXT: v_mov_b32_e32 v21, v19
-; CHECK-NEXT: v_mov_b32_e32 v20, v18
-; CHECK-NEXT: v_mov_b32_e32 v19, v17
-; CHECK-NEXT: v_mov_b32_e32 v18, v16
-; CHECK-NEXT: v_mov_b32_e32 v17, v15
-; CHECK-NEXT: v_mov_b32_e32 v16, v14
-; CHECK-NEXT: v_mov_b32_e32 v15, v13
-; CHECK-NEXT: v_mov_b32_e32 v14, v12
-; CHECK-NEXT: v_mov_b32_e32 v13, v11
-; CHECK-NEXT: v_mov_b32_e32 v12, v10
-; CHECK-NEXT: v_mov_b32_e32 v11, v9
-; CHECK-NEXT: v_mov_b32_e32 v10, v8
-; CHECK-NEXT: v_mov_b32_e32 v9, v7
-; CHECK-NEXT: v_mov_b32_e32 v8, v6
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
-; CHECK-NEXT: v_mov_b32_e32 v5, v3
-; CHECK-NEXT: v_mov_b32_e32 v4, v2
-; CHECK-NEXT: v_mov_b32_e32 v3, v1
-; CHECK-NEXT: v_mov_b32_e32 v2, v0
-; CHECK-NEXT: s_waitcnt vmcnt(30)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[31:32]
-; CHECK-NEXT: v_cndmask_b32_e32 v33, 0, v32, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v32, 32, v31, vcc
-; CHECK-NEXT: s_waitcnt vmcnt(26)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[36:37]
-; CHECK-NEXT: v_cmp_lt_u64_e64 s[4:5], 32, v[34:35]
-; CHECK-NEXT: v_cndmask_b32_e32 v37, 0, v37, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v36, 32, v36, vcc
-; CHECK-NEXT: s_waitcnt vmcnt(22)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[48:49]
-; CHECK-NEXT: v_cndmask_b32_e64 v35, 0, v35, s[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v49, 0, v49, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v48, 32, v48, vcc
-; CHECK-NEXT: s_waitcnt vmcnt(18)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[52:53]
-; CHECK-NEXT: v_cndmask_b32_e64 v34, 32, v34, s[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v53, 0, v53, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v52, 32, v52, vcc
-; CHECK-NEXT: s_waitcnt vmcnt(14)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[40:41]
-; CHECK-NEXT: v_cmp_lt_u64_e64 s[4:5], 32, v[38:39]
-; CHECK-NEXT: v_cndmask_b32_e32 v41, 0, v41, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v40, 32, v40, vcc
-; CHECK-NEXT: s_waitcnt vmcnt(12)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[42:43]
-; CHECK-NEXT: v_cndmask_b32_e64 v39, 0, v39, s[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v43, 0, v43, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v42, 32, v42, vcc
-; CHECK-NEXT: s_waitcnt vmcnt(10)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[44:45]
-; CHECK-NEXT: v_cndmask_b32_e64 v38, 32, v38, s[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v45, 0, v45, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v44, 32, v44, vcc
-; CHECK-NEXT: s_waitcnt vmcnt(8)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[46:47]
-; CHECK-NEXT: v_cmp_lt_u64_e64 s[4:5], 32, v[50:51]
-; CHECK-NEXT: v_cndmask_b32_e32 v47, 0, v47, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v46, 32, v46, vcc
-; CHECK-NEXT: s_waitcnt vmcnt(6)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[56:57]
-; CHECK-NEXT: v_cndmask_b32_e64 v51, 0, v51, s[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v1, 0, v57, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v0, 32, v56, vcc
-; CHECK-NEXT: s_waitcnt vmcnt(4)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[58:59]
-; CHECK-NEXT: v_cndmask_b32_e64 v50, 32, v50, s[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v59, 0, v59, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v58, 32, v58, vcc
-; CHECK-NEXT: s_waitcnt vmcnt(2)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[60:61]
-; CHECK-NEXT: v_cmp_lt_u64_e64 s[4:5], 32, v[54:55]
-; CHECK-NEXT: v_cndmask_b32_e32 v61, 0, v61, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v60, 32, v60, vcc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, 32, v[62:63]
-; CHECK-NEXT: v_cndmask_b32_e64 v55, 0, v55, s[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v57, 0, v63, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v56, 32, v62, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[56:57]
-; CHECK-NEXT: v_cndmask_b32_e64 v54, 32, v54, s[4:5]
-; CHECK-NEXT: v_cndmask_b32_e32 v56, 63, v56, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[60:61]
-; CHECK-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; CHECK-NEXT: v_cndmask_b32_e32 v57, 63, v60, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[58:59]
-; CHECK-NEXT: v_cndmask_b32_e32 v58, 63, v58, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[0:1]
-; CHECK-NEXT: v_cndmask_b32_e32 v59, 63, v0, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[46:47]
-; CHECK-NEXT: v_cndmask_b32_e32 v46, 63, v46, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[44:45]
-; CHECK-NEXT: v_cndmask_b32_e32 v44, 63, v44, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[42:43]
-; CHECK-NEXT: v_cndmask_b32_e32 v42, 63, v42, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[40:41]
-; CHECK-NEXT: v_cndmask_b32_e32 v40, 63, v40, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[54:55]
-; CHECK-NEXT: v_cndmask_b32_e32 v54, 63, v54, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[52:53]
-; CHECK-NEXT: v_cndmask_b32_e32 v52, 63, v52, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[50:51]
-; CHECK-NEXT: v_cndmask_b32_e32 v50, 63, v50, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[48:49]
-; CHECK-NEXT: v_cndmask_b32_e32 v48, 63, v48, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[38:39]
-; CHECK-NEXT: v_cndmask_b32_e32 v38, 63, v38, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[36:37]
-; CHECK-NEXT: v_cndmask_b32_e32 v36, 63, v36, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[34:35]
-; CHECK-NEXT: v_cndmask_b32_e32 v34, 63, v34, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, 63, v[32:33]
-; CHECK-NEXT: v_cndmask_b32_e32 v0, 63, v32, vcc
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], v0, v[2:3]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], v34, v[4:5]
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], v36, v[6:7]
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], v38, v[8:9]
-; CHECK-NEXT: v_lshlrev_b64 v[8:9], v48, v[10:11]
-; CHECK-NEXT: v_lshlrev_b64 v[10:11], v50, v[12:13]
-; CHECK-NEXT: v_lshlrev_b64 v[12:13], v52, v[14:15]
-; CHECK-NEXT: v_lshlrev_b64 v[14:15], v54, v[16:17]
-; CHECK-NEXT: v_lshlrev_b64 v[16:17], v40, v[18:19]
-; CHECK-NEXT: v_lshlrev_b64 v[18:19], v42, v[20:21]
-; CHECK-NEXT: v_lshlrev_b64 v[20:21], v44, v[22:23]
-; CHECK-NEXT: v_lshlrev_b64 v[22:23], v46, v[24:25]
-; CHECK-NEXT: v_lshlrev_b64 v[24:25], v59, v[26:27]
-; CHECK-NEXT: v_lshlrev_b64 v[26:27], v58, v[28:29]
-; CHECK-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; CHECK-NEXT: s_waitcnt vmcnt(2)
-; CHECK-NEXT: v_lshlrev_b64 v[30:31], v56, v[30:31]
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_lshlrev_b64 v[28:29], v57, v[28:29]
-; CHECK-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: s_setpc_b64 s[30:31]
- %max = call <16 x i64> @llvm.umax.i64(<16 x i64> %arg1, <16 x i64> splat (i64 32))
- %min = call <16 x i64> @llvm.umin.i64(<16 x i64> %max, <16 x i64> splat (i64 63))
- %shl = shl <16 x i64> %arg0, %min
- ret <16 x i64> %shl
-}
>From f936383542f4af7434a58cd7ec0df1ddf68ff348 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 7 Feb 2025 09:41:23 -0600
Subject: [PATCH 08/12] Add comment noting that AND instruction will be deleted
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index bbcc8ef503efc..624aae1a1fb4d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4056,6 +4056,7 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
if (Known.getMinValue().getZExtValue() >= TargetType.getSizeInBits()) {
SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, TargetType, RHS);
const SDValue ShiftMask = DAG.getConstant(TargetType.getSizeInBits() - 1, SL, TargetType);
+ // This AND instruction will be removed during later instruction selection.
SDValue MaskedShiftAmt =
DAG.getNode(ISD::AND, SL, TargetType, truncShiftAmt, ShiftMask);
SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, LHS);
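The claim in the new comment is easy to check arithmetically: on the range the combine requires, the AND computes exactly the reduced shift amount, so it carries no extra information once a 32-bit shift is selected, presumably because the target's 32-bit shift only consumes the low 5 bits of its amount operand (an assumption about the backend, not stated in the patch). A trivial standalone check (illustrative only, not part of the patch):

#include <cassert>

int main() {
  // For amounts already known to be in [32, 63], masking with 31 yields
  // exactly the reduced amount (Amt - 32), which is always a legal
  // 32-bit shift amount.
  for (unsigned Amt = 32; Amt <= 63; ++Amt)
    assert((Amt & 31u) == Amt - 32);
  return 0;
}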
>From 72ae9386acf0f61bb2bc6386a402595b29581442 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 7 Feb 2025 12:24:39 -0600
Subject: [PATCH 09/12] Fully use inreg to generate scalar shl
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/test/CodeGen/AMDGPU/shl64_reduce.ll | 123 ++++++++++++++---------
1 file changed, 75 insertions(+), 48 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index d666654bff7dc..05430213c17d2 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -147,62 +147,80 @@ define <4 x i64> @shl_v4_or16(<4 x i64> %arg0, <4 x i64> %shift_amt) {
ret <4 x i64> %shl
}
-; test inreg
+; test SGPR
-define i64 @shl_or16_inreg(i64 %arg0, i64 inreg %shift_amt) {
-; CHECK-LABEL: shl_or16_inreg:
+define i64 @shl_or16_sgpr(i64 inreg %arg0, i64 inreg %shift_amt) {
+; CHECK-LABEL: shl_or16_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_or_b32 s4, s16, 16
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], s4, v[0:1]
+; CHECK-NEXT: s_or_b32 s4, s18, 16
+; CHECK-NEXT: s_lshl_b64 s[4:5], s[16:17], s4
+; CHECK-NEXT: v_mov_b32_e32 v0, s4
+; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or i64 %shift_amt, 16
%shl = shl i64 %arg0, %or
ret i64 %shl
}
-define <2 x i64> @shl_v2_or16_inreg(<2 x i64> %arg0, <2 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v2_or16_inreg:
+define <2 x i64> @shl_v2_or16_sgpr(<2 x i64> inreg %arg0, <2 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v2_or16_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_or_b32 s4, s18, 16
-; CHECK-NEXT: s_or_b32 s5, s16, 16
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], s5, v[0:1]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], s4, v[2:3]
+; CHECK-NEXT: s_or_b32 s6, s22, 16
+; CHECK-NEXT: s_or_b32 s4, s20, 16
+; CHECK-NEXT: s_lshl_b64 s[4:5], s[16:17], s4
+; CHECK-NEXT: s_lshl_b64 s[6:7], s[18:19], s6
+; CHECK-NEXT: v_mov_b32_e32 v0, s4
+; CHECK-NEXT: v_mov_b32_e32 v1, s5
+; CHECK-NEXT: v_mov_b32_e32 v2, s6
+; CHECK-NEXT: v_mov_b32_e32 v3, s7
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <2 x i64> %shift_amt, splat (i64 16)
%shl = shl <2 x i64> %arg0, %or
ret <2 x i64> %shl
}
-define <3 x i64> @shl_v3_or16_inreg(<3 x i64> %arg0, <3 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v3_or16_inreg:
+define <3 x i64> @shl_v3_or16_sgpr(<3 x i64> inreg %arg0, <3 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v3_or16_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_or_b32 s4, s20, 16
-; CHECK-NEXT: s_or_b32 s5, s18, 16
-; CHECK-NEXT: s_or_b32 s6, s16, 16
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], s6, v[0:1]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], s5, v[2:3]
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], s4, v[4:5]
+; CHECK-NEXT: s_or_b32 s8, s26, 16
+; CHECK-NEXT: s_or_b32 s6, s24, 16
+; CHECK-NEXT: s_or_b32 s4, s22, 16
+; CHECK-NEXT: s_lshl_b64 s[4:5], s[16:17], s4
+; CHECK-NEXT: s_lshl_b64 s[6:7], s[18:19], s6
+; CHECK-NEXT: s_lshl_b64 s[8:9], s[20:21], s8
+; CHECK-NEXT: v_mov_b32_e32 v0, s4
+; CHECK-NEXT: v_mov_b32_e32 v1, s5
+; CHECK-NEXT: v_mov_b32_e32 v2, s6
+; CHECK-NEXT: v_mov_b32_e32 v3, s7
+; CHECK-NEXT: v_mov_b32_e32 v4, s8
+; CHECK-NEXT: v_mov_b32_e32 v5, s9
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <3 x i64> %shift_amt, splat (i64 16)
%shl = shl <3 x i64> %arg0, %or
ret <3 x i64> %shl
}
-define <4 x i64> @shl_v4_or16_inreg(<4 x i64> %arg0, <4 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v4_or16_inreg:
+define <4 x i64> @shl_v4_or16_sgpr(<4 x i64> inreg %arg0, <4 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v4_or16_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_or_b32 s4, s22, 16
-; CHECK-NEXT: s_or_b32 s5, s20, 16
-; CHECK-NEXT: s_or_b32 s6, s18, 16
-; CHECK-NEXT: s_or_b32 s7, s16, 16
-; CHECK-NEXT: v_lshlrev_b64 v[0:1], s7, v[0:1]
-; CHECK-NEXT: v_lshlrev_b64 v[2:3], s6, v[2:3]
-; CHECK-NEXT: v_lshlrev_b64 v[4:5], s5, v[4:5]
-; CHECK-NEXT: v_lshlrev_b64 v[6:7], s4, v[6:7]
+; CHECK-NEXT: v_or_b32_e32 v0, 16, v0
+; CHECK-NEXT: s_or_b32 s8, s28, 16
+; CHECK-NEXT: s_or_b32 s6, s26, 16
+; CHECK-NEXT: s_or_b32 s4, s24, 16
+; CHECK-NEXT: s_lshl_b64 s[4:5], s[16:17], s4
+; CHECK-NEXT: s_lshl_b64 s[6:7], s[18:19], s6
+; CHECK-NEXT: s_lshl_b64 s[8:9], s[20:21], s8
+; CHECK-NEXT: v_lshlrev_b64 v[6:7], v0, s[22:23]
+; CHECK-NEXT: v_mov_b32_e32 v0, s4
+; CHECK-NEXT: v_mov_b32_e32 v1, s5
+; CHECK-NEXT: v_mov_b32_e32 v2, s6
+; CHECK-NEXT: v_mov_b32_e32 v3, s7
+; CHECK-NEXT: v_mov_b32_e32 v4, s8
+; CHECK-NEXT: v_mov_b32_e32 v5, s9
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <4 x i64> %shift_amt, splat (i64 16)
%shl = shl <4 x i64> %arg0, %or
@@ -276,61 +294,70 @@ define <4 x i64> @shl_v4_or32(<4 x i64> %arg0, <4 x i64> %shift_amt) {
ret <4 x i64> %shl
}
-; test inreg
+; test SGPR
-define i64 @shl_or32_inreg(i64 %arg0, i64 inreg %shift_amt) {
-; CHECK-LABEL: shl_or32_inreg:
+define i64 @shl_or32_sgpr(i64 inreg %arg0, i64 inreg %shift_amt) {
+; CHECK-LABEL: shl_or32_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
+; CHECK-NEXT: s_lshl_b32 s4, s16, s18
; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, s4
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or i64 %shift_amt, 32
%shl = shl i64 %arg0, %or
ret i64 %shl
}
-define <2 x i64> @shl_v2_or32_inreg(<2 x i64> %arg0, <2 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v2_or32_inreg:
+define <2 x i64> @shl_v2_or32_sgpr(<2 x i64> inreg %arg0, <2 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v2_or32_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
+; CHECK-NEXT: s_lshl_b32 s4, s16, s20
+; CHECK-NEXT: s_lshl_b32 s5, s18, s22
; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, s4
; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v3, s5
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <2 x i64> %shift_amt, splat (i64 32)
%shl = shl <2 x i64> %arg0, %or
ret <2 x i64> %shl
}
-define <3 x i64> @shl_v3_or32_inreg(<3 x i64> %arg0, <3 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v3_or32_inreg:
+define <3 x i64> @shl_v3_or32_sgpr(<3 x i64> inreg %arg0, <3 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v3_or32_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, s20, v4
+; CHECK-NEXT: s_lshl_b32 s4, s16, s22
+; CHECK-NEXT: s_lshl_b32 s5, s18, s24
+; CHECK-NEXT: s_lshl_b32 s6, s20, s26
; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, s4
; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v3, s5
; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v5, s6
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <3 x i64> %shift_amt, splat (i64 32)
%shl = shl <3 x i64> %arg0, %or
ret <3 x i64> %shl
}
-define <4 x i64> @shl_v4_or32_inreg(<4 x i64> %arg0, <4 x i64> inreg %shift_amt) {
-; CHECK-LABEL: shl_v4_or32_inreg:
+define <4 x i64> @shl_v4_or32_sgpr(<4 x i64> inreg %arg0, <4 x i64> inreg %shift_amt) {
+; CHECK-LABEL: shl_v4_or32_sgpr:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_lshlrev_b32_e32 v1, s16, v0
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, s18, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v5, s20, v4
-; CHECK-NEXT: v_lshlrev_b32_e32 v7, s22, v6
+; CHECK-NEXT: s_lshl_b32 s4, s16, s24
+; CHECK-NEXT: s_lshl_b32 s5, s18, s26
+; CHECK-NEXT: s_lshl_b32 s6, s20, s28
+; CHECK-NEXT: v_lshlrev_b32_e64 v7, v0, s22
; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, s4
; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v3, s5
; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: v_mov_b32_e32 v5, s6
; CHECK-NEXT: v_mov_b32_e32 v6, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or <4 x i64> %shift_amt, splat (i64 32)
>From 367846de71ed281c56c54b3412c51d0767041683 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Tue, 11 Feb 2025 16:53:09 -0600
Subject: [PATCH 10/12] New shl32 must preserve flags
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 2 +-
.../test/CodeGen/AMDGPU/shl64_reduce_flags.ll | 99 +++++++++++++++++++
2 files changed, 100 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/shl64_reduce_flags.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 624aae1a1fb4d..d9e3c40f93b24 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4060,7 +4060,7 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
SDValue MaskedShiftAmt =
DAG.getNode(ISD::AND, SL, TargetType, truncShiftAmt, ShiftMask);
SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, LHS);
- SDValue NewShift = DAG.getNode(ISD::SHL, SL, TargetType, Lo, MaskedShiftAmt);
+ SDValue NewShift = DAG.getNode(ISD::SHL, SL, TargetType, Lo, MaskedShiftAmt, N->getFlags());
const SDValue Zero = DAG.getConstant(0, SL, TargetType);
SDValue Vec = DAG.getBuildVector(TargetVecPairType, SL, {Zero, NewShift});
return DAG.getNode(ISD::BITCAST, SL, VT, Vec);
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce_flags.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce_flags.ll
new file mode 100644
index 0000000000000..7c75303c3463f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce_flags.ll
@@ -0,0 +1,99 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -stop-after=finalize-isel -o - %s | FileCheck %s
+
+;; Test that reduction of:
+;;
+;; DST = shl i64 X, Y
+;;
+;; where Y is in the range [63-32] to:
+;;
+;; DST = [0, shl i32 X, (Y & 0x1F)]
+;;
+;; preserves flags
+
+define i64 @shl_nsw(i64 %arg0, i64 %shift_amt) {
+ ; CHECK-LABEL: name: shl_nsw
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $vgpr0, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[DEF]]
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, killed [[COPY2]], %subreg.sub1
+ ; CHECK-NEXT: [[DEF3:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF4:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF5:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[DEF3]]
+ ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, killed [[COPY3]], %subreg.sub1
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
+ ; CHECK-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = nsw V_LSHLREV_B32_e64 killed [[COPY4]], killed [[COPY5]], implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[V_MOV_B32_e32_]]
+ ; CHECK-NEXT: $vgpr1 = COPY [[V_LSHLREV_B32_e64_]]
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ %or = or i64 %shift_amt, 32
+ %shl = shl nsw i64 %arg0, %or
+ ret i64 %shl
+}
+
+define i64 @shl_nuw(i64 %arg0, i64 %shift_amt) {
+ ; CHECK-LABEL: name: shl_nuw
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $vgpr0, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[DEF]]
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, killed [[COPY2]], %subreg.sub1
+ ; CHECK-NEXT: [[DEF3:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF4:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF5:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[DEF3]]
+ ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, killed [[COPY3]], %subreg.sub1
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
+ ; CHECK-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = nuw V_LSHLREV_B32_e64 killed [[COPY4]], killed [[COPY5]], implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[V_MOV_B32_e32_]]
+ ; CHECK-NEXT: $vgpr1 = COPY [[V_LSHLREV_B32_e64_]]
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ %or = or i64 %shift_amt, 32
+ %shl = shl nuw i64 %arg0, %or
+ ret i64 %shl
+}
+
+define i64 @shl_nsw_nuw(i64 %arg0, i64 %shift_amt) {
+ ; CHECK-LABEL: name: shl_nsw_nuw
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $vgpr0, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[DEF]]
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, killed [[COPY2]], %subreg.sub1
+ ; CHECK-NEXT: [[DEF3:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF4:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF5:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[DEF3]]
+ ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, killed [[COPY3]], %subreg.sub1
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
+ ; CHECK-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = nuw nsw V_LSHLREV_B32_e64 killed [[COPY4]], killed [[COPY5]], implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[V_MOV_B32_e32_]]
+ ; CHECK-NEXT: $vgpr1 = COPY [[V_LSHLREV_B32_e64_]]
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ %or = or i64 %shift_amt, 32
+ %shl = shl nsw nuw i64 %arg0, %or
+ ret i64 %shl
+}
>From b01c15b4f304f20600785e122d16c9ad82656383 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Tue, 11 Feb 2025 17:02:43 -0600
Subject: [PATCH 11/12] Add comment that shift amt will be clamped and apply
clang-format
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d9e3c40f93b24..0f825e51bfd3b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4050,19 +4050,24 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
// shl i64 X, Y -> [0, shl i32 X, (Y & 0x1F)]
if (VT == MVT::i64) {
KnownBits Known = DAG.computeKnownBits(RHS);
- EVT TargetType=VT.getHalfSizedIntegerVT(*DAG.getContext());
- EVT TargetVecPairType=EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
+ EVT TargetType = VT.getHalfSizedIntegerVT(*DAG.getContext());
+ EVT TargetVecPairType =
+ EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
if (Known.getMinValue().getZExtValue() >= TargetType.getSizeInBits()) {
SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, TargetType, RHS);
- const SDValue ShiftMask = DAG.getConstant(TargetType.getSizeInBits() - 1, SL, TargetType);
- // This AND instruction will be removed during later instruction selection.
+ const SDValue ShiftMask =
+ DAG.getConstant(TargetType.getSizeInBits() - 1, SL, TargetType);
+ // This AND instruction will clamp out of bounds shift values.
+ // It will also be removed during later instruction selection.
SDValue MaskedShiftAmt =
DAG.getNode(ISD::AND, SL, TargetType, truncShiftAmt, ShiftMask);
SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, LHS);
- SDValue NewShift = DAG.getNode(ISD::SHL, SL, TargetType, Lo, MaskedShiftAmt, N->getFlags());
+ SDValue NewShift = DAG.getNode(ISD::SHL, SL, TargetType, Lo,
+ MaskedShiftAmt, N->getFlags());
const SDValue Zero = DAG.getConstant(0, SL, TargetType);
- SDValue Vec = DAG.getBuildVector(TargetVecPairType, SL, {Zero, NewShift});
+ SDValue Vec =
+ DAG.getBuildVector(TargetVecPairType, SL, {Zero, NewShift});
return DAG.getNode(ISD::BITCAST, SL, VT, Vec);
}
}
>From 9fbb56a50da760523d45e620dac5e5d5e31280af Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Wed, 12 Feb 2025 10:01:29 -0600
Subject: [PATCH 12/12] Merge constant and variable case
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 120 ++++++++----------
1 file changed, 56 insertions(+), 64 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 0f825e51bfd3b..2d46cf3b70a34 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4046,66 +4046,42 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
SDLoc SL(N);
SelectionDAG &DAG = DCI.DAG;
- if (!CRHS) {
- // shl i64 X, Y -> [0, shl i32 X, (Y & 0x1F)]
- if (VT == MVT::i64) {
- KnownBits Known = DAG.computeKnownBits(RHS);
- EVT TargetType = VT.getHalfSizedIntegerVT(*DAG.getContext());
- EVT TargetVecPairType =
- EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
-
- if (Known.getMinValue().getZExtValue() >= TargetType.getSizeInBits()) {
- SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, TargetType, RHS);
- const SDValue ShiftMask =
- DAG.getConstant(TargetType.getSizeInBits() - 1, SL, TargetType);
- // This AND instruction will clamp out of bounds shift values.
- // It will also be removed during later instruction selection.
- SDValue MaskedShiftAmt =
- DAG.getNode(ISD::AND, SL, TargetType, truncShiftAmt, ShiftMask);
- SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, LHS);
- SDValue NewShift = DAG.getNode(ISD::SHL, SL, TargetType, Lo,
- MaskedShiftAmt, N->getFlags());
- const SDValue Zero = DAG.getConstant(0, SL, TargetType);
- SDValue Vec =
- DAG.getBuildVector(TargetVecPairType, SL, {Zero, NewShift});
- return DAG.getNode(ISD::BITCAST, SL, VT, Vec);
- }
- }
- return SDValue();
- }
+ unsigned RHSVal;
+ if (CRHS) {
+ RHSVal = CRHS->getZExtValue();
+ if (!RHSVal)
+ return LHS;
- unsigned RHSVal = CRHS->getZExtValue();
- if (!RHSVal)
- return LHS;
+ switch (LHS->getOpcode()) {
+ default:
+ break;
+ case ISD::ZERO_EXTEND:
+ case ISD::SIGN_EXTEND:
+ case ISD::ANY_EXTEND: {
+ SDValue X = LHS->getOperand(0);
+
+ if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
+ isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
+ // Prefer build_vector as the canonical form if packed types are legal.
+ // (shl ([asz]ext i16:x), 16 -> build_vector 0, x
+ SDValue Vec = DAG.getBuildVector(
+ MVT::v2i16, SL,
+ {DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0)});
+ return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
+ }
- switch (LHS->getOpcode()) {
- default:
- break;
- case ISD::ZERO_EXTEND:
- case ISD::SIGN_EXTEND:
- case ISD::ANY_EXTEND: {
- SDValue X = LHS->getOperand(0);
-
- if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
- isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
- // Prefer build_vector as the canonical form if packed types are legal.
- // (shl ([asz]ext i16:x), 16 -> build_vector 0, x
- SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
- { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
- return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
+ // shl (ext x) => zext (shl x), if shift does not overflow int
+ if (VT != MVT::i64)
+ break;
+ KnownBits Known = DAG.computeKnownBits(X);
+ unsigned LZ = Known.countMinLeadingZeros();
+ if (LZ < RHSVal)
+ break;
+ EVT XVT = X.getValueType();
+ SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(CRHS, 0));
+ return DAG.getZExtOrTrunc(Shl, SL, VT);
+ }
}
-
- // shl (ext x) => zext (shl x), if shift does not overflow int
- if (VT != MVT::i64)
- break;
- KnownBits Known = DAG.computeKnownBits(X);
- unsigned LZ = Known.countMinLeadingZeros();
- if (LZ < RHSVal)
- break;
- EVT XVT = X.getValueType();
- SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(CRHS, 0));
- return DAG.getZExtOrTrunc(Shl, SL, VT);
- }
}
if (VT != MVT::i64)
@@ -4116,18 +4092,34 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
// On some subtargets, 64-bit shift is a quarter rate instruction. In the
// common case, splitting this into a move and a 32-bit shift is faster and
// the same code size.
- if (RHSVal < 32)
+ EVT TargetType = VT.getHalfSizedIntegerVT(*DAG.getContext());
+ EVT TargetVecPairType = EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
+ KnownBits Known = DAG.computeKnownBits(RHS);
+
+ if (Known.getMinValue().getZExtValue() < TargetType.getSizeInBits())
return SDValue();
+ SDValue ShiftAmt;
- SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
+ if (CRHS) {
+ ShiftAmt =
+ DAG.getConstant(RHSVal - TargetType.getSizeInBits(), SL, TargetType);
+ } else {
+ SDValue truncShiftAmt = DAG.getNode(ISD::TRUNCATE, SL, TargetType, RHS);
+ const SDValue ShiftMask =
+ DAG.getConstant(TargetType.getSizeInBits() - 1, SL, TargetType);
+ // This AND instruction will clamp out of bounds shift values.
+ // It will also be removed during later instruction selection.
+ ShiftAmt = DAG.getNode(ISD::AND, SL, TargetType, truncShiftAmt, ShiftMask);
+ }
- SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
- SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
+ SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, LHS);
+ SDValue NewShift =
+ DAG.getNode(ISD::SHL, SL, TargetType, Lo, ShiftAmt, N->getFlags());
- const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
+ const SDValue Zero = DAG.getConstant(0, SL, TargetType);
- SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
- return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
+ SDValue Vec = DAG.getBuildVector(TargetVecPairType, SL, {Zero, NewShift});
+ return DAG.getNode(ISD::BITCAST, SL, VT, Vec);
}
SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
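After this merge, the shift-amount selection is the only place where the constant and variable paths still differ. A condensed sketch of that selection is below; selectReducedShiftAmt is a hypothetical helper name used for illustration, with the body lifted from the patch:

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Sketch of the merged shift-amount selection (simplified; not the full
// combine). Constant amounts fold (Y - 32) directly; variable amounts are
// truncated and masked down to the low 5 bits.
static SDValue selectReducedShiftAmt(SelectionDAG &DAG, const SDLoc &SL,
                                     EVT TargetType, SDValue RHS,
                                     ConstantSDNode *CRHS) {
  if (CRHS)
    return DAG.getConstant(CRHS->getZExtValue() - TargetType.getSizeInBits(),
                           SL, TargetType);
  SDValue TruncAmt = DAG.getNode(ISD::TRUNCATE, SL, TargetType, RHS);
  SDValue Mask =
      DAG.getConstant(TargetType.getSizeInBits() - 1, SL, TargetType);
  // The AND clamps out-of-range values and is folded away later, since the
  // hardware 32-bit shift only reads the low 5 bits of the amount.
  return DAG.getNode(ISD::AND, SL, TargetType, TruncAmt, Mask);
}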