[llvm] [AMDGPU] Create hi-half of 64-bit ashr with mov of -1 (PR #146569)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 2 07:34:32 PDT 2025
https://github.com/LU-JOHN updated https://github.com/llvm/llvm-project/pull/146569
From 2e32d5dfe2eb5ad83c40e3f03bc6db703a520ee5 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Tue, 1 Jul 2025 11:25:47 -0500
Subject: [PATCH 1/4] Create hi-half of 64-bit ashr with mov of -1
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 10 +-
llvm/test/CodeGen/AMDGPU/neg_ashr64_reduce.ll | 102 ++++++++++++++++++
2 files changed, 110 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/neg_ashr64_reduce.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d75c7a178b4a8..a877b28119c16 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4218,9 +4218,15 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
SDValue SplitLHS = DAG.getNode(ISD::BITCAST, LHSSL, ConcatType, LHS);
Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, LHSSL, TargetType, SplitLHS, One);
}
- Hi = DAG.getFreeze(Hi);
- SDValue HiShift = DAG.getNode(ISD::SRA, SL, TargetType, Hi, ShiftFullAmt);
+ KnownBits KnownLHS = DAG.computeKnownBits(LHS);
+ SDValue HiShift;
+ if (KnownLHS.isNegative())
+ HiShift = DAG.getAllOnesConstant(SL, TargetType);
+ else {
+ Hi = DAG.getFreeze(Hi);
+ HiShift = DAG.getNode(ISD::SRA, SL, TargetType, Hi, ShiftFullAmt);
+ }
SDValue NewShift = DAG.getNode(ISD::SRA, SL, TargetType, Hi, ShiftAmt);
SDValue Vec;
diff --git a/llvm/test/CodeGen/AMDGPU/neg_ashr64_reduce.ll b/llvm/test/CodeGen/AMDGPU/neg_ashr64_reduce.ll
new file mode 100644
index 0000000000000..bb291272555b1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/neg_ashr64_reduce.ll
@@ -0,0 +1,102 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s
+
+; Test that negative 64-bit values shifted by [32-63] bits have
+; a hi-result created by moving an all-ones constant.
+
+; FIXME: Range metadata is invalidated when i64 types are legalized to v2i32 types.
+; We could call performSraCombine before legalization, but other optimizations only work
+; with 64-bit sra.
+define i64 @scalar_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: scalar_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v4, v[2:3]
+; CHECK-NEXT: ; kill: killed $vgpr0 killed $vgpr1
+; CHECK-NEXT: ; kill: killed $vgpr2 killed $vgpr3
+; CHECK-NEXT: v_ashrrev_i32_e32 v1, 31, v5
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, v4, v5
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load i64, ptr %arg0.ptr, !range !0, !noundef !{}
+ %shift.amt = load i64, ptr %arg1.ptr, !range !1, !noundef !{}
+ %ashr = ashr i64 %val, %shift.amt
+ ret i64 %ashr
+}
+
+define <2 x i64> @v2_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: v2_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[0:1]
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: v_mov_b32_e32 v1, -1
+; CHECK-NEXT: v_mov_b32_e32 v3, -1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, v8, v5
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, v10, v7
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load <2 x i64>, ptr %arg0.ptr, !range !2, !noundef !{}
+ %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !3, !noundef !{}
+ %ashr = ashr <2 x i64> %val, %shift.amt
+ ret <2 x i64> %ashr
+}
+
+define <3 x i64> @v3_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: v3_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dword v4, v[0:1] offset:20
+; CHECK-NEXT: flat_load_dword v6, v[2:3] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: v_mov_b32_e32 v1, -1
+; CHECK-NEXT: v_mov_b32_e32 v3, -1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v4, v6, v4
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, v8, v5
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, v10, v7
+; CHECK-NEXT: v_mov_b32_e32 v5, -1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load <3 x i64>, ptr %arg0.ptr, !range !4, !noundef !{}
+ %shift.amt = load <3 x i64>, ptr %arg1.ptr, !range !5, !noundef !{}
+ %ashr = ashr <3 x i64> %val, %shift.amt
+ ret <3 x i64> %ashr
+}
+
+define <4 x i64> @v4_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: v4_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[0:1]
+; CHECK-NEXT: flat_load_dwordx4 v[11:14], v[0:1] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[15:18], v[2:3] offset:16
+; CHECK-NEXT: v_mov_b32_e32 v1, -1
+; CHECK-NEXT: v_mov_b32_e32 v3, -1
+; CHECK-NEXT: v_mov_b32_e32 v5, -1
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v7, -1
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, v4, v8
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, v6, v10
+; CHECK-NEXT: v_ashrrev_i32_e32 v4, v15, v12
+; CHECK-NEXT: v_ashrrev_i32_e32 v6, v17, v14
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load <4 x i64>, ptr %arg0.ptr, !range !6, !noundef !{}
+ %shift.amt = load <4 x i64>, ptr %arg1.ptr, !range !7, !noundef !{}
+ %ashr = ashr <4 x i64> %val, %shift.amt
+ ret <4 x i64> %ashr
+}
+
+!0 = !{i64 -6000000000, i64 0}
+!1 = !{i64 32, i64 64}
+!2 = !{i64 -7000000000, i64 -1000}
+!3 = !{i64 38, i64 64}
+!4 = !{i64 -8000000000, i64 -2001}
+!5 = !{i64 38, i64 60}
+!6 = !{i64 -9000000000, i64 -3002}
+!7 = !{i64 38, i64 50}
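As a sanity check on the reasoning behind patch 1 (a standalone, hypothetical host-side program, not part of the patch): when the shifted value is known to be negative and the shift amount is known to lie in [32, 63], the high 32 bits of the 64-bit ashr result are always all ones, so the hi-half can be produced with a single mov of -1. This assumes arithmetic right shift of negative values, which C++20 guarantees.

#include <cassert>
#include <cstdint>

int main() {
  // For any negative 64-bit value shifted right arithmetically by an
  // amount in [32, 63], the result lies in [-2^31, -1], so its high
  // 32 bits are all ones and can be materialized as -1.
  const int64_t vals[] = {-6000000000LL, -1000LL, -1LL, INT64_MIN};
  for (int64_t v : vals)
    for (unsigned s = 32; s < 64; ++s)
      assert(((v >> s) >> 32) == -1);
  return 0;
}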
From 8039cb5845451a33162c06e41d399e2166d5fb50 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Tue, 1 Jul 2025 16:22:15 -0500
Subject: [PATCH 2/4] Add negative tests
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/test/CodeGen/AMDGPU/neg_ashr64_reduce.ll | 87 +++++++++++++++++++
1 file changed, 87 insertions(+)
diff --git a/llvm/test/CodeGen/AMDGPU/neg_ashr64_reduce.ll b/llvm/test/CodeGen/AMDGPU/neg_ashr64_reduce.ll
index bb291272555b1..9713689217cf7 100644
--- a/llvm/test/CodeGen/AMDGPU/neg_ashr64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/neg_ashr64_reduce.ll
@@ -92,6 +92,7 @@ define <4 x i64> @v4_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
ret <4 x i64> %ashr
}
+; Ranges used when transformation is valid
!0 = !{i64 -6000000000, i64 0}
!1 = !{i64 32, i64 64}
!2 = !{i64 -7000000000, i64 -1000}
@@ -100,3 +101,89 @@ define <4 x i64> @v4_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
!5 = !{i64 38, i64 60}
!6 = !{i64 -9000000000, i64 -3002}
!7 = !{i64 38, i64 50}
+
+; Test that negative 64-bit values shifted by amounts that may be below 32 bits do NOT have
+; a hi-result created by moving an all-ones constant.
+
+define i64 @no_transform_scalar_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: no_transform_scalar_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
+; CHECK-NEXT: flat_load_dword v6, v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i64 v[0:1], v6, v[4:5]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load i64, ptr %arg0.ptr, !range !8, !noundef !{}
+ %shift.amt = load i64, ptr %arg1.ptr, !range !9, !noundef !{}
+ %ashr = ashr i64 %val, %shift.amt
+ ret i64 %ashr
+}
+
+define <2 x i64> @no_transform_v2_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: no_transform_v2_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[0:1]
+; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i64 v[0:1], v8, v[4:5]
+; CHECK-NEXT: v_ashrrev_i64 v[2:3], v10, v[6:7]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load <2 x i64>, ptr %arg0.ptr, !range !10, !noundef !{}
+ %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !11, !noundef !{}
+ %ashr = ashr <2 x i64> %val, %shift.amt
+ ret <2 x i64> %ashr
+}
+
+define <3 x i64> @no_transform_v3_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: no_transform_v3_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[0:1]
+; CHECK-NEXT: flat_load_dwordx2 v[11:12], v[0:1] offset:16
+; CHECK-NEXT: flat_load_dword v5, v[2:3] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i64 v[0:1], v4, v[7:8]
+; CHECK-NEXT: v_ashrrev_i64 v[2:3], v6, v[9:10]
+; CHECK-NEXT: v_ashrrev_i64 v[4:5], v5, v[11:12]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load <3 x i64>, ptr %arg0.ptr, !range !12, !noundef !{}
+ %shift.amt = load <3 x i64>, ptr %arg1.ptr, !range !13, !noundef !{}
+ %ashr = ashr <3 x i64> %val, %shift.amt
+ ret <3 x i64> %ashr
+}
+
+define <4 x i64> @no_transform_v4_ashr_metadata(ptr %arg0.ptr, ptr %arg1.ptr) {
+; CHECK-LABEL: no_transform_v4_ashr_metadata:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[2:3]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: flat_load_dwordx4 v[7:10], v[0:1]
+; CHECK-NEXT: flat_load_dwordx4 v[11:14], v[0:1] offset:16
+; CHECK-NEXT: flat_load_dwordx4 v[15:18], v[2:3] offset:16
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i64 v[0:1], v4, v[7:8]
+; CHECK-NEXT: v_ashrrev_i64 v[2:3], v6, v[9:10]
+; CHECK-NEXT: v_ashrrev_i64 v[4:5], v15, v[11:12]
+; CHECK-NEXT: v_ashrrev_i64 v[6:7], v17, v[13:14]
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %val = load <4 x i64>, ptr %arg0.ptr, !range !14, !noundef !{}
+ %shift.amt = load <4 x i64>, ptr %arg1.ptr, !range !15, !noundef !{}
+ %ashr = ashr <4 x i64> %val, %shift.amt
+ ret <4 x i64> %ashr
+}
+
+; Ranges used when transformation is invalid
+!8 = !{i64 -10000000000, i64 0}
+!9 = !{i64 29, i64 64}
+!10 = !{i64 -11000000000, i64 -1000}
+!11 = !{i64 28, i64 64}
+!12 = !{i64 -12000000000, i64 -2001}
+!13 = !{i64 27, i64 60}
+!14 = !{i64 -13000000000, i64 -3002}
+!15 = !{i64 26, i64 50}
+
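Along the same lines, a hypothetical host-side check (not part of the patch) of why the ranges above defeat the reduction: once the shift amount can drop below 32, bits of the low word still reach the result, so the i64 ashr is no longer a function of the hi word alone and the full v_ashrrev_i64 has to stay. Again this assumes arithmetic right shift of negative values (guaranteed since C++20).

#include <cassert>
#include <cstdint>

int main() {
  // Two negative values with the same hi word but different low words.
  const int64_t a = -(int64_t(1) << 32);    // hi word 0xFFFFFFFF, lo word 0
  const int64_t b = a + (int64_t(1) << 30); // same hi word, lo word 0x40000000
  // Below 32, low-word bits show up in the result...
  assert((a >> 29) != (b >> 29));
  // ...while for amounts in [32, 63] only the hi word matters.
  for (unsigned s = 32; s < 64; ++s)
    assert((a >> s) == (b >> s));
  return 0;
}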
From 2e2950d047ef93906939d7ceb5ed7a8ec6f83d80 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Tue, 1 Jul 2025 16:23:19 -0500
Subject: [PATCH 3/4] Add parens on if-side to match else-side
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index a877b28119c16..d9ae0d6896e53 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4221,9 +4221,9 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
KnownBits KnownLHS = DAG.computeKnownBits(LHS);
SDValue HiShift;
- if (KnownLHS.isNegative())
+ if (KnownLHS.isNegative()) {
HiShift = DAG.getAllOnesConstant(SL, TargetType);
- else {
+ } else {
Hi = DAG.getFreeze(Hi);
HiShift = DAG.getNode(ISD::SRA, SL, TargetType, Hi, ShiftFullAmt);
}
From 54615b51fcc183a8ef2a3885fcfd692329797959 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Wed, 2 Jul 2025 09:34:18 -0500
Subject: [PATCH 4/4] Preserve exact flag
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 3 +-
.../CodeGen/AMDGPU/ashr64_reduce_flags.ll | 43 +++++++++++++++++++
2 files changed, 45 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/ashr64_reduce_flags.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d9ae0d6896e53..0997ef8201e63 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4227,7 +4227,8 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
Hi = DAG.getFreeze(Hi);
HiShift = DAG.getNode(ISD::SRA, SL, TargetType, Hi, ShiftFullAmt);
}
- SDValue NewShift = DAG.getNode(ISD::SRA, SL, TargetType, Hi, ShiftAmt);
+ SDValue NewShift =
+ DAG.getNode(ISD::SRA, SL, TargetType, Hi, ShiftAmt, N->getFlags());
SDValue Vec;
if (VT.isVector()) {
diff --git a/llvm/test/CodeGen/AMDGPU/ashr64_reduce_flags.ll b/llvm/test/CodeGen/AMDGPU/ashr64_reduce_flags.ll
new file mode 100644
index 0000000000000..59f3a4915b9a7
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/ashr64_reduce_flags.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -stop-after=finalize-isel -o - %s | FileCheck %s
+
+;; Test that reduction of:
+;;
+;; DST = ashr i64 X, Y
+;;
+;; where Y is in the range [32-63] to:
+;;
+;; DST = [ashr i32 HI(X), (Y & 0x1F), ashr i32 HI(X), 31]
+;;
+;; preserves flags
+
+define i64 @ashr_exact(i64 %arg0, i64 %shift_amt) {
+ ; CHECK-LABEL: name: ashr_exact
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[DEF]]
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub1
+ ; CHECK-NEXT: [[DEF3:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF4:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF5:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[DEF3]]
+ ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, killed [[COPY4]], %subreg.sub1
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0
+ ; CHECK-NEXT: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = exact V_ASHRREV_I32_e64 killed [[COPY5]], [[COPY3]], implicit $exec
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 31
+ ; CHECK-NEXT: [[V_ASHRREV_I32_e64_1:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 killed [[S_MOV_B32_]], [[COPY3]], implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[V_ASHRREV_I32_e64_]]
+ ; CHECK-NEXT: $vgpr1 = COPY [[V_ASHRREV_I32_e64_1]]
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ %or = or i64 %shift_amt, 32
+ %ashr = ashr exact i64 %arg0, %or
+ ret i64 %ashr
+}
+
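To illustrate why forwarding N->getFlags() to the reduced shift is sound for the exact flag (again a hypothetical host-side check, not code from the patch): with a shift amount in [32, 63], the bits discarded by the reduced 32-bit ashr of the hi word are a subset of the bits discarded by the original i64 ashr, so if the original shift was exact the reduced one is as well. Arithmetic right shift of negative values is assumed (guaranteed since C++20).

#include <cassert>
#include <cstdint>

int main() {
  const int64_t x = -5LL * (int64_t(1) << 40);  // low 40 bits are zero
  const unsigned amt = 40;                      // shift amount in [32, 63]
  assert((x & ((int64_t(1) << amt) - 1)) == 0); // original i64 ashr is exact
  const int32_t hi = int32_t(x >> 32);          // hi word of the source
  const unsigned hi_amt = amt & 0x1F;           // reduced 32-bit shift amount
  assert((hi & ((1 << hi_amt) - 1)) == 0);      // reduced ashr is exact too
  assert(int64_t(hi >> hi_amt) == (x >> amt));  // and yields the same lo word
  return 0;
}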