[llvm] [AMDGPU] Handle MachineOperandType global address in SIFoldOperands. (PR #135424)
Akhilesh Moorthy via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 24 09:10:56 PDT 2025
https://github.com/isakhilesh updated https://github.com/llvm/llvm-project/pull/135424
>From ea3c20aa84e4b74e0bc497c2a0e9b81aa3907f54 Mon Sep 17 00:00:00 2001
From: Akhilesh Moorthy <akhilesh.moorthy at amd.com>
Date: Wed, 23 Apr 2025 17:52:03 -0500
Subject: [PATCH] [AMDGPU] Handle MachineOperandType global address in
SIFoldOperands.
Fixes SWDEV-504645.
This patch handles the global address operand type properly, fixing the
assertion failure: Assertion `(isFI() || isCPI() || isTargetIndex() ||
isJTI()) && "Wrong MachineOperand accessor"' failed.
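MachineOperand::getIndex() asserts that the operand is a frame index,
constant-pool index, target index, or jump-table index, so falling through
to ChangeToFrameIndex() with a global address source fires the assertion
above. The sketch below is illustrative only (simplified from the actual
SIFoldOperands code; "Dst" is a local name introduced here for readability)
and shows the intended dispatch on the folded operand's kind:

    // Illustrative sketch; the real code operates on UseMI->getOperand(1)
    // directly rather than through a named reference.
    MachineOperand &Dst = UseMI->getOperand(1);
    if (OpToFold.isImm())
      Dst.ChangeToImmediate(OpToFold.getImm());
    else if (OpToFold.isGlobal())
      // A global address carries a GlobalValue, an offset and target flags;
      // getIndex() would assert on it, so convert with ChangeToGA() instead.
      Dst.ChangeToGA(OpToFold.getGlobal(), OpToFold.getOffset(),
                     OpToFold.getTargetFlags());
    else
      // Remaining expected kind here: a frame index operand.
      Dst.ChangeToFrameIndex(OpToFold.getIndex());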
---
llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 4 ++
llvm/test/CodeGen/AMDGPU/swdev-504645.ll | 47 +++++++++++++++++++++++
2 files changed, 51 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/swdev-504645.ll
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 1547142a8d5c6..3cfbe14d4e7d2 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1161,6 +1161,10 @@ void SIFoldOperandsImpl::foldOperand(
if (OpToFold.isImm())
UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
+ else if (OpToFold.isGlobal())
+ UseMI->getOperand(1).ChangeToGA(OpToFold.getGlobal(),
+ OpToFold.getOffset(),
+ OpToFold.getTargetFlags());
else
UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
UseMI->removeOperand(2); // Remove exec read (or src1 for readlane)
diff --git a/llvm/test/CodeGen/AMDGPU/swdev-504645.ll b/llvm/test/CodeGen/AMDGPU/swdev-504645.ll
new file mode 100644
index 0000000000000..3467a51cbc9b5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/swdev-504645.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck %s
+
+define void @test_load_zext() {
+; CHECK-LABEL: test_load_zext:
+; CHECK: ; %bb.0: ; %.entry
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_mov_b32 s0, s33
+; CHECK-NEXT: s_mov_b32 s33, s32
+; CHECK-NEXT: s_or_saveexec_b64 s[2:3], -1
+; CHECK-NEXT: scratch_store_dword off, v40, s33 ; 4-byte Folded Spill
+; CHECK-NEXT: s_mov_b64 exec, s[2:3]
+; CHECK-NEXT: s_add_i32 s32, s32, 16
+; CHECK-NEXT: v_writelane_b32 v40, s0, 2
+; CHECK-NEXT: s_getpc_b64 s[0:1]
+; CHECK-NEXT: s_add_u32 s0, s0, test_buffer_load_sgpr_plus_imm_offset_noflags@gotpcrel32@lo+4
+; CHECK-NEXT: s_addc_u32 s1, s1, test_buffer_load_sgpr_plus_imm_offset_noflags@gotpcrel32@hi+12
+; CHECK-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; CHECK-NEXT: v_writelane_b32 v40, s30, 0
+; CHECK-NEXT: s_mov_b32 s0, DescriptorBuffer@abs32@lo
+; CHECK-NEXT: v_writelane_b32 v40, s31, 1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[2:3]
+; CHECK-NEXT: v_readlane_b32 s31, v40, 1
+; CHECK-NEXT: v_readlane_b32 s30, v40, 0
+; CHECK-NEXT: s_mov_b32 s32, s33
+; CHECK-NEXT: v_readlane_b32 s0, v40, 2
+; CHECK-NEXT: s_or_saveexec_b64 s[2:3], -1
+; CHECK-NEXT: scratch_load_dword v40, off, s33 ; 4-byte Folded Reload
+; CHECK-NEXT: s_mov_b64 exec, s[2:3]
+; CHECK-NEXT: s_mov_b32 s33, s0
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+.entry:
+ %0 = call i32 @llvm.amdgcn.reloc.constant(metadata !0)
+ call void @test_buffer_load_sgpr_plus_imm_offset_noflags(i32 %0)
+ ret void
+}
+
+declare void @test_buffer_load_sgpr_plus_imm_offset_noflags(i32 inreg)
+
+; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare i32 @llvm.amdgcn.reloc.constant(metadata) #0
+
+attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+
+!0 = !{!"DescriptorBuffer", i32 4, i32 8, i32 0, i32 0}