[llvm] [AMDGPU] Dynamic VGPR support for llvm.amdgcn.cs.chain (PR #130094)
Diana Picus via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 19 02:34:09 PDT 2025
https://github.com/rovka updated https://github.com/llvm/llvm-project/pull/130094
>From 3cacd079d8671473213fd273a8df6839723728a2 Mon Sep 17 00:00:00 2001
From: Jannik Silvanus <jannik.silvanus at amd.com>
Date: Thu, 30 Mar 2023 17:45:38 +0200
Subject: [PATCH 01/23] [AMDGPU] Add GFX12 S_ALLOC_VGPR instruction
This patch only adds the instruction for disassembly support.
We have neither an intrinsic nor codegen support, and it is
unclear whether we actually want to ever have an intrinsic,
given the fragile semantics.
For now, it will be generated only by the backend in very specific
circumstances.
---
llvm/lib/Target/AMDGPU/SOPInstructions.td | 7 +++++++
llvm/test/MC/AMDGPU/gfx11_unsupported.s | 3 +++
llvm/test/MC/AMDGPU/gfx12_asm_sop1.s | 9 +++++++++
llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sop1.txt | 9 +++++++++
4 files changed, 28 insertions(+)
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index 5e62ceac281b8..eeac9c1ad1084 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -423,6 +423,12 @@ let SubtargetPredicate = isGFX11Plus in {
}
} // End SubtargetPredicate = isGFX11Plus
+let SubtargetPredicate = isGFX12Plus in {
+ let hasSideEffects = 1, Defs = [SCC] in {
+ def S_ALLOC_VGPR : SOP1_0_32 <"s_alloc_vgpr">;
+ }
+} // End SubtargetPredicate = isGFX12Plus
+
class SOP1_F32_Inst<string opName, SDPatternOperator Op, ValueType vt0=f32,
ValueType vt1=vt0> :
SOP1_32<opName, [(set vt0:$sdst, (UniformUnaryFrag<Op> vt1:$src0))]>;
@@ -2048,6 +2054,7 @@ defm S_GET_BARRIER_STATE_M0 : SOP1_M0_Real_gfx12<0x050>;
defm S_BARRIER_SIGNAL_IMM : SOP1_IMM_Real_gfx12<0x04e>;
defm S_BARRIER_SIGNAL_ISFIRST_IMM : SOP1_IMM_Real_gfx12<0x04f>;
defm S_GET_BARRIER_STATE_IMM : SOP1_IMM_Real_gfx12<0x050>;
+defm S_ALLOC_VGPR : SOP1_Real_gfx12<0x053>;
defm S_SLEEP_VAR : SOP1_IMM_Real_gfx12<0x058>;
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/MC/AMDGPU/gfx11_unsupported.s b/llvm/test/MC/AMDGPU/gfx11_unsupported.s
index d5d10ce548c4d..cd30647fe6378 100644
--- a/llvm/test/MC/AMDGPU/gfx11_unsupported.s
+++ b/llvm/test/MC/AMDGPU/gfx11_unsupported.s
@@ -322,6 +322,9 @@ image_sample_cd_o v252, v[1:4], s[8:15], s[12:15] dmask:0x1
image_sample_cd_o_g16 v[5:6], v[1:4], s[8:15], s[12:15] dmask:0x3
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+s_alloc_vgpr s0
+// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
s_atomic_add flat_scratch_hi, s[2:3], s0
// CHECK: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_sop1.s b/llvm/test/MC/AMDGPU/gfx12_asm_sop1.s
index 1e12e5bb48828..3828a4d36adcc 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_sop1.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_sop1.s
@@ -1,5 +1,14 @@
// RUN: llvm-mc -triple=amdgcn -show-encoding -mcpu=gfx1200 %s | FileCheck --check-prefix=GFX12 %s
+s_alloc_vgpr 0x1235
+// GFX12: encoding: [0xff,0x53,0x80,0xbe,0x35,0x12,0x00,0x00]
+
+s_alloc_vgpr 18
+// GFX12: encoding: [0x92,0x53,0x80,0xbe]
+
+s_alloc_vgpr s35
+// GFX12: encoding: [0x23,0x53,0x80,0xbe]
+
s_sleep_var 0x1234
// GFX12: encoding: [0xff,0x58,0x80,0xbe,0x34,0x12,0x00,0x00]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sop1.txt
index fa7d020bdd726..2ee84fa976d79 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sop1.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sop1.txt
@@ -1,5 +1,14 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX12 %s
+# GFX12: s_alloc_vgpr 0x1235 ; encoding: [0xff,0x53,0x80,0xbe,0x35,0x12,0x00,0x00]
+0xff,0x53,0x80,0xbe,0x35,0x12,0x00,0x00
+
+# GFX12: s_alloc_vgpr 18 ; encoding: [0x92,0x53,0x80,0xbe]
+0x92,0x53,0x80,0xbe
+
+# GFX12: s_alloc_vgpr s35 ; encoding: [0x23,0x53,0x80,0xbe]
+0x23,0x53,0x80,0xbe
+
# GFX12: s_sleep_var 0x1234 ; encoding: [0xff,0x58,0x80,0xbe,0x34,0x12,0x00,0x00]
0xff,0x58,0x80,0xbe,0x34,0x12,0x00,0x00
>From b2a7bdc3954d2bf72e99d730ce00159c2550f563 Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Mon, 23 Oct 2023 10:36:31 +0200
Subject: [PATCH 02/23] [AMDGPU] Add SubtargetFeature for dynamic VGPR mode
This represents a hardware mode supported only for wave32 compute
shaders. When enabled, we set the `.dynamic_vgpr_en` field of
`.compute_registers` to true in the PAL metadata.
---
llvm/docs/AMDGPUUsage.rst | 6 ++++++
llvm/lib/Target/AMDGPU/AMDGPU.td | 6 ++++++
llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp | 3 +++
llvm/lib/Target/AMDGPU/GCNSubtarget.h | 3 +++
llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll | 13 ++++++++-----
5 files changed, 26 insertions(+), 5 deletions(-)
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index def6addd595e8..59cc08a59ed7c 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -758,6 +758,12 @@ For example:
enabled will execute correctly but may be less
performant than code generated for XNACK replay
disabled.
+
+ dynamic-vgpr TODO Represents the "Dynamic VGPR" hardware mode, introduced in GFX12.
+ Waves launched in this mode may allocate or deallocate the VGPRs
+ using dedicated instructions, but may not send the DEALLOC_VGPRS
+ message.
+
=============== ============================ ==================================================
.. _amdgpu-target-id:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index effc8d2ed6b49..31a98ee132bf6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -1239,6 +1239,12 @@ def FeatureXF32Insts : SubtargetFeature<"xf32-insts",
"v_mfma_f32_16x16x8_xf32 and v_mfma_f32_32x32x4_xf32"
>;
+def FeatureDynamicVGPR : SubtargetFeature <"dynamic-vgpr",
+ "DynamicVGPR",
+ "true",
+ "Enable dynamic VGPR mode"
+>;
+
// Dummy feature used to disable assembler instructions.
def FeatureDisable : SubtargetFeature<"",
"FeatureDisable","true",
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 31e0bd8d652bc..13e61756e3036 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -1414,6 +1414,9 @@ static void EmitPALMetadataCommon(AMDGPUPALMetadata *MD,
MD->setHwStage(CC, ".trap_present",
(bool)CurrentProgramInfo.TrapHandlerEnable);
MD->setHwStage(CC, ".excp_en", CurrentProgramInfo.EXCPEnable);
+
+ if (ST.isDynamicVGPREnabled())
+ MD->setComputeRegisters(".dynamic_vgpr_en", true);
}
MD->setHwStage(CC, ".lds_size",
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 6664a70572ded..1254cbad83b60 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -190,6 +190,7 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
/// indicates a lack of S_CLAUSE support.
unsigned MaxHardClauseLength = 0;
bool SupportsSRAMECC = false;
+ bool DynamicVGPR = false;
// This should not be used directly. 'TargetID' tracks the dynamic settings
// for SRAMECC.
@@ -1647,6 +1648,8 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
return true;
}
+ bool isDynamicVGPREnabled() const { return DynamicVGPR; }
+
bool requiresDisjointEarlyClobberAndUndef() const override {
// AMDGPU doesn't care if early-clobber and undef operands are allocated
// to the same register.
diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll
index 7536e83a9da6b..fa22089978c2e 100644
--- a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll
+++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll
@@ -1,4 +1,6 @@
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+dynamic-vgpr <%s | FileCheck %s --check-prefixes=CHECK,DVGPR
; CHECK-LABEL: {{^}}_amdgpu_cs_main:
; CHECK: ; TotalNumSgprs: 4
@@ -8,6 +10,7 @@
; CHECK-NEXT: amdpal.pipelines:
; CHECK-NEXT: - .api: Vulkan
; CHECK-NEXT: .compute_registers:
+; DVGPR-NEXT: .dynamic_vgpr_en: true
; CHECK-NEXT: .tg_size_en: true
; CHECK-NEXT: .tgid_x_en: false
; CHECK-NEXT: .tgid_y_en: false
@@ -57,7 +60,7 @@
; CHECK-NEXT: .entry_point_symbol: _amdgpu_cs_main
; CHECK-NEXT: .excp_en: 0
; CHECK-NEXT: .float_mode: 0xc0
-; CHECK-NEXT: .ieee_mode: false
+; GFX11-NEXT: .ieee_mode: false
; CHECK-NEXT: .image_op: false
; CHECK-NEXT: .lds_size: 0
; CHECK-NEXT: .mem_ordered: true
@@ -112,7 +115,7 @@
; CHECK-NEXT: .debug_mode: false
; CHECK-NEXT: .entry_point: _amdgpu_gs
; CHECK-NEXT: .entry_point_symbol: gs_shader
-; CHECK-NEXT: .ieee_mode: false
+; GFX11-NEXT: .ieee_mode: false
; CHECK-NEXT: .lds_size: 0x200
; CHECK-NEXT: .mem_ordered: true
; CHECK-NEXT: .scratch_en: false
@@ -124,7 +127,7 @@
; CHECK-NEXT: .debug_mode: false
; CHECK-NEXT: .entry_point: _amdgpu_hs
; CHECK-NEXT: .entry_point_symbol: hs_shader
-; CHECK-NEXT: .ieee_mode: false
+; GFX11-NEXT: .ieee_mode: false
; CHECK-NEXT: .lds_size: 0x1000
; CHECK-NEXT: .mem_ordered: true
; CHECK-NEXT: .scratch_en: false
@@ -136,7 +139,7 @@
; CHECK-NEXT: .debug_mode: false
; CHECK-NEXT: .entry_point: _amdgpu_ps
; CHECK-NEXT: .entry_point_symbol: ps_shader
-; CHECK-NEXT: .ieee_mode: false
+; GFX11-NEXT: .ieee_mode: false
; CHECK-NEXT: .lds_size: 0
; CHECK-NEXT: .mem_ordered: true
; CHECK-NEXT: .scratch_en: false
>From c29d8202c06488a9466aea49dda4cf2b4663236e Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Mon, 23 Oct 2023 11:46:19 +0200
Subject: [PATCH 03/23] [AMDGPU] Deallocate VGPRs before exiting in dynamic
VGPR mode
In dynamic VGPR mode, waves must deallocate all VGPRs before exiting. If
the shader program does not do this, hardware inserts `S_ALLOC_VGPR 0`
before S_ENDPGM, but this may incur some performance cost. Therefore
it's better if the compiler proactively generates that instruction.
This patch extends `si-insert-waitcnts` to deallocate the VGPRs via
a `S_ALLOC_VGPR 0` before any `S_ENDPGM` when in dynamic VGPR mode.
---
llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp | 60 +--
.../CodeGen/AMDGPU/release-vgprs-gfx12.mir | 356 ++++++++++++++++++
2 files changed, 393 insertions(+), 23 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/release-vgprs-gfx12.mir
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 7e6bce2bf5f12..42ef23e836a58 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1647,17 +1647,21 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
(MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
Wait = Wait.combined(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false));
}
- // Identify S_ENDPGM instructions which may have to wait for outstanding VMEM
- // stores. In this case it can be useful to send a message to explicitly
- // release all VGPRs before the stores have completed, but it is only safe to
- // do this if:
- // * there are no outstanding scratch stores
- // * we are not in Dynamic VGPR mode
+ // In dynamic VGPR mode, we want to release the VGPRs before the wave exits.
+ // Technically the hardware will do this on its own if we don't, but that
+ // might cost extra cycles compared to doing it explicitly.
+ // When not in dynamic VGPR mode, identify S_ENDPGM instructions which may
+ // have to wait for outstanding VMEM stores. In this case it can be useful to
+ // send a message to explicitly release all VGPRs before the stores have
+ // completed, but it is only safe to do this if there are no outstanding
+ // scratch stores.
else if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
MI.getOpcode() == AMDGPU::S_ENDPGM_SAVED) {
- if (ST->getGeneration() >= AMDGPUSubtarget::GFX11 && !WCG->isOptNone() &&
- ScoreBrackets.getScoreRange(STORE_CNT) != 0 &&
- !ScoreBrackets.hasPendingEvent(SCRATCH_WRITE_ACCESS))
+ if (!WCG->isOptNone() &&
+ (ST->isDynamicVGPREnabled() ||
+ (ST->getGeneration() >= AMDGPUSubtarget::GFX11 &&
+ ScoreBrackets.getScoreRange(STORE_CNT) != 0 &&
+ !ScoreBrackets.hasPendingEvent(SCRATCH_WRITE_ACCESS))))
ReleaseVGPRInsts.insert(&MI);
}
// Resolve vm waits before gs-done.
@@ -2610,26 +2614,36 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
}
}
- // Insert DEALLOC_VGPR messages before previously identified S_ENDPGM
- // instructions.
+ // Deallocate the VGPRs before previously identified S_ENDPGM instructions.
+ // This is done in different ways depending on how the VGPRs were allocated
+ // (i.e. whether we're in dynamic VGPR mode or not).
// Skip deallocation if kernel is waveslot limited vs VGPR limited. A short
// waveslot limited kernel runs slower with the deallocation.
- if (!ReleaseVGPRInsts.empty() &&
- (MF.getFrameInfo().hasCalls() ||
- ST->getOccupancyWithNumVGPRs(
- TRI->getNumUsedPhysRegs(*MRI, AMDGPU::VGPR_32RegClass)) <
- AMDGPU::IsaInfo::getMaxWavesPerEU(ST))) {
+ if (ST->isDynamicVGPREnabled()) {
for (MachineInstr *MI : ReleaseVGPRInsts) {
- if (ST->requiresNopBeforeDeallocVGPRs()) {
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
- TII->get(AMDGPU::S_NOP))
- .addImm(0);
- }
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
- TII->get(AMDGPU::S_SENDMSG))
- .addImm(AMDGPU::SendMsg::ID_DEALLOC_VGPRS_GFX11Plus);
+ TII->get(AMDGPU::S_ALLOC_VGPR))
+ .addImm(0);
Modified = true;
}
+ } else {
+ if (!ReleaseVGPRInsts.empty() &&
+ (MF.getFrameInfo().hasCalls() ||
+ ST->getOccupancyWithNumVGPRs(
+ TRI->getNumUsedPhysRegs(*MRI, AMDGPU::VGPR_32RegClass)) <
+ AMDGPU::IsaInfo::getMaxWavesPerEU(ST))) {
+ for (MachineInstr *MI : ReleaseVGPRInsts) {
+ if (ST->requiresNopBeforeDeallocVGPRs()) {
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ TII->get(AMDGPU::S_NOP))
+ .addImm(0);
+ }
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ TII->get(AMDGPU::S_SENDMSG))
+ .addImm(AMDGPU::SendMsg::ID_DEALLOC_VGPRS_GFX11Plus);
+ Modified = true;
+ }
+ }
}
ReleaseVGPRInsts.clear();
PreheadersToFlush.clear();
diff --git a/llvm/test/CodeGen/AMDGPU/release-vgprs-gfx12.mir b/llvm/test/CodeGen/AMDGPU/release-vgprs-gfx12.mir
new file mode 100644
index 0000000000000..884b5f8b6f018
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/release-vgprs-gfx12.mir
@@ -0,0 +1,356 @@
+# RUN: llc -O2 -march=amdgcn -mcpu=gfx1200 -run-pass=si-insert-waitcnts -verify-machineinstrs -o - %s | FileCheck %s -check-prefixes=CHECK,DEFAULT
+# RUN: llc -O2 -march=amdgcn -mcpu=gfx1200 -mattr=+dynamic-vgpr -run-pass=si-insert-waitcnts -verify-machineinstrs -o - %s | FileCheck %s -check-prefixes=CHECK,DVGPR
+
+--- |
+ define amdgpu_ps void @tbuffer_store1() { ret void }
+ define amdgpu_ps void @tbuffer_store2() { ret void }
+ define amdgpu_ps void @flat_store() { ret void }
+ define amdgpu_ps void @global_store() { ret void }
+ define amdgpu_ps void @buffer_store_format() { ret void }
+ define amdgpu_ps void @ds_write_b32() { ret void }
+ define amdgpu_ps void @global_store_dword() { ret void }
+ define amdgpu_ps void @multiple_basic_blocks1() { ret void }
+ define amdgpu_ps void @multiple_basic_blocks2() { ret void }
+ define amdgpu_ps void @multiple_basic_blocks3() { ret void }
+ define amdgpu_ps void @recursive_loop() { ret void }
+ define amdgpu_ps void @recursive_loop_vmem() { ret void }
+ define amdgpu_ps void @image_store() { ret void }
+ define amdgpu_ps void @scratch_store() { ret void }
+ define amdgpu_ps void @buffer_atomic() { ret void }
+ define amdgpu_ps void @flat_atomic() { ret void }
+ define amdgpu_ps void @global_atomic() { ret void }
+ define amdgpu_ps void @image_atomic() { ret void }
+ define amdgpu_ps void @global_store_optnone() noinline optnone { ret void }
+...
+
+---
+name: tbuffer_store1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: tbuffer_store1
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ TBUFFER_STORE_FORMAT_XYZW_OFFSET_exact killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, killed renamable $sgpr4, 42, 117, 0, 0, implicit $exec
+ S_ENDPGM 0
+...
+
+---
+name: tbuffer_store2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: tbuffer_store2
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ TBUFFER_STORE_FORMAT_XYZW_OFFEN_exact killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, killed renamable $vgpr4, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 115, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 7)
+ S_ENDPGM 0
+...
+
+---
+name: flat_store
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: flat_store
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ FLAT_STORE_DWORDX4 $vgpr49_vgpr50, $vgpr26_vgpr27_vgpr28_vgpr29, 0, 0, implicit $exec, implicit $flat_scr
+ S_ENDPGM 0
+...
+
+---
+name: global_store
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: global_store
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ GLOBAL_STORE_DWORD undef renamable $vgpr0_vgpr1, killed renamable $vgpr1, 0, 4, implicit $exec
+ S_WAIT_STORECNT 0
+ S_ENDPGM 0
+...
+
+---
+name: buffer_store_format
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: buffer_store_format
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ BUFFER_STORE_FORMAT_D16_X_OFFEN_exact killed renamable $vgpr0, killed renamable $vgpr1, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, killed renamable $sgpr4, 0, 0, 0, implicit $exec
+ S_ENDPGM 0
+...
+
+---
+name: ds_write_b32
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: ds_write_b32
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ renamable $vgpr0 = IMPLICIT_DEF
+ renamable $vgpr1 = IMPLICIT_DEF
+ DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 12, 0, implicit $exec, implicit $m0
+ S_ENDPGM 0
+
+...
+---
+name: global_store_dword
+body: |
+ bb.0:
+ liveins: $vgpr0, $sgpr0_sgpr1
+
+ ; CHECK-LABEL: name: global_store_dword
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ renamable $vgpr0 = V_MAD_I32_I24_e64 killed $vgpr1, killed $vgpr0, killed $sgpr2, 0, implicit $exec
+ GLOBAL_STORE_DWORD_SADDR killed renamable $vgpr2, killed renamable $vgpr0, killed renamable $sgpr0_sgpr1, 0, 0, implicit $exec
+ S_ENDPGM 0
+...
+
+---
+name: multiple_basic_blocks1
+body: |
+ ; CHECK-LABEL: name: multiple_basic_blocks1
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ bb.0:
+ successors: %bb.1
+
+ renamable $vgpr0 = BUFFER_LOAD_FORMAT_X_IDXEN killed renamable $vgpr0, renamable $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec
+ S_BRANCH %bb.1
+
+ bb.1:
+ successors: %bb.1, %bb.2
+
+ $vgpr1 = V_ADD_U32_e32 renamable $vgpr0, renamable $vgpr2, implicit $exec
+ S_CMP_LG_U32 killed renamable $sgpr3, renamable $sgpr4, implicit-def $scc
+ S_CBRANCH_SCC1 %bb.1, implicit killed $scc
+ S_BRANCH %bb.2
+
+ bb.2:
+ S_ENDPGM 0
+
+...
+
+---
+name: multiple_basic_blocks2
+body: |
+ ; CHECK-LABEL: name: multiple_basic_blocks2
+ ; CHECK: bb.2:
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ bb.0:
+ successors: %bb.2
+
+ TBUFFER_STORE_FORMAT_X_OFFSET_exact killed renamable $vgpr0, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 125, 0, 0, implicit $exec
+ $vgpr1 = V_ADD_U32_e32 renamable $vgpr0, renamable $vgpr2, implicit $exec
+ S_BRANCH %bb.2
+
+ bb.1:
+ successors: %bb.2
+
+ $vgpr1 = V_ADD_U32_e32 renamable $vgpr0, renamable $vgpr2, implicit $exec
+ TBUFFER_STORE_FORMAT_X_OFFSET_exact killed renamable $vgpr0, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 125, 0, 0, implicit $exec
+ S_BRANCH %bb.2
+
+ bb.2:
+ S_ENDPGM 0
+...
+
+---
+name: multiple_basic_blocks3
+body: |
+ ; CHECK-LABEL: name: multiple_basic_blocks3
+ ; CHECK: bb.4:
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ bb.0:
+ successors: %bb.2
+
+ $vgpr1 = V_ADD_U32_e32 renamable $vgpr0, renamable $vgpr2, implicit $exec
+ TBUFFER_STORE_FORMAT_X_OFFSET_exact killed renamable $vgpr0, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 125, 0, 0, implicit $exec
+ S_BRANCH %bb.2
+
+ bb.1:
+ successors: %bb.2
+
+ $vgpr1 = V_ADD_U32_e32 renamable $vgpr0, renamable $vgpr2, implicit $exec
+ S_BRANCH %bb.2
+
+ bb.2:
+ successors: %bb.4
+
+ S_BRANCH %bb.4
+
+ bb.3:
+ successors: %bb.4
+
+ $vgpr1 = V_ADD_U32_e32 renamable $vgpr0, renamable $vgpr2, implicit $exec
+ S_BRANCH %bb.4
+
+ bb.4:
+ S_ENDPGM 0
+...
+
+---
+name: recursive_loop
+body: |
+ ; CHECK-LABEL: name: recursive_loop
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ bb.0:
+ successors: %bb.1
+
+ renamable $vgpr0 = BUFFER_LOAD_FORMAT_X_IDXEN killed renamable $vgpr0, renamable $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec
+ S_BRANCH %bb.1
+
+ bb.1:
+ successors: %bb.1, %bb.2
+
+ S_CMP_LG_U32 killed renamable $sgpr3, renamable $sgpr4, implicit-def $scc
+ S_CBRANCH_SCC1 %bb.1, implicit killed $scc
+ S_BRANCH %bb.2
+
+ bb.2:
+ S_ENDPGM 0
+...
+
+---
+name: recursive_loop_vmem
+body: |
+ ; CHECK-LABEL: name: recursive_loop_vmem
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ bb.0:
+ successors: %bb.1
+
+ renamable $vgpr0 = BUFFER_LOAD_FORMAT_X_IDXEN killed renamable $vgpr0, renamable $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec
+ S_BRANCH %bb.1
+
+ bb.1:
+ successors: %bb.1, %bb.2
+
+ TBUFFER_STORE_FORMAT_XYZW_OFFEN_exact killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, killed renamable $vgpr4, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 115, 0, 0, implicit $exec
+ S_CMP_LG_U32 killed renamable $sgpr3, renamable $sgpr4, implicit-def $scc
+ S_CBRANCH_SCC1 %bb.1, implicit killed $scc
+ S_BRANCH %bb.2
+
+ bb.2:
+ S_ENDPGM 0
+...
+
+---
+name: image_store
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: image_store
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ IMAGE_STORE_V2_V1_gfx11 killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, 12, 0, 1, 0, 0, -1, 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), addrspace 7)
+ S_ENDPGM 0
+...
+
+---
+name: scratch_store
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: scratch_store
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ renamable $sgpr0 = S_AND_B32 killed renamable $sgpr0, -16, implicit-def dead $scc
+ SCRATCH_STORE_DWORD_SADDR killed renamable $vgpr0, killed renamable $sgpr0, 0, 0, implicit $exec, implicit $flat_scr
+ S_ENDPGM 0
+...
+
+---
+name: buffer_atomic
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: buffer_atomic
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ BUFFER_ATOMIC_ADD_F32_OFFEN killed renamable $vgpr0, killed renamable $vgpr2, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 7)
+ S_ENDPGM 0
+...
+
+---
+name: flat_atomic
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: flat_atomic
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ renamable $vgpr0_vgpr1 = FLAT_ATOMIC_DEC_X2_RTN killed renamable $vgpr0_vgpr1, killed renamable $vgpr2_vgpr3, 40, 1, implicit $exec, implicit $flat_scr
+ S_ENDPGM 0
+...
+
+
+---
+name: global_atomic
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: global_atomic
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ renamable $vgpr0_vgpr1 = GLOBAL_ATOMIC_INC_X2_SADDR_RTN killed renamable $vgpr0, killed renamable $vgpr1_vgpr2, killed renamable $sgpr0_sgpr1, 40, 1, implicit $exec
+ S_ENDPGM 0
+...
+
+---
+name: image_atomic
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: image_atomic
+ ; CHECK-NOT: S_SENDMSG 3
+ ; DEFAULT-NOT: S_ALLOC_VGPR
+ ; DVGPR: S_ALLOC_VGPR 0
+ ; CHECK: S_ENDPGM 0
+ renamable $vgpr0_vgpr1_vgpr2_vgpr3 = IMAGE_ATOMIC_CMPSWAP_V4_V1_gfx12 killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, killed renamable $vgpr4, killed renamable $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7, 15, 0, 1, 1, 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), addrspace 7)
+ S_ENDPGM 0
+...
+
+---
+name: global_store_optnone
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: global_store_optnone
+ ; CHECK-NOT: S_SENDMSG 3
+ ; CHECK-NOT: S_ALLOC_VGPR
+ ; CHECK: S_ENDPGM 0
+ GLOBAL_STORE_DWORD undef renamable $vgpr0_vgpr1, killed renamable $vgpr1, 0, 4, implicit $exec
+ S_WAIT_STORECNT 0
+ S_ENDPGM 0
+...
>From aff1e132263dba730999eb017b7548a5d2f46b6f Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Tue, 10 Oct 2023 11:06:23 +0200
Subject: [PATCH 04/23] [AMDGPU] Dynamic VGPR support for llvm.amdgcn.cs.chain
The llvm.amdgcn.cs.chain intrinsic has a 'flags' operand which may
indicate that we want to reallocate the VGPRs before performing the
call.
A call with the following arguments:
```
llvm.amdgcn.cs.chain %callee, %exec, %sgpr_args, %vgpr_args,
/*flags*/0x1, %num_vgprs, %fallback_exec, %fallback_callee
```
is supposed to do the following:
- copy the SGPR and VGPR args into their respective registers
- try to change the VGPR allocation
- if the allocation has succeeded, set EXEC to %exec and jump to
%callee, otherwise set EXEC to %fallback_exec and jump to
%fallback_callee
This patch implements the dynamic VGPR behaviour by generating an
S_ALLOC_VGPR followed by S_CSELECT_B32/64 instructions for the EXEC and
callee. The rest of the call sequence is left undisturbed (i.e.
identical to the case where the flags are 0 and we don't use dynamic
VGPRs). We achieve this by introducing some new pseudos
(SI_CS_CHAIN_TC_Wn_DVGPR) which are expanded in the SILateBranchLowering
pass, just like the simpler SI_CS_CHAIN_TC_Wn pseudos. The main reason
is so that we don't risk other passes (particularly the PostRA
scheduler) introducing instructions between the S_ALLOC_VGPR and the
jump. Such instructions might end up using VGPRs that have been
deallocated, or the wrong EXEC mask. Once the whole backend treats
S_ALLOC_VGPR and changes to EXEC as barriers for instructions that use
VGPRs, we could in principle move the expansion earlier (but in the
absence of a good reason for that my personal preference is to keep
it later in order to make debugging easier).
Since the expansion happens after register allocation, we're
careful to select constants to immediate operands instead of letting
ISel generate S_MOVs which could interfere with register allocation
(i.e. make it look like we need more registers than we actually do).
For GFX12, S_ALLOC_VGPR only works in wave32 mode, so we bail out
during ISel in wave64 mode. However, we can define the pseudos for
wave64 too so it's easy to handle if future generations support it.
Co-authored-by: Ana Mihajlovic <Ana.Mihajlovic at amd.com>
---
llvm/include/llvm/CodeGen/SelectionDAGISel.h | 29 +-
.../SelectionDAG/SelectionDAGBuilder.cpp | 13 +-
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp | 126 +++++--
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 88 +++--
llvm/lib/Target/AMDGPU/SIInstructions.td | 76 +++--
.../Target/AMDGPU/SILateBranchLowering.cpp | 62 +++-
.../amdgcn-cs-chain-intrinsic-dyn-vgpr-w32.ll | 97 ++++++
.../isel-amdgcn-cs-chain-intrinsic-w32.ll | 36 +-
.../isel-amdgcn-cs-chain-intrinsic-w64.ll | 36 +-
...-amdgpu-cs-chain-intrinsic-dyn-vgpr-w32.ll | 315 ++++++++++++++++++
.../CodeGen/AMDGPU/remove-register-flags.mir | 19 ++
11 files changed, 746 insertions(+), 151 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/amdgcn-cs-chain-intrinsic-dyn-vgpr-w32.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-intrinsic-dyn-vgpr-w32.ll
create mode 100644 llvm/test/CodeGen/AMDGPU/remove-register-flags.mir
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/llvm/include/llvm/CodeGen/SelectionDAGISel.h
index e9452a6dc6233..55f8f19d437a0 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -328,20 +328,21 @@ class SelectionDAGISel {
};
enum {
- OPFL_None = 0, // Node has no chain or glue input and isn't variadic.
- OPFL_Chain = 1, // Node has a chain input.
- OPFL_GlueInput = 2, // Node has a glue input.
- OPFL_GlueOutput = 4, // Node has a glue output.
- OPFL_MemRefs = 8, // Node gets accumulated MemRefs.
- OPFL_Variadic0 = 1<<4, // Node is variadic, root has 0 fixed inputs.
- OPFL_Variadic1 = 2<<4, // Node is variadic, root has 1 fixed inputs.
- OPFL_Variadic2 = 3<<4, // Node is variadic, root has 2 fixed inputs.
- OPFL_Variadic3 = 4<<4, // Node is variadic, root has 3 fixed inputs.
- OPFL_Variadic4 = 5<<4, // Node is variadic, root has 4 fixed inputs.
- OPFL_Variadic5 = 6<<4, // Node is variadic, root has 5 fixed inputs.
- OPFL_Variadic6 = 7<<4, // Node is variadic, root has 6 fixed inputs.
-
- OPFL_VariadicInfo = OPFL_Variadic6
+ OPFL_None = 0, // Node has no chain or glue input and isn't variadic.
+ OPFL_Chain = 1, // Node has a chain input.
+ OPFL_GlueInput = 2, // Node has a glue input.
+ OPFL_GlueOutput = 4, // Node has a glue output.
+ OPFL_MemRefs = 8, // Node gets accumulated MemRefs.
+ OPFL_Variadic0 = 1 << 4, // Node is variadic, root has 0 fixed inputs.
+ OPFL_Variadic1 = 2 << 4, // Node is variadic, root has 1 fixed inputs.
+ OPFL_Variadic2 = 3 << 4, // Node is variadic, root has 2 fixed inputs.
+ OPFL_Variadic3 = 4 << 4, // Node is variadic, root has 3 fixed inputs.
+ OPFL_Variadic4 = 5 << 4, // Node is variadic, root has 4 fixed inputs.
+ OPFL_Variadic5 = 6 << 4, // Node is variadic, root has 5 fixed inputs.
+ OPFL_Variadic6 = 7 << 4, // Node is variadic, root has 6 fixed inputs.
+ OPFL_Variadic7 = 8 << 4, // Node is variadic, root has 7 fixed inputs.
+
+ OPFL_VariadicInfo = 15 << 4 // Mask for extracting the OPFL_VariadicN bits.
};
/// getNumFixedFromVariadicInfo - Transform an EmitNode flags word into the
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index d5a07e616236e..ba5f7e03d98e4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7996,10 +7996,6 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
return;
}
case Intrinsic::amdgcn_cs_chain: {
- assert(I.arg_size() == 5 && "Additional args not supported yet");
- assert(cast<ConstantInt>(I.getOperand(4))->isZero() &&
- "Non-zero flags not supported yet");
-
// At this point we don't care if it's amdgpu_cs_chain or
// amdgpu_cs_chain_preserve.
CallingConv::ID CC = CallingConv::AMDGPU_CS_Chain;
@@ -8026,6 +8022,15 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
assert(!Args[1].IsInReg && "VGPR args should not be marked inreg");
Args[2].IsInReg = true; // EXEC should be inreg
+ // Forward the flags and any additional arguments.
+ for (unsigned Idx = 4; Idx < I.arg_size(); ++Idx) {
+ TargetLowering::ArgListEntry Arg;
+ Arg.Node = getValue(I.getOperand(Idx));
+ Arg.Ty = I.getOperand(Idx)->getType();
+ Arg.setAttributes(&I, Idx);
+ Args.push_back(Arg);
+ }
+
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(getCurSDLoc())
.setChain(getRoot())
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 478a4c161fce7..a440617319228 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -953,8 +953,9 @@ getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
}
static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
- bool IsTailCall, bool isWave32,
- CallingConv::ID CC) {
+ bool IsTailCall, bool IsWave32,
+ CallingConv::ID CC,
+ bool IsDynamicVGPRChainCall = false) {
// For calls to amdgpu_cs_chain functions, the address is known to be uniform.
assert((AMDGPU::isChainCC(CC) || !IsIndirect || !IsTailCall) &&
"Indirect calls can't be tail calls, "
@@ -962,8 +963,12 @@ static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
if (!IsTailCall)
return AMDGPU::G_SI_CALL;
- if (AMDGPU::isChainCC(CC))
- return isWave32 ? AMDGPU::SI_CS_CHAIN_TC_W32 : AMDGPU::SI_CS_CHAIN_TC_W64;
+ if (AMDGPU::isChainCC(CC)) {
+ if (IsDynamicVGPRChainCall)
+ return IsWave32 ? AMDGPU::SI_CS_CHAIN_TC_W32_DVGPR
+ : AMDGPU::SI_CS_CHAIN_TC_W64_DVGPR;
+ return IsWave32 ? AMDGPU::SI_CS_CHAIN_TC_W32 : AMDGPU::SI_CS_CHAIN_TC_W64;
+ }
return CC == CallingConv::AMDGPU_Gfx ? AMDGPU::SI_TCRETURN_GFX :
AMDGPU::SI_TCRETURN;
@@ -972,7 +977,8 @@ static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
// Add operands to call instruction to track the callee.
static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
MachineIRBuilder &MIRBuilder,
- AMDGPUCallLowering::CallLoweringInfo &Info) {
+ AMDGPUCallLowering::CallLoweringInfo &Info,
+ bool IsDynamicVGPRChainCall = false) {
if (Info.Callee.isReg()) {
CallInst.addReg(Info.Callee.getReg());
CallInst.addImm(0);
@@ -983,7 +989,12 @@ static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
auto Ptr = MIRBuilder.buildGlobalValue(
LLT::pointer(GV->getAddressSpace(), 64), GV);
CallInst.addReg(Ptr.getReg(0));
- CallInst.add(Info.Callee);
+
+ if (IsDynamicVGPRChainCall)
+ // DynamicVGPR chain calls are always indirect.
+ CallInst.addImm(0);
+ else
+ CallInst.add(Info.Callee);
} else
return false;
@@ -1177,6 +1188,18 @@ void AMDGPUCallLowering::handleImplicitCallArguments(
}
}
+namespace {
+// Chain calls have special arguments that we need to handle. They use the
+// same indices as in the llvm.amdgcn.cs.chain intrinsic.
+enum ChainCallArgIdx {
+ Exec = 1,
+ Flags = 4,
+ NumVGPRs = 5,
+ FallbackExec = 6,
+ FallbackCallee = 7,
+};
+} // anonymous namespace
+
bool AMDGPUCallLowering::lowerTailCall(
MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
SmallVectorImpl<ArgInfo> &OutArgs) const {
@@ -1185,6 +1208,8 @@ bool AMDGPUCallLowering::lowerTailCall(
SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
const Function &F = MF.getFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();
+ const SIInstrInfo *TII = ST.getInstrInfo();
+ const SIRegisterInfo *TRI = ST.getRegisterInfo();
const SITargetLowering &TLI = *getTLI<SITargetLowering>();
// True when we're tail calling, but without -tailcallopt.
@@ -1200,34 +1225,78 @@ bool AMDGPUCallLowering::lowerTailCall(
if (!IsSibCall)
CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP);
- unsigned Opc =
- getCallOpcode(MF, Info.Callee.isReg(), true, ST.isWave32(), CalleeCC);
+ bool IsChainCall = AMDGPU::isChainCC(Info.CallConv);
+ bool IsDynamicVGPRChainCall = false;
+
+ if (IsChainCall) {
+ ArgInfo FlagsArg = Info.OrigArgs[ChainCallArgIdx::Flags];
+ const APInt &FlagsValue = cast<ConstantInt>(FlagsArg.OrigValue)->getValue();
+ if (FlagsValue.isZero()) {
+ if (Info.OrigArgs.size() != 5) {
+ LLVM_DEBUG(dbgs() << "No additional args allowed if flags == 0");
+ return false;
+ }
+ } else if (FlagsValue.isOneBitSet(0)) {
+ IsDynamicVGPRChainCall = true;
+
+ if (Info.OrigArgs.size() != 8) {
+ LLVM_DEBUG(dbgs() << "Expected 3 additional args");
+ return false;
+ }
+
+ // On GFX12, we can only change the VGPR allocation for wave32.
+ if (!ST.isWave32()) {
+ LLVM_DEBUG(dbgs() << "Dynamic VGPR mode is only supported for wave32");
+ return false;
+ }
+
+ ArgInfo FallbackExecArg = Info.OrigArgs[ChainCallArgIdx::FallbackExec];
+ assert(FallbackExecArg.Regs.size() == 1 &&
+ "Expected single register for fallback EXEC");
+ if (!FallbackExecArg.Ty->isIntegerTy(ST.getWavefrontSize())) {
+ LLVM_DEBUG(dbgs() << "Bad type for fallback EXEC");
+ return false;
+ }
+ }
+ }
+
+ unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), /*IsTailCall*/ true,
+ ST.isWave32(), CalleeCC, IsDynamicVGPRChainCall);
auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
- if (!addCallTargetOperands(MIB, MIRBuilder, Info))
+ if (!addCallTargetOperands(MIB, MIRBuilder, Info, IsDynamicVGPRChainCall))
return false;
// Byte offset for the tail call. When we are sibcalling, this will always
// be 0.
MIB.addImm(0);
- // If this is a chain call, we need to pass in the EXEC mask.
- const SIRegisterInfo *TRI = ST.getRegisterInfo();
- if (AMDGPU::isChainCC(Info.CallConv)) {
- ArgInfo ExecArg = Info.OrigArgs[1];
+ // If this is a chain call, we need to pass in the EXEC mask as well as any
+ // other special args.
+ if (IsChainCall) {
+ auto AddRegOrImm = [&](const ArgInfo &Arg) {
+ if (auto CI = dyn_cast<ConstantInt>(Arg.OrigValue)) {
+ MIB.addImm(CI->getSExtValue());
+ } else {
+ MIB.addReg(Arg.Regs[0]);
+ unsigned Idx = MIB->getNumOperands() - 1;
+ MIB->getOperand(Idx).setReg(constrainOperandRegClass(
+ MF, *TRI, MRI, *TII, *ST.getRegBankInfo(), *MIB, MIB->getDesc(),
+ MIB->getOperand(Idx), Idx));
+ }
+ };
+
+ ArgInfo ExecArg = Info.OrigArgs[ChainCallArgIdx::Exec];
assert(ExecArg.Regs.size() == 1 && "Too many regs for EXEC");
- if (!ExecArg.Ty->isIntegerTy(ST.getWavefrontSize()))
+ if (!ExecArg.Ty->isIntegerTy(ST.getWavefrontSize())) {
+ LLVM_DEBUG(dbgs() << "Bad type for EXEC");
return false;
-
- if (const auto *CI = dyn_cast<ConstantInt>(ExecArg.OrigValue)) {
- MIB.addImm(CI->getSExtValue());
- } else {
- MIB.addReg(ExecArg.Regs[0]);
- unsigned Idx = MIB->getNumOperands() - 1;
- MIB->getOperand(Idx).setReg(constrainOperandRegClass(
- MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
- MIB->getDesc(), MIB->getOperand(Idx), Idx));
}
+
+ AddRegOrImm(ExecArg);
+ if (IsDynamicVGPRChainCall)
+ std::for_each(Info.OrigArgs.begin() + ChainCallArgIdx::NumVGPRs,
+ Info.OrigArgs.end(), AddRegOrImm);
}
// Tell the call which registers are clobbered.
@@ -1329,9 +1398,9 @@ bool AMDGPUCallLowering::lowerTailCall(
// FIXME: We should define regbankselectable call instructions to handle
// divergent call targets.
if (MIB->getOperand(0).isReg()) {
- MIB->getOperand(0).setReg(constrainOperandRegClass(
- MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
- MIB->getDesc(), MIB->getOperand(0), 0));
+ MIB->getOperand(0).setReg(
+ constrainOperandRegClass(MF, *TRI, MRI, *TII, *ST.getRegBankInfo(),
+ *MIB, MIB->getDesc(), MIB->getOperand(0), 0));
}
MF.getFrameInfo().setHasTailCall();
@@ -1345,11 +1414,6 @@ bool AMDGPUCallLowering::lowerChainCall(MachineIRBuilder &MIRBuilder,
ArgInfo Callee = Info.OrigArgs[0];
ArgInfo SGPRArgs = Info.OrigArgs[2];
ArgInfo VGPRArgs = Info.OrigArgs[3];
- ArgInfo Flags = Info.OrigArgs[4];
-
- assert(cast<ConstantInt>(Flags.OrigValue)->isZero() &&
- "Non-zero flags aren't supported yet.");
- assert(Info.OrigArgs.size() == 5 && "Additional args aren't supported yet.");
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index fe095414e5172..5438c6f50dea2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3657,6 +3657,19 @@ bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
return true;
}
+namespace {
+// Chain calls have special arguments that we need to handle. These come at
+// the end of the argument list(s), after the SGPR and VGPR arguments
+// (at index 0 and 1 respectively).
+enum ChainCallArgIdx {
+ Exec = 2,
+ Flags,
+ NumVGPRs,
+ FallbackExec,
+ FallbackCallee
+};
+} // anonymous namespace
+
// The wave scratch offset register is used as the global base pointer.
SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
@@ -3665,37 +3678,67 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
SelectionDAG &DAG = CLI.DAG;
- TargetLowering::ArgListEntry RequestedExec;
- if (IsChainCallConv) {
- // The last argument should be the value that we need to put in EXEC.
- // Pop it out of CLI.Outs and CLI.OutVals before we do any processing so we
- // don't treat it like the rest of the arguments.
- RequestedExec = CLI.Args.back();
- assert(RequestedExec.Node && "No node for EXEC");
+ const SDLoc &DL = CLI.DL;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
- if (!RequestedExec.Ty->isIntegerTy(Subtarget->getWavefrontSize()))
+ llvm::SmallVector<SDValue, 6> ChainCallSpecialArgs;
+ if (IsChainCallConv) {
+ // The last arguments should be the value to put in EXEC, followed by
+ // the flags and any other arguments with special meanings.
+ // Pop them out of CLI.Outs and CLI.OutVals before we do any processing so
+ // we don't treat them like the "real" arguments.
+ auto RequestedExecIt = std::find_if(
+ CLI.Outs.begin(), CLI.Outs.end(),
+ [](const ISD::OutputArg &Arg) { return Arg.OrigArgIndex == 2; });
+ assert(RequestedExecIt != CLI.Outs.end() && "No node for EXEC");
+
+ size_t SpecialArgsBeginIdx = RequestedExecIt - CLI.Outs.begin();
+ CLI.OutVals.erase(CLI.OutVals.begin() + SpecialArgsBeginIdx,
+ CLI.OutVals.end());
+ CLI.Outs.erase(RequestedExecIt, CLI.Outs.end());
+
+ assert(CLI.Outs.back().OrigArgIndex < 2 &&
+ "Haven't popped all the special args");
+
+ TargetLowering::ArgListEntry RequestedExecArg =
+ CLI.Args[ChainCallArgIdx::Exec];
+ if (!RequestedExecArg.Ty->isIntegerTy(Subtarget->getWavefrontSize()))
return lowerUnhandledCall(CLI, InVals, "Invalid value for EXEC");
- assert(CLI.Outs.back().OrigArgIndex == 2 && "Unexpected last arg");
- CLI.Outs.pop_back();
- CLI.OutVals.pop_back();
+ // Convert constants into TargetConstants, so they become immediate operands
+ // instead of being selected into S_MOV.
+ auto PushNodeOrTargetConstant = [&](TargetLowering::ArgListEntry Arg) {
+ if (auto ArgNode = dyn_cast<ConstantSDNode>(Arg.Node))
+ ChainCallSpecialArgs.push_back(DAG.getTargetConstant(
+ ArgNode->getAPIntValue(), DL, ArgNode->getValueType(0)));
+ else
+ ChainCallSpecialArgs.push_back(Arg.Node);
+ };
- if (RequestedExec.Ty->isIntegerTy(64)) {
- assert(CLI.Outs.back().OrigArgIndex == 2 && "Exec wasn't split up");
- CLI.Outs.pop_back();
- CLI.OutVals.pop_back();
- }
+ PushNodeOrTargetConstant(RequestedExecArg);
+
+ // Process any other special arguments depending on the value of the flags.
+ TargetLowering::ArgListEntry Flags = CLI.Args[ChainCallArgIdx::Flags];
+
+ const APInt &FlagsValue = cast<ConstantSDNode>(Flags.Node)->getAPIntValue();
+ if (FlagsValue.isZero()) {
+ if (CLI.Args.size() > ChainCallArgIdx::Flags + 1)
+ return lowerUnhandledCall(CLI, InVals,
+ "No additional args allowed if flags == 0");
+ } else if (FlagsValue.isOneBitSet(0)) {
+ if (CLI.Args.size() != ChainCallArgIdx::FallbackCallee + 1) {
+ return lowerUnhandledCall(CLI, InVals, "Expected 3 additional args");
+ }
- assert(CLI.Outs.back().OrigArgIndex != 2 &&
- "Haven't popped all the pieces of the EXEC mask");
+ std::for_each(CLI.Args.begin() + ChainCallArgIdx::NumVGPRs,
+ CLI.Args.end(), PushNodeOrTargetConstant);
+ }
}
- const SDLoc &DL = CLI.DL;
SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
- SDValue Chain = CLI.Chain;
- SDValue Callee = CLI.Callee;
bool &IsTailCall = CLI.IsTailCall;
bool IsVarArg = CLI.IsVarArg;
bool IsSibCall = false;
@@ -3983,7 +4026,8 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
}
if (IsChainCallConv)
- Ops.push_back(RequestedExec.Node);
+ Ops.insert(Ops.end(), ChainCallSpecialArgs.begin(),
+ ChainCallSpecialArgs.end());
// Add argument registers to the end of the list so that they are known live
// into the call.
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 63f66023837a2..e5af35f2da686 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -692,36 +692,42 @@ def : GCNPat<
(SI_TCRETURN_GFX Gfx_CCR_SGPR_64:$src0, (i64 0), i32imm:$fpdiff)
>;
-// Pseudo for the llvm.amdgcn.cs.chain intrinsic.
-// This is essentially a tail call, but it also takes a mask to put in EXEC
-// right before jumping to the callee.
-class SI_CS_CHAIN_TC<
+// Pseudos for the llvm.amdgcn.cs.chain intrinsic.
+multiclass SI_CS_CHAIN_TC<
ValueType execvt, Predicate wavesizepred,
- RegisterOperand execrc = getSOPSrcForVT<execvt>.ret>
- : SPseudoInstSI <(outs),
- (ins CCR_SGPR_64:$src0, unknown:$callee, i32imm:$fpdiff, execrc:$exec)> {
- let FixedSize = 0;
- let isCall = 1;
- let isTerminator = 1;
- let isBarrier = 1;
- let isReturn = 1;
- let UseNamedOperandTable = 1;
- let SchedRW = [WriteBranch];
- let isConvergent = 1;
-
- let WaveSizePredicate = wavesizepred;
-}
-
-def SI_CS_CHAIN_TC_W32 : SI_CS_CHAIN_TC<i32, isWave32>;
-def SI_CS_CHAIN_TC_W64 : SI_CS_CHAIN_TC<i64, isWave64>;
+ RegisterOperand execrc = getSOPSrcForVT<execvt>.ret> {
+ let FixedSize = 0,
+ isCall = 1,
+ isTerminator = 1,
+ isBarrier = 1,
+ isReturn = 1,
+ UseNamedOperandTable = 1,
+ SchedRW = [WriteBranch],
+ isConvergent = 1,
+ WaveSizePredicate = wavesizepred in {
+ // This is essentially a tail call, but it also takes a mask to put in EXEC
+ // right before jumping to the callee.
+ def NAME: SPseudoInstSI <(outs),
+ (ins CCR_SGPR_64:$src0, unknown:$callee, i32imm:$fpdiff, execrc:$exec)>;
+
+ // Same as above, but it will first try to reallocate the VGPRs, and choose an
+ // EXEC mask and a callee depending on the success of the reallocation attempt.
+ def _DVGPR : SPseudoInstSI <(outs),
+ (ins CCR_SGPR_64:$src0, i64imm:$callee, i32imm:$fpdiff, execrc:$exec,
+ SSrc_b32:$numvgprs, execrc:$fbexec, CCR_SGPR_64:$fbcallee)>;
+ } // End FixedSize = 0 etc
+}
+
+defm SI_CS_CHAIN_TC_W32 : SI_CS_CHAIN_TC<i32, isWave32>;
+defm SI_CS_CHAIN_TC_W64 : SI_CS_CHAIN_TC<i64, isWave64>;
// Handle selecting direct & indirect calls via SI_CS_CHAIN_TC_W32/64
multiclass si_cs_chain_tc_pattern<
dag callee, ValueType execvt, RegisterOperand execrc, Instruction tc> {
-def : GCNPat<
- (AMDGPUtc_return_chain i64:$src0, callee, (i32 timm:$fpdiff), execvt:$exec),
- (tc CCR_SGPR_64:$src0, callee, i32imm:$fpdiff, execrc:$exec)
->;
+ def : GCNPat<
+ (AMDGPUtc_return_chain i64:$src0, callee, (i32 timm:$fpdiff), execvt:$exec),
+ (tc CCR_SGPR_64:$src0, callee, i32imm:$fpdiff, execrc:$exec)
+ >;
}
multiclass si_cs_chain_tc_patterns<
@@ -736,6 +742,26 @@ multiclass si_cs_chain_tc_patterns<
defm : si_cs_chain_tc_patterns<i32>;
defm : si_cs_chain_tc_patterns<i64>;
+// Match the dynamic VGPR case. This is always indirect since we choose the
+// callee dynamically based on the result of the VGPR reallocation, so make
+// sure to drop the callee info if there is any.
+multiclass si_cs_chain_tc_dvgpr_patterns<
+ ValueType execvt, RegisterOperand execrc = getSOPSrcForVT<execvt>.ret,
+ Instruction tc = SI_CS_CHAIN_TC_W32_DVGPR> {
+ let AddedComplexity = 90 in {
+ foreach callee = [ (i64 0), (tglobaladdr) ] in {
+ def : GCNPat<
+ (AMDGPUtc_return_chain i64:$src0, callee, (i32 timm:$fpdiff), execvt:$exec,
+ i32:$numvgprs, execvt:$fbexec, i64:$fbcallee),
+ (tc CCR_SGPR_64:$src0, (i64 0), i32imm:$fpdiff, execrc:$exec,
+ SSrc_b32:$numvgprs, execrc:$fbexec, CCR_SGPR_64:$fbcallee)
+ >;
+ }
+ } // AddedComplexity
+}
+
+defm : si_cs_chain_tc_dvgpr_patterns<i32>; // On GFX12, dVGPR mode is wave32-only.
+
def ADJCALLSTACKUP : SPseudoInstSI<
(outs), (ins i32imm:$amt0, i32imm:$amt1),
[(callseq_start timm:$amt0, timm:$amt1)],
diff --git a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
index d02173f57ee37..bb4e7f8e6c653 100644
--- a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
@@ -29,7 +29,8 @@ class SILateBranchLowering : public MachineFunctionPass {
const SIInstrInfo *TII = nullptr;
MachineDominatorTree *MDT = nullptr;
- void expandChainCall(MachineInstr &MI);
+ void expandChainCall(MachineInstr &MI, const GCNSubtarget &ST,
+ bool DynamicVGPR);
void earlyTerm(MachineInstr &MI, MachineBasicBlock *EarlyExitBlock);
public:
@@ -116,14 +117,56 @@ static void splitBlock(MachineBasicBlock &MBB, MachineInstr &MI,
MDT->applyUpdates(DTUpdates);
}
-void SILateBranchLowering::expandChainCall(MachineInstr &MI) {
+static void addRegOrCopyOp(MachineInstrBuilder &MIB, MachineOperand &Op) {
+ if (Op.isReg())
+ MIB.addReg(Op.getReg());
+ else
+ MIB->addOperand(Op);
+}
+
+void SILateBranchLowering::expandChainCall(MachineInstr &MI,
+ const GCNSubtarget &ST,
+ bool DynamicVGPR) {
// This is a tail call that needs to be expanded into at least
// 2 instructions, one for setting EXEC and one for the actual tail call.
- constexpr unsigned ExecIdx = 3;
+ unsigned ExecIdx =
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::exec);
+ if (DynamicVGPR) {
+ // We have 3 extra operands and we need to:
+ // * Try to change the VGPR allocation
+ // * Select the callee based on the result of the reallocation attempt
+ // * Select the EXEC mask based on the result of the reallocation attempt
+ auto AllocMI = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
+ TII->get(AMDGPU::S_ALLOC_VGPR));
+ addRegOrCopyOp(AllocMI,
+ *TII->getNamedOperand(MI, AMDGPU::OpName::numvgprs));
+
+ auto SelectCallee =
+ BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
+ TII->get(AMDGPU::S_CSELECT_B64))
+ .addDef(TII->getNamedOperand(MI, AMDGPU::OpName::src0)->getReg());
+ addRegOrCopyOp(SelectCallee,
+ *TII->getNamedOperand(MI, AMDGPU::OpName::src0));
+ addRegOrCopyOp(SelectCallee,
+ *TII->getNamedOperand(MI, AMDGPU::OpName::fbcallee));
+
+ auto SelectExec = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
+ TII->get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
+ : AMDGPU::S_CSELECT_B64))
+ .addDef(ExecReg);
+
+ addRegOrCopyOp(SelectExec, *TII->getNamedOperand(MI, AMDGPU::OpName::exec));
+ addRegOrCopyOp(SelectExec,
+ *TII->getNamedOperand(MI, AMDGPU::OpName::fbexec));
+ } else {
+ auto SetExec = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
+ TII->get(MovOpc), ExecReg);
+ addRegOrCopyOp(SetExec, *TII->getNamedOperand(MI, AMDGPU::OpName::exec));
+ }
- BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(MovOpc), ExecReg)
- ->addOperand(MI.getOperand(ExecIdx));
- MI.removeOperand(ExecIdx);
+ for (unsigned OpIdx = MI.getNumExplicitOperands() - 1; OpIdx >= ExecIdx;
+ --OpIdx)
+ MI.removeOperand(OpIdx);
MI.setDesc(TII->get(AMDGPU::SI_TCRETURN));
}
@@ -172,7 +215,12 @@ bool SILateBranchLowering::runOnMachineFunction(MachineFunction &MF) {
case AMDGPU::SI_CS_CHAIN_TC_W32:
case AMDGPU::SI_CS_CHAIN_TC_W64:
- expandChainCall(MI);
+ expandChainCall(MI, ST, /*DynamicVGPR*/ false);
+ MadeChange = true;
+ break;
+ case AMDGPU::SI_CS_CHAIN_TC_W32_DVGPR:
+ case AMDGPU::SI_CS_CHAIN_TC_W64_DVGPR:
+ expandChainCall(MI, ST, /*DynamicVGPR*/ true);
MadeChange = true;
break;
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn-cs-chain-intrinsic-dyn-vgpr-w32.ll b/llvm/test/CodeGen/AMDGPU/amdgcn-cs-chain-intrinsic-dyn-vgpr-w32.ll
new file mode 100644
index 0000000000000..f320a18f253e2
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn-cs-chain-intrinsic-dyn-vgpr-w32.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=GISEL-GFX12 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=DAGISEL-GFX12 %s
+
+declare amdgpu_cs_chain void @callee(<3 x i32> inreg, { i32, ptr addrspace(5), i32, i32 })
+declare amdgpu_cs_chain_preserve void @callee_preserve(<3 x i32> inreg, { i32, ptr addrspace(5), i32, i32 })
+declare void @llvm.amdgcn.cs.chain(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) noreturn
+
+define amdgpu_cs_chain void @dynamic_vgprs(i32 inreg %exec, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 inreg %num_vgpr) {
+; GISEL-GFX12-LABEL: dynamic_vgprs:
+; GISEL-GFX12: ; %bb.0:
+; GISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; GISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; GISEL-GFX12-NEXT: s_mov_b32 s5, s0
+; GISEL-GFX12-NEXT: s_mov_b32 s0, s1
+; GISEL-GFX12-NEXT: s_mov_b32 s1, s2
+; GISEL-GFX12-NEXT: s_mov_b32 s2, s3
+; GISEL-GFX12-NEXT: s_mov_b32 s6, callee at abs32@lo
+; GISEL-GFX12-NEXT: s_mov_b32 s7, callee at abs32@hi
+; GISEL-GFX12-NEXT: s_mov_b32 s8, retry_vgpr_alloc at abs32@lo
+; GISEL-GFX12-NEXT: s_mov_b32 s9, retry_vgpr_alloc at abs32@hi
+; GISEL-GFX12-NEXT: s_alloc_vgpr s4
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_cselect_b64 s[6:7], s[6:7], s[8:9]
+; GISEL-GFX12-NEXT: s_cselect_b32 exec_lo, s5, -1
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_setpc_b64 s[6:7]
+;
+; DAGISEL-GFX12-LABEL: dynamic_vgprs:
+; DAGISEL-GFX12: ; %bb.0:
+; DAGISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; DAGISEL-GFX12-NEXT: s_mov_b32 s5, s0
+; DAGISEL-GFX12-NEXT: s_mov_b32 s7, retry_vgpr_alloc at abs32@hi
+; DAGISEL-GFX12-NEXT: s_mov_b32 s6, retry_vgpr_alloc at abs32@lo
+; DAGISEL-GFX12-NEXT: s_mov_b32 s9, callee at abs32@hi
+; DAGISEL-GFX12-NEXT: s_mov_b32 s8, callee at abs32@lo
+; DAGISEL-GFX12-NEXT: s_mov_b32 s0, s1
+; DAGISEL-GFX12-NEXT: s_mov_b32 s1, s2
+; DAGISEL-GFX12-NEXT: s_mov_b32 s2, s3
+; DAGISEL-GFX12-NEXT: s_alloc_vgpr s4
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: s_cselect_b64 s[8:9], s[8:9], s[6:7]
+; DAGISEL-GFX12-NEXT: s_cselect_b32 exec_lo, s5, -1
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: s_setpc_b64 s[8:9]
+ call void(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee, i32 %exec, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 1, i32 inreg %num_vgpr, i32 inreg -1, ptr @retry_vgpr_alloc)
+ unreachable
+}
+
+define amdgpu_cs_chain void @constants(<3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr) {
+; GISEL-GFX12-LABEL: constants:
+; GISEL-GFX12: ; %bb.0:
+; GISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; GISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; GISEL-GFX12-NEXT: s_mov_b32 s4, callee at abs32@lo
+; GISEL-GFX12-NEXT: s_mov_b32 s5, callee at abs32@hi
+; GISEL-GFX12-NEXT: s_mov_b32 s6, retry_vgpr_alloc at abs32@lo
+; GISEL-GFX12-NEXT: s_mov_b32 s7, retry_vgpr_alloc at abs32@hi
+; GISEL-GFX12-NEXT: s_alloc_vgpr 64
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_cselect_b64 s[4:5], s[4:5], s[6:7]
+; GISEL-GFX12-NEXT: s_cselect_b32 exec_lo, 7, -1
+; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; GISEL-GFX12-NEXT: s_setpc_b64 s[4:5]
+;
+; DAGISEL-GFX12-LABEL: constants:
+; DAGISEL-GFX12: ; %bb.0:
+; DAGISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; DAGISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; DAGISEL-GFX12-NEXT: s_mov_b32 s5, retry_vgpr_alloc at abs32@hi
+; DAGISEL-GFX12-NEXT: s_mov_b32 s4, retry_vgpr_alloc at abs32@lo
+; DAGISEL-GFX12-NEXT: s_mov_b32 s7, callee at abs32@hi
+; DAGISEL-GFX12-NEXT: s_mov_b32 s6, callee at abs32@lo
+; DAGISEL-GFX12-NEXT: s_alloc_vgpr 64
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: s_cselect_b64 s[6:7], s[6:7], s[4:5]
+; DAGISEL-GFX12-NEXT: s_cselect_b32 exec_lo, 7, -1
+; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
+; DAGISEL-GFX12-NEXT: s_setpc_b64 s[6:7]
+ call void(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee, i32 7, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 1, i32 inreg 64, i32 inreg -1, ptr @retry_vgpr_alloc)
+ unreachable
+}
+
+declare amdgpu_cs_chain_preserve void @retry_vgpr_alloc(<3 x i32> inreg %sgpr)
diff --git a/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w32.ll b/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w32.ll
index a9fc6a8c4d210..ce8756c1587f6 100644
--- a/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w32.ll
@@ -90,7 +90,6 @@ define amdgpu_cs_chain void @chain_to_chain(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
; DAGISEL-GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX11-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; DAGISEL-GFX11-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
@@ -98,7 +97,7 @@ define amdgpu_cs_chain void @chain_to_chain(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee, 0, killed [[S_MOV_B32_2]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
;
; DAGISEL-GFX10-LABEL: name: chain_to_chain
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -121,7 +120,6 @@ define amdgpu_cs_chain void @chain_to_chain(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX10-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY10:%[0-9]+]]:sgpr_128 = COPY $sgpr48_sgpr49_sgpr50_sgpr51
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY10]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX10-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
@@ -130,7 +128,7 @@ define amdgpu_cs_chain void @chain_to_chain(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee, 0, killed [[S_MOV_B32_2]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
call void(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee, i32 -1, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 0)
unreachable
}
@@ -218,7 +216,6 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %sgpr, { i32, ptr addrspace(5
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
; DAGISEL-GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX11-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; DAGISEL-GFX11-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
@@ -226,7 +223,7 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %sgpr, { i32, ptr addrspace(5
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee, 0, killed [[S_MOV_B32_2]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
;
; DAGISEL-GFX10-LABEL: name: cs_to_chain
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -249,7 +246,6 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %sgpr, { i32, ptr addrspace(5
; DAGISEL-GFX10-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY10:%[0-9]+]]:sgpr_128 = COPY $sgpr100_sgpr101_sgpr102_sgpr103
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY10]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX10-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
@@ -258,7 +254,7 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %sgpr, { i32, ptr addrspace(5
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee, 0, killed [[S_MOV_B32_2]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
call void(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee, i32 -1, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 0)
unreachable
}
@@ -346,7 +342,6 @@ define amdgpu_cs_chain void @chain_to_chain_preserve(<3 x i32> inreg %sgpr, { i3
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
; DAGISEL-GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX11-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; DAGISEL-GFX11-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
@@ -354,7 +349,7 @@ define amdgpu_cs_chain void @chain_to_chain_preserve(<3 x i32> inreg %sgpr, { i3
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee_preserve, 0, killed [[S_MOV_B32_2]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee_preserve, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
;
; DAGISEL-GFX10-LABEL: name: chain_to_chain_preserve
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -377,7 +372,6 @@ define amdgpu_cs_chain void @chain_to_chain_preserve(<3 x i32> inreg %sgpr, { i3
; DAGISEL-GFX10-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY10:%[0-9]+]]:sgpr_128 = COPY $sgpr48_sgpr49_sgpr50_sgpr51
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY10]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX10-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
@@ -386,7 +380,7 @@ define amdgpu_cs_chain void @chain_to_chain_preserve(<3 x i32> inreg %sgpr, { i3
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee_preserve, 0, killed [[S_MOV_B32_2]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee_preserve, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
call void(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee_preserve, i32 -1, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 0)
unreachable
}
@@ -474,7 +468,6 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
; DAGISEL-GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX11-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; DAGISEL-GFX11-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
@@ -482,7 +475,7 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee_preserve, 0, killed [[S_MOV_B32_2]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee_preserve, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
;
; DAGISEL-GFX10-LABEL: name: cs_to_chain_preserve
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -505,7 +498,6 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX10-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY10:%[0-9]+]]:sgpr_128 = COPY $sgpr100_sgpr101_sgpr102_sgpr103
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY10]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX10-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
@@ -514,7 +506,7 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee_preserve, 0, killed [[S_MOV_B32_2]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE]], @callee_preserve, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
call void(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee_preserve, i32 -1, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 0)
unreachable
}
@@ -607,7 +599,6 @@ define amdgpu_cs_chain void @indirect(ptr inreg %callee, <3 x i32> inreg %sgpr,
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY14]], implicit $exec
; DAGISEL-GFX11-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY15]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]]
; DAGISEL-GFX11-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_3]]
; DAGISEL-GFX11-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_4]]
@@ -615,7 +606,7 @@ define amdgpu_cs_chain void @indirect(ptr inreg %callee, <3 x i32> inreg %sgpr,
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE1]], 0, 0, killed [[S_MOV_B32_]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE1]], 0, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
;
; DAGISEL-GFX10-LABEL: name: indirect
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -645,7 +636,6 @@ define amdgpu_cs_chain void @indirect(ptr inreg %callee, <3 x i32> inreg %sgpr,
; DAGISEL-GFX10-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY15]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY16:%[0-9]+]]:sgpr_128 = COPY $sgpr48_sgpr49_sgpr50_sgpr51
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY16]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]]
; DAGISEL-GFX10-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_3]]
@@ -654,7 +644,7 @@ define amdgpu_cs_chain void @indirect(ptr inreg %callee, <3 x i32> inreg %sgpr,
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE1]], 0, 0, killed [[S_MOV_B32_]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE1]], 0, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
call void(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr %callee, i32 -1, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 0)
unreachable
}
@@ -713,10 +703,9 @@ define amdgpu_cs_chain void @nonuniform_callee(ptr %callee, i32 inreg %sgpr, i32
; DAGISEL-GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_1]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_]], %subreg.sub1
; DAGISEL-GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]]
; DAGISEL-GFX11-NEXT: $vgpr8 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE1]], 0, 0, killed [[S_MOV_B32_]], amdgpu_allvgprs, implicit $sgpr0, implicit $vgpr8
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE1]], 0, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $vgpr8
;
; DAGISEL-GFX10-LABEL: name: nonuniform_callee
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -737,11 +726,10 @@ define amdgpu_cs_chain void @nonuniform_callee(ptr %callee, i32 inreg %sgpr, i32
; DAGISEL-GFX10-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY7:%[0-9]+]]:sgpr_128 = COPY $sgpr48_sgpr49_sgpr50_sgpr51
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY7]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]]
; DAGISEL-GFX10-NEXT: $vgpr8 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE1]], 0, 0, killed [[S_MOV_B32_]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $vgpr8
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W32 killed [[REG_SEQUENCE1]], 0, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $vgpr8
call void(ptr, i32, i32, i32, i32, ...) @llvm.amdgcn.cs.chain(ptr %callee, i32 -1, i32 inreg %sgpr, i32 %vgpr, i32 0)
unreachable
}
diff --git a/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w64.ll b/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w64.ll
index dfd1f0685a931..49243fb429f6d 100644
--- a/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w64.ll
+++ b/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w64.ll
@@ -90,7 +90,6 @@ define amdgpu_cs_chain void @chain_to_chain(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
; DAGISEL-GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX11-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; DAGISEL-GFX11-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
@@ -98,7 +97,7 @@ define amdgpu_cs_chain void @chain_to_chain(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
;
; DAGISEL-GFX10-LABEL: name: chain_to_chain
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -121,7 +120,6 @@ define amdgpu_cs_chain void @chain_to_chain(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX10-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY10:%[0-9]+]]:sgpr_128 = COPY $sgpr48_sgpr49_sgpr50_sgpr51
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY10]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX10-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
@@ -130,7 +128,7 @@ define amdgpu_cs_chain void @chain_to_chain(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
call void(ptr, i64, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee, i64 -1, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 0)
unreachable
}
@@ -218,7 +216,6 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %sgpr, { i32, ptr addrspace(5
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
; DAGISEL-GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX11-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; DAGISEL-GFX11-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
@@ -226,7 +223,7 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %sgpr, { i32, ptr addrspace(5
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
;
; DAGISEL-GFX10-LABEL: name: cs_to_chain
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -249,7 +246,6 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %sgpr, { i32, ptr addrspace(5
; DAGISEL-GFX10-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY10:%[0-9]+]]:sgpr_128 = COPY $sgpr100_sgpr101_sgpr102_sgpr103
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY10]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX10-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
@@ -258,7 +254,7 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %sgpr, { i32, ptr addrspace(5
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
call void(ptr, i64, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee, i64 -1, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 0)
unreachable
}
@@ -346,7 +342,6 @@ define amdgpu_cs_chain void @chain_to_chain_preserve(<3 x i32> inreg %sgpr, { i3
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
; DAGISEL-GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX11-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; DAGISEL-GFX11-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
@@ -354,7 +349,7 @@ define amdgpu_cs_chain void @chain_to_chain_preserve(<3 x i32> inreg %sgpr, { i3
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee_preserve, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee_preserve, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
;
; DAGISEL-GFX10-LABEL: name: chain_to_chain_preserve
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -377,7 +372,6 @@ define amdgpu_cs_chain void @chain_to_chain_preserve(<3 x i32> inreg %sgpr, { i3
; DAGISEL-GFX10-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY10:%[0-9]+]]:sgpr_128 = COPY $sgpr48_sgpr49_sgpr50_sgpr51
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY10]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX10-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
@@ -386,7 +380,7 @@ define amdgpu_cs_chain void @chain_to_chain_preserve(<3 x i32> inreg %sgpr, { i3
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee_preserve, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee_preserve, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
call void(ptr, i64, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee_preserve, i64 -1, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 0)
unreachable
}
@@ -474,7 +468,6 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
; DAGISEL-GFX11-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX11-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; DAGISEL-GFX11-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
@@ -482,7 +475,7 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee_preserve, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee_preserve, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
;
; DAGISEL-GFX10-LABEL: name: cs_to_chain_preserve
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -505,7 +498,6 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX10-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY10:%[0-9]+]]:sgpr_128 = COPY $sgpr100_sgpr101_sgpr102_sgpr103
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY10]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
; DAGISEL-GFX10-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
@@ -514,7 +506,7 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %sgpr, { i32, ptr ad
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee_preserve, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE]], @callee_preserve, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
call void(ptr, i64, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee_preserve, i64 -1, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 0)
unreachable
}
@@ -607,7 +599,6 @@ define amdgpu_cs_chain void @indirect(ptr inreg %callee, <3 x i32> inreg %sgpr,
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY14]], implicit $exec
; DAGISEL-GFX11-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY15]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]]
; DAGISEL-GFX11-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_3]]
; DAGISEL-GFX11-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_4]]
@@ -615,7 +606,7 @@ define amdgpu_cs_chain void @indirect(ptr inreg %callee, <3 x i32> inreg %sgpr,
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE1]], 0, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE1]], 0, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
;
; DAGISEL-GFX10-LABEL: name: indirect
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -645,7 +636,6 @@ define amdgpu_cs_chain void @indirect(ptr inreg %callee, <3 x i32> inreg %sgpr,
; DAGISEL-GFX10-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY15]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY16:%[0-9]+]]:sgpr_128 = COPY $sgpr48_sgpr49_sgpr50_sgpr51
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY16]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]]
; DAGISEL-GFX10-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_3]]
@@ -654,7 +644,7 @@ define amdgpu_cs_chain void @indirect(ptr inreg %callee, <3 x i32> inreg %sgpr,
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE1]], 0, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE1]], 0, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
call void(ptr, i64, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr %callee, i64 -1, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 0)
unreachable
}
@@ -713,10 +703,9 @@ define amdgpu_cs_chain void @nonuniform_callee(ptr %callee, i32 inreg %sgpr, i32
; DAGISEL-GFX11-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_1]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_]], %subreg.sub1
; DAGISEL-GFX11-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; DAGISEL-GFX11-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX11-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]]
; DAGISEL-GFX11-NEXT: $vgpr8 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE1]], 0, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr0, implicit $vgpr8
+ ; DAGISEL-GFX11-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE1]], 0, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $vgpr8
;
; DAGISEL-GFX10-LABEL: name: nonuniform_callee
; DAGISEL-GFX10: bb.0 (%ir-block.0):
@@ -737,11 +726,10 @@ define amdgpu_cs_chain void @nonuniform_callee(ptr %callee, i32 inreg %sgpr, i32
; DAGISEL-GFX10-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
; DAGISEL-GFX10-NEXT: [[COPY7:%[0-9]+]]:sgpr_128 = COPY $sgpr48_sgpr49_sgpr50_sgpr51
- ; DAGISEL-GFX10-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
; DAGISEL-GFX10-NEXT: $sgpr48_sgpr49_sgpr50_sgpr51 = COPY [[COPY7]]
; DAGISEL-GFX10-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]]
; DAGISEL-GFX10-NEXT: $vgpr8 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE1]], 0, 0, killed [[S_MOV_B64_]], amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $vgpr8
+ ; DAGISEL-GFX10-NEXT: SI_CS_CHAIN_TC_W64 killed [[REG_SEQUENCE1]], 0, 0, -1, amdgpu_allvgprs, implicit $sgpr48_sgpr49_sgpr50_sgpr51, implicit $sgpr0, implicit $vgpr8
call void(ptr, i64, i32, i32, i32, ...) @llvm.amdgcn.cs.chain(ptr %callee, i64 -1, i32 inreg %sgpr, i32 %vgpr, i32 0)
unreachable
}
diff --git a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-intrinsic-dyn-vgpr-w32.ll b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-intrinsic-dyn-vgpr-w32.ll
new file mode 100644
index 0000000000000..955eb14a9d85f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-intrinsic-dyn-vgpr-w32.ll
@@ -0,0 +1,315 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+wavefrontsize32,-wavefrontsize64 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=GISEL-GFX12 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+wavefrontsize32,-wavefrontsize64 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=DAGISEL-GFX12 %s
+
+declare amdgpu_cs_chain void @callee(<3 x i32> inreg, { i32, ptr addrspace(5), i32, i32 })
+declare amdgpu_cs_chain_preserve void @callee_preserve(<3 x i32> inreg, { i32, ptr addrspace(5), i32, i32 })
+declare void @llvm.amdgcn.cs.chain(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) noreturn
+
+; This test uses immediates for the EXEC masks and number of VGPRs, to make sure we don't use registers we don't need.
+define amdgpu_cs_chain void @direct_callee_direct_fallback(<3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr) {
+ ; GISEL-GFX12-LABEL: name: direct_callee_direct_fallback
+ ; GISEL-GFX12: bb.1 (%ir-block.0):
+ ; GISEL-GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr8, $vgpr9, $vgpr10, $vgpr11
+ ; GISEL-GFX12-NEXT: {{ $}}
+ ; GISEL-GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GISEL-GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GISEL-GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GISEL-GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; GISEL-GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr9
+ ; GISEL-GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr10
+ ; GISEL-GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr11
+ ; GISEL-GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GISEL-GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GISEL-GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY2]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
+ ; GISEL-GFX12-NEXT: $vgpr8 = COPY [[COPY3]]
+ ; GISEL-GFX12-NEXT: $vgpr9 = COPY [[COPY4]]
+ ; GISEL-GFX12-NEXT: $vgpr10 = COPY [[COPY5]]
+ ; GISEL-GFX12-NEXT: $vgpr11 = COPY [[COPY6]]
+ ; GISEL-GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-lo) @callee
+ ; GISEL-GFX12-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-hi) @callee
+ ; GISEL-GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_1]], %subreg.sub1
+ ; GISEL-GFX12-NEXT: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[REG_SEQUENCE]]
+ ; GISEL-GFX12-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-lo) @retry_vgpr_alloc
+ ; GISEL-GFX12-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-hi) @retry_vgpr_alloc
+ ; GISEL-GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, [[S_MOV_B32_3]], %subreg.sub1
+ ; GISEL-GFX12-NEXT: [[COPY11:%[0-9]+]]:ccr_sgpr_64 = COPY [[REG_SEQUENCE1]]
+ ; GISEL-GFX12-NEXT: SI_CS_CHAIN_TC_W32_DVGPR [[COPY10]], 0, 0, 15, 64, -1, [[COPY11]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ;
+ ; DAGISEL-GFX12-LABEL: name: direct_callee_direct_fallback
+ ; DAGISEL-GFX12: bb.0 (%ir-block.0):
+ ; DAGISEL-GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr8, $vgpr9, $vgpr10, $vgpr11
+ ; DAGISEL-GFX12-NEXT: {{ $}}
+ ; DAGISEL-GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr11
+ ; DAGISEL-GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr10
+ ; DAGISEL-GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr9
+ ; DAGISEL-GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; DAGISEL-GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; DAGISEL-GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; DAGISEL-GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; DAGISEL-GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-hi) @retry_vgpr_alloc
+ ; DAGISEL-GFX12-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-lo) @retry_vgpr_alloc
+ ; DAGISEL-GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE killed [[S_MOV_B32_1]], %subreg.sub0, killed [[S_MOV_B32_]], %subreg.sub1
+ ; DAGISEL-GFX12-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-hi) @callee
+ ; DAGISEL-GFX12-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-lo) @callee
+ ; DAGISEL-GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE killed [[S_MOV_B32_3]], %subreg.sub0, killed [[S_MOV_B32_2]], %subreg.sub1
+ ; DAGISEL-GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; DAGISEL-GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; DAGISEL-GFX12-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
+ ; DAGISEL-GFX12-NEXT: $vgpr8 = COPY [[COPY3]]
+ ; DAGISEL-GFX12-NEXT: $vgpr9 = COPY [[COPY2]]
+ ; DAGISEL-GFX12-NEXT: $vgpr10 = COPY [[COPY1]]
+ ; DAGISEL-GFX12-NEXT: $vgpr11 = COPY [[COPY]]
+ ; DAGISEL-GFX12-NEXT: SI_CS_CHAIN_TC_W32_DVGPR killed [[REG_SEQUENCE1]], 0, 0, 15, 64, -1, killed [[REG_SEQUENCE]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ call void(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee, i32 15, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 1, i32 64, i32 inreg -1, ptr @retry_vgpr_alloc)
+ unreachable
+}
+
+define amdgpu_cs_chain void @indirect_callee_direct_fallback(i32 inreg %exec, ptr inreg %callee, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 inreg %num_vgpr) {
+ ; GISEL-GFX12-LABEL: name: indirect_callee_direct_fallback
+ ; GISEL-GFX12: bb.1 (%ir-block.0):
+ ; GISEL-GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr8, $vgpr9, $vgpr10, $vgpr11
+ ; GISEL-GFX12-NEXT: {{ $}}
+ ; GISEL-GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GISEL-GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GISEL-GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GISEL-GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GISEL-GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GISEL-GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GISEL-GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GISEL-GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; GISEL-GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr9
+ ; GISEL-GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr10
+ ; GISEL-GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr11
+ ; GISEL-GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GISEL-GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY3]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GISEL-GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GISEL-GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
+ ; GISEL-GFX12-NEXT: $vgpr8 = COPY [[COPY6]]
+ ; GISEL-GFX12-NEXT: $vgpr9 = COPY [[COPY7]]
+ ; GISEL-GFX12-NEXT: $vgpr10 = COPY [[COPY8]]
+ ; GISEL-GFX12-NEXT: $vgpr11 = COPY [[COPY9]]
+ ; GISEL-GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-lo) @retry_vgpr_alloc
+ ; GISEL-GFX12-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-hi) @retry_vgpr_alloc
+ ; GISEL-GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_1]], %subreg.sub1
+ ; GISEL-GFX12-NEXT: [[COPY14:%[0-9]+]]:ccr_sgpr_64 = COPY [[REG_SEQUENCE1]]
+ ; GISEL-GFX12-NEXT: SI_CS_CHAIN_TC_W32_DVGPR [[REG_SEQUENCE]], 0, 0, [[COPY]], [[COPY10]], -1, [[COPY14]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ;
+ ; DAGISEL-GFX12-LABEL: name: indirect_callee_direct_fallback
+ ; DAGISEL-GFX12: bb.0 (%ir-block.0):
+ ; DAGISEL-GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $sgpr6
+ ; DAGISEL-GFX12-NEXT: {{ $}}
+ ; DAGISEL-GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr6
+ ; DAGISEL-GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr11
+ ; DAGISEL-GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr10
+ ; DAGISEL-GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr9
+ ; DAGISEL-GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; DAGISEL-GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr5
+ ; DAGISEL-GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; DAGISEL-GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; DAGISEL-GFX12-NEXT: [[COPY8:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; DAGISEL-GFX12-NEXT: [[COPY9:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; DAGISEL-GFX12-NEXT: [[COPY10:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; DAGISEL-GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; DAGISEL-GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub1
+ ; DAGISEL-GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY12]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0
+ ; DAGISEL-GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY13]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY14]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_1]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_]], %subreg.sub1
+ ; DAGISEL-GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-hi) @retry_vgpr_alloc
+ ; DAGISEL-GFX12-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-lo) @retry_vgpr_alloc
+ ; DAGISEL-GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE killed [[S_MOV_B32_1]], %subreg.sub0, killed [[S_MOV_B32_]], %subreg.sub1
+ ; DAGISEL-GFX12-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY15]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY16]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY17]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]]
+ ; DAGISEL-GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_3]]
+ ; DAGISEL-GFX12-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_4]]
+ ; DAGISEL-GFX12-NEXT: $vgpr8 = COPY [[COPY4]]
+ ; DAGISEL-GFX12-NEXT: $vgpr9 = COPY [[COPY3]]
+ ; DAGISEL-GFX12-NEXT: $vgpr10 = COPY [[COPY2]]
+ ; DAGISEL-GFX12-NEXT: $vgpr11 = COPY [[COPY1]]
+ ; DAGISEL-GFX12-NEXT: SI_CS_CHAIN_TC_W32_DVGPR killed [[REG_SEQUENCE1]], 0, 0, [[COPY10]], [[COPY]], -1, killed [[REG_SEQUENCE2]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ call void(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr inreg %callee, i32 %exec, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 1, i32 inreg %num_vgpr, i32 inreg -1, ptr @retry_vgpr_alloc)
+ unreachable
+}
+
+define amdgpu_cs_chain void @direct_callee_indirect_fallback(i32 inreg %exec, ptr inreg %retry, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 inreg %num_vgpr) {
+ ; GISEL-GFX12-LABEL: name: direct_callee_indirect_fallback
+ ; GISEL-GFX12: bb.1 (%ir-block.0):
+ ; GISEL-GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr8, $vgpr9, $vgpr10, $vgpr11
+ ; GISEL-GFX12-NEXT: {{ $}}
+ ; GISEL-GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GISEL-GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GISEL-GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GISEL-GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GISEL-GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GISEL-GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GISEL-GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GISEL-GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; GISEL-GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr9
+ ; GISEL-GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr10
+ ; GISEL-GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr11
+ ; GISEL-GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GISEL-GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY3]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GISEL-GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GISEL-GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
+ ; GISEL-GFX12-NEXT: $vgpr8 = COPY [[COPY6]]
+ ; GISEL-GFX12-NEXT: $vgpr9 = COPY [[COPY7]]
+ ; GISEL-GFX12-NEXT: $vgpr10 = COPY [[COPY8]]
+ ; GISEL-GFX12-NEXT: $vgpr11 = COPY [[COPY9]]
+ ; GISEL-GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-lo) @callee
+ ; GISEL-GFX12-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-hi) @callee
+ ; GISEL-GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_1]], %subreg.sub1
+ ; GISEL-GFX12-NEXT: [[COPY14:%[0-9]+]]:ccr_sgpr_64 = COPY [[REG_SEQUENCE1]]
+ ; GISEL-GFX12-NEXT: SI_CS_CHAIN_TC_W32_DVGPR [[COPY14]], 0, 0, [[COPY]], [[COPY10]], -1, [[REG_SEQUENCE]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ;
+ ; DAGISEL-GFX12-LABEL: name: direct_callee_indirect_fallback
+ ; DAGISEL-GFX12: bb.0 (%ir-block.0):
+ ; DAGISEL-GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $sgpr6
+ ; DAGISEL-GFX12-NEXT: {{ $}}
+ ; DAGISEL-GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr6
+ ; DAGISEL-GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr11
+ ; DAGISEL-GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr10
+ ; DAGISEL-GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr9
+ ; DAGISEL-GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; DAGISEL-GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr5
+ ; DAGISEL-GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; DAGISEL-GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; DAGISEL-GFX12-NEXT: [[COPY8:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; DAGISEL-GFX12-NEXT: [[COPY9:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; DAGISEL-GFX12-NEXT: [[COPY10:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; DAGISEL-GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; DAGISEL-GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-hi) @callee
+ ; DAGISEL-GFX12-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 target-flags(amdgpu-abs32-lo) @callee
+ ; DAGISEL-GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE killed [[S_MOV_B32_1]], %subreg.sub0, killed [[S_MOV_B32_]], %subreg.sub1
+ ; DAGISEL-GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; DAGISEL-GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; DAGISEL-GFX12-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
+ ; DAGISEL-GFX12-NEXT: $vgpr8 = COPY [[COPY4]]
+ ; DAGISEL-GFX12-NEXT: $vgpr9 = COPY [[COPY3]]
+ ; DAGISEL-GFX12-NEXT: $vgpr10 = COPY [[COPY2]]
+ ; DAGISEL-GFX12-NEXT: $vgpr11 = COPY [[COPY1]]
+ ; DAGISEL-GFX12-NEXT: SI_CS_CHAIN_TC_W32_DVGPR killed [[REG_SEQUENCE1]], 0, 0, [[COPY10]], [[COPY]], -1, killed [[REG_SEQUENCE]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ call void(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee, i32 %exec, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 1, i32 inreg %num_vgpr, i32 inreg -1, ptr inreg %retry)
+ unreachable
+}
+
+; This testcase uses registers for the EXEC masks and number of VGPRs, to make sure we can handle registers too.
+define amdgpu_cs_chain void @indirect_callee_indirect_fallback(i32 inreg %exec, i32 inreg %retry_exec, ptr inreg %callee, ptr inreg %retry, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 inreg %num_vgpr) {
+ ; GISEL-GFX12-LABEL: name: indirect_callee_indirect_fallback
+ ; GISEL-GFX12: bb.1 (%ir-block.0):
+ ; GISEL-GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr8, $vgpr9, $vgpr10, $vgpr11
+ ; GISEL-GFX12-NEXT: {{ $}}
+ ; GISEL-GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GISEL-GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GISEL-GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GISEL-GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GISEL-GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GISEL-GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GISEL-GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GISEL-GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GISEL-GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GISEL-GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GISEL-GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr8
+ ; GISEL-GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; GISEL-GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr9
+ ; GISEL-GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY $vgpr10
+ ; GISEL-GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY $vgpr11
+ ; GISEL-GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_32 = COPY $sgpr9
+ ; GISEL-GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY14]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GISEL-GFX12-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY15]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GISEL-GFX12-NEXT: [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY16]], implicit $exec
+ ; GISEL-GFX12-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]]
+ ; GISEL-GFX12-NEXT: $vgpr8 = COPY [[COPY9]]
+ ; GISEL-GFX12-NEXT: $vgpr9 = COPY [[COPY10]]
+ ; GISEL-GFX12-NEXT: $vgpr10 = COPY [[COPY11]]
+ ; GISEL-GFX12-NEXT: $vgpr11 = COPY [[COPY12]]
+ ; GISEL-GFX12-NEXT: SI_CS_CHAIN_TC_W32_DVGPR [[REG_SEQUENCE]], 0, 0, [[COPY]], [[COPY13]], [[COPY1]], [[REG_SEQUENCE1]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ ;
+ ; DAGISEL-GFX12-LABEL: name: indirect_callee_indirect_fallback
+ ; DAGISEL-GFX12: bb.0 (%ir-block.0):
+ ; DAGISEL-GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $sgpr9
+ ; DAGISEL-GFX12-NEXT: {{ $}}
+ ; DAGISEL-GFX12-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr9
+ ; DAGISEL-GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr11
+ ; DAGISEL-GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr10
+ ; DAGISEL-GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr9
+ ; DAGISEL-GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; DAGISEL-GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr8
+ ; DAGISEL-GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr7
+ ; DAGISEL-GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr6
+ ; DAGISEL-GFX12-NEXT: [[COPY8:%[0-9]+]]:sgpr_32 = COPY $sgpr5
+ ; DAGISEL-GFX12-NEXT: [[COPY9:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; DAGISEL-GFX12-NEXT: [[COPY10:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; DAGISEL-GFX12-NEXT: [[COPY11:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; DAGISEL-GFX12-NEXT: [[COPY12:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; DAGISEL-GFX12-NEXT: [[COPY13:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; DAGISEL-GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; DAGISEL-GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; DAGISEL-GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub1
+ ; DAGISEL-GFX12-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[COPY14]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY15]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0
+ ; DAGISEL-GFX12-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[COPY16]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY17]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:ccr_sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_1]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_]], %subreg.sub1
+ ; DAGISEL-GFX12-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; DAGISEL-GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec
+ ; DAGISEL-GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]]
+ ; DAGISEL-GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_3]]
+ ; DAGISEL-GFX12-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_4]]
+ ; DAGISEL-GFX12-NEXT: $vgpr8 = COPY [[COPY4]]
+ ; DAGISEL-GFX12-NEXT: $vgpr9 = COPY [[COPY3]]
+ ; DAGISEL-GFX12-NEXT: $vgpr10 = COPY [[COPY2]]
+ ; DAGISEL-GFX12-NEXT: $vgpr11 = COPY [[COPY1]]
+ ; DAGISEL-GFX12-NEXT: SI_CS_CHAIN_TC_W32_DVGPR killed [[REG_SEQUENCE2]], 0, 0, [[COPY13]], [[COPY]], [[COPY12]], killed [[REG_SEQUENCE]], amdgpu_allvgprs, implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
+ call void(ptr, i32, <3 x i32>, { i32, ptr addrspace(5), i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain(ptr inreg %callee, i32 inreg %exec, <3 x i32> inreg %sgpr, { i32, ptr addrspace(5), i32, i32 } %vgpr, i32 1, i32 inreg %num_vgpr, i32 inreg %retry_exec, ptr inreg %retry)
+ unreachable
+}
+
+declare amdgpu_cs_chain_preserve void @retry_vgpr_alloc(<3 x i32> inreg %sgpr)
diff --git a/llvm/test/CodeGen/AMDGPU/remove-register-flags.mir b/llvm/test/CodeGen/AMDGPU/remove-register-flags.mir
new file mode 100644
index 0000000000000..d9dc449501203
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/remove-register-flags.mir
@@ -0,0 +1,19 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass=si-late-branch-lowering %s -o - | FileCheck %s
+
+---
+# Should remove killed flags from S_ALLOC and C_SELECT
+name: remove_killed
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr18, $sgpr19
+
+ ; CHECK-LABEL: name: remove_killed
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr18, $sgpr19
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_ALLOC_VGPR $sgpr19, implicit-def $scc
+ ; CHECK-NEXT: $sgpr20_sgpr21 = S_CSELECT_B64 $sgpr20_sgpr21, $sgpr22_sgpr23, implicit $scc
+ ; CHECK-NEXT: $exec_lo = S_CSELECT_B32 $sgpr18, -1, implicit $scc
+ ; CHECK-NEXT: SI_TCRETURN killed renamable $sgpr20_sgpr21, 0, 0, amdgpu_allvgprs, implicit killed $sgpr0, implicit killed $sgpr1, implicit killed $sgpr2, implicit killed $sgpr3, implicit killed $sgpr4, implicit killed $sgpr5, implicit killed $sgpr6, implicit killed $sgpr7, implicit killed $sgpr8, implicit killed $sgpr9, implicit killed $sgpr10, implicit killed $sgpr11, implicit killed $sgpr12, implicit killed $sgpr13, implicit killed $sgpr14, implicit killed $sgpr15, implicit killed $sgpr16, implicit killed $sgpr17, implicit $sgpr18, implicit $sgpr19
+ SI_CS_CHAIN_TC_W32_DVGPR killed renamable $sgpr20_sgpr21, 0, 0, killed renamable $sgpr18, killed renamable $sgpr19, -1, killed renamable $sgpr22_sgpr23, amdgpu_allvgprs, implicit killed $sgpr0, implicit killed $sgpr1, implicit killed $sgpr2, implicit killed $sgpr3, implicit killed $sgpr4, implicit killed $sgpr5, implicit killed $sgpr6, implicit killed $sgpr7, implicit killed $sgpr8, implicit killed $sgpr9, implicit killed $sgpr10, implicit killed $sgpr11, implicit killed $sgpr12, implicit killed $sgpr13, implicit killed $sgpr14, implicit killed $sgpr15, implicit killed $sgpr16, implicit killed $sgpr17, implicit $sgpr18, implicit $sgpr19
+...
>From 84fe000bfb344ca1b5c266398b731c39616cbc62 Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Mon, 10 Mar 2025 14:09:11 +0100
Subject: [PATCH 05/23] Update llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
Co-authored-by: Matt Arsenault <Matthew.Arsenault at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index a440617319228..c3043e4042c5e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -990,10 +990,10 @@ static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
LLT::pointer(GV->getAddressSpace(), 64), GV);
CallInst.addReg(Ptr.getReg(0));
- if (IsDynamicVGPRChainCall)
+ if (IsDynamicVGPRChainCall) {
// DynamicVGPR chain calls are always indirect.
CallInst.addImm(0);
- else
+ } else
CallInst.add(Info.Callee);
} else
return false;
>From 21758f3f55d9081e7cb03e56260edc6a89864d9a Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Mon, 10 Mar 2025 14:09:27 +0100
Subject: [PATCH 06/23] Update llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
Co-authored-by: Matt Arsenault <Matthew.Arsenault at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index c3043e4042c5e..5270d1d7689f8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -1233,7 +1233,7 @@ bool AMDGPUCallLowering::lowerTailCall(
const APInt &FlagsValue = cast<ConstantInt>(FlagsArg.OrigValue)->getValue();
if (FlagsValue.isZero()) {
if (Info.OrigArgs.size() != 5) {
- LLVM_DEBUG(dbgs() << "No additional args allowed if flags == 0");
+ LLVM_DEBUG(dbgs() << "No additional args allowed if flags == 0\n");
return false;
}
} else if (FlagsValue.isOneBitSet(0)) {
>From 1a99cabefdb73cb17b4df822964b38217fb9f24d Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Mon, 10 Mar 2025 14:09:37 +0100
Subject: [PATCH 07/23] Update llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
Co-authored-by: Matt Arsenault <Matthew.Arsenault at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 5270d1d7689f8..cbbf9dcd829c6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -1246,7 +1246,7 @@ bool AMDGPUCallLowering::lowerTailCall(
// On GFX12, we can only change the VGPR allocation for wave32.
if (!ST.isWave32()) {
- LLVM_DEBUG(dbgs() << "Dynamic VGPR mode is only supported for wave32");
+ LLVM_DEBUG(dbgs() << "Dynamic VGPR mode is only supported for wave32\n");
return false;
}
>From 296a9db2611c31497fb40b5b01a027440dcda2a5 Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Mon, 10 Mar 2025 14:10:54 +0100
Subject: [PATCH 08/23] Update llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
Co-authored-by: Matt Arsenault <Matthew.Arsenault at amd.com>
---
llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
index bb4e7f8e6c653..f1ecd25392124 100644
--- a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
@@ -215,7 +215,7 @@ bool SILateBranchLowering::runOnMachineFunction(MachineFunction &MF) {
case AMDGPU::SI_CS_CHAIN_TC_W32:
case AMDGPU::SI_CS_CHAIN_TC_W64:
- expandChainCall(MI, ST, /*DynamicVGPR*/ false);
+ expandChainCall(MI, ST, /*DynamicVGPR=*/ false);
MadeChange = true;
break;
case AMDGPU::SI_CS_CHAIN_TC_W32_DVGPR:
>From eb9955e4c7d7af7eb1c31b0dce5eec65ba4158ee Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Mon, 10 Mar 2025 14:34:51 +0100
Subject: [PATCH 09/23] Remove wave size mattr from tests
---
.../AMDGPU/amdgcn-cs-chain-intrinsic-dyn-vgpr-w32.ll | 4 ++--
.../CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w32.ll | 8 ++++----
.../AMDGPU/isel-amdgpu-cs-chain-intrinsic-dyn-vgpr-w32.ll | 4 ++--
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn-cs-chain-intrinsic-dyn-vgpr-w32.ll b/llvm/test/CodeGen/AMDGPU/amdgcn-cs-chain-intrinsic-dyn-vgpr-w32.ll
index f320a18f253e2..77c9b9813571a 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn-cs-chain-intrinsic-dyn-vgpr-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn-cs-chain-intrinsic-dyn-vgpr-w32.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=GISEL-GFX12 %s
-; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=DAGISEL-GFX12 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=GISEL-GFX12 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 < %s | FileCheck -check-prefix=DAGISEL-GFX12 %s
declare amdgpu_cs_chain void @callee(<3 x i32> inreg, { i32, ptr addrspace(5), i32, i32 })
declare amdgpu_cs_chain_preserve void @callee_preserve(<3 x i32> inreg, { i32, ptr addrspace(5), i32, i32 })
diff --git a/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w32.ll b/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w32.ll
index ce8756c1587f6..0e5ce9d2488b0 100644
--- a/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/isel-amdgcn-cs-chain-intrinsic-w32.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx1100 -mattr=+wavefrontsize32 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=GISEL-GFX11 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx1030 -mattr=+wavefrontsize32 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=GISEL-GFX10 %s
-; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx1100 -mattr=+wavefrontsize32 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=DAGISEL-GFX11 %s
-; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx1030 -mattr=+wavefrontsize32 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=DAGISEL-GFX10 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx1100 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=GISEL-GFX11 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx1030 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=GISEL-GFX10 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx1100 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=DAGISEL-GFX11 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx1030 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=DAGISEL-GFX10 %s
declare amdgpu_cs_chain void @callee(<3 x i32> inreg, { i32, ptr addrspace(5), i32, i32 })
declare amdgpu_cs_chain_preserve void @callee_preserve(<3 x i32> inreg, { i32, ptr addrspace(5), i32, i32 })
diff --git a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-intrinsic-dyn-vgpr-w32.ll b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-intrinsic-dyn-vgpr-w32.ll
index 955eb14a9d85f..4e040748a34d8 100644
--- a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-intrinsic-dyn-vgpr-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-intrinsic-dyn-vgpr-w32.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+wavefrontsize32,-wavefrontsize64 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=GISEL-GFX12 %s
-; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+wavefrontsize32,-wavefrontsize64 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=DAGISEL-GFX12 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=GISEL-GFX12 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -stop-after=finalize-isel -verify-machineinstrs < %s | FileCheck -check-prefix=DAGISEL-GFX12 %s
declare amdgpu_cs_chain void @callee(<3 x i32> inreg, { i32, ptr addrspace(5), i32, i32 })
declare amdgpu_cs_chain_preserve void @callee_preserve(<3 x i32> inreg, { i32, ptr addrspace(5), i32, i32 })
>From 7decd1d2af8fe9ef19b9fea4d28a46ff9087e746 Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Mon, 10 Mar 2025 15:25:22 +0100
Subject: [PATCH 10/23] Diagnose unsupported
---
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index cbbf9dcd829c6..813023677fd6c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -1246,7 +1246,8 @@ bool AMDGPUCallLowering::lowerTailCall(
// On GFX12, we can only change the VGPR allocation for wave32.
if (!ST.isWave32()) {
- LLVM_DEBUG(dbgs() << "Dynamic VGPR mode is only supported for wave32\n");
+ F.getContext().diagnose(DiagnosticInfoUnsupported(
+ F, "Dynamic VGPR mode is only supported for wave32\n"));
return false;
}
>From e0635646ff1933b250af3d08beb8ce0f493e1f44 Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Mon, 10 Mar 2025 15:25:53 +0100
Subject: [PATCH 11/23] debug loc & s/unsigned/int
---
llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
index f1ecd25392124..c38d2eebede6f 100644
--- a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
@@ -129,28 +129,29 @@ void SILateBranchLowering::expandChainCall(MachineInstr &MI,
bool DynamicVGPR) {
// This is a tail call that needs to be expanded into at least
// 2 instructions, one for setting EXEC and one for the actual tail call.
- unsigned ExecIdx =
+ int ExecIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::exec);
+ assert(ExecIdx != -1 && "Missing EXEC operand");
+ const DebugLoc &DL = MI.getDebugLoc();
if (DynamicVGPR) {
// We have 3 extra operands and we need to:
// * Try to change the VGPR allocation
// * Select the callee based on the result of the reallocation attempt
// * Select the EXEC mask based on the result of the reallocation attempt
- auto AllocMI = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
- TII->get(AMDGPU::S_ALLOC_VGPR));
+ auto AllocMI =
+ BuildMI(*MI.getParent(), MI, DL, TII->get(AMDGPU::S_ALLOC_VGPR));
addRegOrCopyOp(AllocMI,
*TII->getNamedOperand(MI, AMDGPU::OpName::numvgprs));
auto SelectCallee =
- BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
- TII->get(AMDGPU::S_CSELECT_B64))
+ BuildMI(*MI.getParent(), MI, DL, TII->get(AMDGPU::S_CSELECT_B64))
.addDef(TII->getNamedOperand(MI, AMDGPU::OpName::src0)->getReg());
addRegOrCopyOp(SelectCallee,
*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
addRegOrCopyOp(SelectCallee,
*TII->getNamedOperand(MI, AMDGPU::OpName::fbcallee));
- auto SelectExec = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
+ auto SelectExec = BuildMI(*MI.getParent(), MI, DL,
TII->get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
: AMDGPU::S_CSELECT_B64))
.addDef(ExecReg);
@@ -159,8 +160,7 @@ void SILateBranchLowering::expandChainCall(MachineInstr &MI,
addRegOrCopyOp(SelectExec,
*TII->getNamedOperand(MI, AMDGPU::OpName::fbexec));
} else {
- auto SetExec = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
- TII->get(MovOpc), ExecReg);
+ auto SetExec = BuildMI(*MI.getParent(), MI, DL, TII->get(MovOpc), ExecReg);
addRegOrCopyOp(SetExec, *TII->getNamedOperand(MI, AMDGPU::OpName::exec));
}
>From 51d11116127b21cc62677c40759b05c8837fc7d8 Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Tue, 11 Mar 2025 10:54:13 +0100
Subject: [PATCH 12/23] Fix tablegen indent
---
llvm/lib/Target/AMDGPU/SIInstructions.td | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index e5af35f2da686..66cb8ed0f015f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -705,16 +705,16 @@ multiclass SI_CS_CHAIN_TC<
SchedRW = [WriteBranch],
isConvergent = 1,
WaveSizePredicate = wavesizepred in {
- // This is essentially a tail call, but it also takes a mask to put in EXEC
- // right before jumping to the callee.
- def NAME: SPseudoInstSI <(outs),
- (ins CCR_SGPR_64:$src0, unknown:$callee, i32imm:$fpdiff, execrc:$exec)>;
-
- // Same as above, but it will first try to reallocate the VGPRs, and choose an
- // EXEC mask and a callee depending on the success of the reallocation attempt.
- def _DVGPR : SPseudoInstSI <(outs),
- (ins CCR_SGPR_64:$src0, i64imm:$callee, i32imm:$fpdiff, execrc:$exec,
- SSrc_b32:$numvgprs, execrc:$fbexec, CCR_SGPR_64:$fbcallee)>;
+ // This is essentially a tail call, but it also takes a mask to put in EXEC
+ // right before jumping to the callee.
+ def NAME: SPseudoInstSI <(outs),
+ (ins CCR_SGPR_64:$src0, unknown:$callee, i32imm:$fpdiff, execrc:$exec)>;
+
+ // Same as above, but it will first try to reallocate the VGPRs, and choose an
+ // EXEC mask and a callee depending on the success of the reallocation attempt.
+ def _DVGPR : SPseudoInstSI <(outs),
+ (ins CCR_SGPR_64:$src0, i64imm:$callee, i32imm:$fpdiff, execrc:$exec,
+ SSrc_b32:$numvgprs, execrc:$fbexec, CCR_SGPR_64:$fbcallee)>;
} // End FixedSize = 0 etc
}
>From 9ea5a9c52a9a78953436dd4e7299af6a1f93581d Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Tue, 11 Mar 2025 11:12:03 +0100
Subject: [PATCH 13/23] Explain removal of op flags
---
.../Target/AMDGPU/SILateBranchLowering.cpp | 28 +++++++++++--------
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
index c38d2eebede6f..30f953ce0397a 100644
--- a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
@@ -117,7 +117,8 @@ static void splitBlock(MachineBasicBlock &MBB, MachineInstr &MI,
MDT->applyUpdates(DTUpdates);
}
-static void addRegOrCopyOp(MachineInstrBuilder &MIB, MachineOperand &Op) {
+static void copyOpWithoutRegFlags(MachineInstrBuilder &MIB,
+ MachineOperand &Op) {
if (Op.isReg())
MIB.addReg(Op.getReg());
else
@@ -138,30 +139,35 @@ void SILateBranchLowering::expandChainCall(MachineInstr &MI,
// * Try to change the VGPR allocation
// * Select the callee based on the result of the reallocation attempt
// * Select the EXEC mask based on the result of the reallocation attempt
+ // If any of the register operands of the chain pseudo is used in more than
+ // one of these instructions, we need to make sure that the kill flags
+ // aren't copied along.
auto AllocMI =
BuildMI(*MI.getParent(), MI, DL, TII->get(AMDGPU::S_ALLOC_VGPR));
- addRegOrCopyOp(AllocMI,
- *TII->getNamedOperand(MI, AMDGPU::OpName::numvgprs));
+ copyOpWithoutRegFlags(AllocMI,
+ *TII->getNamedOperand(MI, AMDGPU::OpName::numvgprs));
auto SelectCallee =
BuildMI(*MI.getParent(), MI, DL, TII->get(AMDGPU::S_CSELECT_B64))
.addDef(TII->getNamedOperand(MI, AMDGPU::OpName::src0)->getReg());
- addRegOrCopyOp(SelectCallee,
- *TII->getNamedOperand(MI, AMDGPU::OpName::src0));
- addRegOrCopyOp(SelectCallee,
- *TII->getNamedOperand(MI, AMDGPU::OpName::fbcallee));
+ copyOpWithoutRegFlags(SelectCallee,
+ *TII->getNamedOperand(MI, AMDGPU::OpName::src0));
+ copyOpWithoutRegFlags(SelectCallee,
+ *TII->getNamedOperand(MI, AMDGPU::OpName::fbcallee));
auto SelectExec = BuildMI(*MI.getParent(), MI, DL,
TII->get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
: AMDGPU::S_CSELECT_B64))
.addDef(ExecReg);
- addRegOrCopyOp(SelectExec, *TII->getNamedOperand(MI, AMDGPU::OpName::exec));
- addRegOrCopyOp(SelectExec,
- *TII->getNamedOperand(MI, AMDGPU::OpName::fbexec));
+ copyOpWithoutRegFlags(SelectExec,
+ *TII->getNamedOperand(MI, AMDGPU::OpName::exec));
+ copyOpWithoutRegFlags(SelectExec,
+ *TII->getNamedOperand(MI, AMDGPU::OpName::fbexec));
} else {
auto SetExec = BuildMI(*MI.getParent(), MI, DL, TII->get(MovOpc), ExecReg);
- addRegOrCopyOp(SetExec, *TII->getNamedOperand(MI, AMDGPU::OpName::exec));
+ copyOpWithoutRegFlags(SetExec,
+ *TII->getNamedOperand(MI, AMDGPU::OpName::exec));
}
for (unsigned OpIdx = MI.getNumExplicitOperands() - 1; OpIdx >= ExecIdx;
>From cd932e672898a3fcdae82b6ac14f9c2d5ce99c9a Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Tue, 11 Mar 2025 11:22:35 +0100
Subject: [PATCH 14/23] Use specific ISD node for dvgpr case
---
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 1 +
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h | 1 +
llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td | 6 ++++++
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 5 ++++-
llvm/lib/Target/AMDGPU/SIInstructions.td | 7 +++----
5 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index ade81f17ecca5..16e1625eacb07 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -5492,6 +5492,7 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(TC_RETURN)
NODE_NAME_CASE(TC_RETURN_GFX)
NODE_NAME_CASE(TC_RETURN_CHAIN)
+ NODE_NAME_CASE(TC_RETURN_CHAIN_DVGPR)
NODE_NAME_CASE(TRAP)
NODE_NAME_CASE(RET_GLUE)
NODE_NAME_CASE(WAVE_ADDRESS)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index c74dc7942f52c..876561cf2a8f5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -402,6 +402,7 @@ enum NodeType : unsigned {
TC_RETURN,
TC_RETURN_GFX,
TC_RETURN_CHAIN,
+ TC_RETURN_CHAIN_DVGPR,
TRAP,
// Masked control flow nodes.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
index bec294a945d2f..ce58e93a15207 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -99,6 +99,12 @@ def AMDGPUtc_return_chain: SDNode<"AMDGPUISD::TC_RETURN_CHAIN",
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]
>;
+// With dynamic VGPRs.
+def AMDGPUtc_return_chain_dvgpr: SDNode<"AMDGPUISD::TC_RETURN_CHAIN_DVGPR",
+ SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]
+>;
+
def AMDGPUtrap : SDNode<"AMDGPUISD::TRAP",
SDTypeProfile<0, 1, [SDTCisVT<0, i16>]>,
[SDNPHasChain, SDNPVariadic, SDNPSideEffect, SDNPOptInGlue]
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 5438c6f50dea2..f4075fac87116 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3683,6 +3683,7 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
SDValue Callee = CLI.Callee;
llvm::SmallVector<SDValue, 6> ChainCallSpecialArgs;
+ bool UsesDynamicVGPRs = false;
if (IsChainCallConv) {
// The last arguments should be the value that we need to put in EXEC,
// followed by the flags and any other arguments with special meanings.
@@ -3731,6 +3732,7 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
return lowerUnhandledCall(CLI, InVals, "Expected 3 additional args");
}
+ UsesDynamicVGPRs = true;
std::for_each(CLI.Args.begin() + ChainCallArgIdx::NumVGPRs,
CLI.Args.end(), PushNodeOrTargetConstant);
}
@@ -4064,7 +4066,8 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
break;
case CallingConv::AMDGPU_CS_Chain:
case CallingConv::AMDGPU_CS_ChainPreserve:
- OPC = AMDGPUISD::TC_RETURN_CHAIN;
+ OPC = UsesDynamicVGPRs ? AMDGPUISD::TC_RETURN_CHAIN_DVGPR
+ : AMDGPUISD::TC_RETURN_CHAIN;
break;
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 66cb8ed0f015f..9e9aba80f8878 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -748,16 +748,15 @@ defm : si_cs_chain_tc_patterns<i64>;
multiclass si_cs_chain_tc_dvgpr_patterns<
ValueType execvt, RegisterOperand execrc = getSOPSrcForVT<execvt>.ret,
Instruction tc = SI_CS_CHAIN_TC_W32_DVGPR> {
- let AddedComplexity = 90 in {
foreach callee = [ (i64 0), (tglobaladdr) ] in {
def : GCNPat<
- (AMDGPUtc_return_chain i64:$src0, callee, (i32 timm:$fpdiff), execvt:$exec,
- i32:$numvgprs, execvt:$fbexec, i64:$fbcallee),
+ (AMDGPUtc_return_chain_dvgpr i64:$src0, callee, (i32 timm:$fpdiff),
+ execvt:$exec, i32:$numvgprs,
+ execvt:$fbexec, i64:$fbcallee),
(tc CCR_SGPR_64:$src0, (i64 0), i32imm:$fpdiff, execrc:$exec,
SSrc_b32:$numvgprs, execrc:$fbexec, CCR_SGPR_64:$fbcallee)
>;
}
- } // AddedComplexity
}
defm : si_cs_chain_tc_dvgpr_patterns<i32>; // On GFX12, dVGPR mode is wave32-only.
>From 8e17cbe5c98d4afb099094b6bae2e28e0f2f9df9 Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Tue, 11 Mar 2025 11:27:30 +0100
Subject: [PATCH 15/23] Update comment in
llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
Co-authored-by: Matt Arsenault <Matthew.Arsenault at amd.com>
---
llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
index 30f953ce0397a..c0969901c10da 100644
--- a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
@@ -226,7 +226,7 @@ bool SILateBranchLowering::runOnMachineFunction(MachineFunction &MF) {
break;
case AMDGPU::SI_CS_CHAIN_TC_W32_DVGPR:
case AMDGPU::SI_CS_CHAIN_TC_W64_DVGPR:
- expandChainCall(MI, ST, /*DynamicVGPR*/ true);
+ expandChainCall(MI, ST, /*DynamicVGPR=*/ true);
MadeChange = true;
break;
>From 776eb73a43cc76f64040fd2d46dfe3e571ede918 Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Tue, 11 Mar 2025 11:32:45 +0100
Subject: [PATCH 16/23] Fixup formatting
---
llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
index c0969901c10da..9f7d643f18a24 100644
--- a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
@@ -221,12 +221,12 @@ bool SILateBranchLowering::runOnMachineFunction(MachineFunction &MF) {
case AMDGPU::SI_CS_CHAIN_TC_W32:
case AMDGPU::SI_CS_CHAIN_TC_W64:
- expandChainCall(MI, ST, /*DynamicVGPR=*/ false);
+ expandChainCall(MI, ST, /*DynamicVGPR=*/false);
MadeChange = true;
break;
case AMDGPU::SI_CS_CHAIN_TC_W32_DVGPR:
case AMDGPU::SI_CS_CHAIN_TC_W64_DVGPR:
- expandChainCall(MI, ST, /*DynamicVGPR=*/ true);
+ expandChainCall(MI, ST, /*DynamicVGPR=*/true);
MadeChange = true;
break;
>From c7a45e66608bb380256c78197cff45914ec1dd39 Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Wed, 12 Mar 2025 09:03:31 +0100
Subject: [PATCH 17/23] Update error msg in
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
Co-authored-by: Matt Arsenault <Matthew.Arsenault at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 813023677fd6c..b2bb4ee1edd8f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -1247,7 +1247,7 @@ bool AMDGPUCallLowering::lowerTailCall(
// On GFX12, we can only change the VGPR allocation for wave32.
if (!ST.isWave32()) {
F.getContext().diagnose(DiagnosticInfoUnsupported(
- F, "Dynamic VGPR mode is only supported for wave32\n"));
+ F, "dynamic VGPR mode is only supported for wave32"));
return false;
}
>From 6f26c9f04e7d580042cbe8e4395689fdd5d2c0d3 Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Wed, 12 Mar 2025 09:03:49 +0100
Subject: [PATCH 18/23] Update err msg in
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
Co-authored-by: Matt Arsenault <Matthew.Arsenault at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index b2bb4ee1edd8f..5b1f365a3374a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -1255,7 +1255,7 @@ bool AMDGPUCallLowering::lowerTailCall(
assert(FallbackExecArg.Regs.size() == 1 &&
"Expected single register for fallback EXEC");
if (!FallbackExecArg.Ty->isIntegerTy(ST.getWavefrontSize())) {
- LLVM_DEBUG(dbgs() << "Bad type for fallback EXEC");
+ LLVM_DEBUG(dbgs() << "Bad type for fallback EXEC\n");
return false;
}
}
>From a30443c393f90aff086be8764ebf7fbcf43dcb38 Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Wed, 12 Mar 2025 09:04:10 +0100
Subject: [PATCH 19/23] s/addOperand/add
llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
Co-authored-by: Matt Arsenault <Matthew.Arsenault at amd.com>
---
llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
index 9f7d643f18a24..d22d246e8528f 100644
--- a/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp
@@ -122,7 +122,7 @@ static void copyOpWithoutRegFlags(MachineInstrBuilder &MIB,
if (Op.isReg())
MIB.addReg(Op.getReg());
else
- MIB->addOperand(Op);
+ MIB.add(Op);
}
void SILateBranchLowering::expandChainCall(MachineInstr &MI,
>From f6858237ab5ae6f2f1a0afc168afb033ce6b7b0d Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Thu, 13 Mar 2025 09:42:31 +0100
Subject: [PATCH 20/23] Update dbg string
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
Co-authored-by: Matt Arsenault <Matthew.Arsenault at amd.com>
---
llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 5b1f365a3374a..a15f193549936 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -1240,7 +1240,7 @@ bool AMDGPUCallLowering::lowerTailCall(
IsDynamicVGPRChainCall = true;
if (Info.OrigArgs.size() != 8) {
- LLVM_DEBUG(dbgs() << "Expected 3 additional args");
+ LLVM_DEBUG(dbgs() << "Expected 3 additional args\n");
return false;
}
>From 1d610fd59fc50e283d1b72fc986247ce2b18db03 Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Thu, 13 Mar 2025 11:00:55 +0100
Subject: [PATCH 21/23] Test for unsupported dvgpr on w64
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 5 +++++
llvm/test/CodeGen/AMDGPU/unsupported-cs-chain.ll | 11 +++++++++++
2 files changed, 16 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/unsupported-cs-chain.ll
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f4075fac87116..0a658831144b7 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3732,6 +3732,11 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
return lowerUnhandledCall(CLI, InVals, "Expected 3 additional args");
}
+ if (!Subtarget->isWave32()) {
+ return lowerUnhandledCall(
+ CLI, InVals, "Dynamic VGPR mode is only supported for wave32");
+ }
+
UsesDynamicVGPRs = true;
std::for_each(CLI.Args.begin() + ChainCallArgIdx::NumVGPRs,
CLI.Args.end(), PushNodeOrTargetConstant);
diff --git a/llvm/test/CodeGen/AMDGPU/unsupported-cs-chain.ll b/llvm/test/CodeGen/AMDGPU/unsupported-cs-chain.ll
new file mode 100644
index 0000000000000..dba43181f88f4
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/unsupported-cs-chain.ll
@@ -0,0 +1,11 @@
+; RUN: not --crash llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -global-isel=1 -mattr=+wavefrontsize64 -verify-machineinstrs=0 < %s 2>&1 | FileCheck %s
+; RUN: not llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -global-isel=0 -mattr=+wavefrontsize64 -verify-machineinstrs=0 < %s 2>&1 | FileCheck %s
+
+declare amdgpu_cs_chain void @callee() nounwind
+
+; CHECK: in function test_dvgpr void (): Dynamic VGPR mode is only supported for wave32
+define amdgpu_cs_chain void @test_dvgpr() {
+ call void(ptr, i64, i32, i32, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee, i64 -1, i32 inreg 1, i32 2, i32 1, i32 inreg 32, i32 inreg -1, ptr @callee)
+ unreachable
+}
+
>From b848934ff86f176c1a63c16fad9d5910ed39d1d4 Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Fri, 14 Mar 2025 15:19:48 +0100
Subject: [PATCH 22/23] Capitalization
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6 +++---
llvm/test/CodeGen/AMDGPU/unsupported-cs-chain.ll | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 0a658831144b7..57f4a29bf5130 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3726,15 +3726,15 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
if (FlagsValue.isZero()) {
if (CLI.Args.size() > ChainCallArgIdx::Flags + 1)
return lowerUnhandledCall(CLI, InVals,
- "No additional args allowed if flags == 0");
+ "no additional args allowed if flags == 0");
} else if (FlagsValue.isOneBitSet(0)) {
if (CLI.Args.size() != ChainCallArgIdx::FallbackCallee + 1) {
- return lowerUnhandledCall(CLI, InVals, "Expected 3 additional args");
+ return lowerUnhandledCall(CLI, InVals, "expected 3 additional args");
}
if (!Subtarget->isWave32()) {
return lowerUnhandledCall(
- CLI, InVals, "Dynamic VGPR mode is only supported for wave32");
+ CLI, InVals, "dynamic VGPR mode is only supported for wave32");
}
UsesDynamicVGPRs = true;
diff --git a/llvm/test/CodeGen/AMDGPU/unsupported-cs-chain.ll b/llvm/test/CodeGen/AMDGPU/unsupported-cs-chain.ll
index dba43181f88f4..3d6fd0a7b0b80 100644
--- a/llvm/test/CodeGen/AMDGPU/unsupported-cs-chain.ll
+++ b/llvm/test/CodeGen/AMDGPU/unsupported-cs-chain.ll
@@ -3,7 +3,7 @@
declare amdgpu_cs_chain void @callee() nounwind
-; CHECK: in function test_dvgpr void (): Dynamic VGPR mode is only supported for wave32
+; CHECK: in function test_dvgpr void (): dynamic VGPR mode is only supported for wave32
define amdgpu_cs_chain void @test_dvgpr() {
call void(ptr, i64, i32, i32, i32, ...) @llvm.amdgcn.cs.chain(ptr @callee, i64 -1, i32 inreg 1, i32 2, i32 1, i32 inreg 32, i32 inreg -1, ptr @callee)
unreachable
>From d6c48448a579ed01382066365f3a3b670d1071f3 Mon Sep 17 00:00:00 2001
From: Diana Picus <Diana-Magda.Picus at amd.com>
Date: Wed, 19 Mar 2025 10:33:48 +0100
Subject: [PATCH 23/23] Style issues in
llvm/lib/Target/AMDGPU/SIISelLowering.cpp
Co-authored-by: Matt Arsenault <Matthew.Arsenault at amd.com>
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 57f4a29bf5130..aca7cef613dbc 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3710,10 +3710,10 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
// Convert constants into TargetConstants, so they become immediate operands
// instead of being selected into S_MOV.
auto PushNodeOrTargetConstant = [&](TargetLowering::ArgListEntry Arg) {
- if (auto ArgNode = dyn_cast<ConstantSDNode>(Arg.Node))
+ if (const auto *ArgNode = dyn_cast<ConstantSDNode>(Arg.Node)) {
ChainCallSpecialArgs.push_back(DAG.getTargetConstant(
ArgNode->getAPIntValue(), DL, ArgNode->getValueType(0)));
- else
+ } else
ChainCallSpecialArgs.push_back(Arg.Node);
};
More information about the llvm-commits
mailing list