[llvm] [AMDGPU] - Add s_quadmask intrinsics (PR #70804)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 31 06:50:23 PDT 2023
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-ir
Author: Jessica Del (OutOfCache)
<details>
<summary>Changes</summary>
Add intrinsics to generate `s_quadmask_b32`
and `s_quadmask_b64`.
Support VGPR arguments by inserting a `v_readfirstlane`.
---
Full diff: https://github.com/llvm/llvm-project/pull/70804.diff
5 Files Affected:
- (modified) llvm/include/llvm/IR/IntrinsicsAMDGPU.td (+5)
- (modified) llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp (+9)
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.cpp (+4-2)
- (modified) llvm/lib/Target/AMDGPU/SOPInstructions.td (+4-2)
- (added) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll (+90)
``````````diff
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 50c63621b8e416e..0daa5a71340d6e4 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1932,6 +1932,11 @@ def int_amdgcn_inverse_ballot :
def int_amdgcn_s_bitreplicate :
DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+// Lowers to S_QUADMASK_B{32,64}
+// The argument must be uniform; otherwise, the result is undefined.
+def int_amdgcn_s_quadmask :
+ DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem, IntrConvergent]>;
+
class AMDGPUWaveReduce<LLVMType data_ty = llvm_anyint_ty> : Intrinsic<
[data_ty],
[
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 0c5ed649bcdbe34..259af55885fc0e4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -2995,6 +2995,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
return;
case Intrinsic::amdgcn_inverse_ballot:
case Intrinsic::amdgcn_s_bitreplicate:
+ case Intrinsic::amdgcn_s_quadmask:
applyDefaultMapping(OpdMapper);
constrainOpWithReadfirstlane(B, MI, 2); // Mask
return;
@@ -4537,6 +4538,14 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[2] = AMDGPU::getValueMapping(MaskBank, MaskSize);
break;
}
+ case Intrinsic::amdgcn_s_quadmask: {
+ Register MaskReg = MI.getOperand(2).getReg();
+ unsigned MaskSize = MRI.getType(MaskReg).getSizeInBits();
+ unsigned MaskBank = getRegBankID(MaskReg, MRI, AMDGPU::SGPRRegBankID);
+ OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, MaskSize);
+ OpdsMapping[2] = AMDGPU::getValueMapping(MaskBank, MaskSize);
+ break;
+ }
case Intrinsic::amdgcn_wave_reduce_umin:
case Intrinsic::amdgcn_wave_reduce_umax: {
unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 1bd7a28ca650e35..0a6dd442686e77c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -6473,8 +6473,10 @@ SIInstrInfo::legalizeOperands(MachineInstr &MI,
return CreatedBB;
}
- // Legalize S_BITREPLICATE
- if (MI.getOpcode() == AMDGPU::S_BITREPLICATE_B64_B32) {
+ // Legalize S_BITREPLICATE and S_QUADMASK
+ if (MI.getOpcode() == AMDGPU::S_BITREPLICATE_B64_B32 ||
+ MI.getOpcode() == AMDGPU::S_QUADMASK_B32 ||
+ MI.getOpcode() == AMDGPU::S_QUADMASK_B64) {
MachineOperand &Src = MI.getOperand(1);
if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index c419b5f7a5711f9..c50bbead903dcad 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -326,8 +326,10 @@ def S_XNOR_SAVEEXEC_B64 : SOP1_64 <"s_xnor_saveexec_b64">;
} // End hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC]
-def S_QUADMASK_B32 : SOP1_32 <"s_quadmask_b32">;
-def S_QUADMASK_B64 : SOP1_64 <"s_quadmask_b64">;
+def S_QUADMASK_B32 : SOP1_32 <"s_quadmask_b32",
+[(set i32:$sdst, (int_amdgcn_s_quadmask i32:$src0))]>;
+def S_QUADMASK_B64 : SOP1_64 <"s_quadmask_b64",
+[(set i64:$sdst, (int_amdgcn_s_quadmask i64:$src0))]>;
let Uses = [M0] in {
def S_MOVRELS_B32 : SOP1_32R <"s_movrels_b32">;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
new file mode 100644
index 000000000000000..f182c850793695f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -march=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+
+declare i32 @llvm.amdgcn.s.quadmask.i32(i32)
+declare i64 @llvm.amdgcn.s.quadmask.i64(i64)
+
+define i32 @test_quadmask_constant_i32() {
+; GFX11-LABEL: test_quadmask_constant_i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_quadmask_b32 s0, 0x85fe3a92
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 u0x85FE3A92)
+ ret i32 %qm
+}
+
+define amdgpu_cs void @test_quadmask_sgpr_i32(i32 inreg %mask, ptr addrspace(1) %out) {
+; GFX11-LABEL: test_quadmask_sgpr_i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_quadmask_b32 s0, s0
+; GFX11-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-NEXT: global_store_b32 v[0:1], v2, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+entry:
+ %qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %mask)
+ store i32 %qm, ptr addrspace(1) %out
+ ret void
+}
+
+
+define i32 @test_quadmask_vgpr_i32(i32 %mask) {
+; GFX11-LABEL: test_quadmask_vgpr_i32:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_readfirstlane_b32 s0, v0
+; GFX11-NEXT: s_quadmask_b32 s0, s0
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %mask)
+ ret i32 %qm
+}
+
+define i64 @test_quadmask_constant_i64() {
+; GFX11-LABEL: test_quadmask_constant_i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_mov_b32 s0, 0x85fe3a92
+; GFX11-NEXT: s_mov_b32 s1, 0x67de48fc
+; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 u0x67DE48FC85FE3A92)
+ ret i64 %qm
+}
+
+define amdgpu_cs void @test_quadmask_sgpr_i64(i64 inreg %mask, ptr addrspace(1) %out) {
+; GFX11-LABEL: test_quadmask_sgpr_i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
+; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+entry:
+ %qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask)
+ store i64 %qm, ptr addrspace(1) %out
+ ret void
+}
+
+define i64 @test_quadmask_vgpr_i64(i64 %mask) {
+; GFX11-LABEL: test_quadmask_vgpr_i64:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_readfirstlane_b32 s0, v0
+; GFX11-NEXT: v_readfirstlane_b32 s1, v1
+; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask)
+ ret i64 %qm
+}
``````````
</details>
https://github.com/llvm/llvm-project/pull/70804
More information about the llvm-commits
mailing list