[llvm] [AMDGPU] Allow sinking of free vector ops (PR #109172)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 18 11:10:10 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu
Author: Jeffrey Byrnes (jrbyrnes)
Certain `ShuffleVector` / `ExtractElement` / `InsertElement` instructions have zero cost and will be optimized away. Sinking them into user blocks may enable SDAG combines by giving selection visibility of the values, rather than just emitting CopyTo/FromRegs. The Sink IR pass does not sink instructions into loops, so this PR extends the `CodeGenPrepare` target hook `shouldSinkOperands` to handle these free vector operations as well.
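As a minimal, hand-written sketch (hypothetical, not the patch's actual `loop-vector-sink.ll` test): the lane-0 `extractelement` below costs nothing on AMDGPU, and sinking it from the entry block into the loop places it next to its user, where SDAG can combine through it:

```llvm
; Hypothetical example: a free extract defined outside the loop but used inside.
define void @sink_free_extract(<2 x i32> %vec, ptr %out) {
entry:
  ; Lane 0 of a 32-bit-element vector is free to extract; with this patch,
  ; CodeGenPrepare may sink this instruction into %loop next to %sum.
  %elt = extractelement <2 x i32> %vec, i64 0
  br label %loop

loop:
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
  %sum = add i32 %elt, %iv
  store i32 %sum, ptr %out
  %iv.next = add i32 %iv, 1
  %cond = icmp ult i32 %iv.next, 64
  br i1 %cond, label %loop, label %exit

exit:
  ret void
}
```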
---
Patch is 148.13 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/109172.diff
11 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (+87-1)
- (modified) llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp (+112-22)
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.td (+10)
- (modified) llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp (+12)
- (modified) llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h (+3)
- (modified) llvm/lib/Target/AMDGPU/VOP3Instructions.td (+2)
- (modified) llvm/lib/Target/AMDGPU/VOPInstructions.td (+2)
- (added) llvm/test/CodeGen/AMDGPU/dst-sel-hazard.mir (+436)
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.ll (+2)
- (added) llvm/test/CodeGen/AMDGPU/loop-vector-sink.ll (+48)
- (modified) llvm/test/CodeGen/AMDGPU/srem.ll (+1114-1100)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index e57c8f8b7b4835..1ebd0376ebd7b3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -6000,7 +6000,7 @@ bool AMDGPUTargetLowering::isReassocProfitable(MachineRegisterInfo &MRI,
/// Whether it is profitable to sink the operands of an
/// Instruction I to the basic block of I.
-/// This helps using several modifiers (like abs and neg) more often.
+
bool AMDGPUTargetLowering::shouldSinkOperands(
Instruction *I, SmallVectorImpl<Use *> &Ops) const {
using namespace PatternMatch;
@@ -6010,8 +6010,94 @@ bool AMDGPUTargetLowering::shouldSinkOperands(
if (any_of(Ops, [&](Use *U) { return U->get() == Op.get(); }))
continue;
+    // This helps make use of modifiers (abs and neg) more often.
if (match(&Op, m_FAbs(m_Value())) || match(&Op, m_FNeg(m_Value())))
Ops.push_back(&Op);
+
+ // Zero cost vector instructions (e.g. extractelement 0 of i32 vectors)
+ // will be optimized away, and sinking them can help SDAG combines.
+    const DataLayout &DL = I->getModule()->getDataLayout();
+
+ auto IsFreeExtractInsert = [&DL, this](VectorType *VecType,
+ unsigned VecIndex) {
+ unsigned EltSize = DL.getTypeSizeInBits(VecType->getElementType());
+ return EltSize >= 32 ||
+ (EltSize == 16 && VecIndex == 0 && Subtarget->has16BitInsts());
+ };
+
+ uint64_t VecIndex;
+ Value *Vec;
+ if (match(Op.get(), m_ExtractElt(m_Value(Vec), m_ConstantInt(VecIndex)))) {
+      Instruction *VecOpInst = dyn_cast<Instruction>(Vec);
+      // If a zero cost extractelement instruction is the only use of the
+      // vector, then it may be combined with the def.
+ if (VecOpInst && VecOpInst->hasOneUse())
+ continue;
+
+ if (IsFreeExtractInsert(cast<VectorType>(Vec->getType()), VecIndex))
+ Ops.push_back(&Op);
+
+ continue;
+ }
+
+ if (match(Op.get(),
+ m_InsertElt(m_Value(Vec), m_Value(), m_ConstantInt(VecIndex)))) {
+ if (IsFreeExtractInsert(cast<VectorType>(Vec->getType()), VecIndex))
+ Ops.push_back(&Op);
+
+ continue;
+ }
+
+ if (auto *Shuffle = dyn_cast<ShuffleVectorInst>(Op.get())) {
+ if (Shuffle->isIdentity()) {
+ Ops.push_back(&Op);
+ continue;
+ }
+
+      unsigned EltSize = DL.getTypeSizeInBits(
+          cast<VectorType>(Shuffle->getType())->getElementType());
+
+ // For i32 (or greater) shufflevectors, these will be lowered into a
+ // series of insert / extract elements, which will be coalesced away.
+ if (EltSize >= 32) {
+ Ops.push_back(&Op);
+ continue;
+ }
+
+ if (EltSize < 16 || !Subtarget->has16BitInsts())
+ continue;
+
+ int NumSubElts, SubIndex;
+ if (Shuffle->changesLength()) {
+ if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding()) {
+ Ops.push_back(&Op);
+ continue;
+ }
+
+ if (Shuffle->isExtractSubvectorMask(SubIndex) ||
+ Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex)) {
+ if (!(SubIndex % 2)) {
+ Ops.push_back(&Op);
+ continue;
+ }
+ }
+ }
+
+ if (Shuffle->isReverse() || Shuffle->isZeroEltSplat() ||
+ Shuffle->isSingleSource()) {
+ Ops.push_back(&Op);
+ continue;
+ }
+
+ if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex)) {
+ if (!(SubIndex % 2)) {
+ Ops.push_back(&Op);
+ continue;
+ }
+ }
+ }
}
return !Ops.empty();
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index a8b171aa82840a..a6b7264405ade1 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -876,6 +876,7 @@ GCNHazardRecognizer::checkVALUHazardsHelper(const MachineOperand &Def,
return DataIdx >= 0 &&
TRI->regsOverlap(MI.getOperand(DataIdx).getReg(), Reg);
};
+
int WaitStatesNeededForDef =
VALUWaitStates - getWaitStatesSince(IsHazardFn, VALUWaitStates);
WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
@@ -883,6 +884,70 @@ GCNHazardRecognizer::checkVALUHazardsHelper(const MachineOperand &Def,
return WaitStatesNeeded;
}
+/// A dest-sel forwarding issue occurs if additional logic is needed to
+/// swizzle / pack the computed value into the correct bit position of the
+/// dest register. This occurs if we have SDWA with dst_sel != DWORD, or if we
+/// have op_sel with a dst_sel that is not aligned to the register. This
+/// function analyzes \p MI and \returns an operand with a dst forwarding
+/// issue, or nullptr if none exists.
+static const MachineOperand *
+getDstSelForwardingOperand(const MachineInstr &MI, const GCNSubtarget &ST) {
+ if (!SIInstrInfo::isVALU(MI))
+ return nullptr;
+
+ const SIInstrInfo *TII = ST.getInstrInfo();
+
+ unsigned Opcode = MI.getOpcode();
+
+  // There are three different types of instructions which produce a
+  // forwarded dest: 1. SDWA with dst_sel != DWORD, 2. VOP3 which writes the
+  // hi bits (e.g. op_sel[3] == 1), and 3. CVT_SR_FP8_F32 and CVT_SR_BF8_F32
+  // with op_sel[3:2] != 0.
+ if (SIInstrInfo::isSDWA(MI)) {
+ // Type 1: SDWA with dst_sel != DWORD
+ if (auto *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel))
+ if (DstSel->getImm() == AMDGPU::SDWA::DWORD)
+ return nullptr;
+ } else {
+    // Type 2 && Type 3: (VOP3 which writes the hi bits) || (CVT_SR_FP8_F32
+    // and CVT_SR_BF8_F32 with op_sel[3:2] != 0)
+ if (!AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::op_sel) ||
+ !(TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() &
+ SISrcMods::DST_OP_SEL ||
+ (AMDGPU::isFP8DstSelInst(Opcode) &&
+ (TII->getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() &
+ SISrcMods::OP_SEL_0))))
+ return nullptr;
+ }
+
+ return TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
+}
+
+/// Checks whether the provided \p VALU "consumes" the operand with a
+/// dest-sel forwarding issue \p Dst. We may "consume" the Dst via a standard
+/// explicit RAW, or through irregular ways (e.g. implicit RAW, certain types
+/// of WAW).
+static bool consumesDstSelForwardingOperand(const MachineInstr *VALU,
+ const MachineOperand *Dst,
+ const SIRegisterInfo *TRI) {
+ // We must consider implicit reads of the VALU. SDWA with dst_sel and
+ // UNUSED_PRESERVE will implicitly read the result from forwarded dest,
+ // and we must account for that hazard.
+  // We also must account for WAW hazards. In particular, WAW with dest
+  // preserve semantics (e.g. VOP3 with op_sel, VOP2 &&
+  // !zeroesHigh16BitsOfDest) will read the forwarded dest for an ECC parity
+  // check. Without accounting for this hazard, the ECC will be wrong.
+ // TODO: limit to RAW (including implicit reads) + problematic WAW (i.e.
+ // complete zeroesHigh16BitsOfDest)
+ for (auto &Operand : VALU->operands()) {
+ if (Operand.isReg() && TRI->regsOverlap(Dst->getReg(), Operand.getReg())) {
+ return true;
+ }
+ }
+ return false;
+}
+
int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
int WaitStatesNeeded = 0;
@@ -913,27 +978,18 @@ int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
if (ST.hasDstSelForwardingHazard()) {
const int Shift16DefWaitstates = 1;
- auto IsShift16BitDefFn = [this, VALU](const MachineInstr &MI) {
- if (!SIInstrInfo::isVALU(MI))
- return false;
- const SIInstrInfo *TII = ST.getInstrInfo();
- if (SIInstrInfo::isSDWA(MI)) {
- if (auto *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel))
- if (DstSel->getImm() == AMDGPU::SDWA::DWORD)
- return false;
- } else {
- if (!AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::op_sel) ||
- !(TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)
- ->getImm() &
- SISrcMods::DST_OP_SEL))
- return false;
- }
+ auto IsShift16BitDefFn = [this, VALU](const MachineInstr &ProducerMI) {
const SIRegisterInfo *TRI = ST.getRegisterInfo();
- if (auto *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
- Register Def = Dst->getReg();
+ const MachineOperand *ForwardedDst =
+ getDstSelForwardingOperand(ProducerMI, ST);
+ if (ForwardedDst) {
+ return consumesDstSelForwardingOperand(VALU, ForwardedDst, TRI);
+ }
- for (const MachineOperand &Use : VALU->explicit_uses()) {
- if (Use.isReg() && TRI->regsOverlap(Def, Use.getReg()))
+ if (ProducerMI.isInlineAsm()) {
+      // Assume inline asm has a dst forwarding hazard.
+ for (auto &Def : ProducerMI.all_defs()) {
+ if (consumesDstSelForwardingOperand(VALU, &Def, TRI))
return true;
}
}
@@ -1030,7 +1086,7 @@ int GCNHazardRecognizer::checkInlineAsmHazards(MachineInstr *IA) {
// problematic thus far.
// see checkVALUHazards()
- if (!ST.has12DWordStoreHazard())
+ if (!ST.has12DWordStoreHazard() && !ST.hasDstSelForwardingHazard())
return 0;
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -1039,11 +1095,45 @@ int GCNHazardRecognizer::checkInlineAsmHazards(MachineInstr *IA) {
for (const MachineOperand &Op :
llvm::drop_begin(IA->operands(), InlineAsm::MIOp_FirstOperand)) {
if (Op.isReg() && Op.isDef()) {
- WaitStatesNeeded =
- std::max(WaitStatesNeeded, checkVALUHazardsHelper(Op, MRI));
+ if (!TRI.isVectorRegister(MRI, Op.getReg()))
+ continue;
+
+ if (ST.has12DWordStoreHazard()) {
+ WaitStatesNeeded =
+ std::max(WaitStatesNeeded, checkVALUHazardsHelper(Op, MRI));
+ }
}
}
+ if (ST.hasDstSelForwardingHazard()) {
+ const int Shift16DefWaitstates = 1;
+
+ auto IsShift16BitDefFn = [this, &IA](const MachineInstr &ProducerMI) {
+ const MachineOperand *Dst = getDstSelForwardingOperand(ProducerMI, ST);
+ // Assume inline asm reads the dst
+ if (Dst)
+ return IA->modifiesRegister(Dst->getReg(), &TRI) ||
+ IA->readsRegister(Dst->getReg(), &TRI);
+
+ if (ProducerMI.isInlineAsm()) {
+      // If ProducerMI is inline asm, assume it has a dst forwarding hazard.
+ for (auto &Def : ProducerMI.all_defs()) {
+ if (IA->modifiesRegister(Def.getReg(), &TRI) ||
+ IA->readsRegister(Def.getReg(), &TRI)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+ };
+
+ int WaitStatesNeededForDef =
+ Shift16DefWaitstates -
+ getWaitStatesSince(IsShift16BitDefFn, Shift16DefWaitstates);
+ WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
+ }
+
return WaitStatesNeeded;
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 85281713e22b1f..2b54429dc9a03f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -2342,6 +2342,7 @@ class VOPProfile <list<ValueType> _ArgVT, bit _EnableClamp = 0> {
field bit IsFP8SrcByteSel = 0;
field bit IsFP8DstByteSel = 0;
+ field bit HasFP8DstByteSel = 0;
field bit IsFP8ByteSel = !or(IsFP8SrcByteSel, IsFP8DstByteSel);
field bit HasDst = !ne(DstVT.Value, untyped.Value);
@@ -2921,6 +2922,15 @@ def getVCMPXOpFromVCMP : InstrMapping {
let ValueCols = [["1"]];
}
+def FP8DstByteSelTable : GenericTable {
+ let FilterClass = "VOP3_Pseudo";
+ let CppTypeName = "FP8DstByteSelInfo";
+ let Fields = ["Opcode", "HasFP8DstByteSel"];
+
+ let PrimaryKey = ["Opcode"];
+ let PrimaryKeyName = "getFP8DstByteSelHelper";
+}
+
def VOPDComponentTable : GenericTable {
let FilterClass = "VOPD_Component";
let CppTypeName = "VOPDComponentInfo";
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 5b41a2cd731607..cda664a151ef54 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -385,6 +385,13 @@ struct SingleUseExceptionInfo {
bool IsInvalidSingleUseProducer;
};
+struct FP8DstByteSelInfo {
+ uint16_t Opcode;
+ bool HasFP8DstByteSel;
+};
+
+#define GET_FP8DstByteSelTable_DECL
+#define GET_FP8DstByteSelTable_IMPL
#define GET_MTBUFInfoTable_DECL
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
@@ -629,6 +636,11 @@ bool isInvalidSingleUseProducerInst(unsigned Opc) {
return Info && Info->IsInvalidSingleUseProducer;
}
+bool isFP8DstSelInst(unsigned Opc) {
+ const FP8DstByteSelInfo *Info = getFP8DstByteSelHelper(Opc);
+ return Info ? Info->HasFP8DstByteSel : false;
+}
+
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
return Info ? Info->Opcode3Addr : ~0u;
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index a4e6a7ebe0558b..35c080d8e0bebc 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -861,6 +861,9 @@ getVOPDInstInfo(unsigned VOPDOpcode, const MCInstrInfo *InstrInfo);
LLVM_READONLY
bool isTrue16Inst(unsigned Opc);
+LLVM_READONLY
+bool isFP8DstSelInst(unsigned Opc);
+
LLVM_READONLY
bool isInvalidSingleUseConsumerInst(unsigned Opc);
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 6748eff9376b0d..466114b95f9f90 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -568,6 +568,7 @@ def VOP3_CVT_SR_F8_F32_Profile : VOP3_Profile<VOPProfile<[i32, f32, i32, f32]>,
let HasSrc2Mods = 1;
let HasExtVOP3DPP = 1;
let HasOpSel = 1;
+ let HasFP8DstByteSel = 1;
let AsmVOP3OpSel = !subst(", $src2_modifiers", "",
getAsmVOP3OpSel<3, HasClamp, HasOMod,
HasSrc0FloatMods, HasSrc1FloatMods,
@@ -587,6 +588,7 @@ def VOP3_CVT_SR_F8_F32_Profile : VOP3_Profile<VOPProfile<[i32, f32, i32, f32]>,
class VOP3_CVT_SR_F8_ByteSel_Profile<ValueType SrcVT> :
VOP3_Profile<VOPProfile<[i32, SrcVT, i32, untyped]>> {
let IsFP8DstByteSel = 1;
+ let HasFP8DstByteSel = 1;
let HasClamp = 0;
defvar bytesel = (ins VGPR_32:$vdst_in, ByteSel:$byte_sel);
let Ins64 = !con(getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index 3851415ab0caed..5a460ef0d42320 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -113,6 +113,8 @@ class VOP3_Pseudo <string opName, VOPProfile P, list<dag> pattern = [],
let IsWMMA = P.IsWMMA;
let IsSWMMAC = P.IsSWMMAC;
+ bit HasFP8DstByteSel = P.HasFP8DstByteSel;
+
let AsmOperands = !if(isVop3OpSel,
P.AsmVOP3OpSel,
!if(!and(isVOP3P, P.IsPacked), P.AsmVOP3P, P.Asm64));
diff --git a/llvm/test/CodeGen/AMDGPU/dst-sel-hazard.mir b/llvm/test/CodeGen/AMDGPU/dst-sel-hazard.mir
new file mode 100644
index 00000000000000..e24817078d8bc9
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/dst-sel-hazard.mir
@@ -0,0 +1,436 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx942 -run-pass post-RA-hazard-rec -o - %s | FileCheck -check-prefix=HAZARD %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -run-pass post-RA-hazard-rec -o - %s | FileCheck -check-prefix=NOHAZARD %s
+
+---
+name: sdwa_opsel_hazard
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $exec, $mode
+
+ ; HAZARD-LABEL: name: sdwa_opsel_hazard
+ ; HAZARD: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $exec, $mode
+ ; HAZARD-NEXT: {{ $}}
+ ; HAZARD-NEXT: renamable $vgpr0 = V_ADD_U16_sdwa 0, $vgpr1, 0, $vgpr2, 0, 1, 0, 3, 3, implicit $exec
+ ; HAZARD-NEXT: S_NOP 0
+ ; HAZARD-NEXT: renamable $vgpr0 = V_MAD_U16_gfx9_e64 12, killed $vgpr3, 4, killed $vgpr4, 4, killed $vgpr2, 0, 0, implicit $exec
+ ; HAZARD-NEXT: S_ENDPGM 0
+ ;
+ ; NOHAZARD-LABEL: name: sdwa_opsel_hazard
+ ; NOHAZARD: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $exec, $mode
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: renamable $vgpr0 = V_ADD_U16_sdwa 0, $vgpr1, 0, $vgpr2, 0, 1, 0, 3, 3, implicit $exec
+ ; NOHAZARD-NEXT: renamable $vgpr0 = V_MAD_U16_gfx9_e64 12, killed $vgpr3, 4, killed $vgpr4, 4, killed $vgpr2, 0, 0, implicit $exec
+ ; NOHAZARD-NEXT: S_ENDPGM 0
+ renamable $vgpr0 = V_ADD_U16_sdwa 0, $vgpr1, 0, $vgpr2, 0, 1, 0, 3, 3, implicit $exec
+ renamable $vgpr0 = V_MAD_U16_gfx9_e64 12, killed $vgpr3, 4, killed $vgpr4, 4, killed $vgpr2, 0, 0, implicit $exec
+ S_ENDPGM 0
+...
+
+---
+name: sdwa_lo_opsel_hazard
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $exec, $mode
+
+ ; HAZARD-LABEL: name: sdwa_lo_opsel_hazard
+ ; HAZARD: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $exec, $mode
+ ; HAZARD-NEXT: {{ $}}
+ ; HAZARD-NEXT: renamable $vgpr0 = V_ADD_U16_sdwa 0, $vgpr1, 0, $vgpr2, 0, 1, 0, 3, 3, implicit $exec
+ ; HAZARD-NEXT: S_NOP 0
+ ; HAZARD-NEXT: renamable $vgpr0 = V_MAD_U16_gfx9_e64 4, killed $vgpr3, 4, killed $vgpr4, 4, killed $vgpr2, 0, 0, implicit $exec
+ ; HAZARD-NEXT: S_ENDPGM 0
+ ;
+ ; NOHAZARD-LABEL: name: sdwa_lo_opsel_hazard
+ ; NOHAZARD: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $exec, $mode
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: renamable $vgpr0 = V_ADD_U16_sdwa 0, $vgpr1, 0, $vgpr2, 0, 1, 0, 3, 3, implicit $exec
+ ; NOHAZARD-NEXT: renamable $vgpr0 = V_MAD_U16_gfx9_e64 4, killed $vgpr3, 4, killed $vgpr4, 4, killed $vgpr2, 0, 0, implicit $exec
+ ; NOHAZARD-NEXT: S_ENDPGM 0
+ renamable $vgpr0 = V_ADD_U16_sdwa 0, $vgpr1, 0, $vgpr2, 0, 1, 0, 3, 3, implicit $exec
+ renamable $vgpr0 = V_MAD_U16_gfx9_e64 4, killed $vgpr3, 4, killed $vgpr4, 4, killed $vgpr2, 0, 0, implicit $exec
+ S_ENDPGM 0
+...
+
+---
+name: opsel_sdwa_hazard
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $exec, $mode
+
+ ; HAZARD-LABEL: name: opsel_sdwa_hazard
+ ; HAZARD: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $exec, $mode
+ ; HAZARD-NEXT: {{ $}}
+ ; HAZARD-NEXT: renamable $vgpr0 = V_MAD_U16_gfx9_e64 12, killed $vgpr3, 4, killed $vgpr4, 4, killed $vgpr2, 0, 0, implicit $exec
+ ; HAZARD-NEXT: S_NOP 0
+ ; HAZARD-NEXT: renamable $vgpr0 = V_ADD_U16_sdwa 0, $vgpr1, 0, $vgpr2, 0, 1, 0, 3, 3, implicit $exec, implicit killed $vgpr0(tied-def 0)
+ ; HAZARD-NEXT: S_ENDPGM 0
+ ;
+ ; NOHAZARD-LABEL: name: opsel_sdwa_hazard
+ ; NOHAZARD: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $exec, $mode
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: renamable $vgpr0 = V_MAD_U16_gfx9_e64 12, killed $vgpr3, 4, killed $vgpr4, 4, killed $vgpr2, 0, 0, implicit $exec
+ ; NOHAZARD-NEXT: renamable $vgpr0 = V_ADD_U16_sdwa 0, $vgpr1, 0, $vgpr2, 0, 1, 0, 3, 3, implicit $exec, implicit killed $vgpr0(tied-def 0)
+ ; NOHAZARD-NEXT: S_ENDPGM 0
+ renamable $vgpr0 = V_MAD_U16_gfx9_e64 12, killed $vgpr3, 4, killed $vgpr4, 4, killed $vgpr2, 0, 0, implicit $exec
+ renamable $vgpr0 = V_ADD_U16_sdwa 0, $vgpr1, 0, $vgpr2, 0, 1, 0, 3, 3, implicit $exec, implicit killed $vgpr0(tied-def 0)
+ S_ENDPGM 0
+...
+
+
+# TODO -- there is no reason for s_nop (V_ADD_U16 doesn't preserve the dest)
+
+---
+name: opsel_no_sdwa_no_hazard
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $exec, $mode
+
+ ; HAZARD-LABEL: name: opsel_no_sdwa_no_hazard
+ ; HAZARD: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $exec, $mode
+ ; HAZARD-NEXT: {{ $}}
+ ; HAZARD-NEXT: renamable $vgpr0 = V_MAD_U16_gfx9_e64 12, killed $vgpr3, 4, killed $vgpr4, 4, killed $vgpr2, 0, 0, implicit $exec
+ ; HAZARD-NEXT: S_NOP 0
+ ; HAZARD...
[truncated]
``````````
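For reference, a few hand-written examples (assumptions, not taken from the patch's tests) of shuffle forms the updated `shouldSinkOperands` hook would treat as sinkable, per the logic above:

```llvm
; Hypothetical examples; the 16-bit cases assume a subtarget with 16-bit insts.
define void @free_shuffles(<4 x i32> %v, <4 x i16> %h, <2 x i16> %w) {
  ; Identity shuffle: treated as free at any element size.
  %id = shufflevector <4 x i32> %v, <4 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ; Reverse of 16-bit elements: sinkable when Subtarget->has16BitInsts().
  %rev = shufflevector <4 x i16> %h, <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ; Length-increasing identity-with-padding: also treated as free.
  %pad = shufflevector <2 x i16> %w, <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
  ret void
}
```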
https://github.com/llvm/llvm-project/pull/109172