[llvm] AMDGPU: Implement getConstValDefinedInReg and use in foldImmediate (NFC) (PR #127482)
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 17 20:11:05 PST 2025
https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/127482
From 8846074e2817570f00fc3b4a3613d96b1910853a Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Mon, 17 Feb 2025 13:22:06 +0700
Subject: [PATCH] AMDGPU: Implement getConstValDefinedInReg and use in
foldImmediate (NFC)
This is NFC because the new hook only changes behavior for cases that are
not isMoveImmediate, and we do not yet implement any of those. This just
moves foldImmediate onto the common getConstValDefinedInReg interface,
similar to how the x86 backend does it.
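
For context, the pattern adopted here is: the target-independent caller asks
the target hook whether a def instruction materializes a known constant into a
register, instead of switching over target-specific move opcodes itself. Below
is a minimal, stand-alone sketch of that shape. All names in it (FakeInstr,
FakeTII, FakeSITII, tryFoldImmediate, MOV_IMM32, MOV_IMM64) are simplified
stand-ins for illustration only, not the real LLVM classes; the real hook is
the TargetInstrInfo::getConstValDefinedInReg override added in the diff below.

// Minimal, self-contained sketch (not LLVM code) of the pattern: a
// target-independent hook reports the constant that a def instruction
// writes into a register, and the fold logic consumes it without caring
// which target-specific move opcode produced the value. All names here
// (FakeInstr, FakeTII, FakeSITII, ...) are simplified stand-ins.
#include <cstdint>
#include <iostream>
#include <optional>

enum Opcode { MOV_IMM32, MOV_IMM64, ADD };

struct FakeInstr {
  Opcode Opc;      // instruction opcode
  unsigned DefReg; // register written by this instruction
  int64_t Imm;     // immediate operand, if any
};

struct FakeTII {
  // Mirrors the shape of TargetInstrInfo::getConstValDefinedInReg:
  // return true and set ImmVal if MI defines Reg with a known constant.
  virtual bool getConstValDefinedInReg(const FakeInstr &MI, unsigned Reg,
                                       int64_t &ImmVal) const {
    return false; // conservative default, like the base class
  }
  virtual ~FakeTII() = default;
};

struct FakeSITII : FakeTII {
  bool getConstValDefinedInReg(const FakeInstr &MI, unsigned Reg,
                               int64_t &ImmVal) const override {
    switch (MI.Opc) {
    case MOV_IMM32:
    case MOV_IMM64: // constant-materializing moves this "target" knows about
      ImmVal = MI.Imm;
      return MI.DefReg == Reg;
    default:
      return false;
    }
  }
};

// foldImmediate-style caller: instead of switching over target move opcodes
// itself, it asks the hook whether DefMI materializes a constant into Reg.
std::optional<int64_t> tryFoldImmediate(const FakeTII &TII,
                                        const FakeInstr &DefMI, unsigned Reg) {
  int64_t Imm;
  if (!TII.getConstValDefinedInReg(DefMI, Reg, Imm))
    return std::nullopt;
  return Imm; // a real pass would now rewrite the use of Reg with Imm
}

int main() {
  FakeSITII TII;
  FakeInstr Def{MOV_IMM32, /*DefReg=*/5, /*Imm=*/42};
  if (std::optional<int64_t> Imm = tryFoldImmediate(TII, Def, 5))
    std::cout << "fold %5 -> " << *Imm << "\n"; // prints: fold %5 -> 42
  return 0;
}

In the actual patch, foldImmediate plays the role of tryFoldImmediate and then
goes on to check, via isInlineConstant, whether the immediate is legal for the
use operand before rewriting it.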
---
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 67 ++++++++++++++------------
llvm/lib/Target/AMDGPU/SIInstrInfo.h | 22 +++++++--
2 files changed, 54 insertions(+), 35 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index f51527d0eb148..9e99df7524f4d 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1327,6 +1327,33 @@ Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
return Reg;
}
+bool SIInstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
+ const Register Reg,
+ int64_t &ImmVal) const {
+ // TODO: Handle all the special cases handled in SIShrinkInstructions
+ // (e.g. s_brev_b32 imm -> reverse(imm))
+ switch (MI.getOpcode()) {
+ case AMDGPU::V_MOV_B32_e32:
+ case AMDGPU::S_MOV_B32:
+ case AMDGPU::S_MOVK_I32:
+ case AMDGPU::S_MOV_B64:
+ case AMDGPU::V_MOV_B64_e32:
+ case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
+ case AMDGPU::S_MOV_B64_IMM_PSEUDO:
+ case AMDGPU::V_MOV_B64_PSEUDO: {
+ const MachineOperand &Src0 = MI.getOperand(1);
+ if (Src0.isImm()) {
+ ImmVal = Src0.getImm();
+ return MI.getOperand(0).getReg() == Reg;
+ }
+
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
if (RI.isAGPRClass(DstRC))
@@ -3395,27 +3422,11 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
if (!MRI->hasOneNonDBGUse(Reg))
return false;
- switch (DefMI.getOpcode()) {
- default:
- return false;
- case AMDGPU::V_MOV_B64_e32:
- case AMDGPU::S_MOV_B64:
- case AMDGPU::V_MOV_B64_PSEUDO:
- case AMDGPU::S_MOV_B64_IMM_PSEUDO:
- case AMDGPU::V_MOV_B32_e32:
- case AMDGPU::S_MOV_B32:
- case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
- break;
- }
-
- const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
- assert(ImmOp);
- // FIXME: We could handle FrameIndex values here.
- if (!ImmOp->isImm())
+ int64_t Imm;
+ if (!getConstValDefinedInReg(DefMI, Reg, Imm))
return false;
- auto getImmFor = [ImmOp](const MachineOperand &UseOp) -> int64_t {
- int64_t Imm = ImmOp->getImm();
+ auto getImmFor = [=](const MachineOperand &UseOp) -> int64_t {
switch (UseOp.getSubReg()) {
default:
return Imm;
@@ -3502,12 +3513,14 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
// If this is a free constant, there's no reason to do this.
// TODO: We could fold this here instead of letting SIFoldOperands do it
// later.
- MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
+ int Src0Idx = getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::src0);
// Any src operand can be used for the legality check.
- if (isInlineConstant(UseMI, *Src0, *ImmOp))
+ if (isInlineConstant(UseMI, Src0Idx, Imm))
return false;
+ MachineOperand *Src0 = &UseMI.getOperand(Src0Idx);
+
bool IsF32 = Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64;
bool IsFMA =
@@ -4267,18 +4280,11 @@ bool SIInstrInfo::isInlineConstant(const APFloat &Imm) const {
}
}
-bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
- uint8_t OperandType) const {
- assert(!MO.isReg() && "isInlineConstant called on register operand!");
- if (!MO.isImm())
- return false;
-
+bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
// MachineOperand provides no way to tell the true operand size, since it only
// records a 64-bit value. We need to know the size to determine if a 32-bit
// floating point immediate bit pattern is legal for an integer immediate. It
// would be for any 32-bit integer operand, but would not be for a 64-bit one.
-
- int64_t Imm = MO.getImm();
switch (OperandType) {
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
@@ -4300,8 +4306,7 @@ bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
case AMDGPU::OPERAND_REG_INLINE_C_INT64:
case AMDGPU::OPERAND_REG_INLINE_C_FP64:
case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
- return AMDGPU::isInlinableLiteral64(MO.getImm(),
- ST.hasInv2PiInlineImm());
+ return AMDGPU::isInlinableLiteral64(Imm, ST.hasInv2PiInlineImm());
case AMDGPU::OPERAND_REG_IMM_INT16:
case AMDGPU::OPERAND_REG_INLINE_C_INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 811e4fcbebf57..ddd15e1766f70 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -278,6 +278,9 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
MachineBasicBlock::iterator I, const DebugLoc &DL,
Register SrcReg, int Value) const;
+ bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg,
+ int64_t &ImmVal) const override;
+
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
@@ -1063,7 +1066,13 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
// Some operands like FrameIndexes could resolve to an inline immediate value
// that will not require an additional 4-bytes; this function assumes that it
// will.
- bool isInlineConstant(const MachineOperand &MO, uint8_t OperandType) const;
+ bool isInlineConstant(const MachineOperand &MO, uint8_t OperandType) const {
+ assert(!MO.isReg() && "isInlineConstant called on register operand!");
+ if (!MO.isImm())
+ return false;
+ return isInlineConstant(MO.getImm(), OperandType);
+ }
+ bool isInlineConstant(int64_t ImmVal, uint8_t OperandType) const;
bool isInlineConstant(const MachineOperand &MO,
const MCOperandInfo &OpInfo) const {
@@ -1091,7 +1100,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
}
bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx,
- const MachineOperand &MO) const {
+ int64_t ImmVal) const {
if (OpIdx >= MI.getDesc().NumOperands)
return false;
@@ -1101,10 +1110,15 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
uint8_t OpType = (Size == 8) ?
AMDGPU::OPERAND_REG_IMM_INT64 : AMDGPU::OPERAND_REG_IMM_INT32;
- return isInlineConstant(MO, OpType);
+ return isInlineConstant(ImmVal, OpType);
}
- return isInlineConstant(MO, MI.getDesc().operands()[OpIdx].OperandType);
+ return isInlineConstant(ImmVal, MI.getDesc().operands()[OpIdx].OperandType);
+ }
+
+ bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx,
+ const MachineOperand &MO) const {
+ return isInlineConstant(MI, OpIdx, MO.getImm());
}
bool isInlineConstant(const MachineOperand &MO) const {