[llvm] 29c17ed - [AMDGPU/MemOpsCluster] Code clean-up around accessing of memory operand width
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 3 01:34:17 PDT 2020
Author: hsmahesha
Date: 2020-06-03T14:03:52+05:30
New Revision: 29c17ed96ed53399279c3358cffde51224178e72
URL: https://github.com/llvm/llvm-project/commit/29c17ed96ed53399279c3358cffde51224178e72
DIFF: https://github.com/llvm/llvm-project/commit/29c17ed96ed53399279c3358cffde51224178e72.diff
LOG: [AMDGPU/MemOpsCluster] Code clean-up around accessing of memory operand width
Summary:
Clean up the logic that computes the width of a memory operand, and rearrange the
code to avoid duplication.
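In short, the per-call helper that re-derived a register class from a
MachineOperand is dropped in favor of the existing getOpSize() hook, driven by a
named-operand index. A minimal sketch of the two patterns, condensed from the
diff below (the surrounding control flow is elided):

  // Before: resolve the operand pointer, then compute its size by hand.
  const MachineOperand *MOp = getNamedOperand(LdSt, AMDGPU::OpName::vdst);
  if (!MOp)
    MOp = getNamedOperand(LdSt, AMDGPU::OpName::data0);
  Width = getOperandSizeInBytes(LdSt, MOp);

  // After: look up the operand index once and reuse getOpSize().
  int DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
  if (DataOpIdx == -1)
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
  Width = getOpSize(LdSt, DataOpIdx);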
Reviewers: foad, rampitec, arsenm, vpykhtin, javedabsar
Reviewed By: foad
Subscribers: kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D80946
Added:
Modified:
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
llvm/lib/Target/AMDGPU/SIInstrInfo.h
Removed:
################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 9f954743d271..2c385a2c1b2f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -264,17 +264,6 @@ static bool isStride64(unsigned Opc) {
}
}
-unsigned SIInstrInfo::getOperandSizeInBytes(const MachineInstr &LdSt,
- const MachineOperand *MOp) const {
- assert(MOp && "Unexpected null machine operand!");
- const MachineRegisterInfo &MRI = LdSt.getParent()->getParent()->getRegInfo();
- const Register Reg = MOp->getReg();
- const TargetRegisterClass *DstRC = Register::isVirtualRegister(Reg)
- ? MRI.getRegClass(Reg)
- : RI.getPhysRegClass(Reg);
- return (RI.getRegSizeInBits(*DstRC) / 8);
-}
-
bool SIInstrInfo::getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
@@ -284,7 +273,8 @@ bool SIInstrInfo::getMemOperandsWithOffsetWidth(
unsigned Opc = LdSt.getOpcode();
OffsetIsScalable = false;
- const MachineOperand *BaseOp, *OffsetOp, *MOp;
+ const MachineOperand *BaseOp, *OffsetOp;
+ int DataOpIdx;
if (isDS(LdSt)) {
BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
@@ -299,10 +289,10 @@ bool SIInstrInfo::getMemOperandsWithOffsetWidth(
BaseOps.push_back(BaseOp);
Offset = OffsetOp->getImm();
// Get appropriate operand, and compute width accordingly.
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::vdst);
- if (!MOp)
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::data0);
- Width = getOperandSizeInBytes(LdSt, MOp);
+ DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
+ if (DataOpIdx == -1)
+ DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
+ Width = getOpSize(LdSt, DataOpIdx);
} else {
// The 2 offset instructions use offset0 and offset1 instead. We can treat
// these as a load with a single offset if the 2 offsets are consecutive.
@@ -335,14 +325,14 @@ bool SIInstrInfo::getMemOperandsWithOffsetWidth(
BaseOps.push_back(BaseOp);
Offset = EltSize * Offset0;
// Get appropriate operand(s), and compute width accordingly.
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::vdst);
- if (!MOp) {
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::data0);
- Width = getOperandSizeInBytes(LdSt, MOp);
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::data1);
- Width += getOperandSizeInBytes(LdSt, MOp);
+ DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
+ if (DataOpIdx == -1) {
+ DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
+ Width = getOpSize(LdSt, DataOpIdx);
+ DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
+ Width += getOpSize(LdSt, DataOpIdx);
} else {
- Width = getOperandSizeInBytes(LdSt, MOp);
+ Width = getOpSize(LdSt, DataOpIdx);
}
}
return true;
@@ -368,33 +358,27 @@ bool SIInstrInfo::getMemOperandsWithOffsetWidth(
BaseOps.push_back(RSrc);
BaseOps.push_back(SOffset);
Offset = OffsetImm->getImm();
- // Get appropriate operand, and compute width accordingly.
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::vdst);
- if (!MOp)
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::vdata);
- Width = getOperandSizeInBytes(LdSt, MOp);
- return true;
- }
-
- BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
- if (!BaseOp) // e.g. BUFFER_WBINVL1_VOL
- return false;
- BaseOps.push_back(BaseOp);
-
- BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
- if (BaseOp)
+ } else {
+ BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
+ if (!BaseOp) // e.g. BUFFER_WBINVL1_VOL
+ return false;
BaseOps.push_back(BaseOp);
- const MachineOperand *OffsetImm =
- getNamedOperand(LdSt, AMDGPU::OpName::offset);
- Offset = OffsetImm->getImm();
- if (SOffset) // soffset can be an inline immediate.
- Offset += SOffset->getImm();
+ BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
+ if (BaseOp)
+ BaseOps.push_back(BaseOp);
+
+ const MachineOperand *OffsetImm =
+ getNamedOperand(LdSt, AMDGPU::OpName::offset);
+ Offset = OffsetImm->getImm();
+ if (SOffset) // soffset can be an inline immediate.
+ Offset += SOffset->getImm();
+ }
// Get appropriate operand, and compute width accordingly.
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::vdst);
- if (!MOp)
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::vdata);
- Width = getOperandSizeInBytes(LdSt, MOp);
+ DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
+ if (DataOpIdx == -1)
+ DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
+ Width = getOpSize(LdSt, DataOpIdx);
return true;
}
@@ -406,8 +390,8 @@ bool SIInstrInfo::getMemOperandsWithOffsetWidth(
OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
Offset = OffsetOp ? OffsetOp->getImm() : 0;
// Get appropriate operand, and compute width accordingly.
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::sdst);
- Width = getOperandSizeInBytes(LdSt, MOp);
+ DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
+ Width = getOpSize(LdSt, DataOpIdx);
return true;
}
@@ -421,10 +405,10 @@ bool SIInstrInfo::getMemOperandsWithOffsetWidth(
BaseOps.push_back(BaseOp);
Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
// Get appropriate operand, and compute width accordingly.
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::vdst);
- if (!MOp)
- MOp = getNamedOperand(LdSt, AMDGPU::OpName::vdata);
- Width = getOperandSizeInBytes(LdSt, MOp);
+ DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
+ if (DataOpIdx == -1)
+ DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
+ Width = getOpSize(LdSt, DataOpIdx);
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 2665d70bface..0f0e8420f9cf 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -181,9 +181,6 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
int64_t &Offset1,
int64_t &Offset2) const override;
- unsigned getOperandSizeInBytes(const MachineInstr &LdSt,
- const MachineOperand *MOp) const;
-
bool getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt,
SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
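For context on why the helper was redundant: getOperandSizeInBytes computed the
operand's size from its register class (see the deleted hunk above), and
SIInstrInfo::getOpSize reaches the same answer from an operand index. A rough
sketch of that equivalence; the getOpSize body shown here is an assumption
inferred from the removed helper and from SIInstrInfo::getOpRegClass, not text
from this commit:

  // Removed helper, in essence:
  //   return RI.getRegSizeInBits(*DstRC) / 8;
  //
  // getOpSize, sketched under the same assumption:
  unsigned SIInstrInfo::getOpSize(const MachineInstr &MI, unsigned OpNo) const {
    return RI.getRegSizeInBits(*getOpRegClass(MI, OpNo)) / 8; // hypothetical body
  }

One consequence visible in the diff itself: getOpSize takes an operand index
rather than a MachineOperand pointer, so callers switch from getNamedOperand to
AMDGPU::getNamedOperandIdx and test for -1 instead of checking for null.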