[llvm] baa3386 - [GlobalISel] GIntrinsic subclass to represent intrinsics in Generic Machine IR
Sameer Sahasrabuddhe via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 26 21:31:32 PDT 2023
Author: Sameer Sahasrabuddhe
Date: 2023-07-27T10:00:45+05:30
New Revision: baa3386edb11a2f9bcadda8cf58d56f3707c39fa
URL: https://github.com/llvm/llvm-project/commit/baa3386edb11a2f9bcadda8cf58d56f3707c39fa
DIFF: https://github.com/llvm/llvm-project/commit/baa3386edb11a2f9bcadda8cf58d56f3707c39fa.diff
LOG: [GlobalISel] GIntrinsic subclass to represent intrinsics in Generic Machine IR
Some opcodes in generic MIR represent calls to intrinsics, where the intrinsic
ID is the first non-def operand of the instruction. Such instructions are now
represented by GIntrinsic, a new subclass of GenericMachineInstr, and the
method MachineInstr::getIntrinsicID() has been moved to this subclass.
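As a minimal sketch of the new idiom (isRcp is a hypothetical helper;
GIntrinsic, its is() member, and the cast-based pattern are taken from the
diff below):

    #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
    #include "llvm/IR/IntrinsicsAMDGPU.h"
    using namespace llvm;

    // Before this change: MI.getOpcode() == TargetOpcode::G_INTRINSIC &&
    //                     MI.getIntrinsicID() == Intrinsic::amdgcn_rcp.
    // After: a single dyn_cast expresses both the opcode and the ID check.
    static bool isRcp(const MachineInstr &MI) {
      if (auto *GI = dyn_cast<GIntrinsic>(&MI))
        return GI->is(Intrinsic::amdgcn_rcp);
      return false;
    }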
Some target-defined instructions behave like GMIR intrinsics and carry an
Intrinsic::ID operand, but they should not be recognized as generic intrinsics
and should not use GIntrinsic::getIntrinsicID(). These cases are separated out
by introducing a new AMDGPU::getIntrinsicID().
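A minimal sketch of how the two accessors divide the work (idOperand is a
hypothetical helper, assuming MI is either a generic intrinsic or one of the
target's G_AMDGPU_INTRIN_* pseudos with an Intrinsic::ID operand):

    #include "AMDGPUInstrInfo.h"
    #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
    using namespace llvm;

    static Intrinsic::ID idOperand(const MachineInstr &MI) {
      // Generic G_INTRINSIC[_W_SIDE_EFFECTS] instructions are GIntrinsics.
      if (auto *GI = dyn_cast<GIntrinsic>(&MI))
        return GI->getIntrinsicID();
      // Target pseudos carry an ID operand but fail the isa<GIntrinsic>
      // check, so read the operand via the new target helper instead.
      return AMDGPU::getIntrinsicID(MI);
    }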
Reviewed By: arsenm, Pierre-vh
Differential Revision: https://reviews.llvm.org/D155556
Added:

Modified:
    llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
    llvm/include/llvm/CodeGen/MachineInstr.h
    llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
    llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
    llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
    llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
    llvm/lib/Target/SPIRV/SPIRVUtils.cpp

Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
index 8484d970aff0a9..9d73402ed65db1 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
@@ -358,6 +358,29 @@ class GAddSubCarryInOut : public GAddSubCarryOut {
}
};
+/// Represents a call to an intrinsic.
+class GIntrinsic final : public GenericMachineInstr {
+public:
+ Intrinsic::ID getIntrinsicID() const {
+ return getOperand(getNumExplicitDefs()).getIntrinsicID();
+ }
+
+ bool is(Intrinsic::ID ID) const { return getIntrinsicID() == ID; }
+ bool hasSideEffects() const {
+ return getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
+ }
+
+ static bool classof(const MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ case TargetOpcode::G_INTRINSIC:
+ case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
+ return true;
+ default:
+ return false;
+ }
+ }
+};
+
} // namespace llvm
#endif // LLVM_CODEGEN_GLOBALISEL_GENERICMACHINEINSTRS_H
diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index 2928ccfbcef72d..7d1e39a47b9637 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -1930,12 +1930,6 @@ class MachineInstr
/// and point them to \p Reg instead.
void changeDebugValuesDefReg(Register Reg);
- /// Returns the Intrinsic::ID for this instruction.
- /// \pre Must have an intrinsic ID operand.
- unsigned getIntrinsicID() const {
- return getOperand(getNumExplicitDefs()).getIntrinsicID();
- }
-
/// Sets all register debug operands in this debug value instruction to be
/// undef.
void setDebugValueUndef() {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
index 78fdedc0b511f6..ea2cb5d6874c49 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
@@ -9,6 +9,7 @@
#include "AMDGPUCombinerHelper.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/Target/TargetMachine.h"
@@ -42,7 +43,7 @@ static bool fnegFoldsIntoMI(const MachineInstr &MI) {
case AMDGPU::G_AMDGPU_FMAX_LEGACY:
return true;
case AMDGPU::G_INTRINSIC: {
- unsigned IntrinsicID = MI.getIntrinsicID();
+ unsigned IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
switch (IntrinsicID) {
case Intrinsic::amdgcn_rcp:
case Intrinsic::amdgcn_rcp_legacy:
@@ -92,7 +93,7 @@ static bool hasSourceMods(const MachineInstr &MI) {
case AMDGPU::G_PHI:
return false;
case AMDGPU::G_INTRINSIC: {
- unsigned IntrinsicID = MI.getIntrinsicID();
+ unsigned IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
switch (IntrinsicID) {
case Intrinsic::amdgcn_interp_p1:
case Intrinsic::amdgcn_interp_p2:
@@ -228,7 +229,7 @@ bool AMDGPUCombinerHelper::matchFoldableFneg(MachineInstr &MI,
case AMDGPU::G_AMDGPU_RCP_IFLAG:
return true;
case AMDGPU::G_INTRINSIC: {
- unsigned IntrinsicID = MatchInfo->getIntrinsicID();
+ unsigned IntrinsicID = cast<GIntrinsic>(MatchInfo)->getIntrinsicID();
switch (IntrinsicID) {
case Intrinsic::amdgcn_rcp:
case Intrinsic::amdgcn_rcp_legacy:
@@ -327,7 +328,7 @@ void AMDGPUCombinerHelper::applyFoldableFneg(MachineInstr &MI,
NegateOperand(MatchInfo->getOperand(1));
break;
case AMDGPU::G_INTRINSIC: {
- unsigned IntrinsicID = MatchInfo->getIntrinsicID();
+ unsigned IntrinsicID = cast<GIntrinsic>(MatchInfo)->getIntrinsicID();
switch (IntrinsicID) {
case Intrinsic::amdgcn_rcp:
case Intrinsic::amdgcn_rcp_legacy:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
index f2d62956e25b09..d41e704a4a11a3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
@@ -14,6 +14,7 @@
#include "AMDGPUInstrInfo.h"
#include "AMDGPU.h"
+#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
@@ -26,6 +27,9 @@ using namespace llvm;
AMDGPUInstrInfo::AMDGPUInstrInfo(const GCNSubtarget &ST) { }
+Intrinsic::ID AMDGPU::getIntrinsicID(const MachineInstr &I) {
+ return I.getOperand(I.getNumExplicitDefs()).getIntrinsicID();
+}
// TODO: Should largely merge with AMDGPUTTIImpl::isSourceOfDivergence.
bool AMDGPUInstrInfo::isUniformMMO(const MachineMemOperand *MMO) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
index e7ee364476824c..515decea3921ed 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -21,6 +21,7 @@ namespace llvm {
class GCNSubtarget;
class MachineMemOperand;
+class MachineInstr;
class AMDGPUInstrInfo {
public:
@@ -31,6 +32,13 @@ class AMDGPUInstrInfo {
namespace AMDGPU {
+/// Return the intrinsic ID for opcodes with the G_AMDGPU_INTRIN_ prefix.
+///
+/// These opcodes have an Intrinsic::ID operand similar to a GIntrinsic. But
+/// they are not actual instances of GIntrinsics, so we cannot use
+/// GIntrinsic::getIntrinsicID() on them.
+unsigned getIntrinsicID(const MachineInstr &I);
+
struct RsrcIntrinsic {
unsigned Intr;
uint8_t RsrcArg;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 747f9fe2f8aeff..551ac4b2133ee7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -21,6 +21,7 @@
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -1001,7 +1002,7 @@ bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
}
bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
- unsigned IntrinsicID = I.getIntrinsicID();
+ unsigned IntrinsicID = cast<GIntrinsic>(I).getIntrinsicID();
switch (IntrinsicID) {
case Intrinsic::amdgcn_if_break: {
MachineBasicBlock *BB = I.getParent();
@@ -2008,7 +2009,7 @@ bool AMDGPUInstructionSelector::selectDSBvhStackIntrinsic(
bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
MachineInstr &I) const {
- unsigned IntrinsicID = I.getIntrinsicID();
+ unsigned IntrinsicID = cast<GIntrinsic>(I).getIntrinsicID();
switch (IntrinsicID) {
case Intrinsic::amdgcn_end_cf:
return selectEndCfIntrinsic(I);
@@ -2689,8 +2690,8 @@ static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
isVCmpResult(MI.getOperand(2).getReg(), MRI);
- if (Opcode == TargetOpcode::G_INTRINSIC)
- return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
+ if (auto *GI = dyn_cast<GIntrinsic>(&MI))
+ return GI->is(Intrinsic::amdgcn_class);
return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
}
@@ -3252,7 +3253,7 @@ bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
unsigned Opc;
- switch (MI.getIntrinsicID()) {
+ switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
break;
@@ -3457,8 +3458,8 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I) {
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
- const AMDGPU::ImageDimIntrinsicInfo *Intr
- = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
+ const AMDGPU::ImageDimIntrinsicInfo *Intr =
+ AMDGPU::getImageDimIntrinsicInfo(AMDGPU::getIntrinsicID(I));
assert(Intr && "not an image intrinsic with image pseudo");
return selectImageIntrinsic(I, Intr);
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 120c00b14a3693..7b3dced6735017 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -21,6 +21,7 @@
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -6524,7 +6525,7 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MachineRegisterInfo &MRI = *B.getMRI();
// Replace the use G_BRCOND with the exec manipulate and branch pseudos.
- auto IntrID = MI.getIntrinsicID();
+ auto IntrID = cast<GIntrinsic>(MI).getIntrinsicID();
switch (IntrID) {
case Intrinsic::amdgcn_if:
case Intrinsic::amdgcn_else: {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
index 536fb02cb4ecf6..3b523d88e239e3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
@@ -22,6 +22,7 @@
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/TargetPassConfig.h"
@@ -268,10 +269,10 @@ bool AMDGPUPostLegalizerCombinerImpl::matchRcpSqrtToRsq(
auto getRcpSrc = [=](const MachineInstr &MI) {
MachineInstr *ResMI = nullptr;
- if (MI.getOpcode() == TargetOpcode::G_INTRINSIC &&
- MI.getIntrinsicID() == Intrinsic::amdgcn_rcp)
- ResMI = MRI.getVRegDef(MI.getOperand(2).getReg());
-
+ if (auto *GI = dyn_cast<GIntrinsic>(&MI)) {
+ if (GI->is(Intrinsic::amdgcn_rcp))
+ ResMI = MRI.getVRegDef(MI.getOperand(2).getReg());
+ }
return ResMI;
};
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 0203af32e3891c..13441a6f9fcb93 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -337,7 +337,7 @@ AMDGPURegisterBankInfo::addMappingFromTable(
RegisterBankInfo::InstructionMappings
AMDGPURegisterBankInfo::getInstrAlternativeMappingsIntrinsic(
const MachineInstr &MI, const MachineRegisterInfo &MRI) const {
- switch (MI.getIntrinsicID()) {
+ switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
case Intrinsic::amdgcn_readlane: {
static const OpRegBankEntry<3> Table[2] = {
// Perfectly legal.
@@ -378,7 +378,7 @@ RegisterBankInfo::InstructionMappings
AMDGPURegisterBankInfo::getInstrAlternativeMappingsIntrinsicWSideEffects(
const MachineInstr &MI, const MachineRegisterInfo &MRI) const {
- switch (MI.getIntrinsicID()) {
+ switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
case Intrinsic::amdgcn_s_buffer_load: {
static const OpRegBankEntry<2> Table[4] = {
// Perfectly legal.
@@ -2949,7 +2949,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
return;
}
case AMDGPU::G_INTRINSIC: {
- switch (MI.getIntrinsicID()) {
+ switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
case Intrinsic::amdgcn_readlane: {
substituteSimpleCopyRegs(OpdMapper, 2);
@@ -3019,8 +3019,8 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
- const AMDGPU::RsrcIntrinsic *RSrcIntrin
- = AMDGPU::lookupRsrcIntrinsic(MI.getIntrinsicID());
+ const AMDGPU::RsrcIntrinsic *RSrcIntrin =
+ AMDGPU::lookupRsrcIntrinsic(AMDGPU::getIntrinsicID(MI));
assert(RSrcIntrin && RSrcIntrin->IsImage);
// Non-images can have complications from operands that allow both SGPR
// and VGPR. For now it's too complicated to figure out the final opcode
@@ -3035,7 +3035,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
return;
}
case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: {
- auto IntrID = MI.getIntrinsicID();
+ auto IntrID = cast<GIntrinsic>(MI).getIntrinsicID();
switch (IntrID) {
case Intrinsic::amdgcn_ds_ordered_add:
case Intrinsic::amdgcn_ds_ordered_swap: {
@@ -4198,7 +4198,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
break;
}
case AMDGPU::G_INTRINSIC: {
- switch (MI.getIntrinsicID()) {
+ switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
default:
return getInvalidInstructionMapping();
case Intrinsic::amdgcn_div_fmas:
@@ -4531,7 +4531,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
- auto IntrID = MI.getIntrinsicID();
+ auto IntrID = AMDGPU::getIntrinsicID(MI);
const AMDGPU::RsrcIntrinsic *RSrcIntrin = AMDGPU::lookupRsrcIntrinsic(IntrID);
assert(RSrcIntrin && "missing RsrcIntrinsic for image intrinsic");
// Non-images can have complications from operands that allow both SGPR
@@ -4560,7 +4560,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
break;
}
case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: {
- auto IntrID = MI.getIntrinsicID();
+ auto IntrID = cast<GIntrinsic>(MI).getIntrinsicID();
switch (IntrID) {
case Intrinsic::amdgcn_s_getreg:
case Intrinsic::amdgcn_s_memtime:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 3148f49ff0d530..77a2d507c270f9 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -28,6 +28,7 @@
#include "llvm/CodeGen/ByteProvider.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -11302,7 +11303,7 @@ bool SITargetLowering::isCanonicalized(Register Reg, MachineFunction &MF,
return false;
return true;
case AMDGPU::G_INTRINSIC:
- switch (MI->getIntrinsicID()) {
+ switch (cast<GIntrinsic>(MI)->getIntrinsicID()) {
case Intrinsic::amdgcn_fmul_legacy:
case Intrinsic::amdgcn_fmad_ftz:
case Intrinsic::amdgcn_sqrt:
@@ -13736,7 +13737,7 @@ void SITargetLowering::computeKnownBitsForTargetInstr(
const MachineInstr *MI = MRI.getVRegDef(R);
switch (MI->getOpcode()) {
case AMDGPU::G_INTRINSIC: {
- switch (MI->getIntrinsicID()) {
+ switch (cast<GIntrinsic>(MI)->getIntrinsicID()) {
case Intrinsic::amdgcn_workitem_id_x:
knownBitsForWorkitemID(*getSubtarget(), KB, Known, 0);
break;
@@ -13801,21 +13802,17 @@ Align SITargetLowering::computeKnownAlignForTargetInstr(
GISelKnownBits &KB, Register R, const MachineRegisterInfo &MRI,
unsigned Depth) const {
const MachineInstr *MI = MRI.getVRegDef(R);
- switch (MI->getOpcode()) {
- case AMDGPU::G_INTRINSIC:
- case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: {
+ if (auto *GI = dyn_cast<GIntrinsic>(MI)) {
// FIXME: Can this move to generic code? What about the case where the call
// site specifies a lower alignment?
- Intrinsic::ID IID = MI->getIntrinsicID();
+ Intrinsic::ID IID = GI->getIntrinsicID();
LLVMContext &Ctx = KB.getMachineFunction().getFunction().getContext();
AttributeList Attrs = Intrinsic::getAttributes(Ctx, IID);
if (MaybeAlign RetAlign = Attrs.getRetAlignment())
return *RetAlign;
return Align(1);
}
- default:
- return Align(1);
- }
+ return Align(1);
}
Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 278cf2b69ee37d..8b902aeda56119 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -18,6 +18,7 @@
#include "GCNSubtarget.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -8603,9 +8604,8 @@ unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
InstructionUniformity
SIInstrInfo::getGenericInstructionUniformity(const MachineInstr &MI) const {
unsigned opcode = MI.getOpcode();
- if (opcode == AMDGPU::G_INTRINSIC ||
- opcode == AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS) {
- auto IID = static_cast<Intrinsic::ID>(MI.getIntrinsicID());
+ if (auto *GI = dyn_cast<GIntrinsic>(&MI)) {
+ auto IID = GI->getIntrinsicID();
if (AMDGPU::isIntrinsicSourceOfDivergence(IID))
return InstructionUniformity::NeverUniform;
if (AMDGPU::isIntrinsicAlwaysUniform(IID))
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 2fc7342458de57..7f00bb4a6c43fc 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -21,6 +21,7 @@
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -1314,7 +1315,7 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
MachineBasicBlock &BB = *I.getParent();
- switch (I.getIntrinsicID()) {
+ switch (cast<GIntrinsic>(I).getIntrinsicID()) {
case Intrinsic::spv_load:
return selectLoad(ResVReg, ResType, I);
case Intrinsic::spv_store:
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
index f4f3cdce1ac3c1..3ef90f9fe8c2b7 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
@@ -15,6 +15,7 @@
#include "SPIRV.h"
#include "SPIRVInstrInfo.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -209,13 +210,14 @@ SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
const MachineRegisterInfo *MRI) {
MachineInstr *ConstInstr = MRI->getVRegDef(ConstReg);
- if (ConstInstr->getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
- ConstInstr->getIntrinsicID() == Intrinsic::spv_track_constant) {
- ConstReg = ConstInstr->getOperand(2).getReg();
- ConstInstr = MRI->getVRegDef(ConstReg);
+ if (auto *GI = dyn_cast<GIntrinsic>(ConstInstr)) {
+ if (GI->is(Intrinsic::spv_track_constant)) {
+ ConstReg = ConstInstr->getOperand(2).getReg();
+ return MRI->getVRegDef(ConstReg);
+ }
} else if (ConstInstr->getOpcode() == SPIRV::ASSIGN_TYPE) {
ConstReg = ConstInstr->getOperand(1).getReg();
- ConstInstr = MRI->getVRegDef(ConstReg);
+ return MRI->getVRegDef(ConstReg);
}
return ConstInstr;
}
@@ -227,8 +229,9 @@ uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI) {
}
bool isSpvIntrinsic(MachineInstr &MI, Intrinsic::ID IntrinsicID) {
- return MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
- MI.getIntrinsicID() == IntrinsicID;
+ if (auto *GI = dyn_cast<GIntrinsic>(&MI))
+ return GI->is(IntrinsicID);
+ return false;
}
Type *getMDOperandAsType(const MDNode *N, unsigned I) {