[llvm] [AMDGPU] Move renamedInGFX9 from TableGen to SIInstrInfo helper function/macro to free up a bit slot (PR #82787)
Corbin Robeck via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 12 07:54:52 PDT 2024
https://github.com/CRobeck updated https://github.com/llvm/llvm-project/pull/82787
From 52e6a50d1338ae5df23786e4fc054e88f926d01b Mon Sep 17 00:00:00 2001
From: Corbin Robeck <corbin.robeck at amd.com>
Date: Thu, 22 Feb 2024 13:12:11 -0600
Subject: [PATCH 1/4] remove renamedInGFX9 bit and move check into SIInstrInfo
helper function
---
llvm/lib/Target/AMDGPU/SIInstrFormats.td | 8 +--
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 73 +++++++++++++++++++-
llvm/lib/Target/AMDGPU/SIInstrInfo.h | 2 +
llvm/lib/Target/AMDGPU/VOP2Instructions.td | 78 ++++++++--------------
llvm/lib/Target/AMDGPU/VOP3Instructions.td | 42 ++++++------
5 files changed, 125 insertions(+), 78 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrFormats.td b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
index 327eb89efcb88c..e6674a18000b69 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrFormats.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
@@ -84,10 +84,6 @@ class InstSI <dag outs, dag ins, string asm = "",
// Is it possible for this instruction to be atomic?
field bit maybeAtomic = 1;
- // This bit indicates that this is a VI instruction which is renamed
- // in GFX9. Required for correct mapping from pseudo to MC.
- field bit renamedInGFX9 = 0;
-
// This bit indicates that this has a floating point result type, so
// the clamp modifier has floating point semantics.
field bit FPClamp = 0;
@@ -214,7 +210,9 @@ class InstSI <dag outs, dag ins, string asm = "",
let TSFlags{42} = VOP3_OPSEL;
let TSFlags{43} = maybeAtomic;
- let TSFlags{44} = renamedInGFX9;
+
+ // Reserved, must be 0.
+ let TSFlags{44} = 0;
let TSFlags{45} = FPClamp;
let TSFlags{46} = IntClamp;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index edd87e340d10d2..501e7607066224 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -9134,14 +9134,83 @@ bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
}
}
+bool SIInstrInfo::isRenamedInGFX9(int Opcode) const {
+ switch(Opcode) {
+ case AMDGPU::V_ADDC_U32_dpp:
+ case AMDGPU::V_ADDC_U32_e32:
+ case AMDGPU::V_ADDC_U32_e64:
+ case AMDGPU::V_ADDC_U32_e64_dpp:
+ case AMDGPU::V_ADDC_U32_sdwa:
+ //
+ case AMDGPU::V_ADD_CO_U32_dpp:
+ case AMDGPU::V_ADD_CO_U32_e32:
+ case AMDGPU::V_ADD_CO_U32_e64:
+ case AMDGPU::V_ADD_CO_U32_e64_dpp:
+ case AMDGPU::V_ADD_CO_U32_sdwa:
+ //
+ case AMDGPU::V_ADD_U32_dpp:
+ case AMDGPU::V_ADD_U32_e32:
+ case AMDGPU::V_ADD_U32_e64:
+ case AMDGPU::V_ADD_U32_e64_dpp:
+ case AMDGPU::V_ADD_U32_sdwa:
+ //
+ case AMDGPU::V_DIV_FIXUP_F16_gfx9_e64:
+ case AMDGPU::V_FMA_F16_gfx9_e64:
+ case AMDGPU::V_INTERP_P2_F16:
+ case AMDGPU::V_MAD_F16_e64:
+ case AMDGPU::V_MAD_U16_e64:
+ case AMDGPU::V_MAD_I16_e64:
+ //
+ case AMDGPU::V_SUBBREV_U32_dpp:
+ case AMDGPU::V_SUBBREV_U32_e32:
+ case AMDGPU::V_SUBBREV_U32_e64:
+ case AMDGPU::V_SUBBREV_U32_e64_dpp:
+ case AMDGPU::V_SUBBREV_U32_sdwa:
+ //
+ case AMDGPU::V_SUBB_U32_dpp:
+ case AMDGPU::V_SUBB_U32_e32:
+ case AMDGPU::V_SUBB_U32_e64:
+ case AMDGPU::V_SUBB_U32_e64_dpp:
+ case AMDGPU::V_SUBB_U32_sdwa:
+ //
+ case AMDGPU::V_SUBREV_CO_U32_dpp:
+ case AMDGPU::V_SUBREV_CO_U32_e32:
+ case AMDGPU::V_SUBREV_CO_U32_e64:
+ case AMDGPU::V_SUBREV_CO_U32_e64_dpp:
+ case AMDGPU::V_SUBREV_CO_U32_sdwa:
+ //
+ case AMDGPU::V_SUBREV_U32_dpp:
+ case AMDGPU::V_SUBREV_U32_e32:
+ case AMDGPU::V_SUBREV_U32_e64:
+ case AMDGPU::V_SUBREV_U32_e64_dpp:
+ case AMDGPU::V_SUBREV_U32_sdwa:
+ //
+ case AMDGPU::V_SUB_CO_U32_dpp:
+ case AMDGPU::V_SUB_CO_U32_e32:
+ case AMDGPU::V_SUB_CO_U32_e64:
+ case AMDGPU::V_SUB_CO_U32_e64_dpp:
+ case AMDGPU::V_SUB_CO_U32_sdwa:
+ //
+ case AMDGPU::V_SUB_U32_dpp:
+ case AMDGPU::V_SUB_U32_e32:
+ case AMDGPU::V_SUB_U32_e64:
+ case AMDGPU::V_SUB_U32_e64_dpp:
+ case AMDGPU::V_SUB_U32_sdwa:
+ return true;
+ default:
+ return false;
+ }
+}
+
int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
Opcode = SIInstrInfo::getNonSoftWaitcntOpcode(Opcode);
unsigned Gen = subtargetEncodingFamily(ST);
- if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
- ST.getGeneration() == AMDGPUSubtarget::GFX9)
+ if (isRenamedInGFX9(Opcode) &&
+ ST.getGeneration() == AMDGPUSubtarget::GFX9){
Gen = SIEncodingFamily::GFX9;
+ }
// Adjust the encoding family to GFX80 for D16 buffer instructions when the
// subtarget has UnpackedD16VMem feature.
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index dab2cb2946ac97..06e79c4bba8d82 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -1339,6 +1339,8 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
/// Return true if this opcode should not be used by codegen.
bool isAsmOnlyOpcode(int MCOp) const;
+ bool isRenamedInGFX9(int Opcode) const;
+
const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum,
const TargetRegisterInfo *TRI,
const MachineFunction &MF)
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index 13fe79b4759608..8c5212bb58eca4 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -141,26 +141,21 @@ class getVOP2Pat64 <SDPatternOperator node, VOPProfile P> : LetDummies {
multiclass VOP2Inst_e32<string opName,
VOPProfile P,
SDPatternOperator node = null_frag,
- string revOp = opName,
- bit GFX9Renamed = 0> {
- let renamedInGFX9 = GFX9Renamed in {
+ string revOp = opName> {
def _e32 : VOP2_Pseudo <opName, P, VOPPatOrNull<node,P>.ret>,
Commutable_REV<revOp#"_e32", !eq(revOp, opName)>;
- } // End renamedInGFX9 = GFX9Renamed
}
multiclass
VOP2Inst_e32_VOPD<string opName, VOPProfile P, bits<5> VOPDOp,
string VOPDName, SDPatternOperator node = null_frag,
- string revOp = opName, bit GFX9Renamed = 0> {
- defm NAME : VOP2Inst_e32<opName, P, node, revOp, GFX9Renamed>,
+ string revOp = opName> {
+ defm NAME : VOP2Inst_e32<opName, P, node, revOp>,
VOPD_Component<VOPDOp, VOPDName>;
}
multiclass VOP2Inst_e64<string opName,
VOPProfile P,
SDPatternOperator node = null_frag,
- string revOp = opName,
- bit GFX9Renamed = 0> {
- let renamedInGFX9 = GFX9Renamed in {
+ string revOp = opName> {
def _e64 : VOP3InstBase <opName, P, node, 1>,
Commutable_REV<revOp#"_e64", !eq(revOp, opName)>;
@@ -168,45 +163,37 @@ multiclass VOP2Inst_e64<string opName,
if P.HasExtVOP3DPP then
def _e64_dpp : VOP3_DPP_Pseudo <opName, P>;
} // End SubtargetPredicate = isGFX11Plus
- } // End renamedInGFX9 = GFX9Renamed
}
multiclass VOP2Inst_sdwa<string opName,
- VOPProfile P,
- bit GFX9Renamed = 0> {
- let renamedInGFX9 = GFX9Renamed in {
+ VOPProfile P> {
if P.HasExtSDWA then
def _sdwa : VOP2_SDWA_Pseudo <opName, P>;
- } // End renamedInGFX9 = GFX9Renamed
}
multiclass VOP2Inst<string opName,
VOPProfile P,
SDPatternOperator node = null_frag,
- string revOp = opName,
- bit GFX9Renamed = 0> :
- VOP2Inst_e32<opName, P, node, revOp, GFX9Renamed>,
- VOP2Inst_e64<opName, P, node, revOp, GFX9Renamed>,
- VOP2Inst_sdwa<opName, P, GFX9Renamed> {
- let renamedInGFX9 = GFX9Renamed in {
+ string revOp = opName> :
+ VOP2Inst_e32<opName, P, node, revOp>,
+ VOP2Inst_e64<opName, P, node, revOp>,
+ VOP2Inst_sdwa<opName, P> {
if P.HasExtDPP then
def _dpp : VOP2_DPP_Pseudo <opName, P>;
- }
}
multiclass VOP2Inst_t16<string opName,
VOPProfile P,
SDPatternOperator node = null_frag,
- string revOp = opName,
- bit GFX9Renamed = 0> {
+ string revOp = opName> {
let SubtargetPredicate = NotHasTrue16BitInsts, OtherPredicates = [Has16BitInsts] in {
- defm NAME : VOP2Inst<opName, P, node, revOp, GFX9Renamed>;
+ defm NAME : VOP2Inst<opName, P, node, revOp>;
}
let SubtargetPredicate = UseRealTrue16Insts in {
- defm _t16 : VOP2Inst<opName#"_t16", VOPProfile_True16<P>, node, revOp#"_t16", GFX9Renamed>;
+ defm _t16 : VOP2Inst<opName#"_t16", VOPProfile_True16<P>, node, revOp#"_t16">;
}
let SubtargetPredicate = UseFakeTrue16Insts in {
- defm _fake16 : VOP2Inst<opName#"_fake16", VOPProfile_Fake16<P>, node, revOp#"_fake16", GFX9Renamed>;
+ defm _fake16 : VOP2Inst<opName#"_fake16", VOPProfile_Fake16<P>, node, revOp#"_fake16">;
}
}
@@ -217,13 +204,12 @@ multiclass VOP2Inst_t16<string opName,
multiclass VOP2Inst_e64_t16<string opName,
VOPProfile P,
SDPatternOperator node = null_frag,
- string revOp = opName,
- bit GFX9Renamed = 0> {
+ string revOp = opName> {
let SubtargetPredicate = NotHasTrue16BitInsts, OtherPredicates = [Has16BitInsts] in {
- defm NAME : VOP2Inst<opName, P, node, revOp, GFX9Renamed>;
+ defm NAME : VOP2Inst<opName, P, node, revOp>;
}
let SubtargetPredicate = HasTrue16BitInsts in {
- defm _t16 : VOP2Inst_e64<opName#"_t16", VOPProfile_Fake16<P>, node, revOp#"_t16", GFX9Renamed>;
+ defm _t16 : VOP2Inst_e64<opName#"_t16", VOPProfile_Fake16<P>, node, revOp#"_t16">;
}
}
@@ -232,24 +218,19 @@ multiclass VOP2Inst_VOPD<string opName,
bits<5> VOPDOp,
string VOPDName,
SDPatternOperator node = null_frag,
- string revOp = opName,
- bit GFX9Renamed = 0> :
- VOP2Inst_e32_VOPD<opName, P, VOPDOp, VOPDName, node, revOp, GFX9Renamed>,
- VOP2Inst_e64<opName, P, node, revOp, GFX9Renamed>,
- VOP2Inst_sdwa<opName, P, GFX9Renamed> {
- let renamedInGFX9 = GFX9Renamed in {
+ string revOp = opName> :
+ VOP2Inst_e32_VOPD<opName, P, VOPDOp, VOPDName, node, revOp>,
+ VOP2Inst_e64<opName, P, node, revOp>,
+ VOP2Inst_sdwa<opName, P> {
if P.HasExtDPP then
def _dpp : VOP2_DPP_Pseudo <opName, P>;
- }
}
multiclass VOP2bInst <string opName,
VOPProfile P,
SDPatternOperator node = null_frag,
string revOp = opName,
- bit GFX9Renamed = 0,
bit useSGPRInput = !eq(P.NumSrcArgs, 3)> {
- let renamedInGFX9 = GFX9Renamed in {
let SchedRW = [Write32Bit, WriteSALU] in {
let Uses = !if(useSGPRInput, [VCC, EXEC], [EXEC]), Defs = [VCC] in {
def _e32 : VOP2_Pseudo <opName, P, VOPPatOrNull<node,P>.ret>,
@@ -273,7 +254,6 @@ multiclass VOP2bInst <string opName,
def _e64_dpp : VOP3_DPP_Pseudo <opName, P>;
} // End SubtargetPredicate = isGFX11Plus
}
- }
}
class VOP2bInstAlias <VOP2_Pseudo ps, Instruction inst,
@@ -751,18 +731,18 @@ def V_MADAK_F32 : VOP2_Pseudo <"v_madak_f32", VOP_MADAK_F32, []>;
// No patterns so that the scalar instructions are always selected.
// The scalar versions will be replaced with vector when needed later.
-defm V_ADD_CO_U32 : VOP2bInst <"v_add_co_u32", VOP2b_I32_I1_I32_I32, null_frag, "v_add_co_u32", 1>;
-defm V_SUB_CO_U32 : VOP2bInst <"v_sub_co_u32", VOP2b_I32_I1_I32_I32, null_frag, "v_sub_co_u32", 1>;
-defm V_SUBREV_CO_U32 : VOP2bInst <"v_subrev_co_u32", VOP2b_I32_I1_I32_I32, null_frag, "v_sub_co_u32", 1>;
-defm V_ADDC_U32 : VOP2bInst <"v_addc_u32", VOP2b_I32_I1_I32_I32_I1, null_frag, "v_addc_u32", 1>;
-defm V_SUBB_U32 : VOP2bInst <"v_subb_u32", VOP2b_I32_I1_I32_I32_I1, null_frag, "v_subb_u32", 1>;
-defm V_SUBBREV_U32 : VOP2bInst <"v_subbrev_u32", VOP2b_I32_I1_I32_I32_I1, null_frag, "v_subb_u32", 1>;
+defm V_ADD_CO_U32 : VOP2bInst <"v_add_co_u32", VOP2b_I32_I1_I32_I32, null_frag, "v_add_co_u32">;
+defm V_SUB_CO_U32 : VOP2bInst <"v_sub_co_u32", VOP2b_I32_I1_I32_I32, null_frag, "v_sub_co_u32">;
+defm V_SUBREV_CO_U32 : VOP2bInst <"v_subrev_co_u32", VOP2b_I32_I1_I32_I32, null_frag, "v_sub_co_u32">;
+defm V_ADDC_U32 : VOP2bInst <"v_addc_u32", VOP2b_I32_I1_I32_I32_I1, null_frag, "v_addc_u32">;
+defm V_SUBB_U32 : VOP2bInst <"v_subb_u32", VOP2b_I32_I1_I32_I32_I1, null_frag, "v_subb_u32">;
+defm V_SUBBREV_U32 : VOP2bInst <"v_subbrev_u32", VOP2b_I32_I1_I32_I32_I1, null_frag, "v_subb_u32">;
let SubtargetPredicate = HasAddNoCarryInsts, isReMaterializable = 1 in {
-defm V_ADD_U32 : VOP2Inst_VOPD <"v_add_u32", VOP_I32_I32_I32_ARITH, 0x10, "v_add_nc_u32", null_frag, "v_add_u32", 1>;
-defm V_SUB_U32 : VOP2Inst <"v_sub_u32", VOP_I32_I32_I32_ARITH, null_frag, "v_sub_u32", 1>;
-defm V_SUBREV_U32 : VOP2Inst <"v_subrev_u32", VOP_I32_I32_I32_ARITH, null_frag, "v_sub_u32", 1>;
+defm V_ADD_U32 : VOP2Inst_VOPD <"v_add_u32", VOP_I32_I32_I32_ARITH, 0x10, "v_add_nc_u32", null_frag, "v_add_u32">;
+defm V_SUB_U32 : VOP2Inst <"v_sub_u32", VOP_I32_I32_I32_ARITH, null_frag, "v_sub_u32">;
+defm V_SUBREV_U32 : VOP2Inst <"v_subrev_u32", VOP_I32_I32_I32_ARITH, null_frag, "v_sub_u32">;
}
} // End isCommutable = 1
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 334cfad478f151..d5d0cd5091289d 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -333,35 +333,33 @@ let FPDPRounding = 1 in {
defm V_FMA_F16 : VOP3Inst <"v_fma_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, any_fma>;
} // End Predicates = [Has16BitInsts, isGFX8Only]
- let renamedInGFX9 = 1, SubtargetPredicate = isGFX9Plus in {
+ let SubtargetPredicate = isGFX9Plus in {
defm V_DIV_FIXUP_F16_gfx9 : VOP3Inst <"v_div_fixup_f16_gfx9",
VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, AMDGPUdiv_fixup>;
defm V_FMA_F16_gfx9 : VOP3Inst <"v_fma_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, any_fma>;
- } // End renamedInGFX9 = 1, SubtargetPredicate = isGFX9Plus
+ } // End SubtargetPredicate = isGFX9Plus
} // End FPDPRounding = 1
let SubtargetPredicate = Has16BitInsts, isCommutable = 1 in {
-let renamedInGFX9 = 1 in {
- defm V_MAD_U16 : VOP3Inst <"v_mad_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>;
- defm V_MAD_I16 : VOP3Inst <"v_mad_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>;
- let FPDPRounding = 1 in {
- defm V_MAD_F16 : VOP3Inst <"v_mad_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, any_fmad>;
- let Uses = [MODE, M0, EXEC] in {
- let OtherPredicates = [isNotGFX90APlus] in
- // For some reason the intrinsic operands are in a different order
- // from the instruction operands.
- def V_INTERP_P2_F16 : VOP3Interp <"v_interp_p2_f16", VOP3_INTERP16<[f16, f32, i32, f32]>,
- [(set f16:$vdst,
- (int_amdgcn_interp_p2_f16 (VOP3Mods f32:$src2, i32:$src2_modifiers),
- (VOP3Mods f32:$src0, i32:$src0_modifiers),
- (i32 timm:$attrchan),
- (i32 timm:$attr),
- (i1 timm:$high),
- M0))]>;
- } // End Uses = [M0, MODE, EXEC]
- } // End FPDPRounding = 1
-} // End renamedInGFX9 = 1
+defm V_MAD_U16 : VOP3Inst <"v_mad_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>;
+defm V_MAD_I16 : VOP3Inst <"v_mad_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>;
+let FPDPRounding = 1 in {
+ defm V_MAD_F16 : VOP3Inst <"v_mad_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, any_fmad>;
+ let Uses = [MODE, M0, EXEC] in {
+ let OtherPredicates = [isNotGFX90APlus] in
+ // For some reason the intrinsic operands are in a different order
+ // from the instruction operands.
+ def V_INTERP_P2_F16 : VOP3Interp <"v_interp_p2_f16", VOP3_INTERP16<[f16, f32, i32, f32]>,
+ [(set f16:$vdst,
+ (int_amdgcn_interp_p2_f16 (VOP3Mods f32:$src2, i32:$src2_modifiers),
+ (VOP3Mods f32:$src0, i32:$src0_modifiers),
+ (i32 timm:$attrchan),
+ (i32 timm:$attr),
+ (i1 timm:$high),
+ M0))]>;
+ } // End Uses = [M0, MODE, EXEC]
+} // End FPDPRounding = 1
let SubtargetPredicate = isGFX9Only, FPDPRounding = 1 in {
defm V_MAD_F16_gfx9 : VOP3Inst <"v_mad_f16_gfx9", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>> ;
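Net effect of this first patch on the C++ side: the rename check moves out of the TableGen-generated TSFlags word and into a helper, which frees TSFlags bit 44. A condensed sketch (not a literal excerpt; formatting and operand order follow the later patches in this series):

    // Before: property encoded as a TSFlags bit set from TableGen.
    if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
        ST.getGeneration() == AMDGPUSubtarget::GFX9)
      Gen = SIEncodingFamily::GFX9;

    // After: property answered by a switch over opcodes in SIInstrInfo.cpp,
    // leaving TSFlags bit 44 reserved for a future flag.
    if (ST.getGeneration() == AMDGPUSubtarget::GFX9 && isRenamedInGFX9(Opcode))
      Gen = SIEncodingFamily::GFX9;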
From a67713d956ee20384898f096a0b35430e173d703 Mon Sep 17 00:00:00 2001
From: Corbin Robeck <corbin.robeck at amd.com>
Date: Fri, 23 Feb 2024 08:56:39 -0600
Subject: [PATCH 2/4] formatting
---
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 127 ++++++++++++-------------
llvm/lib/Target/AMDGPU/SIInstrInfo.h | 2 +-
2 files changed, 64 insertions(+), 65 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 501e7607066224..2b7b6040afb4b7 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -9135,67 +9135,67 @@ bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
}
bool SIInstrInfo::isRenamedInGFX9(int Opcode) const {
- switch(Opcode) {
- case AMDGPU::V_ADDC_U32_dpp:
- case AMDGPU::V_ADDC_U32_e32:
- case AMDGPU::V_ADDC_U32_e64:
- case AMDGPU::V_ADDC_U32_e64_dpp:
- case AMDGPU::V_ADDC_U32_sdwa:
- //
- case AMDGPU::V_ADD_CO_U32_dpp:
- case AMDGPU::V_ADD_CO_U32_e32:
- case AMDGPU::V_ADD_CO_U32_e64:
- case AMDGPU::V_ADD_CO_U32_e64_dpp:
- case AMDGPU::V_ADD_CO_U32_sdwa:
- //
- case AMDGPU::V_ADD_U32_dpp:
- case AMDGPU::V_ADD_U32_e32:
- case AMDGPU::V_ADD_U32_e64:
- case AMDGPU::V_ADD_U32_e64_dpp:
- case AMDGPU::V_ADD_U32_sdwa:
- //
- case AMDGPU::V_DIV_FIXUP_F16_gfx9_e64:
- case AMDGPU::V_FMA_F16_gfx9_e64:
- case AMDGPU::V_INTERP_P2_F16:
- case AMDGPU::V_MAD_F16_e64:
- case AMDGPU::V_MAD_U16_e64:
- case AMDGPU::V_MAD_I16_e64:
- //
- case AMDGPU::V_SUBBREV_U32_dpp:
- case AMDGPU::V_SUBBREV_U32_e32:
- case AMDGPU::V_SUBBREV_U32_e64:
- case AMDGPU::V_SUBBREV_U32_e64_dpp:
- case AMDGPU::V_SUBBREV_U32_sdwa:
- //
- case AMDGPU::V_SUBB_U32_dpp:
- case AMDGPU::V_SUBB_U32_e32:
- case AMDGPU::V_SUBB_U32_e64:
- case AMDGPU::V_SUBB_U32_e64_dpp:
- case AMDGPU::V_SUBB_U32_sdwa:
- //
- case AMDGPU::V_SUBREV_CO_U32_dpp:
- case AMDGPU::V_SUBREV_CO_U32_e32:
- case AMDGPU::V_SUBREV_CO_U32_e64:
- case AMDGPU::V_SUBREV_CO_U32_e64_dpp:
- case AMDGPU::V_SUBREV_CO_U32_sdwa:
- //
- case AMDGPU::V_SUBREV_U32_dpp:
- case AMDGPU::V_SUBREV_U32_e32:
- case AMDGPU::V_SUBREV_U32_e64:
- case AMDGPU::V_SUBREV_U32_e64_dpp:
- case AMDGPU::V_SUBREV_U32_sdwa:
- //
- case AMDGPU::V_SUB_CO_U32_dpp:
- case AMDGPU::V_SUB_CO_U32_e32:
- case AMDGPU::V_SUB_CO_U32_e64:
- case AMDGPU::V_SUB_CO_U32_e64_dpp:
- case AMDGPU::V_SUB_CO_U32_sdwa:
- //
- case AMDGPU::V_SUB_U32_dpp:
- case AMDGPU::V_SUB_U32_e32:
- case AMDGPU::V_SUB_U32_e64:
- case AMDGPU::V_SUB_U32_e64_dpp:
- case AMDGPU::V_SUB_U32_sdwa:
+ switch (Opcode) {
+ case AMDGPU::V_ADDC_U32_dpp:
+ case AMDGPU::V_ADDC_U32_e32:
+ case AMDGPU::V_ADDC_U32_e64:
+ case AMDGPU::V_ADDC_U32_e64_dpp:
+ case AMDGPU::V_ADDC_U32_sdwa:
+ //
+ case AMDGPU::V_ADD_CO_U32_dpp:
+ case AMDGPU::V_ADD_CO_U32_e32:
+ case AMDGPU::V_ADD_CO_U32_e64:
+ case AMDGPU::V_ADD_CO_U32_e64_dpp:
+ case AMDGPU::V_ADD_CO_U32_sdwa:
+ //
+ case AMDGPU::V_ADD_U32_dpp:
+ case AMDGPU::V_ADD_U32_e32:
+ case AMDGPU::V_ADD_U32_e64:
+ case AMDGPU::V_ADD_U32_e64_dpp:
+ case AMDGPU::V_ADD_U32_sdwa:
+ //
+ case AMDGPU::V_DIV_FIXUP_F16_gfx9_e64:
+ case AMDGPU::V_FMA_F16_gfx9_e64:
+ case AMDGPU::V_INTERP_P2_F16:
+ case AMDGPU::V_MAD_F16_e64:
+ case AMDGPU::V_MAD_U16_e64:
+ case AMDGPU::V_MAD_I16_e64:
+ //
+ case AMDGPU::V_SUBBREV_U32_dpp:
+ case AMDGPU::V_SUBBREV_U32_e32:
+ case AMDGPU::V_SUBBREV_U32_e64:
+ case AMDGPU::V_SUBBREV_U32_e64_dpp:
+ case AMDGPU::V_SUBBREV_U32_sdwa:
+ //
+ case AMDGPU::V_SUBB_U32_dpp:
+ case AMDGPU::V_SUBB_U32_e32:
+ case AMDGPU::V_SUBB_U32_e64:
+ case AMDGPU::V_SUBB_U32_e64_dpp:
+ case AMDGPU::V_SUBB_U32_sdwa:
+ //
+ case AMDGPU::V_SUBREV_CO_U32_dpp:
+ case AMDGPU::V_SUBREV_CO_U32_e32:
+ case AMDGPU::V_SUBREV_CO_U32_e64:
+ case AMDGPU::V_SUBREV_CO_U32_e64_dpp:
+ case AMDGPU::V_SUBREV_CO_U32_sdwa:
+ //
+ case AMDGPU::V_SUBREV_U32_dpp:
+ case AMDGPU::V_SUBREV_U32_e32:
+ case AMDGPU::V_SUBREV_U32_e64:
+ case AMDGPU::V_SUBREV_U32_e64_dpp:
+ case AMDGPU::V_SUBREV_U32_sdwa:
+ //
+ case AMDGPU::V_SUB_CO_U32_dpp:
+ case AMDGPU::V_SUB_CO_U32_e32:
+ case AMDGPU::V_SUB_CO_U32_e64:
+ case AMDGPU::V_SUB_CO_U32_e64_dpp:
+ case AMDGPU::V_SUB_CO_U32_sdwa:
+ //
+ case AMDGPU::V_SUB_U32_dpp:
+ case AMDGPU::V_SUB_U32_e32:
+ case AMDGPU::V_SUB_U32_e64:
+ case AMDGPU::V_SUB_U32_e64_dpp:
+ case AMDGPU::V_SUB_U32_sdwa:
return true;
default:
return false;
@@ -9207,10 +9207,9 @@ int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
unsigned Gen = subtargetEncodingFamily(ST);
- if (isRenamedInGFX9(Opcode) &&
- ST.getGeneration() == AMDGPUSubtarget::GFX9){
+ if (isRenamedInGFX9(Opcode) && ST.getGeneration() == AMDGPUSubtarget::GFX9) {
Gen = SIEncodingFamily::GFX9;
- }
+ }
// Adjust the encoding family to GFX80 for D16 buffer instructions when the
// subtarget has UnpackedD16VMem feature.
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 06e79c4bba8d82..fc2ecd2d1534f5 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -1340,7 +1340,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
bool isAsmOnlyOpcode(int MCOp) const;
bool isRenamedInGFX9(int Opcode) const;
-
+
const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum,
const TargetRegisterInfo *TRI,
const MachineFunction &MF)
From 9f311a064bbfe0e56e00d5cc0c0dcdf1837dbaf4 Mon Sep 17 00:00:00 2001
From: Corbin Robeck <corbin.robeck at amd.com>
Date: Tue, 27 Feb 2024 08:29:02 -0600
Subject: [PATCH 3/4] move isRenamedInGFX9 to be static and add helper macro
for case prefixes
---
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 71 ++++++--------------------
llvm/lib/Target/AMDGPU/SIInstrInfo.h | 2 -
2 files changed, 17 insertions(+), 56 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 2b7b6040afb4b7..1e5664d4c7cade 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -9134,25 +9134,24 @@ bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
}
}
-bool SIInstrInfo::isRenamedInGFX9(int Opcode) const {
+#define GENERATE_RENAMED_GFX9_CASES(OPCODE) \
+ case OPCODE##_dpp: \
+ case OPCODE##_e32: \
+ case OPCODE##_e64: \
+ case OPCODE##_e64_dpp: \
+ case OPCODE##_sdwa:
+
+static bool isRenamedInGFX9(int Opcode) {
switch (Opcode) {
- case AMDGPU::V_ADDC_U32_dpp:
- case AMDGPU::V_ADDC_U32_e32:
- case AMDGPU::V_ADDC_U32_e64:
- case AMDGPU::V_ADDC_U32_e64_dpp:
- case AMDGPU::V_ADDC_U32_sdwa:
- //
- case AMDGPU::V_ADD_CO_U32_dpp:
- case AMDGPU::V_ADD_CO_U32_e32:
- case AMDGPU::V_ADD_CO_U32_e64:
- case AMDGPU::V_ADD_CO_U32_e64_dpp:
- case AMDGPU::V_ADD_CO_U32_sdwa:
- //
- case AMDGPU::V_ADD_U32_dpp:
- case AMDGPU::V_ADD_U32_e32:
- case AMDGPU::V_ADD_U32_e64:
- case AMDGPU::V_ADD_U32_e64_dpp:
- case AMDGPU::V_ADD_U32_sdwa:
+ GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_ADDC_U32)
+ GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_ADD_CO_U32)
+ GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_ADD_U32)
+ GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBBREV_U32)
+ GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBB_U32)
+ GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBREV_CO_U32)
+ GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUBREV_U32)
+ GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUB_CO_U32)
+ GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_SUB_U32)
//
case AMDGPU::V_DIV_FIXUP_F16_gfx9_e64:
case AMDGPU::V_FMA_F16_gfx9_e64:
@@ -9160,42 +9159,6 @@ bool SIInstrInfo::isRenamedInGFX9(int Opcode) const {
case AMDGPU::V_MAD_F16_e64:
case AMDGPU::V_MAD_U16_e64:
case AMDGPU::V_MAD_I16_e64:
- //
- case AMDGPU::V_SUBBREV_U32_dpp:
- case AMDGPU::V_SUBBREV_U32_e32:
- case AMDGPU::V_SUBBREV_U32_e64:
- case AMDGPU::V_SUBBREV_U32_e64_dpp:
- case AMDGPU::V_SUBBREV_U32_sdwa:
- //
- case AMDGPU::V_SUBB_U32_dpp:
- case AMDGPU::V_SUBB_U32_e32:
- case AMDGPU::V_SUBB_U32_e64:
- case AMDGPU::V_SUBB_U32_e64_dpp:
- case AMDGPU::V_SUBB_U32_sdwa:
- //
- case AMDGPU::V_SUBREV_CO_U32_dpp:
- case AMDGPU::V_SUBREV_CO_U32_e32:
- case AMDGPU::V_SUBREV_CO_U32_e64:
- case AMDGPU::V_SUBREV_CO_U32_e64_dpp:
- case AMDGPU::V_SUBREV_CO_U32_sdwa:
- //
- case AMDGPU::V_SUBREV_U32_dpp:
- case AMDGPU::V_SUBREV_U32_e32:
- case AMDGPU::V_SUBREV_U32_e64:
- case AMDGPU::V_SUBREV_U32_e64_dpp:
- case AMDGPU::V_SUBREV_U32_sdwa:
- //
- case AMDGPU::V_SUB_CO_U32_dpp:
- case AMDGPU::V_SUB_CO_U32_e32:
- case AMDGPU::V_SUB_CO_U32_e64:
- case AMDGPU::V_SUB_CO_U32_e64_dpp:
- case AMDGPU::V_SUB_CO_U32_sdwa:
- //
- case AMDGPU::V_SUB_U32_dpp:
- case AMDGPU::V_SUB_U32_e32:
- case AMDGPU::V_SUB_U32_e64:
- case AMDGPU::V_SUB_U32_e64_dpp:
- case AMDGPU::V_SUB_U32_sdwa:
return true;
default:
return false;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index fc2ecd2d1534f5..dab2cb2946ac97 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -1339,8 +1339,6 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
/// Return true if this opcode should not be used by codegen.
bool isAsmOnlyOpcode(int MCOp) const;
- bool isRenamedInGFX9(int Opcode) const;
-
const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum,
const TargetRegisterInfo *TRI,
const MachineFunction &MF)
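For readers skimming the diff: GENERATE_RENAMED_GFX9_CASES expands an opcode stem into its five pseudo-instruction variants, so each block of hand-written case labels collapses to a single line. Illustrative expansion, following the macro definition above:

    GENERATE_RENAMED_GFX9_CASES(AMDGPU::V_ADD_U32)
    // expands to:
    case AMDGPU::V_ADD_U32_dpp:
    case AMDGPU::V_ADD_U32_e32:
    case AMDGPU::V_ADD_U32_e64:
    case AMDGPU::V_ADD_U32_e64_dpp:
    case AMDGPU::V_ADD_U32_sdwa: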
From 2d5d5c3c9eb1f4d5fefacf149ee8b51e42209c4b Mon Sep 17 00:00:00 2001
From: Corbin Robeck <corbin.robeck at amd.com>
Date: Fri, 1 Mar 2024 10:08:19 +0100
Subject: [PATCH 4/4] reorder conditional statement
---
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 1e5664d4c7cade..29b2963165ea8b 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -9170,9 +9170,8 @@ int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
unsigned Gen = subtargetEncodingFamily(ST);
- if (isRenamedInGFX9(Opcode) && ST.getGeneration() == AMDGPUSubtarget::GFX9) {
+ if (ST.getGeneration() == AMDGPUSubtarget::GFX9 && isRenamedInGFX9(Opcode))
Gen = SIEncodingFamily::GFX9;
- }
// Adjust the encoding family to GFX80 for D16 buffer instructions when the
// subtarget has UnpackedD16VMem feature.
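This last change is a small ordering/readability tweak: with the subtarget-generation test first, the && short-circuits on non-GFX9 subtargets and the opcode switch in isRenamedInGFX9 is never entered, so the common case reduces to a single comparison:

    // Resulting check (as in the hunk above); isRenamedInGFX9 only runs on GFX9.
    if (ST.getGeneration() == AMDGPUSubtarget::GFX9 && isRenamedInGFX9(Opcode))
      Gen = SIEncodingFamily::GFX9;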