[llvm] r366234 - AMDGPU: Redefine load PatFrags
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 16 10:38:50 PDT 2019
Author: arsenm
Date: Tue Jul 16 10:38:50 2019
New Revision: 366234
URL: http://llvm.org/viewvc/llvm-project?rev=366234&view=rev
Log:
AMDGPU: Redefine load PatFrags
Rewrite the load PatFrags using the new PatFrag address-space matching in
TableGen. These fragments now work with both SelectionDAG and GlobalISel.
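For context, the mechanism this patch moves to: instead of attaching a
hand-written C++ predicate that inspects getAddressSpace() (usable only by
the SelectionDAG importer), a PatFrag now declares the address spaces it
matches in its AddressSpaces field, and TableGen generates the check for
both selectors. A minimal before/after sketch, with hypothetical fragment
names chosen only for illustration:

  // Old style: opaque C++ predicate, SelectionDAG-only.
  def my_load_global_old : PatFrag<(ops node:$ptr), (load node:$ptr), [{
    return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
  }]>;

  // New style: declarative fields that both the SelectionDAG and
  // GlobalISel pattern importers understand.
  def my_load_global : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
    let IsLoad = 1;
    let IsNonExtLoad = 1;
    let AddressSpaces = [ 1 ];  // global; the patch adds an AddrSpaces
                                // helper class for these constants
  }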
Modified:
llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td
llvm/trunk/lib/Target/AMDGPU/BUFInstructions.td
llvm/trunk/lib/Target/AMDGPU/FLATInstructions.td
llvm/trunk/lib/Target/AMDGPU/R600Instructions.td
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td?rev=366234&r1=366233&r2=366234&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td Tue Jul 16 10:38:50 2019
@@ -11,6 +11,18 @@
//
//===----------------------------------------------------------------------===//
+class AddressSpacesImpl {
+ int Flat = 0;
+ int Global = 1;
+ int Region = 2;
+ int Local = 3;
+ int Constant = 4;
+ int Private = 5;
+}
+
+def AddrSpaces : AddressSpacesImpl;
+
+
class AMDGPUInst <dag outs, dag ins, string asm = "",
list<dag> pattern = []> : Instruction {
field bit isRegisterLoad = 0;
@@ -323,6 +335,10 @@ def TEX_SHADOW_ARRAY : PatLeaf<
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//
+class AddressSpaceList<list<int> AS> {
+ list<int> AddrSpaces = AS;
+}
+
class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
}]>;
@@ -341,25 +357,25 @@ class StoreHi16<SDPatternOperator op> :
(ops node:$value, node:$ptr), (op (srl node:$value, (i32 16)), node:$ptr)
>;
-class PrivateAddress : CodePatPred<[{
- return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
-}]>;
+def LoadAddress_constant : AddressSpaceList<[ AddrSpaces.Constant ]>;
+def LoadAddress_global : AddressSpaceList<[ AddrSpaces.Global, AddrSpaces.Constant ]>;
+def StoreAddress_global : AddressSpaceList<[ AddrSpaces.Global ]>;
+
+def LoadAddress_flat : AddressSpaceList<[ AddrSpaces.Flat,
+ AddrSpaces.Global,
+ AddrSpaces.Constant ]>;
+def StoreAddress_flat : AddressSpaceList<[ AddrSpaces.Flat, AddrSpaces.Global ]>;
-class ConstantAddress : CodePatPred<[{
- return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
-}]>;
+def LoadAddress_private : AddressSpaceList<[ AddrSpaces.Private ]>;
+def StoreAddress_private : AddressSpaceList<[ AddrSpaces.Private ]>;
-class LocalAddress : CodePatPred<[{
- return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
-}]>;
+def LoadAddress_local : AddressSpaceList<[ AddrSpaces.Local ]>;
+def StoreAddress_local : AddressSpaceList<[ AddrSpaces.Local ]>;
+
+def LoadAddress_region : AddressSpaceList<[ AddrSpaces.Region ]>;
+def StoreAddress_region : AddressSpaceList<[ AddrSpaces.Region ]>;
-class RegionAddress : CodePatPred<[{
- return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
-}]>;
-class GlobalAddress : CodePatPred<[{
- return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
-}]>;
class GlobalLoadAddress : CodePatPred<[{
auto AS = cast<MemSDNode>(N)->getAddressSpace();
@@ -373,37 +389,86 @@ class FlatLoadAddress : CodePatPred<[{
AS == AMDGPUAS::CONSTANT_ADDRESS;
}]>;
+class GlobalAddress : CodePatPred<[{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
+}]>;
+
+class PrivateAddress : CodePatPred<[{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
+}]>;
+
+class LocalAddress : CodePatPred<[{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
+}]>;
+
+class RegionAddress : CodePatPred<[{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
+}]>;
+
class FlatStoreAddress : CodePatPred<[{
const auto AS = cast<MemSDNode>(N)->getAddressSpace();
return AS == AMDGPUAS::FLAT_ADDRESS ||
AS == AMDGPUAS::GLOBAL_ADDRESS;
}]>;
-class PrivateLoad <SDPatternOperator op> : LoadFrag <op>, PrivateAddress;
+// TODO: Remove these once stores are converted to the new PatFrag format.
class PrivateStore <SDPatternOperator op> : StoreFrag <op>, PrivateAddress;
-
-class LocalLoad <SDPatternOperator op> : LoadFrag <op>, LocalAddress;
class LocalStore <SDPatternOperator op> : StoreFrag <op>, LocalAddress;
-
-class RegionLoad <SDPatternOperator op> : LoadFrag <op>, RegionAddress;
class RegionStore <SDPatternOperator op> : StoreFrag <op>, RegionAddress;
-
-class GlobalLoad <SDPatternOperator op> : LoadFrag<op>, GlobalLoadAddress;
class GlobalStore <SDPatternOperator op> : StoreFrag<op>, GlobalAddress;
-
-class FlatLoad <SDPatternOperator op> : LoadFrag <op>, FlatLoadAddress;
class FlatStore <SDPatternOperator op> : StoreFrag <op>, FlatStoreAddress;
-class ConstantLoad <SDPatternOperator op> : LoadFrag <op>, ConstantAddress;
+foreach as = [ "global", "flat", "constant", "local", "private", "region" ] in {
+let AddressSpaces = !cast<AddressSpaceList>("LoadAddress_"#as).AddrSpaces in {
+
+def load_#as : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
+ let IsLoad = 1;
+ let IsNonExtLoad = 1;
+}
+
+def extloadi8_#as : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+ let IsLoad = 1;
+ let MemoryVT = i8;
+}
+
+def extloadi16_#as : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+ let IsLoad = 1;
+ let MemoryVT = i16;
+}
+
+def sextloadi8_#as : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+ let IsLoad = 1;
+ let MemoryVT = i8;
+}
+
+def sextloadi16_#as : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+ let IsLoad = 1;
+ let MemoryVT = i16;
+}
+
+def zextloadi8_#as : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+ let IsLoad = 1;
+ let MemoryVT = i8;
+}
+
+def zextloadi16_#as : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+ let IsLoad = 1;
+ let MemoryVT = i16;
+}
+
+def atomic_load_32_#as : PatFrag<(ops node:$ptr), (atomic_load_32 node:$ptr)> {
+ let IsAtomic = 1;
+ let MemoryVT = i32;
+}
+
+def atomic_load_64_#as : PatFrag<(ops node:$ptr), (atomic_load_64 node:$ptr)> {
+ let IsAtomic = 1;
+ let MemoryVT = i64;
+}
-def load_private : PrivateLoad <load>;
-def extloadi8_private : PrivateLoad <extloadi8>;
-def zextloadi8_private : PrivateLoad <zextloadi8>;
-def sextloadi8_private : PrivateLoad <sextloadi8>;
-def extloadi16_private : PrivateLoad <extloadi16>;
-def zextloadi16_private : PrivateLoad <zextloadi16>;
-def sextloadi16_private : PrivateLoad <sextloadi16>;
+} // End let AddressSpaces = ...
+} // End foreach AddrSpace
def store_private : PrivateStore <store>;
def truncstorei8_private : PrivateStore<truncstorei8>;
@@ -411,16 +476,6 @@ def truncstorei16_private : PrivateStore
def store_hi16_private : StoreHi16 <truncstorei16>, PrivateAddress;
def truncstorei8_hi16_private : StoreHi16<truncstorei8>, PrivateAddress;
-
-def load_global : GlobalLoad <load>;
-def sextloadi8_global : GlobalLoad <sextloadi8>;
-def extloadi8_global : GlobalLoad <extloadi8>;
-def zextloadi8_global : GlobalLoad <zextloadi8>;
-def sextloadi16_global : GlobalLoad <sextloadi16>;
-def extloadi16_global : GlobalLoad <extloadi16>;
-def zextloadi16_global : GlobalLoad <zextloadi16>;
-def atomic_load_global : GlobalLoad<atomic_load>;
-
def store_global : GlobalStore <store>;
def truncstorei8_global : GlobalStore <truncstorei8>;
def truncstorei16_global : GlobalStore <truncstorei16>;
@@ -428,16 +483,6 @@ def store_atomic_global : GlobalStore<at
def truncstorei8_hi16_global : StoreHi16 <truncstorei8>, GlobalAddress;
def truncstorei16_hi16_global : StoreHi16 <truncstorei16>, GlobalAddress;
-def load_local : LocalLoad <load>;
-def extloadi8_local : LocalLoad <extloadi8>;
-def zextloadi8_local : LocalLoad <zextloadi8>;
-def sextloadi8_local : LocalLoad <sextloadi8>;
-def extloadi16_local : LocalLoad <extloadi16>;
-def zextloadi16_local : LocalLoad <zextloadi16>;
-def sextloadi16_local : LocalLoad <sextloadi16>;
-def atomic_load_32_local : LocalLoad<atomic_load_32>;
-def atomic_load_64_local : LocalLoad<atomic_load_64>;
-
def store_local : LocalStore <store>;
def truncstorei8_local : LocalStore <truncstorei8>;
def truncstorei16_local : LocalStore <truncstorei16>;
@@ -461,15 +506,6 @@ def store_align16_local : Aligned16Bytes
(ops node:$val, node:$ptr), (store_local node:$val, node:$ptr)
>;
-def load_flat : FlatLoad <load>;
-def extloadi8_flat : FlatLoad <extloadi8>;
-def zextloadi8_flat : FlatLoad <zextloadi8>;
-def sextloadi8_flat : FlatLoad <sextloadi8>;
-def extloadi16_flat : FlatLoad <extloadi16>;
-def zextloadi16_flat : FlatLoad <zextloadi16>;
-def sextloadi16_flat : FlatLoad <sextloadi16>;
-def atomic_load_flat : FlatLoad<atomic_load>;
-
def store_flat : FlatStore <store>;
def truncstorei8_flat : FlatStore <truncstorei8>;
def truncstorei16_flat : FlatStore <truncstorei16>;
@@ -478,15 +514,6 @@ def truncstorei8_hi16_flat : StoreHi16<
def truncstorei16_hi16_flat : StoreHi16<truncstorei16>, FlatStoreAddress;
-def constant_load : ConstantLoad<load>;
-def sextloadi8_constant : ConstantLoad <sextloadi8>;
-def extloadi8_constant : ConstantLoad <extloadi8>;
-def zextloadi8_constant : ConstantLoad <zextloadi8>;
-def sextloadi16_constant : ConstantLoad <sextloadi16>;
-def extloadi16_constant : ConstantLoad <extloadi16>;
-def zextloadi16_constant : ConstantLoad <zextloadi16>;
-
-
class local_binary_atomic_op<SDNode atomic_op> :
PatFrag<(ops node:$ptr, node:$value),
(atomic_op node:$ptr, node:$value), [{
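To make the foreach above concrete: for the iteration as = "global", the
name concatenation load_#as and the !cast lookup of LoadAddress_global
expand to a record equivalent to this sketch (the numeric values come from
the AddressSpacesImpl class at the top of the file):

  def load_global : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
    let IsLoad = 1;
    let IsNonExtLoad = 1;
    // LoadAddress_global = [ AddrSpaces.Global, AddrSpaces.Constant ]
    let AddressSpaces = [ 1, 4 ];
  }

This expansion is why the old per-space classes (GlobalLoad, ConstantLoad,
and so on) and their long def lists could be deleted wholesale.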
Modified: llvm/trunk/lib/Target/AMDGPU/BUFInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/BUFInstructions.td?rev=366234&r1=366233&r2=366234&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/BUFInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/BUFInstructions.td Tue Jul 16 10:38:50 2019
@@ -1445,8 +1445,8 @@ def : MUBUFLoad_PatternADDR64 <BUFFER_LO
def : MUBUFLoad_PatternADDR64 <BUFFER_LOAD_USHORT_ADDR64, i32, extloadi16_constant>;
def : MUBUFLoad_PatternADDR64 <BUFFER_LOAD_USHORT_ADDR64, i32, zextloadi16_constant>;
-defm : MUBUFLoad_Atomic_Pattern <BUFFER_LOAD_DWORD_ADDR64, BUFFER_LOAD_DWORD_OFFSET, i32, atomic_load_global>;
-defm : MUBUFLoad_Atomic_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, BUFFER_LOAD_DWORDX2_OFFSET, i64, atomic_load_global>;
+defm : MUBUFLoad_Atomic_Pattern <BUFFER_LOAD_DWORD_ADDR64, BUFFER_LOAD_DWORD_OFFSET, i32, atomic_load_32_global>;
+defm : MUBUFLoad_Atomic_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, BUFFER_LOAD_DWORDX2_OFFSET, i64, atomic_load_64_global>;
} // End SubtargetPredicate = isGFX6GFX7
multiclass MUBUFLoad_Pattern <MUBUF_Pseudo Instr_OFFSET, ValueType vt,
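A note on the renames in this file: the old atomic_load_global fragment
wrapped the generic atomic_load node and matched any width, with the type
supplied by the pattern. The generated new-style fragments each pin
MemoryVT (see the foreach above), so the i32 and i64 patterns must now name
width-specific fragments. A sketch of the generated record referenced here:

  def atomic_load_32_global : PatFrag<(ops node:$ptr),
                                      (atomic_load_32 node:$ptr)> {
    let IsAtomic = 1;
    let MemoryVT = i32;
    // LoadAddress_global = [ AddrSpaces.Global, AddrSpaces.Constant ]
    let AddressSpaces = [ 1, 4 ];
  }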
Modified: llvm/trunk/lib/Target/AMDGPU/FLATInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/FLATInstructions.td?rev=366234&r1=366233&r2=366234&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/FLATInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/FLATInstructions.td Tue Jul 16 10:38:50 2019
@@ -782,8 +782,8 @@ def : FlatLoadPat <FLAT_LOAD_DWORDX2, lo
def : FlatLoadPat <FLAT_LOAD_DWORDX3, load_flat, v3i32>;
def : FlatLoadPat <FLAT_LOAD_DWORDX4, load_flat, v4i32>;
-def : FlatLoadAtomicPat <FLAT_LOAD_DWORD, atomic_load_flat, i32>;
-def : FlatLoadAtomicPat <FLAT_LOAD_DWORDX2, atomic_load_flat, i64>;
+def : FlatLoadAtomicPat <FLAT_LOAD_DWORD, atomic_load_32_flat, i32>;
+def : FlatLoadAtomicPat <FLAT_LOAD_DWORDX2, atomic_load_64_flat, i64>;
def : FlatStorePat <FLAT_STORE_BYTE, truncstorei8_flat, i32>;
def : FlatStorePat <FLAT_STORE_SHORT, truncstorei16_flat, i32>;
@@ -868,8 +868,8 @@ def : FlatLoadSignedPat <GLOBAL_LOAD_DWO
def : FlatLoadSignedPat <GLOBAL_LOAD_DWORDX3, load_global, v3i32>;
def : FlatLoadSignedPat <GLOBAL_LOAD_DWORDX4, load_global, v4i32>;
-def : FlatLoadAtomicPat <GLOBAL_LOAD_DWORD, atomic_load_global, i32>;
-def : FlatLoadAtomicPat <GLOBAL_LOAD_DWORDX2, atomic_load_global, i64>;
+def : FlatLoadAtomicPat <GLOBAL_LOAD_DWORD, atomic_load_32_global, i32>;
+def : FlatLoadAtomicPat <GLOBAL_LOAD_DWORDX2, atomic_load_64_global, i64>;
def : FlatStoreSignedPat <GLOBAL_STORE_BYTE, truncstorei8_global, i32>;
def : FlatStoreSignedPat <GLOBAL_STORE_BYTE, truncstorei8_global, i16>;
Modified: llvm/trunk/lib/Target/AMDGPU/R600Instructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600Instructions.td?rev=366234&r1=366233&r2=366234&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/R600Instructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/R600Instructions.td Tue Jul 16 10:38:50 2019
@@ -296,6 +296,8 @@ class VTX_READ <string name, dag outs, l
}
// FIXME: Deprecated.
+class LocalLoad <SDPatternOperator op> : LoadFrag <op>, LocalAddress;
+
class AZExtLoadBase <SDPatternOperator ld_node>: PatFrag<(ops node:$ptr),
(ld_node node:$ptr), [{
LoadSDNode *L = cast<LoadSDNode>(N);