[llvm] r367373 - TableGen: Add MinAlignment predicate

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 30 17:14:43 PDT 2019


Author: arsenm
Date: Tue Jul 30 17:14:43 2019
New Revision: 367373

URL: http://llvm.org/viewvc/llvm-project?rev=367373&view=rev
Log:
TableGen: Add MinAlignment predicate

AMDGPU uses some custom code predicates for testing alignments.

I'm still having trouble comprehending the behavior of predicate bits
in the PatFrag hierarchy. Any attempt to abstract these properties
unexpectedly fails to apply them.

Modified:
    llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
    llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
    llvm/trunk/include/llvm/Target/TargetSelectionDAG.td
    llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
    llvm/trunk/test/TableGen/address-space-patfrags.td
    llvm/trunk/utils/TableGen/CodeGenDAGPatterns.cpp
    llvm/trunk/utils/TableGen/CodeGenDAGPatterns.h
    llvm/trunk/utils/TableGen/GlobalISelEmitter.cpp

Modified: llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelector.h?rev=367373&r1=367372&r2=367373&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelector.h (original)
+++ llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelector.h Tue Jul 30 17:14:43 2019
@@ -148,6 +148,13 @@ enum {
   /// - AddrSpaceN+1 ...
   GIM_CheckMemoryAddressSpace,
 
+  /// Check the minimum alignment of the memory access for the given machine
+  /// memory operand.
+  /// - InsnID - Instruction ID
+  /// - MMOIdx - MMO index
+  /// - MinAlign - Minimum acceptable alignment
+  GIM_CheckMemoryAlignment,
+
   /// Check the size of the memory access for the given machine memory operand
   /// against the size of an operand.
   /// - InsnID - Instruction ID

Modified: llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h?rev=367373&r1=367372&r2=367373&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h (original)
+++ llvm/trunk/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h Tue Jul 30 17:14:43 2019
@@ -409,6 +409,30 @@ bool InstructionSelector::executeMatchTa
         return false;
       break;
     }
+    case GIM_CheckMemoryAlignment: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t MMOIdx = MatchTable[CurrentIdx++];
+      unsigned MinAlign = MatchTable[CurrentIdx++];
+
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+
+      MachineMemOperand *MMO
+        = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckMemoryAlignment"
+                      << "(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
+                      << ")->getAlignment() >= " << MinAlign << ")\n");
+      if (MMO->getAlignment() < MinAlign && handleReject() == RejectAndGiveUp)
+        return false;
+
+      break;
+    }
     case GIM_CheckMemorySizeEqualTo: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t MMOIdx = MatchTable[CurrentIdx++];

Modified: llvm/trunk/include/llvm/Target/TargetSelectionDAG.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/TargetSelectionDAG.td?rev=367373&r1=367372&r2=367373&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Target/TargetSelectionDAG.td (original)
+++ llvm/trunk/include/llvm/Target/TargetSelectionDAG.td Tue Jul 30 17:14:43 2019
@@ -741,6 +741,10 @@ class PatFrags<dag ops, list<dag> frags,
   // If this empty, accept any address space.
   list<int> AddressSpaces = ?;
 
+  // cast<MemSDNode>(N)->getAlignment() >=
+  // If this is empty, accept any alignment.
+  int MinAlignment = ?;
+
   // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Monotonic
   bit IsAtomicOrderingMonotonic = ?;
   // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Acquire
@@ -766,8 +770,6 @@ class PatFrags<dag ops, list<dag> frags,
   // cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::<VT>;
   // cast<StoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::<VT>;
   ValueType ScalarMemoryVT = ?;
-
-  // TODO: Add alignment
 }
 
 // PatFrag - A version of PatFrags matching only a single fragment.

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td?rev=367373&r1=367372&r2=367373&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td Tue Jul 30 17:14:43 2019
@@ -307,13 +307,9 @@ class AddressSpaceList<list<int> AS> {
   list<int> AddrSpaces = AS;
 }
 
-class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
-  return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
-}]>;
-
-class Aligned16Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
-  return cast<MemSDNode>(N)->getAlignment() >= 16;
-}]>;
+class Aligned<int Bytes> {
+  int MinAlignment = Bytes;
+}
 
 class LoadFrag <SDPatternOperator op> : PatFrag<(ops node:$ptr), (op node:$ptr)>;
 
@@ -481,21 +477,27 @@ def store_local_hi16 : StoreHi16 <truncs
 def truncstorei8_local_hi16 : StoreHi16<truncstorei8>, LocalAddress;
 def atomic_store_local : LocalStore <atomic_store>;
 
-def load_align8_local : Aligned8Bytes <
-  (ops node:$ptr), (load_local node:$ptr)
->;
 
-def load_align16_local : Aligned16Bytes <
-  (ops node:$ptr), (load_local node:$ptr)
->;
+def load_align8_local : PatFrag <(ops node:$ptr), (load_local node:$ptr)> {
+  let IsLoad = 1;
+  let MinAlignment = 8;
+}
 
-def store_align8_local : Aligned8Bytes <
-  (ops node:$val, node:$ptr), (store_local node:$val, node:$ptr)
->;
+def load_align16_local : PatFrag <(ops node:$ptr), (load_local node:$ptr)> {
+  let IsLoad = 1;
+  let MinAlignment = 16;
+}
+
+def store_align8_local: PatFrag<(ops node:$val, node:$ptr),
+                                (store_local node:$val, node:$ptr)>, Aligned<8> {
+  let IsStore = 1;
+
+}
+def store_align16_local: PatFrag<(ops node:$val, node:$ptr),
+                                (store_local node:$val, node:$ptr)>, Aligned<16> {
+  let IsStore = 1;
+}
 
-def store_align16_local : Aligned16Bytes <
-  (ops node:$val, node:$ptr), (store_local node:$val, node:$ptr)
->;
 
 def atomic_store_flat  : FlatStore <atomic_store>;
 def truncstorei8_hi16_flat  : StoreHi16<truncstorei8>, FlatStoreAddress;

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td?rev=367373&r1=367372&r2=367373&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td Tue Jul 30 17:14:43 2019
@@ -329,10 +329,12 @@ def AMDGPUatomic_ld_glue : SDNode <"ISD:
 
 def unindexedload_glue : PatFrag <(ops node:$ptr), (AMDGPUld_glue node:$ptr)> {
   let IsUnindexed = 1;
+  let IsLoad = 1;
 }
 
 def load_glue : PatFrag <(ops node:$ptr), (unindexedload_glue node:$ptr)> {
   let IsNonExtLoad = 1;
+  let IsLoad = 1;
 }
 
 def atomic_load_32_glue : PatFrag<(ops node:$ptr),
@@ -347,7 +349,7 @@ def atomic_load_64_glue : PatFrag<(ops n
   let MemoryVT = i64;
 }
 
-def extload_glue : PatFrag<(ops node:$ptr), (load_glue node:$ptr)> {
+def extload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
   let IsLoad = 1;
   let IsAnyExtLoad = 1;
 }
@@ -392,23 +394,31 @@ def sextloadi16_glue : PatFrag<(ops node
   let MemoryVT = i16;
 }
 
-def load_glue_align8 : Aligned8Bytes <
-  (ops node:$ptr), (load_glue node:$ptr)
->;
-def load_glue_align16 : Aligned16Bytes <
-  (ops node:$ptr), (load_glue node:$ptr)
->;
 
+let IsLoad = 1, AddressSpaces = LoadAddress_local.AddrSpaces in {
+def load_local_m0 : PatFrag<(ops node:$ptr), (load_glue node:$ptr)>;
+
+let MemoryVT = i8 in {
+def extloadi8_local_m0 : PatFrag<(ops node:$ptr), (extloadi8_glue node:$ptr)>;
+def sextloadi8_local_m0 : PatFrag<(ops node:$ptr), (sextloadi8_glue node:$ptr)>;
+def zextloadi8_local_m0 : PatFrag<(ops node:$ptr), (zextloadi8_glue node:$ptr)>;
+}
+
+let MemoryVT = i16 in {
+def extloadi16_local_m0 : PatFrag<(ops node:$ptr), (extloadi16_glue node:$ptr)>;
+def sextloadi16_local_m0 : PatFrag<(ops node:$ptr), (sextloadi16_glue node:$ptr)>;
+def zextloadi16_local_m0 : PatFrag<(ops node:$ptr), (zextloadi16_glue node:$ptr)>;
+}
+
+def load_align8_local_m0 : LoadFrag <load_glue>, LocalAddress {
+  let MinAlignment = 8;
+}
+def load_align16_local_m0 : LoadFrag <load_glue>, LocalAddress {
+  let MinAlignment = 16;
+}
+
+} // End IsLoad = 1
 
-def load_local_m0 : LoadFrag<load_glue>, LocalAddress;
-def sextloadi8_local_m0 : LoadFrag<sextloadi8_glue>, LocalAddress;
-def sextloadi16_local_m0 : LoadFrag<sextloadi16_glue>, LocalAddress;
-def extloadi8_local_m0 : LoadFrag<extloadi8_glue>, LocalAddress;
-def zextloadi8_local_m0 : LoadFrag<zextloadi8_glue>, LocalAddress;
-def extloadi16_local_m0 : LoadFrag<extloadi16_glue>, LocalAddress;
-def zextloadi16_local_m0 : LoadFrag<zextloadi16_glue>, LocalAddress;
-def load_align8_local_m0 : LoadFrag <load_glue_align8>, LocalAddress;
-def load_align16_local_m0 : LoadFrag <load_glue_align16>, LocalAddress;
 def atomic_load_32_local_m0 : LoadFrag<atomic_load_32_glue>, LocalAddress;
 def atomic_load_64_local_m0 : LoadFrag<atomic_load_64_glue>, LocalAddress;
 
@@ -455,13 +465,11 @@ def truncstorei16_glue : PatFrag<(ops no
   let MemoryVT = i16;
 }
 
-def store_glue_align8 : Aligned8Bytes <
-  (ops node:$value, node:$ptr), (store_glue node:$value, node:$ptr)
->;
-
-def store_glue_align16 : Aligned16Bytes <
-  (ops node:$value, node:$ptr), (store_glue node:$value, node:$ptr)
->;
+let IsStore = 1, AddressSpaces = StoreAddress_local.AddrSpaces in {
+def store_glue_align8 : PatFrag<(ops node:$val, node:$ptr),
+                                (store_glue node:$val, node:$ptr)>, Aligned<8>;
+def store_glue_align16 : PatFrag<(ops node:$val, node:$ptr),
+                                (store_glue node:$val, node:$ptr)>, Aligned<16>;
 
 def store_local_m0 : StoreFrag<store_glue>, LocalAddress;
 def truncstorei8_local_m0 : StoreFrag<truncstorei8_glue>, LocalAddress;
@@ -470,6 +478,7 @@ def atomic_store_local_m0 : StoreFrag<AM
 
 def store_align8_local_m0 : StoreFrag<store_glue_align8>, LocalAddress;
 def store_align16_local_m0 : StoreFrag<store_glue_align16>, LocalAddress;
+}
 
 def si_setcc_uniform : PatFrag <
   (ops node:$lhs, node:$rhs, node:$cond),

Modified: llvm/trunk/test/TableGen/address-space-patfrags.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/TableGen/address-space-patfrags.td?rev=367373&r1=367372&r2=367373&view=diff
==============================================================================
--- llvm/trunk/test/TableGen/address-space-patfrags.td (original)
+++ llvm/trunk/test/TableGen/address-space-patfrags.td Tue Jul 30 17:14:43 2019
@@ -19,6 +19,7 @@ def pat_frag_a : PatFrag <(ops node:$ptr
   let AddressSpaces = [ 999 ];
   let IsLoad = 1; // FIXME: Can this be inferred?
   let MemoryVT = i32;
+  let MinAlignment = 2;
 }
 
 // With multiple address spaces
@@ -44,49 +45,53 @@ def inst_c : Instruction {
 }
 
 // SDAG: case 2: {
-// SDAG: // Predicate_pat_frag_a
+// SDAG-NEXT: // Predicate_pat_frag_b
 // SDAG-NEXT: SDNode *N = Node;
 // SDAG-NEXT: (void)N;
 // SDAG-NEXT: unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();
-
-// SDAG-NEXT: if (AddrSpace != 999)
+// SDAG-NEXT: if (AddrSpace != 123 && AddrSpace != 455)
 // SDAG-NEXT: return false;
 // SDAG-NEXT: if (cast<MemSDNode>(N)->getMemoryVT() != MVT::i32) return false;
 // SDAG-NEXT: return true;
 
-// GISEL: GIM_Try, /*On fail goto*//*Label 0*/ 47, // Rule ID 0 //
+
+// GISEL: GIM_Try, /*On fail goto*//*Label 0*/ {{[0-9]+}}, // Rule ID 0 //
 // GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
 // GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
 // GISEL-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
-// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/1, /*AddrSpace*/999,
+// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2, /*AddrSpace*/123, /*AddrSpace*/455,
 // GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/4,
 // GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
 def : Pat <
-  (pat_frag_a GPR32:$src),
-  (inst_a GPR32:$src)
+  (pat_frag_b GPR32:$src),
+  (inst_b GPR32:$src)
 >;
 
+
 // SDAG: case 3: {
-// SDAG-NEXT: // Predicate_pat_frag_b
+// SDAG: // Predicate_pat_frag_a
 // SDAG-NEXT: SDNode *N = Node;
 // SDAG-NEXT: (void)N;
 // SDAG-NEXT: unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();
-// SDAG-NEXT: if (AddrSpace != 123 && AddrSpace != 455)
+
+// SDAG-NEXT: if (AddrSpace != 999)
+// SDAG-NEXT: return false;
+// SDAG-NEXT: if (cast<MemSDNode>(N)->getAlignment() < 2)
 // SDAG-NEXT: return false;
 // SDAG-NEXT: if (cast<MemSDNode>(N)->getMemoryVT() != MVT::i32) return false;
 // SDAG-NEXT: return true;
 
-
-// GISEL: GIM_Try, /*On fail goto*//*Label 1*/ 95, // Rule ID 1 //
+// GISEL: GIM_Try, /*On fail goto*//*Label 1*/ {{[0-9]+}}, // Rule ID 1 //
 // GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
 // GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
 // GISEL-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
-// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2, /*AddrSpace*/123, /*AddrSpace*/455,
+// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/1, /*AddrSpace*/999,
+// GISEL-NEXT: GIM_CheckMemoryAlignment, /*MI*/0, /*MMO*/0, /*MinAlign*/2,
 // GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/4,
 // GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
 def : Pat <
-  (pat_frag_b GPR32:$src),
-  (inst_b GPR32:$src)
+  (pat_frag_a GPR32:$src),
+  (inst_a GPR32:$src)
 >;
 
 
@@ -98,7 +103,7 @@ def truncstorei16_addrspace : PatFrag<(o
 }
 
 // Test truncstore without a specific MemoryVT
-// GISEL: GIM_Try, /*On fail goto*//*Label 2*/ 133, // Rule ID 2 //
+// GISEL: GIM_Try, /*On fail goto*//*Label 2*/ {{[0-9]+}}, // Rule ID 2 //
 // GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
 // GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_STORE,
 // GISEL-NEXT: GIM_CheckMemorySizeLessThanLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
@@ -111,7 +116,7 @@ def : Pat <
 >;
 
 // Test truncstore with specific MemoryVT
-// GISEL: GIM_Try, /*On fail goto*//*Label 3*/ 181, // Rule ID 3 //
+// GISEL: GIM_Try, /*On fail goto*//*Label 3*/ {{[0-9]+}}, // Rule ID 3 //
 // GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
 // GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_STORE,
 // GISEL-NEXT: GIM_CheckMemorySizeLessThanLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,

Modified: llvm/trunk/utils/TableGen/CodeGenDAGPatterns.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/CodeGenDAGPatterns.cpp?rev=367373&r1=367372&r2=367373&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/CodeGenDAGPatterns.cpp (original)
+++ llvm/trunk/utils/TableGen/CodeGenDAGPatterns.cpp Tue Jul 30 17:14:43 2019
@@ -883,7 +883,8 @@ std::string TreePredicateFn::getPredCode
   if (isLoad()) {
     if (!isUnindexed() && !isNonExtLoad() && !isAnyExtLoad() &&
         !isSignExtLoad() && !isZeroExtLoad() && getMemoryVT() == nullptr &&
-        getScalarMemoryVT() == nullptr)
+        getScalarMemoryVT() == nullptr && getAddressSpaces() == nullptr &&
+        getMinAlignment() < 1)
       PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
                       "IsLoad cannot be used by itself");
   } else {
@@ -903,7 +904,8 @@ std::string TreePredicateFn::getPredCode
 
   if (isStore()) {
     if (!isUnindexed() && !isTruncStore() && !isNonTruncStore() &&
-        getMemoryVT() == nullptr && getScalarMemoryVT() == nullptr)
+        getMemoryVT() == nullptr && getScalarMemoryVT() == nullptr &&
+        getAddressSpaces() == nullptr && getMinAlignment() < 1)
       PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
                       "IsStore cannot be used by itself");
   } else {
@@ -977,6 +979,13 @@ std::string TreePredicateFn::getPredCode
       Code += ")\nreturn false;\n";
     }
 
+    int64_t MinAlign = getMinAlignment();
+    if (MinAlign > 0) {
+      Code += "if (cast<MemSDNode>(N)->getAlignment() < ";
+      Code += utostr(MinAlign);
+      Code += ")\nreturn false;\n";
+    }
+
     Record *MemoryVT = getMemoryVT();
 
     if (MemoryVT)
@@ -1177,6 +1186,13 @@ ListInit *TreePredicateFn::getAddressSpa
   return R->getValueAsListInit("AddressSpaces");
 }
 
+int64_t TreePredicateFn::getMinAlignment() const {
+  Record *R = getOrigPatFragRecord()->getRecord();
+  if (R->isValueUnset("MinAlignment"))
+    return 0;
+  return R->getValueAsInt("MinAlignment");
+}
+
 Record *TreePredicateFn::getScalarMemoryVT() const {
   Record *R = getOrigPatFragRecord()->getRecord();
   if (R->isValueUnset("ScalarMemoryVT"))

Modified: llvm/trunk/utils/TableGen/CodeGenDAGPatterns.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/CodeGenDAGPatterns.h?rev=367373&r1=367372&r2=367373&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/CodeGenDAGPatterns.h (original)
+++ llvm/trunk/utils/TableGen/CodeGenDAGPatterns.h Tue Jul 30 17:14:43 2019
@@ -594,6 +594,7 @@ public:
   Record *getScalarMemoryVT() const;
 
   ListInit *getAddressSpaces() const;
+  int64_t getMinAlignment() const;
 
   // If true, indicates that GlobalISel-based C++ code was supplied.
   bool hasGISelPredicateCode() const;

Modified: llvm/trunk/utils/TableGen/GlobalISelEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/GlobalISelEmitter.cpp?rev=367373&r1=367372&r2=367373&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/GlobalISelEmitter.cpp (original)
+++ llvm/trunk/utils/TableGen/GlobalISelEmitter.cpp Tue Jul 30 17:14:43 2019
@@ -249,6 +249,10 @@ static std::string explainPredicates(con
       OS << ']';
     }
 
+    int64_t MinAlign = P.getMinAlignment();
+    if (MinAlign > 0)
+      Explanation += " MinAlign=" + utostr(MinAlign);
+
     if (P.isAtomicOrderingMonotonic())
       Explanation += " monotonic";
     if (P.isAtomicOrderingAcquire())
@@ -329,6 +333,9 @@ static Error isTrivialOperatorNode(const
       const ListInit *AddrSpaces = Predicate.getAddressSpaces();
       if (AddrSpaces && !AddrSpaces->empty())
         continue;
+
+      if (Predicate.getMinAlignment() > 0)
+        continue;
     }
 
     if (Predicate.isAtomic() && Predicate.getMemoryVT())
@@ -1052,6 +1059,7 @@ public:
     IPM_MemoryLLTSize,
     IPM_MemoryVsLLTSize,
     IPM_MemoryAddressSpace,
+    IPM_MemoryAlignment,
     IPM_GenericPredicate,
     OPM_SameOperand,
     OPM_ComplexPattern,
@@ -1849,6 +1857,40 @@ public:
   }
 };
 
+class MemoryAlignmentPredicateMatcher : public InstructionPredicateMatcher {
+protected:
+  unsigned MMOIdx;
+  int MinAlign;
+
+public:
+  MemoryAlignmentPredicateMatcher(unsigned InsnVarID, unsigned MMOIdx,
+                                  int MinAlign)
+      : InstructionPredicateMatcher(IPM_MemoryAlignment, InsnVarID),
+        MMOIdx(MMOIdx), MinAlign(MinAlign) {
+    assert(MinAlign > 0);
+  }
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == IPM_MemoryAlignment;
+  }
+
+  bool isIdentical(const PredicateMatcher &B) const override {
+    if (!InstructionPredicateMatcher::isIdentical(B))
+      return false;
+    auto *Other = cast<MemoryAlignmentPredicateMatcher>(&B);
+    return MMOIdx == Other->MMOIdx && MinAlign == Other->MinAlign;
+  }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_CheckMemoryAlignment")
+          << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+          << MatchTable::Comment("MMO") << MatchTable::IntValue(MMOIdx)
+          << MatchTable::Comment("MinAlign") << MatchTable::IntValue(MinAlign)
+          << MatchTable::LineBreak;
+  }
+};
+
 /// Generates code to check that the size of an MMO is less-than, equal-to, or
 /// greater than a given LLT.
 class MemoryVsLLTSizePredicateMatcher : public InstructionPredicateMatcher {
@@ -3287,6 +3329,10 @@ Expected<InstructionMatcher &> GlobalISe
             0, ParsedAddrSpaces);
         }
       }
+
+      int64_t MinAlign = Predicate.getMinAlignment();
+      if (MinAlign > 0)
+        InsnMatcher.addPredicate<MemoryAlignmentPredicateMatcher>(0, MinAlign);
     }
 
     // G_LOAD is used for both non-extending and any-extending loads.




More information about the llvm-commits mailing list