[llvm] 116affb - TableGen/GlobalISel: Allow inst matcher to check multiple opcodes

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 24 10:48:58 PDT 2020


Author: Matt Arsenault
Date: 2020-08-24T13:48:51-04:00
New Revision: 116affb18dfc8c48ad0bd5134b42a51e34ad6fd8

URL: https://github.com/llvm/llvm-project/commit/116affb18dfc8c48ad0bd5134b42a51e34ad6fd8
DIFF: https://github.com/llvm/llvm-project/commit/116affb18dfc8c48ad0bd5134b42a51e34ad6fd8.diff

LOG: TableGen/GlobalISel: Allow inst matcher to check multiple opcodes

This initially handles immAllOnesV, which should match either
G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC. In the future, it could be
used for other pattern cases that map to multiple G_* instructions,
such as G_ADD and G_PTR_ADD.
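
For illustration, a minimal TableGen sketch of the kind of pattern this
serves (the VNOT instruction and its xor-based pattern body are hypothetical
and not part of this commit; VecReg128 and the v4i16 type are assumed to
follow the setup used by the GlobalISelEmitter-immAllZeroOne.td test below):

    // The importer now emits GIM_CheckOpcodeIsEither for the immAllOnesV
    // operand, so the all-ones splat may come from either a G_BUILD_VECTOR
    // or a G_BUILD_VECTOR_TRUNC, rather than from G_BUILD_VECTOR only.
    def VNOT : I<(outs VecReg128:$dst), (ins VecReg128:$src0),
                 [(set v4i16:$dst, (xor v4i16:$src0, immAllOnesV))]>;

The emitted check then takes the form shown in the updated test checks below,
e.g. GIM_CheckOpcodeIsEither, /*MI*/1, TargetOpcode::G_BUILD_VECTOR,
TargetOpcode::G_BUILD_VECTOR_TRUNC.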

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
    llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
    llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/orn2.ll
    llvm/test/TableGen/GlobalISelEmitter-immAllZeroOne.td
    llvm/utils/TableGen/GlobalISelEmitter.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index 756eab28e3ac..17c1ec36c24f 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -112,6 +112,14 @@ enum {
   /// - InsnID - Instruction ID
   /// - Expected opcode
   GIM_CheckOpcode,
+
+  /// Check the opcode on the specified instruction, checking 2 acceptable
+  /// alternatives.
+  /// - InsnID - Instruction ID
+  /// - Expected opcode
+  /// - Alternative expected opcode
+  GIM_CheckOpcodeIsEither,
+
   /// Check the instruction has the right number of operands
   /// - InsnID - Instruction ID
   /// - Expected number of operands

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index 85caed0ecae3..1f1fb5aca875 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -154,24 +154,31 @@ bool InstructionSelector::executeMatchTable(
       break;
     }
 
-    case GIM_CheckOpcode: {
+    case GIM_CheckOpcode:
+    case GIM_CheckOpcodeIsEither: {
       int64_t InsnID = MatchTable[CurrentIdx++];
-      int64_t Expected = MatchTable[CurrentIdx++];
+      int64_t Expected0 = MatchTable[CurrentIdx++];
+      int64_t Expected1 = -1;
+      if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+        Expected1 = MatchTable[CurrentIdx++];
 
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
       unsigned Opcode = State.MIs[InsnID]->getOpcode();
 
       DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
-                      dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
-                             << "], ExpectedOpcode=" << Expected
-                             << ") // Got=" << Opcode << "\n");
-      if (Opcode != Expected) {
+        dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
+        << "], ExpectedOpcode=" << Expected0;
+        if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+          dbgs() << " || " << Expected1;
+        dbgs() << ") // Got=" << Opcode << "\n";
+      );
+
+      if (Opcode != Expected0 && Opcode != Expected1) {
         if (handleReject() == RejectAndGiveUp)
           return false;
       }
       break;
     }
-
     case GIM_SwitchOpcode: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t LowerBound = MatchTable[CurrentIdx++];

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll
index 4fb1b2b8e725..dffe5ec9cdc4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll
@@ -346,8 +346,7 @@ define amdgpu_ps i32 @s_andn2_v2i16(<2 x i16> inreg %src0, <2 x i16> inreg %src1
 ;
 ; GFX9-LABEL: s_andn2_v2i16:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_xor_b32 s0, s3, -1
-; GFX9-NEXT:    s_and_b32 s0, s2, s0
+; GFX9-NEXT:    s_andn2_b32 s0, s2, s3
 ; GFX9-NEXT:    ; return to shader part epilog
   %not.src1 = xor <2 x i16> %src1, <i16 -1, i16 -1>
   %and = and <2 x i16> %src0, %not.src1
@@ -371,8 +370,7 @@ define amdgpu_ps i32 @s_andn2_v2i16_commute(<2 x i16> inreg %src0, <2 x i16> inr
 ;
 ; GFX9-LABEL: s_andn2_v2i16_commute:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_xor_b32 s0, s3, -1
-; GFX9-NEXT:    s_and_b32 s0, s0, s2
+; GFX9-NEXT:    s_andn2_b32 s0, s2, s3
 ; GFX9-NEXT:    ; return to shader part epilog
   %not.src1 = xor <2 x i16> %src1, <i16 -1, i16 -1>
   %and = and <2 x i16> %not.src1, %src0
@@ -397,7 +395,7 @@ define amdgpu_ps { i32, i32 } @s_andn2_v2i16_multi_use(<2 x i16> inreg %src0, <2
 ; GFX9-LABEL: s_andn2_v2i16_multi_use:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_xor_b32 s1, s3, -1
-; GFX9-NEXT:    s_and_b32 s0, s2, s1
+; GFX9-NEXT:    s_andn2_b32 s0, s2, s3
 ; GFX9-NEXT:    ; return to shader part epilog
   %not.src1 = xor <2 x i16> %src1, <i16 -1, i16 -1>
   %and = and <2 x i16> %src0, %not.src1
@@ -429,9 +427,8 @@ define amdgpu_ps { i32, i32 } @s_andn2_v2i16_multi_foldable_use(<2 x i16> inreg
 ;
 ; GFX9-LABEL: s_andn2_v2i16_multi_foldable_use:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_xor_b32 s1, s4, -1
-; GFX9-NEXT:    s_and_b32 s0, s2, s1
-; GFX9-NEXT:    s_and_b32 s1, s3, s1
+; GFX9-NEXT:    s_andn2_b32 s0, s2, s4
+; GFX9-NEXT:    s_andn2_b32 s1, s3, s4
 ; GFX9-NEXT:    ; return to shader part epilog
   %not.src2 = xor <2 x i16> %src2, <i16 -1, i16 -1>
   %and0 = and <2 x i16> %src0, %not.src2

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/orn2.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/orn2.ll
index 2db7d19ac9dd..337aad36f3bc 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/orn2.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/orn2.ll
@@ -346,8 +346,7 @@ define amdgpu_ps i32 @s_orn2_v2i16(<2 x i16> inreg %src0, <2 x i16> inreg %src1)
 ;
 ; GFX9-LABEL: s_orn2_v2i16:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_xor_b32 s0, s3, -1
-; GFX9-NEXT:    s_or_b32 s0, s2, s0
+; GFX9-NEXT:    s_orn2_b32 s0, s2, s3
 ; GFX9-NEXT:    ; return to shader part epilog
   %not.src1 = xor <2 x i16> %src1, <i16 -1, i16 -1>
   %or = or <2 x i16> %src0, %not.src1
@@ -371,8 +370,7 @@ define amdgpu_ps i32 @s_orn2_v2i16_commute(<2 x i16> inreg %src0, <2 x i16> inre
 ;
 ; GFX9-LABEL: s_orn2_v2i16_commute:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_xor_b32 s0, s3, -1
-; GFX9-NEXT:    s_or_b32 s0, s0, s2
+; GFX9-NEXT:    s_orn2_b32 s0, s2, s3
 ; GFX9-NEXT:    ; return to shader part epilog
   %not.src1 = xor <2 x i16> %src1, <i16 -1, i16 -1>
   %or = or <2 x i16> %not.src1, %src0
@@ -397,7 +395,7 @@ define amdgpu_ps { i32, i32 } @s_orn2_v2i16_multi_use(<2 x i16> inreg %src0, <2
 ; GFX9-LABEL: s_orn2_v2i16_multi_use:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_xor_b32 s1, s3, -1
-; GFX9-NEXT:    s_or_b32 s0, s2, s1
+; GFX9-NEXT:    s_orn2_b32 s0, s2, s3
 ; GFX9-NEXT:    ; return to shader part epilog
   %not.src1 = xor <2 x i16> %src1, <i16 -1, i16 -1>
   %or = or <2 x i16> %src0, %not.src1
@@ -429,9 +427,8 @@ define amdgpu_ps { i32, i32 } @s_orn2_v2i16_multi_foldable_use(<2 x i16> inreg %
 ;
 ; GFX9-LABEL: s_orn2_v2i16_multi_foldable_use:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_xor_b32 s1, s4, -1
-; GFX9-NEXT:    s_or_b32 s0, s2, s1
-; GFX9-NEXT:    s_or_b32 s1, s3, s1
+; GFX9-NEXT:    s_orn2_b32 s0, s2, s4
+; GFX9-NEXT:    s_orn2_b32 s1, s3, s4
 ; GFX9-NEXT:    ; return to shader part epilog
   %not.src2 = xor <2 x i16> %src2, <i16 -1, i16 -1>
   %or0 = or <2 x i16> %src0, %not.src2

diff --git a/llvm/test/TableGen/GlobalISelEmitter-immAllZeroOne.td b/llvm/test/TableGen/GlobalISelEmitter-immAllZeroOne.td
index df04ffa63282..7ff56c06f21c 100644
--- a/llvm/test/TableGen/GlobalISelEmitter-immAllZeroOne.td
+++ b/llvm/test/TableGen/GlobalISelEmitter-immAllZeroOne.td
@@ -4,13 +4,20 @@
 include "llvm/Target/Target.td"
 include "GlobalISelEmitterCommon.td"
 
+// GISEL-OPT: GIM_SwitchOpcode
+// GISEL-OPT-NEXT: /*TargetOpcode::G_SHL*/
+// GISEL-OPT-NEXT: /*TargetOpcode::G_LSHR*/
+// GISEL-OPT-NEXT: // Label
 
+
+// GISEL-OPT: GIM_Try,
 // GISEL-OPT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_v4s16,
-// GISEL-OPT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_BUILD_VECTOR,
+// GISEL-OPT: GIM_CheckOpcodeIsEither, /*MI*/1, TargetOpcode::G_BUILD_VECTOR, TargetOpcode::G_BUILD_VECTOR_TRUNC,
 // GISEL-OPT: GIM_CheckIsBuildVectorAllZeros, /*MI*/1,
 
+// GISEL-OPT: GIM_Try,
 // GISEL-OPT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_v4s16,
-// GISEL-OPT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_BUILD_VECTOR,
+// GISEL-OPT: GIM_CheckOpcodeIsEither, /*MI*/1, TargetOpcode::G_BUILD_VECTOR, TargetOpcode::G_BUILD_VECTOR_TRUNC,
 // GISEL-OPT: GIM_CheckIsBuildVectorAllOnes, /*MI*/1,
 
 
@@ -19,7 +26,7 @@ include "GlobalISelEmitterCommon.td"
 // GISEL-NOOPT: // MIs[0] Operand 2
 // GISEL-NOOPT-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_v4s16,
 // GISEL-NOOPT-NEXT: GIM_RecordInsn, /*DefineMI*/1, /*MI*/0, /*OpIdx*/2, // MIs[1]
-// GISEL-NOOPT-NEXT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_BUILD_VECTOR,
+// GISEL-NOOPT-NEXT: GIM_CheckOpcodeIsEither, /*MI*/1, TargetOpcode::G_BUILD_VECTOR, TargetOpcode::G_BUILD_VECTOR_TRUNC,
 // GISEL-NOOPT-NEXT: GIM_CheckIsBuildVectorAllOnes, /*MI*/1,
 // GISEL-NOOPT-NEXT: // MIs[1] Operand 0
 // GISEL-NOOPT-NEXT: GIM_CheckType, /*MI*/1, /*Op*/0, /*Type*/GILLT_v4s16,
@@ -34,7 +41,7 @@ def VFOOONES : I<(outs VecReg128:$dst), (ins VecReg128:$src0),
 // GISEL-NOOPT: // MIs[0] Operand 2
 // GISEL-NOOPT-NEXT: GIM_CheckType, /*MI*/0, /*Op*/2, /*Type*/GILLT_v4s16,
 // GISEL-NOOPT-NEXT: GIM_RecordInsn, /*DefineMI*/1, /*MI*/0, /*OpIdx*/2, // MIs[1]
-// GISEL-NOOPT-NEXT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_BUILD_VECTOR,
+// GISEL-NOOPT-NEXT: GIM_CheckOpcodeIsEither, /*MI*/1, TargetOpcode::G_BUILD_VECTOR, TargetOpcode::G_BUILD_VECTOR_TRUNC,
 // GISEL-NOOPT-NEXT: GIM_CheckIsBuildVectorAllZeros, /*MI*/1,
 // GISEL-NOOPT-NEXT: // MIs[1] Operand 0
 // GISEL-NOOPT-NEXT: GIM_CheckType, /*MI*/1, /*Op*/0, /*Type*/GILLT_v4s16,

diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index dfafd46fd530..862523efd5be 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -1671,10 +1671,23 @@ PredicateListMatcher<PredicateMatcher>::getNoPredicateComment() const {
 /// Generates code to check the opcode of an instruction.
 class InstructionOpcodeMatcher : public InstructionPredicateMatcher {
 protected:
-  const CodeGenInstruction *I;
+  // Allow matching one to several, similar opcodes that share properties. This
+  // is to handle patterns where one SelectionDAG operation maps to multiple
+  // GlobalISel ones (e.g. G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC). The first
+  // is treated as the canonical opcode.
+  SmallVector<const CodeGenInstruction *, 2> Insts;
 
   static DenseMap<const CodeGenInstruction *, unsigned> OpcodeValues;
 
+
+  MatchTableRecord getInstValue(const CodeGenInstruction *I) const {
+    const auto VI = OpcodeValues.find(I);
+    if (VI != OpcodeValues.end())
+      return MatchTable::NamedValue(I->Namespace, I->TheDef->getName(),
+                                    VI->second);
+    return MatchTable::NamedValue(I->Namespace, I->TheDef->getName());
+  }
+
 public:
   static void initOpcodeValuesMap(const CodeGenTarget &Target) {
     OpcodeValues.clear();
@@ -1684,8 +1697,13 @@ class InstructionOpcodeMatcher : public InstructionPredicateMatcher {
       OpcodeValues[I] = OpcodeValue++;
   }
 
-  InstructionOpcodeMatcher(unsigned InsnVarID, const CodeGenInstruction *I)
-      : InstructionPredicateMatcher(IPM_Opcode, InsnVarID), I(I) {}
+  InstructionOpcodeMatcher(unsigned InsnVarID,
+                           ArrayRef<const CodeGenInstruction *> I)
+      : InstructionPredicateMatcher(IPM_Opcode, InsnVarID),
+        Insts(I.begin(), I.end()) {
+    assert((Insts.size() == 1 || Insts.size() == 2) &&
+           "unexpected number of opcode alternatives");
+  }
 
   static bool classof(const PredicateMatcher *P) {
     return P->getKind() == IPM_Opcode;
@@ -1693,22 +1711,36 @@ class InstructionOpcodeMatcher : public InstructionPredicateMatcher {
 
   bool isIdentical(const PredicateMatcher &B) const override {
     return InstructionPredicateMatcher::isIdentical(B) &&
-           I == cast<InstructionOpcodeMatcher>(&B)->I;
+           Insts == cast<InstructionOpcodeMatcher>(&B)->Insts;
+  }
+
+  bool hasValue() const override {
+    return Insts.size() == 1 && OpcodeValues.count(Insts[0]);
   }
+
+  // TODO: This is used for the SwitchMatcher optimization. We should be able to
+  // return a list of the opcodes to match.
   MatchTableRecord getValue() const override {
+    assert(Insts.size() == 1);
+
+    const CodeGenInstruction *I = Insts[0];
     const auto VI = OpcodeValues.find(I);
     if (VI != OpcodeValues.end())
       return MatchTable::NamedValue(I->Namespace, I->TheDef->getName(),
                                     VI->second);
     return MatchTable::NamedValue(I->Namespace, I->TheDef->getName());
   }
-  bool hasValue() const override { return OpcodeValues.count(I); }
 
   void emitPredicateOpcodes(MatchTable &Table,
                             RuleMatcher &Rule) const override {
-    Table << MatchTable::Opcode("GIM_CheckOpcode") << MatchTable::Comment("MI")
-          << MatchTable::IntValue(InsnVarID) << getValue()
-          << MatchTable::LineBreak;
+    StringRef CheckType = Insts.size() == 1 ?
+                          "GIM_CheckOpcode" : "GIM_CheckOpcodeIsEither";
+    Table << MatchTable::Opcode(CheckType) << MatchTable::Comment("MI")
+          << MatchTable::IntValue(InsnVarID);
+
+    for (const CodeGenInstruction *I : Insts)
+      Table << getInstValue(I);
+    Table << MatchTable::LineBreak;
   }
 
   /// Compare the priority of this object and B.
@@ -1726,20 +1758,32 @@ class InstructionOpcodeMatcher : public InstructionPredicateMatcher {
     // using instruction frequency information to improve compile time.
     if (const InstructionOpcodeMatcher *BO =
             dyn_cast<InstructionOpcodeMatcher>(&B))
-      return I->TheDef->getName() < BO->I->TheDef->getName();
+      return Insts[0]->TheDef->getName() < BO->Insts[0]->TheDef->getName();
 
     return false;
   };
 
   bool isConstantInstruction() const {
-    return I->TheDef->getName() == "G_CONSTANT";
+    return Insts.size() == 1 && Insts[0]->TheDef->getName() == "G_CONSTANT";
+  }
+
+  // The first opcode is the canonical opcode, and later are alternatives.
+  StringRef getOpcode() const {
+    return Insts[0]->TheDef->getName();
+  }
+
+  ArrayRef<const CodeGenInstruction *> getAlternativeOpcodes() {
+    return Insts;
   }
 
-  StringRef getOpcode() const { return I->TheDef->getName(); }
-  bool isVariadicNumOperands() const { return I->Operands.isVariadic; }
+  bool isVariadicNumOperands() const {
+    // If one is variadic, they all should be.
+    return Insts[0]->Operands.isVariadic;
+  }
 
   StringRef getOperandType(unsigned OpIdx) const {
-    return I->Operands[OpIdx].OperandType;
+    // Types expected to be uniform for all alternatives.
+    return Insts[0]->Operands[OpIdx].OperandType;
   }
 };
 
@@ -4141,8 +4185,16 @@ Error GlobalISelEmitter::importChildMatcher(
       InstructionOperandMatcher &InsnOperand = **MaybeInsnOperand;
 
       ValueTypeByHwMode VTy = ChildTypes.front().getValueTypeByHwMode();
+
+      const CodeGenInstruction &BuildVector
+        = Target.getInstruction(RK.getDef("G_BUILD_VECTOR"));
+      const CodeGenInstruction &BuildVectorTrunc
+        = Target.getInstruction(RK.getDef("G_BUILD_VECTOR_TRUNC"));
+
+      // Treat G_BUILD_VECTOR as the canonical opcode, and G_BUILD_VECTOR_TRUNC
+      // as an alternative.
       InsnOperand.getInsnMatcher().addPredicate<InstructionOpcodeMatcher>(
-          &Target.getInstruction(RK.getDef("G_BUILD_VECTOR")));
+      makeArrayRef({&BuildVector, &BuildVectorTrunc}));
 
       // TODO: Handle both G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC We could
       // theoretically not emit any opcode check, but getOpcodeMatcher currently


        

