[llvm] 655714a - [AArch64] Use GlobalISel MatchTable Combiner Backend

via llvm-commits <llvm-commits at lists.llvm.org>
Tue Jul 11 02:27:22 PDT 2023


Author: pvanhout
Date: 2023-07-11T11:27:14+02:00
New Revision: 655714a300ff303a1f283718c34d9e27e096b319

URL: https://github.com/llvm/llvm-project/commit/655714a300ff303a1f283718c34d9e27e096b319
DIFF: https://github.com/llvm/llvm-project/commit/655714a300ff303a1f283718c34d9e27e096b319.diff

LOG: [AArch64] Use GlobalISel MatchTable Combiner Backend

Only a few minor test changes were needed because I removed the "Helper" suffix from the combiner names; they aren't really helpers anymore, but rather the implementations themselves.

Depends on D153757

NOTE: This should land only if D153757 (RFC) lands too.

Reviewed By: aemerson

Differential Revision: https://reviews.llvm.org/D153850
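
For context, each rewritten pass now drives a TableGen-generated match table through a GIMatchTableExecutor subclass instead of the old *GenCombinerHelper wrapper. Below is a condensed view of the new per-instruction entry point, taken from the AArch64PostLegalizerCombiner.cpp hunk in this diff (the other three passes follow the same shape):

  bool AArch64PostLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
                                                 MachineInstr &MI,
                                                 MachineIRBuilder &B) const {
    const auto &STI = MI.getMF()->getSubtarget<AArch64Subtarget>();
    const auto *LI = STI.getLegalizerInfo();
    CombinerHelper Helper(Observer, B, /*IsPreLegalize*/ false, KB, MDT, LI);
    // Impl derives from GIMatchTableExecutor; its tryCombineAll() is emitted
    // by -gen-global-isel-combiner-matchtable between the GET_GICOMBINER_IMPL
    // guards of the generated .inc file.
    AArch64PostLegalizerCombinerImpl Impl(RuleConfig, STI, Observer, B, Helper);
    Impl.setupMF(*MI.getMF(), KB);  // bind the executor to the current function
    return Impl.tryCombineAll(MI);  // run the generated match table
  }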

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64Combine.td
    llvm/lib/Target/AArch64/CMakeLists.txt
    llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
    llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
    llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
    llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/combine-and-or-disjoint-mask.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-icmp-to-lhs-known-bits.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-mulo-with-2.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir
    llvm/test/CodeGen/AArch64/GlobalISel/lower-neon-vector-fcmp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and-postlegalize.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combine-ptr-add-chain.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-anyext-to-zext.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuf-to-ins.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-rotate.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-and-mask.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern-align.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-mulo-zero.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-br.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-copy-prop-disabled.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index 4408a22f340fa4..4916581e79aff0 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -33,21 +33,15 @@ def fold_global_offset : GICombineRule<
   (apply [{ applyFoldGlobalOffset(*${root}, MRI, B, Observer, ${matchinfo});}])
 >;
 
-def AArch64PreLegalizerCombinerHelper: GICombinerHelper<
-  "AArch64GenPreLegalizerCombinerHelper", [all_combines,
-                                           fconstant_to_constant,
-                                           icmp_redundant_trunc,
-                                           fold_global_offset]> {
-  let DisableRuleOption = "aarch64prelegalizercombiner-disable-rule";
-  let StateClass = "AArch64PreLegalizerCombinerHelperState";
-  let AdditionalArguments = [];
+def AArch64PreLegalizerCombiner: GICombinerHelper<
+  "AArch64PreLegalizerCombinerImpl", [all_combines,
+                                      fconstant_to_constant,
+                                      icmp_redundant_trunc,
+                                      fold_global_offset]> {
 }
 
-def AArch64O0PreLegalizerCombinerHelper: GICombinerHelper<
-  "AArch64GenO0PreLegalizerCombinerHelper", [optnone_combines]> {
-  let DisableRuleOption = "aarch64O0prelegalizercombiner-disable-rule";
-  let StateClass = "AArch64O0PreLegalizerCombinerHelperState";
-  let AdditionalArguments = [];
+def AArch64O0PreLegalizerCombiner: GICombinerHelper<
+  "AArch64O0PreLegalizerCombinerImpl", [optnone_combines]> {
 }
 
 // Matchdata for combines which replace a G_SHUFFLE_VECTOR with a
@@ -213,18 +207,17 @@ def vector_sext_inreg_to_shift : GICombineRule<
 // Post-legalization combines which should happen at all optimization levels.
 // (E.g. ones that facilitate matching for the selector) For example, matching
 // pseudos.
-def AArch64PostLegalizerLoweringHelper
-    : GICombinerHelper<"AArch64GenPostLegalizerLoweringHelper",
+def AArch64PostLegalizerLowering
+    : GICombinerHelper<"AArch64PostLegalizerLoweringImpl",
                        [shuffle_vector_lowering, vashr_vlshr_imm,
                         icmp_lowering, build_vector_lowering,
                         lower_vector_fcmp, form_truncstore,
                         vector_sext_inreg_to_shift]> {
-  let DisableRuleOption = "aarch64postlegalizerlowering-disable-rule";
 }
 
 // Post-legalization combines which are primarily optimizations.
-def AArch64PostLegalizerCombinerHelper
-    : GICombinerHelper<"AArch64GenPostLegalizerCombinerHelper",
+def AArch64PostLegalizerCombiner
+    : GICombinerHelper<"AArch64PostLegalizerCombinerImpl",
                        [copy_prop, combines_for_extload,
                         sext_trunc_sextload, mutate_anyext_to_zext,
                         hoist_logic_op_with_same_opcode_hands,
@@ -238,5 +231,4 @@ def AArch64PostLegalizerCombinerHelper
                         ptr_add_immed_chain, overlapping_and,
                         split_store_zero_128, undef_combines,
                         select_to_minmax]> {
-  let DisableRuleOption = "aarch64postlegalizercombiner-disable-rule";
 }

diff --git a/llvm/lib/Target/AArch64/CMakeLists.txt b/llvm/lib/Target/AArch64/CMakeLists.txt
index 54157d2be2c74f..ada3a57300fa77 100644
--- a/llvm/lib/Target/AArch64/CMakeLists.txt
+++ b/llvm/lib/Target/AArch64/CMakeLists.txt
@@ -10,14 +10,14 @@ tablegen(LLVM AArch64GenDAGISel.inc -gen-dag-isel)
 tablegen(LLVM AArch64GenDisassemblerTables.inc -gen-disassembler)
 tablegen(LLVM AArch64GenFastISel.inc -gen-fast-isel)
 tablegen(LLVM AArch64GenGlobalISel.inc -gen-global-isel)
-tablegen(LLVM AArch64GenO0PreLegalizeGICombiner.inc -gen-global-isel-combiner
-              -combiners="AArch64O0PreLegalizerCombinerHelper")
-tablegen(LLVM AArch64GenPreLegalizeGICombiner.inc -gen-global-isel-combiner
-              -combiners="AArch64PreLegalizerCombinerHelper")
-tablegen(LLVM AArch64GenPostLegalizeGICombiner.inc -gen-global-isel-combiner
-              -combiners="AArch64PostLegalizerCombinerHelper")
-tablegen(LLVM AArch64GenPostLegalizeGILowering.inc -gen-global-isel-combiner
-              -combiners="AArch64PostLegalizerLoweringHelper")
+tablegen(LLVM AArch64GenO0PreLegalizeGICombiner.inc -gen-global-isel-combiner-matchtable
+              -combiners="AArch64O0PreLegalizerCombiner")
+tablegen(LLVM AArch64GenPreLegalizeGICombiner.inc -gen-global-isel-combiner-matchtable
+              -combiners="AArch64PreLegalizerCombiner")
+tablegen(LLVM AArch64GenPostLegalizeGICombiner.inc -gen-global-isel-combiner-matchtable
+              -combiners="AArch64PostLegalizerCombiner")
+tablegen(LLVM AArch64GenPostLegalizeGILowering.inc -gen-global-isel-combiner-matchtable
+              -combiners="AArch64PostLegalizerLowering")
 tablegen(LLVM AArch64GenInstrInfo.inc -gen-instr-info)
 tablegen(LLVM AArch64GenMCCodeEmitter.inc -gen-emitter)
 tablegen(LLVM AArch64GenMCPseudoLowering.inc -gen-pseudo-lowering)

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
index 3553492935d3c5..590afbc29d6d75 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
@@ -16,6 +16,8 @@
 #include "llvm/CodeGen/GlobalISel/Combiner.h"
 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
 #include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
+#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"
+#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -27,33 +29,67 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/Support/Debug.h"
 
+#define GET_GICOMBINER_DEPS
+#include "AArch64GenO0PreLegalizeGICombiner.inc"
+#undef GET_GICOMBINER_DEPS
+
 #define DEBUG_TYPE "aarch64-O0-prelegalizer-combiner"
 
 using namespace llvm;
 using namespace MIPatternMatch;
+namespace {
+#define GET_GICOMBINER_TYPES
+#include "AArch64GenO0PreLegalizeGICombiner.inc"
+#undef GET_GICOMBINER_TYPES
 
-class AArch64O0PreLegalizerCombinerHelperState {
+class AArch64O0PreLegalizerCombinerImpl : public GIMatchTableExecutor {
 protected:
   CombinerHelper &Helper;
+  const AArch64O0PreLegalizerCombinerImplRuleConfig &RuleConfig;
+
+  const AArch64Subtarget &STI;
+  GISelChangeObserver &Observer;
+  MachineIRBuilder &B;
+  MachineFunction &MF;
+
+  MachineRegisterInfo &MRI;
 
 public:
-  AArch64O0PreLegalizerCombinerHelperState(CombinerHelper &Helper)
-      : Helper(Helper) {}
-};
+  AArch64O0PreLegalizerCombinerImpl(
+      const AArch64O0PreLegalizerCombinerImplRuleConfig &RuleConfig,
+      GISelChangeObserver &Observer, MachineIRBuilder &B,
+      CombinerHelper &Helper);
+
+  static const char *getName() { return "AArch64O0PreLegalizerCombiner"; }
 
-#define AARCH64O0PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS
+  bool tryCombineAll(MachineInstr &I) const;
+
+private:
+#define GET_GICOMBINER_CLASS_MEMBERS
 #include "AArch64GenO0PreLegalizeGICombiner.inc"
-#undef AARCH64O0PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS
+#undef GET_GICOMBINER_CLASS_MEMBERS
+};
 
-namespace {
-#define AARCH64O0PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H
+#define GET_GICOMBINER_IMPL
+#include "AArch64GenO0PreLegalizeGICombiner.inc"
+#undef GET_GICOMBINER_IMPL
+
+AArch64O0PreLegalizerCombinerImpl::AArch64O0PreLegalizerCombinerImpl(
+    const AArch64O0PreLegalizerCombinerImplRuleConfig &RuleConfig,
+    GISelChangeObserver &Observer, MachineIRBuilder &B, CombinerHelper &Helper)
+    : Helper(Helper), RuleConfig(RuleConfig),
+      STI(B.getMF().getSubtarget<AArch64Subtarget>()), Observer(Observer), B(B),
+      MF(B.getMF()), MRI(*B.getMRI()),
+#define GET_GICOMBINER_CONSTRUCTOR_INITS
 #include "AArch64GenO0PreLegalizeGICombiner.inc"
-#undef AARCH64O0PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H
+#undef GET_GICOMBINER_CONSTRUCTOR_INITS
+{
+}
 
 class AArch64O0PreLegalizerCombinerInfo : public CombinerInfo {
   GISelKnownBits *KB;
   MachineDominatorTree *MDT;
-  AArch64GenO0PreLegalizerCombinerHelperRuleConfig GeneratedRuleCfg;
+  AArch64O0PreLegalizerCombinerImplRuleConfig RuleConfig;
 
 public:
   AArch64O0PreLegalizerCombinerInfo(bool EnableOpt, bool OptSize, bool MinSize,
@@ -62,7 +98,7 @@ class AArch64O0PreLegalizerCombinerInfo : public CombinerInfo {
       : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                      /*LegalizerInfo*/ nullptr, EnableOpt, OptSize, MinSize),
         KB(KB), MDT(MDT) {
-    if (!GeneratedRuleCfg.parseCommandLineOption())
+    if (!RuleConfig.parseCommandLineOption())
       report_fatal_error("Invalid rule identifier");
   }
 
@@ -74,9 +110,10 @@ bool AArch64O0PreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
                                                 MachineInstr &MI,
                                                 MachineIRBuilder &B) const {
   CombinerHelper Helper(Observer, B, /*IsPreLegalize*/ true, KB, MDT);
-  AArch64GenO0PreLegalizerCombinerHelper Generated(GeneratedRuleCfg, Helper);
+  AArch64O0PreLegalizerCombinerImpl Impl(RuleConfig, Observer, B, Helper);
+  Impl.setupMF(*MI.getMF(), KB);
 
-  if (Generated.tryCombineAll(Observer, MI, B))
+  if (Impl.tryCombineAll(MI))
     return true;
 
   unsigned Opc = MI.getOpcode();
@@ -104,10 +141,6 @@ bool AArch64O0PreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
   return false;
 }
 
-#define AARCH64O0PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP
-#include "AArch64GenO0PreLegalizeGICombiner.inc"
-#undef AARCH64O0PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP
-
 // Pass boilerplate
 // ================
 

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
index 7e1ccaad602816..303cf11d4f30c3 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
@@ -24,6 +24,8 @@
 #include "llvm/CodeGen/GlobalISel/Combiner.h"
 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
 #include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
+#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"
+#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
@@ -37,11 +39,21 @@
 #include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/Support/Debug.h"
 
+#define GET_GICOMBINER_DEPS
+#include "AArch64GenPostLegalizeGICombiner.inc"
+#undef GET_GICOMBINER_DEPS
+
 #define DEBUG_TYPE "aarch64-postlegalizer-combiner"
 
 using namespace llvm;
 using namespace MIPatternMatch;
 
+namespace {
+
+#define GET_GICOMBINER_TYPES
+#include "AArch64GenPostLegalizeGICombiner.inc"
+#undef GET_GICOMBINER_TYPES
+
 /// This combine tries to do what performExtractVectorEltCombine does in SDAG.
 /// Rewrite for pairwise fadd pattern
 ///   (s32 (g_extract_vector_elt
@@ -109,13 +121,13 @@ void applyExtractVecEltPairwiseAdd(
   MI.eraseFromParent();
 }
 
-static bool isSignExtended(Register R, MachineRegisterInfo &MRI) {
+bool isSignExtended(Register R, MachineRegisterInfo &MRI) {
   // TODO: check if extended build vector as well.
   unsigned Opc = MRI.getVRegDef(R)->getOpcode();
   return Opc == TargetOpcode::G_SEXT || Opc == TargetOpcode::G_SEXT_INREG;
 }
 
-static bool isZeroExtended(Register R, MachineRegisterInfo &MRI) {
+bool isZeroExtended(Register R, MachineRegisterInfo &MRI) {
   // TODO: check if extended build vector as well.
   return MRI.getVRegDef(R)->getOpcode() == TargetOpcode::G_ZEXT;
 }
@@ -264,7 +276,7 @@ void applyFoldMergeToZext(MachineInstr &MI, MachineRegisterInfo &MRI,
 
 /// \returns True if a G_ANYEXT instruction \p MI should be mutated to a G_ZEXT
 /// instruction.
-static bool matchMutateAnyExtToZExt(MachineInstr &MI, MachineRegisterInfo &MRI) {
+bool matchMutateAnyExtToZExt(MachineInstr &MI, MachineRegisterInfo &MRI) {
   // If this is coming from a scalar compare then we can use a G_ZEXT instead of
   // a G_ANYEXT:
   //
@@ -281,9 +293,9 @@ static bool matchMutateAnyExtToZExt(MachineInstr &MI, MachineRegisterInfo &MRI)
                            m_GFCmp(m_Pred(), m_Reg(), m_Reg())));
 }
 
-static void applyMutateAnyExtToZExt(MachineInstr &MI, MachineRegisterInfo &MRI,
-                              MachineIRBuilder &B,
-                              GISelChangeObserver &Observer) {
+void applyMutateAnyExtToZExt(MachineInstr &MI, MachineRegisterInfo &MRI,
+                             MachineIRBuilder &B,
+                             GISelChangeObserver &Observer) {
   Observer.changingInstr(MI);
   MI.setDesc(B.getTII().get(TargetOpcode::G_ZEXT));
   Observer.changedInstr(MI);
@@ -291,7 +303,7 @@ static void applyMutateAnyExtToZExt(MachineInstr &MI, MachineRegisterInfo &MRI,
 
 /// Match a 128b store of zero and split it into two 64 bit stores, for
 /// size/performance reasons.
-static bool matchSplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI) {
+bool matchSplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI) {
   GStore &Store = cast<GStore>(MI);
   if (!Store.isSimple())
     return false;
@@ -307,9 +319,9 @@ static bool matchSplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI) {
   return MaybeCst && MaybeCst->isZero();
 }
 
-static void applySplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                   MachineIRBuilder &B,
-                                   GISelChangeObserver &Observer) {
+void applySplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI,
+                            MachineIRBuilder &B,
+                            GISelChangeObserver &Observer) {
   B.setInstrAndDebugLoc(MI);
   GStore &Store = cast<GStore>(MI);
   assert(MRI.getType(Store.getValueReg()).isVector() &&
@@ -327,21 +339,55 @@ static void applySplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI,
   Store.eraseFromParent();
 }
 
-#define AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS
+class AArch64PostLegalizerCombinerImpl : public GIMatchTableExecutor {
+protected:
+  CombinerHelper &Helper;
+  const AArch64PostLegalizerCombinerImplRuleConfig &RuleConfig;
+
+  const AArch64Subtarget &STI;
+  MachineRegisterInfo &MRI;
+  GISelChangeObserver &Observer;
+  MachineIRBuilder &B;
+  MachineFunction &MF;
+
+public:
+  AArch64PostLegalizerCombinerImpl(
+      const AArch64PostLegalizerCombinerImplRuleConfig &RuleConfig,
+      const AArch64Subtarget &STI, GISelChangeObserver &Observer,
+      MachineIRBuilder &B, CombinerHelper &Helper);
+
+  static const char *getName() { return "AArch64PostLegalizerCombiner"; }
+
+  bool tryCombineAll(MachineInstr &I) const;
+
+private:
+#define GET_GICOMBINER_CLASS_MEMBERS
 #include "AArch64GenPostLegalizeGICombiner.inc"
-#undef AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS
+#undef GET_GICOMBINER_CLASS_MEMBERS
+};
 
-namespace {
-#define AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H
+#define GET_GICOMBINER_IMPL
 #include "AArch64GenPostLegalizeGICombiner.inc"
-#undef AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H
+#undef GET_GICOMBINER_IMPL
+
+AArch64PostLegalizerCombinerImpl::AArch64PostLegalizerCombinerImpl(
+    const AArch64PostLegalizerCombinerImplRuleConfig &RuleConfig,
+    const AArch64Subtarget &STI, GISelChangeObserver &Observer,
+    MachineIRBuilder &B, CombinerHelper &Helper)
+    : Helper(Helper), RuleConfig(RuleConfig), STI(STI), MRI(*B.getMRI()),
+      Observer(Observer), B(B), MF(B.getMF()),
+#define GET_GICOMBINER_CONSTRUCTOR_INITS
+#include "AArch64GenPostLegalizeGICombiner.inc"
+#undef GET_GICOMBINER_CONSTRUCTOR_INITS
+{
+}
 
 class AArch64PostLegalizerCombinerInfo : public CombinerInfo {
   GISelKnownBits *KB;
   MachineDominatorTree *MDT;
 
 public:
-  AArch64GenPostLegalizerCombinerHelperRuleConfig GeneratedRuleCfg;
+  AArch64PostLegalizerCombinerImplRuleConfig RuleConfig;
 
   AArch64PostLegalizerCombinerInfo(bool EnableOpt, bool OptSize, bool MinSize,
                                    GISelKnownBits *KB,
@@ -349,7 +395,7 @@ class AArch64PostLegalizerCombinerInfo : public CombinerInfo {
       : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                      /*LegalizerInfo*/ nullptr, EnableOpt, OptSize, MinSize),
         KB(KB), MDT(MDT) {
-    if (!GeneratedRuleCfg.parseCommandLineOption())
+    if (!RuleConfig.parseCommandLineOption())
       report_fatal_error("Invalid rule identifier");
   }
 
@@ -360,17 +406,14 @@ class AArch64PostLegalizerCombinerInfo : public CombinerInfo {
 bool AArch64PostLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
                                                MachineInstr &MI,
                                                MachineIRBuilder &B) const {
-  const auto *LI =
-      MI.getParent()->getParent()->getSubtarget().getLegalizerInfo();
+  const auto &STI = MI.getMF()->getSubtarget<AArch64Subtarget>();
+  const auto *LI = STI.getLegalizerInfo();
   CombinerHelper Helper(Observer, B, /*IsPreLegalize*/ false, KB, MDT, LI);
-  AArch64GenPostLegalizerCombinerHelper Generated(GeneratedRuleCfg);
-  return Generated.tryCombineAll(Observer, MI, B, Helper);
+  AArch64PostLegalizerCombinerImpl Impl(RuleConfig, STI, Observer, B, Helper);
+  Impl.setupMF(*MI.getMF(), KB);
+  return Impl.tryCombineAll(MI);
 }
 
-#define AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP
-#include "AArch64GenPostLegalizeGICombiner.inc"
-#undef AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP
-
 class AArch64PostLegalizerCombiner : public MachineFunctionPass {
 public:
   static char ID;

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
index 2a72216a01c0d1..61729b34d38f86 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -29,6 +29,8 @@
 #include "llvm/CodeGen/GlobalISel/Combiner.h"
 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
 #include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
+#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"
+#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
@@ -46,18 +48,28 @@
 #include "llvm/Support/ErrorHandling.h"
 #include <optional>
 
+#define GET_GICOMBINER_DEPS
+#include "AArch64GenPostLegalizeGILowering.inc"
+#undef GET_GICOMBINER_DEPS
+
 #define DEBUG_TYPE "aarch64-postlegalizer-lowering"
 
 using namespace llvm;
 using namespace MIPatternMatch;
 using namespace AArch64GISelUtils;
 
+namespace {
+
+#define GET_GICOMBINER_TYPES
+#include "AArch64GenPostLegalizeGILowering.inc"
+#undef GET_GICOMBINER_TYPES
+
 /// Represents a pseudo instruction which replaces a G_SHUFFLE_VECTOR.
 ///
 /// Used for matching target-supported shuffles before codegen.
 struct ShuffleVectorPseudo {
-  unsigned Opc; ///< Opcode for the instruction. (E.g. G_ZIP1)
-  Register Dst; ///< Destination register.
+  unsigned Opc;                 ///< Opcode for the instruction. (E.g. G_ZIP1)
+  Register Dst;                 ///< Destination register.
   SmallVector<SrcOp, 2> SrcOps; ///< Source registers.
   ShuffleVectorPseudo(unsigned Opc, Register Dst,
                       std::initializer_list<SrcOp> SrcOps)
@@ -67,8 +79,8 @@ struct ShuffleVectorPseudo {
 
 /// Check if a vector shuffle corresponds to a REV instruction with the
 /// specified blocksize.
-static bool isREVMask(ArrayRef<int> M, unsigned EltSize, unsigned NumElts,
-                      unsigned BlockSize) {
+bool isREVMask(ArrayRef<int> M, unsigned EltSize, unsigned NumElts,
+               unsigned BlockSize) {
   assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
          "Only possible block sizes for REV are: 16, 32, 64");
   assert(EltSize != 64 && "EltSize cannot be 64 for REV mask.");
@@ -96,8 +108,7 @@ static bool isREVMask(ArrayRef<int> M, unsigned EltSize, unsigned NumElts,
 
 /// Determines if \p M is a shuffle vector mask for a TRN of \p NumElts.
 /// Whether or not G_TRN1 or G_TRN2 should be used is stored in \p WhichResult.
-static bool isTRNMask(ArrayRef<int> M, unsigned NumElts,
-                      unsigned &WhichResult) {
+bool isTRNMask(ArrayRef<int> M, unsigned NumElts, unsigned &WhichResult) {
   if (NumElts % 2 != 0)
     return false;
   WhichResult = (M[0] == 0 ? 0 : 1);
@@ -112,8 +123,8 @@ static bool isTRNMask(ArrayRef<int> M, unsigned NumElts,
 
 /// Check if a G_EXT instruction can handle a shuffle mask \p M when the vector
 /// sources of the shuffle are different.
-static std::optional<std::pair<bool, uint64_t>> getExtMask(ArrayRef<int> M,
-                                                           unsigned NumElts) {
+std::optional<std::pair<bool, uint64_t>> getExtMask(ArrayRef<int> M,
+                                                    unsigned NumElts) {
   // Look for the first non-undef element.
   auto FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
   if (FirstRealElt == M.end())
@@ -154,8 +165,7 @@ static std::optional<std::pair<bool, uint64_t>> getExtMask(ArrayRef<int> M,
 
 /// Determines if \p M is a shuffle vector mask for a UZP of \p NumElts.
 /// Whether or not G_UZP1 or G_UZP2 should be used is stored in \p WhichResult.
-static bool isUZPMask(ArrayRef<int> M, unsigned NumElts,
-                      unsigned &WhichResult) {
+bool isUZPMask(ArrayRef<int> M, unsigned NumElts, unsigned &WhichResult) {
   WhichResult = (M[0] == 0 ? 0 : 1);
   for (unsigned i = 0; i != NumElts; ++i) {
     // Skip undef indices.
@@ -169,8 +179,7 @@ static bool isUZPMask(ArrayRef<int> M, unsigned NumElts,
 
 /// \return true if \p M is a zip mask for a shuffle vector of \p NumElts.
 /// Whether or not G_ZIP1 or G_ZIP2 should be used is stored in \p WhichResult.
-static bool isZipMask(ArrayRef<int> M, unsigned NumElts,
-                      unsigned &WhichResult) {
+bool isZipMask(ArrayRef<int> M, unsigned NumElts, unsigned &WhichResult) {
   if (NumElts % 2 != 0)
     return false;
 
@@ -178,9 +187,9 @@ static bool isZipMask(ArrayRef<int> M, unsigned NumElts,
   WhichResult = (M[0] == 0 ? 0 : 1);
   unsigned Idx = WhichResult * NumElts / 2;
   for (unsigned i = 0; i != NumElts; i += 2) {
-      if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != Idx) ||
-          (M[i + 1] >= 0 && static_cast<unsigned>(M[i + 1]) != Idx + NumElts))
-        return false;
+    if ((M[i] >= 0 && static_cast<unsigned>(M[i]) != Idx) ||
+        (M[i + 1] >= 0 && static_cast<unsigned>(M[i + 1]) != Idx + NumElts))
+      return false;
     Idx += 1;
   }
   return true;
@@ -194,8 +203,8 @@ static bool isZipMask(ArrayRef<int> M, unsigned NumElts,
 /// G_INSERT_VECTOR_ELT destination should be the LHS of the G_SHUFFLE_VECTOR.
 ///
 /// Second element is the destination lane for the G_INSERT_VECTOR_ELT.
-static std::optional<std::pair<bool, int>> isINSMask(ArrayRef<int> M,
-                                                     int NumInputElements) {
+std::optional<std::pair<bool, int>> isINSMask(ArrayRef<int> M,
+                                              int NumInputElements) {
   if (M.size() != static_cast<size_t>(NumInputElements))
     return std::nullopt;
   int NumLHSMatch = 0, NumRHSMatch = 0;
@@ -219,8 +228,8 @@ static std::optional<std::pair<bool, int>> isINSMask(ArrayRef<int> M,
 
 /// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with a
 /// G_REV instruction. Returns the appropriate G_REV opcode in \p Opc.
-static bool matchREV(MachineInstr &MI, MachineRegisterInfo &MRI,
-                     ShuffleVectorPseudo &MatchInfo) {
+bool matchREV(MachineInstr &MI, MachineRegisterInfo &MRI,
+              ShuffleVectorPseudo &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
   ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
   Register Dst = MI.getOperand(0).getReg();
@@ -248,8 +257,8 @@ static bool matchREV(MachineInstr &MI, MachineRegisterInfo &MRI,
 
 /// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
 /// a G_TRN1 or G_TRN2 instruction.
-static bool matchTRN(MachineInstr &MI, MachineRegisterInfo &MRI,
-                     ShuffleVectorPseudo &MatchInfo) {
+bool matchTRN(MachineInstr &MI, MachineRegisterInfo &MRI,
+              ShuffleVectorPseudo &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
   unsigned WhichResult;
   ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
@@ -269,8 +278,8 @@ static bool matchTRN(MachineInstr &MI, MachineRegisterInfo &MRI,
 ///
 /// \param [in] MI - The shuffle vector instruction.
 /// \param [out] MatchInfo - Either G_UZP1 or G_UZP2 on success.
-static bool matchUZP(MachineInstr &MI, MachineRegisterInfo &MRI,
-                     ShuffleVectorPseudo &MatchInfo) {
+bool matchUZP(MachineInstr &MI, MachineRegisterInfo &MRI,
+              ShuffleVectorPseudo &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
   unsigned WhichResult;
   ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
@@ -285,8 +294,8 @@ static bool matchUZP(MachineInstr &MI, MachineRegisterInfo &MRI,
   return true;
 }
 
-static bool matchZip(MachineInstr &MI, MachineRegisterInfo &MRI,
-                     ShuffleVectorPseudo &MatchInfo) {
+bool matchZip(MachineInstr &MI, MachineRegisterInfo &MRI,
+              ShuffleVectorPseudo &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
   unsigned WhichResult;
   ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
@@ -302,9 +311,9 @@ static bool matchZip(MachineInstr &MI, MachineRegisterInfo &MRI,
 }
 
 /// Helper function for matchDup.
-static bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
-                                        MachineRegisterInfo &MRI,
-                                        ShuffleVectorPseudo &MatchInfo) {
+bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
+                                 MachineRegisterInfo &MRI,
+                                 ShuffleVectorPseudo &MatchInfo) {
   if (Lane != 0)
     return false;
 
@@ -316,7 +325,8 @@ static bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
   // %cst0:gpr(s32) = G_CONSTANT i32 0
   // %zerovec:fpr(<2 x s32>) = G_BUILD_VECTOR %cst0(s32), %cst0(s32)
   // %ins:fpr(<2 x s64>) = G_INSERT_VECTOR_ELT %undef, %scalar(s64), %cst0(s32)
-  // %splat:fpr(<2 x s64>) = G_SHUFFLE_VECTOR %ins(<2 x s64>), %undef, %zerovec(<2 x s32>)
+  // %splat:fpr(<2 x s64>) = G_SHUFFLE_VECTOR %ins(<2 x s64>), %undef,
+  // %zerovec(<2 x s32>)
   //
   // ...into:
   // %splat = G_DUP %scalar
@@ -341,9 +351,9 @@ static bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
 }
 
 /// Helper function for matchDup.
-static bool matchDupFromBuildVector(int Lane, MachineInstr &MI,
-                                    MachineRegisterInfo &MRI,
-                                    ShuffleVectorPseudo &MatchInfo) {
+bool matchDupFromBuildVector(int Lane, MachineInstr &MI,
+                             MachineRegisterInfo &MRI,
+                             ShuffleVectorPseudo &MatchInfo) {
   assert(Lane >= 0 && "Expected positive lane?");
   // Test if the LHS is a BUILD_VECTOR. If it is, then we can just reference the
   // lane's definition directly.
@@ -357,8 +367,8 @@ static bool matchDupFromBuildVector(int Lane, MachineInstr &MI,
   return true;
 }
 
-static bool matchDup(MachineInstr &MI, MachineRegisterInfo &MRI,
-                     ShuffleVectorPseudo &MatchInfo) {
+bool matchDup(MachineInstr &MI, MachineRegisterInfo &MRI,
+              ShuffleVectorPseudo &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
   auto MaybeLane = getSplatIndex(MI);
   if (!MaybeLane)
@@ -376,7 +386,7 @@ static bool matchDup(MachineInstr &MI, MachineRegisterInfo &MRI,
 
 // Check if an EXT instruction can handle the shuffle mask when the vector
 // sources of the shuffle are the same.
-static bool isSingletonExtMask(ArrayRef<int> M, LLT Ty) {
+bool isSingletonExtMask(ArrayRef<int> M, LLT Ty) {
   unsigned NumElts = Ty.getNumElements();
 
   // Assume that the first shuffle index is not UNDEF.  Fail if it is.
@@ -403,8 +413,8 @@ static bool isSingletonExtMask(ArrayRef<int> M, LLT Ty) {
   return true;
 }
 
-static bool matchEXT(MachineInstr &MI, MachineRegisterInfo &MRI,
-                     ShuffleVectorPseudo &MatchInfo) {
+bool matchEXT(MachineInstr &MI, MachineRegisterInfo &MRI,
+              ShuffleVectorPseudo &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
   Register Dst = MI.getOperand(0).getReg();
   LLT DstTy = MRI.getType(Dst);
@@ -435,8 +445,8 @@ static bool matchEXT(MachineInstr &MI, MachineRegisterInfo &MRI,
 
 /// Replace a G_SHUFFLE_VECTOR instruction with a pseudo.
 /// \p Opc is the opcode to use. \p MI is the G_SHUFFLE_VECTOR.
-static void applyShuffleVectorPseudo(MachineInstr &MI,
-                                     ShuffleVectorPseudo &MatchInfo) {
+void applyShuffleVectorPseudo(MachineInstr &MI,
+                              ShuffleVectorPseudo &MatchInfo) {
   MachineIRBuilder MIRBuilder(MI);
   MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst}, MatchInfo.SrcOps);
   MI.eraseFromParent();
@@ -445,7 +455,7 @@ static void applyShuffleVectorPseudo(MachineInstr &MI,
 /// Replace a G_SHUFFLE_VECTOR instruction with G_EXT.
 /// Special-cased because the constant operand must be emitted as a G_CONSTANT
 /// for the imported tablegen patterns to work.
-static void applyEXT(MachineInstr &MI, ShuffleVectorPseudo &MatchInfo) {
+void applyEXT(MachineInstr &MI, ShuffleVectorPseudo &MatchInfo) {
   MachineIRBuilder MIRBuilder(MI);
   // Tablegen patterns expect an i32 G_CONSTANT as the final op.
   auto Cst =
@@ -466,8 +476,8 @@ static void applyEXT(MachineInstr &MI, ShuffleVectorPseudo &MatchInfo) {
 ///   %extract = G_EXTRACT_VECTOR_ELT %left, 0
 ///   %ins = G_INSERT_VECTOR_ELT %left, %extract, 1
 ///
-static bool matchINS(MachineInstr &MI, MachineRegisterInfo &MRI,
-                     std::tuple<Register, int, Register, int> &MatchInfo) {
+bool matchINS(MachineInstr &MI, MachineRegisterInfo &MRI,
+              std::tuple<Register, int, Register, int> &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
   ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
   Register Dst = MI.getOperand(0).getReg();
@@ -493,9 +503,9 @@ static bool matchINS(MachineInstr &MI, MachineRegisterInfo &MRI,
   return true;
 }
 
-static void applyINS(MachineInstr &MI, MachineRegisterInfo &MRI,
-                     MachineIRBuilder &Builder,
-                     std::tuple<Register, int, Register, int> &MatchInfo) {
+void applyINS(MachineInstr &MI, MachineRegisterInfo &MRI,
+              MachineIRBuilder &Builder,
+              std::tuple<Register, int, Register, int> &MatchInfo) {
   Builder.setInstrAndDebugLoc(MI);
   Register Dst = MI.getOperand(0).getReg();
   auto ScalarTy = MRI.getType(Dst).getElementType();
@@ -512,8 +522,8 @@ static void applyINS(MachineInstr &MI, MachineRegisterInfo &MRI,
 /// isVShiftRImm - Check if this is a valid vector for the immediate
 /// operand of a vector shift right operation. The value must be in the range:
 ///   1 <= Value <= ElementBits for a right shift.
-static bool isVShiftRImm(Register Reg, MachineRegisterInfo &MRI, LLT Ty,
-                         int64_t &Cnt) {
+bool isVShiftRImm(Register Reg, MachineRegisterInfo &MRI, LLT Ty,
+                  int64_t &Cnt) {
   assert(Ty.isVector() && "vector shift count is not a vector type");
   MachineInstr *MI = MRI.getVRegDef(Reg);
   auto Cst = getAArch64VectorSplatScalar(*MI, MRI);
@@ -525,8 +535,8 @@ static bool isVShiftRImm(Register Reg, MachineRegisterInfo &MRI, LLT Ty,
 }
 
 /// Match a vector G_ASHR or G_LSHR with a valid immediate shift.
-static bool matchVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
-                              int64_t &Imm) {
+bool matchVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
+                       int64_t &Imm) {
   assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
          MI.getOpcode() == TargetOpcode::G_LSHR);
   LLT Ty = MRI.getType(MI.getOperand(1).getReg());
@@ -535,8 +545,8 @@ static bool matchVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
   return isVShiftRImm(MI.getOperand(2).getReg(), MRI, Ty, Imm);
 }
 
-static void applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
-                              int64_t &Imm) {
+void applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
+                       int64_t &Imm) {
   unsigned Opc = MI.getOpcode();
   assert(Opc == TargetOpcode::G_ASHR || Opc == TargetOpcode::G_LSHR);
   unsigned NewOpc =
@@ -755,7 +765,7 @@ void applyDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
   MI.eraseFromParent();
 }
 
-static bool matchBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI) {
+bool matchBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI) {
   assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
   auto Splat = getAArch64VectorSplat(MI, MRI);
   if (!Splat)
@@ -769,8 +779,8 @@ static bool matchBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI) {
   return (Cst != 0 && Cst != -1);
 }
 
-static void applyBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                  MachineIRBuilder &B) {
+void applyBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI,
+                           MachineIRBuilder &B) {
   B.setInstrAndDebugLoc(MI);
   B.buildInstr(AArch64::G_DUP, {MI.getOperand(0).getReg()},
                {MI.getOperand(1).getReg()});
@@ -779,8 +789,7 @@ static void applyBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI,
 
 /// \returns how many instructions would be saved by folding a G_ICMP's shift
 /// and/or extension operations.
-static unsigned getCmpOperandFoldingProfit(Register CmpOp,
-                                           const MachineRegisterInfo &MRI) {
+unsigned getCmpOperandFoldingProfit(Register CmpOp, MachineRegisterInfo &MRI) {
   // No instructions to save if there's more than one use or no uses.
   if (!MRI.hasOneNonDBGUse(CmpOp))
     return 0;
@@ -834,8 +843,7 @@ static unsigned getCmpOperandFoldingProfit(Register CmpOp,
 
 /// \returns true if it would be profitable to swap the LHS and RHS of a G_ICMP
 /// instruction \p MI.
-static bool trySwapICmpOperands(MachineInstr &MI,
-                                 const MachineRegisterInfo &MRI) {
+bool trySwapICmpOperands(MachineInstr &MI, MachineRegisterInfo &MRI) {
   assert(MI.getOpcode() == TargetOpcode::G_ICMP);
   // Swap the operands if it would introduce a profitable folding opportunity.
   // (e.g. a shift + extend).
@@ -871,8 +879,7 @@ static bool trySwapICmpOperands(MachineInstr &MI,
           getCmpOperandFoldingProfit(TheRHS, MRI));
 }
 
-static void applySwapICmpOperands(MachineInstr &MI,
-                                  GISelChangeObserver &Observer) {
+void applySwapICmpOperands(MachineInstr &MI, GISelChangeObserver &Observer) {
   auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
   Register LHS = MI.getOperand(2).getReg();
   Register RHS = MI.getOperand(3).getReg();
@@ -887,7 +894,7 @@ static void applySwapICmpOperands(MachineInstr &MI,
 /// for a condition code \p CC.
 /// \param [in] IsZero - True if the comparison is against 0.
 /// \param [in] NoNans - True if the target has NoNansFPMath.
-static std::function<Register(MachineIRBuilder &)>
+std::function<Register(MachineIRBuilder &)>
 getVectorFCMP(AArch64CC::CondCode CC, Register LHS, Register RHS, bool IsZero,
               bool NoNans, MachineRegisterInfo &MRI) {
   LLT DstTy = MRI.getType(LHS);
@@ -942,8 +949,8 @@ getVectorFCMP(AArch64CC::CondCode CC, Register LHS, Register RHS, bool IsZero,
 }
 
 /// Try to lower a vector G_FCMP \p MI into an AArch64-specific pseudo.
-static bool lowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI,
-                            MachineIRBuilder &MIB) {
+bool lowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI,
+                     MachineIRBuilder &MIB) {
   assert(MI.getOpcode() == TargetOpcode::G_FCMP);
   const auto &ST = MI.getMF()->getSubtarget<AArch64Subtarget>();
   Register Dst = MI.getOperand(0).getReg();
@@ -995,11 +1002,11 @@ static bool lowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI,
     CmpRes = MIB.buildNot(DstTy, CmpRes).getReg(0);
   MRI.replaceRegWith(Dst, CmpRes);
   MI.eraseFromParent();
-  return false;
+  return true;
 }
 
-static bool matchFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                Register &SrcReg) {
+bool matchFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
+                         Register &SrcReg) {
   assert(MI.getOpcode() == TargetOpcode::G_STORE);
   Register DstReg = MI.getOperand(0).getReg();
   if (MRI.getType(DstReg).isVector())
@@ -1011,10 +1018,9 @@ static bool matchFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
   return MRI.getType(SrcReg).getSizeInBits() <= 64;
 }
 
-static void applyFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                MachineIRBuilder &B,
-                                GISelChangeObserver &Observer,
-                                Register &SrcReg) {
+void applyFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
+                         MachineIRBuilder &B, GISelChangeObserver &Observer,
+                         Register &SrcReg) {
   assert(MI.getOpcode() == TargetOpcode::G_STORE);
   Observer.changingInstr(MI);
   MI.getOperand(0).setReg(SrcReg);
@@ -1024,40 +1030,74 @@ static void applyFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
 // Lower vector G_SEXT_INREG back to shifts for selection. We allowed them to
 // form in the first place for combine opportunities, so any remaining ones
 // at this stage need be lowered back.
-static bool matchVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI) {
+bool matchVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI) {
   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
   Register DstReg = MI.getOperand(0).getReg();
   LLT DstTy = MRI.getType(DstReg);
   return DstTy.isVector();
 }
 
-static void applyVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                 MachineIRBuilder &B,
-                                 GISelChangeObserver &Observer) {
+void applyVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI,
+                          MachineIRBuilder &B, GISelChangeObserver &Observer) {
   assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
   B.setInstrAndDebugLoc(MI);
   LegalizerHelper Helper(*MI.getMF(), Observer, B);
   Helper.lower(MI, 0, /* Unused hint type */ LLT());
 }
 
-#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS
+class AArch64PostLegalizerLoweringImpl : public GIMatchTableExecutor {
+protected:
+  CombinerHelper &Helper;
+  const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig;
+
+  const AArch64Subtarget &STI;
+  GISelChangeObserver &Observer;
+  MachineIRBuilder &B;
+  MachineFunction &MF;
+
+  MachineRegisterInfo &MRI;
+
+public:
+  AArch64PostLegalizerLoweringImpl(
+      const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
+      const AArch64Subtarget &STI, GISelChangeObserver &Observer,
+      MachineIRBuilder &B, CombinerHelper &Helper);
+
+  static const char *getName() { return "AArch6400PreLegalizerCombiner"; }
+
+  bool tryCombineAll(MachineInstr &I) const;
+
+private:
+#define GET_GICOMBINER_CLASS_MEMBERS
 #include "AArch64GenPostLegalizeGILowering.inc"
-#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS
+#undef GET_GICOMBINER_CLASS_MEMBERS
+};
 
-namespace {
-#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_H
+#define GET_GICOMBINER_IMPL
 #include "AArch64GenPostLegalizeGILowering.inc"
-#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_H
+#undef GET_GICOMBINER_IMPL
+
+AArch64PostLegalizerLoweringImpl::AArch64PostLegalizerLoweringImpl(
+    const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
+    const AArch64Subtarget &STI, GISelChangeObserver &Observer,
+    MachineIRBuilder &B, CombinerHelper &Helper)
+    : Helper(Helper), RuleConfig(RuleConfig), STI(STI), Observer(Observer),
+      B(B), MF(B.getMF()), MRI(*B.getMRI()),
+#define GET_GICOMBINER_CONSTRUCTOR_INITS
+#include "AArch64GenPostLegalizeGILowering.inc"
+#undef GET_GICOMBINER_CONSTRUCTOR_INITS
+{
+}
 
 class AArch64PostLegalizerLoweringInfo : public CombinerInfo {
 public:
-  AArch64GenPostLegalizerLoweringHelperRuleConfig GeneratedRuleCfg;
+  AArch64PostLegalizerLoweringImplRuleConfig RuleConfig;
 
   AArch64PostLegalizerLoweringInfo(bool OptSize, bool MinSize)
       : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                      /*LegalizerInfo*/ nullptr, /*OptEnabled = */ true, OptSize,
                      MinSize) {
-    if (!GeneratedRuleCfg.parseCommandLineOption())
+    if (!RuleConfig.parseCommandLineOption())
       report_fatal_error("Invalid rule identifier");
   }
 
@@ -1068,15 +1108,12 @@ class AArch64PostLegalizerLoweringInfo : public CombinerInfo {
 bool AArch64PostLegalizerLoweringInfo::combine(GISelChangeObserver &Observer,
                                                MachineInstr &MI,
                                                MachineIRBuilder &B) const {
+  const auto &STI = MI.getMF()->getSubtarget<AArch64Subtarget>();
   CombinerHelper Helper(Observer, B, /* IsPreLegalize*/ false);
-  AArch64GenPostLegalizerLoweringHelper Generated(GeneratedRuleCfg);
-  return Generated.tryCombineAll(Observer, MI, B, Helper);
+  AArch64PostLegalizerLoweringImpl Impl(RuleConfig, STI, Observer, B, Helper);
+  Impl.setupMF(*MI.getMF(), Helper.getKnownBits());
+  return Impl.tryCombineAll(MI);
 }
-
-#define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_CPP
-#include "AArch64GenPostLegalizeGILowering.inc"
-#undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_CPP
-
 class AArch64PostLegalizerLowering : public MachineFunctionPass {
 public:
   static char ID;

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
index 070c7deea871fe..a918e9f36e6914 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
@@ -17,6 +17,8 @@
 #include "llvm/CodeGen/GlobalISel/Combiner.h"
 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
 #include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
+#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"
+#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -28,15 +30,24 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/Support/Debug.h"
 
+#define GET_GICOMBINER_DEPS
+#include "AArch64GenPreLegalizeGICombiner.inc"
+#undef GET_GICOMBINER_DEPS
+
 #define DEBUG_TYPE "aarch64-prelegalizer-combiner"
 
 using namespace llvm;
 using namespace MIPatternMatch;
 
+namespace {
+
+#define GET_GICOMBINER_TYPES
+#include "AArch64GenPreLegalizeGICombiner.inc"
+#undef GET_GICOMBINER_TYPES
+
 /// Return true if a G_FCONSTANT instruction is known to be better-represented
 /// as a G_CONSTANT.
-static bool matchFConstantToConstant(MachineInstr &MI,
-                                     MachineRegisterInfo &MRI) {
+bool matchFConstantToConstant(MachineInstr &MI, MachineRegisterInfo &MRI) {
   assert(MI.getOpcode() == TargetOpcode::G_FCONSTANT);
   Register DstReg = MI.getOperand(0).getReg();
   const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
@@ -51,7 +62,7 @@ static bool matchFConstantToConstant(MachineInstr &MI,
 }
 
 /// Change a G_FCONSTANT into a G_CONSTANT.
-static void applyFConstantToConstant(MachineInstr &MI) {
+void applyFConstantToConstant(MachineInstr &MI) {
   assert(MI.getOpcode() == TargetOpcode::G_FCONSTANT);
   MachineIRBuilder MIB(MI);
   const APFloat &ImmValAPF = MI.getOperand(1).getFPImm()->getValueAPF();
@@ -62,8 +73,8 @@ static void applyFConstantToConstant(MachineInstr &MI) {
 /// Try to match a G_ICMP of a G_TRUNC with zero, in which the truncated bits
 /// are sign bits. In this case, we can transform the G_ICMP to directly compare
 /// the wide value with a zero.
-static bool matchICmpRedundantTrunc(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                    GISelKnownBits *KB, Register &MatchInfo) {
+bool matchICmpRedundantTrunc(MachineInstr &MI, MachineRegisterInfo &MRI,
+                             GISelKnownBits *KB, Register &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_ICMP && KB);
 
   auto Pred = (CmpInst::Predicate)MI.getOperand(1).getPredicate();
@@ -91,10 +102,9 @@ static bool matchICmpRedundantTrunc(MachineInstr &MI, MachineRegisterInfo &MRI,
   return true;
 }
 
-static void applyICmpRedundantTrunc(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                    MachineIRBuilder &Builder,
-                                    GISelChangeObserver &Observer,
-                                    Register &WideReg) {
+void applyICmpRedundantTrunc(MachineInstr &MI, MachineRegisterInfo &MRI,
+                             MachineIRBuilder &Builder,
+                             GISelChangeObserver &Observer, Register &WideReg) {
   assert(MI.getOpcode() == TargetOpcode::G_ICMP);
 
   LLT WideTy = MRI.getType(WideReg);
@@ -113,8 +123,8 @@ static void applyICmpRedundantTrunc(MachineInstr &MI, MachineRegisterInfo &MRI,
 /// e.g.
 ///
 /// %g = G_GLOBAL_VALUE @x -> %g = G_GLOBAL_VALUE @x + cst
-static bool matchFoldGlobalOffset(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                  std::pair<uint64_t, uint64_t> &MatchInfo) {
+bool matchFoldGlobalOffset(MachineInstr &MI, MachineRegisterInfo &MRI,
+                           std::pair<uint64_t, uint64_t> &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
   MachineFunction &MF = *MI.getMF();
   auto &GlobalOp = MI.getOperand(1);
@@ -180,10 +190,9 @@ static bool matchFoldGlobalOffset(MachineInstr &MI, MachineRegisterInfo &MRI,
   return true;
 }
 
-static void applyFoldGlobalOffset(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                  MachineIRBuilder &B,
-                                  GISelChangeObserver &Observer,
-                                  std::pair<uint64_t, uint64_t> &MatchInfo) {
+void applyFoldGlobalOffset(MachineInstr &MI, MachineRegisterInfo &MRI,
+                           MachineIRBuilder &B, GISelChangeObserver &Observer,
+                           std::pair<uint64_t, uint64_t> &MatchInfo) {
   // Change:
   //
   //  %g = G_GLOBAL_VALUE @x
@@ -220,9 +229,8 @@ static void applyFoldGlobalOffset(MachineInstr &MI, MachineRegisterInfo &MRI,
       B.buildConstant(LLT::scalar(64), -static_cast<int64_t>(MinOffset)));
 }
 
-static bool tryToSimplifyUADDO(MachineInstr &MI, MachineIRBuilder &B,
-                               CombinerHelper &Helper,
-                               GISelChangeObserver &Observer) {
+bool tryToSimplifyUADDO(MachineInstr &MI, MachineIRBuilder &B,
+                        CombinerHelper &Helper, GISelChangeObserver &Observer) {
   // Try simplify G_UADDO with 8 or 16 bit operands to wide G_ADD and TBNZ if
   // result is only used in the no-overflow case. It is restricted to cases
   // where we know that the high-bits of the operands are 0. If there's an
@@ -335,28 +343,54 @@ static bool tryToSimplifyUADDO(MachineInstr &MI, MachineIRBuilder &B,
   return true;
 }
 
-class AArch64PreLegalizerCombinerHelperState {
+class AArch64PreLegalizerCombinerImpl : public GIMatchTableExecutor {
 protected:
   CombinerHelper &Helper;
+  const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig;
+
+  const AArch64Subtarget &STI;
+  GISelChangeObserver &Observer;
+  MachineIRBuilder &B;
+  MachineFunction &MF;
+
+  MachineRegisterInfo &MRI;
 
 public:
-  AArch64PreLegalizerCombinerHelperState(CombinerHelper &Helper)
-      : Helper(Helper) {}
-};
+  AArch64PreLegalizerCombinerImpl(
+      const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig,
+      const AArch64Subtarget &STI, GISelChangeObserver &Observer,
+      MachineIRBuilder &B, CombinerHelper &Helper);
+
+  static const char *getName() { return "AArch6400PreLegalizerCombiner"; }
 
-#define AARCH64PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS
+  bool tryCombineAll(MachineInstr &I) const;
+
+private:
+#define GET_GICOMBINER_CLASS_MEMBERS
 #include "AArch64GenPreLegalizeGICombiner.inc"
-#undef AARCH64PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS
+#undef GET_GICOMBINER_CLASS_MEMBERS
+};
 
-namespace {
-#define AARCH64PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H
+#define GET_GICOMBINER_IMPL
 #include "AArch64GenPreLegalizeGICombiner.inc"
-#undef AARCH64PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H
+#undef GET_GICOMBINER_IMPL
+
+AArch64PreLegalizerCombinerImpl::AArch64PreLegalizerCombinerImpl(
+    const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig,
+    const AArch64Subtarget &STI, GISelChangeObserver &Observer,
+    MachineIRBuilder &B, CombinerHelper &Helper)
+    : Helper(Helper), RuleConfig(RuleConfig), STI(STI), Observer(Observer),
+      B(B), MF(B.getMF()), MRI(*B.getMRI()),
+#define GET_GICOMBINER_CONSTRUCTOR_INITS
+#include "AArch64GenPreLegalizeGICombiner.inc"
+#undef GET_GICOMBINER_CONSTRUCTOR_INITS
+{
+}
 
 class AArch64PreLegalizerCombinerInfo : public CombinerInfo {
   GISelKnownBits *KB;
   MachineDominatorTree *MDT;
-  AArch64GenPreLegalizerCombinerHelperRuleConfig GeneratedRuleCfg;
+  AArch64PreLegalizerCombinerImplRuleConfig RuleConfig;
 
 public:
   AArch64PreLegalizerCombinerInfo(bool EnableOpt, bool OptSize, bool MinSize,
@@ -364,7 +398,7 @@ class AArch64PreLegalizerCombinerInfo : public CombinerInfo {
       : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                      /*LegalizerInfo*/ nullptr, EnableOpt, OptSize, MinSize),
         KB(KB), MDT(MDT) {
-    if (!GeneratedRuleCfg.parseCommandLineOption())
+    if (!RuleConfig.parseCommandLineOption())
       report_fatal_error("Invalid rule identifier");
   }
 
@@ -375,11 +409,13 @@ class AArch64PreLegalizerCombinerInfo : public CombinerInfo {
 bool AArch64PreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
                                               MachineInstr &MI,
                                               MachineIRBuilder &B) const {
-  const auto *LI = MI.getMF()->getSubtarget().getLegalizerInfo();
+  const auto &STI = MI.getMF()->getSubtarget<AArch64Subtarget>();
+  const auto *LI = STI.getLegalizerInfo();
   CombinerHelper Helper(Observer, B, /* IsPreLegalize*/ true, KB, MDT, LI);
-  AArch64GenPreLegalizerCombinerHelper Generated(GeneratedRuleCfg, Helper);
+  AArch64PreLegalizerCombinerImpl Impl(RuleConfig, STI, Observer, B, Helper);
+  Impl.setupMF(*MI.getMF(), KB);
 
-  if (Generated.tryCombineAll(Observer, MI, B))
+  if (Impl.tryCombineAll(MI))
     return true;
 
   unsigned Opc = MI.getOpcode();
@@ -410,10 +446,6 @@ bool AArch64PreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
   return false;
 }
 
-#define AARCH64PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP
-#include "AArch64GenPreLegalizeGICombiner.inc"
-#undef AARCH64PRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP
-
 // Pass boilerplate
 // ================
 
@@ -423,7 +455,9 @@ class AArch64PreLegalizerCombiner : public MachineFunctionPass {
 
   AArch64PreLegalizerCombiner();
 
-  StringRef getPassName() const override { return "AArch64PreLegalizerCombiner"; }
+  StringRef getPassName() const override {
+    return "AArch64PreLegalizerCombiner";
+  }
 
   bool runOnMachineFunction(MachineFunction &MF) override;
 
@@ -482,7 +516,6 @@ INITIALIZE_PASS_END(AArch64PreLegalizerCombiner, DEBUG_TYPE,
                     "Combine AArch64 machine instrs before legalization", false,
                     false)
 
-
 namespace llvm {
 FunctionPass *createAArch64PreLegalizerCombiner() {
   return new AArch64PreLegalizerCombiner();

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-or-disjoint-mask.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-or-disjoint-mask.mir
index 71adf3f6f474b3..82f387cddf2a3d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-or-disjoint-mask.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-and-or-disjoint-mask.mir
@@ -1,6 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 
-# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="and_or_disjoint_mask" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="and_or_disjoint_mask" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 
 ...

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-icmp-to-lhs-known-bits.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-icmp-to-lhs-known-bits.mir
index 4841434458b5df..fb072fbe97c19a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-icmp-to-lhs-known-bits.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-icmp-to-lhs-known-bits.mir
@@ -1,6 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 
-# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="icmp_to_lhs_known_bits" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="icmp_to_lhs_known_bits" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 
 ...

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-mulo-with-2.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-mulo-with-2.mir
index 27486b5752d4f3..261201590e5e0b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-mulo-with-2.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-mulo-with-2.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64 -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="mulo_by_2" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple aarch64 -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="mulo_by_2" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 ...
 ---

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
index 4447716f8a69cb..b5a02d670933bc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-select.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs -mtriple aarch64-unknown- --aarch64postlegalizercombinerhelper-only-enable-rule="select_to_logical" %s -o - | FileCheck %s
+# RUN: llc -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs -mtriple aarch64-unknown- --aarch64postlegalizercombiner-only-enable-rule="select_to_logical" %s -o - | FileCheck %s
 # RUN: llc -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs -mtriple aarch64-unknown-unknown %s -o - | FileCheck %s
 # REQUIRES: asserts
 ---

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir
index 7d04b4d86bafd7..141e6c4d470387 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-and.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-combiner --aarch64postlegalizercombinerhelper-only-enable-rule="bitfield_extract_from_and" -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-combiner --aarch64postlegalizercombiner-only-enable-rule="bitfield_extract_from_and" -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 
 # Check that we can combine

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/lower-neon-vector-fcmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/lower-neon-vector-fcmp.mir
index 8a0edbc99e3524..8f01c009c4967a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/lower-neon-vector-fcmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/lower-neon-vector-fcmp.mir
@@ -37,8 +37,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMEQZ:%[0-9]+]]:_(<2 x s64>) = G_FCMEQZ %lhs
     ; CHECK-NEXT: $q0 = COPY [[FCMEQZ]](<2 x s64>)
     ; CHECK-NEXT: RET_ReallyLR implicit $q0
@@ -84,8 +82,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMGTZ:%[0-9]+]]:_(<2 x s64>) = G_FCMGTZ %lhs
     ; CHECK-NEXT: $q0 = COPY [[FCMGTZ]](<2 x s64>)
     ; CHECK-NEXT: RET_ReallyLR implicit $q0
@@ -133,8 +129,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMGEZ:%[0-9]+]]:_(<2 x s64>) = G_FCMGEZ %lhs
     ; CHECK-NEXT: $q0 = COPY [[FCMGEZ]](<2 x s64>)
     ; CHECK-NEXT: RET_ReallyLR implicit $q0
@@ -180,8 +174,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMLTZ:%[0-9]+]]:_(<2 x s64>) = G_FCMLTZ %lhs
     ; CHECK-NEXT: $q0 = COPY [[FCMLTZ]](<2 x s64>)
     ; CHECK-NEXT: RET_ReallyLR implicit $q0
@@ -226,8 +218,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMLEZ:%[0-9]+]]:_(<2 x s64>) = G_FCMLEZ %lhs
     ; CHECK-NEXT: $q0 = COPY [[FCMLEZ]](<2 x s64>)
     ; CHECK-NEXT: RET_ReallyLR implicit $q0
@@ -280,8 +270,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMGTZ:%[0-9]+]]:_(<2 x s64>) = G_FCMGTZ %lhs
     ; CHECK-NEXT: [[FCMLTZ:%[0-9]+]]:_(<2 x s64>) = G_FCMLTZ %lhs
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[FCMLTZ]], [[FCMGTZ]]
@@ -339,8 +327,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMGEZ:%[0-9]+]]:_(<2 x s64>) = G_FCMGEZ %lhs
     ; CHECK-NEXT: [[FCMLTZ:%[0-9]+]]:_(<2 x s64>) = G_FCMLTZ %lhs
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[FCMLTZ]], [[FCMGEZ]]
@@ -398,8 +384,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMEQ:%[0-9]+]]:_(<2 x s64>) = G_FCMEQ %lhs, %lhs(<2 x s64>)
     ; CHECK-NEXT: $q0 = COPY [[FCMEQ]](<2 x s64>)
     ; CHECK-NEXT: RET_ReallyLR implicit $q0
@@ -453,8 +437,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMGEZ:%[0-9]+]]:_(<2 x s64>) = G_FCMGEZ %lhs
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
@@ -511,8 +493,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMGTZ:%[0-9]+]]:_(<2 x s64>) = G_FCMGTZ %lhs
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
@@ -569,8 +549,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMLEZ:%[0-9]+]]:_(<2 x s64>) = G_FCMLEZ %lhs
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
@@ -627,8 +605,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMLTZ:%[0-9]+]]:_(<2 x s64>) = G_FCMLTZ %lhs
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
@@ -685,8 +661,6 @@ body:             |
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK-NEXT: %zero:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
     ; CHECK-NEXT: [[FCMEQZ:%[0-9]+]]:_(<2 x s64>) = G_FCMEQZ %lhs
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and-postlegalize.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and-postlegalize.mir
index 602084ec8d8b85..831212d6cb0149 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and-postlegalize.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and-postlegalize.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -debugify-and-strip-all-safe -mtriple arm64-apple-ios -O0 -run-pass=aarch64-postlegalizer-combiner --aarch64postlegalizercombinerhelper-only-enable-rule="overlapping_and" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -debugify-and-strip-all-safe -mtriple arm64-apple-ios -O0 -run-pass=aarch64-postlegalizer-combiner --aarch64postlegalizercombiner-only-enable-rule="overlapping_and" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 
 # Test running the overlapping_and combine post-legalization.

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and.mir
index cedd6f0fac69b8..c5a7e231b0737d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -debugify-and-strip-all-safe -mtriple arm64-apple-ios -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="overlapping_and" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -debugify-and-strip-all-safe -mtriple arm64-apple-ios -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="overlapping_and" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 ---
 name:            bitmask_overlap1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combine-ptr-add-chain.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combine-ptr-add-chain.mir
index 53338f9f419d0e..a022a287b4633e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combine-ptr-add-chain.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combine-ptr-add-chain.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64-apple-ios  -run-pass=aarch64-postlegalizer-combiner --aarch64postlegalizercombinerhelper-only-enable-rule="ptr_add_immed_chain"  %s -o - -verify-machineinstrs | FileCheck %s
+# RUN: llc -mtriple aarch64-apple-ios  -run-pass=aarch64-postlegalizer-combiner --aarch64postlegalizercombiner-only-enable-rule="ptr_add_immed_chain"  %s -o - -verify-machineinstrs | FileCheck %s
 # REQUIRES: asserts
 
 # Check that we fold two adds of constant offsets with G_PTR_ADD into a single G_PTR_ADD.

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-anyext-to-zext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-anyext-to-zext.mir
index 1b3b3408cb20e1..24baa2fc13317c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-anyext-to-zext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-anyext-to-zext.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-combiner --aarch64postlegalizercombinerhelper-only-enable-rule="mutate_anyext_to_zext" -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-combiner --aarch64postlegalizercombiner-only-enable-rule="mutate_anyext_to_zext" -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 
 ...

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuf-to-ins.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuf-to-ins.mir
index 47ec12aefc1029..567d5f2deb8c5d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuf-to-ins.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuf-to-ins.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=aarch64 -run-pass=aarch64-postlegalizer-lowering --aarch64postlegalizerloweringhelper-only-enable-rule="shuf_to_ins" -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple=aarch64 -run-pass=aarch64-postlegalizer-lowering --aarch64postlegalizerlowering-only-enable-rule="shuf_to_ins" -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 
 # Check that we can recognize an ins mask for a shuffle vector.

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-rotate.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-rotate.mir
index 6a130a2192ab46..98a9cb7624edc6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-rotate.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-rotate.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-combiner --aarch64postlegalizercombinerhelper-only-enable-rule="rotate_out_of_range" -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-combiner --aarch64postlegalizercombiner-only-enable-rule="rotate_out_of_range" -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 
 # Check that we simplify the constant rotate amount to be in range.

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir
index 0a6b85bd45ddf2..94f56e5650b22f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-addo-zero.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64 -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="addo_by_0" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple aarch64 -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="addo_by_0" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 
 # (G_*ADDO x, 0) -> x + no carry

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
index ff543b92949c91..27d5ad126fd233 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -debugify-and-strip-all-safe -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="icmp_to_true_false_known_bits" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -debugify-and-strip-all-safe -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="icmp_to_true_false_known_bits" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 
 --- |

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-and-mask.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-and-mask.mir
index 10a82060ebf61c..a7809f276c53f9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-and-mask.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-and-mask.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner -aarch64prelegalizercombinerhelper-only-enable-rule="load_and_mask" -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner -aarch64prelegalizercombiner-only-enable-rule="load_and_mask" -verify-machineinstrs %s -o - | FileCheck %s
 
 # REQUIRES: asserts
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern-align.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern-align.mir
index bd5719ab3d28ed..a190a12dcd780b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern-align.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern-align.mir
@@ -1,6 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -debugify-and-strip-all-safe -mtriple aarch64 -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="load_or_combine" -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=NOT_STRICT
-# RUN: llc -debugify-and-strip-all-safe -mattr=+strict-align -mtriple aarch64 -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="load_or_combine" -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=STRICT
+# RUN: llc -debugify-and-strip-all-safe -mtriple aarch64 -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="load_or_combine" -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=NOT_STRICT
+# RUN: llc -debugify-and-strip-all-safe -mattr=+strict-align -mtriple aarch64 -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="load_or_combine" -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=STRICT
 
 # REQUIRES: asserts
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern.mir
index 428def14dc78e3..88d214e43c82e5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern.mir
@@ -1,6 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -debugify-and-strip-all-safe -mtriple aarch64 -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="load_or_combine" -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=LITTLE
-# RUN: llc -debugify-and-strip-all-safe -mtriple arm64eb -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="load_or_combine" -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=BIG
+# RUN: llc -debugify-and-strip-all-safe -mtriple aarch64 -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="load_or_combine" -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=LITTLE
+# RUN: llc -debugify-and-strip-all-safe -mtriple arm64eb -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="load_or_combine" -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=BIG
 
 # REQUIRES: asserts
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-mulo-zero.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-mulo-zero.mir
index a6c617b993a369..34219c374f6edc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-mulo-zero.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-mulo-zero.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64 -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="mulo_by_0" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple aarch64 -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="mulo_by_0" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 # REQUIRES: asserts
 
 # (G_*MULO x, 0) -> 0 + no carry out

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-br.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-br.mir
index 0647de44c4b812..111f866c47746e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-br.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-br.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -debugify-and-strip-all-safe -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="opt_brcond_by_inverting_cond" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -debugify-and-strip-all-safe -O0 -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="opt_brcond_by_inverting_cond" -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 
 # Need asserts for the only-enable-rule to work.
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-copy-prop-disabled.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-copy-prop-disabled.mir
index 5604bc81022f07..e511282eb7b7ca 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-copy-prop-disabled.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-copy-prop-disabled.mir
@@ -2,14 +2,14 @@
 # RUN: llc -run-pass=aarch64-prelegalizer-combiner -global-isel -verify-machineinstrs %s -o - \
 # RUN:                                                                | FileCheck --check-prefix=ENABLED %s
 # RUN: llc -run-pass=aarch64-prelegalizer-combiner -global-isel -verify-machineinstrs %s -o - \
-# RUN:     --aarch64prelegalizercombinerhelper-disable-rule=copy_prop | FileCheck --check-prefix=DISABLED %s
+# RUN:     --aarch64prelegalizercombiner-disable-rule=copy_prop | FileCheck --check-prefix=DISABLED %s
 # RUN: llc -run-pass=aarch64-prelegalizer-combiner -global-isel -verify-machineinstrs %s -o - \
-# RUN:     --aarch64prelegalizercombinerhelper-disable-rule="*" | FileCheck --check-prefix=DISABLED %s
+# RUN:     --aarch64prelegalizercombiner-disable-rule="*" | FileCheck --check-prefix=DISABLED %s
 # RUN: llc -run-pass=aarch64-prelegalizer-combiner -global-isel -verify-machineinstrs %s -o - \
-# RUN:     --aarch64prelegalizercombinerhelper-disable-rule="*,!copy_prop" \
+# RUN:     --aarch64prelegalizercombiner-disable-rule="*,!copy_prop" \
 # RUN:    | FileCheck --check-prefix=ENABLED %s
 # RUN: llc -run-pass=aarch64-prelegalizer-combiner -global-isel -verify-machineinstrs %s -o - \
-# RUN:     --aarch64prelegalizercombinerhelper-only-enable-rule="copy_prop" \
+# RUN:     --aarch64prelegalizercombiner-only-enable-rule="copy_prop" \
 # RUN:    | FileCheck --check-prefix=ENABLED %s
 
 # REQUIRES: asserts

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir
index 93f8e4284cd4cb..6fc5dc461cf273 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64-apple-ios  -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="not_cmp_fold" %s -o - -verify-machineinstrs | FileCheck %s
+# RUN: llc -mtriple aarch64-apple-ios  -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombiner-only-enable-rule="not_cmp_fold" %s -o - -verify-machineinstrs | FileCheck %s
 
 # Need asserts for the only-enable-rule to work.
 
