[llvm] [ARM][TableGen][MC] Change the ARM mnemonic operands to be optional for ASM parsing (PR #83436)

Alfie Richards via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 1 08:04:04 PST 2024


https://github.com/AlfieRichardsArm updated https://github.com/llvm/llvm-project/pull/83436

>From f03c485a73faa1cf05f9858335bec312deac5f02 Mon Sep 17 00:00:00 2001
From: Alfie Richards <alfie.richards at arm.com>
Date: Fri, 1 Mar 2024 14:42:43 +0000
Subject: [PATCH 1/3] [TableGen] Add `PreferSmallerInstructions` for Targets.

This option means that during assembly matching, instructions with smaller
encodings will be preferred.

This will be used for the ARM instruction set, where this is the correct
behaviour after some other refactoring.
---
 llvm/include/llvm/Target/Target.td        |  5 +++
 llvm/utils/TableGen/AsmMatcherEmitter.cpp | 37 +++++++++++++++++------
 llvm/utils/TableGen/CodeGenTarget.cpp     |  4 +++
 llvm/utils/TableGen/CodeGenTarget.h       |  5 +++
 4 files changed, 41 insertions(+), 10 deletions(-)

diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td
index 0d97a47190b196..929a3f799d3a8d 100644
--- a/llvm/include/llvm/Target/Target.td
+++ b/llvm/include/llvm/Target/Target.td
@@ -1724,6 +1724,11 @@ class Target {
   // setting hasExtraDefRegAllocReq and hasExtraSrcRegAllocReq to 1
   // for all opcodes if this flag is set to 0.
   int AllowRegisterRenaming = 0;
+
+  // PreferSmallerInstructions - Should the assembly matcher prefer the
+  // smaller instructions when multiple instructions match. 1 if the
+  // matchables should be sorted by instruction size, 0 otherwise.
+  bit PreferSmallerInstructions = 0;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index febd96086df27b..1563349f2fae53 100644
--- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -502,6 +502,9 @@ struct MatchableInfo {
   /// matchable came from.
   Record *const TheDef;
 
+  // ResInstSize - The size of the resulting instruction for this matchable.
+  unsigned ResInstSize;
+
   /// DefRec - This is the definition that it came from.
   PointerUnion<const CodeGenInstruction *, const CodeGenInstAlias *> DefRec;
 
@@ -543,10 +546,12 @@ struct MatchableInfo {
 
   MatchableInfo(const CodeGenInstruction &CGI)
       : AsmVariantID(0), AsmString(CGI.AsmString), TheDef(CGI.TheDef),
-        DefRec(&CGI), UseInstAsmMatchConverter(true) {}
+        ResInstSize(TheDef->getValueAsInt("Size")), DefRec(&CGI),
+        UseInstAsmMatchConverter(true) {}
 
   MatchableInfo(std::unique_ptr<const CodeGenInstAlias> Alias)
       : AsmVariantID(0), AsmString(Alias->AsmString), TheDef(Alias->TheDef),
+        ResInstSize(Alias->ResultInst->TheDef->getValueAsInt("Size")),
         DefRec(Alias.release()), UseInstAsmMatchConverter(TheDef->getValueAsBit(
                                      "UseInstAsmMatchConverter")) {}
 
@@ -608,12 +613,17 @@ struct MatchableInfo {
   void buildInstructionResultOperands();
   void buildAliasResultOperands(bool AliasConstraintsAreChecked);
 
-  /// operator< - Compare two matchables.
-  bool operator<(const MatchableInfo &RHS) const {
+  /// shouldBeMatchedBefore - Compare two matchables for ordering.
+  bool shouldBeMatchedBefore(const MatchableInfo &RHS, const CodeGenTarget &Target) const {
     // The primary comparator is the instruction mnemonic.
     if (int Cmp = Mnemonic.compare_insensitive(RHS.Mnemonic))
       return Cmp == -1;
 
+    // Sort by the resultant instruction's size, e.g. for ARM instructions
+    // we must choose the smallest matching instruction.
+    if (Target.getPreferSmallerInstructions() && ResInstSize != RHS.ResInstSize)
+      return ResInstSize < RHS.ResInstSize;
+
     if (AsmOperands.size() != RHS.AsmOperands.size())
       return AsmOperands.size() < RHS.AsmOperands.size();
 
@@ -652,7 +662,8 @@ struct MatchableInfo {
   /// couldMatchAmbiguouslyWith - Check whether this matchable could
   /// ambiguously match the same set of operands as \p RHS (without being a
   /// strictly superior match).
-  bool couldMatchAmbiguouslyWith(const MatchableInfo &RHS) const {
+  bool couldMatchAmbiguouslyWith(const MatchableInfo &RHS,
+                                 const CodeGenTarget &Target) const {
     // The primary comparator is the instruction mnemonic.
     if (Mnemonic != RHS.Mnemonic)
       return false;
@@ -661,6 +672,11 @@ struct MatchableInfo {
     if (AsmVariantID != RHS.AsmVariantID)
       return false;
 
+    // If smaller instructions are preferred, matchables with different
+    // instruction sizes can never be ambiguous, e.g. for ARM.
+    if (Target.getPreferSmallerInstructions() && ResInstSize != RHS.ResInstSize)
+      return false;
+
     // The number of operands is unambiguous.
     if (AsmOperands.size() != RHS.AsmOperands.size())
       return false;
@@ -3224,17 +3240,18 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
   // Sort the instruction table using the partial order on classes. We use
   // stable_sort to ensure that ambiguous instructions are still
   // deterministically ordered.
-  llvm::stable_sort(
-      Info.Matchables,
-      [](const std::unique_ptr<MatchableInfo> &a,
-         const std::unique_ptr<MatchableInfo> &b) { return *a < *b; });
+  llvm::stable_sort(Info.Matchables,
+                    [&Target](const std::unique_ptr<MatchableInfo> &a,
+                              const std::unique_ptr<MatchableInfo> &b) {
+                      return a->shouldBeMatchedBefore(*b, Target);
+                    });
 
 #ifdef EXPENSIVE_CHECKS
   // Verify that the table is sorted and operator < works transitively.
   for (auto I = Info.Matchables.begin(), E = Info.Matchables.end(); I != E;
        ++I) {
     for (auto J = I; J != E; ++J) {
-      assert(!(**J < **I));
+      assert(!((*J)->shouldBeMatchedBefore(**I, Target)));
     }
   }
 #endif
@@ -3253,7 +3270,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
         const MatchableInfo &A = **I;
         const MatchableInfo &B = **J;
 
-        if (A.couldMatchAmbiguouslyWith(B)) {
+        if (A.couldMatchAmbiguouslyWith(B, Target)) {
           errs() << "warning: ambiguous matchables:\n";
           A.dump();
           errs() << "\nis incomparable with:\n";
diff --git a/llvm/utils/TableGen/CodeGenTarget.cpp b/llvm/utils/TableGen/CodeGenTarget.cpp
index 980c9bdb6367f7..9dea19188b6f0e 100644
--- a/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -332,6 +332,10 @@ bool CodeGenTarget::getAllowRegisterRenaming() const {
   return TargetRec->getValueAsInt("AllowRegisterRenaming");
 }
 
+bool CodeGenTarget::getPreferSmallerInstructions() const {
+  return TargetRec->getValueAsBit("PreferSmallerInstructions");
+}
+
 /// getAsmParser - Return the AssemblyParser definition for this target.
 ///
 Record *CodeGenTarget::getAsmParser() const {
diff --git a/llvm/utils/TableGen/CodeGenTarget.h b/llvm/utils/TableGen/CodeGenTarget.h
index 2ae3a3a2204dd0..7e2dd43cbf1387 100644
--- a/llvm/utils/TableGen/CodeGenTarget.h
+++ b/llvm/utils/TableGen/CodeGenTarget.h
@@ -99,6 +99,11 @@ class CodeGenTarget {
   ///
   bool getAllowRegisterRenaming() const;
 
+  /// getPreferSmallerInstructions - Return the PreferSmallerInstructions
+  /// flag value for this target.
+  ///
+  bool getPreferSmallerInstructions() const;
+
   /// getAsmParser - Return the AssemblyParser definition for this target.
   ///
   Record *getAsmParser() const;

>From 083ff708eeb2da6dee8454a39e771bb36e46b529 Mon Sep 17 00:00:00 2001
From: Alfie Richards <alfie.richards at arm.com>
Date: Fri, 1 Mar 2024 15:30:40 +0000
Subject: [PATCH 2/3] [TableGen] Bug fix for tied optional operands resolution

---
 llvm/utils/TableGen/AsmMatcherEmitter.cpp | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index 1563349f2fae53..3f18c3936a38bc 100644
--- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -2002,9 +2002,9 @@ emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
   }
   CvtOS << "  assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n";
   CvtOS << "  const uint8_t *Converter = ConversionTable[Kind];\n";
-  CvtOS << "  unsigned OpIdx;\n";
   CvtOS << "  Inst.setOpcode(Opcode);\n";
   CvtOS << "  for (const uint8_t *p = Converter; *p; p += 2) {\n";
+  CvtOS << "    unsigned OpIdx;\n";
   if (HasOptionalOperands) {
     // When optional operands are involved, formal and actual operand indices
     // may differ. Map the former to the latter by subtracting the number of
@@ -2015,16 +2015,17 @@ emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
   }
   CvtOS << "    switch (*p) {\n";
   CvtOS << "    default: llvm_unreachable(\"invalid conversion entry!\");\n";
-  CvtOS << "    case CVT_Reg:\n";
+  CvtOS << "    case CVT_Reg:{\n";
   CvtOS << "      static_cast<" << TargetOperandClass
         << " &>(*Operands[OpIdx]).addRegOperands(Inst, 1);\n";
   CvtOS << "      break;\n";
+  CvtOS << "    }\n";
   CvtOS << "    case CVT_Tied: {\n";
-  CvtOS << "      assert(OpIdx < (size_t)(std::end(TiedAsmOperandTable) -\n";
+  CvtOS << "      assert(*(p + 1) < (size_t)(std::end(TiedAsmOperandTable) -\n";
   CvtOS
       << "                              std::begin(TiedAsmOperandTable)) &&\n";
   CvtOS << "             \"Tied operand not found\");\n";
-  CvtOS << "      unsigned TiedResOpnd = TiedAsmOperandTable[OpIdx][0];\n";
+  CvtOS << "      unsigned TiedResOpnd = TiedAsmOperandTable[*(p + 1)][0];\n";
   CvtOS << "      if (TiedResOpnd != (uint8_t)-1)\n";
   CvtOS << "        Inst.addOperand(Inst.getOperand(TiedResOpnd));\n";
   CvtOS << "      break;\n";

>From 99197384924a957fc57eb66368aab97b0c3352c2 Mon Sep 17 00:00:00 2001
From: Alfie Richards <alfie.richards at arm.com>
Date: Fri, 1 Mar 2024 14:46:05 +0000
Subject: [PATCH 3/3] [ARM][TableGen][MC] Change the ARM mnemonic operands to
 be optional for ASM parsing

This changes the way the assembly matcher works for AArch32 parsing.
Previously there was a pile of hacks which dictated whether the CC
and CCOut operands should be present, which de-facto chose whether the
wide/narrow (or thumb1/thumb2/arm) instruction version was chosen.

This meant much of the TableGen machinery present for the assembly
matching was effectively being bypassed and worked around.

This patch makes the CC and CCOut operands optional, which allows
the ASM matcher to operate as it was designed and means we can avoid
doing some of the hacks done previously. This also adds the option
for the target to allow prioritizing the smaller instruction
encodings, as is required for AArch32.
---
 llvm/include/llvm/Target/Target.td            |   11 +-
 llvm/lib/Target/ARM/ARM.td                    |    1 +
 llvm/lib/Target/ARM/ARMInstrFormats.td        |   17 +-
 .../lib/Target/ARM/AsmParser/ARMAsmParser.cpp | 1153 +++++++++--------
 llvm/test/MC/ARM/arm-branch-errors.s          |    4 +-
 llvm/test/MC/ARM/arm-reg-addr-errors.s        |   16 +-
 llvm/test/MC/ARM/arm11-hint-instr.s           |    8 +-
 llvm/test/MC/ARM/cps.s                        |    2 +-
 llvm/test/MC/ARM/d16.s                        |    2 +-
 .../MC/ARM/directive-arch_extension-crypto.s  |   15 +-
 llvm/test/MC/ARM/lsl-zero-errors.s            |    8 +-
 llvm/test/MC/ARM/mve-misc.s                   |   55 +-
 llvm/test/MC/ARM/negative-immediates-fail.s   |   10 +-
 llvm/test/MC/ARM/not-armv4.s                  |   14 +-
 llvm/test/MC/ARM/register-token-source-loc.s  |    3 +
 llvm/test/MC/ARM/tMOVSr.s                     |    5 +-
 llvm/test/MC/ARM/thumb-diagnostics.s          |   33 +-
 llvm/test/MC/ARM/thumb-mov.s                  |   16 +-
 llvm/test/MC/ARM/thumb2-diagnostics.s         |    4 +-
 llvm/utils/TableGen/AsmMatcherEmitter.cpp     |   24 +-
 20 files changed, 798 insertions(+), 603 deletions(-)

diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td
index 929a3f799d3a8d..dea560eab44ffc 100644
--- a/llvm/include/llvm/Target/Target.td
+++ b/llvm/include/llvm/Target/Target.td
@@ -948,9 +948,18 @@ class AsmOperandClass {
   /// error will be suppressed if all of the remaining unmatched operands are
   /// marked as IsOptional.
   ///
-  /// Optional arguments must be at the end of the operand list.
+  /// Note: Optional arguments have caveats regarding custom operand parsing
+  /// if they are not at the end of this list. See below.
   bit IsOptional = false;
 
+  // FIXME: Ideally this would not be necessary; however, removing it would
+  // involve interleaving the parsing and matching processes.
+  /// Set to 1 if the parser should assume this operand will always be present
+  /// for the sake of calculating the operand index, with regard to which
+  /// custom operand parser should be used.
+  /// This is only used for custom operands that are not at the end of the instruction.
+  bit OptionalShouldOffsetCustomParsers = true;
+
   /// The name of the method on the target specific asm parser that returns the
   /// default operand for this optional operand. This method is only used if
   /// IsOptional == 1. If not set, this will default to "defaultFooOperands",
diff --git a/llvm/lib/Target/ARM/ARM.td b/llvm/lib/Target/ARM/ARM.td
index b62e1a032631fd..7472970b7f283b 100644
--- a/llvm/lib/Target/ARM/ARM.td
+++ b/llvm/lib/Target/ARM/ARM.td
@@ -1747,4 +1747,5 @@ def ARM : Target {
   let AssemblyParsers = [ARMAsmParser];
   let AssemblyParserVariants = [ARMAsmParserVariant];
   let AllowRegisterRenaming = 1;
+  let PreferSmallerInstructions = true;
 }
diff --git a/llvm/lib/Target/ARM/ARMInstrFormats.td b/llvm/lib/Target/ARM/ARMInstrFormats.td
index 14e315534570d2..6e0466f1bc11bd 100644
--- a/llvm/lib/Target/ARM/ARMInstrFormats.td
+++ b/llvm/lib/Target/ARM/ARMInstrFormats.td
@@ -155,7 +155,13 @@ def iflags_op : Operand<i32> {
 
 // ARM Predicate operand. Default to 14 = always (AL). Second part is CC
 // register whose default is 0 (no register).
-def CondCodeOperand : AsmOperandClass { let Name = "CondCode"; }
+def CondCodeOperand : AsmOperandClass {
+  let Name = "CondCode";
+  let PredicateMethod = "isCondCode";
+  let DefaultMethod = "defaultCondCodeOp";
+  let IsOptional = true;
+  let OptionalShouldOffsetCustomParsers = false;
+}
 def pred : PredicateOperand<OtherVT, (ops i32imm, i32imm),
                                      (ops (i32 14), (i32 zero_reg))> {
   let PrintMethod = "printPredicateOperand";
@@ -174,7 +180,12 @@ def cmovpred : Operand<i32>, PredicateOp,
 }
 
 // Conditional code result for instructions whose 's' bit is set, e.g. subs.
-def CCOutOperand : AsmOperandClass { let Name = "CCOut"; }
+def CCOutOperand : AsmOperandClass {
+  let Name = "CCOut";
+  let DefaultMethod = "defaultCCOutOp";
+  let IsOptional = true;
+  let OptionalShouldOffsetCustomParsers = false;
+}
 def cc_out : OptionalDefOperand<OtherVT, (ops CCR), (ops (i32 zero_reg))> {
   let EncoderMethod = "getCCOutOpValue";
   let PrintMethod = "printSBitModifierOperand";
@@ -468,7 +479,7 @@ class InstThumb<AddrMode am, int sz, IndexMode im,
 // These are aliases that require C++ handling to convert to the target
 // instruction, while InstAliases can be handled directly by tblgen.
 class AsmPseudoInst<string asm, dag iops, dag oops = (outs)>
-  : InstTemplate<AddrModeNone, 0, IndexModeNone, Pseudo, GenericDomain,
+  : InstTemplate<AddrModeNone, 4, IndexModeNone, Pseudo, GenericDomain,
                  "", NoItinerary> {
   let OutOperandList = oops;
   let InOperandList = iops;
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index efec163c6ed634..068150929898ea 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -18,6 +18,7 @@
 #include "llvm/ADT/APFloat.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallBitVector.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringMap.h"
@@ -47,6 +48,7 @@
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/SMLoc.h"
@@ -79,6 +81,7 @@ extern const ARMInstrTable ARMDescs;
 } // end namespace llvm
 
 namespace {
+class ARMOperand;
 
 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
 
@@ -446,9 +449,10 @@ class ARMAsmParser : public MCTargetAsmParser {
   }
 
   bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
-                           unsigned ListNo, bool IsARPop = false);
+                           unsigned MnemonicOpsEndInd, unsigned ListIndex,
+                           bool IsARPop = false);
   bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
-                           unsigned ListNo);
+                           unsigned MnemonicOpsEndInd, unsigned ListIndex);
 
   int tryParseRegister();
   bool tryParseRegisterWithWriteBack(OperandVector &);
@@ -504,6 +508,9 @@ class ARMAsmParser : public MCTargetAsmParser {
   bool parseDirectiveSEHEpilogEnd(SMLoc L);
   bool parseDirectiveSEHCustom(SMLoc L);
 
+  std::unique_ptr<ARMOperand> defaultCondCodeOp();
+  std::unique_ptr<ARMOperand> defaultCCOutOp();
+
   bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
   StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
                           ARMCC::CondCodes &PredicationCode,
@@ -516,9 +523,13 @@ class ARMAsmParser : public MCTargetAsmParser {
                              bool &CanAcceptVPTPredicationCode);
   bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);
 
-  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
-                                     OperandVector &Operands);
-  bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);
+  void tryConvertingToTwoOperandForm(StringRef Mnemonic,
+                                     ARMCC::CondCodes PredicationCode,
+                                     bool CarrySetting, OperandVector &Operands,
+                                     unsigned MnemonicOpsEndInd);
+
+  bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands,
+                                unsigned MnemonicOpsEndInd);
 
   bool isThumb() const {
     // FIXME: Can tablegen auto-generate this?
@@ -658,15 +669,20 @@ class ARMAsmParser : public MCTargetAsmParser {
   void cvtThumbBranches(MCInst &Inst, const OperandVector &);
   void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
 
-  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
-  bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
-  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
-  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
-  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
+  bool validateInstruction(MCInst &Inst, const OperandVector &Ops,
+                           unsigned MnemonicOpsEndInd);
+  bool processInstruction(MCInst &Inst, const OperandVector &Ops,
+                          unsigned MnemonicOpsEndInd, MCStreamer &Out);
+  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic,
+                                        OperandVector &Operands,
+                                        unsigned MnemonicOpsEndInd);
   bool isITBlockTerminator(MCInst &Inst) const;
-  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
-  bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
-                        bool Load, bool ARMMode, bool Writeback);
+
+  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands,
+                         unsigned MnemonicOpsEndInd);
+  bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands, bool Load,
+                        bool ARMMode, bool Writeback,
+                        unsigned MnemonicOpsEndInd);
 
 public:
   enum ARMMatchResultTy {
@@ -676,6 +692,7 @@ class ARMAsmParser : public MCTargetAsmParser {
     Match_RequiresThumb2,
     Match_RequiresV8,
     Match_RequiresFlagSetting,
+    Match_RequiresDestinationRegisterMatchASourceRegister,
 #define GET_OPERAND_DIAGNOSTIC_TYPES
 #include "ARMGenAsmMatcher.inc"
 
@@ -715,6 +732,9 @@ class ARMAsmParser : public MCTargetAsmParser {
   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                       unsigned Kind) override;
   unsigned checkTargetMatchPredicate(MCInst &Inst) override;
+  unsigned
+  checkEarlyTargetMatchPredicate(MCInst &Inst,
+                                 const OperandVector &Operands) override;
 
   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                OperandVector &Operands, MCStreamer &Out,
@@ -4053,6 +4073,63 @@ static MCRegister MatchRegisterName(StringRef Name);
 
 /// }
 
+static const DenseSet<StringRef> DataTypes{
+    ".8",  ".16",  ".32",  ".64",  ".i8", ".i16", ".i32", ".i64",
+    ".u8", ".u16", ".u32", ".u64", ".s8", ".s16", ".s32", ".s64",
+    ".p8", ".p16", ".f32", ".f64", ".f",  ".d"};
+static bool isDataTypeToken(StringRef Tok) { return DataTypes.contains(Tok); }
+
+static unsigned getMnemonicOpsEndInd(const OperandVector &Operands) {
+  unsigned MnemonicOpsEndInd = 1;
+  // Special case for CPS, which has a mnemonic side token possibly storing
+  // the IE/ID variant
+  if (Operands[0]->isToken() &&
+      static_cast<ARMOperand &>(*Operands[0]).getToken() == "cps") {
+    if (Operands.size() > 1 && Operands[1]->isImm() &&
+        static_cast<ARMOperand &>(*Operands[1]).getImm()->getKind() ==
+            llvm::MCExpr::Constant &&
+        (dyn_cast<MCConstantExpr>(
+             static_cast<ARMOperand &>(*Operands[1]).getImm())
+                 ->getValue() == ARM_PROC::IE ||
+         dyn_cast<MCConstantExpr>(
+             static_cast<ARMOperand &>(*Operands[1]).getImm())
+                 ->getValue() == ARM_PROC::ID))
+      ++MnemonicOpsEndInd;
+  }
+
+  // In some circumstances the condition code moves to the right
+  bool RHSCondCode = false;
+  while (MnemonicOpsEndInd < Operands.size()) {
+    auto Op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
+    // Special case for it instructions which have a condition code on the RHS
+    if (Op.isITMask()) {
+      RHSCondCode = true;
+      MnemonicOpsEndInd++;
+    }
+    // Special case for data-type tokens, after which any cond code is on the RHS
+    else if (Op.isToken() &&
+             (
+                 // There are several special cases not covered by
+                 // isDataTypeToken
+                 Op.getToken() == ".w" || Op.getToken() == ".bf16" ||
+                 Op.getToken() == ".p64" || Op.getToken() == ".f16" ||
+                 isDataTypeToken(Op.getToken()))) {
+      // In the mnemonic operators the cond code must always precede the data
+      // type. So we can now safely assume any subsequent cond code is on the
+      // RHS, as is the case for VCMP and VPT.
+      RHSCondCode = true;
+      MnemonicOpsEndInd++;
+    }
+    // Skip all mnemonic operator types
+    else if (Op.isCCOut() || (Op.isCondCode() && !RHSCondCode) ||
+             Op.isVPTPred() || (Op.isToken() && Op.getToken() == ".w"))
+      MnemonicOpsEndInd++;
+    else
+      break;
+  }
+  return MnemonicOpsEndInd;
+}
+
 bool ARMAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
                                  SMLoc &EndLoc) {
   const AsmToken &Tok = getParser().getTok();
@@ -5598,37 +5675,86 @@ ParseStatus ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
   return ParseStatus::Success;
 }
 
+// Finds the index of the first CondCode operand; returns 0 if there is none.
+unsigned findCondCodeInd(const OperandVector &Operands,
+                         unsigned MnemonicOpsEndInd) {
+  for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
+    auto Op = static_cast<ARMOperand &>(*Operands[I]);
+    if (Op.isCondCode())
+      return I;
+  }
+  return 0;
+}
+
+unsigned findCCOutInd(const OperandVector &Operands,
+                      unsigned MnemonicOpsEndInd) {
+  for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
+    auto Op = static_cast<ARMOperand &>(*Operands[I]);
+    if (Op.isCCOut())
+      return I;
+  }
+  return 0;
+}
+
 /// Convert parsed operands to MCInst.  Needed here because this instruction
 /// only has two register operands, but multiplication is commutative so
 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
                                     const OperandVector &Operands) {
-  ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
-  ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
-  // If we have a three-operand form, make sure to set Rn to be the operand
-  // that isn't the same as Rd.
-  unsigned RegOp = 4;
-  if (Operands.size() == 6 &&
-      ((ARMOperand &)*Operands[4]).getReg() ==
-          ((ARMOperand &)*Operands[3]).getReg())
-    RegOp = 5;
-  ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
-  Inst.addOperand(Inst.getOperand(0));
-  ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
+  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
+  unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
+  unsigned CondOutI = findCCOutInd(Operands, MnemonicOpsEndInd);
+
+  // 2 operand form
+  unsigned RegRd = MnemonicOpsEndInd;
+  unsigned RegRn = MnemonicOpsEndInd + 1;
+  unsigned RegRm = MnemonicOpsEndInd;
+
+  if (Operands.size() == MnemonicOpsEndInd + 3) {
+    // If we have a three-operand form, make sure to set Rn to be the operand
+    // that isn't the same as Rd.
+    if (((ARMOperand &)*Operands[RegRd]).getReg() ==
+        ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1]).getReg()) {
+      RegRn = MnemonicOpsEndInd + 2;
+      RegRm = MnemonicOpsEndInd + 1;
+    } else {
+      RegRn = MnemonicOpsEndInd + 1;
+      RegRm = MnemonicOpsEndInd + 2;
+    }
+  }
+
+  // Rd
+  ((ARMOperand &)*Operands[RegRd]).addRegOperands(Inst, 1);
+  // CCOut
+  if (CondOutI != 0) {
+    ((ARMOperand &)*Operands[CondOutI]).addCCOutOperands(Inst, 1);
+  } else {
+    ARMOperand Op = *ARMOperand::CreateCCOut(0, Operands[0]->getEndLoc());
+    Op.addCCOutOperands(Inst, 1);
+  }
+  // Rn
+  ((ARMOperand &)*Operands[RegRn]).addRegOperands(Inst, 1);
+  // Rm
+  ((ARMOperand &)*Operands[RegRm]).addRegOperands(Inst, 1);
+
+  // Cond code
+  if (CondI != 0) {
+    ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
+  } else {
+    ARMOperand Op =
+        *ARMOperand::CreateCondCode(llvm::ARMCC::AL, Operands[0]->getEndLoc());
+    Op.addCondCodeOperands(Inst, 2);
+  }
 }
 
 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
                                     const OperandVector &Operands) {
-  int CondOp = -1, ImmOp = -1;
-  switch(Inst.getOpcode()) {
-    case ARM::tB:
-    case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;
-
-    case ARM::t2B:
-    case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
+  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
+  unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
+  unsigned Cond =
+      (CondI == 0 ? ARMCC::AL
+                  : static_cast<ARMOperand &>(*Operands[CondI]).getCondCode());
 
-    default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
-  }
   // first decide whether or not the branch should be conditional
   // by looking at it's location relative to an IT block
   if(inITBlock()) {
@@ -5639,9 +5765,6 @@ void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
       case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
     }
   } else {
-    // outside IT blocks we can only have unconditional branches with AL
-    // condition code or conditional branches with non-AL condition code
-    unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
     switch(Inst.getOpcode()) {
       case ARM::tB:
       case ARM::tBcc:
@@ -5658,36 +5781,56 @@ void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
   switch(Inst.getOpcode()) {
     // classify tB as either t2B or t1B based on range of immediate operand
     case ARM::tB: {
-      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
+      ARMOperand &op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
       if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
         Inst.setOpcode(ARM::t2B);
       break;
     }
     // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
     case ARM::tBcc: {
-      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
+      ARMOperand &op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
       if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
         Inst.setOpcode(ARM::t2Bcc);
       break;
     }
   }
-  ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
-  ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
+  ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addImmOperands(Inst, 1);
+  if (CondI != 0) {
+    ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
+  } else {
+    ARMOperand Op =
+        *ARMOperand::CreateCondCode(llvm::ARMCC::AL, Operands[0]->getEndLoc());
+    Op.addCondCodeOperands(Inst, 2);
+  }
 }
 
 void ARMAsmParser::cvtMVEVMOVQtoDReg(
   MCInst &Inst, const OperandVector &Operands) {
 
-  // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
-  assert(Operands.size() == 8);
+  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
+  unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
 
-  ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt
-  ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2
-  ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd
-  ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx
+  // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
+  assert(Operands.size() == MnemonicOpsEndInd + 6);
+
+  ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addRegOperands(Inst, 1); // Rt
+  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1])
+      .addRegOperands(Inst, 1); // Rt2
+  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 2])
+      .addRegOperands(Inst, 1); // Qd
+  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 3])
+      .addMVEPairVectorIndexOperands(Inst, 1); // idx
   // skip second copy of Qd in Operands[6]
-  ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2
-  ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code
+  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 5])
+      .addMVEPairVectorIndexOperands(Inst, 1); // idx2
+  if (CondI != 0) {
+    ((ARMOperand &)*Operands[CondI])
+        .addCondCodeOperands(Inst, 2); // condition code
+  } else {
+    ARMOperand Op =
+        *ARMOperand::CreateCondCode(ARMCC::AL, Operands[0]->getEndLoc());
+    Op.addCondCodeOperands(Inst, 2);
+  }
 }
 
 /// Parse an ARM memory expression, return false if successful else return true
@@ -5972,10 +6115,19 @@ ParseStatus ARMAsmParser::parseFPImm(OperandVector &Operands) {
   // integer constant. Make sure we don't try to parse an FPImm
   // for these:
   // vmov.i{8|16|32|64} <dreg|qreg>, #imm
-  ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
-  bool isVmovf = TyOp.isToken() &&
-                 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
-                  TyOp.getToken() == ".f16");
+
+  bool isVmovf = false;
+  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
+  for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
+    ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[I]);
+    if (TyOp.isToken() &&
+        (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
+         TyOp.getToken() == ".f16")) {
+      isVmovf = true;
+      break;
+    }
+  }
+
   ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
   bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
                                          Mnemonic.getToken() == "fconsts");
@@ -6488,18 +6640,30 @@ void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
     CanAcceptPredicationCode = true;
 }
 
+bool operandsContainWide(OperandVector &Operands, unsigned MnemonicOpsEndInd) {
+  for (unsigned I = 0; I < MnemonicOpsEndInd; ++I) {
+    auto &Op = static_cast<ARMOperand &>(*Operands[I]);
+    if (Op.isToken() && Op.getToken() == ".w")
+      return true;
+  }
+  return false;
+}
+
 // Some Thumb instructions have two operand forms that are not
 // available as three operand, convert to two operand form if possible.
 //
 // FIXME: We would really like to be able to tablegen'erate this.
-void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
-                                                 bool CarrySetting,
-                                                 OperandVector &Operands) {
-  if (Operands.size() != 6)
+void ARMAsmParser::tryConvertingToTwoOperandForm(
+    StringRef Mnemonic, ARMCC::CondCodes PredicationCode, bool CarrySetting,
+    OperandVector &Operands, unsigned MnemonicOpsEndInd) {
+
+  if (operandsContainWide(Operands, MnemonicOpsEndInd))
+    return;
+  if (Operands.size() != MnemonicOpsEndInd + 3)
     return;
 
-  const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
-        auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
+  const auto &Op3 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
+  auto &Op4 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
   if (!Op3.isReg() || !Op4.isReg())
     return;
 
@@ -6510,7 +6674,7 @@ void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
   // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
   // won't accept SP or PC so we do the transformation here taking care
   // with immediate range in the 'add sp, sp #imm' case.
-  auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
+  auto &Op5 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 2]);
   if (isThumbTwo()) {
     if (Mnemonic != "add")
       return;
@@ -6570,7 +6734,7 @@ void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
   if (Transform) {
     if (Swap)
       std::swap(Op4, Op5);
-    Operands.erase(Operands.begin() + 3);
+    Operands.erase(Operands.begin() + MnemonicOpsEndInd);
   }
 }
 
@@ -6595,183 +6759,9 @@ static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp) {
   return false;
 }
 
-bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
-                                          OperandVector &Operands) {
-  // FIXME: This is all horribly hacky. We really need a better way to deal
-  // with optional operands like this in the matcher table.
-
-  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
-  // another does not. Specifically, the MOVW instruction does not. So we
-  // special case it here and remove the defaulted (non-setting) cc_out
-  // operand if that's the instruction we're trying to match.
-  //
-  // We do this as post-processing of the explicit operands rather than just
-  // conditionally adding the cc_out in the first place because we need
-  // to check the type of the parsed immediate operand.
-  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
-      !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
-      static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
-      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
-    return true;
-
-  if (Mnemonic == "movs" && Operands.size() > 3 && isThumb() &&
-      isThumbI8Relocation(*Operands[3]))
-    return true;
-
-  // Register-register 'add' for thumb does not have a cc_out operand
-  // when there are only two register operands.
-  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
-      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
-    return true;
-  // Register-register 'add' for thumb does not have a cc_out operand
-  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
-  // have to check the immediate range here since Thumb2 has a variant
-  // that can handle a different range and has a cc_out operand.
-  if (((isThumb() && Mnemonic == "add") ||
-       (isThumbTwo() && Mnemonic == "sub")) &&
-      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
-      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
-      ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
-       static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
-    return true;
-  // For Thumb2, add/sub immediate does not have a cc_out operand for the
-  // imm0_4095 variant. That's the least-preferred variant when
-  // selecting via the generic "add" mnemonic, so to know that we
-  // should remove the cc_out operand, we have to explicitly check that
-  // it's not one of the other variants. Ugh.
-  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
-      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
-    // Nest conditions rather than one big 'if' statement for readability.
-    //
-    // If both registers are low, we're in an IT block, and the immediate is
-    // in range, we should use encoding T1 instead, which has a cc_out.
-    if (inITBlock() &&
-        isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
-        isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
-        static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
-      return false;
-    // Check against T3. If the second register is the PC, this is an
-    // alternate form of ADR, which uses encoding T4, so check for that too.
-    if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
-        (static_cast<ARMOperand &>(*Operands[5]).isT2SOImm() ||
-         static_cast<ARMOperand &>(*Operands[5]).isT2SOImmNeg()))
-      return false;
-
-    // Otherwise, we use encoding T4, which does not have a cc_out
-    // operand.
-    return true;
-  }
-
-  // The thumb2 multiply instruction doesn't have a CCOut register, so
-  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
-  // use the 16-bit encoding or not.
-  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
-      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
-      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[5]).isReg() &&
-      // If the registers aren't low regs, the destination reg isn't the
-      // same as one of the source regs, or the cc_out operand is zero
-      // outside of an IT block, we have to use the 32-bit encoding, so
-      // remove the cc_out operand.
-      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
-       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
-       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
-       !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
-                            static_cast<ARMOperand &>(*Operands[5]).getReg() &&
-                        static_cast<ARMOperand &>(*Operands[3]).getReg() !=
-                            static_cast<ARMOperand &>(*Operands[4]).getReg())))
-    return true;
-
-  // Also check the 'mul' syntax variant that doesn't specify an explicit
-  // destination register.
-  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
-      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
-      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
-      // If the registers aren't low regs  or the cc_out operand is zero
-      // outside of an IT block, we have to use the 32-bit encoding, so
-      // remove the cc_out operand.
-      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
-       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
-       !inITBlock()))
-    return true;
-
-  // Register-register 'add/sub' for thumb does not have a cc_out operand
-  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
-  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
-  // right, this will result in better diagnostics (which operand is off)
-  // anyway.
-  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
-      (Operands.size() == 5 || Operands.size() == 6) &&
-      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
-      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
-      (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
-       (Operands.size() == 6 &&
-        static_cast<ARMOperand &>(*Operands[5]).isImm()))) {
-    // Thumb2 (add|sub){s}{p}.w GPRnopc, sp, #{T2SOImm} has cc_out
-    return (!(isThumbTwo() &&
-              (static_cast<ARMOperand &>(*Operands[4]).isT2SOImm() ||
-               static_cast<ARMOperand &>(*Operands[4]).isT2SOImmNeg())));
-  }
-  // Fixme: Should join all the thumb+thumb2 (add|sub) in a single if case
-  // Thumb2 ADD r0, #4095 -> ADDW r0, r0, #4095 (T4)
-  // Thumb2 SUB r0, #4095 -> SUBW r0, r0, #4095
-  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
-      (Operands.size() == 5) &&
-      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::SP &&
-      static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::PC &&
-      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
-      static_cast<ARMOperand &>(*Operands[4]).isImm()) {
-    const ARMOperand &IMM = static_cast<ARMOperand &>(*Operands[4]);
-    if (IMM.isT2SOImm() || IMM.isT2SOImmNeg())
-      return false; // add.w / sub.w
-    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IMM.getImm())) {
-      const int64_t Value = CE->getValue();
-      // Thumb1 imm8 sub / add
-      if ((Value < ((1 << 7) - 1) << 2) && inITBlock() && (!(Value & 3)) &&
-          isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()))
-        return false;
-      return true; // Thumb2 T4 addw / subw
-    }
-  }
-  return false;
-}
-
-bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
-                                              OperandVector &Operands) {
-  // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
-  unsigned RegIdx = 3;
-  if ((((Mnemonic == "vrintz" || Mnemonic == "vrintx") && !hasMVE()) ||
-      Mnemonic == "vrintr") &&
-      (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
-       static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
-    if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
-        (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
-         static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
-      RegIdx = 4;
-
-    if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
-        (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
-             static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
-         ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
-             static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
-      return true;
-  }
-  return false;
-}
-
-bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
-                                                    OperandVector &Operands) {
-  if (!hasMVE() || Operands.size() < 3)
+bool ARMAsmParser::shouldOmitVectorPredicateOperand(
+    StringRef Mnemonic, OperandVector &Operands, unsigned MnemonicOpsEndInd) {
+  if (!hasMVE() || Operands.size() < MnemonicOpsEndInd)
     return true;
 
   if (Mnemonic.starts_with("vld2") || Mnemonic.starts_with("vld4") ||
@@ -6802,7 +6792,7 @@ bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
       // outside of the allowed range.
       if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
           (Operand->isReg() &&
-           (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
+           (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
              Operand->getReg()))))
         return false;
     }
@@ -6810,15 +6800,6 @@ bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
   }
 }
 
-static bool isDataTypeToken(StringRef Tok) {
-  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
-    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
-    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
-    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
-    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
-    Tok == ".f" || Tok == ".d";
-}
-
 // FIXME: This bit should probably be handled via an explicit match class
 // in the .td files that matches the suffix instead of having it be
 // a literal string token the way it is now.
@@ -6839,14 +6820,15 @@ static void applyMnemonicAliases(StringRef &Mnemonic,
 // bail out, and let the assembly parser report an error on the instruction as
 // it is written.
 void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
-                                     OperandVector &Operands) {
+                                     OperandVector &Operands,
+                                     unsigned MnemonicOpsEndInd) {
   if (Mnemonic != "ldrd" && Mnemonic != "strd")
     return;
-  if (Operands.size() < 4)
+  if (Operands.size() < MnemonicOpsEndInd + 2)
     return;
 
-  ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
-  ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
+  ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
+  ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
 
   if (!Op2.isReg())
     return;
@@ -6871,7 +6853,7 @@ void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
     return;
 
   Operands.insert(
-      Operands.begin() + 3,
+      Operands.begin() + MnemonicOpsEndInd + 1,
       ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
 }
 
@@ -6881,19 +6863,17 @@ void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
 // operand. If the conversion fails an error is diagnosed, and the function
 // returns true.
 bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
-                                            OperandVector &Operands) {
+                                            OperandVector &Operands,
+                                            unsigned MnemonicOpsEndInd) {
   assert(MS.isCDEDualRegInstr(Mnemonic));
-  bool isPredicable =
-      Mnemonic == "cx1da" || Mnemonic == "cx2da" || Mnemonic == "cx3da";
-  size_t NumPredOps = isPredicable ? 1 : 0;
 
-  if (Operands.size() <= 3 + NumPredOps)
+  if (Operands.size() < 2 + MnemonicOpsEndInd)
     return false;
 
   StringRef Op2Diag(
       "operand must be an even-numbered register in the range [r0, r10]");
 
-  const MCParsedAsmOperand &Op2 = *Operands[2 + NumPredOps];
+  const MCParsedAsmOperand &Op2 = *Operands[MnemonicOpsEndInd + 1];
   if (!Op2.isReg())
     return Error(Op2.getStartLoc(), Op2Diag);
 
@@ -6928,16 +6908,43 @@ bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
     break;
   }
 
-  const MCParsedAsmOperand &Op3 = *Operands[3 + NumPredOps];
+  const MCParsedAsmOperand &Op3 = *Operands[MnemonicOpsEndInd + 2];
   if (!Op3.isReg() || Op3.getReg() != RNext)
     return Error(Op3.getStartLoc(), "operand must be a consecutive register");
 
-  Operands.erase(Operands.begin() + 3 + NumPredOps);
-  Operands[2 + NumPredOps] =
+  Operands.erase(Operands.begin() + MnemonicOpsEndInd + 2);
+  Operands[MnemonicOpsEndInd + 1] =
       ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc());
   return false;
 }
 
+void removeCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
+  for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
+    if (static_cast<ARMOperand &>(*Operands[I]).isCondCode()) {
+      Operands.erase(Operands.begin() + I);
+      --MnemonicOpsEndInd;
+      break;
+    }
+}
+
+void removeCCOut(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
+  for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
+    if (static_cast<ARMOperand &>(*Operands[I]).isCCOut()) {
+      Operands.erase(Operands.begin() + I);
+      --MnemonicOpsEndInd;
+      break;
+    }
+}
+
+void removeVPTCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
+  for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
+    if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred()) {
+      Operands.erase(Operands.begin() + I);
+      --MnemonicOpsEndInd;
+      break;
+    }
+}
+
 /// Parse an arm instruction mnemonic followed by its operands.
 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                                     SMLoc NameLoc, OperandVector &Operands) {
@@ -7118,6 +7125,11 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
     }
   }
 
+  // This marks the end of the LHS mnemonic operands.
+  // It is used for indexing into the non-mnemonic operands, as some of the
+  // mnemonic operands are optional and therefore indexes can differ.
+  unsigned MnemonicOpsEndInd = Operands.size();
+
   // Read the remaining operands.
   if (getLexer().isNot(AsmToken::EndOfStatement)) {
     // Read the first operand.
@@ -7136,7 +7148,17 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
     return true;
 
-  tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
+  // Remove the optional tokens if they are the default values
+  // This can only be done after parsing as the index of the operands
+  //  is used for custom parsers.
+  // CCOut
+  if (!CarrySetting && CanAcceptCarrySet)
+    removeCCOut(Operands, MnemonicOpsEndInd);
+  if (PredicationCode == llvm::ARMCC::AL && CanAcceptPredicationCode)
+    removeCondCode(Operands, MnemonicOpsEndInd);
+
+  tryConvertingToTwoOperandForm(Mnemonic, PredicationCode, CarrySetting,
+                                Operands, MnemonicOpsEndInd);
 
   if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
     // Dual-register instructions use even-odd register pairs as their
@@ -7147,33 +7169,16 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
     // returns false, the function either succeeded or an error (e.g. missing
     // operand) will be diagnosed elsewhere.
     if (MS.isCDEDualRegInstr(Mnemonic)) {
-      bool GotError = CDEConvertDualRegOperand(Mnemonic, Operands);
+      bool GotError =
+          CDEConvertDualRegOperand(Mnemonic, Operands, MnemonicOpsEndInd);
       if (GotError)
         return GotError;
     }
   }
 
-  // Some instructions, mostly Thumb, have forms for the same mnemonic that
-  // do and don't have a cc_out optional-def operand. With some spot-checks
-  // of the operand list, we can figure out which variant we're trying to
-  // parse and adjust accordingly before actually matching. We shouldn't ever
-  // try to remove a cc_out operand that was explicitly set on the
-  // mnemonic, of course (CarrySetting == true). Reason number #317 the
-  // table driven matcher doesn't fit well with the ARM instruction set.
-  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
-    Operands.erase(Operands.begin() + 1);
-
-  // Some instructions have the same mnemonic, but don't always
-  // have a predicate. Distinguish them here and delete the
-  // appropriate predicate if needed.  This could be either the scalar
-  // predication code or the vector predication code.
-  if (PredicationCode == ARMCC::AL &&
-      shouldOmitPredicateOperand(Mnemonic, Operands))
-    Operands.erase(Operands.begin() + 1);
-
-
   if (hasMVE()) {
-    if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) &&
+    if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
+                                          MnemonicOpsEndInd) &&
         Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
       // Very nasty hack to deal with the vector predicated variant of vmovlt
       // the scalar predicated vmov with condition 'lt'.  We can not tell them
@@ -7188,7 +7193,8 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
       Operands.insert(Operands.begin(),
                       ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
     } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
-               !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
+               !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
+                                                 MnemonicOpsEndInd)) {
       // Another nasty hack to deal with the ambiguity between vcvt with scalar
       // predication 'ne' and vcvtn with vector predication 'e'.  As above we
       // can only distinguish between the two after we have parsed their
@@ -7203,11 +7209,12 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
       Operands.insert(Operands.begin(),
                       ARMOperand::CreateToken(StringRef("vcvtn"), MLoc));
     } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
-               !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
+               !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
+                                                 MnemonicOpsEndInd)) {
       // Another hack, this time to distinguish between scalar predicated vmul
       // with 'lt' predication code and the vector instruction vmullt with
       // vector predication code "none"
-      Operands.erase(Operands.begin() + 1);
+      removeCondCode(Operands, MnemonicOpsEndInd);
       Operands.erase(Operands.begin());
       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
       Operands.insert(Operands.begin(),
@@ -7223,16 +7230,19 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
               !Mnemonic.starts_with("vcvtn") &&
               !Mnemonic.starts_with("vcvtp") &&
               !Mnemonic.starts_with("vcvtm"))) {
-      if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
+      if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
+                                            MnemonicOpsEndInd)) {
         // We could not split the vector predicate off vcvt because it might
         // have been the scalar vcvtt instruction.  Now we know its a vector
         // instruction, we still need to check whether its the vector
         // predicated vcvt with 'Then' predication or the vector vcvtt.  We can
         // distinguish the two based on the suffixes, if it is any of
         // ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt.
-        if (Mnemonic.starts_with("vcvtt") && Operands.size() >= 4) {
-          auto Sz1 = static_cast<ARMOperand &>(*Operands[2]);
-          auto Sz2 = static_cast<ARMOperand &>(*Operands[3]);
+        if (Mnemonic.starts_with("vcvtt") && MnemonicOpsEndInd > 2) {
+          auto Sz1 =
+              static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd - 2]);
+          auto Sz2 =
+              static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd - 1]);
           if (!(Sz1.isToken() && Sz1.getToken().starts_with(".f") &&
                 Sz2.isToken() && Sz2.getToken().starts_with(".f"))) {
             Operands.erase(Operands.begin());
@@ -7244,24 +7254,25 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                             ARMOperand::CreateToken(Mnemonic, MLoc));
           }
         }
-        Operands.erase(Operands.begin() + 1);
+        // Remove CondCode
+        removeCondCode(Operands, MnemonicOpsEndInd);
         SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                           Mnemonic.size() + CarrySetting);
+        // Add VPTPred
         Operands.insert(Operands.begin() + 1,
                         ARMOperand::CreateVPTPred(
                             ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
+        ++MnemonicOpsEndInd;
       }
     } else if (CanAcceptVPTPredicationCode) {
       // For all other instructions, make sure only one of the two
       // predication operands is left behind, depending on whether we should
       // use the vector predication.
-      if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
-        if (CanAcceptPredicationCode)
-          Operands.erase(Operands.begin() + 2);
-        else
-          Operands.erase(Operands.begin() + 1);
+      if (shouldOmitVectorPredicateOperand(Mnemonic, Operands,
+                                           MnemonicOpsEndInd)) {
+        removeVPTCondCode(Operands, MnemonicOpsEndInd);
       } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
-        Operands.erase(Operands.begin() + 1);
+        removeCondCode(Operands, MnemonicOpsEndInd);
       }
     }
   }
@@ -7286,69 +7297,73 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
     }
   }
 
-    // ARM mode 'blx' need special handling, as the register operand version
-    // is predicable, but the label operand version is not. So, we can't rely
-    // on the Mnemonic based checking to correctly figure out when to put
-    // a k_CondCode operand in the list. If we're trying to match the label
-    // version, remove the k_CondCode operand here.
-    if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
-        static_cast<ARMOperand &>(*Operands[2]).isImm())
-      Operands.erase(Operands.begin() + 1);
-
-    // Adjust operands of ldrexd/strexd to MCK_GPRPair.
-    // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
-    // a single GPRPair reg operand is used in the .td file to replace the two
-    // GPRs. However, when parsing from asm, the two GRPs cannot be
-    // automatically
-    // expressed as a GPRPair, so we have to manually merge them.
-    // FIXME: We would really like to be able to tablegen'erate this.
-    if (!isThumb() && Operands.size() > 4 &&
-        (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
-         Mnemonic == "stlexd")) {
-      bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
-      unsigned Idx = isLoad ? 2 : 3;
-      ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
-      ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
-
-      const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
-      // Adjust only if Op1 and Op2 are GPRs.
-      if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
-          MRC.contains(Op2.getReg())) {
-        unsigned Reg1 = Op1.getReg();
-        unsigned Reg2 = Op2.getReg();
-        unsigned Rt = MRI->getEncodingValue(Reg1);
-        unsigned Rt2 = MRI->getEncodingValue(Reg2);
-
-        // Rt2 must be Rt + 1 and Rt must be even.
-        if (Rt + 1 != Rt2 || (Rt & 1)) {
-          return Error(Op2.getStartLoc(),
-                       isLoad ? "destination operands must be sequential"
-                              : "source operands must be sequential");
-        }
-        unsigned NewReg = MRI->getMatchingSuperReg(
-            Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
-        Operands[Idx] =
-            ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
-        Operands.erase(Operands.begin() + Idx + 1);
+  // ARM mode 'blx' need special handling, as the register operand version
+  // is predicable, but the label operand version is not. So, we can't rely
+  // on the Mnemonic based checking to correctly figure out when to put
+  // a k_CondCode operand in the list. If we're trying to match the label
+  // version, remove the k_CondCode operand here.
+  if (!isThumb() && Mnemonic == "blx" &&
+      Operands.size() == MnemonicOpsEndInd + 1 &&
+      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).isImm())
+    removeCondCode(Operands, MnemonicOpsEndInd);
+
+  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
+  // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
+  // a single GPRPair reg operand is used in the .td file to replace the two
+  // GPRs. However, when parsing from asm, the two GPRs cannot be
+  // automatically
+  // expressed as a GPRPair, so we have to manually merge them.
+  // FIXME: We would really like to be able to tablegen'erate this.
+  if (!isThumb() && Operands.size() > MnemonicOpsEndInd + 1 &&
+      (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
+       Mnemonic == "stlexd")) {
+    bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
+    unsigned Idx = isLoad ? MnemonicOpsEndInd : MnemonicOpsEndInd + 1;
+    ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
+    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
+
+    const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
+    // Adjust only if Op1 and Op2 are GPRs.
+    if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
+        MRC.contains(Op2.getReg())) {
+      unsigned Reg1 = Op1.getReg();
+      unsigned Reg2 = Op2.getReg();
+      unsigned Rt = MRI->getEncodingValue(Reg1);
+      unsigned Rt2 = MRI->getEncodingValue(Reg2);
+
+      // Rt2 must be Rt + 1 and Rt must be even.
+      if (Rt + 1 != Rt2 || (Rt & 1)) {
+        return Error(Op2.getStartLoc(),
+                     isLoad ? "destination operands must be sequential"
+                            : "source operands must be sequential");
       }
+      unsigned NewReg = MRI->getMatchingSuperReg(
+          Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
+      Operands[Idx] =
+          ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
+      Operands.erase(Operands.begin() + Idx + 1);
+    }
   }
 
   // GNU Assembler extension (compatibility).
-  fixupGNULDRDAlias(Mnemonic, Operands);
+  fixupGNULDRDAlias(Mnemonic, Operands, MnemonicOpsEndInd);
 
   // FIXME: As said above, this is all a pretty gross hack.  This instruction
   // does not fit with other "subs" and tblgen.
   // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
   // so the Mnemonic is the original name "subs" and delete the predicate
   // operand so it will match the table entry.
-  if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
-      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
-      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
-      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
-      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
+  if (isThumbTwo() && Mnemonic == "sub" &&
+      Operands.size() == MnemonicOpsEndInd + 3 &&
+      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).isReg() &&
+      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).getReg() ==
+          ARM::PC &&
+      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]).isReg() &&
+      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]).getReg() ==
+          ARM::LR &&
+      static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 2]).isImm()) {
     Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
-    Operands.erase(Operands.begin() + 1);
+    removeCCOut(Operands, MnemonicOpsEndInd);
   }
   return false;
 }
@@ -7393,49 +7408,61 @@ static bool instIsBreakpoint(const MCInst &Inst) {
            Inst.getOpcode() == ARM::HLT;
 }
 
+unsigned getRegListInd(const OperandVector &Operands,
+                       unsigned MnemonicOpsEndInd) {
+  for (unsigned I = MnemonicOpsEndInd; I < Operands.size(); ++I) {
+    const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[I]);
+    if (Op.isRegList()) {
+      return I;
+    }
+  }
+  return 0;
+}
+
 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
                                        const OperandVector &Operands,
-                                       unsigned ListNo, bool IsARPop) {
-  const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
-  bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
-
-  bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
-  bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
-  bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
+                                       unsigned MnemonicOpsEndInd,
+                                       unsigned ListIndex, bool IsARPop) {
+  bool ListContainsSP = listContainsReg(Inst, ListIndex, ARM::SP);
+  bool ListContainsLR = listContainsReg(Inst, ListIndex, ARM::LR);
+  bool ListContainsPC = listContainsReg(Inst, ListIndex, ARM::PC);
 
   if (!IsARPop && ListContainsSP)
-    return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
-                 "SP may not be in the register list");
-  else if (ListContainsPC && ListContainsLR)
-    return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
-                 "PC and LR may not be in the register list simultaneously");
+    return Error(
+        Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
+        "SP may not be in the register list");
+  if (ListContainsPC && ListContainsLR)
+    return Error(
+        Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
+        "PC and LR may not be in the register list simultaneously");
   return false;
 }
 
 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
                                        const OperandVector &Operands,
-                                       unsigned ListNo) {
-  const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
-  bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
-
-  bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
-  bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
+                                       unsigned MnemonicOpsEndInd,
+                                       unsigned ListIndex) {
+  bool ListContainsSP = listContainsReg(Inst, ListIndex, ARM::SP);
+  bool ListContainsPC = listContainsReg(Inst, ListIndex, ARM::PC);
 
   if (ListContainsSP && ListContainsPC)
-    return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
-                 "SP and PC may not be in the register list");
-  else if (ListContainsSP)
-    return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
-                 "SP may not be in the register list");
-  else if (ListContainsPC)
-    return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
-                 "PC may not be in the register list");
+    return Error(
+        Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
+        "SP and PC may not be in the register list");
+  if (ListContainsSP)
+    return Error(
+        Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
+        "SP may not be in the register list");
+  if (ListContainsPC)
+    return Error(
+        Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
+        "PC may not be in the register list");
   return false;
 }
 
-bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
-                                    const OperandVector &Operands,
-                                    bool Load, bool ARMMode, bool Writeback) {
+bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
+                                    bool Load, bool ARMMode, bool Writeback,
+                                    unsigned MnemonicOpsEndInd) {
   unsigned RtIndex = Load || !Writeback ? 0 : 1;
   unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
   unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
@@ -7443,21 +7470,21 @@ bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
   if (ARMMode) {
     // Rt can't be R14.
     if (Rt == 14)
-      return Error(Operands[3]->getStartLoc(),
-                  "Rt can't be R14");
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
+                   "Rt can't be R14");
 
     // Rt must be even-numbered.
     if ((Rt & 1) == 1)
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "Rt must be even-numbered");
 
     // Rt2 must be Rt + 1.
     if (Rt2 != Rt + 1) {
       if (Load)
-        return Error(Operands[3]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                      "destination operands must be sequential");
       else
-        return Error(Operands[3]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                      "source operands must be sequential");
     }
 
@@ -7467,7 +7494,7 @@ bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
 
   if (!ARMMode && Load) {
     if (Rt2 == Rt)
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "destination operands can't be identical");
   }
 
@@ -7476,11 +7503,11 @@ bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
 
     if (Rn == Rt || Rn == Rt2) {
       if (Load)
-        return Error(Operands[3]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                      "base register needs to be different from destination "
                      "registers");
       else
-        return Error(Operands[3]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                      "source register and base register can't be identical");
     }
 
@@ -7518,7 +7545,8 @@ static bool isARMMCExpr(MCParsedAsmOperand &MCOp) {
 
 // FIXME: We would really like to be able to tablegen'erate this.
 bool ARMAsmParser::validateInstruction(MCInst &Inst,
-                                       const OperandVector &Operands) {
+                                       const OperandVector &Operands,
+                                       unsigned MnemonicOpsEndInd) {
   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
   SMLoc Loc = Operands[0]->getStartLoc();
 
@@ -7533,7 +7561,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
         Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
     if (Cond != currentITCond()) {
       // Find the condition code Operand to get its SMLoc information.
-      SMLoc CondLoc;
+      SMLoc CondLoc = Operands[0]->getEndLoc();
       for (unsigned I = 1; I < Operands.size(); ++I)
         if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
           CondLoc = Operands[I]->getStartLoc();
@@ -7612,50 +7640,50 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     break;
   }
   case ARM::LDRD:
-    if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
-                         /*Writeback*/false))
+    if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ true,
+                         /*Writeback*/ false, MnemonicOpsEndInd))
       return true;
     break;
   case ARM::LDRD_PRE:
   case ARM::LDRD_POST:
-    if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
-                         /*Writeback*/true))
+    if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ true,
+                         /*Writeback*/ true, MnemonicOpsEndInd))
       return true;
     break;
   case ARM::t2LDRDi8:
-    if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
-                         /*Writeback*/false))
+    if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ false,
+                         /*Writeback*/ false, MnemonicOpsEndInd))
       return true;
     break;
   case ARM::t2LDRD_PRE:
   case ARM::t2LDRD_POST:
-    if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
-                         /*Writeback*/true))
+    if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ false,
+                         /*Writeback*/ true, MnemonicOpsEndInd))
       return true;
     break;
   case ARM::t2BXJ: {
     const unsigned RmReg = Inst.getOperand(0).getReg();
     // Rm = SP is no longer unpredictable in v8-A
     if (RmReg == ARM::SP && !hasV8Ops())
-      return Error(Operands[2]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "r13 (SP) is an unpredictable operand to BXJ");
     return false;
   }
   case ARM::STRD:
-    if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
-                         /*Writeback*/false))
+    if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ true,
+                         /*Writeback*/ false, MnemonicOpsEndInd))
       return true;
     break;
   case ARM::STRD_PRE:
   case ARM::STRD_POST:
-    if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
-                         /*Writeback*/true))
+    if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ true,
+                         /*Writeback*/ true, MnemonicOpsEndInd))
       return true;
     break;
   case ARM::t2STRD_PRE:
   case ARM::t2STRD_POST:
-    if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
-                         /*Writeback*/true))
+    if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ false,
+                         /*Writeback*/ true, MnemonicOpsEndInd))
       return true;
     break;
   case ARM::STR_PRE_IMM:
@@ -7679,7 +7707,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
 
     if (Rt == Rn)
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
                    "source register and base register can't be identical");
     return false;
   }
@@ -7692,19 +7720,19 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(1).getReg());
 
     if (Rt == Rn)
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "destination register and base register can't be identical");
     if (Inst.getOpcode() == ARM::t2LDR_POST_imm ||
         Inst.getOpcode() == ARM::t2STR_POST_imm) {
       int Imm = Inst.getOperand(2).getImm();
       if (Imm > 255 || Imm < -255)
-        return Error(Operands[5]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                      "operand must be in range [-255, 255]");
     }
     if (Inst.getOpcode() == ARM::t2STR_PRE_imm ||
         Inst.getOpcode() == ARM::t2STR_POST_imm) {
       if (Inst.getOperand(0).getReg() == ARM::PC) {
-        return Error(Operands[3]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                      "operand must be a register in range [r0, r14]");
       }
     }
@@ -7723,17 +7751,17 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
         Inst.getOpcode() == ARM::t2STRB_PRE_imm) {
       int Imm = Inst.getOperand(2).getImm();
       if (Imm > 255 || Imm < -255)
-        return Error(Operands[5]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                      "operand must be in range [-255, 255]");
     } else if (Inst.getOpcode() == ARM::t2LDRB_OFFSET_imm ||
                Inst.getOpcode() == ARM::t2STRB_OFFSET_imm) {
       int Imm = Inst.getOperand(2).getImm();
       if (Imm > 0 || Imm < -255)
-        return Error(Operands[5]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                      "operand must be in range [0, 255] with a negative sign");
     }
     if (Inst.getOperand(0).getReg() == ARM::PC) {
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "if operand is PC, should call the LDRB (literal)");
     }
     return false;
@@ -7751,17 +7779,17 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
         Inst.getOpcode() == ARM::t2STRH_PRE_imm) {
       int Imm = Inst.getOperand(2).getImm();
       if (Imm > 255 || Imm < -255)
-        return Error(Operands[5]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                      "operand must be in range [-255, 255]");
     } else if (Inst.getOpcode() == ARM::t2LDRH_OFFSET_imm ||
                Inst.getOpcode() == ARM::t2STRH_OFFSET_imm) {
       int Imm = Inst.getOperand(2).getImm();
       if (Imm > 0 || Imm < -255)
-        return Error(Operands[5]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                      "operand must be in range [0, 255] with a negative sign");
     }
     if (Inst.getOperand(0).getReg() == ARM::PC) {
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "if operand is PC, should call the LDRH (literal)");
     }
     return false;
@@ -7774,16 +7802,16 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
         Inst.getOpcode() == ARM::t2LDRSB_PRE_imm) {
       int Imm = Inst.getOperand(2).getImm();
       if (Imm > 255 || Imm < -255)
-        return Error(Operands[5]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                      "operand must be in range [-255, 255]");
     } else if (Inst.getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
       int Imm = Inst.getOperand(2).getImm();
       if (Imm > 0 || Imm < -255)
-        return Error(Operands[5]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                      "operand must be in range [0, 255] with a negative sign");
     }
     if (Inst.getOperand(0).getReg() == ARM::PC) {
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "if operand is PC, should call the LDRH (literal)");
     }
     return false;
@@ -7796,16 +7824,16 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
         Inst.getOpcode() == ARM::t2LDRSH_PRE_imm) {
       int Imm = Inst.getOperand(2).getImm();
       if (Imm > 255 || Imm < -255)
-        return Error(Operands[5]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                      "operand must be in range [-255, 255]");
     } else if (Inst.getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
       int Imm = Inst.getOperand(2).getImm();
       if (Imm > 0 || Imm < -255)
-        return Error(Operands[5]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                      "operand must be in range [0, 255] with a negative sign");
     }
     if (Inst.getOperand(0).getReg() == ARM::PC) {
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "if operand is PC, should call the LDRH (literal)");
     }
     return false;
@@ -7840,7 +7868,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
 
     if (Rt == Rn)
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "destination register and base register can't be identical");
     return false;
   }
@@ -7884,10 +7912,10 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
 
     if (Qd == Qm) {
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    Twine("destination vector register and vector ") +
-                   (QmIsPointer ? "pointer" : "offset") +
-                   " register can't be identical");
+                       (QmIsPointer ? "pointer" : "offset") +
+                       " register can't be identical");
     }
     return false;
   }
@@ -7900,7 +7928,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     unsigned LSB = Inst.getOperand(2).getImm();
     unsigned Widthm1 = Inst.getOperand(3).getImm();
     if (Widthm1 >= 32 - LSB)
-      return Error(Operands[5]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                    "bitfield width must be in range [1,32-lsb]");
     return false;
   }
@@ -7914,24 +7942,29 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     // in the register list.
     unsigned Rn = Inst.getOperand(0).getReg();
     bool HasWritebackToken =
-        (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
-         static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
+        (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
+             .isToken() &&
+         static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
+                 .getToken() == "!");
+
+    unsigned RegStart =
+        HasWritebackToken ? MnemonicOpsEndInd + 2 : MnemonicOpsEndInd + 1;
     bool ListContainsBase;
     if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
-      return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
+      return Error(Operands[RegStart]->getStartLoc(),
                    "registers must be in range r0-r7");
     // If we should have writeback, then there should be a '!' token.
     if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
-      return Error(Operands[2]->getStartLoc(),
+      return Error(Operands[RegStart]->getStartLoc(),
                    "writeback operator '!' expected");
     // If we should not have writeback, there must not be a '!'. This is
     // true even for the 32-bit wide encodings.
     if (ListContainsBase && HasWritebackToken)
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "writeback operator '!' not allowed when base register "
                    "in register list");
 
-    if (validatetLDMRegList(Inst, Operands, 3))
+    if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
       return true;
     break;
   }
@@ -7949,12 +7982,12 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     break;
   case ARM::t2LDMIA:
   case ARM::t2LDMDB:
-    if (validatetLDMRegList(Inst, Operands, 3))
+    if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
       return true;
     break;
   case ARM::t2STMIA:
   case ARM::t2STMDB:
-    if (validatetSTMRegList(Inst, Operands, 3))
+    if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
       return true;
     break;
   case ARM::t2LDMIA_UPD:
@@ -7966,10 +7999,10 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
                    "writeback register not allowed in register list");
 
     if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
-      if (validatetLDMRegList(Inst, Operands, 3))
+      if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
         return true;
     } else {
-      if (validatetSTMRegList(Inst, Operands, 3))
+      if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
         return true;
     }
     break;
@@ -7979,7 +8012,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
   case ARM::sysLDMDB_UPD:
   case ARM::sysLDMIB_UPD:
     if (!listContainsReg(Inst, 3, ARM::PC))
-      return Error(Operands[4]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
                    "writeback register only allowed on system LDM "
                    "if PC in register-list");
     break;
@@ -7987,7 +8020,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
   case ARM::sysSTMDA_UPD:
   case ARM::sysSTMDB_UPD:
   case ARM::sysSTMIB_UPD:
-    return Error(Operands[2]->getStartLoc(),
+    return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                  "system STM cannot have writeback register");
   case ARM::tMUL:
     // The second source operand must be the same register as the destination
@@ -7998,11 +8031,12 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     // this first statement is always true for the new Inst.  Essentially, the
     // destination is unconditionally copied into the second source operand
     // without checking to see if it matches what we actually parsed.
-    if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
-                                 ((ARMOperand &)*Operands[5]).getReg()) &&
-        (((ARMOperand &)*Operands[3]).getReg() !=
-         ((ARMOperand &)*Operands[4]).getReg())) {
-      return Error(Operands[3]->getStartLoc(),
+    if (Operands.size() == MnemonicOpsEndInd + 3 &&
+        (((ARMOperand &)*Operands[MnemonicOpsEndInd]).getReg() !=
+         ((ARMOperand &)*Operands[MnemonicOpsEndInd + 2]).getReg()) &&
+        (((ARMOperand &)*Operands[MnemonicOpsEndInd]).getReg() !=
+         ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1]).getReg())) {
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "destination register must match source register");
     }
     break;
@@ -8014,9 +8048,9 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     bool ListContainsBase;
     if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
         !isThumbTwo())
-      return Error(Operands[2]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "registers must be in range r0-r7 or pc");
-    if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
+    if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 2, !isMClass()))
       return true;
     break;
   }
@@ -8024,9 +8058,9 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     bool ListContainsBase;
     if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
         !isThumbTwo())
-      return Error(Operands[2]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "registers must be in range r0-r7 or lr");
-    if (validatetSTMRegList(Inst, Operands, 2))
+    if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 2))
       return true;
     break;
   }
@@ -8035,17 +8069,17 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
                                           0, ListContainsBase);
     if (InvalidLowList && !isThumbTwo())
-      return Error(Operands[4]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                    "registers must be in range r0-r7");
 
     // This would be converted to a 32-bit stm, but that's not valid if the
     // writeback register is in the list.
     if (InvalidLowList && ListContainsBase)
-      return Error(Operands[4]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                    "writeback operator '!' not allowed when base register "
                    "in register list");
 
-    if (validatetSTMRegList(Inst, Operands, 4))
+    if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 4))
       return true;
     break;
   }
@@ -8054,7 +8088,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     // same, we need thumb2 (for the wide encoding), or we have an error.
     if (!isThumbTwo() &&
         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
-      return Error(Operands[4]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                    "source register must be the same as destination");
     }
     break;
@@ -8065,17 +8099,20 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
   case ARM::t2SUBrs:
     if (Inst.getOperand(0).getReg() == ARM::SP &&
         Inst.getOperand(1).getReg() != ARM::SP)
-      return Error(Operands[4]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
                    "source register must be sp if destination is sp");
     break;
 
   // Final range checking for Thumb unconditional branch instructions.
   case ARM::tB:
-    if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
-      return Error(Operands[2]->getStartLoc(), "branch target out of range");
+    if (!(static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]))
+             .isSignedOffset<11, 1>())
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
+                   "branch target out of range");
     break;
   case ARM::t2B: {
-    int op = (Operands[2]->isImm()) ? 2 : 3;
+    int op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
+                                                    : MnemonicOpsEndInd + 1;
     ARMOperand &Operand = static_cast<ARMOperand &>(*Operands[op]);
     // Delay the checks of symbolic expressions until they are resolved.
     if (!isa<MCBinaryExpr>(Operand.getImm()) &&
@@ -8085,19 +8122,24 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
   }
   // Final range checking for Thumb conditional branch instructions.
   case ARM::tBcc:
-    if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
-      return Error(Operands[2]->getStartLoc(), "branch target out of range");
+    if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
+             .isSignedOffset<8, 1>())
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
+                   "branch target out of range");
     break;
   case ARM::t2Bcc: {
-    int Op = (Operands[2]->isImm()) ? 2 : 3;
+    int Op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
+                                                    : MnemonicOpsEndInd + 1;
     if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
       return Error(Operands[Op]->getStartLoc(), "branch target out of range");
     break;
   }
   case ARM::tCBZ:
   case ARM::tCBNZ: {
-    if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
-      return Error(Operands[2]->getStartLoc(), "branch target out of range");
+    if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
+             .isUnsignedOffset<6, 1>())
+      return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
+                   "branch target out of range");
     break;
   }
   case ARM::MOVi16:
@@ -8111,7 +8153,8 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     // want the behavior of silently truncating, which can be unexpected and
     // lead to bugs that are difficult to find since this is an easy mistake
     // to make.
-    int i = (Operands[3]->isImm()) ? 3 : 4;
+    int i = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
+                                                   : MnemonicOpsEndInd + 1;
     ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
     if (CE) break;
@@ -8126,7 +8169,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     break;
   }
   case ARM::tADDi8: {
-    MCParsedAsmOperand &Op = *Operands[4];
+    MCParsedAsmOperand &Op = *Operands[MnemonicOpsEndInd + 1];
     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
       return Error(Op.getStartLoc(),
                    "Immediate expression for Thumb adds requires :lower0_7:,"
@@ -8134,7 +8177,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     break;
   }
   case ARM::tMOVi8: {
-    MCParsedAsmOperand &Op = *Operands[2];
+    MCParsedAsmOperand &Op = *Operands[MnemonicOpsEndInd];
     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
       return Error(Op.getStartLoc(),
                    "Immediate expression for Thumb movs requires :lower0_7:,"
@@ -8161,30 +8204,36 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
   case ARM::t2BFr:
   case ARM::t2BFLi:
   case ARM::t2BFLr: {
-    if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
-        (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
-      return Error(Operands[2]->getStartLoc(),
+    if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
+             .isUnsignedOffset<4, 1>() ||
+        (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0)) {
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "branch location out of range or not a multiple of 2");
+    }
 
     if (Opcode == ARM::t2BFi) {
-      if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
-        return Error(Operands[3]->getStartLoc(),
+      if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
+               .isSignedOffset<16, 1>())
+        return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
                      "branch target out of range or not a multiple of 2");
     } else if (Opcode == ARM::t2BFLi) {
-      if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
-        return Error(Operands[3]->getStartLoc(),
+      if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
+               .isSignedOffset<18, 1>())
+        return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
                      "branch target out of range or not a multiple of 2");
     }
     break;
   }
   case ARM::t2BFic: {
-    if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
+    if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
+             .isUnsignedOffset<4, 1>() ||
         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
       return Error(Operands[1]->getStartLoc(),
                    "branch location out of range or not a multiple of 2");
 
-    if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
-      return Error(Operands[2]->getStartLoc(),
+    if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
+             .isSignedOffset<16, 1>())
+      return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
                    "branch target out of range or not a multiple of 2");
 
     assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
@@ -8205,7 +8254,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
       if (Inst.getOperand(i).isReg() &&
           !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
               Inst.getOperand(i).getReg())) {
-        return Error(Operands[2]->getStartLoc(),
+        return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                      "invalid register in register list. Valid registers are "
                      "r0-r12, lr/r14 and APSR.");
       }
@@ -8237,7 +8286,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
     if (Sm1 != Sm + 1)
-      return Error(Operands[5]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
                    "source operands must be sequential");
     break;
   }
@@ -8246,16 +8295,17 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
     if (Sm1 != Sm + 1)
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "destination operands must be sequential");
     break;
   }
   case ARM::VLDMDIA:
   case ARM::VSTMDIA: {
-    ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
+    ARMOperand &Op =
+        static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
     auto &RegList = Op.getRegList();
     if (RegList.size() < 1 || RegList.size() > 16)
-      return Error(Operands[3]->getStartLoc(),
+      return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
                    "list of registers must be at least 1 and at most 16");
     break;
   }
@@ -8266,13 +8316,15 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
   case ARM::MVE_VMULLTs32:
   case ARM::MVE_VMULLBu32:
   case ARM::MVE_VMULLTu32: {
-    if (Operands[3]->getReg() == Operands[4]->getReg()) {
-      return Error (Operands[3]->getStartLoc(),
-                    "Qd register and Qn register can't be identical");
+    if (Operands[MnemonicOpsEndInd]->getReg() ==
+        Operands[MnemonicOpsEndInd + 1]->getReg()) {
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
+                   "Qd register and Qn register can't be identical");
     }
-    if (Operands[3]->getReg() == Operands[5]->getReg()) {
-      return Error (Operands[3]->getStartLoc(),
-                    "Qd register and Qm register can't be identical");
+    if (Operands[MnemonicOpsEndInd]->getReg() ==
+        Operands[MnemonicOpsEndInd + 2]->getReg()) {
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
+                   "Qd register and Qm register can't be identical");
     }
     break;
   }
@@ -8281,41 +8333,56 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
   case ARM::MVE_VREV64_32:
   case ARM::MVE_VQDMULL_qr_s32bh:
   case ARM::MVE_VQDMULL_qr_s32th: {
-    if (Operands[3]->getReg() == Operands[4]->getReg()) {
-      return Error (Operands[3]->getStartLoc(),
-                    "Qd register and Qn register can't be identical");
+    if (Operands[MnemonicOpsEndInd]->getReg() ==
+        Operands[MnemonicOpsEndInd + 1]->getReg()) {
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
+                   "Qd register and Qn register can't be identical");
     }
     break;
   }
   case ARM::MVE_VCADDi32:
   case ARM::MVE_VCADDf32:
   case ARM::MVE_VHCADDs32: {
-    if (Operands[3]->getReg() == Operands[5]->getReg()) {
-      return Error (Operands[3]->getStartLoc(),
-                    "Qd register and Qm register can't be identical");
+    if (Operands[MnemonicOpsEndInd]->getReg() ==
+        Operands[MnemonicOpsEndInd + 2]->getReg()) {
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
+                   "Qd register and Qm register can't be identical");
     }
     break;
   }
   case ARM::MVE_VMOV_rr_q: {
-    if (Operands[4]->getReg() != Operands[6]->getReg())
-      return Error (Operands[4]->getStartLoc(), "Q-registers must be the same");
-    if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() !=
-        static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2)
-      return Error (Operands[5]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
+    if (Operands[MnemonicOpsEndInd + 2]->getReg() !=
+        Operands[MnemonicOpsEndInd + 4]->getReg())
+      return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
+                   "Q-registers must be the same");
+    if (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 3])
+            .getVectorIndex() !=
+        static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 5])
+                .getVectorIndex() +
+            2)
+      return Error(Operands[MnemonicOpsEndInd + 3]->getStartLoc(),
+                   "Q-register indexes must be 2 and 0 or 3 and 1");
     break;
   }
   case ARM::MVE_VMOV_q_rr: {
-    if (Operands[2]->getReg() != Operands[4]->getReg())
-      return Error (Operands[2]->getStartLoc(), "Q-registers must be the same");
-    if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() !=
-        static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2)
-      return Error (Operands[3]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
+    if (Operands[MnemonicOpsEndInd]->getReg() !=
+        Operands[MnemonicOpsEndInd + 2]->getReg())
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
+                   "Q-registers must be the same");
+    if (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
+            .getVectorIndex() !=
+        static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 3])
+                .getVectorIndex() +
+            2)
+      return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
+                   "Q-register indexes must be 2 and 0 or 3 and 1");
     break;
   }
   case ARM::MVE_SQRSHR:
   case ARM::MVE_UQRSHL: {
-    if (Operands[2]->getReg() == Operands[3]->getReg()) {
-      return Error(Operands[2]->getStartLoc(),
+    if (Operands[MnemonicOpsEndInd]->getReg() ==
+        Operands[MnemonicOpsEndInd + 1]->getReg()) {
+      return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
                    "Rda register and Rm register can't be identical");
     }
     break;
@@ -8719,6 +8786,7 @@ static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
 
 bool ARMAsmParser::processInstruction(MCInst &Inst,
                                       const OperandVector &Operands,
+                                      unsigned MnemonicOpsEndInd,
                                       MCStreamer &Out) {
   // Check if we have the wide qualifier, because if it's present we
   // must avoid selecting a 16-bit thumb instruction.
@@ -9039,9 +9107,7 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
       TmpInst.setOpcode(ARM::t2LDRpci);
     const ARMOperand &PoolOperand =
-      (HasWideQualifier ?
-       static_cast<ARMOperand &>(*Operands[4]) :
-       static_cast<ARMOperand &>(*Operands[3]));
+        static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
     // If SubExprVal is a constant we may be able to use a MOV
     if (isa<MCConstantExpr>(SubExprVal) &&
@@ -10468,7 +10534,8 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
     // to encoding T1 if <Rd> is omitted."
     if (Inst.getOperand(3).isImm() &&
-        (unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
+        (unsigned)Inst.getOperand(3).getImm() < 8 &&
+        Operands.size() == MnemonicOpsEndInd + 3) {
       Inst.setOpcode(ARM::tADDi3);
       return true;
     }
@@ -10478,7 +10545,8 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
     // to encoding T1 if <Rd> is omitted."
-    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
+    if ((unsigned)Inst.getOperand(3).getImm() < 8 &&
+        Operands.size() == MnemonicOpsEndInd + 3) {
       Inst.setOpcode(ARM::tSUBi3);
       return true;
     }
@@ -10598,8 +10666,10 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
     // an error in validateInstruction().
     unsigned Rn = Inst.getOperand(0).getReg();
     bool hasWritebackToken =
-        (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
-         static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
+        (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
+             .isToken() &&
+         static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
+                 .getToken() == "!");
     bool listContainsBase;
     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
         (!listContainsBase && !hasWritebackToken) ||
@@ -10898,6 +10968,26 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
   return false;
 }
 
+unsigned
+ARMAsmParser::checkEarlyTargetMatchPredicate(MCInst &Inst,
+                                             const OperandVector &Operands) {
+  unsigned Opc = Inst.getOpcode();
+  switch (Opc) {
+  // Prevent the "mov r8, r8" encoding of NOP from being selected when the
+  // ARMv6 / Thumb2 encoding is available.
+  case ARM::tMOVr: {
+    if (Operands[0]->isToken() &&
+        static_cast<ARMOperand &>(*Operands[0]).getToken() == "nop" &&
+        ((isThumb() && !isThumbOne()) || hasV6MOps())) {
+      return Match_MnemonicFail;
+    }
+  }
+    LLVM_FALLTHROUGH;
+  default:
+    return Match_Success;
+  }
+}
+
 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
   // 16-bit thumb arithmetic instructions either require or preclude the 'S'
   // suffix depending on whether they're in an IT block or not.
@@ -10908,22 +10998,23 @@ unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
            "optionally flag setting instruction missing optional def operand");
     assert(MCID.NumOperands == Inst.getNumOperands() &&
            "operand count mismatch!");
-    // Find the optional-def operand (cc_out).
-    unsigned OpNo;
-    for (OpNo = 0;
-         OpNo < MCID.NumOperands && !MCID.operands()[OpNo].isOptionalDef();
-         ++OpNo)
-      ;
+    bool IsCPSR = false;
+    // Check if the instruction has CPSR set.
+    for (unsigned OpNo = 0; OpNo < MCID.NumOperands; ++OpNo) {
+      if (MCID.operands()[OpNo].isOptionalDef() &&
+          Inst.getOperand(OpNo).isReg() &&
+          Inst.getOperand(OpNo).getReg() == ARM::CPSR)
+        IsCPSR = true;
+    }
+
     // If we're parsing Thumb1, reject it completely.
-    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
+    if (isThumbOne() && !IsCPSR)
       return Match_RequiresFlagSetting;
     // If we're parsing Thumb2, which form is legal depends on whether we're
     // in an IT block.
-    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
-        !inITBlock())
+    if (isThumbTwo() && !IsCPSR && !inITBlock())
       return Match_RequiresITBlock;
-    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
-        inITBlock())
+    if (isThumbTwo() && IsCPSR && inITBlock())
       return Match_RequiresNotITBlock;
     // LSL with zero immediate is not allowed in an IT block
     if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
@@ -10984,6 +11075,12 @@ unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
     if (!hasV8Ops() && (Inst.getOperand(0).getReg() == ARM::SP))
       return Match_RequiresV8;
     break;
+  case ARM::tMUL:
+    // The second source operand must be the same register as the destination
+    // operand.
+    if (Inst.getOperand(0).getReg() != Inst.getOperand(3).getReg())
+      return Match_RequiresDestinationRegisterMatchASourceRegister;
+    break;
   default:
     break;
   }
@@ -11142,6 +11239,9 @@ bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
   MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
                                  PendConditionalInstruction, Out);
 
+  // Find the number of operands that are part of the mnemonic (LHS).
+  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
+
   switch (MatchResult) {
   case Match_Success:
     LLVM_DEBUG(dbgs() << "Parsed as: ";
@@ -11150,7 +11250,7 @@ bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
 
     // Context sensitive operand constraints aren't handled by the matcher,
     // so check them here.
-    if (validateInstruction(Inst, Operands)) {
+    if (validateInstruction(Inst, Operands, MnemonicOpsEndInd)) {
       // Still progress the IT block, otherwise one wrong condition causes
       // nasty cascading errors.
       forwardITPosition();
@@ -11163,7 +11263,7 @@ bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
       // encoding is selected. Loop on it while changes happen so the
       // individual transformations can chain off each other. E.g.,
       // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
-      while (processInstruction(Inst, Operands, Out))
+      while (processInstruction(Inst, Operands, MnemonicOpsEndInd, Out))
         LLVM_DEBUG(dbgs() << "Changed to: ";
                    Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
                    dbgs() << "\n");
@@ -12612,6 +12712,9 @@ ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
       case Match_RequiresFlagSetting:
         Message.Message = "no flag-preserving variant of this instruction available";
         break;
+      case Match_RequiresDestinationRegisterMatchASourceRegister:
+        Message.Message = "destination register must match a source register";
+        break;
       case Match_InvalidOperand:
         Message.Message = "invalid operand for instruction";
         break;
@@ -12863,3 +12966,11 @@ bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
       std::begin(predicable_prefixes), std::end(predicable_prefixes),
       [&Mnemonic](const char *prefix) { return Mnemonic.starts_with(prefix); });
 }
+
+std::unique_ptr<ARMOperand> ARMAsmParser::defaultCondCodeOp() {
+  return ARMOperand::CreateCondCode(ARMCC::AL, SMLoc());
+}
+
+std::unique_ptr<ARMOperand> ARMAsmParser::defaultCCOutOp() {
+  return ARMOperand::CreateCCOut(0, SMLoc());
+}
diff --git a/llvm/test/MC/ARM/arm-branch-errors.s b/llvm/test/MC/ARM/arm-branch-errors.s
index bbf6445f5c18a4..5d7ae12fb3bc00 100644
--- a/llvm/test/MC/ARM/arm-branch-errors.s
+++ b/llvm/test/MC/ARM/arm-branch-errors.s
@@ -10,13 +10,13 @@
 
 @ CHECK: error: invalid instruction, any one of the following would fix this:
 @ CHECK:        b #2
-@ CHECK: note: instruction requires: thumb
 @ CHECK: note: invalid operand for instruction
+@ CHECK: note: instruction requires: thumb
 @ CHECK: error: invalid instruction, any one of the following would fix this:
 @ CHECK:        bl #2
 @ CHECK: note: instruction requires: thumb
 @ CHECK: note: invalid operand for instruction
 @ CHECK: error: invalid instruction, any one of the following would fix this:
 @ CHECK:        beq #2
-@ CHECK: note: instruction requires: thumb
 @ CHECK: note: invalid operand for instruction
+@ CHECK: note: instruction requires: thumb
diff --git a/llvm/test/MC/ARM/arm-reg-addr-errors.s b/llvm/test/MC/ARM/arm-reg-addr-errors.s
index 069baead38b499..47188932b1fff4 100644
--- a/llvm/test/MC/ARM/arm-reg-addr-errors.s
+++ b/llvm/test/MC/ARM/arm-reg-addr-errors.s
@@ -27,21 +27,21 @@ ldr r4, [r3], q12
 ldr r4, [r3], cpsr
 @ CHECK: [[@LINE-1]]{{.*}} invalid operand for instruction
 add r3, r0, s1, lsl #2
-@ CHECK: [[@LINE-1]]{{.*}} invalid operand for instruction
+@ CHECK: [[@LINE-1]]{{.*}} too many operands for instruction
 add r3, r0, d1, lsl #2
-@ CHECK: [[@LINE-1]]{{.*}} invalid operand for instruction
+@ CHECK: [[@LINE-1]]{{.*}} too many operands for instruction
 add r3, r0, q1, lsl #2
-@ CHECK: [[@LINE-1]]{{.*}} invalid operand for instruction
+@ CHECK: [[@LINE-1]]{{.*}} too many operands for instruction
 add r3, r0, cpsr, lsl #2
-@ CHECK: [[@LINE-1]]{{.*}} invalid operand for instruction
+@ CHECK: [[@LINE-1]]{{.*}} too many operands for instruction
 add r3, r0, r1, lsl s6
-@ CHECK: [[@LINE-1]]{{.*}} invalid operand for instruction
+@ CHECK: [[@LINE-1]]{{.*}} too many operands for instruction
 add r3, r0, r1, lsl d6
-@ CHECK: [[@LINE-1]]{{.*}} invalid operand for instruction
+@ CHECK: [[@LINE-1]]{{.*}} too many operands for instruction
 add r3, r0, r1, lsl q6
-@ CHECK: [[@LINE-1]]{{.*}} invalid operand for instruction
+@ CHECK: [[@LINE-1]]{{.*}} too many operands for instruction
 add r3, r0, r1, lsl cpsr
-@ CHECK: [[@LINE-1]]{{.*}} invalid operand for instruction
+@ CHECK: [[@LINE-1]]{{.*}} too many operands for instruction
 ldrd r2, r3, [s4]
 @ CHECK: [[@LINE-1]]{{.*}} invalid operand for instruction
 ldrd r2, r3, [r4, s5]
diff --git a/llvm/test/MC/ARM/arm11-hint-instr.s b/llvm/test/MC/ARM/arm11-hint-instr.s
index 4193a686870aba..d9eaa5a89ab230 100644
--- a/llvm/test/MC/ARM/arm11-hint-instr.s
+++ b/llvm/test/MC/ARM/arm11-hint-instr.s
@@ -65,7 +65,13 @@
 @ CHECK-THUMB: wfe                             @ encoding: [0x20,0xbf]
 @ CHECK-THUMB: wfi                             @ encoding: [0x30,0xbf]
 @ CHECK-THUMB: sev                             @ encoding: [0x40,0xbf]
-@ CHECK-ERROR-THUMB: error: instruction requires: v7 clrex
+@ CHECK-ERROR-THUMB: error: invalid instruction, any one of the following would fix this:
+@ CHECK-ERROR-THUMB: clrex
+@ CHECK-ERROR-THUMB: ^
+@ CHECK-ERROR-THUMB: note: instruction requires: v7 clrex
+@ CHECK-ERROR-THUMB: clrex
+@ CHECK-ERROR-THUMB: ^
+@ CHECK-ERROR-THUMB: note: instruction requires: arm-mode
 @ CHECK-ERROR-THUMB: clrex
 @ CHECK-ERROR-THUMB: ^
 
diff --git a/llvm/test/MC/ARM/cps.s b/llvm/test/MC/ARM/cps.s
index bafdfdea537b8b..1034ed93810dd4 100644
--- a/llvm/test/MC/ARM/cps.s
+++ b/llvm/test/MC/ARM/cps.s
@@ -26,6 +26,6 @@
 @ V6-ERRORS: note: too many operands for instruction
 @ V6-ERRORS: error: invalid instruction, any one of the following would fix this:
 @ V6-ERRORS: cps #0
-@ V6-ERRORS: note: too few operands for instruction
 @ V6-ERRORS: note: instruction requires: arm-mode
 @ V6-ERRORS: note: instruction requires: thumb2
+@ V6-ERRORS: note: too few operands for instruction
diff --git a/llvm/test/MC/ARM/d16.s b/llvm/test/MC/ARM/d16.s
index 67b5095a13282c..755c9a8a312894 100644
--- a/llvm/test/MC/ARM/d16.s
+++ b/llvm/test/MC/ARM/d16.s
@@ -5,8 +5,8 @@
 
 @ D16: error: invalid instruction, any one of the following would fix this:
 @ D16-NEXT: vadd.f64 d1, d2, d16
-@ D16: note: operand must be a register in range [d0, d15]
 @ D16: note: too many operands for instruction
+@ D16: note: operand must be a register in range [d0, d15]
 vadd.f64 d1, d2, d16
 
 @ D16: error: operand must be a register in range [d0, d15]
diff --git a/llvm/test/MC/ARM/directive-arch_extension-crypto.s b/llvm/test/MC/ARM/directive-arch_extension-crypto.s
index 8d3cd9e5e1d0da..05b6d9e040188e 100644
--- a/llvm/test/MC/ARM/directive-arch_extension-crypto.s
+++ b/llvm/test/MC/ARM/directive-arch_extension-crypto.s
@@ -10,15 +10,16 @@
 	.syntax unified
 
 	.arch_extension crypto
-@ CHECK-V7: error: architectural extension 'crypto' is not allowed for the current base architecture
+@ CHECK-V7: architectural extension 'crypto' is not allowed for the current base architecture
 @ CHECK-V7-NEXT: 	.arch_extension crypto
 @ CHECK-V7-NEXT:                     ^
 
 	.type crypto,%function
 crypto:
 	vmull.p64 q0, d0, d1
-@ CHECK-V7: error: instruction requires: aes armv8
-
+@ CHECK-V7: error: invalid instruction, any one of the following would fix this:
+@ CHECK-V7: note: invalid operand for instruction
+@ CHECK-V7: note: instruction requires: aes armv8
 	aesd.8 q0, q1
 @ CHECK-V7: error: instruction requires: aes armv8
 	aese.8 q0, q1
@@ -51,14 +52,18 @@ crypto:
 @ CHECK-V7: error: instruction requires: sha2 armv8
 
 	.arch_extension nocrypto
+@ CHECK-V7: error: architectural extension 'sha2' is not allowed for the current base architecture
+@ CHECK-V7: error: architectural extension 'aes' is not allowed for the current base architecture
 @ CHECK-V7: error: architectural extension 'crypto' is not allowed for the current base architecture
-@ CHECK-V7-NEXT: 	.arch_extension nocrypto
+@ CHECK-V7-NEXT:     .arch_extension nocrypto
 @ CHECK-V7-NEXT:                     ^
 
 	.type nocrypto,%function
 nocrypto:
 	vmull.p64 q0, d0, d1
-@ CHECK-V7: error: instruction requires: aes armv8
+@ CHECK-V7: error: invalid instruction, any one of the following
+@ CHECK-V7: note: invalid operand for instruction
+@ CHECK-V7: note: instruction requires: aes armv8
 @ CHECK-V8: error: instruction requires: aes
 
 	aesd.8 q0, q1
diff --git a/llvm/test/MC/ARM/lsl-zero-errors.s b/llvm/test/MC/ARM/lsl-zero-errors.s
index e021aa9eb986d5..1e51c587211d1b 100644
--- a/llvm/test/MC/ARM/lsl-zero-errors.s
+++ b/llvm/test/MC/ARM/lsl-zero-errors.s
@@ -55,22 +55,22 @@
 
 // CHECK-NONARM: error: invalid instruction, any one of the following would fix this:
 // CHECK-NONARM-NEXT: mov pc, r0, lsl #0
-// CHECK-NONARM: note: operand must be a register in range [r0, r15]
 // CHECK-THUMBV7: note: operand must be a register in range [r0, r12] or r14
 // CHECK-THUMBV8: note: operand must be a register in range [r0, r14]
+// CHECK-NONARM: note: operand must be a register in range [r0, r15]
 
 // CHECK-NONARM: error: invalid instruction, any one of the following would fix this:
 // CHECK-NONARM-NEXT: mov r0, pc, lsl #0
-// CHECK-NONARM: note: operand must be a register in range [r0, r15]
 // CHECK-NONARM: note: invalid operand for instruction
 // CHECK-NONARM: note: invalid operand for instruction
 // CHECK-NONARM: note: operand must be an immediate in the range [256,65535]
+// CHECK-NONARM: note: operand must be a register in range [r0, r15]
 
 // CHECK-NONARM: error: invalid instruction, any one of the following would fix this:
 // CHECK-NONARM-NEXT: mov pc, pc, lsl #0
-// CHECK-NONARM: note: operand must be a register in range [r0, r15]
 // CHECK-THUMBV7: note: operand must be a register in range [r0, r12] or r14
 // CHECK-THUMBV8: note: operand must be a register in range [r0, r14]
+// CHECK-NONARM: note: operand must be a register in range [r0, r15]
 
 // CHECK-NONARM: error: invalid instruction, any one of the following would fix this:
 // CHECK-NONARM-NEXT: movs pc, r0, lsl #0
@@ -134,8 +134,8 @@
 // FIXME: We should consistently have the "requires ARMv8" error here
 // CHECK-THUMBV7: error: invalid instruction, any one of the following would fix this:
 // CHECK-THUMBV7-NEXT: mov sp, sp, lsl #0
-// CHECK-THUMBV7: note: operand must be a register in range [r0, r15]
 // CHECK-THUMBV7: note: operand must be a register in range [r0, r12] or r14
+// CHECK-THUMBV7: note: operand must be a register in range [r0, r15]
 
 // CHECK-THUMBV7: error: invalid instruction, any one of the following would fix this:
 // CHECK-THUMBV7-NEXT: movs sp, sp, lsl #0
diff --git a/llvm/test/MC/ARM/mve-misc.s b/llvm/test/MC/ARM/mve-misc.s
index f3af9e0afe64fb..7f9a2c56c2d82b 100644
--- a/llvm/test/MC/ARM/mve-misc.s
+++ b/llvm/test/MC/ARM/mve-misc.s
@@ -15,55 +15,55 @@ vpsel   q0, q5, q2
 vpnot
 
 # CHECK: wlstp.8     lr, r0, #1668  @ encoding: [0x00,0xf0,0x43,0xc3]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.8     lr, r0, #1668
 
 # CHECK: wlstp.16     lr, r0, #1668  @ encoding: [0x10,0xf0,0x43,0xc3]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.16     lr, r0, #1668
 
 # CHECK: wlstp.32     lr, r4, #2706  @ encoding: [0x24,0xf0,0x49,0xcd]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.32     lr, r4, #2706
 
 # CHECK: wlstp.64     lr, lr, #3026  @ encoding: [0x3e,0xf0,0xe9,0xcd]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.64     lr, lr, #3026
 
 # CHECK: wlstp.8     lr, r5, #3436  @ encoding: [0x05,0xf0,0xb7,0xc6]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.8     lr, r5, #3436
 
 # CHECK: wlstp.16     lr, r1, #1060  @ encoding: [0x11,0xf0,0x13,0xc2]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.16     lr, r1, #1060
 
 # CHECK: wlstp.32     lr, r7, #4036  @ encoding: [0x27,0xf0,0xe3,0xc7]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.32     lr, r7, #4036
 
 # CHECK: wlstp.8     lr, r1, #538  @ encoding: [0x01,0xf0,0x0d,0xc9]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.8     lr, r1, #538
 
 # CHECK: wlstp.8     lr, r10, #1404  @ encoding: [0x0a,0xf0,0xbf,0xc2]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.8     lr, r10, #1404
 
 # CHECK: wlstp.8     lr, r10, #1408  @ encoding: [0x0a,0xf0,0xc1,0xc2]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.8     lr, r10, #1408
 
 # CHECK: wlstp.8     lr, r10, #2358  @ encoding: [0x0a,0xf0,0x9b,0xcc]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.8     lr, r10, #2358
 
 # CHECK: wlstp.8     lr, r10, #4086  @ encoding: [0x0a,0xf0,0xfb,0xcf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.8     lr, r10, #4086
 
 # CHECK: wlstp.8     lr, r11, #1442  @ encoding: [0x0b,0xf0,0xd1,0xca]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.8     lr, r11, #1442
 
 # ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: loop end is out of range or not a positive multiple of 2
@@ -87,39 +87,39 @@ wlstp.16     lr, sp, #1442
 wlstp.32     r10, r11, #1442
 
 # CHECK: wlstp.8     lr, r1, .Lendloop  @ encoding: [0x01'A',0xf0'A',0x01'A',0xc0'A']
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.8     lr, r1, .Lendloop
 
 # CHECK: wlstp.16     lr, r2, .Lendloop  @ encoding: [0x12'A',0xf0'A',0x01'A',0xc0'A']
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.16     lr, r2, .Lendloop
 
 # CHECK: wlstp.32     lr, r3, .Lendloop  @ encoding: [0x23'A',0xf0'A',0x01'A',0xc0'A']
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.32     lr, r3, .Lendloop
 
 # CHECK: wlstp.64     lr, r5, .Lendloop  @ encoding: [0x35'A',0xf0'A',0x01'A',0xc0'A']
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.64     lr, r5, .Lendloop
 
 # CHECK: wlstp.64     lr, r5, #0  @ encoding: [0x35,0xf0,0x01,0xc0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 wlstp.64     lr, r5, #0
 
 # CHECK: dlstp.8     lr, r5  @ encoding: [0x05,0xf0,0x01,0xe0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 dlstp.8     lr, r5
 
 # CHECK: dlstp.16     lr, r5  @ encoding: [0x15,0xf0,0x01,0xe0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 dlstp.16     lr, r5
 
 # CHECK: dlstp.32     lr, r7  @ encoding: [0x27,0xf0,0x01,0xe0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 dlstp.32     lr, r7
 
 # CHECK: dlstp.64     lr, r2  @ encoding: [0x32,0xf0,0x01,0xe0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 dlstp.64     lr, r2
 
 # ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: operand must be a register in range [r0, r12] or r14
@@ -135,15 +135,15 @@ dlstp.64     r10, r0
 dlstp.64     lr, pc
 
 # CHECK: letp lr, #-2 @ encoding: [0x1f,0xf0,0x01,0xc8]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 letp lr, #-2
 
 # CHECK: letp lr, #-8 @ encoding: [0x1f,0xf0,0x05,0xc0]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 letp lr, #-8
 
 # CHECK: letp lr, #-4094 @ encoding: [0x1f,0xf0,0xff,0xcf]
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 letp lr, #-4094
 
 # ERROR: [[@LINE+2]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
@@ -159,7 +159,7 @@ letp lr, #8
 letp lr, #-4096
 
 # CHECK: letp lr, .Lstartloop @ encoding: [0x1f'A',0xf0'A',0x01'A',0xc0'A']
-# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 letp lr, .Lstartloop
 
 # CHECK: lctp @ encoding: [0x0f,0xf0,0x01,0xe0]
@@ -172,8 +172,11 @@ it eq
 # ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 lctpeq
 
+# ERROR-NOMVE: [[@LINE+1]]:1: error: instruction requires: mve
 vpste
+# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
 vpselt.s16 q0, q1, q2
+# ERROR-NOMVE: [[@LINE+1]]:1: error: invalid instruction
 vpsele.i32 q0, q1, q2
 # CHECK: vpste @ encoding: [0x71,0xfe,0x4d,0x8f]
 # CHECK: vpselt q0, q1, q2 @ encoding: [0x33,0xfe,0x05,0x0f]
diff --git a/llvm/test/MC/ARM/negative-immediates-fail.s b/llvm/test/MC/ARM/negative-immediates-fail.s
index 9bc4c92b5ee665..4b352743d73e8a 100644
--- a/llvm/test/MC/ARM/negative-immediates-fail.s
+++ b/llvm/test/MC/ARM/negative-immediates-fail.s
@@ -4,25 +4,25 @@
 
 ADC r0, r1, #0xFFFFFEEE
 # CHECK: error: invalid instruction, any one of the following would fix this:
-# CHECK: note: invalid operand for instruction
+# CHECK: note: too many operands for instruction
 # CHECK: note: operand must be a register in range [r0, r15]
 ADC r0, r1, #0xABFEABFF
 # CHECK: error: invalid instruction, any one of the following would fix this:
-# CHECK: note: invalid operand for instruction
+# CHECK: note: too many operands for instruction
 # CHECK: note: operand must be a register in range [r0, r15]
 ADC r0, r1, #0xFFFFFE02
 # CHECK: error: invalid instruction, any one of the following would fix this:
-# CHECK: note: invalid operand for instruction
+# CHECK: note: too many operands for instruction
 # CHECK: note: operand must be a register in range [r0, r15]
 
 ADD.W r0, r0, #0xFF01FF01
 # CHECK: error: invalid instruction, any one of the following would fix this:
-# CHECK: note: invalid operand for instruction
+# CHECK: note: too many operands for instruction
 # CHECK: note: operand must be a register in range [r0, r15]
 
 ORR r0, r1, #0xFFFFFF00
 # CHECK: error: invalid instruction, any one of the following would fix this:
-# CHECK: note: invalid operand for instruction
+# CHECK: note: too many operands for instruction
 # CHECK: note: operand must be a register in range [r0, r15]
 # CHECK: note: instruction requires: thumb2
 ORN r0, r1, #0xFFFFFF00
diff --git a/llvm/test/MC/ARM/not-armv4.s b/llvm/test/MC/ARM/not-armv4.s
index c62c50c26c31d5..b65b12976f78a8 100644
--- a/llvm/test/MC/ARM/not-armv4.s
+++ b/llvm/test/MC/ARM/not-armv4.s
@@ -1,13 +1,21 @@
 @ RUN: not llvm-mc < %s -triple armv4-unknown-unknown -show-encoding 2>&1 | FileCheck %s
 
 @ PR18524
-@ CHECK: instruction requires: armv5t
+@ CHECK: error: invalid instruction, any one of the following would fix this:
+@ CHECK: note: instruction requires: armv5t
+@ CHECK: note: instruction requires: thumb2
 clz r4,r9
 
-@ CHECK: instruction requires: armv6t2
+@ CHECK: error: invalid instruction, any one of the following would fix this:
+@ CHECK: note: instruction requires: armv6t2
+@ CHECK: note: instruction requires: thumb2
 rbit r4,r9
 
 @ CHECK: error: instruction requires: armv6t2
 movw r4,#0x1234
-@ CHECK: error: instruction requires: armv6t2
+
+@ CHECK: error: invalid instruction, any one of the following would fix this:
+@ CHECK: note: invalid operand for instruction
+@ CHECK: note: operand must be a register in range [r0, r15]
+@ CHECK: note: instruction requires: armv6t2
 mov  r4,#0x1234
diff --git a/llvm/test/MC/ARM/register-token-source-loc.s b/llvm/test/MC/ARM/register-token-source-loc.s
index afb6ba0c61a39e..7560f95999e71b 100644
--- a/llvm/test/MC/ARM/register-token-source-loc.s
+++ b/llvm/test/MC/ARM/register-token-source-loc.s
@@ -3,6 +3,9 @@
 // CHECK:     error: invalid instruction, any one of the following would fix this:
 // CHECK-NEXT:  add sp, r0, #4
 // CHECK-NEXT:  ^
+// CHECK-NEXT: note: operand must be a register in range [r0, r7]
+// CHECK-NEXT:   add sp, r0, #4
+// CHECK-NEXT:       ^
 // CHECK-NEXT: note: operand must be a register sp
 // CHECK-NEXT:  add sp, r0, #4
 // CHECK-NEXT:          ^
diff --git a/llvm/test/MC/ARM/tMOVSr.s b/llvm/test/MC/ARM/tMOVSr.s
index 198c90aa5ceb46..09602fecd68513 100644
--- a/llvm/test/MC/ARM/tMOVSr.s
+++ b/llvm/test/MC/ARM/tMOVSr.s
@@ -1,6 +1,7 @@
 @ REQUIRES: asserts
-@ RUN: llvm-mc --triple=thumbv8 --debug %s 2>&1 | FileCheck %s --match-full-lines
+@ RUN: llvm-mc --triple=thumbv8 %s --show-encoding 2>&1 | FileCheck %s --match-full-lines
 
-@ CHECK: Changed to: <MCInst #{{[0-9]+}} tMOVSr <MCOperand Reg:{{[0-9]+}}> <MCOperand Reg:{{[0-9]+}}>>
+// Note this makes sure the narrow instruction is selected
+@ CHECK: movs r2, r3 @ encoding: [0x1a,0x00]
 .text
   movs r2, r3
diff --git a/llvm/test/MC/ARM/thumb-diagnostics.s b/llvm/test/MC/ARM/thumb-diagnostics.s
index cacd7f21cb9d36..171d60ac13f9a8 100644
--- a/llvm/test/MC/ARM/thumb-diagnostics.s
+++ b/llvm/test/MC/ARM/thumb-diagnostics.s
@@ -28,9 +28,12 @@
 @ CHECK-ERRORS:         ^
 @ CHECK-ERRORS: note: instruction variant requires Thumb2
 @ CHECK-ERRORS: note: operand must be a register sp
-@ CHECK-ERRORS-V5: error: instruction variant requires ARMv6 or later
+@ CHECK-ERRORS-V5: error: invalid instruction, any one of the following would fix this:
 @ CHECK-ERRORS-V5:         mov r2, r3
 @ CHECK-ERRORS-V5:         ^
+@ CHECK-ERRORS-V5: note: instruction requires: arm-mode
+@ CHECK-ERRORS-V5: note: operand must be an immediate in the range [0,255] or a relocatable expression
+@ CHECK-ERRORS-V5: note: instruction variant requires ARMv6 or later
 
 @ Immediates where registers were expected
         adds #0, r1, r2
@@ -225,10 +228,11 @@
 
 @ Mismatched source/destination operands for MUL instruction.
         muls r1, r2, r3
-@ CHECK-ERRORS: error: destination register must match source register
+@ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
 @ CHECK-ERRORS:         muls r1, r2, r3
-@ CHECK-ERRORS:              ^
-
+@ CHECK-ERRORS:         ^
+@ CHECK-ERRORS: note: destination register must match a source register
+@ CHECK-ERRORS: note: too many operands for instruction
 
 @ Out of range immediates for STR instruction.
         str r2, [r7, #-1]
@@ -274,30 +278,33 @@
 @ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
 @ CHECK-ERRORS: add sp, #-1
 @ CHECK-ERRORS: ^
+@ CHECK-ERRORS: note: instruction requires: thumb2
+@ CHECK-ERRORS: add sp, #-1
+@ CHECK-ERRORS: ^
 @ CHECK-ERRORS: note: operand must be a register in range [r0, r15]
 @ CHECK-ERRORS: add sp, #-1
 @ CHECK-ERRORS:         ^
 @ CHECK-ERRORS: note: invalid operand for instruction
 @ CHECK-ERRORS: add sp, #-1
 @ CHECK-ERRORS:         ^
-@ CHECK-ERRORS: note: instruction requires: thumb2
-@ CHECK-ERRORS: add sp, #-1
-@ CHECK-ERRORS: ^
 @ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
 @ CHECK-ERRORS: add sp, #3
 @ CHECK-ERRORS: ^
+@ CHECK-ERRORS: note: instruction requires: thumb2
+@ CHECK-ERRORS: add sp, #3
+@ CHECK-ERRORS: ^
 @ CHECK-ERRORS: note: operand must be a register in range [r0, r15]
 @ CHECK-ERRORS: add sp, #3
 @ CHECK-ERRORS:         ^
 @ CHECK-ERRORS: note: invalid operand for instruction
 @ CHECK-ERRORS: add sp, #3
 @ CHECK-ERRORS:         ^
-@ CHECK-ERRORS: note: instruction requires: thumb2
-@ CHECK-ERRORS: add sp, #3
-@ CHECK-ERRORS: ^
 @ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
 @ CHECK-ERRORS: add sp, sp, #512
 @ CHECK-ERRORS: ^
+@ CHECK-ERRORS: note: instruction requires: thumb2
+@ CHECK-ERRORS: add sp, sp, #512
+@ CHECK-ERRORS: ^
 @ CHECK-ERRORS: note: operand must be a register in range [r0, r15]
 @ CHECK-ERRORS: add sp, sp, #512
 @ CHECK-ERRORS:             ^
@@ -305,9 +312,6 @@
 @ CHECK-ERRORS: add sp, sp, #512
 @ CHECK-ERRORS:             ^
 @ CHECK-ERRORS: note: instruction requires: thumb2
-@ CHECK-ERRORS: add sp, sp, #512
-@ CHECK-ERRORS: ^
-@ CHECK-ERRORS: error: instruction requires: thumb2
 @ CHECK-ERRORS: add r2, sp, #1024
 @ CHECK-ERRORS: ^
         add r2, sp, ip
@@ -407,7 +411,8 @@
         adds
         adds r0
 @ CHECK-ERRORS: error: too few operands for instruction
-@ CHECK-ERRORS: error: too few operands for instruction
+@ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
+@ CHECK-ERRORS: note: too few operands for instruction
 
 @------------------------------------------------------------------------------
 @ Out of range width for SBFX/UBFX
diff --git a/llvm/test/MC/ARM/thumb-mov.s b/llvm/test/MC/ARM/thumb-mov.s
index 6f662f3ee2c720..e910722f2edf49 100644
--- a/llvm/test/MC/ARM/thumb-mov.s
+++ b/llvm/test/MC/ARM/thumb-mov.s
@@ -58,10 +58,16 @@
         movs sp, r0
         movs r0, sp
         movs sp, sp
-// CHECK-V7: error: instruction variant requires ARMv8 or later
+// CHECK-V7: error: invalid instruction, any one of the following would fix this:
 // CHECK-V7-NEXT: movs sp, r0
-// CHECK-V7: instruction variant requires ARMv8 or later
+// CHECK-V7: note: instruction variant requires ARMv8 or later
+// CHECK-V7: note: operand must be a register in range [r0, r7]
+// CHECK-V7: error: invalid instruction, any one of the following would fix this:
 // CHECK-V7-NEXT: movs r0, sp
+// CHECK-V7: note: instruction variant requires ARMv8 or later
+// CHECK-V7: note: invalid operand for instruction
+// CHECK-V7: note: operand must be an immediate in the range [0,255] or a relocatable expression
+// CHECK-V7: note: operand must be a register in range [r0, r7]
 // CHECK-V7: error: instruction variant requires ARMv8 or later
 // CHECK-V7-NEXT: movs sp, sp
 // CHECK-V8: movs.w sp, r0            @ encoding: [0x5f,0xea,0x00,0x0d]
@@ -69,8 +75,9 @@
 // CHECK-V8: movs.w sp, sp            @ encoding: [0x5f,0xea,0x0d,0x0d]
 
         mov.w sp, sp
-// CHECK-V7: error: instruction variant requires ARMv8 or later
+// CHECK-V7: error: invalid instruction, any one of the following would fix this:
 // CHECK-V7-NEXT: mov.w sp, sp
+// CHECK-V7: note: instruction variant requires ARMv8 or later
 // CHECK-V8: mov.w sp, sp             @ encoding: [0x4f,0xea,0x0d,0x0d]
 
         movs.w sp, r0
@@ -78,8 +85,9 @@
         movs.w sp, sp
 // CHECK-V7: error: instruction variant requires ARMv8 or later
 // CHECK-V7-NEXT: movs.w sp, r0
-// CHECK-V7: instruction variant requires ARMv8 or later
+// CHECK-V7: error: invalid instruction, any one of the following would fix this:
 // CHECK-V7-NEXT: movs.w r0, sp
+// CHECK-V7: note: instruction variant requires ARMv8 or later
 // CHECK-V7: error: instruction variant requires ARMv8 or later
 // CHECK-V7-NEXT: movs.w sp, sp
 // CHECK-V8: movs.w sp, r0            @ encoding: [0x5f,0xea,0x00,0x0d]
diff --git a/llvm/test/MC/ARM/thumb2-diagnostics.s b/llvm/test/MC/ARM/thumb2-diagnostics.s
index 45efd3c62120c1..afb12ce4309812 100644
--- a/llvm/test/MC/ARM/thumb2-diagnostics.s
+++ b/llvm/test/MC/ARM/thumb2-diagnostics.s
@@ -156,7 +156,9 @@ foo2:
         adds
         adds r0
 @ CHECK-ERRORS: error: too few operands for instruction
-@ CHECK-ERRORS: error: too few operands for instruction
+@ CHECK-ERRORS: error: invalid instruction, any one of the following would fix this:
+@ CHECK-ERRORS: note: too few operands for instruction
+@ CHECK-ERRORS: note: operand must be a register in range [r0, r15]
 
         tst sp, #3
         tst sp, r5
diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index 3f18c3936a38bc..4eaf0adbb46465 100644
--- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -107,6 +107,7 @@
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringExtras.h"
+#include "llvm/MC/TargetRegistry.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -216,6 +217,19 @@ struct ClassInfo {
   /// Is this operand optional and not always required.
   bool IsOptional;
 
+  /// OptionalShouldOffsetCustomParsers - Only used if IsOptional is true.
+  /// Specifies if this optional operand should be assumed to be
+  ///   present for the sake of determining if a custom parser should be
+  ///   used, which is determined by the mnemonic and operand index.
+  /// If true, then the parser will always assume a value representing this
+  ///   operand will always be present when parsing and thus the custom parser
+  ///   will be applied to later tokens in the input stream.
+  /// If false, then the parser will assume it will not be present in the operand
+  ///   vector and therefore will apply the custom parser earlier.
+  /// For an example of usage, see ARMAsmParser and the CondCode operands which are
+  ///   always present at parse time.
+  bool OptionalShouldOffsetCustomParsers;
+
   /// DefaultMethod - The name of the method that returns the default operand
   /// for optional operand
   std::string DefaultMethod;
@@ -1449,6 +1463,11 @@ void AsmMatcherInfo::buildOperandClasses() {
     if (BitInit *BI = dyn_cast<BitInit>(IsOptional))
       CI->IsOptional = BI->getValue();
 
+    Init *OptionalShouldOffsetCustomParsers =
+        Rec->getValueInit("OptionalShouldOffsetCustomParsers");
+    if (BitInit *BI = dyn_cast<BitInit>(OptionalShouldOffsetCustomParsers))
+      CI->OptionalShouldOffsetCustomParsers = BI->getValue();
+
     // Get or construct the default method name.
     Init *DMName = Rec->getValueInit("DefaultMethod");
     if (StringInit *SI = dyn_cast<StringInit>(DMName)) {
@@ -1490,7 +1509,7 @@ void AsmMatcherInfo::buildOperandMatchInfo() {
         OperandMask |= maskTrailingOnes<unsigned>(NumOptionalOps + 1)
                        << (i - NumOptionalOps);
       }
-      if (Op.Class->IsOptional)
+      if (Op.Class->IsOptional && Op.Class->OptionalShouldOffsetCustomParsers)
         ++NumOptionalOps;
     }
 
@@ -3747,6 +3766,9 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
     OS << "        } else {\n";
     OS << "          DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"but formal "
           "operand not required\\n\");\n";
+    OS << "          if (isSubclass(Formal, OptionalMatchClass)) {\n";
+    OS << "            OptionalOperandsMask.set(FormalIdx);\n";
+    OS << "          }\n";
     OS << "        }\n";
     OS << "        continue;\n";
   } else {



More information about the llvm-commits mailing list