[llvm] r209576 - AArch64/ARM64: remove AArch64 from tree prior to renaming ARM64.

Tim Northover tnorthover at apple.com
Sat May 24 05:42:30 PDT 2014


Removed: llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td (removed)
@@ -1,9474 +0,0 @@
-//===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the AArch64 NEON instruction set.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// NEON-specific DAG Nodes.
-//===----------------------------------------------------------------------===//
-
-// (outs Result), (ins Imm, OpCmode)
-def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
-
-def Neon_movi     : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>;
-
-def Neon_mvni     : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>;
-
-// (outs Result), (ins Imm)
-def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1,
-                        [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
-
-// (outs Result), (ins LHS, RHS, CondCode)
-def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3,
-                 [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
-
-// (outs Result), (ins LHS, 0/0.0 constant, CondCode)
-def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3,
-                 [SDTCisVec<0>,  SDTCisVec<1>]>>;
-
-// (outs Result), (ins LHS, RHS)
-def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2,
-                 [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
-
-def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
-                                     SDTCisVT<2, i32>]>;
-def Neon_sqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLs", SDTARMVSH>;
-def Neon_uqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLu", SDTARMVSH>;
-
-def SDTPERMUTE : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
-                               SDTCisSameAs<0, 2>]>;
-def Neon_uzp1    : SDNode<"AArch64ISD::NEON_UZP1", SDTPERMUTE>;
-def Neon_uzp2    : SDNode<"AArch64ISD::NEON_UZP2", SDTPERMUTE>;
-def Neon_zip1    : SDNode<"AArch64ISD::NEON_ZIP1", SDTPERMUTE>;
-def Neon_zip2    : SDNode<"AArch64ISD::NEON_ZIP2", SDTPERMUTE>;
-def Neon_trn1    : SDNode<"AArch64ISD::NEON_TRN1", SDTPERMUTE>;
-def Neon_trn2    : SDNode<"AArch64ISD::NEON_TRN2", SDTPERMUTE>;
-
-def SDTVSHUF : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
-def Neon_rev64    : SDNode<"AArch64ISD::NEON_REV64", SDTVSHUF>;
-def Neon_rev32    : SDNode<"AArch64ISD::NEON_REV32", SDTVSHUF>;
-def Neon_rev16    : SDNode<"AArch64ISD::NEON_REV16", SDTVSHUF>;
-def Neon_vdup : SDNode<"AArch64ISD::NEON_VDUP", SDTypeProfile<1, 1,
-                       [SDTCisVec<0>]>>;
-def Neon_vduplane : SDNode<"AArch64ISD::NEON_VDUPLANE", SDTypeProfile<1, 2,
-                           [SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i64>]>>;
-def Neon_vextract : SDNode<"AArch64ISD::NEON_VEXTRACT", SDTypeProfile<1, 3,
-                           [SDTCisVec<0>,  SDTCisSameAs<0, 1>,
-                           SDTCisSameAs<0, 2>, SDTCisVT<3, i64>]>>;
-
-//===----------------------------------------------------------------------===//
-// Addressing-mode instantiations
-//===----------------------------------------------------------------------===//
-
-multiclass ls_64_pats<dag address, dag Base, dag Offset, ValueType Ty> {
-defm : ls_neutral_pats<LSFP64_LDR, LSFP64_STR, Base,
-                      !foreach(decls.pattern, Offset,
-                               !subst(OFFSET, dword_uimm12, decls.pattern)),
-                      !foreach(decls.pattern, address,
-                               !subst(OFFSET, dword_uimm12,
-                               !subst(ALIGN, min_align8, decls.pattern))),
-                      Ty>;
-}
-
-multiclass ls_128_pats<dag address, dag Base, dag Offset, ValueType Ty> {
-defm : ls_neutral_pats<LSFP128_LDR, LSFP128_STR, Base,
-                       !foreach(decls.pattern, Offset,
-                                !subst(OFFSET, qword_uimm12, decls.pattern)),
-                       !foreach(decls.pattern, address,
-                                !subst(OFFSET, qword_uimm12,
-                                !subst(ALIGN, min_align16, decls.pattern))),
-                      Ty>;
-}
-
-multiclass uimm12_neon_pats<dag address, dag Base, dag Offset> {
-  defm : ls_64_pats<address, Base, Offset, v8i8>;
-  defm : ls_64_pats<address, Base, Offset, v4i16>;
-  defm : ls_64_pats<address, Base, Offset, v2i32>;
-  defm : ls_64_pats<address, Base, Offset, v1i64>;
-  defm : ls_64_pats<address, Base, Offset, v2f32>;
-  defm : ls_64_pats<address, Base, Offset, v1f64>;
-
-  defm : ls_128_pats<address, Base, Offset, v16i8>;
-  defm : ls_128_pats<address, Base, Offset, v8i16>;
-  defm : ls_128_pats<address, Base, Offset, v4i32>;
-  defm : ls_128_pats<address, Base, Offset, v2i64>;
-  defm : ls_128_pats<address, Base, Offset, v4f32>;
-  defm : ls_128_pats<address, Base, Offset, v2f64>;
-}
-
-defm : uimm12_neon_pats<(A64WrapperSmall
-                          tconstpool:$Hi, tconstpool:$Lo12, ALIGN),
-                        (ADRPxi tconstpool:$Hi), (i64 tconstpool:$Lo12)>;
-
-//===----------------------------------------------------------------------===//
-// Multiclasses
-//===----------------------------------------------------------------------===//
-
-multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size,  bits<5> opcode,
-                                string asmop, SDPatternOperator opnode8B,
-                                SDPatternOperator opnode16B,
-                                bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8B :  NeonI_3VSame<0b0, u, size, opcode,
-               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
-               asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
-               [(set (v8i8 VPR64:$Rd),
-                  (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
-               NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def _16B : NeonI_3VSame<0b1, u, size, opcode,
-               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
-               asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
-               [(set (v16i8 VPR128:$Rd),
-                  (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
-               NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-  }
-
-}
-
-multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode,
-                                  string asmop, SDPatternOperator opnode,
-                                  bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _4H : NeonI_3VSame<0b0, u, 0b01, opcode,
-              (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
-              asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h",
-              [(set (v4i16 VPR64:$Rd),
-                 (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))],
-              NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def _8H : NeonI_3VSame<0b1, u, 0b01, opcode,
-              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
-              asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h",
-              [(set (v8i16 VPR128:$Rd),
-                 (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))],
-              NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def _2S : NeonI_3VSame<0b0, u, 0b10, opcode,
-              (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
-              asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
-              [(set (v2i32 VPR64:$Rd),
-                 (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))],
-              NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def _4S : NeonI_3VSame<0b1, u, 0b10, opcode,
-              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
-              asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
-              [(set (v4i32 VPR128:$Rd),
-                 (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))],
-              NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-  }
-}
-multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode,
-                                  string asmop, SDPatternOperator opnode,
-                                  bit Commutable = 0>
-   : NeonI_3VSame_HS_sizes<u, opcode,  asmop, opnode, Commutable> {
-  let isCommutable = Commutable in {
-    def _8B :  NeonI_3VSame<0b0, u, 0b00, opcode,
-               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
-               asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
-               [(set (v8i8 VPR64:$Rd),
-                  (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
-               NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def _16B : NeonI_3VSame<0b1, u, 0b00, opcode,
-               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
-               asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
-               [(set (v16i8 VPR128:$Rd),
-                  (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
-               NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-  }
-}
-
-multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode,
-                                   string asmop, SDPatternOperator opnode,
-                                   bit Commutable = 0>
-   : NeonI_3VSame_BHS_sizes<u, opcode,  asmop, opnode, Commutable> {
-  let isCommutable = Commutable in {
-    def _2D : NeonI_3VSame<0b1, u, 0b11, opcode,
-              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
-              asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
-              [(set (v2i64 VPR128:$Rd),
-                 (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))],
-              NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-  }
-}
-
-// Multiclass NeonI_3VSame_SD_sizes: Operand types are floating point types,
-// but Result types can be integer or floating point types.
-multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode,
-                                 string asmop, SDPatternOperator opnode,
-                                 ValueType ResTy2S, ValueType ResTy4S,
-                                 ValueType ResTy2D, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode,
-              (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
-              asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
-              [(set (ResTy2S VPR64:$Rd),
-                 (ResTy2S (opnode (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))],
-              NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode,
-              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
-              asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
-              [(set (ResTy4S VPR128:$Rd),
-                 (ResTy4S (opnode (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))],
-              NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode,
-              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
-              asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
-              [(set (ResTy2D VPR128:$Rd),
-                 (ResTy2D (opnode (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
-              NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Instruction Definitions
-//===----------------------------------------------------------------------===//
-
-// Vector Arithmetic Instructions
-
-// Vector Add (Integer and Floating-Point)
-
-defm ADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>;
-defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd,
-                                     v2f32, v4f32, v2f64, 1>;
-
-// Patterns to match add of v1i8/v1i16/v1i32 types
-def : Pat<(v1i8 (add FPR8:$Rn, FPR8:$Rm)),
-          (EXTRACT_SUBREG
-              (ADDvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
-                         (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
-              sub_8)>;
-def : Pat<(v1i16 (add FPR16:$Rn, FPR16:$Rm)),
-          (EXTRACT_SUBREG
-              (ADDvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
-                         (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
-              sub_16)>;
-def : Pat<(v1i32 (add FPR32:$Rn, FPR32:$Rm)),
-          (EXTRACT_SUBREG
-              (ADDvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
-                         (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
-              sub_32)>;
-
-// Vector Sub (Integer and Floating-Point)
-
-defm SUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>;
-defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub,
-                                     v2f32, v4f32, v2f64, 0>;
-
-// Patterns to match sub of v1i8/v1i16/v1i32 types
-def : Pat<(v1i8 (sub FPR8:$Rn, FPR8:$Rm)),
-          (EXTRACT_SUBREG
-              (SUBvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
-                         (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
-              sub_8)>;
-def : Pat<(v1i16 (sub FPR16:$Rn, FPR16:$Rm)),
-          (EXTRACT_SUBREG
-              (SUBvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
-                         (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
-              sub_16)>;
-def : Pat<(v1i32 (sub FPR32:$Rn, FPR32:$Rm)),
-          (EXTRACT_SUBREG
-              (SUBvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
-                         (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
-              sub_32)>;
-
-// Vector Multiply (Integer and Floating-Point)
-
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
-defm MULvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b10011, "mul", mul, 1>;
-defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul,
-                                     v2f32, v4f32, v2f64, 1>;
-}
-
-// Patterns to match mul of v1i8/v1i16/v1i32 types
-def : Pat<(v1i8 (mul FPR8:$Rn, FPR8:$Rm)),
-          (EXTRACT_SUBREG 
-              (MULvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
-                         (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
-              sub_8)>;
-def : Pat<(v1i16 (mul FPR16:$Rn, FPR16:$Rm)),
-          (EXTRACT_SUBREG 
-              (MULvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
-                         (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
-              sub_16)>;
-def : Pat<(v1i32 (mul FPR32:$Rn, FPR32:$Rm)),
-          (EXTRACT_SUBREG
-              (MULvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
-                         (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
-              sub_32)>;
-
-// Vector Multiply (Polynomial)
-
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
-defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
-                                    int_arm_neon_vmulp, int_arm_neon_vmulp, 1>;
-}
-
-// Vector Multiply-accumulate and Multiply-subtract (Integer)
-
-// class NeonI_3VSame_Constraint_impl: NeonI_3VSame with no data type and
-// two operands constraints.
-class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
-  RegisterOperand VPRC, ValueType OpTy, bit q, bit u, bits<2> size,
-  bits<5> opcode, SDPatternOperator opnode>
-  : NeonI_3VSame<q, u, size, opcode,
-    (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
-    asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
-    [(set (OpTy VPRC:$Rd),
-       (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn), (OpTy VPRC:$Rm))))],
-    NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-}
-
-def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
-                       (add node:$Ra, (mul node:$Rn, node:$Rm))>;
-
-def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
-                       (sub node:$Ra, (mul node:$Rn, node:$Rm))>;
-
-
-let SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC] in {
-def MLAvvv_8B:  NeonI_3VSame_Constraint_impl<"mla", ".8b",  VPR64,  v8i8,
-                                             0b0, 0b0, 0b00, 0b10010, Neon_mla>;
-def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8,
-                                             0b1, 0b0, 0b00, 0b10010, Neon_mla>;
-def MLAvvv_4H:  NeonI_3VSame_Constraint_impl<"mla", ".4h",  VPR64,  v4i16,
-                                             0b0, 0b0, 0b01, 0b10010, Neon_mla>;
-def MLAvvv_8H:  NeonI_3VSame_Constraint_impl<"mla", ".8h",  VPR128, v8i16,
-                                             0b1, 0b0, 0b01, 0b10010, Neon_mla>;
-def MLAvvv_2S:  NeonI_3VSame_Constraint_impl<"mla", ".2s",  VPR64,  v2i32,
-                                             0b0, 0b0, 0b10, 0b10010, Neon_mla>;
-def MLAvvv_4S:  NeonI_3VSame_Constraint_impl<"mla", ".4s",  VPR128, v4i32,
-                                             0b1, 0b0, 0b10, 0b10010, Neon_mla>;
-
-def MLSvvv_8B:  NeonI_3VSame_Constraint_impl<"mls", ".8b",  VPR64,  v8i8,
-                                             0b0, 0b1, 0b00, 0b10010, Neon_mls>;
-def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8,
-                                             0b1, 0b1, 0b00, 0b10010, Neon_mls>;
-def MLSvvv_4H:  NeonI_3VSame_Constraint_impl<"mls", ".4h",  VPR64,  v4i16,
-                                             0b0, 0b1, 0b01, 0b10010, Neon_mls>;
-def MLSvvv_8H:  NeonI_3VSame_Constraint_impl<"mls", ".8h",  VPR128, v8i16,
-                                             0b1, 0b1, 0b01, 0b10010, Neon_mls>;
-def MLSvvv_2S:  NeonI_3VSame_Constraint_impl<"mls", ".2s",  VPR64,  v2i32,
-                                             0b0, 0b1, 0b10, 0b10010, Neon_mls>;
-def MLSvvv_4S:  NeonI_3VSame_Constraint_impl<"mls", ".4s",  VPR128, v4i32,
-                                             0b1, 0b1, 0b10, 0b10010, Neon_mls>;
-}
-
-// Vector Multiply-accumulate and Multiply-subtract (Floating Point)
-
-def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
-                        (fadd node:$Ra, (fmul_su node:$Rn, node:$Rm))>;
-
-def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
-                        (fsub node:$Ra, (fmul_su node:$Rn, node:$Rm))>;
-
-let Predicates = [HasNEON, UseFusedMAC],
-    SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC] in {
-def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s",  VPR64,  v2f32,
-                                             0b0, 0b0, 0b00, 0b11001, Neon_fmla>;
-def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s",  VPR128, v4f32,
-                                             0b1, 0b0, 0b00, 0b11001, Neon_fmla>;
-def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d",  VPR128, v2f64,
-                                             0b1, 0b0, 0b01, 0b11001, Neon_fmla>;
-
-def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s",  VPR64,  v2f32,
-                                              0b0, 0b0, 0b10, 0b11001, Neon_fmls>;
-def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s",  VPR128, v4f32,
-                                             0b1, 0b0, 0b10, 0b11001, Neon_fmls>;
-def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d",  VPR128, v2f64,
-                                             0b1, 0b0, 0b11, 0b11001, Neon_fmls>;
-}
-
-// We're also allowed to match the fma instruction regardless of compile
-// options.
-def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)),
-          (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
-def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
-          (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
-def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
-          (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
-
-def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)),
-          (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
-def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
-          (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
-def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
-          (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
-
-// Vector Divide (Floating-Point)
-
-let SchedRW = [WriteFPDiv, ReadFPDiv, ReadFPDiv] in {
-defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv,
-                                     v2f32, v4f32, v2f64, 0>;
-}
-
-// Vector Bitwise Operations
-
-// Vector Bitwise AND
-
-defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>;
-
-// Vector Bitwise Exclusive OR
-
-defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>;
-
-// Vector Bitwise OR
-
-defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
-
-// ORR disassembled as MOV if Vn==Vm
-
-// Vector Move - register
-// Alias for ORR if Vn=Vm.
-def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
-                    (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn)>;
-def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
-                    (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn)>;
-
-// The MOVI instruction takes two immediate operands.  The first is the
-// immediate encoding, while the second is the cmode.  A cmode of 14, or
-// 0b1110, produces a MOVI operation, rather than a MVNI, ORR, or BIC.
-def Neon_AllZero : PatFrag<(ops), (Neon_movi (i32 0), (i32 14))>;
-def Neon_AllOne : PatFrag<(ops), (Neon_movi (i32 255), (i32 14))>;
-
-def Neon_not8B  : PatFrag<(ops node:$in),
-                          (xor node:$in, (bitconvert (v8i8 Neon_AllOne)))>;
-def Neon_not16B : PatFrag<(ops node:$in),
-                          (xor node:$in, (bitconvert (v16i8 Neon_AllOne)))>;
-
-def Neon_orn8B : PatFrag<(ops node:$Rn, node:$Rm),
-                         (or node:$Rn, (Neon_not8B node:$Rm))>;
-
-def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm),
-                          (or node:$Rn, (Neon_not16B node:$Rm))>;
-
-def Neon_bic8B : PatFrag<(ops node:$Rn, node:$Rm),
-                         (and node:$Rn, (Neon_not8B node:$Rm))>;
-
-def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm),
-                          (and node:$Rn, (Neon_not16B node:$Rm))>;
-
-
-// Vector Bitwise OR NOT - register
-
-defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn",
-                                   Neon_orn8B, Neon_orn16B, 0>;
-
-// Vector Bitwise Bit Clear (AND NOT) - register
-
-defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic",
-                                   Neon_bic8B, Neon_bic16B, 0>;
-
-multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B,
-                                   SDPatternOperator opnode16B,
-                                   Instruction INST8B,
-                                   Instruction INST16B> {
-  def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)),
-            (INST8B VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)),
-            (INST8B VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)),
-            (INST8B VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)),
-            (INST16B VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)),
-            (INST16B VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)),
-            (INST16B VPR128:$Rn, VPR128:$Rm)>;
-}
-
-// Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN
-defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>;
-defm : Neon_bitwise2V_patterns<or,  or,  ORRvvv_8B, ORRvvv_16B>;
-defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>;
-defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>;
-defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>;
-
-//   Vector Bitwise Select
-def BSLvvv_8B  : NeonI_3VSame_Constraint_impl<"bsl", ".8b",  VPR64, v8i8,
-                                              0b0, 0b1, 0b01, 0b00011, vselect>;
-
-def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8,
-                                              0b1, 0b1, 0b01, 0b00011, vselect>;
-
-multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode,
-                                   Instruction INST8B,
-                                   Instruction INST16B> {
-  // Disassociate type from instruction definition
-  def : Pat<(v8i8 (opnode (v8i8 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v2i32 (opnode (v2i32 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v2f32 (opnode (v2i32 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v4i16 (opnode (v4i16 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v1i64 (opnode (v1i64 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v1f64 (opnode (v1i64 VPR64:$src), VPR64:$Rn, VPR64:$Rm)),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v16i8 (opnode (v16i8 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v4i32 (opnode (v4i32 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v8i16 (opnode (v8i16 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v2i64 (opnode (v2i64 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v2f64 (opnode (v2i64 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v4f32 (opnode (v4i32 VPR128:$src), VPR128:$Rn, VPR128:$Rm)),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-
-  // Allow to match BSL instruction pattern with non-constant operand
-  def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd),
-                    (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
-          (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd),
-                     (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
-          (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd),
-                     (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
-          (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd),
-                     (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
-          (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd),
-                     (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
-          (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd),
-                     (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
-          (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd),
-                     (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
-          (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd),
-                     (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
-          (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
-
-  // Allow to match llvm.arm.* intrinsics.
-  def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src),
-                    (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src),
-                    (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src),
-                    (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src),
-                    (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src),
-                    (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v1f64 (int_arm_neon_vbsl (v1f64 VPR64:$src),
-                    (v1f64 VPR64:$Rn), (v1f64 VPR64:$Rm))),
-            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
-  def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src),
-                    (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src),
-                    (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src),
-                    (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src),
-                    (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src),
-                    (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-  def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
-                    (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
-            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
-}
-
-// Additional patterns for bitwise instruction BSL
-defm: Neon_bitwise3V_patterns<vselect, BSLvvv_8B, BSLvvv_16B>;
-
-def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm),
-                           (vselect node:$src, node:$Rn, node:$Rm),
-                           [{ (void)N; return false; }]>;
-
-// Vector Bitwise Insert if True
-
-def BITvvv_8B  : NeonI_3VSame_Constraint_impl<"bit", ".8b", VPR64,   v8i8,
-                   0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
-def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8,
-                   0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
-
-// Vector Bitwise Insert if False
-
-def BIFvvv_8B  : NeonI_3VSame_Constraint_impl<"bif", ".8b", VPR64,  v8i8,
-                                0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
-def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8,
-                                0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
-
-// Vector Absolute Difference and Accumulate (Signed, Unsigned)
-
-def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
-                       (add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>;
-def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
-                       (add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>;
-
-// Vector Absolute Difference and Accumulate (Unsigned)
-def UABAvvv_8B :  NeonI_3VSame_Constraint_impl<"uaba", ".8b",  VPR64,  v8i8,
-                    0b0, 0b1, 0b00, 0b01111, Neon_uaba>;
-def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8,
-                    0b1, 0b1, 0b00, 0b01111, Neon_uaba>;
-def UABAvvv_4H :  NeonI_3VSame_Constraint_impl<"uaba", ".4h",  VPR64,  v4i16,
-                    0b0, 0b1, 0b01, 0b01111, Neon_uaba>;
-def UABAvvv_8H :  NeonI_3VSame_Constraint_impl<"uaba", ".8h",  VPR128, v8i16,
-                    0b1, 0b1, 0b01, 0b01111, Neon_uaba>;
-def UABAvvv_2S :  NeonI_3VSame_Constraint_impl<"uaba", ".2s",  VPR64,  v2i32,
-                    0b0, 0b1, 0b10, 0b01111, Neon_uaba>;
-def UABAvvv_4S :  NeonI_3VSame_Constraint_impl<"uaba", ".4s",  VPR128, v4i32,
-                    0b1, 0b1, 0b10, 0b01111, Neon_uaba>;
-
-// Vector Absolute Difference and Accumulate (Signed)
-def SABAvvv_8B :  NeonI_3VSame_Constraint_impl<"saba", ".8b",  VPR64,  v8i8,
-                    0b0, 0b0, 0b00, 0b01111, Neon_saba>;
-def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8,
-                    0b1, 0b0, 0b00, 0b01111, Neon_saba>;
-def SABAvvv_4H :  NeonI_3VSame_Constraint_impl<"saba", ".4h",  VPR64,  v4i16,
-                    0b0, 0b0, 0b01, 0b01111, Neon_saba>;
-def SABAvvv_8H :  NeonI_3VSame_Constraint_impl<"saba", ".8h",  VPR128, v8i16,
-                    0b1, 0b0, 0b01, 0b01111, Neon_saba>;
-def SABAvvv_2S :  NeonI_3VSame_Constraint_impl<"saba", ".2s",  VPR64,  v2i32,
-                    0b0, 0b0, 0b10, 0b01111, Neon_saba>;
-def SABAvvv_4S :  NeonI_3VSame_Constraint_impl<"saba", ".4s",  VPR128, v4i32,
-                    0b1, 0b0, 0b10, 0b01111, Neon_saba>;
-
-
-// Vector Absolute Difference (Signed, Unsigned)
-defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>;
-defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>;
-
-// Vector Absolute Difference (Floating Point)
-defm FABDvvv: NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd",
-                                    int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>;
-
-// Vector Reciprocal Step (Floating Point)
-defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps",
-                                       int_arm_neon_vrecps,
-                                       v2f32, v4f32, v2f64, 0>;
-
-// Vector Reciprocal Square Root Step (Floating Point)
-defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts",
-                                        int_arm_neon_vrsqrts,
-                                        v2f32, v4f32, v2f64, 0>;
-
-// Vector Comparisons
-
-def Neon_cmeq : PatFrag<(ops node:$lhs, node:$rhs),
-                        (Neon_cmp node:$lhs, node:$rhs, SETEQ)>;
-def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs),
-                         (Neon_cmp node:$lhs, node:$rhs, SETUGE)>;
-def Neon_cmge : PatFrag<(ops node:$lhs, node:$rhs),
-                        (Neon_cmp node:$lhs, node:$rhs, SETGE)>;
-def Neon_cmhi : PatFrag<(ops node:$lhs, node:$rhs),
-                        (Neon_cmp node:$lhs, node:$rhs, SETUGT)>;
-def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs),
-                        (Neon_cmp node:$lhs, node:$rhs, SETGT)>;
-
-// NeonI_compare_aliases class: swaps register operands to implement
-// comparison aliases, e.g., CMLE is alias for CMGE with operands reversed.
-class NeonI_compare_aliases<string asmop, string asmlane,
-                            Instruction inst, RegisterOperand VPRC>
-  : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane #
-                    ", $Rm" # asmlane,
-                  (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
-
-// Vector Comparisons (Integer)
-
-// Vector Compare Mask Equal (Integer)
-let isCommutable =1 in {
-defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 0>;
-}
-
-// Vector Compare Mask Higher or Same (Unsigned Integer)
-defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>;
-
-// Vector Compare Mask Greater Than or Equal (Integer)
-defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>;
-
-// Vector Compare Mask Higher (Unsigned Integer)
-defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>;
-
-// Vector Compare Mask Greater Than (Integer)
-defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>;
-
-// Vector Compare Mask Bitwise Test (Integer)
-defm CMTSTvvv:  NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>;
-
-// Vector Compare Mask Less or Same (Unsigned Integer)
-// CMLS is alias for CMHS with operands reversed.
-def CMLSvvv_8B  : NeonI_compare_aliases<"cmls", ".8b",  CMHSvvv_8B,  VPR64>;
-def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>;
-def CMLSvvv_4H  : NeonI_compare_aliases<"cmls", ".4h",  CMHSvvv_4H,  VPR64>;
-def CMLSvvv_8H  : NeonI_compare_aliases<"cmls", ".8h",  CMHSvvv_8H,  VPR128>;
-def CMLSvvv_2S  : NeonI_compare_aliases<"cmls", ".2s",  CMHSvvv_2S,  VPR64>;
-def CMLSvvv_4S  : NeonI_compare_aliases<"cmls", ".4s",  CMHSvvv_4S,  VPR128>;
-def CMLSvvv_2D  : NeonI_compare_aliases<"cmls", ".2d",  CMHSvvv_2D,  VPR128>;
-
-// Vector Compare Mask Less Than or Equal (Integer)
-// CMLE is alias for CMGE with operands reversed.
-def CMLEvvv_8B  : NeonI_compare_aliases<"cmle", ".8b",  CMGEvvv_8B,  VPR64>;
-def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>;
-def CMLEvvv_4H  : NeonI_compare_aliases<"cmle", ".4h",  CMGEvvv_4H,  VPR64>;
-def CMLEvvv_8H  : NeonI_compare_aliases<"cmle", ".8h",  CMGEvvv_8H,  VPR128>;
-def CMLEvvv_2S  : NeonI_compare_aliases<"cmle", ".2s",  CMGEvvv_2S,  VPR64>;
-def CMLEvvv_4S  : NeonI_compare_aliases<"cmle", ".4s",  CMGEvvv_4S,  VPR128>;
-def CMLEvvv_2D  : NeonI_compare_aliases<"cmle", ".2d",  CMGEvvv_2D,  VPR128>;
-
-// Vector Compare Mask Lower (Unsigned Integer)
-// CMLO is alias for CMHI with operands reversed.
-def CMLOvvv_8B  : NeonI_compare_aliases<"cmlo", ".8b",  CMHIvvv_8B,  VPR64>;
-def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>;
-def CMLOvvv_4H  : NeonI_compare_aliases<"cmlo", ".4h",  CMHIvvv_4H,  VPR64>;
-def CMLOvvv_8H  : NeonI_compare_aliases<"cmlo", ".8h",  CMHIvvv_8H,  VPR128>;
-def CMLOvvv_2S  : NeonI_compare_aliases<"cmlo", ".2s",  CMHIvvv_2S,  VPR64>;
-def CMLOvvv_4S  : NeonI_compare_aliases<"cmlo", ".4s",  CMHIvvv_4S,  VPR128>;
-def CMLOvvv_2D  : NeonI_compare_aliases<"cmlo", ".2d",  CMHIvvv_2D,  VPR128>;
-
-// Vector Compare Mask Less Than (Integer)
-// CMLT is alias for CMGT with operands reversed.
-def CMLTvvv_8B  : NeonI_compare_aliases<"cmlt", ".8b",  CMGTvvv_8B,  VPR64>;
-def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>;
-def CMLTvvv_4H  : NeonI_compare_aliases<"cmlt", ".4h",  CMGTvvv_4H,  VPR64>;
-def CMLTvvv_8H  : NeonI_compare_aliases<"cmlt", ".8h",  CMGTvvv_8H,  VPR128>;
-def CMLTvvv_2S  : NeonI_compare_aliases<"cmlt", ".2s",  CMGTvvv_2S,  VPR64>;
-def CMLTvvv_4S  : NeonI_compare_aliases<"cmlt", ".4s",  CMGTvvv_4S,  VPR128>;
-def CMLTvvv_2D  : NeonI_compare_aliases<"cmlt", ".2d",  CMGTvvv_2D,  VPR128>;
-
-
-def neon_uimm0_asmoperand : AsmOperandClass
-{
-  let Name = "UImm0";
-  let PredicateMethod = "isUImm<0>";
-  let RenderMethod = "addImmOperands";
-}
-
-def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> {
-  let ParserMatchClass = neon_uimm0_asmoperand;
-  let PrintMethod = "printNeonUImm0Operand";
-
-}
-
-multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC>
-{
-  def _8B :  NeonI_2VMisc<0b0, u, 0b00, opcode,
-             (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
-             asmop # "\t$Rd.8b, $Rn.8b, $Imm",
-             [(set (v8i8 VPR64:$Rd),
-                (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))],
-             NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode,
-             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
-             asmop # "\t$Rd.16b, $Rn.16b, $Imm",
-             [(set (v16i8 VPR128:$Rd),
-                (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))],
-             NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode,
-            (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
-            asmop # "\t$Rd.4h, $Rn.4h, $Imm",
-            [(set (v4i16 VPR64:$Rd),
-               (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))],
-            NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-
-  def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode,
-            (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
-            asmop # "\t$Rd.8h, $Rn.8h, $Imm",
-            [(set (v8i16 VPR128:$Rd),
-               (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))],
-            NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-
-  def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode,
-            (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
-            asmop # "\t$Rd.2s, $Rn.2s, $Imm",
-            [(set (v2i32 VPR64:$Rd),
-               (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))],
-            NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-
-  def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode,
-            (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
-            asmop # "\t$Rd.4s, $Rn.4s, $Imm",
-            [(set (v4i32 VPR128:$Rd),
-               (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))],
-            NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-
-  def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode,
-            (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
-            asmop # "\t$Rd.2d, $Rn.2d, $Imm",
-            [(set (v2i64 VPR128:$Rd),
-               (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))],
-            NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-// Vector Compare Mask Equal to Zero (Integer)
-defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>;
-
-// Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
-defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>;
-
-// Vector Compare Mask Greater Than Zero (Signed Integer)
-defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>;
-
-// Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
-defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>;
-
-// Vector Compare Mask Less Than Zero (Signed Integer)
-defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>;
-
-// Vector Comparisons (Floating Point)
-
-// Vector Compare Mask Equal (Floating Point)
-let isCommutable =1 in {
-defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq,
-                                      v2i32, v4i32, v2i64, 0>;
-}
-
-// Vector Compare Mask Greater Than Or Equal (Floating Point)
-defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge,
-                                      v2i32, v4i32, v2i64, 0>;
-
-// Vector Compare Mask Greater Than (Floating Point)
-defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt,
-                                      v2i32, v4i32, v2i64, 0>;
-
-// Vector Compare Mask Less Than Or Equal (Floating Point)
-// FCMLE is alias for FCMGE with operands reversed.
-def FCMLEvvv_2S  : NeonI_compare_aliases<"fcmle", ".2s",  FCMGEvvv_2S,  VPR64>;
-def FCMLEvvv_4S  : NeonI_compare_aliases<"fcmle", ".4s",  FCMGEvvv_4S,  VPR128>;
-def FCMLEvvv_2D  : NeonI_compare_aliases<"fcmle", ".2d",  FCMGEvvv_2D,  VPR128>;
-
-// Vector Compare Mask Less Than (Floating Point)
-// FCMLT is alias for FCMGT with operands reversed.
-def FCMLTvvv_2S  : NeonI_compare_aliases<"fcmlt", ".2s",  FCMGTvvv_2S,  VPR64>;
-def FCMLTvvv_4S  : NeonI_compare_aliases<"fcmlt", ".4s",  FCMGTvvv_4S,  VPR128>;
-def FCMLTvvv_2D  : NeonI_compare_aliases<"fcmlt", ".2d",  FCMGTvvv_2D,  VPR128>;
-
-def fpzero_izero_asmoperand : AsmOperandClass {
-  let Name = "FPZeroIZero";
-  let ParserMethod = "ParseFPImm0AndImm0Operand";
-  let DiagnosticType = "FPZero";
-}
-
-def fpzz32 : Operand<f32>,
-             ComplexPattern<f32, 1, "SelectFPZeroOperand", [fpimm]> {
-  let ParserMatchClass = fpzero_izero_asmoperand;
-  let PrintMethod = "printFPZeroOperand";
-  let DecoderMethod = "DecodeFPZeroOperand";
-}
-
-multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
-                              string asmop, CondCode CC>
-{
-  def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
-            (outs VPR64:$Rd), (ins VPR64:$Rn, fpzz32:$FPImm),
-            asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
-            [(set (v2i32 VPR64:$Rd),
-               (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpzz32:$FPImm), CC)))],
-            NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-
-  def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
-            (outs VPR128:$Rd), (ins VPR128:$Rn, fpzz32:$FPImm),
-            asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
-            [(set (v4i32 VPR128:$Rd),
-               (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpzz32:$FPImm), CC)))],
-            NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-
-  def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
-            (outs VPR128:$Rd), (ins VPR128:$Rn, fpzz32:$FPImm),
-            asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
-            [(set (v2i64 VPR128:$Rd),
-               (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpzz32:$FPImm), CC)))],
-            NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-// Vector Compare Mask Equal to Zero (Floating Point)
-defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>;
-
-// Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
-defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>;
-
-// Vector Compare Mask Greater Than Zero (Floating Point)
-defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>;
-
-// Vector Compare Mask Less Than or Equal To Zero (Floating Point)
-defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>;
-
-// Vector Compare Mask Less Than Zero (Floating Point)
-defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>;
-
-// Vector Absolute Comparisons (Floating Point)
-
-// Vector Absolute Compare Mask Greater Than Or Equal (Floating Point)
-defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge",
-                                      int_arm_neon_vacge,
-                                      v2i32, v4i32, v2i64, 0>;
-
-// Vector Absolute Compare Mask Greater Than (Floating Point)
-defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt",
-                                      int_arm_neon_vacgt,
-                                      v2i32, v4i32, v2i64, 0>;
-
-// Vector Absolute Compare Mask Less Than Or Equal (Floating Point)
-// FACLE is alias for FACGE with operands reversed.
-def FACLEvvv_2S  : NeonI_compare_aliases<"facle", ".2s",  FACGEvvv_2S,  VPR64>;
-def FACLEvvv_4S  : NeonI_compare_aliases<"facle", ".4s",  FACGEvvv_4S,  VPR128>;
-def FACLEvvv_2D  : NeonI_compare_aliases<"facle", ".2d",  FACGEvvv_2D,  VPR128>;
-
-// Vector Absolute Compare Mask Less Than (Floating Point)
-// FACLT is alias for FACGT with operands reversed.
-def FACLTvvv_2S  : NeonI_compare_aliases<"faclt", ".2s",  FACGTvvv_2S,  VPR64>;
-def FACLTvvv_4S  : NeonI_compare_aliases<"faclt", ".4s",  FACGTvvv_4S,  VPR128>;
-def FACLTvvv_2D  : NeonI_compare_aliases<"faclt", ".2d",  FACGTvvv_2D,  VPR128>;
-
-// Vector halving add (Integer Signed, Unsigned)
-defm SHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
-                                        int_arm_neon_vhadds, 1>;
-defm UHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
-                                        int_arm_neon_vhaddu, 1>;
-
-// Vector halving sub (Integer Signed, Unsigned)
-defm SHSUBvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
-                                        int_arm_neon_vhsubs, 0>;
-defm UHSUBvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
-                                        int_arm_neon_vhsubu, 0>;
-
-// Vector rouding halving add (Integer Signed, Unsigned)
-defm SRHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
-                                         int_arm_neon_vrhadds, 1>;
-defm URHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
-                                         int_arm_neon_vrhaddu, 1>;
-
-// Vector Saturating add (Integer Signed, Unsigned)
-defm SQADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
-                   int_arm_neon_vqadds, 1>;
-defm UQADDvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
-                   int_arm_neon_vqaddu, 1>;
-
-// Vector Saturating sub (Integer Signed, Unsigned)
-defm SQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
-                   int_arm_neon_vqsubs, 1>;
-defm UQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
-                   int_arm_neon_vqsubu, 1>;
-
-// Vector Shift Left (Signed and Unsigned Integer)
-defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
-                 int_arm_neon_vshifts, 1>;
-defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
-                 int_arm_neon_vshiftu, 1>;
-
-// Vector Saturating Shift Left (Signed and Unsigned Integer)
-defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
-                  int_arm_neon_vqshifts, 1>;
-defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
-                  int_arm_neon_vqshiftu, 1>;
-
-// Vector Rouding Shift Left (Signed and Unsigned Integer)
-defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
-                  int_arm_neon_vrshifts, 1>;
-defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
-                  int_arm_neon_vrshiftu, 1>;
-
-// Vector Saturating Rouding Shift Left (Signed and Unsigned Integer)
-defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
-                   int_arm_neon_vqrshifts, 1>;
-defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
-                   int_arm_neon_vqrshiftu, 1>;
-
-// Vector Maximum (Signed and Unsigned Integer)
-defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
-defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;
-
-// Vector Minimum (Signed and Unsigned Integer)
-defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
-defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;
-
-// Vector Maximum (Floating Point)
-defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
-                                     int_arm_neon_vmaxs,
-                                     v2f32, v4f32, v2f64, 1>;
-
-// Vector Minimum (Floating Point)
-defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
-                                     int_arm_neon_vmins,
-                                     v2f32, v4f32, v2f64, 1>;
-
-// Vector maxNum (Floating Point) -  prefer a number over a quiet NaN)
-defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
-                                       int_aarch64_neon_vmaxnm,
-                                       v2f32, v4f32, v2f64, 1>;
-
-// Vector minNum (Floating Point) - prefer a number over a quiet NaN)
-defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
-                                       int_aarch64_neon_vminnm,
-                                       v2f32, v4f32, v2f64, 1>;
-
-// Vector Maximum Pairwise (Signed and Unsigned Integer)
-defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
-defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;
-
-// Vector Minimum Pairwise (Signed and Unsigned Integer)
-defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
-defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;
-
-// Vector Maximum Pairwise (Floating Point)
-defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
-                                     int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;
-
-// Vector Minimum Pairwise (Floating Point)
-defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
-                                     int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;
-
-// Vector maxNum Pairwise (Floating Point) -  prefer a number over a quiet NaN)
-defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
-                                       int_aarch64_neon_vpmaxnm,
-                                       v2f32, v4f32, v2f64, 1>;
-
-// Vector minNum Pairwise (Floating Point) -  prefer a number over a quiet NaN)
-defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
-                                       int_aarch64_neon_vpminnm,
-                                       v2f32, v4f32, v2f64, 1>;
-
-// Vector Addition Pairwise (Integer)
-defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;
-
-// Vector Addition Pairwise (Floating Point)
-defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
-                                       int_arm_neon_vpadd,
-                                       v2f32, v4f32, v2f64, 1>;
-
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
-// Vector Saturating Doubling Multiply High
-defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
-                    int_arm_neon_vqdmulh, 1>;
-
-// Vector Saturating Rouding Doubling Multiply High
-defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
-                     int_arm_neon_vqrdmulh, 1>;
-
-// Vector Multiply Extended (Floating Point)
-defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
-                                      int_aarch64_neon_vmulx,
-                                      v2f32, v4f32, v2f64, 1>;
-}
-
-// Patterns to match llvm.aarch64.* intrinsic for 
-// ADDP, SMINP, UMINP, SMAXP, UMAXP having i32 as output
-class Neon_VectorPair_v2i32_pattern<SDPatternOperator opnode, Instruction INST>
-  : Pat<(v1i32 (opnode (v2i32 VPR64:$Rn))),
-        (EXTRACT_SUBREG
-             (v2i32 (INST (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rn))),
-             sub_32)>;
-
-def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_sminv, SMINPvvv_2S>;
-def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_uminv, UMINPvvv_2S>;
-def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_smaxv, SMAXPvvv_2S>;
-def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_umaxv, UMAXPvvv_2S>;
-def : Neon_VectorPair_v2i32_pattern<int_aarch64_neon_vaddv, ADDP_2S>;
-
-// Vector Immediate Instructions
-
-multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
-{
-  def _asmoperand : AsmOperandClass
-    {
-      let Name = "NeonMovImmShift" # PREFIX;
-      let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
-      let PredicateMethod = "isNeonMovImmShift" # PREFIX;
-    }
-}
-
-// Definition of vector immediates shift operands
-
-// The selectable use-cases extract the shift operation
-// information from the OpCmode fields encoded in the immediate.
-def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{
-  uint64_t OpCmode = N->getZExtValue();
-  unsigned ShiftImm;
-  unsigned ShiftOnesIn;
-  unsigned HasShift =
-    A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
-  if (!HasShift) return SDValue();
-  return CurDAG->getTargetConstant(ShiftImm, MVT::i32);
-}]>;
-
-// Vector immediates shift operands which accept LSL and MSL
-// shift operators with shift value in the range of 0, 8, 16, 24 (LSL),
-// or 0, 8 (LSLH) or 8, 16 (MSL).
-defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">;
-defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">;
-// LSLH restricts shift amount to  0, 8 out of 0, 8, 16, 24
-defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">;
-
-multiclass neon_mov_imm_shift_operands<string PREFIX,
-                                       string HALF, string ISHALF, code pred>
-{
-   def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM>
-    {
-      let PrintMethod =
-        "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
-      let DecoderMethod =
-        "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
-      let ParserMatchClass =
-        !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand");
-    }
-}
-
-defm neon_mov_imm_LSL  : neon_mov_imm_shift_operands<"LSL", "", "false", [{
-  unsigned ShiftImm;
-  unsigned ShiftOnesIn;
-  unsigned HasShift =
-    A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
-  return (HasShift && !ShiftOnesIn);
-}]>;
-
-defm neon_mov_imm_MSL  : neon_mov_imm_shift_operands<"MSL", "", "false", [{
-  unsigned ShiftImm;
-  unsigned ShiftOnesIn;
-  unsigned HasShift =
-    A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
-  return (HasShift && ShiftOnesIn);
-}]>;
-
-defm neon_mov_imm_LSLH  : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
-  unsigned ShiftImm;
-  unsigned ShiftOnesIn;
-  unsigned HasShift =
-    A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
-  return (HasShift && !ShiftOnesIn);
-}]>;
-
-def neon_uimm1_asmoperand : AsmOperandClass
-{
-  let Name = "UImm1";
-  let PredicateMethod = "isUImm<1>";
-  let RenderMethod = "addImmOperands";
-}
-
-def neon_uimm2_asmoperand : AsmOperandClass
-{
-  let Name = "UImm2";
-  let PredicateMethod = "isUImm<2>";
-  let RenderMethod = "addImmOperands";
-}
-
-def neon_uimm8_asmoperand : AsmOperandClass
-{
-  let Name = "UImm8";
-  let PredicateMethod = "isUImm<8>";
-  let RenderMethod = "addImmOperands";
-}
-
-def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
-  let ParserMatchClass = neon_uimm8_asmoperand;
-  let PrintMethod = "printUImmHexOperand";
-}
-
-def neon_uimm64_mask_asmoperand : AsmOperandClass
-{
-  let Name = "NeonUImm64Mask";
-  let PredicateMethod = "isNeonUImm64Mask";
-  let RenderMethod = "addNeonUImm64MaskOperands";
-}
-
-// MCOperand for 64-bit bytemask with each byte having only the
-// value 0x00 and 0xff is encoded as an unsigned 8-bit value
-def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
-  let ParserMatchClass = neon_uimm64_mask_asmoperand;
-  let PrintMethod = "printNeonUImm64MaskOperand";
-}
-
-multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op,
-                                   SDPatternOperator opnode>
-{
-    // shift zeros, per word
-    def _2S  : NeonI_1VModImm<0b0, op,
-                              (outs VPR64:$Rd),
-                              (ins neon_uimm8:$Imm,
-                                neon_mov_imm_LSL_operand:$Simm),
-                              !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
-                              [(set (v2i32 VPR64:$Rd),
-                                 (v2i32 (opnode (timm:$Imm),
-                                   (neon_mov_imm_LSL_operand:$Simm))))],
-                              NoItinerary>,
-               Sched<[WriteFPALU]> {
-       bits<2> Simm;
-       let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
-     }
-
-    def _4S  : NeonI_1VModImm<0b1, op,
-                              (outs VPR128:$Rd),
-                              (ins neon_uimm8:$Imm,
-                                neon_mov_imm_LSL_operand:$Simm),
-                              !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
-                              [(set (v4i32 VPR128:$Rd),
-                                 (v4i32 (opnode (timm:$Imm),
-                                   (neon_mov_imm_LSL_operand:$Simm))))],
-                              NoItinerary>,
-               Sched<[WriteFPALU]> {
-      bits<2> Simm;
-      let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
-    }
-
-    // shift zeros, per halfword
-    def _4H  : NeonI_1VModImm<0b0, op,
-                              (outs VPR64:$Rd),
-                              (ins neon_uimm8:$Imm,
-                                neon_mov_imm_LSLH_operand:$Simm),
-                              !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
-                              [(set (v4i16 VPR64:$Rd),
-                                 (v4i16 (opnode (timm:$Imm),
-                                   (neon_mov_imm_LSLH_operand:$Simm))))],
-                              NoItinerary>,
-               Sched<[WriteFPALU]> {
-      bit  Simm;
-      let cmode = {0b1, 0b0, Simm, 0b0};
-    }
-
-    def _8H  : NeonI_1VModImm<0b1, op,
-                              (outs VPR128:$Rd),
-                              (ins neon_uimm8:$Imm,
-                                neon_mov_imm_LSLH_operand:$Simm),
-                              !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
-                              [(set (v8i16 VPR128:$Rd),
-                                 (v8i16 (opnode (timm:$Imm),
-                                   (neon_mov_imm_LSLH_operand:$Simm))))],
-                              NoItinerary>,
-               Sched<[WriteFPALU]> {
-      bit Simm;
-      let cmode = {0b1, 0b0, Simm, 0b0};
-     }
-}
-
-multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op,
-                                                   SDPatternOperator opnode,
-                                                   SDPatternOperator neonopnode>
-{
-  let Constraints = "$src = $Rd" in {
-    // shift zeros, per word
-    def _2S  : NeonI_1VModImm<0b0, op,
-                 (outs VPR64:$Rd),
-                 (ins VPR64:$src, neon_uimm8:$Imm,
-                   neon_mov_imm_LSL_operand:$Simm),
-                 !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
-                 [(set (v2i32 VPR64:$Rd),
-                    (v2i32 (opnode (v2i32 VPR64:$src),
-                      (v2i32 (neonopnode timm:$Imm,
-                        neon_mov_imm_LSL_operand:$Simm)))))],
-                 NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU]> {
-      bits<2> Simm;
-      let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
-    }
-
-    def _4S  : NeonI_1VModImm<0b1, op,
-                 (outs VPR128:$Rd),
-                 (ins VPR128:$src, neon_uimm8:$Imm,
-                   neon_mov_imm_LSL_operand:$Simm),
-                 !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
-                 [(set (v4i32 VPR128:$Rd),
-                    (v4i32 (opnode (v4i32 VPR128:$src),
-                      (v4i32 (neonopnode timm:$Imm,
-                        neon_mov_imm_LSL_operand:$Simm)))))],
-                 NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU]> {
-      bits<2> Simm;
-      let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
-    }
-
-    // shift zeros, per halfword
-    def _4H  : NeonI_1VModImm<0b0, op,
-                 (outs VPR64:$Rd),
-                 (ins VPR64:$src, neon_uimm8:$Imm,
-                   neon_mov_imm_LSLH_operand:$Simm),
-                 !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
-                 [(set (v4i16 VPR64:$Rd),
-                    (v4i16 (opnode (v4i16 VPR64:$src),
-                       (v4i16 (neonopnode timm:$Imm,
-                          neon_mov_imm_LSL_operand:$Simm)))))],
-                 NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU]> {
-      bit  Simm;
-      let cmode = {0b1, 0b0, Simm, 0b1};
-    }
-
-    def _8H  : NeonI_1VModImm<0b1, op,
-                 (outs VPR128:$Rd),
-                 (ins VPR128:$src, neon_uimm8:$Imm,
-                   neon_mov_imm_LSLH_operand:$Simm),
-                 !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
-                 [(set (v8i16 VPR128:$Rd),
-                    (v8i16 (opnode (v8i16 VPR128:$src),
-                      (v8i16 (neonopnode timm:$Imm,
-                        neon_mov_imm_LSL_operand:$Simm)))))],
-                 NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU]> {
-      bit Simm;
-      let cmode = {0b1, 0b0, Simm, 0b1};
-    }
-  }
-}
-
-multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op,
-                                   SDPatternOperator opnode>
-{
-    // shift ones, per word
-    def _2S  : NeonI_1VModImm<0b0, op,
-                             (outs VPR64:$Rd),
-                             (ins neon_uimm8:$Imm,
-                               neon_mov_imm_MSL_operand:$Simm),
-                             !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
-                              [(set (v2i32 VPR64:$Rd),
-                                 (v2i32 (opnode (timm:$Imm),
-                                   (neon_mov_imm_MSL_operand:$Simm))))],
-                             NoItinerary>,
-               Sched<[WriteFPALU]> {
-       bit Simm;
-       let cmode = {0b1, 0b1, 0b0, Simm};
-     }
-
-   def _4S  : NeonI_1VModImm<0b1, op,
-                              (outs VPR128:$Rd),
-                              (ins neon_uimm8:$Imm,
-                                neon_mov_imm_MSL_operand:$Simm),
-                              !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
-                              [(set (v4i32 VPR128:$Rd),
-                                 (v4i32 (opnode (timm:$Imm),
-                                   (neon_mov_imm_MSL_operand:$Simm))))],
-                              NoItinerary>,
-              Sched<[WriteFPALU]> {
-     bit Simm;
-     let cmode = {0b1, 0b1, 0b0, Simm};
-   }
-}
-
-// Vector Move Immediate Shifted
-let isReMaterializable = 1 in {
-defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>;
-}
-
-// Vector Move Inverted Immediate Shifted
-let isReMaterializable = 1 in {
-defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
-}
-
-// Vector Bitwise Bit Clear (AND NOT) - immediate
-let isReMaterializable = 1 in {
-defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
-                                                         and, Neon_mvni>;
-}
-
-// Vector Bitwise OR - immediate
-
-let isReMaterializable = 1 in {
-defm ORRvi_lsl   : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
-                                                           or, Neon_movi>;
-}
-
-// Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
-// LowerBUILD_VECTOR favors lowering MOVI over MVNI.
-// BIC immediate instructions selection requires additional patterns to
-// transform Neon_movi operands into BIC immediate operands
-
-def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
-  uint64_t OpCmode = N->getZExtValue();
-  unsigned ShiftImm;
-  unsigned ShiftOnesIn;
-  (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
-  // LSLH restricts the shift amount to 0 or 8, which are encoded as 0 and 1.
-  // Transform encoded shift amount 0 to 1 and 1 to 0.
-  return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
-}]>;
-
-def neon_mov_imm_LSLH_transform_operand
-  : ImmLeaf<i32, [{
-    unsigned ShiftImm;
-    unsigned ShiftOnesIn;
-    unsigned HasShift =
-      A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
-    return (HasShift && !ShiftOnesIn); }],
-  neon_mov_imm_LSLH_transform_XFORM>;
-
-// Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0xff, LSL 8)
-// Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0xff)
-def : Pat<(v4i16 (and VPR64:$src,
-            (v4i16 (Neon_movi 255,
-              neon_mov_imm_LSLH_transform_operand:$Simm)))),
-          (BICvi_lsl_4H VPR64:$src, 255,
-            neon_mov_imm_LSLH_transform_operand:$Simm)>;
-
-// Transform (and A, (8h Neon_movi 0xff)) -> BIC 8h (A, 0xff, LSL 8)
-// Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0xff)
-def : Pat<(v8i16 (and VPR128:$src,
-            (v8i16 (Neon_movi 255,
-              neon_mov_imm_LSLH_transform_operand:$Simm)))),
-          (BICvi_lsl_8H VPR128:$src, 255,
-            neon_mov_imm_LSLH_transform_operand:$Simm)>;
-
-def : Pat<(v8i8 (and VPR64:$src,
-                  (bitconvert(v4i16 (Neon_movi 255,
-                    neon_mov_imm_LSLH_transform_operand:$Simm))))),
-          (BICvi_lsl_4H VPR64:$src, 255,
-            neon_mov_imm_LSLH_transform_operand:$Simm)>;
-def : Pat<(v2i32 (and VPR64:$src,
-                 (bitconvert(v4i16 (Neon_movi 255,
-                   neon_mov_imm_LSLH_transform_operand:$Simm))))),
-          (BICvi_lsl_4H VPR64:$src, 255,
-            neon_mov_imm_LSLH_transform_operand:$Simm)>;
-def : Pat<(v1i64 (and VPR64:$src,
-                (bitconvert(v4i16 (Neon_movi 255,
-                  neon_mov_imm_LSLH_transform_operand:$Simm))))),
-        (BICvi_lsl_4H VPR64:$src, 255,
-          neon_mov_imm_LSLH_transform_operand:$Simm)>;
-
-def : Pat<(v16i8 (and VPR128:$src,
-                 (bitconvert(v8i16 (Neon_movi 255,
-                   neon_mov_imm_LSLH_transform_operand:$Simm))))),
-        (BICvi_lsl_8H VPR128:$src, 255,
-          neon_mov_imm_LSLH_transform_operand:$Simm)>;
-def : Pat<(v4i32 (and VPR128:$src,
-                 (bitconvert(v8i16 (Neon_movi 255,
-                   neon_mov_imm_LSLH_transform_operand:$Simm))))),
-        (BICvi_lsl_8H VPR128:$src, 255,
-          neon_mov_imm_LSLH_transform_operand:$Simm)>;
-def : Pat<(v2i64 (and VPR128:$src,
-                 (bitconvert(v8i16 (Neon_movi 255,
-                   neon_mov_imm_LSLH_transform_operand:$Simm))))),
-        (BICvi_lsl_8H VPR128:$src, 255,
-          neon_mov_imm_LSLH_transform_operand:$Simm)>;
-
-multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
-                                   SDPatternOperator neonopnode,
-                                   Instruction INST4H,
-                                   Instruction INST8H,
-                                   Instruction INST2S,
-                                   Instruction INST4S> {
-  def : Pat<(v8i8 (opnode VPR64:$src,
-                    (bitconvert(v4i16 (neonopnode timm:$Imm,
-                      neon_mov_imm_LSLH_operand:$Simm))))),
-            (INST4H VPR64:$src, neon_uimm8:$Imm,
-              neon_mov_imm_LSLH_operand:$Simm)>;
-  def : Pat<(v2i32 (opnode VPR64:$src,
-                   (bitconvert(v4i16 (neonopnode timm:$Imm,
-                     neon_mov_imm_LSLH_operand:$Simm))))),
-            (INST4H VPR64:$src, neon_uimm8:$Imm,
-              neon_mov_imm_LSLH_operand:$Simm)>;
-  def : Pat<(v1i64 (opnode VPR64:$src,
-                  (bitconvert(v4i16 (neonopnode timm:$Imm,
-                    neon_mov_imm_LSLH_operand:$Simm))))),
-          (INST4H VPR64:$src, neon_uimm8:$Imm,
-            neon_mov_imm_LSLH_operand:$Simm)>;
-
-  def : Pat<(v16i8 (opnode VPR128:$src,
-                   (bitconvert(v8i16 (neonopnode timm:$Imm,
-                     neon_mov_imm_LSLH_operand:$Simm))))),
-          (INST8H VPR128:$src, neon_uimm8:$Imm,
-            neon_mov_imm_LSLH_operand:$Simm)>;
-  def : Pat<(v4i32 (opnode VPR128:$src,
-                   (bitconvert(v8i16 (neonopnode timm:$Imm,
-                     neon_mov_imm_LSLH_operand:$Simm))))),
-          (INST8H VPR128:$src, neon_uimm8:$Imm,
-            neon_mov_imm_LSLH_operand:$Simm)>;
-  def : Pat<(v2i64 (opnode VPR128:$src,
-                   (bitconvert(v8i16 (neonopnode timm:$Imm,
-                     neon_mov_imm_LSLH_operand:$Simm))))),
-          (INST8H VPR128:$src, neon_uimm8:$Imm,
-            neon_mov_imm_LSLH_operand:$Simm)>;
-
-  def : Pat<(v8i8 (opnode VPR64:$src,
-                    (bitconvert(v2i32 (neonopnode timm:$Imm,
-                      neon_mov_imm_LSLH_operand:$Simm))))),
-            (INST2S VPR64:$src, neon_uimm8:$Imm,
-              neon_mov_imm_LSLH_operand:$Simm)>;
-  def : Pat<(v4i16 (opnode VPR64:$src,
-                   (bitconvert(v2i32 (neonopnode timm:$Imm,
-                     neon_mov_imm_LSLH_operand:$Simm))))),
-            (INST2S VPR64:$src, neon_uimm8:$Imm,
-              neon_mov_imm_LSLH_operand:$Simm)>;
-  def : Pat<(v1i64 (opnode VPR64:$src,
-                  (bitconvert(v2i32 (neonopnode timm:$Imm,
-                    neon_mov_imm_LSLH_operand:$Simm))))),
-          (INST2S VPR64:$src, neon_uimm8:$Imm,
-            neon_mov_imm_LSLH_operand:$Simm)>;
-
-  def : Pat<(v16i8 (opnode VPR128:$src,
-                   (bitconvert(v4i32 (neonopnode timm:$Imm,
-                     neon_mov_imm_LSLH_operand:$Simm))))),
-          (INST4S VPR128:$src, neon_uimm8:$Imm,
-            neon_mov_imm_LSLH_operand:$Simm)>;
-  def : Pat<(v8i16 (opnode VPR128:$src,
-                   (bitconvert(v4i32 (neonopnode timm:$Imm,
-                     neon_mov_imm_LSLH_operand:$Simm))))),
-          (INST4S VPR128:$src, neon_uimm8:$Imm,
-            neon_mov_imm_LSLH_operand:$Simm)>;
-  def : Pat<(v2i64 (opnode VPR128:$src,
-                   (bitconvert(v4i32 (neonopnode timm:$Imm,
-                     neon_mov_imm_LSLH_operand:$Simm))))),
-          (INST4S VPR128:$src, neon_uimm8:$Imm,
-            neon_mov_imm_LSLH_operand:$Simm)>;
-}
-
-// Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
-defm : Neon_bitwiseVi_patterns<and, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H,
-                               BICvi_lsl_2S, BICvi_lsl_4S>;
-
-// Additional patterns for Vector Bitwise OR - immediate
-defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H,
-                               ORRvi_lsl_2S, ORRvi_lsl_4S>;
-
-
-// Vector Move Immediate Masked
-let isReMaterializable = 1 in {
-defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
-}
-
-// Vector Move Inverted Immediate Masked
-let isReMaterializable = 1 in {
-defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
-}
-
-class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
-                                Instruction inst, RegisterOperand VPRC>
-  : NeonInstAlias<!strconcat(asmop, "\t$Rd," # asmlane # ", $Imm"),
-                        (inst VPRC:$Rd, neon_uimm8:$Imm,  0), 0b0>;
-
-// Aliases for Vector Move Immediate Shifted
-def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
-def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;
-
-// Aliases for Vector Move Inverted Immediate Shifted
-def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
-def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;
-
-// Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
-def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
-def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;
-
-// Aliases for Vector Bitwise OR - immediate
-def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
-def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
-def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
-
-//  Vector Move Immediate - per byte
-let isReMaterializable = 1 in {
-def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
-                               (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
-                               "movi\t$Rd.8b, $Imm",
-                               [(set (v8i8 VPR64:$Rd),
-                                  (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
-                                NoItinerary>,
-                Sched<[WriteFPALU]> {
-  let cmode = 0b1110;
-}
-
-def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
-                                (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
-                                "movi\t$Rd.16b, $Imm",
-                                [(set (v16i8 VPR128:$Rd),
-                                   (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
-                                 NoItinerary>,
-                Sched<[WriteFPALU]> {
-  let cmode = 0b1110;
-}
-}
-
-// Vector Move Immediate - bytemask, per double word
-let isReMaterializable = 1 in {
-def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
-                               (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
-                               "movi\t $Rd.2d, $Imm",
-                               [(set (v2i64 VPR128:$Rd),
-                                  (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
-                               NoItinerary>,
-                Sched<[WriteFPALU]> {
-  let cmode = 0b1110;
-}
-}
-
-// Vector Move Immediate - bytemask, one doubleword
-
-let isReMaterializable = 1 in {
-def MOVIdi : NeonI_1VModImm<0b0, 0b1,
-                           (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
-                           "movi\t $Rd, $Imm",
-                           [(set (v1i64 FPR64:$Rd),
-                             (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))],
-                           NoItinerary>,
-             Sched<[WriteFPALU]> {
-  let cmode = 0b1110;
-}
-}
-
-// Vector Floating Point Move Immediate
-
-class NeonI_FMOV_impl<string asmlane, RegisterOperand VPRC, ValueType OpTy,
-                      Operand immOpType, bit q, bit op>
-  : NeonI_1VModImm<q, op,
-                   (outs VPRC:$Rd), (ins immOpType:$Imm),
-                   "fmov\t$Rd" # asmlane # ", $Imm",
-                   [(set (OpTy VPRC:$Rd),
-                      (OpTy (Neon_fmovi (timm:$Imm))))],
-                   NoItinerary>,
-    Sched<[WriteFPALU]> {
-     let cmode = 0b1111;
-   }
-
-let isReMaterializable = 1 in {
-def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64,  v2f32, fmov32_operand, 0b0, 0b0>;
-def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
-def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
-}
-
-// Vector Shift (Immediate)
-
-// Shift Right/Left Immediate - The immh:immb field of these shifts are encoded
-// as follows:
-//
-//    Offset    Encoding
-//     8        immh:immb<6:3> = '0001xxx', <imm> is encoded in immh:immb<2:0>
-//     16       immh:immb<6:4> = '001xxxx', <imm> is encoded in immh:immb<3:0>
-//     32       immh:immb<6:5> = '01xxxxx', <imm> is encoded in immh:immb<4:0>
-//     64       immh:immb<6>   = '1xxxxxx', <imm> is encoded in immh:immb<5:0>
-//
-// The shift right immediate amount, in the range 1 to element bits, is computed
-// as Offset - UInt(immh:immb).  The shift left immediate amount, in the range 0
-// to element bits - 1, is computed as UInt(immh:immb) - Offset.
-
-class shr_imm_asmoperands<string OFFSET> : AsmOperandClass {
-  let Name = "ShrImm" # OFFSET;
-  let RenderMethod = "addImmOperands";
-  let DiagnosticType = "ShrImm" # OFFSET;
-}
-
-class shr_imm<string OFFSET> : Operand<i32> {
-  let EncoderMethod = "getShiftRightImm" # OFFSET;
-  let DecoderMethod = "DecodeShiftRightImm" # OFFSET;
-  let ParserMatchClass =
-    !cast<AsmOperandClass>("shr_imm" # OFFSET # "_asmoperand");
-}
-
-def shr_imm8_asmoperand : shr_imm_asmoperands<"8">;
-def shr_imm16_asmoperand : shr_imm_asmoperands<"16">;
-def shr_imm32_asmoperand : shr_imm_asmoperands<"32">;
-def shr_imm64_asmoperand : shr_imm_asmoperands<"64">;
-
-def shr_imm8 : shr_imm<"8">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 8;}]>;
-def shr_imm16 : shr_imm<"16">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 16;}]>;
-def shr_imm32 : shr_imm<"32">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 32;}]>;
-def shr_imm64 : shr_imm<"64">, ImmLeaf<i32, [{return Imm > 0 && Imm <= 64;}]>;
-
-class shl_imm_asmoperands<string OFFSET> : AsmOperandClass {
-  let Name = "ShlImm" # OFFSET;
-  let RenderMethod = "addImmOperands";
-  let DiagnosticType = "ShlImm" # OFFSET;
-}
-
-class shl_imm<string OFFSET> : Operand<i32> {
-  let EncoderMethod = "getShiftLeftImm" # OFFSET;
-  let DecoderMethod = "DecodeShiftLeftImm" # OFFSET;
-  let ParserMatchClass =
-    !cast<AsmOperandClass>("shl_imm" # OFFSET # "_asmoperand");
-}
-
-def shl_imm8_asmoperand : shl_imm_asmoperands<"8">;
-def shl_imm16_asmoperand : shl_imm_asmoperands<"16">;
-def shl_imm32_asmoperand : shl_imm_asmoperands<"32">;
-def shl_imm64_asmoperand : shl_imm_asmoperands<"64">;
-
-def shl_imm8 : shl_imm<"8">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 8;}]>;
-def shl_imm16 : shl_imm<"16">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 16;}]>;
-def shl_imm32 : shl_imm<"32">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 32;}]>;
-def shl_imm64 : shl_imm<"64">, ImmLeaf<i32, [{return Imm >= 0 && Imm < 64;}]>;
-
-class N2VShift<bit q, bit u, bits<5> opcode, string asmop, string T,
-               RegisterOperand VPRC, ValueType Ty, Operand ImmTy, SDNode OpNode>
-  : NeonI_2VShiftImm<q, u, opcode,
-                     (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
-                     asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
-                     [(set (Ty VPRC:$Rd),
-                        (Ty (OpNode (Ty VPRC:$Rn),
-                          (Ty (Neon_vdup (i32 ImmTy:$Imm))))))],
-                     NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-multiclass NeonI_N2VShL<bit u, bits<5> opcode, string asmop> {
-  // 64-bit vector types.
-  def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8, shl> {
-    let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
-  }
-
-  def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16, shl> {
-    let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
-  }
-
-  def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32, shl> {
-    let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
-  }
-
-  // 128-bit vector types.
-  def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8, shl> {
-    let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
-  }
-
-  def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16, shl> {
-    let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
-  }
-
-  def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32, shl> {
-    let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
-  }
-
-  def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64, shl> {
-    let Inst{22} = 0b1;        // immh:immb = 1xxxxxx
-  }
-}
-
-multiclass NeonI_N2VShR<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
-  def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
-                     OpNode> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
-                     OpNode> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
-                     OpNode> {
-     let Inst{22-21} = 0b01;
-  }
-
-  def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
-                      OpNode> {
-                      let Inst{22-19} = 0b0001;
-                    }
-
-  def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
-                     OpNode> {
-                     let Inst{22-20} = 0b001;
-                    }
-
-  def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
-                     OpNode> {
-                      let Inst{22-21} = 0b01;
-                    }
-
-  def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
-                     OpNode> {
-                      let Inst{22} = 0b1;
-                    }
-}
-
-// Shift left
-
-defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;
-
-// Additional patterns to match vector shift left by immediate.
-// (v1i8/v1i16/v1i32 types)
-def : Pat<(v1i8 (shl (v1i8 FPR8:$Rn),
-                     (v1i8 (Neon_vdup (i32 (shl_imm8:$Imm)))))),
-          (EXTRACT_SUBREG
-              (SHLvvi_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
-                          shl_imm8:$Imm),
-              sub_8)>;
-def : Pat<(v1i16 (shl (v1i16 FPR16:$Rn),
-                      (v1i16 (Neon_vdup (i32 (shl_imm16:$Imm)))))),
-          (EXTRACT_SUBREG
-              (SHLvvi_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
-                          shl_imm16:$Imm),
-              sub_16)>;
-def : Pat<(v1i32 (shl (v1i32 FPR32:$Rn),
-                      (v1i32 (Neon_vdup (i32 (shl_imm32:$Imm)))))),
-          (EXTRACT_SUBREG
-              (SHLvvi_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
-                          shl_imm32:$Imm),
-              sub_32)>;
-
-// Shift right
-defm SSHRvvi : NeonI_N2VShR<0b0, 0b00000, "sshr", sra>;
-defm USHRvvi : NeonI_N2VShR<0b1, 0b00000, "ushr", srl>;
-
-// Additional patterns to match vector shift right by immediate.
-// (v1i8/v1i16/v1i32 types)
-def : Pat<(v1i8 (sra (v1i8 FPR8:$Rn),
-                     (v1i8 (Neon_vdup (i32 (shr_imm8:$Imm)))))),
-          (EXTRACT_SUBREG
-              (SSHRvvi_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
-                          shr_imm8:$Imm),
-              sub_8)>;
-def : Pat<(v1i16 (sra (v1i16 FPR16:$Rn),
-                      (v1i16 (Neon_vdup (i32 (shr_imm16:$Imm)))))),
-          (EXTRACT_SUBREG
-              (SSHRvvi_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
-                          shr_imm16:$Imm),
-              sub_16)>;
-def : Pat<(v1i32 (sra (v1i32 FPR32:$Rn),
-                      (v1i32 (Neon_vdup (i32 (shr_imm32:$Imm)))))),
-          (EXTRACT_SUBREG
-              (SSHRvvi_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
-                          shr_imm32:$Imm),
-              sub_32)>;
-def : Pat<(v1i8 (srl (v1i8 FPR8:$Rn),
-                     (v1i8 (Neon_vdup (i32 (shr_imm8:$Imm)))))),
-          (EXTRACT_SUBREG
-              (USHRvvi_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
-                          shr_imm8:$Imm),
-              sub_8)>;
-def : Pat<(v1i16 (srl (v1i16 FPR16:$Rn),
-                      (v1i16 (Neon_vdup (i32 (shr_imm16:$Imm)))))),
-          (EXTRACT_SUBREG
-              (USHRvvi_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
-                          shr_imm16:$Imm),
-              sub_16)>;
-def : Pat<(v1i32 (srl (v1i32 FPR32:$Rn),
-                      (v1i32 (Neon_vdup (i32 (shr_imm32:$Imm)))))),
-          (EXTRACT_SUBREG
-              (USHRvvi_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
-                          shr_imm32:$Imm),
-              sub_32)>;
-
-def Neon_High16B : PatFrag<(ops node:$in),
-                           (extract_subvector (v16i8 node:$in), (iPTR 8))>;
-def Neon_High8H  : PatFrag<(ops node:$in),
-                           (extract_subvector (v8i16 node:$in), (iPTR 4))>;
-def Neon_High4S  : PatFrag<(ops node:$in),
-                           (extract_subvector (v4i32 node:$in), (iPTR 2))>;
-def Neon_High2D  : PatFrag<(ops node:$in),
-                           (extract_subvector (v2i64 node:$in), (iPTR 1))>;
-def Neon_High4float : PatFrag<(ops node:$in),
-                               (extract_subvector (v4f32 node:$in), (iPTR 2))>;
-def Neon_High2double : PatFrag<(ops node:$in),
-                               (extract_subvector (v2f64 node:$in), (iPTR 1))>;
-
-def Neon_Low16B : PatFrag<(ops node:$in),
-                          (v8i8 (extract_subvector (v16i8 node:$in),
-                                                   (iPTR 0)))>;
-def Neon_Low8H : PatFrag<(ops node:$in),
-                         (v4i16 (extract_subvector (v8i16 node:$in),
-                                                   (iPTR 0)))>;
-def Neon_Low4S : PatFrag<(ops node:$in),
-                         (v2i32 (extract_subvector (v4i32 node:$in),
-                                                   (iPTR 0)))>;
-def Neon_Low2D : PatFrag<(ops node:$in),
-                         (v1i64 (extract_subvector (v2i64 node:$in),
-                                                   (iPTR 0)))>;
-def Neon_Low4float : PatFrag<(ops node:$in),
-                             (v2f32 (extract_subvector (v4f32 node:$in),
-                                                       (iPTR 0)))>;
-def Neon_Low2double : PatFrag<(ops node:$in),
-                              (v1f64 (extract_subvector (v2f64 node:$in),
-                                                        (iPTR 0)))>;
-
-class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
-                   string SrcT, ValueType DestTy, ValueType SrcTy,
-                   Operand ImmTy, SDPatternOperator ExtOp>
-  : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
-                     (ins VPR64:$Rn, ImmTy:$Imm),
-                     asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
-                     [(set (DestTy VPR128:$Rd),
-                        (DestTy (shl
-                          (DestTy (ExtOp (SrcTy VPR64:$Rn))),
-                            (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
-                     NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-class N2VShiftLongHigh<bit q, bit u, bits<5> opcode, string asmop, string DestT,
-                       string SrcT, ValueType DestTy, ValueType SrcTy,
-                       int StartIndex, Operand ImmTy,
-                       SDPatternOperator ExtOp, PatFrag getTop>
-  : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
-                     (ins VPR128:$Rn, ImmTy:$Imm),
-                     asmop # "2\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
-                     [(set (DestTy VPR128:$Rd),
-                        (DestTy (shl
-                          (DestTy (ExtOp
-                            (SrcTy (getTop VPR128:$Rn)))),
-                              (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
-                     NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
-                         SDNode ExtOp> {
-  // 64-bit vector types.
-  def _8B : N2VShiftLong<0b0, u, opcode, asmop, "8h", "8b", v8i16, v8i8,
-                         shl_imm8, ExtOp> {
-    let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
-  }
-
-  def _4H : N2VShiftLong<0b0, u, opcode, asmop, "4s", "4h", v4i32, v4i16,
-                         shl_imm16, ExtOp> {
-    let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
-  }
-
-  def _2S : N2VShiftLong<0b0, u, opcode, asmop, "2d", "2s", v2i64, v2i32,
-                         shl_imm32, ExtOp> {
-    let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
-  }
-
-  // 128-bit vector types
-  def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b", v8i16, v8i8,
-                              8, shl_imm8, ExtOp, Neon_High16B> {
-    let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
-  }
-
-  def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h", v4i32, v4i16,
-                             4, shl_imm16, ExtOp, Neon_High8H> {
-    let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
-  }
-
-  def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s", v2i64, v2i32,
-                             2, shl_imm32, ExtOp, Neon_High4S> {
-    let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
-  }
-
-  // Use other patterns to match when the immediate is 0.
-  def : Pat<(v8i16 (ExtOp (v8i8 VPR64:$Rn))),
-            (!cast<Instruction>(prefix # "_8B") VPR64:$Rn, 0)>;
-
-  def : Pat<(v4i32 (ExtOp (v4i16 VPR64:$Rn))),
-            (!cast<Instruction>(prefix # "_4H") VPR64:$Rn, 0)>;
-
-  def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
-            (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;
-
-  def : Pat<(v8i16 (ExtOp (v8i8 (Neon_High16B VPR128:$Rn)))),
-            (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;
-
-  def : Pat<(v4i32 (ExtOp (v4i16 (Neon_High8H VPR128:$Rn)))),
-            (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;
-
-  def : Pat<(v2i64 (ExtOp (v2i32 (Neon_High4S VPR128:$Rn)))),
-            (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
-}
-
-// Shift left long
-defm SSHLLvvi : NeonI_N2VShLL<"SSHLLvvi", 0b0, 0b10100, "sshll", sext>;
-defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
-
-class NeonI_ext_len_alias<string asmop, string lane, string laneOp,
-                       Instruction inst, RegisterOperand VPRC,
-                       RegisterOperand VPRCOp>
-  : NeonInstAlias<asmop # "\t$Rd" # lane #", $Rn" # laneOp,
-                  (inst VPRC:$Rd, VPRCOp:$Rn, 0), 0b0>;
-
-// Signed integer lengthen (vector) is alias for SSHLL Vd, Vn, #0
-// Signed integer lengthen (vector, second part) is alias for SSHLL2 Vd, Vn, #0
-// FIXME: This is actually the preferred syntax but TableGen can't deal with
-// custom printing of aliases.
-def SXTLvv_8B  : NeonI_ext_len_alias<"sxtl", ".8h", ".8b",  SSHLLvvi_8B, VPR128, VPR64>;
-def SXTLvv_4H  : NeonI_ext_len_alias<"sxtl", ".4s", ".4h",  SSHLLvvi_4H, VPR128, VPR64>;
-def SXTLvv_2S  : NeonI_ext_len_alias<"sxtl", ".2d", ".2s",  SSHLLvvi_2S, VPR128, VPR64>;
-def SXTL2vv_16B : NeonI_ext_len_alias<"sxtl2", ".8h", ".16b",  SSHLLvvi_16B, VPR128, VPR128>;
-def SXTL2vv_8H  : NeonI_ext_len_alias<"sxtl2", ".4s", ".8h",  SSHLLvvi_8H, VPR128, VPR128>;
-def SXTL2vv_4S  : NeonI_ext_len_alias<"sxtl2", ".2d", ".4s",  SSHLLvvi_4S, VPR128, VPR128>;
-
-// Unsigned integer lengthen (vector) is alias for USHLL Vd, Vn, #0
-// Unsigned integer lengthen (vector, second part) is alias for USHLL2 Vd, Vn, #0
-// FIXME: This is actually the preferred syntax but TableGen can't deal with
-// custom printing of aliases.
-def UXTLvv_8B  : NeonI_ext_len_alias<"uxtl", ".8h", ".8b",  USHLLvvi_8B, VPR128, VPR64>;
-def UXTLvv_4H  : NeonI_ext_len_alias<"uxtl", ".4s", ".4h",  USHLLvvi_4H, VPR128, VPR64>;
-def UXTLvv_2S  : NeonI_ext_len_alias<"uxtl", ".2d", ".2s",  USHLLvvi_2S, VPR128, VPR64>;
-def UXTL2vv_16B : NeonI_ext_len_alias<"uxtl2", ".8h", ".16b",  USHLLvvi_16B, VPR128, VPR128>;
-def UXTL2vv_8H  : NeonI_ext_len_alias<"uxtl2", ".4s", ".8h",  USHLLvvi_8H, VPR128, VPR128>;
-def UXTL2vv_4S  : NeonI_ext_len_alias<"uxtl2", ".2d", ".4s",  USHLLvvi_4S, VPR128, VPR128>;
-
-def : Pat<(v8i16 (anyext (v8i8 VPR64:$Rn))), (USHLLvvi_8B VPR64:$Rn, 0)>;
-def : Pat<(v4i32 (anyext (v4i16 VPR64:$Rn))), (USHLLvvi_4H VPR64:$Rn, 0)>;
-def : Pat<(v2i64 (anyext (v2i32 VPR64:$Rn))), (USHLLvvi_2S VPR64:$Rn, 0)>;
-
-// Rounding/Saturating shift
-class N2VShift_RQ<bit q, bit u, bits<5> opcode, string asmop, string T,
-                  RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
-                  SDPatternOperator OpNode>
-  : NeonI_2VShiftImm<q, u, opcode,
-                     (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
-                     asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
-                     [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$Rn),
-                        (i32 ImmTy:$Imm))))],
-                     NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-// shift right (vector by immediate)
-multiclass NeonI_N2VShR_RQ<bit u, bits<5> opcode, string asmop,
-                           SDPatternOperator OpNode> {
-  def _8B  : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
-                         OpNode> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _4H  : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
-                         OpNode> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _2S  : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
-                         OpNode> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
-                         OpNode> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
-                        OpNode> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
-                        OpNode> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
-                        OpNode> {
-    let Inst{22} = 0b1;
-  }
-}
-
-multiclass NeonI_N2VShL_Q<bit u, bits<5> opcode, string asmop,
-                          SDPatternOperator OpNode> {
-  // 64-bit vector types.
-  def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8,
-                        OpNode> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16,
-                        OpNode> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32,
-                        OpNode> {
-    let Inst{22-21} = 0b01;
-  }
-
-  // 128-bit vector types.
-  def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8,
-                         OpNode> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16,
-                        OpNode> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32,
-                        OpNode> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64,
-                        OpNode> {
-    let Inst{22} = 0b1;
-  }
-}
-
-// Rounding shift right
-defm SRSHRvvi : NeonI_N2VShR_RQ<0b0, 0b00100, "srshr",
-                                int_aarch64_neon_vsrshr>;
-defm URSHRvvi : NeonI_N2VShR_RQ<0b1, 0b00100, "urshr",
-                                int_aarch64_neon_vurshr>;
-
-// Saturating shift left unsigned
-defm SQSHLUvvi : NeonI_N2VShL_Q<0b1, 0b01100, "sqshlu", int_aarch64_neon_vsqshlu>;
-
-// Saturating shift left
-defm SQSHLvvi : NeonI_N2VShL_Q<0b0, 0b01110, "sqshl", Neon_sqrshlImm>;
-defm UQSHLvvi : NeonI_N2VShL_Q<0b1, 0b01110, "uqshl", Neon_uqrshlImm>;
-
-class N2VShiftAdd<bit q, bit u, bits<5> opcode, string asmop, string T,
-                  RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
-                  SDNode OpNode>
-  : NeonI_2VShiftImm<q, u, opcode,
-           (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
-           asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
-           [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
-              (Ty (OpNode (Ty VPRC:$Rn),
-                (Ty (Neon_vdup (i32 ImmTy:$Imm))))))))],
-           NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-}
-
-// Shift Right accumulate
-multiclass NeonI_N2VShRAdd<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
-  def _8B : N2VShiftAdd<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
-                        OpNode> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _4H : N2VShiftAdd<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
-                        OpNode> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _2S : N2VShiftAdd<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
-                        OpNode> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _16B : N2VShiftAdd<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
-                         OpNode> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _8H : N2VShiftAdd<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
-                        OpNode> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _4S : N2VShiftAdd<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
-                        OpNode> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _2D : N2VShiftAdd<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
-                        OpNode> {
-    let Inst{22} = 0b1;
-  }
-}
-
-// Shift right and accumulate
-defm SSRAvvi    : NeonI_N2VShRAdd<0, 0b00010, "ssra", sra>;
-defm USRAvvi    : NeonI_N2VShRAdd<1, 0b00010, "usra", srl>;
-
-// Rounding shift accumulate
-class N2VShiftAdd_R<bit q, bit u, bits<5> opcode, string asmop, string T,
-                    RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
-                    SDPatternOperator OpNode>
-  : NeonI_2VShiftImm<q, u, opcode,
-                     (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
-                     asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
-                     [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
-                        (Ty (OpNode (Ty VPRC:$Rn), (i32 ImmTy:$Imm))))))],
-                     NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-}
-
-multiclass NeonI_N2VShRAdd_R<bit u, bits<5> opcode, string asmop,
-                             SDPatternOperator OpNode> {
-  def _8B : N2VShiftAdd_R<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
-                          OpNode> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _4H : N2VShiftAdd_R<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
-                          OpNode> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _2S : N2VShiftAdd_R<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
-                          OpNode> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _16B : N2VShiftAdd_R<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
-                           OpNode> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _8H : N2VShiftAdd_R<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
-                          OpNode> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _4S : N2VShiftAdd_R<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
-                          OpNode> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _2D : N2VShiftAdd_R<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
-                          OpNode> {
-    let Inst{22} = 0b1;
-  }
-}
-
-// Rounding shift right and accumulate
-defm SRSRAvvi : NeonI_N2VShRAdd_R<0, 0b00110, "srsra", int_aarch64_neon_vsrshr>;
-defm URSRAvvi : NeonI_N2VShRAdd_R<1, 0b00110, "ursra", int_aarch64_neon_vurshr>;
-
-// Shift insert by immediate
-class N2VShiftIns<bit q, bit u, bits<5> opcode, string asmop, string T,
-                  RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
-                  SDPatternOperator OpNode>
-    : NeonI_2VShiftImm<q, u, opcode,
-           (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
-           asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
-           [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$src), (Ty VPRC:$Rn),
-             (i32 ImmTy:$Imm))))],
-           NoItinerary>,
-      Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-}
-
-// shift left insert (vector by immediate)
-multiclass NeonI_N2VShLIns<bit u, bits<5> opcode, string asmop> {
-  def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shl_imm8,
-                        int_aarch64_neon_vsli> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shl_imm16,
-                        int_aarch64_neon_vsli> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shl_imm32,
-                        int_aarch64_neon_vsli> {
-    let Inst{22-21} = 0b01;
-  }
-
-    // 128-bit vector types
-  def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shl_imm8,
-                         int_aarch64_neon_vsli> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shl_imm16,
-                        int_aarch64_neon_vsli> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shl_imm32,
-                        int_aarch64_neon_vsli> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shl_imm64,
-                        int_aarch64_neon_vsli> {
-    let Inst{22} = 0b1;
-  }
-}
-
-// shift right insert (vector by immediate)
-multiclass NeonI_N2VShRIns<bit u, bits<5> opcode, string asmop> {
-    // 64-bit vector types.
-  def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
-                        int_aarch64_neon_vsri> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
-                        int_aarch64_neon_vsri> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
-                        int_aarch64_neon_vsri> {
-    let Inst{22-21} = 0b01;
-  }
-
-    // 128-bit vector types
-  def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
-                         int_aarch64_neon_vsri> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
-                        int_aarch64_neon_vsri> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
-                        int_aarch64_neon_vsri> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
-                        int_aarch64_neon_vsri> {
-    let Inst{22} = 0b1;
-  }
-}
-
-// Shift left and insert
-defm SLIvvi   : NeonI_N2VShLIns<0b1, 0b01010, "sli">;
-
-// Shift right and insert
-defm SRIvvi   : NeonI_N2VShRIns<0b1, 0b01000, "sri">;
-
-class N2VShR_Narrow<bit q, bit u, bits<5> opcode, string asmop, string DestT,
-                    string SrcT, Operand ImmTy>
-  : NeonI_2VShiftImm<q, u, opcode,
-                     (outs VPR64:$Rd), (ins VPR128:$Rn, ImmTy:$Imm),
-                     asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
-                     [], NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-class N2VShR_Narrow_Hi<bit q, bit u, bits<5> opcode, string asmop, string DestT,
-                       string SrcT, Operand ImmTy>
-  : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
-                     (ins VPR128:$src, VPR128:$Rn, ImmTy:$Imm),
-                     asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
-                     [], NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-}
-
-// left long shift by immediate
-multiclass NeonI_N2VShR_Narrow<bit u, bits<5> opcode, string asmop> {
-  def _8B : N2VShR_Narrow<0b0, u, opcode, asmop, "8b", "8h", shr_imm8> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _4H : N2VShR_Narrow<0b0, u, opcode, asmop, "4h", "4s", shr_imm16> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _2S : N2VShR_Narrow<0b0, u, opcode, asmop, "2s", "2d", shr_imm32> {
-    let Inst{22-21} = 0b01;
-  }
-
-  // Shift Narrow High
-  def _16B : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "16b", "8h",
-                              shr_imm8> {
-    let Inst{22-19} = 0b0001;
-  }
-
-  def _8H : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "8h", "4s",
-                             shr_imm16> {
-    let Inst{22-20} = 0b001;
-  }
-
-  def _4S : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "4s", "2d",
-                             shr_imm32> {
-    let Inst{22-21} = 0b01;
-  }
-}
-
-// Shift right narrow
-defm SHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10000, "shrn">;
-
-// Shift right narrow (prefix Q is saturating, prefix R is rounding)
-defm QSHRUNvvi :NeonI_N2VShR_Narrow<0b1, 0b10000, "sqshrun">;
-defm RSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10001, "rshrn">;
-defm QRSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10001, "sqrshrun">;
-defm SQSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10010, "sqshrn">;
-defm UQSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10010, "uqshrn">;
-defm SQRSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10011, "sqrshrn">;
-defm UQRSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10011, "uqrshrn">;
-
-def Neon_combine_2D : PatFrag<(ops node:$Rm, node:$Rn),
-                              (v2i64 (concat_vectors (v1i64 node:$Rm),
-                                                     (v1i64 node:$Rn)))>;
-def Neon_combine_8H : PatFrag<(ops node:$Rm, node:$Rn),
-                              (v8i16 (concat_vectors (v4i16 node:$Rm),
-                                                     (v4i16 node:$Rn)))>;
-def Neon_combine_4S : PatFrag<(ops node:$Rm, node:$Rn),
-                              (v4i32 (concat_vectors (v2i32 node:$Rm),
-                                                     (v2i32 node:$Rn)))>;
-def Neon_combine_4f : PatFrag<(ops node:$Rm, node:$Rn),
-                              (v4f32 (concat_vectors (v2f32 node:$Rm),
-                                                     (v2f32 node:$Rn)))>;
-def Neon_combine_2d : PatFrag<(ops node:$Rm, node:$Rn),
-                              (v2f64 (concat_vectors (v1f64 node:$Rm),
-                                                     (v1f64 node:$Rn)))>;
-
-def Neon_lshrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
-                             (v8i16 (srl (v8i16 node:$lhs),
-                               (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
-def Neon_lshrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
-                             (v4i32 (srl (v4i32 node:$lhs),
-                               (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
-def Neon_lshrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
-                             (v2i64 (srl (v2i64 node:$lhs),
-                               (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
-def Neon_ashrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
-                             (v8i16 (sra (v8i16 node:$lhs),
-                               (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
-def Neon_ashrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
-                             (v4i32 (sra (v4i32 node:$lhs),
-                               (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
-def Neon_ashrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
-                             (v2i64 (sra (v2i64 node:$lhs),
-                               (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
-
-// Normal shift right narrow is matched by IR (srl/sra, trunc, concat_vectors)
-multiclass Neon_shiftNarrow_patterns<string shr> {
-  def : Pat<(v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H") VPR128:$Rn,
-              (i32 shr_imm8:$Imm)))),
-            (SHRNvvi_8B VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S") VPR128:$Rn,
-              (i32 shr_imm16:$Imm)))),
-            (SHRNvvi_4H VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D") VPR128:$Rn,
-              (i32 shr_imm32:$Imm)))),
-            (SHRNvvi_2S VPR128:$Rn, imm:$Imm)>;
-
-  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
-              (v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H")
-                VPR128:$Rn, (i32 shr_imm8:$Imm))))))),
-            (SHRNvvi_16B (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
-                         VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
-              (v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S")
-                VPR128:$Rn, (i32 shr_imm16:$Imm))))))),
-            (SHRNvvi_8H (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
-                        VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
-              (v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D")
-                VPR128:$Rn, (i32 shr_imm32:$Imm))))))),
-            (SHRNvvi_4S (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
-                        VPR128:$Rn, imm:$Imm)>;
-}
-
-multiclass Neon_shiftNarrow_QR_patterns<SDPatternOperator op, string prefix> {
-  def : Pat<(v8i8 (op (v8i16 VPR128:$Rn), shr_imm8:$Imm)),
-            (!cast<Instruction>(prefix # "_8B") VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(v4i16 (op (v4i32 VPR128:$Rn), shr_imm16:$Imm)),
-            (!cast<Instruction>(prefix # "_4H") VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(v2i32 (op (v2i64 VPR128:$Rn), shr_imm32:$Imm)),
-            (!cast<Instruction>(prefix # "_2S") VPR128:$Rn, imm:$Imm)>;
-
-  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
-                (v1i64 (bitconvert (v8i8
-                    (op (v8i16 VPR128:$Rn), shr_imm8:$Imm))))),
-            (!cast<Instruction>(prefix # "_16B")
-                (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
-                VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
-                (v1i64 (bitconvert (v4i16
-                    (op (v4i32 VPR128:$Rn), shr_imm16:$Imm))))),
-            (!cast<Instruction>(prefix # "_8H")
-                (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
-                VPR128:$Rn, imm:$Imm)>;
-  def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
-                (v1i64 (bitconvert (v2i32
-                    (op (v2i64 VPR128:$Rn), shr_imm32:$Imm))))),
-            (!cast<Instruction>(prefix # "_4S")
-                  (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
-                  VPR128:$Rn, imm:$Imm)>;
-}
-
-defm : Neon_shiftNarrow_patterns<"lshr">;
-defm : Neon_shiftNarrow_patterns<"ashr">;
-
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrun, "QSHRUNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vrshrn, "RSHRNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrun, "QRSHRUNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrn, "SQSHRNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqshrn, "UQSHRNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrn, "SQRSHRNvvi">;
-defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqrshrn, "UQRSHRNvvi">;
-
-// Convert fix-point and float-pointing
-class N2VCvt_Fx<bit q, bit u, bits<5> opcode, string asmop, string T,
-                RegisterOperand VPRC, ValueType DestTy, ValueType SrcTy,
-                Operand ImmTy, SDPatternOperator IntOp>
-  : NeonI_2VShiftImm<q, u, opcode,
-                     (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
-                     asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
-                     [(set (DestTy VPRC:$Rd), (DestTy (IntOp (SrcTy VPRC:$Rn),
-                       (i32 ImmTy:$Imm))))],
-                     NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-multiclass NeonI_N2VCvt_Fx2fp<bit u, bits<5> opcode, string asmop,
-                              SDPatternOperator IntOp> {
-  def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2f32, v2i32,
-                      shr_imm32, IntOp> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4f32, v4i32,
-                      shr_imm32, IntOp> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2f64, v2i64,
-                      shr_imm64, IntOp> {
-    let Inst{22} = 0b1;
-  }
-}
-
-multiclass NeonI_N2VCvt_Fp2fx<bit u, bits<5> opcode, string asmop,
-                              SDPatternOperator IntOp> {
-  def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2i32, v2f32,
-                      shr_imm32, IntOp> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4i32, v4f32,
-                      shr_imm32, IntOp> {
-    let Inst{22-21} = 0b01;
-  }
-
-  def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2i64, v2f64,
-                      shr_imm64, IntOp> {
-    let Inst{22} = 0b1;
-  }
-}
-
-// Convert fixed-point to floating-point
-defm VCVTxs2f : NeonI_N2VCvt_Fx2fp<0, 0b11100, "scvtf",
-                                   int_arm_neon_vcvtfxs2fp>;
-defm VCVTxu2f : NeonI_N2VCvt_Fx2fp<1, 0b11100, "ucvtf",
-                                   int_arm_neon_vcvtfxu2fp>;
-
-// Convert floating-point to fixed-point
-defm VCVTf2xs : NeonI_N2VCvt_Fp2fx<0, 0b11111, "fcvtzs",
-                                   int_arm_neon_vcvtfp2fxs>;
-defm VCVTf2xu : NeonI_N2VCvt_Fp2fx<1, 0b11111, "fcvtzu",
-                                   int_arm_neon_vcvtfp2fxu>;
-
-multiclass Neon_sshll2_0<SDNode ext>
-{
-  def _v8i8  : PatFrag<(ops node:$Rn),
-                       (v8i16 (ext (v8i8 (Neon_High16B node:$Rn))))>;
-  def _v4i16 : PatFrag<(ops node:$Rn),
-                       (v4i32 (ext (v4i16 (Neon_High8H node:$Rn))))>;
-  def _v2i32 : PatFrag<(ops node:$Rn),
-                       (v2i64 (ext (v2i32 (Neon_High4S node:$Rn))))>;
-}
-
-defm NI_sext_high : Neon_sshll2_0<sext>;
-defm NI_zext_high : Neon_sshll2_0<zext>;
-
-
-//===----------------------------------------------------------------------===//
-// Multiclasses for NeonI_Across
-//===----------------------------------------------------------------------===//
-
-// Variant 1
-
-multiclass NeonI_2VAcross_1<bit u, bits<5> opcode,
-                            string asmop, SDPatternOperator opnode>
-{
-    def _1h8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
-                (outs FPR16:$Rd), (ins VPR64:$Rn),
-                asmop # "\t$Rd, $Rn.8b",
-                [(set (v1i16 FPR16:$Rd),
-                    (v1i16 (opnode (v8i8 VPR64:$Rn))))],
-                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-
-    def _1h16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
-                (outs FPR16:$Rd), (ins VPR128:$Rn),
-                asmop # "\t$Rd, $Rn.16b",
-                [(set (v1i16 FPR16:$Rd),
-                    (v1i16 (opnode (v16i8 VPR128:$Rn))))],
-                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-
-    def _1s4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
-                (outs FPR32:$Rd), (ins VPR64:$Rn),
-                asmop # "\t$Rd, $Rn.4h",
-                [(set (v1i32 FPR32:$Rd),
-                    (v1i32 (opnode (v4i16 VPR64:$Rn))))],
-                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-
-    def _1s8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
-                (outs FPR32:$Rd), (ins VPR128:$Rn),
-                asmop # "\t$Rd, $Rn.8h",
-                [(set (v1i32 FPR32:$Rd),
-                    (v1i32 (opnode (v8i16 VPR128:$Rn))))],
-                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-
-    // _1d2s doesn't exist!
-
-    def _1d4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
-                (outs FPR64:$Rd), (ins VPR128:$Rn),
-                asmop # "\t$Rd, $Rn.4s",
-                [(set (v1i64 FPR64:$Rd),
-                    (v1i64 (opnode (v4i32 VPR128:$Rn))))],
-                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-defm SADDLV : NeonI_2VAcross_1<0b0, 0b00011, "saddlv", int_aarch64_neon_saddlv>;
-defm UADDLV : NeonI_2VAcross_1<0b1, 0b00011, "uaddlv", int_aarch64_neon_uaddlv>;
-
-// Variant 2
-
-multiclass NeonI_2VAcross_2<bit u, bits<5> opcode,
-                            string asmop, SDPatternOperator opnode>
-{
-    def _1b8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
-                (outs FPR8:$Rd), (ins VPR64:$Rn),
-                asmop # "\t$Rd, $Rn.8b",
-                [(set (v1i8 FPR8:$Rd),
-                    (v1i8 (opnode (v8i8 VPR64:$Rn))))],
-                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-
-    def _1b16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
-                (outs FPR8:$Rd), (ins VPR128:$Rn),
-                asmop # "\t$Rd, $Rn.16b",
-                [(set (v1i8 FPR8:$Rd),
-                    (v1i8 (opnode (v16i8 VPR128:$Rn))))],
-                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-
-    def _1h4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
-                (outs FPR16:$Rd), (ins VPR64:$Rn),
-                asmop # "\t$Rd, $Rn.4h",
-                [(set (v1i16 FPR16:$Rd),
-                    (v1i16 (opnode (v4i16 VPR64:$Rn))))],
-                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-
-    def _1h8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
-                (outs FPR16:$Rd), (ins VPR128:$Rn),
-                asmop # "\t$Rd, $Rn.8h",
-                [(set (v1i16 FPR16:$Rd),
-                    (v1i16 (opnode (v8i16 VPR128:$Rn))))],
-                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-
-    // _1s2s doesn't exist!
-
-    def _1s4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
-                (outs FPR32:$Rd), (ins VPR128:$Rn),
-                asmop # "\t$Rd, $Rn.4s",
-                [(set (v1i32 FPR32:$Rd),
-                    (v1i32 (opnode (v4i32 VPR128:$Rn))))],
-                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-defm SMAXV : NeonI_2VAcross_2<0b0, 0b01010, "smaxv", int_aarch64_neon_smaxv>;
-defm UMAXV : NeonI_2VAcross_2<0b1, 0b01010, "umaxv", int_aarch64_neon_umaxv>;
-
-defm SMINV : NeonI_2VAcross_2<0b0, 0b11010, "sminv", int_aarch64_neon_sminv>;
-defm UMINV : NeonI_2VAcross_2<0b1, 0b11010, "uminv", int_aarch64_neon_uminv>;
-
-defm ADDV : NeonI_2VAcross_2<0b0, 0b11011, "addv", int_aarch64_neon_vaddv>;
-
-// Variant 3
-
-multiclass NeonI_2VAcross_3<bit u, bits<5> opcode, bits<2> size,
-                            string asmop, SDPatternOperator opnode> {
-    def _1s4s:  NeonI_2VAcross<0b1, u, size, opcode,
-                (outs FPR32:$Rd), (ins VPR128:$Rn),
-                asmop # "\t$Rd, $Rn.4s",
-                [(set (f32 FPR32:$Rd),
-                    (f32 (opnode (v4f32 VPR128:$Rn))))],
-                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-defm FMAXNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b00, "fmaxnmv",
-                                int_aarch64_neon_vmaxnmv>;
-defm FMINNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b10, "fminnmv",
-                                int_aarch64_neon_vminnmv>;
-
-defm FMAXV : NeonI_2VAcross_3<0b1, 0b01111, 0b00, "fmaxv",
-                              int_aarch64_neon_vmaxv>;
-defm FMINV : NeonI_2VAcross_3<0b1, 0b01111, 0b10, "fminv",
-                              int_aarch64_neon_vminv>;
-
-// The followings are for instruction class (Perm)
-
-class NeonI_Permute<bit q, bits<2> size, bits<3> opcode,
-                    string asmop, RegisterOperand OpVPR, string OpS,
-                    SDPatternOperator opnode, ValueType Ty>
-  : NeonI_Perm<q, size, opcode,
-               (outs OpVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
-               asmop # "\t$Rd." # OpS # ", $Rn." # OpS # ", $Rm." # OpS,
-               [(set (Ty OpVPR:$Rd),
-                  (Ty (opnode (Ty OpVPR:$Rn), (Ty OpVPR:$Rm))))],
-               NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-multiclass NeonI_Perm_pat<bits<3> opcode, string asmop,
-                          SDPatternOperator opnode> {
-  def _8b  : NeonI_Permute<0b0, 0b00, opcode, asmop,
-                           VPR64, "8b", opnode, v8i8>;
-  def _16b : NeonI_Permute<0b1, 0b00, opcode, asmop,
-                           VPR128, "16b",opnode, v16i8>;
-  def _4h  : NeonI_Permute<0b0, 0b01, opcode, asmop,
-                           VPR64, "4h", opnode, v4i16>;
-  def _8h  : NeonI_Permute<0b1, 0b01, opcode, asmop,
-                           VPR128, "8h", opnode, v8i16>;
-  def _2s  : NeonI_Permute<0b0, 0b10, opcode, asmop,
-                           VPR64, "2s", opnode, v2i32>;
-  def _4s  : NeonI_Permute<0b1, 0b10, opcode, asmop,
-                           VPR128, "4s", opnode, v4i32>;
-  def _2d  : NeonI_Permute<0b1, 0b11, opcode, asmop,
-                           VPR128, "2d", opnode, v2i64>;
-}
-
-defm UZP1vvv : NeonI_Perm_pat<0b001, "uzp1", Neon_uzp1>;
-defm TRN1vvv : NeonI_Perm_pat<0b010, "trn1", Neon_trn1>;
-defm ZIP1vvv : NeonI_Perm_pat<0b011, "zip1", Neon_zip1>;
-defm UZP2vvv : NeonI_Perm_pat<0b101, "uzp2", Neon_uzp2>;
-defm TRN2vvv : NeonI_Perm_pat<0b110, "trn2", Neon_trn2>;
-defm ZIP2vvv : NeonI_Perm_pat<0b111, "zip2", Neon_zip2>;
-
-multiclass NeonI_Perm_float_pat<string INS, SDPatternOperator opnode> {
-  def : Pat<(v2f32 (opnode (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
-            (!cast<Instruction>(INS # "_2s") VPR64:$Rn, VPR64:$Rm)>;
-
-  def : Pat<(v4f32 (opnode (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
-            (!cast<Instruction>(INS # "_4s") VPR128:$Rn, VPR128:$Rm)>;
-
-  def : Pat<(v2f64 (opnode (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
-            (!cast<Instruction>(INS # "_2d") VPR128:$Rn, VPR128:$Rm)>;
-}
-
-defm : NeonI_Perm_float_pat<"UZP1vvv", Neon_uzp1>;
-defm : NeonI_Perm_float_pat<"UZP2vvv", Neon_uzp2>;
-defm : NeonI_Perm_float_pat<"ZIP1vvv", Neon_zip1>;
-defm : NeonI_Perm_float_pat<"ZIP2vvv", Neon_zip2>;
-defm : NeonI_Perm_float_pat<"TRN1vvv", Neon_trn1>;
-defm : NeonI_Perm_float_pat<"TRN2vvv", Neon_trn2>;
-
-// The followings are for instruction class (3V Diff)
-
-// normal long/long2 pattern
-class NeonI_3VDL<bit q, bit u, bits<2> size, bits<4> opcode,
-                 string asmop, string ResS, string OpS,
-                 SDPatternOperator opnode, SDPatternOperator ext,
-                 RegisterOperand OpVPR,
-                 ValueType ResTy, ValueType OpTy>
-  : NeonI_3VDiff<q, u, size, opcode,
-                 (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
-                 [(set (ResTy VPR128:$Rd),
-                    (ResTy (opnode (ResTy (ext (OpTy OpVPR:$Rn))),
-                                   (ResTy (ext (OpTy OpVPR:$Rm))))))],
-                 NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-multiclass NeonI_3VDL_s<bit u, bits<4> opcode,
-                        string asmop, SDPatternOperator opnode,
-                        bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
-                           opnode, sext, VPR64, v8i16, v8i8>;
-    def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
-                           opnode, sext, VPR64, v4i32, v4i16>;
-    def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
-                           opnode, sext, VPR64, v2i64, v2i32>;
-  }
-}
-
-multiclass NeonI_3VDL2_s<bit u, bits<4> opcode, string asmop,
-                         SDPatternOperator opnode, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
-                            opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
-    def _4s8h  : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
-                            opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
-    def _2d4s  : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
-                            opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
-  }
-}
-
-multiclass NeonI_3VDL_u<bit u, bits<4> opcode, string asmop,
-                        SDPatternOperator opnode, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
-                           opnode, zext, VPR64, v8i16, v8i8>;
-    def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
-                           opnode, zext, VPR64, v4i32, v4i16>;
-    def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
-                           opnode, zext, VPR64, v2i64, v2i32>;
-  }
-}
-
-multiclass NeonI_3VDL2_u<bit u, bits<4> opcode, string asmop,
-                         SDPatternOperator opnode, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
-                            opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
-    def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
-                           opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
-    def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
-                           opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
-  }
-}
-
-defm SADDLvvv :  NeonI_3VDL_s<0b0, 0b0000, "saddl", add, 1>;
-defm UADDLvvv :  NeonI_3VDL_u<0b1, 0b0000, "uaddl", add, 1>;
-
-defm SADDL2vvv :  NeonI_3VDL2_s<0b0, 0b0000, "saddl2", add, 1>;
-defm UADDL2vvv :  NeonI_3VDL2_u<0b1, 0b0000, "uaddl2", add, 1>;
-
-defm SSUBLvvv :  NeonI_3VDL_s<0b0, 0b0010, "ssubl", sub, 0>;
-defm USUBLvvv :  NeonI_3VDL_u<0b1, 0b0010, "usubl", sub, 0>;
-
-defm SSUBL2vvv :  NeonI_3VDL2_s<0b0, 0b0010, "ssubl2", sub, 0>;
-defm USUBL2vvv :  NeonI_3VDL2_u<0b1, 0b0010, "usubl2", sub, 0>;
-
-// normal wide/wide2 pattern
-class NeonI_3VDW<bit q, bit u, bits<2> size, bits<4> opcode,
-                 string asmop, string ResS, string OpS,
-                 SDPatternOperator opnode, SDPatternOperator ext,
-                 RegisterOperand OpVPR,
-                 ValueType ResTy, ValueType OpTy>
-  : NeonI_3VDiff<q, u, size, opcode,
-                 (outs VPR128:$Rd), (ins VPR128:$Rn, OpVPR:$Rm),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # ResS # ", $Rm." # OpS,
-                 [(set (ResTy VPR128:$Rd),
-                    (ResTy (opnode (ResTy VPR128:$Rn),
-                                   (ResTy (ext (OpTy OpVPR:$Rm))))))],
-                 NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-multiclass NeonI_3VDW_s<bit u, bits<4> opcode, string asmop,
-                        SDPatternOperator opnode> {
-  def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
-                         opnode, sext, VPR64, v8i16, v8i8>;
-  def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
-                         opnode, sext, VPR64, v4i32, v4i16>;
-  def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
-                         opnode, sext, VPR64, v2i64, v2i32>;
-}
-
-defm SADDWvvv :  NeonI_3VDW_s<0b0, 0b0001, "saddw", add>;
-defm SSUBWvvv :  NeonI_3VDW_s<0b0, 0b0011, "ssubw", sub>;
-
-multiclass NeonI_3VDW2_s<bit u, bits<4> opcode, string asmop,
-                         SDPatternOperator opnode> {
-  def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
-                          opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
-  def _4s8h  : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
-                          opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
-  def _2d4s  : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
-                          opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
-}
-
-defm SADDW2vvv :  NeonI_3VDW2_s<0b0, 0b0001, "saddw2", add>;
-defm SSUBW2vvv :  NeonI_3VDW2_s<0b0, 0b0011, "ssubw2", sub>;
-
-multiclass NeonI_3VDW_u<bit u, bits<4> opcode, string asmop,
-                        SDPatternOperator opnode> {
-  def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
-                         opnode, zext, VPR64, v8i16, v8i8>;
-  def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
-                         opnode, zext, VPR64, v4i32, v4i16>;
-  def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
-                         opnode, zext, VPR64, v2i64, v2i32>;
-}
-
-defm UADDWvvv :  NeonI_3VDW_u<0b1, 0b0001, "uaddw", add>;
-defm USUBWvvv :  NeonI_3VDW_u<0b1, 0b0011, "usubw", sub>;
-
-multiclass NeonI_3VDW2_u<bit u, bits<4> opcode, string asmop,
-                         SDPatternOperator opnode> {
-  def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
-                          opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
-  def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
-                         opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
-  def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
-                         opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
-}
-
-defm UADDW2vvv :  NeonI_3VDW2_u<0b1, 0b0001, "uaddw2", add>;
-defm USUBW2vvv :  NeonI_3VDW2_u<0b1, 0b0011, "usubw2", sub>;
-
-// Get the high half part of the vector element.
-multiclass NeonI_get_high {
-  def _8h : PatFrag<(ops node:$Rn),
-                    (v8i8 (trunc (v8i16 (srl (v8i16 node:$Rn),
-                                             (v8i16 (Neon_vdup (i32 8)))))))>;
-  def _4s : PatFrag<(ops node:$Rn),
-                    (v4i16 (trunc (v4i32 (srl (v4i32 node:$Rn),
-                                              (v4i32 (Neon_vdup (i32 16)))))))>;
-  def _2d : PatFrag<(ops node:$Rn),
-                    (v2i32 (trunc (v2i64 (srl (v2i64 node:$Rn),
-                                              (v2i64 (Neon_vdup (i32 32)))))))>;
-}
-
-defm NI_get_hi : NeonI_get_high;
-
-// pattern for addhn/subhn with 2 operands
-class NeonI_3VDN_addhn_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
-                           string asmop, string ResS, string OpS,
-                           SDPatternOperator opnode, SDPatternOperator get_hi,
-                           ValueType ResTy, ValueType OpTy>
-  : NeonI_3VDiff<q, u, size, opcode,
-                 (outs VPR64:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
-                 [(set (ResTy VPR64:$Rd),
-                    (ResTy (get_hi
-                      (OpTy (opnode (OpTy VPR128:$Rn),
-                                    (OpTy VPR128:$Rm))))))],
-                 NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-multiclass NeonI_3VDN_addhn_2Op<bit u, bits<4> opcode, string asmop,
-                                SDPatternOperator opnode, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8b8h : NeonI_3VDN_addhn_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
-                                     opnode, NI_get_hi_8h, v8i8, v8i16>;
-    def _4h4s : NeonI_3VDN_addhn_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
-                                     opnode, NI_get_hi_4s, v4i16, v4i32>;
-    def _2s2d : NeonI_3VDN_addhn_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
-                                     opnode, NI_get_hi_2d, v2i32, v2i64>;
-  }
-}
-
-defm ADDHNvvv  : NeonI_3VDN_addhn_2Op<0b0, 0b0100, "addhn", add, 1>;
-defm SUBHNvvv  : NeonI_3VDN_addhn_2Op<0b0, 0b0110, "subhn", sub, 0>;
-
-// pattern for operation with 2 operands
-class NeonI_3VD_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
-                    string asmop, string ResS, string OpS,
-                    SDPatternOperator opnode,
-                    RegisterOperand ResVPR, RegisterOperand OpVPR,
-                    ValueType ResTy, ValueType OpTy>
-  : NeonI_3VDiff<q, u, size, opcode,
-                 (outs ResVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
-                 [(set (ResTy ResVPR:$Rd),
-                    (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))],
-                 NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-// normal narrow pattern
-multiclass NeonI_3VDN_2Op<bit u, bits<4> opcode, string asmop,
-                          SDPatternOperator opnode, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8b8h : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
-                              opnode, VPR64, VPR128, v8i8, v8i16>;
-    def _4h4s : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
-                              opnode, VPR64, VPR128, v4i16, v4i32>;
-    def _2s2d : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
-                              opnode, VPR64, VPR128, v2i32, v2i64>;
-  }
-}
-
-defm RADDHNvvv : NeonI_3VDN_2Op<0b1, 0b0100, "raddhn", int_arm_neon_vraddhn, 1>;
-defm RSUBHNvvv : NeonI_3VDN_2Op<0b1, 0b0110, "rsubhn", int_arm_neon_vrsubhn, 0>;
-
-// pattern for acle intrinsic with 3 operands
-class NeonI_3VDN_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
-                     string asmop, string ResS, string OpS>
-  : NeonI_3VDiff<q, u, size, opcode,
-                 (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
-                 [], NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-  let neverHasSideEffects = 1;
-}
-
-multiclass NeonI_3VDN_3Op_v1<bit u, bits<4> opcode, string asmop> {
-  def _16b8h : NeonI_3VDN_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h">;
-  def _8h4s : NeonI_3VDN_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s">;
-  def _4s2d : NeonI_3VDN_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d">;
-}
-
-defm ADDHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0100, "addhn2">;
-defm SUBHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0110, "subhn2">;
-
-defm RADDHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0100, "raddhn2">;
-defm RSUBHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0110, "rsubhn2">;
-
-// Patterns have to be separate because there's a SUBREG_TO_REG in the output
-// part.
-class NarrowHighHalfPat<Instruction INST, ValueType DstTy, ValueType SrcTy,
-                        SDPatternOperator coreop>
-  : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
-                      (v1i64 (bitconvert (DstTy (coreop (SrcTy VPR128:$Rn),
-                                                        (SrcTy VPR128:$Rm)))))),
-        (INST (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
-              VPR128:$Rn, VPR128:$Rm)>;
-
-// addhn2 patterns
-def : NarrowHighHalfPat<ADDHN2vvv_16b8h, v8i8,  v8i16,
-          BinOpFrag<(NI_get_hi_8h (add node:$LHS, node:$RHS))>>;
-def : NarrowHighHalfPat<ADDHN2vvv_8h4s,  v4i16, v4i32,
-          BinOpFrag<(NI_get_hi_4s (add node:$LHS, node:$RHS))>>;
-def : NarrowHighHalfPat<ADDHN2vvv_4s2d,  v2i32, v2i64,
-          BinOpFrag<(NI_get_hi_2d (add node:$LHS, node:$RHS))>>;
-
-// subhn2 patterns
-def : NarrowHighHalfPat<SUBHN2vvv_16b8h, v8i8,  v8i16,
-          BinOpFrag<(NI_get_hi_8h (sub node:$LHS, node:$RHS))>>;
-def : NarrowHighHalfPat<SUBHN2vvv_8h4s,  v4i16, v4i32,
-          BinOpFrag<(NI_get_hi_4s (sub node:$LHS, node:$RHS))>>;
-def : NarrowHighHalfPat<SUBHN2vvv_4s2d,  v2i32, v2i64,
-          BinOpFrag<(NI_get_hi_2d (sub node:$LHS, node:$RHS))>>;
-
-// raddhn2 patterns
-def : NarrowHighHalfPat<RADDHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vraddhn>;
-def : NarrowHighHalfPat<RADDHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vraddhn>;
-def : NarrowHighHalfPat<RADDHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vraddhn>;
-
-// rsubhn2 patterns
-def : NarrowHighHalfPat<RSUBHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vrsubhn>;
-def : NarrowHighHalfPat<RSUBHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vrsubhn>;
-def : NarrowHighHalfPat<RSUBHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vrsubhn>;
-
-// patterns that need to extend the result
-class NeonI_3VDL_Ext<bit q, bit u, bits<2> size, bits<4> opcode,
-                     string asmop, string ResS, string OpS,
-                     SDPatternOperator opnode,
-                     RegisterOperand OpVPR,
-                     ValueType ResTy, ValueType OpTy, ValueType OpSTy>
-  : NeonI_3VDiff<q, u, size, opcode,
-                 (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
-                 [(set (ResTy VPR128:$Rd),
-                    (ResTy (zext (OpSTy (opnode (OpTy OpVPR:$Rn),
-                                                (OpTy OpVPR:$Rm))))))],
-                 NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-multiclass NeonI_3VDL_zext<bit u, bits<4> opcode, string asmop,
-                           SDPatternOperator opnode, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8h8b : NeonI_3VDL_Ext<0b0, u, 0b00, opcode, asmop, "8h", "8b",
-                               opnode, VPR64, v8i16, v8i8, v8i8>;
-    def _4s4h : NeonI_3VDL_Ext<0b0, u, 0b01, opcode, asmop, "4s", "4h",
-                               opnode, VPR64, v4i32, v4i16, v4i16>;
-    def _2d2s : NeonI_3VDL_Ext<0b0, u, 0b10, opcode, asmop, "2d", "2s",
-                               opnode, VPR64, v2i64, v2i32, v2i32>;
-  }
-}
-
-defm SABDLvvv : NeonI_3VDL_zext<0b0, 0b0111, "sabdl", int_arm_neon_vabds, 1>;
-defm UABDLvvv : NeonI_3VDL_zext<0b1, 0b0111, "uabdl", int_arm_neon_vabdu, 1>;
-
-multiclass NeonI_Op_High<SDPatternOperator op> {
-  def _16B : PatFrag<(ops node:$Rn, node:$Rm),
-                     (op (v8i8 (Neon_High16B node:$Rn)),
-                         (v8i8 (Neon_High16B node:$Rm)))>;
-  def _8H  : PatFrag<(ops node:$Rn, node:$Rm),
-                     (op (v4i16 (Neon_High8H node:$Rn)),
-                         (v4i16 (Neon_High8H node:$Rm)))>;
-  def _4S  : PatFrag<(ops node:$Rn, node:$Rm),
-                     (op (v2i32 (Neon_High4S node:$Rn)),
-                         (v2i32 (Neon_High4S node:$Rm)))>;
-}
-
-defm NI_sabdl_hi : NeonI_Op_High<int_arm_neon_vabds>;
-defm NI_uabdl_hi : NeonI_Op_High<int_arm_neon_vabdu>;
-defm NI_smull_hi : NeonI_Op_High<int_arm_neon_vmulls>;
-defm NI_umull_hi : NeonI_Op_High<int_arm_neon_vmullu>;
-defm NI_qdmull_hi : NeonI_Op_High<int_arm_neon_vqdmull>;
-defm NI_pmull_hi : NeonI_Op_High<int_arm_neon_vmullp>;
-
-multiclass NeonI_3VDL_Abd_u<bit u, bits<4> opcode, string asmop, string opnode,
-                            bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8h8b  : NeonI_3VDL_Ext<0b1, u, 0b00, opcode, asmop, "8h", "16b",
-                                !cast<PatFrag>(opnode # "_16B"),
-                                VPR128, v8i16, v16i8, v8i8>;
-    def _4s4h  : NeonI_3VDL_Ext<0b1, u, 0b01, opcode, asmop, "4s", "8h",
-                                !cast<PatFrag>(opnode # "_8H"),
-                                VPR128, v4i32, v8i16, v4i16>;
-    def _2d2s  : NeonI_3VDL_Ext<0b1, u, 0b10, opcode, asmop, "2d", "4s",
-                                !cast<PatFrag>(opnode # "_4S"),
-                                VPR128, v2i64, v4i32, v2i32>;
-  }
-}
-
-defm SABDL2vvv : NeonI_3VDL_Abd_u<0b0, 0b0111, "sabdl2", "NI_sabdl_hi", 1>;
-defm UABDL2vvv : NeonI_3VDL_Abd_u<0b1, 0b0111, "uabdl2", "NI_uabdl_hi", 1>;
-
-// For patterns that need two operators to be chained.
-class NeonI_3VDL_Aba<bit q, bit u, bits<2> size, bits<4> opcode,
-                     string asmop, string ResS, string OpS,
-                     SDPatternOperator opnode, SDPatternOperator subop,
-                     RegisterOperand OpVPR,
-                     ValueType ResTy, ValueType OpTy, ValueType OpSTy>
-  : NeonI_3VDiff<q, u, size, opcode,
-                 (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
-                 [(set (ResTy VPR128:$Rd),
-                    (ResTy (opnode
-                      (ResTy VPR128:$src),
-                      (ResTy (zext (OpSTy (subop (OpTy OpVPR:$Rn),
-                                                 (OpTy OpVPR:$Rm))))))))],
-                 NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-}
-
-multiclass NeonI_3VDL_Aba_v1<bit u, bits<4> opcode, string asmop,
-                             SDPatternOperator opnode, SDPatternOperator subop>{
-  def _8h8b : NeonI_3VDL_Aba<0b0, u, 0b00, opcode, asmop, "8h", "8b",
-                             opnode, subop, VPR64, v8i16, v8i8, v8i8>;
-  def _4s4h : NeonI_3VDL_Aba<0b0, u, 0b01, opcode, asmop, "4s", "4h",
-                             opnode, subop, VPR64, v4i32, v4i16, v4i16>;
-  def _2d2s : NeonI_3VDL_Aba<0b0, u, 0b10, opcode, asmop, "2d", "2s",
-                             opnode, subop, VPR64, v2i64, v2i32, v2i32>;
-}
-
-defm SABALvvv :  NeonI_3VDL_Aba_v1<0b0, 0b0101, "sabal",
-                                   add, int_arm_neon_vabds>;
-defm UABALvvv :  NeonI_3VDL_Aba_v1<0b1, 0b0101, "uabal",
-                                   add, int_arm_neon_vabdu>;
-
-multiclass NeonI_3VDL2_Aba_v1<bit u, bits<4> opcode, string asmop,
-                              SDPatternOperator opnode, string subop> {
-  def _8h8b : NeonI_3VDL_Aba<0b1, u, 0b00, opcode, asmop, "8h", "16b",
-                             opnode, !cast<PatFrag>(subop # "_16B"),
-                             VPR128, v8i16, v16i8, v8i8>;
-  def _4s4h : NeonI_3VDL_Aba<0b1, u, 0b01, opcode, asmop, "4s", "8h",
-                             opnode, !cast<PatFrag>(subop # "_8H"),
-                             VPR128, v4i32, v8i16, v4i16>;
-  def _2d2s : NeonI_3VDL_Aba<0b1, u, 0b10, opcode, asmop, "2d", "4s",
-                             opnode, !cast<PatFrag>(subop # "_4S"),
-                             VPR128, v2i64, v4i32, v2i32>;
-}
-
-defm SABAL2vvv :  NeonI_3VDL2_Aba_v1<0b0, 0b0101, "sabal2", add,
-                                     "NI_sabdl_hi">;
-defm UABAL2vvv :  NeonI_3VDL2_Aba_v1<0b1, 0b0101, "uabal2", add,
-                                     "NI_uabdl_hi">;
-
-// Long pattern with 2 operands
-multiclass NeonI_3VDL_2Op<bit u, bits<4> opcode, string asmop,
-                          SDPatternOperator opnode, bit Commutable = 0> {
-  let isCommutable = Commutable,
-      SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
-    def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
-                              opnode, VPR128, VPR64, v8i16, v8i8>;
-    def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
-                              opnode, VPR128, VPR64, v4i32, v4i16>;
-    def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
-                              opnode, VPR128, VPR64, v2i64, v2i32>;
-  }
-}
-
-defm SMULLvvv :  NeonI_3VDL_2Op<0b0, 0b1100, "smull", int_arm_neon_vmulls, 1>;
-defm UMULLvvv :  NeonI_3VDL_2Op<0b1, 0b1100, "umull", int_arm_neon_vmullu, 1>;
-
-class NeonI_3VDL2_2Op_mull<bit q, bit u, bits<2> size, bits<4> opcode,
-                           string asmop, string ResS, string OpS,
-                           SDPatternOperator opnode,
-                           ValueType ResTy, ValueType OpTy>
-  : NeonI_3VDiff<q, u, size, opcode,
-                 (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
-                 [(set (ResTy VPR128:$Rd),
-                    (ResTy (opnode (OpTy VPR128:$Rn), (OpTy VPR128:$Rm))))],
-                 NoItinerary>,
-    Sched<[WriteFPMul, ReadFPMul, ReadFPMul]>;
-
-multiclass NeonI_3VDL2_2Op_mull_v1<bit u, bits<4> opcode, string asmop,
-                                   string opnode, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
-                                      !cast<PatFrag>(opnode # "_16B"),
-                                      v8i16, v16i8>;
-    def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
-                                     !cast<PatFrag>(opnode # "_8H"),
-                                     v4i32, v8i16>;
-    def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
-                                     !cast<PatFrag>(opnode # "_4S"),
-                                     v2i64, v4i32>;
-  }
-}
-
-defm SMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b0, 0b1100, "smull2",
-                                         "NI_smull_hi", 1>;
-defm UMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b1, 0b1100, "umull2",
-                                         "NI_umull_hi", 1>;
-
-// Long pattern with 3 operands
-class NeonI_3VDL_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
-                     string asmop, string ResS, string OpS,
-                     SDPatternOperator opnode,
-                     ValueType ResTy, ValueType OpTy>
-  : NeonI_3VDiff<q, u, size, opcode,
-                 (outs VPR128:$Rd), (ins VPR128:$src, VPR64:$Rn, VPR64:$Rm),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
-                 [(set (ResTy VPR128:$Rd),
-                    (ResTy (opnode
-                      (ResTy VPR128:$src),
-                      (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))))],
-               NoItinerary>,
-    Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
-  let Constraints = "$src = $Rd";
-}
-
-multiclass NeonI_3VDL_3Op_v1<bit u, bits<4> opcode, string asmop,
-                             SDPatternOperator opnode> {
-  def _8h8b : NeonI_3VDL_3Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
-                             opnode, v8i16, v8i8>;
-  def _4s4h : NeonI_3VDL_3Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
-                             opnode, v4i32, v4i16>;
-  def _2d2s : NeonI_3VDL_3Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
-                             opnode, v2i64, v2i32>;
-}
-
-def Neon_smlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
-                         (add node:$Rd,
-                            (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
-
-def Neon_umlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
-                         (add node:$Rd,
-                            (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
-
-def Neon_smlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
-                         (sub node:$Rd,
-                            (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
-
-def Neon_umlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
-                         (sub node:$Rd,
-                            (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
-
-defm SMLALvvv :  NeonI_3VDL_3Op_v1<0b0, 0b1000, "smlal", Neon_smlal>;
-defm UMLALvvv :  NeonI_3VDL_3Op_v1<0b1, 0b1000, "umlal", Neon_umlal>;
-
-defm SMLSLvvv :  NeonI_3VDL_3Op_v1<0b0, 0b1010, "smlsl", Neon_smlsl>;
-defm UMLSLvvv :  NeonI_3VDL_3Op_v1<0b1, 0b1010, "umlsl", Neon_umlsl>;
-
-class NeonI_3VDL2_3Op_mlas<bit q, bit u, bits<2> size, bits<4> opcode,
-                           string asmop, string ResS, string OpS,
-                           SDPatternOperator subop, SDPatternOperator opnode,
-                           RegisterOperand OpVPR,
-                           ValueType ResTy, ValueType OpTy>
-  : NeonI_3VDiff<q, u, size, opcode,
-               (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
-               asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
-               [(set (ResTy VPR128:$Rd),
-                  (ResTy (subop
-                    (ResTy VPR128:$src),
-                    (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))))],
-               NoItinerary>,
-    Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
-  let Constraints = "$src = $Rd";
-}
-
-multiclass NeonI_3VDL2_3Op_mlas_v1<bit u, bits<4> opcode, string asmop,
-                                   SDPatternOperator subop, string opnode> {
-  def _8h16b : NeonI_3VDL2_3Op_mlas<0b1, u, 0b00, opcode, asmop, "8h", "16b",
-                                    subop, !cast<PatFrag>(opnode # "_16B"),
-                                    VPR128, v8i16, v16i8>;
-  def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
-                                   subop, !cast<PatFrag>(opnode # "_8H"),
-                                   VPR128, v4i32, v8i16>;
-  def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
-                                   subop, !cast<PatFrag>(opnode # "_4S"),
-                                   VPR128, v2i64, v4i32>;
-}
-
-defm SMLAL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1000, "smlal2",
-                                          add, "NI_smull_hi">;
-defm UMLAL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1000, "umlal2",
-                                          add, "NI_umull_hi">;
-
-defm SMLSL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1010, "smlsl2",
-                                          sub, "NI_smull_hi">;
-defm UMLSL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1010, "umlsl2",
-                                          sub, "NI_umull_hi">;
-
-multiclass NeonI_3VDL_qdmlal_3Op_v2<bit u, bits<4> opcode, string asmop,
-                                    SDPatternOperator opnode> {
-  def _4s4h : NeonI_3VDL2_3Op_mlas<0b0, u, 0b01, opcode, asmop, "4s", "4h",
-                                   opnode, int_arm_neon_vqdmull,
-                                   VPR64, v4i32, v4i16>;
-  def _2d2s : NeonI_3VDL2_3Op_mlas<0b0, u, 0b10, opcode, asmop, "2d", "2s",
-                                   opnode, int_arm_neon_vqdmull,
-                                   VPR64, v2i64, v2i32>;
-}
-
-defm SQDMLALvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1001, "sqdmlal",
-                                           int_arm_neon_vqadds>;
-defm SQDMLSLvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1011, "sqdmlsl",
-                                           int_arm_neon_vqsubs>;
-
-multiclass NeonI_3VDL_v2<bit u, bits<4> opcode, string asmop,
-                         SDPatternOperator opnode, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
-                              opnode, VPR128, VPR64, v4i32, v4i16>;
-    def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
-                              opnode, VPR128, VPR64, v2i64, v2i32>;
-  }
-}
-
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
-defm SQDMULLvvv : NeonI_3VDL_v2<0b0, 0b1101, "sqdmull",
-                                int_arm_neon_vqdmull, 1>;
-}
-
-multiclass NeonI_3VDL2_2Op_mull_v2<bit u, bits<4> opcode, string asmop,
-                                   string opnode, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
-                                     !cast<PatFrag>(opnode # "_8H"),
-                                     v4i32, v8i16>;
-    def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
-                                     !cast<PatFrag>(opnode # "_4S"),
-                                     v2i64, v4i32>;
-  }
-}
-
-defm SQDMULL2vvv : NeonI_3VDL2_2Op_mull_v2<0b0, 0b1101, "sqdmull2",
-                                           "NI_qdmull_hi", 1>;
-
-multiclass NeonI_3VDL2_3Op_qdmlal_v2<bit u, bits<4> opcode, string asmop,
-                                     SDPatternOperator opnode> {
-  def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
-                                   opnode, NI_qdmull_hi_8H,
-                                   VPR128, v4i32, v8i16>;
-  def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
-                                   opnode, NI_qdmull_hi_4S,
-                                   VPR128, v2i64, v4i32>;
-}
-
-defm SQDMLAL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1001, "sqdmlal2",
-                                             int_arm_neon_vqadds>;
-defm SQDMLSL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1011, "sqdmlsl2",
-                                             int_arm_neon_vqsubs>;
-
-multiclass NeonI_3VDL_v3<bit u, bits<4> opcode, string asmop,
-                         SDPatternOperator opnode_8h8b,
-                         SDPatternOperator opnode_1q1d, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
-                              opnode_8h8b, VPR128, VPR64, v8i16, v8i8>;
-
-    def _1q1d : NeonI_3VD_2Op<0b0, u, 0b11, opcode, asmop, "1q", "1d",
-                              opnode_1q1d, VPR128, VPR64, v16i8, v1i64>;
-  }
-}
-
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in
-defm PMULLvvv : NeonI_3VDL_v3<0b0, 0b1110, "pmull", int_arm_neon_vmullp,
-                              int_aarch64_neon_vmull_p64, 1>;
-
-multiclass NeonI_3VDL2_2Op_mull_v3<bit u, bits<4> opcode, string asmop,
-                                   string opnode, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
-                                      !cast<PatFrag>(opnode # "_16B"),
-                                      v8i16, v16i8>;
-
-    def _1q2d : 
-      NeonI_3VDiff<0b1, u, 0b11, opcode,
-                   (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
-                   asmop # "\t$Rd.1q, $Rn.2d, $Rm.2d",
-                   [(set (v16i8 VPR128:$Rd),
-                      (v16i8 (int_aarch64_neon_vmull_p64 
-                        (v1i64 (scalar_to_vector
-                          (i64 (vector_extract (v2i64 VPR128:$Rn), 1)))),
-                        (v1i64 (scalar_to_vector
-                          (i64 (vector_extract (v2i64 VPR128:$Rm), 1)))))))],
-                   NoItinerary>,
-      Sched<[WriteFPMul, ReadFPMul, ReadFPMul]>;
-  }
-
-  def : Pat<(v16i8 (int_aarch64_neon_vmull_p64
-                      (v1i64 (extract_subvector (v2i64 VPR128:$Rn), (i64 1))),
-                      (v1i64 (extract_subvector (v2i64 VPR128:$Rm), (i64 1))))),
-            (!cast<Instruction>(NAME # "_1q2d") VPR128:$Rn, VPR128:$Rm)>;
-}
-
-defm PMULL2vvv : NeonI_3VDL2_2Op_mull_v3<0b0, 0b1110, "pmull2", "NI_pmull_hi",
-                                         1>;
-
-// End of implementation for instruction class (3V Diff)
-
-// The following are the vector load/store multiple N-element structure
-// instructions (class SIMD lselem).
-
-// ld1:         load multiple 1-element structure to 1/2/3/4 registers.
-// ld2/ld3/ld4: load multiple N-element structure to N registers (N = 2, 3, 4).
-//              The structure consists of a sequence of sets of N values.
-//              The first element of the structure is placed in the first lane
-//              of the first vector, the second element in the first lane
-//              of the second vector, and so on.
-// E.g. LD1_3V_2S will load 32-bit elements {A, B, C, D, E, F} sequentially into
-// the three 64-bit vectors list {BA, DC, FE}.
-// E.g. LD3_2S will load 32-bit elements {A, B, C, D, E, F} into the three
-// 64-bit vectors list {DA, EB, FC}.
-// Store instructions store multiple structure to N registers like load.
-
-
-class NeonI_LDVList<bit q, bits<4> opcode, bits<2> size,
-                    RegisterOperand VecList, string asmop>
-  : NeonI_LdStMult<q, 1, opcode, size,
-                 (outs VecList:$Rt), (ins GPR64xsp:$Rn),
-                 asmop # "\t$Rt, [$Rn]",
-                 [],
-                 NoItinerary>,
-    Sched<[WriteVecLd, ReadVecLd]> {
-  let mayLoad = 1;
-  let neverHasSideEffects = 1;
-}
-
-multiclass LDVList_BHSD<bits<4> opcode, string List, string asmop> {
-  def _8B : NeonI_LDVList<0, opcode, 0b00,
-                          !cast<RegisterOperand>(List # "8B_operand"), asmop>;
-
-  def _4H : NeonI_LDVList<0, opcode, 0b01,
-                          !cast<RegisterOperand>(List # "4H_operand"), asmop>;
-
-  def _2S : NeonI_LDVList<0, opcode, 0b10,
-                          !cast<RegisterOperand>(List # "2S_operand"), asmop>;
-
-  def _16B : NeonI_LDVList<1, opcode, 0b00,
-                           !cast<RegisterOperand>(List # "16B_operand"), asmop>;
-
-  def _8H : NeonI_LDVList<1, opcode, 0b01,
-                          !cast<RegisterOperand>(List # "8H_operand"), asmop>;
-
-  def _4S : NeonI_LDVList<1, opcode, 0b10,
-                          !cast<RegisterOperand>(List # "4S_operand"), asmop>;
-
-  def _2D : NeonI_LDVList<1, opcode, 0b11,
-                          !cast<RegisterOperand>(List # "2D_operand"), asmop>;
-}
-
-// Load multiple N-element structure to N consecutive registers (N = 1,2,3,4)
-defm LD1 : LDVList_BHSD<0b0111, "VOne", "ld1">;
-def LD1_1D : NeonI_LDVList<0, 0b0111, 0b11, VOne1D_operand, "ld1">;
-
-defm LD2 : LDVList_BHSD<0b1000, "VPair", "ld2">;
-
-defm LD3 : LDVList_BHSD<0b0100, "VTriple", "ld3">;
-
-defm LD4 : LDVList_BHSD<0b0000, "VQuad", "ld4">;
-
-// Load multiple 1-element structure to N consecutive registers (N = 2,3,4)
-defm LD1x2 : LDVList_BHSD<0b1010, "VPair", "ld1">;
-def LD1x2_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">;
-
-defm LD1x3 : LDVList_BHSD<0b0110, "VTriple", "ld1">;
-def LD1x3_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">;
-
-defm LD1x4 : LDVList_BHSD<0b0010, "VQuad", "ld1">;
-def LD1x4_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">;
-
-class NeonI_STVList<bit q, bits<4> opcode, bits<2> size,
-                    RegisterOperand VecList, string asmop>
-  : NeonI_LdStMult<q, 0, opcode, size,
-                 (outs), (ins GPR64xsp:$Rn, VecList:$Rt),
-                 asmop # "\t$Rt, [$Rn]",
-                 [],
-                 NoItinerary>,
-    Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
-  let mayStore = 1;
-  let neverHasSideEffects = 1;
-}
-
-multiclass STVList_BHSD<bits<4> opcode, string List, string asmop> {
-  def _8B : NeonI_STVList<0, opcode, 0b00,
-                          !cast<RegisterOperand>(List # "8B_operand"), asmop>;
-
-  def _4H : NeonI_STVList<0, opcode, 0b01,
-                          !cast<RegisterOperand>(List # "4H_operand"), asmop>;
-
-  def _2S : NeonI_STVList<0, opcode, 0b10,
-                          !cast<RegisterOperand>(List # "2S_operand"), asmop>;
-
-  def _16B : NeonI_STVList<1, opcode, 0b00,
-                           !cast<RegisterOperand>(List # "16B_operand"), asmop>;
-
-  def _8H : NeonI_STVList<1, opcode, 0b01,
-                          !cast<RegisterOperand>(List # "8H_operand"), asmop>;
-
-  def _4S : NeonI_STVList<1, opcode, 0b10,
-                          !cast<RegisterOperand>(List # "4S_operand"), asmop>;
-
-  def _2D : NeonI_STVList<1, opcode, 0b11,
-                          !cast<RegisterOperand>(List # "2D_operand"), asmop>;
-}
-
-// Store multiple N-element structures from N registers (N = 1,2,3,4)
-defm ST1 : STVList_BHSD<0b0111, "VOne", "st1">;
-def ST1_1D : NeonI_STVList<0, 0b0111, 0b11, VOne1D_operand, "st1">;
-
-defm ST2 : STVList_BHSD<0b1000, "VPair", "st2">;
-
-defm ST3 : STVList_BHSD<0b0100, "VTriple", "st3">;
-
-defm ST4 : STVList_BHSD<0b0000, "VQuad", "st4">;
-
-// Store multiple 1-element structures from N consecutive registers (N = 2,3,4)
-defm ST1x2 : STVList_BHSD<0b1010, "VPair", "st1">;
-def ST1x2_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">;
-
-defm ST1x3 : STVList_BHSD<0b0110, "VTriple", "st1">;
-def ST1x3_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">;
-
-defm ST1x4 : STVList_BHSD<0b0010, "VQuad", "st1">;
-def ST1x4_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
-
-def : Pat<(v2f64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
-def : Pat<(v2i64 (load GPR64xsp:$addr)), (LD1_2D GPR64xsp:$addr)>;
-
-def : Pat<(v4f32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
-def : Pat<(v4i32 (load GPR64xsp:$addr)), (LD1_4S GPR64xsp:$addr)>;
-
-def : Pat<(v8i16 (load GPR64xsp:$addr)), (LD1_8H GPR64xsp:$addr)>;
-def : Pat<(v16i8 (load GPR64xsp:$addr)), (LD1_16B GPR64xsp:$addr)>;
-
-def : Pat<(v1f64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
-def : Pat<(v1i64 (load GPR64xsp:$addr)), (LD1_1D GPR64xsp:$addr)>;
-
-def : Pat<(v2f32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
-def : Pat<(v2i32 (load GPR64xsp:$addr)), (LD1_2S GPR64xsp:$addr)>;
-
-def : Pat<(v4i16 (load GPR64xsp:$addr)), (LD1_4H GPR64xsp:$addr)>;
-def : Pat<(v8i8 (load GPR64xsp:$addr)), (LD1_8B GPR64xsp:$addr)>;
-
-def : Pat<(store (v2i64 VPR128:$value), GPR64xsp:$addr),
-          (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
-def : Pat<(store (v2f64 VPR128:$value), GPR64xsp:$addr),
-          (ST1_2D GPR64xsp:$addr, VPR128:$value)>;
-
-def : Pat<(store (v4i32 VPR128:$value), GPR64xsp:$addr),
-          (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
-def : Pat<(store (v4f32 VPR128:$value), GPR64xsp:$addr),
-          (ST1_4S GPR64xsp:$addr, VPR128:$value)>;
-
-def : Pat<(store (v8i16 VPR128:$value), GPR64xsp:$addr),
-          (ST1_8H GPR64xsp:$addr, VPR128:$value)>;
-def : Pat<(store (v16i8 VPR128:$value), GPR64xsp:$addr),
-          (ST1_16B GPR64xsp:$addr, VPR128:$value)>;
-
-def : Pat<(store (v1i64 VPR64:$value), GPR64xsp:$addr),
-          (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
-def : Pat<(store (v1f64 VPR64:$value), GPR64xsp:$addr),
-          (ST1_1D GPR64xsp:$addr, VPR64:$value)>;
-
-def : Pat<(store (v2i32 VPR64:$value), GPR64xsp:$addr),
-          (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
-def : Pat<(store (v2f32 VPR64:$value), GPR64xsp:$addr),
-          (ST1_2S GPR64xsp:$addr, VPR64:$value)>;
-
-def : Pat<(store (v4i16 VPR64:$value), GPR64xsp:$addr),
-          (ST1_4H GPR64xsp:$addr, VPR64:$value)>;
-def : Pat<(store (v8i8 VPR64:$value), GPR64xsp:$addr),
-          (ST1_8B GPR64xsp:$addr, VPR64:$value)>;
-
-// Match load/store of v1i8/v1i16/v1i32 type to FPR8/FPR16/FPR32 load/store.
-// FIXME: for now we have v1i8, v1i16, v1i32 legal types, if they are illegal,
-// these patterns are not needed any more.
-def : Pat<(v1i8 (load GPR64xsp:$addr)), (LSFP8_LDR $addr, 0)>;
-def : Pat<(v1i16 (load GPR64xsp:$addr)), (LSFP16_LDR $addr, 0)>;
-def : Pat<(v1i32 (load GPR64xsp:$addr)), (LSFP32_LDR $addr, 0)>;
-
-def : Pat<(store (v1i8 FPR8:$value), GPR64xsp:$addr),
-          (LSFP8_STR $value, $addr, 0)>;
-def : Pat<(store (v1i16 FPR16:$value), GPR64xsp:$addr),
-          (LSFP16_STR $value, $addr, 0)>;
-def : Pat<(store (v1i32 FPR32:$value), GPR64xsp:$addr),
-          (LSFP32_STR $value, $addr, 0)>;
-
-
-// End of vector load/store multiple N-element structure(class SIMD lselem)
-
-// The followings are post-index vector load/store multiple N-element
-// structure(class SIMD lselem-post)
-def exact1_asmoperand : AsmOperandClass {
-  let Name = "Exact1";
-  let PredicateMethod = "isExactImm<1>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact1 : Operand<i32>, ImmLeaf<i32, [{return Imm == 1;}]> {
-  let ParserMatchClass = exact1_asmoperand;
-}
-
-def exact2_asmoperand : AsmOperandClass {
-  let Name = "Exact2";
-  let PredicateMethod = "isExactImm<2>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact2 : Operand<i32>, ImmLeaf<i32, [{return Imm == 2;}]> {
-  let ParserMatchClass = exact2_asmoperand;
-}
-
-def exact3_asmoperand : AsmOperandClass {
-  let Name = "Exact3";
-  let PredicateMethod = "isExactImm<3>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact3 : Operand<i32>, ImmLeaf<i32, [{return Imm == 3;}]> {
-  let ParserMatchClass = exact3_asmoperand;
-}
-
-def exact4_asmoperand : AsmOperandClass {
-  let Name = "Exact4";
-  let PredicateMethod = "isExactImm<4>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact4 : Operand<i32>, ImmLeaf<i32, [{return Imm == 4;}]> {
-  let ParserMatchClass = exact4_asmoperand;
-}
-
-def exact6_asmoperand : AsmOperandClass {
-  let Name = "Exact6";
-  let PredicateMethod = "isExactImm<6>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact6 : Operand<i32>, ImmLeaf<i32, [{return Imm == 6;}]> {
-  let ParserMatchClass = exact6_asmoperand;
-}
-
-def exact8_asmoperand : AsmOperandClass {
-  let Name = "Exact8";
-  let PredicateMethod = "isExactImm<8>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact8 : Operand<i32>, ImmLeaf<i32, [{return Imm == 8;}]> {
-  let ParserMatchClass = exact8_asmoperand;
-}
-
-def exact12_asmoperand : AsmOperandClass {
-  let Name = "Exact12";
-  let PredicateMethod = "isExactImm<12>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact12 : Operand<i32>, ImmLeaf<i32, [{return Imm == 12;}]> {
-  let ParserMatchClass = exact12_asmoperand;
-}
-
-def exact16_asmoperand : AsmOperandClass {
-  let Name = "Exact16";
-  let PredicateMethod = "isExactImm<16>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact16 : Operand<i32>, ImmLeaf<i32, [{return Imm == 16;}]> {
-  let ParserMatchClass = exact16_asmoperand;
-}
-
-def exact24_asmoperand : AsmOperandClass {
-  let Name = "Exact24";
-  let PredicateMethod = "isExactImm<24>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact24 : Operand<i32>, ImmLeaf<i32, [{return Imm == 24;}]> {
-  let ParserMatchClass = exact24_asmoperand;
-}
-
-def exact32_asmoperand : AsmOperandClass {
-  let Name = "Exact32";
-  let PredicateMethod = "isExactImm<32>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact32 : Operand<i32>, ImmLeaf<i32, [{return Imm == 32;}]> {
-  let ParserMatchClass = exact32_asmoperand;
-}
-
-def exact48_asmoperand : AsmOperandClass {
-  let Name = "Exact48";
-  let PredicateMethod = "isExactImm<48>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact48 : Operand<i32>, ImmLeaf<i32, [{return Imm == 48;}]> {
-  let ParserMatchClass = exact48_asmoperand;
-}
-
-def exact64_asmoperand : AsmOperandClass {
-  let Name = "Exact64";
-  let PredicateMethod = "isExactImm<64>";
-  let RenderMethod = "addImmOperands";
-}
-def uimm_exact64 : Operand<i32>, ImmLeaf<i32, [{return Imm == 64;}]> {
-  let ParserMatchClass = exact64_asmoperand;
-}
-
-multiclass NeonI_LDWB_VList<bit q, bits<4> opcode, bits<2> size,
-                           RegisterOperand VecList, Operand ImmTy,
-                           string asmop> {
-  let Constraints = "$Rn = $wb", mayLoad = 1, neverHasSideEffects = 1,
-      DecoderMethod = "DecodeVLDSTPostInstruction" in {
-    def _fixed : NeonI_LdStMult_Post<q, 1, opcode, size,
-                     (outs VecList:$Rt, GPR64xsp:$wb),
-                     (ins GPR64xsp:$Rn, ImmTy:$amt),
-                     asmop # "\t$Rt, [$Rn], $amt",
-                     [],
-                     NoItinerary>,
-                 Sched<[WriteVecLd, WriteVecLd, ReadVecLd]> {
-      let Rm = 0b11111;
-    }
-
-    def _register : NeonI_LdStMult_Post<q, 1, opcode, size,
-                        (outs VecList:$Rt, GPR64xsp:$wb),
-                        (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
-                        asmop # "\t$Rt, [$Rn], $Rm",
-                        [],
-                        NoItinerary>,
-                    Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd]>;
-  }
-}
-
-multiclass LDWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
-    Operand ImmTy2, string asmop> {
-  defm _8B : NeonI_LDWB_VList<0, opcode, 0b00,
-                              !cast<RegisterOperand>(List # "8B_operand"),
-                              ImmTy, asmop>;
-
-  defm _4H : NeonI_LDWB_VList<0, opcode, 0b01,
-                              !cast<RegisterOperand>(List # "4H_operand"),
-                              ImmTy, asmop>;
-
-  defm _2S : NeonI_LDWB_VList<0, opcode, 0b10,
-                              !cast<RegisterOperand>(List # "2S_operand"),
-                              ImmTy, asmop>;
-
-  defm _16B : NeonI_LDWB_VList<1, opcode, 0b00,
-                               !cast<RegisterOperand>(List # "16B_operand"),
-                               ImmTy2, asmop>;
-
-  defm _8H : NeonI_LDWB_VList<1, opcode, 0b01,
-                              !cast<RegisterOperand>(List # "8H_operand"),
-                              ImmTy2, asmop>;
-
-  defm _4S : NeonI_LDWB_VList<1, opcode, 0b10,
-                              !cast<RegisterOperand>(List # "4S_operand"),
-                              ImmTy2, asmop>;
-
-  defm _2D : NeonI_LDWB_VList<1, opcode, 0b11,
-                              !cast<RegisterOperand>(List # "2D_operand"),
-                              ImmTy2, asmop>;
-}
-
-// Post-index load multiple N-element structures from N registers (N = 1,2,3,4)
-defm LD1WB : LDWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "ld1">;
-defm LD1WB_1D : NeonI_LDWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
-                                 "ld1">;
-
-defm LD2WB : LDWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "ld2">;
-
-defm LD3WB : LDWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
-                             "ld3">;
-
-defm LD4WB : LDWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "ld4">;
-
-// Post-index load multiple 1-element structures from N consecutive registers
-// (N = 2,3,4)
-defm LD1x2WB : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
-                               "ld1">;
-defm LD1x2WB_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
-                                   uimm_exact16, "ld1">;
-
-defm LD1x3WB : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
-                               "ld1">;
-defm LD1x3WB_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
-                                   uimm_exact24, "ld1">;
-
-defm LD1x4WB : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
-                                "ld1">;
-defm LD1x4WB_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
-                                   uimm_exact32, "ld1">;
-
-multiclass NeonI_STWB_VList<bit q, bits<4> opcode, bits<2> size,
-                            RegisterOperand VecList, Operand ImmTy,
-                            string asmop> {
-  let Constraints = "$Rn = $wb", mayStore = 1, neverHasSideEffects = 1,
-      DecoderMethod = "DecodeVLDSTPostInstruction" in {
-    def _fixed : NeonI_LdStMult_Post<q, 0, opcode, size,
-                     (outs GPR64xsp:$wb),
-                     (ins GPR64xsp:$Rn, ImmTy:$amt, VecList:$Rt),
-                     asmop # "\t$Rt, [$Rn], $amt",
-                     [],
-                     NoItinerary>,
-                 Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
-      let Rm = 0b11111;
-    }
-
-    def _register : NeonI_LdStMult_Post<q, 0, opcode, size,
-                      (outs GPR64xsp:$wb),
-                      (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VecList:$Rt),
-                      asmop # "\t$Rt, [$Rn], $Rm",
-                      [],
-                      NoItinerary>,
-                    Sched<[WriteVecSt, ReadVecSt, ReadVecSt, ReadVecSt]>;
-  }
-}
-
-multiclass STWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
-                           Operand ImmTy2, string asmop> {
-  defm _8B : NeonI_STWB_VList<0, opcode, 0b00,
-                 !cast<RegisterOperand>(List # "8B_operand"), ImmTy, asmop>;
-
-  defm _4H : NeonI_STWB_VList<0, opcode, 0b01,
-                              !cast<RegisterOperand>(List # "4H_operand"),
-                              ImmTy, asmop>;
-
-  defm _2S : NeonI_STWB_VList<0, opcode, 0b10,
-                              !cast<RegisterOperand>(List # "2S_operand"),
-                              ImmTy, asmop>;
-
-  defm _16B : NeonI_STWB_VList<1, opcode, 0b00,
-                               !cast<RegisterOperand>(List # "16B_operand"),
-                               ImmTy2, asmop>;
-
-  defm _8H : NeonI_STWB_VList<1, opcode, 0b01,
-                              !cast<RegisterOperand>(List # "8H_operand"),
-                              ImmTy2, asmop>;
-
-  defm _4S : NeonI_STWB_VList<1, opcode, 0b10,
-                              !cast<RegisterOperand>(List # "4S_operand"),
-                              ImmTy2, asmop>;
-
-  defm _2D : NeonI_STWB_VList<1, opcode, 0b11,
-                              !cast<RegisterOperand>(List # "2D_operand"),
-                              ImmTy2, asmop>;
-}
-
-// Post-index load multiple N-element structures from N registers (N = 1,2,3,4)
-defm ST1WB : STWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "st1">;
-defm ST1WB_1D : NeonI_STWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
-                                 "st1">;
-
-defm ST2WB : STWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "st2">;
-
-defm ST3WB : STWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
-                             "st3">;
-
-defm ST4WB : STWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "st4">;
-
-// Post-index load multiple 1-element structures from N consecutive registers
-// (N = 2,3,4)
-defm ST1x2WB : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
-                               "st1">;
-defm ST1x2WB_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
-                                   uimm_exact16, "st1">;
-
-defm ST1x3WB : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
-                               "st1">;
-defm ST1x3WB_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
-                                   uimm_exact24, "st1">;
-
-defm ST1x4WB : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
-                               "st1">;
-defm ST1x4WB_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
-                                   uimm_exact32, "st1">;
-
-// End of post-index vector load/store multiple N-element structure
-// (class SIMD lselem-post)
-
-// The followings are vector load/store single N-element structure
-// (class SIMD lsone).
-def neon_uimm0_bare : Operand<i64>,
-                        ImmLeaf<i64, [{return Imm == 0;}]> {
-  let ParserMatchClass = neon_uimm0_asmoperand;
-  let PrintMethod = "printUImmBareOperand";
-}
-
-def neon_uimm1_bare : Operand<i64>,
-                        ImmLeaf<i64, [{return Imm < 2;}]> {
-  let ParserMatchClass = neon_uimm1_asmoperand;
-  let PrintMethod = "printUImmBareOperand";
-}
-
-def neon_uimm2_bare : Operand<i64>,
-                        ImmLeaf<i64, [{return Imm < 4;}]> {
-  let ParserMatchClass = neon_uimm2_asmoperand;
-  let PrintMethod = "printUImmBareOperand";
-}
-
-def neon_uimm3_bare : Operand<i64>,
-                        ImmLeaf<i64, [{return Imm < 8;}]> {
-  let ParserMatchClass = uimm3_asmoperand;
-  let PrintMethod = "printUImmBareOperand";
-}
-
-def neon_uimm4_bare : Operand<i64>,
-                        ImmLeaf<i64, [{return Imm < 16;}]> {
-  let ParserMatchClass = uimm4_asmoperand;
-  let PrintMethod = "printUImmBareOperand";
-}
-
-class NeonI_LDN_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
-                    RegisterOperand VecList, string asmop>
-    : NeonI_LdOne_Dup<q, r, opcode, size,
-                      (outs VecList:$Rt), (ins GPR64xsp:$Rn),
-                      asmop # "\t$Rt, [$Rn]",
-                      [],
-                      NoItinerary>,
-      Sched<[WriteVecLd, ReadVecLd]> {
-  let mayLoad = 1;
-  let neverHasSideEffects = 1;
-}
-
-multiclass LDN_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop> {
-  def _8B : NeonI_LDN_Dup<0, r, opcode, 0b00,
-                          !cast<RegisterOperand>(List # "8B_operand"), asmop>;
-
-  def _4H : NeonI_LDN_Dup<0, r, opcode, 0b01,
-                          !cast<RegisterOperand>(List # "4H_operand"), asmop>;
-
-  def _2S : NeonI_LDN_Dup<0, r, opcode, 0b10,
-                          !cast<RegisterOperand>(List # "2S_operand"), asmop>;
-
-  def _1D : NeonI_LDN_Dup<0, r, opcode, 0b11,
-                          !cast<RegisterOperand>(List # "1D_operand"), asmop>;
-
-  def _16B : NeonI_LDN_Dup<1, r, opcode, 0b00,
-                           !cast<RegisterOperand>(List # "16B_operand"), asmop>;
-
-  def _8H : NeonI_LDN_Dup<1, r, opcode, 0b01,
-                          !cast<RegisterOperand>(List # "8H_operand"), asmop>;
-
-  def _4S : NeonI_LDN_Dup<1, r, opcode, 0b10,
-                          !cast<RegisterOperand>(List # "4S_operand"), asmop>;
-
-  def _2D : NeonI_LDN_Dup<1, r, opcode, 0b11,
-                          !cast<RegisterOperand>(List # "2D_operand"), asmop>;
-}
-
-// Load single 1-element structure to all lanes of 1 register
-defm LD1R : LDN_Dup_BHSD<0b0, 0b110, "VOne", "ld1r">;
-
-// Load single N-element structure to all lanes of N consecutive
-// registers (N = 2,3,4)
-defm LD2R : LDN_Dup_BHSD<0b1, 0b110, "VPair", "ld2r">;
-defm LD3R : LDN_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r">;
-defm LD4R : LDN_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r">;
-
-
-class LD1R_pattern <ValueType VTy, ValueType DTy, PatFrag LoadOp,
-                    Instruction INST>
-    : Pat<(VTy (Neon_vdup (DTy (LoadOp GPR64xsp:$Rn)))),
-          (VTy (INST GPR64xsp:$Rn))>;
-
-// Match all LD1R instructions
-def : LD1R_pattern<v8i8, i32, extloadi8, LD1R_8B>;
-
-def : LD1R_pattern<v16i8, i32, extloadi8, LD1R_16B>;
-
-def : LD1R_pattern<v4i16, i32, extloadi16, LD1R_4H>;
-
-def : LD1R_pattern<v8i16, i32, extloadi16, LD1R_8H>;
-
-def : LD1R_pattern<v2i32, i32, load, LD1R_2S>;
-def : LD1R_pattern<v2f32, f32, load, LD1R_2S>;
-
-def : LD1R_pattern<v4i32, i32, load, LD1R_4S>;
-def : LD1R_pattern<v4f32, f32, load, LD1R_4S>;
-
-def : LD1R_pattern<v2i64, i64, load, LD1R_2D>;
-def : LD1R_pattern<v2f64, f64, load, LD1R_2D>;
-
-class LD1R_pattern_v1 <ValueType VTy, ValueType DTy, PatFrag LoadOp,
-                       Instruction INST>
-  : Pat<(VTy (scalar_to_vector (DTy (LoadOp GPR64xsp:$Rn)))),
-        (VTy (INST GPR64xsp:$Rn))>;
-
-def : LD1R_pattern_v1<v1i64, i64, load, LD1R_1D>;
-def : LD1R_pattern_v1<v1f64, f64, load, LD1R_1D>;
-
-multiclass VectorList_Bare_BHSD<string PREFIX, int Count,
-                                RegisterClass RegList> {
-  defm B : VectorList_operands<PREFIX, "B", Count, RegList>;
-  defm H : VectorList_operands<PREFIX, "H", Count, RegList>;
-  defm S : VectorList_operands<PREFIX, "S", Count, RegList>;
-  defm D : VectorList_operands<PREFIX, "D", Count, RegList>;
-}
-
-// Special vector list operand of 128-bit vectors with bare layout.
-// i.e. only show ".b", ".h", ".s", ".d"
-defm VOne : VectorList_Bare_BHSD<"VOne", 1, FPR128>;
-defm VPair : VectorList_Bare_BHSD<"VPair", 2, QPair>;
-defm VTriple : VectorList_Bare_BHSD<"VTriple", 3, QTriple>;
-defm VQuad : VectorList_Bare_BHSD<"VQuad", 4, QQuad>;
-
-class NeonI_LDN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
-                     Operand ImmOp, string asmop>
-    : NeonI_LdStOne_Lane<1, r, op2_1, op0,
-                         (outs VList:$Rt),
-                         (ins GPR64xsp:$Rn, VList:$src, ImmOp:$lane),
-                         asmop # "\t$Rt[$lane], [$Rn]",
-                         [],
-                         NoItinerary>,
-      Sched<[WriteVecLd, ReadVecLd, ReadVecLd]> {
-  let mayLoad = 1;
-  let neverHasSideEffects = 1;
-  let hasExtraDefRegAllocReq = 1;
-  let Constraints = "$src = $Rt";
-}
-
-multiclass LDN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
-  def _B : NeonI_LDN_Lane<r, 0b00, op0,
-                          !cast<RegisterOperand>(List # "B_operand"),
-                          neon_uimm4_bare, asmop> {
-    let Inst{12-10} = lane{2-0};
-    let Inst{30} = lane{3};
-  }
-
-  def _H : NeonI_LDN_Lane<r, 0b01, op0,
-                          !cast<RegisterOperand>(List # "H_operand"),
-                          neon_uimm3_bare, asmop> {
-    let Inst{12-10} = {lane{1}, lane{0}, 0b0};
-    let Inst{30} = lane{2};
-  }
-
-  def _S : NeonI_LDN_Lane<r, 0b10, op0,
-                          !cast<RegisterOperand>(List # "S_operand"),
-                          neon_uimm2_bare, asmop> {
-    let Inst{12-10} = {lane{0}, 0b0, 0b0};
-    let Inst{30} = lane{1};
-  }
-
-  def _D : NeonI_LDN_Lane<r, 0b10, op0,
-                          !cast<RegisterOperand>(List # "D_operand"),
-                          neon_uimm1_bare, asmop> {
-    let Inst{12-10} = 0b001;
-    let Inst{30} = lane{0};
-  }
-}
-
-// Load single 1-element structure to one lane of 1 register.
-defm LD1LN : LDN_Lane_BHSD<0b0, 0b0, "VOne", "ld1">;
-
-// Load single N-element structure to one lane of N consecutive registers
-// (N = 2,3,4)
-defm LD2LN : LDN_Lane_BHSD<0b1, 0b0, "VPair", "ld2">;
-defm LD3LN : LDN_Lane_BHSD<0b0, 0b1, "VTriple", "ld3">;
-defm LD4LN : LDN_Lane_BHSD<0b1, 0b1, "VQuad", "ld4">;
-
-multiclass LD1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
-                          Operand ImmOp, Operand ImmOp2, PatFrag LoadOp,
-                          Instruction INST> {
-  def : Pat<(VTy (vector_insert (VTy VPR64:$src),
-                     (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp:$lane))),
-            (VTy (EXTRACT_SUBREG
-                     (INST GPR64xsp:$Rn,
-                           (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
-                           ImmOp:$lane),
-                     sub_64))>;
-
-  def : Pat<(VTy2 (vector_insert (VTy2 VPR128:$src),
-                      (DTy (LoadOp GPR64xsp:$Rn)), (ImmOp2:$lane))),
-            (VTy2 (INST GPR64xsp:$Rn, VPR128:$src, ImmOp2:$lane))>;
-}
-
-// Match all LD1LN instructions
-defm : LD1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
-                      extloadi8, LD1LN_B>;
-
-defm : LD1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
-                      extloadi16, LD1LN_H>;
-
-defm : LD1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
-                      load, LD1LN_S>;
-defm : LD1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
-                      load, LD1LN_S>;
-
-defm : LD1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
-                      load, LD1LN_D>;
-defm : LD1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
-                      load, LD1LN_D>;
-
-class NeonI_STN_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
-                     Operand ImmOp, string asmop>
-    : NeonI_LdStOne_Lane<0, r, op2_1, op0,
-                         (outs), (ins GPR64xsp:$Rn, VList:$Rt, ImmOp:$lane),
-                         asmop # "\t$Rt[$lane], [$Rn]",
-                         [],
-                         NoItinerary>,
-      Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
-  let mayStore = 1;
-  let neverHasSideEffects = 1;
-  let hasExtraDefRegAllocReq = 1;
-}
-
-multiclass STN_Lane_BHSD<bit r, bit op0, string List, string asmop> {
-  def _B : NeonI_STN_Lane<r, 0b00, op0,
-                          !cast<RegisterOperand>(List # "B_operand"),
-                          neon_uimm4_bare, asmop> {
-    let Inst{12-10} = lane{2-0};
-    let Inst{30} = lane{3};
-  }
-
-  def _H : NeonI_STN_Lane<r, 0b01, op0,
-                          !cast<RegisterOperand>(List # "H_operand"),
-                          neon_uimm3_bare, asmop> {
-    let Inst{12-10} = {lane{1}, lane{0}, 0b0};
-    let Inst{30} = lane{2};
-  }
-
-  def _S : NeonI_STN_Lane<r, 0b10, op0,
-                          !cast<RegisterOperand>(List # "S_operand"),
-                           neon_uimm2_bare, asmop> {
-    let Inst{12-10} = {lane{0}, 0b0, 0b0};
-    let Inst{30} = lane{1};
-  }
-
-  def _D : NeonI_STN_Lane<r, 0b10, op0,
-                          !cast<RegisterOperand>(List # "D_operand"),
-                          neon_uimm1_bare, asmop>{
-    let Inst{12-10} = 0b001;
-    let Inst{30} = lane{0};
-  }
-}
-
-// Store single 1-element structure from one lane of 1 register.
-defm ST1LN : STN_Lane_BHSD<0b0, 0b0, "VOne", "st1">;
-
-// Store single N-element structure from one lane of N consecutive registers
-// (N = 2,3,4)
-defm ST2LN : STN_Lane_BHSD<0b1, 0b0, "VPair", "st2">;
-defm ST3LN : STN_Lane_BHSD<0b0, 0b1, "VTriple", "st3">;
-defm ST4LN : STN_Lane_BHSD<0b1, 0b1, "VQuad", "st4">;
-
-multiclass ST1LN_patterns<ValueType VTy, ValueType VTy2, ValueType DTy,
-                          Operand ImmOp, Operand ImmOp2, PatFrag StoreOp,
-                          Instruction INST> {
-  def : Pat<(StoreOp (DTy (vector_extract (VTy VPR64:$Rt), ImmOp:$lane)),
-                     GPR64xsp:$Rn),
-            (INST GPR64xsp:$Rn,
-                  (SUBREG_TO_REG (i64 0), VPR64:$Rt, sub_64),
-                  ImmOp:$lane)>;
-
-  def : Pat<(StoreOp (DTy (vector_extract (VTy2 VPR128:$Rt), ImmOp2:$lane)),
-                     GPR64xsp:$Rn),
-            (INST GPR64xsp:$Rn, VPR128:$Rt, ImmOp2:$lane)>;
-}
-
-// Match all ST1LN instructions
-defm : ST1LN_patterns<v8i8, v16i8, i32, neon_uimm3_bare, neon_uimm4_bare,
-                      truncstorei8, ST1LN_B>;
-
-defm : ST1LN_patterns<v4i16, v8i16, i32, neon_uimm2_bare, neon_uimm3_bare,
-                      truncstorei16, ST1LN_H>;
-
-defm : ST1LN_patterns<v2i32, v4i32, i32, neon_uimm1_bare, neon_uimm2_bare,
-                      store, ST1LN_S>;
-defm : ST1LN_patterns<v2f32, v4f32, f32, neon_uimm1_bare, neon_uimm2_bare,
-                      store, ST1LN_S>;
-
-defm : ST1LN_patterns<v1i64, v2i64, i64, neon_uimm0_bare, neon_uimm1_bare,
-                      store, ST1LN_D>;
-defm : ST1LN_patterns<v1f64, v2f64, f64, neon_uimm0_bare, neon_uimm1_bare,
-                      store, ST1LN_D>;
-
-// End of vector load/store single N-element structure (class SIMD lsone).
-
-
-// The following are post-index load/store single N-element instructions
-// (class SIMD lsone-post)
-
-multiclass NeonI_LDN_WB_Dup<bit q, bit r, bits<3> opcode, bits<2> size,
-                            RegisterOperand VecList, Operand ImmTy,
-                            string asmop> {
-  let mayLoad = 1, neverHasSideEffects = 1, Constraints = "$wb = $Rn",
-  DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
-    def _fixed : NeonI_LdOne_Dup_Post<q, r, opcode, size,
-                      (outs VecList:$Rt, GPR64xsp:$wb),
-                      (ins GPR64xsp:$Rn, ImmTy:$amt),
-                      asmop # "\t$Rt, [$Rn], $amt",
-                      [],
-                      NoItinerary>,
-                 Sched<[WriteVecLd, WriteVecLd, ReadVecLd]> {
-      let Rm = 0b11111;
-    }
-
-    def _register : NeonI_LdOne_Dup_Post<q, r, opcode, size,
-                      (outs VecList:$Rt, GPR64xsp:$wb),
-                      (ins GPR64xsp:$Rn, GPR64noxzr:$Rm),
-                      asmop # "\t$Rt, [$Rn], $Rm",
-                      [],
-                      NoItinerary>,
-                    Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd]>;
-  }
-}
-
-multiclass LDWB_Dup_BHSD<bit r, bits<3> opcode, string List, string asmop,
-                         Operand uimm_b, Operand uimm_h,
-                         Operand uimm_s, Operand uimm_d> {
-  defm _8B : NeonI_LDN_WB_Dup<0, r, opcode, 0b00,
-                              !cast<RegisterOperand>(List # "8B_operand"),
-                              uimm_b, asmop>;
-
-  defm _4H : NeonI_LDN_WB_Dup<0, r, opcode, 0b01,
-                              !cast<RegisterOperand>(List # "4H_operand"),
-                              uimm_h, asmop>;
-
-  defm _2S : NeonI_LDN_WB_Dup<0, r, opcode, 0b10,
-                              !cast<RegisterOperand>(List # "2S_operand"),
-                              uimm_s, asmop>;
-
-  defm _1D : NeonI_LDN_WB_Dup<0, r, opcode, 0b11,
-                              !cast<RegisterOperand>(List # "1D_operand"),
-                              uimm_d, asmop>;
-
-  defm _16B : NeonI_LDN_WB_Dup<1, r, opcode, 0b00,
-                               !cast<RegisterOperand>(List # "16B_operand"),
-                               uimm_b, asmop>;
-
-  defm _8H : NeonI_LDN_WB_Dup<1, r, opcode, 0b01,
-                              !cast<RegisterOperand>(List # "8H_operand"),
-                              uimm_h, asmop>;
-
-  defm _4S : NeonI_LDN_WB_Dup<1, r, opcode, 0b10,
-                              !cast<RegisterOperand>(List # "4S_operand"),
-                              uimm_s, asmop>;
-
-  defm _2D : NeonI_LDN_WB_Dup<1, r, opcode, 0b11,
-                              !cast<RegisterOperand>(List # "2D_operand"),
-                              uimm_d, asmop>;
-}
-
-// Post-index load single 1-element structure to all lanes of 1 register
-defm LD1R_WB : LDWB_Dup_BHSD<0b0, 0b110, "VOne", "ld1r", uimm_exact1,
-                             uimm_exact2, uimm_exact4, uimm_exact8>;
-
-// Post-index load single N-element structure to all lanes of N consecutive
-// registers (N = 2,3,4)
-defm LD2R_WB : LDWB_Dup_BHSD<0b1, 0b110, "VPair", "ld2r", uimm_exact2,
-                             uimm_exact4, uimm_exact8, uimm_exact16>;
-defm LD3R_WB : LDWB_Dup_BHSD<0b0, 0b111, "VTriple", "ld3r", uimm_exact3,
-                             uimm_exact6, uimm_exact12, uimm_exact24>;
-defm LD4R_WB : LDWB_Dup_BHSD<0b1, 0b111, "VQuad", "ld4r", uimm_exact4,
-                             uimm_exact8, uimm_exact16, uimm_exact32>;
-
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1,
-    Constraints = "$Rn = $wb, $Rt = $src",
-    DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
-  class LDN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
-                                Operand ImmTy, Operand ImmOp, string asmop>
-      : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
-                                (outs VList:$Rt, GPR64xsp:$wb),
-                                (ins GPR64xsp:$Rn, ImmTy:$amt,
-                                    VList:$src, ImmOp:$lane),
-                                asmop # "\t$Rt[$lane], [$Rn], $amt",
-                                [],
-                                NoItinerary>,
-        Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd]> {
-    let Rm = 0b11111;
-  }
-
-  class LDN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
-                                 Operand ImmTy, Operand ImmOp, string asmop>
-      : NeonI_LdStOne_Lane_Post<1, r, op2_1, op0,
-                                (outs VList:$Rt, GPR64xsp:$wb),
-                                (ins GPR64xsp:$Rn, GPR64noxzr:$Rm,
-                                    VList:$src, ImmOp:$lane),
-                                asmop # "\t$Rt[$lane], [$Rn], $Rm",
-                                [],
-                                NoItinerary>,
-        Sched<[WriteVecLd, WriteVecLd, ReadVecLd, ReadVecLd, ReadVecLd]>;
-}
-
-multiclass LD_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
-                           Operand uimm_b, Operand uimm_h,
-                           Operand uimm_s, Operand uimm_d> {
-  def _B_fixed : LDN_WBFx_Lane<r, 0b00, op0,
-                               !cast<RegisterOperand>(List # "B_operand"),
-                               uimm_b, neon_uimm4_bare, asmop> {
-    let Inst{12-10} = lane{2-0};
-    let Inst{30} = lane{3};
-  }
-
-  def _B_register : LDN_WBReg_Lane<r, 0b00, op0,
-                                   !cast<RegisterOperand>(List # "B_operand"),
-                                   uimm_b, neon_uimm4_bare, asmop> {
-    let Inst{12-10} = lane{2-0};
-    let Inst{30} = lane{3};
-  }
-
-  def _H_fixed : LDN_WBFx_Lane<r, 0b01, op0,
-                               !cast<RegisterOperand>(List # "H_operand"),
-                               uimm_h, neon_uimm3_bare, asmop> {
-    let Inst{12-10} = {lane{1}, lane{0}, 0b0};
-    let Inst{30} = lane{2};
-  }
-
-  def _H_register : LDN_WBReg_Lane<r, 0b01, op0,
-                                   !cast<RegisterOperand>(List # "H_operand"),
-                                   uimm_h, neon_uimm3_bare, asmop> {
-    let Inst{12-10} = {lane{1}, lane{0}, 0b0};
-    let Inst{30} = lane{2};
-  }
-
-  def _S_fixed : LDN_WBFx_Lane<r, 0b10, op0,
-                               !cast<RegisterOperand>(List # "S_operand"),
-                               uimm_s, neon_uimm2_bare, asmop> {
-    let Inst{12-10} = {lane{0}, 0b0, 0b0};
-    let Inst{30} = lane{1};
-  }
-
-  def _S_register : LDN_WBReg_Lane<r, 0b10, op0,
-                                   !cast<RegisterOperand>(List # "S_operand"),
-                                   uimm_s, neon_uimm2_bare, asmop> {
-    let Inst{12-10} = {lane{0}, 0b0, 0b0};
-    let Inst{30} = lane{1};
-  }
-
-  def _D_fixed : LDN_WBFx_Lane<r, 0b10, op0,
-                               !cast<RegisterOperand>(List # "D_operand"),
-                               uimm_d, neon_uimm1_bare, asmop> {
-    let Inst{12-10} = 0b001;
-    let Inst{30} = lane{0};
-  }
-
-  def _D_register : LDN_WBReg_Lane<r, 0b10, op0,
-                                   !cast<RegisterOperand>(List # "D_operand"),
-                                   uimm_d, neon_uimm1_bare, asmop> {
-    let Inst{12-10} = 0b001;
-    let Inst{30} = lane{0};
-  }
-}
-
-// Post-index load single 1-element structure to one lane of 1 register.
-defm LD1LN_WB : LD_Lane_WB_BHSD<0b0, 0b0, "VOne", "ld1", uimm_exact1,
-                                uimm_exact2, uimm_exact4, uimm_exact8>;
-
-// Post-index load single N-element structure to one lane of N consecutive
-// registers
-// (N = 2,3,4)
-defm LD2LN_WB : LD_Lane_WB_BHSD<0b1, 0b0, "VPair", "ld2", uimm_exact2,
-                                uimm_exact4, uimm_exact8, uimm_exact16>;
-defm LD3LN_WB : LD_Lane_WB_BHSD<0b0, 0b1, "VTriple", "ld3", uimm_exact3,
-                                uimm_exact6, uimm_exact12, uimm_exact24>;
-defm LD4LN_WB : LD_Lane_WB_BHSD<0b1, 0b1, "VQuad", "ld4", uimm_exact4,
-                                uimm_exact8, uimm_exact16, uimm_exact32>;
-
-let mayStore = 1, neverHasSideEffects = 1,
-    hasExtraDefRegAllocReq = 1, Constraints = "$Rn = $wb",
-    DecoderMethod = "DecodeVLDSTLanePostInstruction" in {
-  class STN_WBFx_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
-                      Operand ImmTy, Operand ImmOp, string asmop>
-      : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
-                                (outs GPR64xsp:$wb),
-                                (ins GPR64xsp:$Rn, ImmTy:$amt,
-                                    VList:$Rt, ImmOp:$lane),
-                                asmop # "\t$Rt[$lane], [$Rn], $amt",
-                                [],
-                                NoItinerary>,
-        Sched<[WriteVecSt, ReadVecSt, ReadVecSt]> {
-    let Rm = 0b11111;
-  }
-
-  class STN_WBReg_Lane<bit r, bits<2> op2_1, bit op0, RegisterOperand VList,
-                       Operand ImmTy, Operand ImmOp, string asmop>
-      : NeonI_LdStOne_Lane_Post<0, r, op2_1, op0,
-                                (outs GPR64xsp:$wb),
-                                (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VList:$Rt,
-                                    ImmOp:$lane),
-                                asmop # "\t$Rt[$lane], [$Rn], $Rm",
-                                [],
-                                NoItinerary>,
-        Sched<[WriteVecSt, ReadVecSt, ReadVecSt, ReadVecSt]>;
-}
-
-multiclass ST_Lane_WB_BHSD<bit r, bit op0, string List, string asmop,
-                           Operand uimm_b, Operand uimm_h,
-                           Operand uimm_s, Operand uimm_d> {
-  def _B_fixed : STN_WBFx_Lane<r, 0b00, op0,
-                               !cast<RegisterOperand>(List # "B_operand"),
-                               uimm_b, neon_uimm4_bare, asmop> {
-    let Inst{12-10} = lane{2-0};
-    let Inst{30} = lane{3};
-  }
-
-  def _B_register : STN_WBReg_Lane<r, 0b00, op0,
-                                   !cast<RegisterOperand>(List # "B_operand"),
-                                   uimm_b, neon_uimm4_bare, asmop> {
-    let Inst{12-10} = lane{2-0};
-    let Inst{30} = lane{3};
-  }
-
-  def _H_fixed : STN_WBFx_Lane<r, 0b01, op0,
-                               !cast<RegisterOperand>(List # "H_operand"),
-                               uimm_h, neon_uimm3_bare, asmop> {
-    let Inst{12-10} = {lane{1}, lane{0}, 0b0};
-    let Inst{30} = lane{2};
-  }
-
-  def _H_register : STN_WBReg_Lane<r, 0b01, op0,
-                                   !cast<RegisterOperand>(List # "H_operand"),
-                                   uimm_h, neon_uimm3_bare, asmop> {
-    let Inst{12-10} = {lane{1}, lane{0}, 0b0};
-    let Inst{30} = lane{2};
-  }
-
-  def _S_fixed : STN_WBFx_Lane<r, 0b10, op0,
-                               !cast<RegisterOperand>(List # "S_operand"),
-                               uimm_s, neon_uimm2_bare, asmop> {
-    let Inst{12-10} = {lane{0}, 0b0, 0b0};
-    let Inst{30} = lane{1};
-  }
-
-  def _S_register : STN_WBReg_Lane<r, 0b10, op0,
-                                   !cast<RegisterOperand>(List # "S_operand"),
-                                   uimm_s, neon_uimm2_bare, asmop> {
-    let Inst{12-10} = {lane{0}, 0b0, 0b0};
-    let Inst{30} = lane{1};
-  }
-
-  def _D_fixed : STN_WBFx_Lane<r, 0b10, op0,
-                               !cast<RegisterOperand>(List # "D_operand"),
-                               uimm_d, neon_uimm1_bare, asmop> {
-    let Inst{12-10} = 0b001;
-    let Inst{30} = lane{0};
-  }
-
-  def _D_register : STN_WBReg_Lane<r, 0b10, op0,
-                                   !cast<RegisterOperand>(List # "D_operand"),
-                                   uimm_d, neon_uimm1_bare, asmop> {
-    let Inst{12-10} = 0b001;
-    let Inst{30} = lane{0};
-  }
-}
-
-// Post-index store single 1-element structure from one lane of 1 register.
-defm ST1LN_WB : ST_Lane_WB_BHSD<0b0, 0b0, "VOne", "st1", uimm_exact1,
-                                uimm_exact2, uimm_exact4, uimm_exact8>;
-
-// Post-index store single N-element structure from one lane of N consecutive
-// registers (N = 2,3,4)
-defm ST2LN_WB : ST_Lane_WB_BHSD<0b1, 0b0, "VPair", "st2", uimm_exact2,
-                                uimm_exact4, uimm_exact8, uimm_exact16>;
-defm ST3LN_WB : ST_Lane_WB_BHSD<0b0, 0b1, "VTriple", "st3", uimm_exact3,
-                                uimm_exact6, uimm_exact12, uimm_exact24>;
-defm ST4LN_WB : ST_Lane_WB_BHSD<0b1, 0b1, "VQuad", "st4", uimm_exact4,
-                                uimm_exact8, uimm_exact16, uimm_exact32>;
-
-// End of post-index load/store single N-element instructions
-// (class SIMD lsone-post)
-
-// Neon Scalar instructions implementation
-// Scalar Three Same
-
-class NeonI_Scalar3Same_size<bit u, bits<2> size, bits<5> opcode, string asmop,
-                             RegisterClass FPRC>
-  : NeonI_Scalar3Same<u, size, opcode,
-                      (outs FPRC:$Rd), (ins FPRC:$Rn, FPRC:$Rm),
-                      !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
-                      [],
-                      NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
-  : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
-
-multiclass NeonI_Scalar3Same_HS_sizes<bit u, bits<5> opcode, string asmop,
-                                      bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
-    def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
-  }
-}
-
-multiclass NeonI_Scalar3Same_SD_sizes<bit u, bit size_high, bits<5> opcode,
-                                      string asmop, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def sss : NeonI_Scalar3Same_size<u, {size_high, 0b0}, opcode, asmop, FPR32>;
-    def ddd : NeonI_Scalar3Same_size<u, {size_high, 0b1}, opcode, asmop, FPR64>;
-  }
-}
-
-multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
-                                        string asmop, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def bbb : NeonI_Scalar3Same_size<u, 0b00, opcode, asmop, FPR8>;
-    def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
-    def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
-    def ddd : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
-  }
-}
-
-multiclass Neon_Scalar3Same_D_size_patterns<SDPatternOperator opnode,
-                                            Instruction INSTD> {
-  def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
-            (INSTD FPR64:$Rn, FPR64:$Rm)>;
-}
-
-multiclass Neon_Scalar3Same_BHSD_size_patterns<SDPatternOperator opnode,
-                                               Instruction INSTB,
-                                               Instruction INSTH,
-                                               Instruction INSTS,
-                                               Instruction INSTD>
-  : Neon_Scalar3Same_D_size_patterns<opnode, INSTD> {
-  def: Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
-           (INSTB FPR8:$Rn, FPR8:$Rm)>;
-  def: Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
-           (INSTH FPR16:$Rn, FPR16:$Rm)>;
-  def: Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
-           (INSTS FPR32:$Rn, FPR32:$Rm)>;
-}
-
-multiclass Neon_Scalar3Same_HS_size_patterns<SDPatternOperator opnode,
-                                             Instruction INSTH,
-                                             Instruction INSTS> {
-  def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
-            (INSTH FPR16:$Rn, FPR16:$Rm)>;
-  def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
-            (INSTS FPR32:$Rn, FPR32:$Rm)>;
-}
-
-multiclass Neon_Scalar3Same_SD_size_patterns<SDPatternOperator opnode,
-                                             ValueType SResTy, ValueType STy,
-                                             Instruction INSTS, ValueType DResTy,
-                                             ValueType DTy, Instruction INSTD> {
-  def : Pat<(SResTy (opnode (STy FPR32:$Rn), (STy FPR32:$Rm))),
-            (INSTS FPR32:$Rn, FPR32:$Rm)>;
-  def : Pat<(DResTy (opnode (DTy FPR64:$Rn), (DTy FPR64:$Rm))),
-            (INSTD FPR64:$Rn, FPR64:$Rm)>;
-}
-
-class Neon_Scalar3Same_cmp_V1_D_size_patterns<CondCode CC,
-                                              Instruction INSTD>
-  : Pat<(v1i64 (Neon_cmp (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm), CC)),
-        (INSTD FPR64:$Rn, FPR64:$Rm)>;
-
-// Scalar Three Different
-
-class NeonI_Scalar3Diff_size<bit u, bits<2> size, bits<4> opcode, string asmop,
-                             RegisterClass FPRCD, RegisterClass FPRCS>
-  : NeonI_Scalar3Diff<u, size, opcode,
-                      (outs FPRCD:$Rd), (ins FPRCS:$Rn, FPRCS:$Rm),
-                      !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
-                      [],
-                      NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-multiclass NeonI_Scalar3Diff_HS_size<bit u, bits<4> opcode, string asmop> {
-  def shh : NeonI_Scalar3Diff_size<u, 0b01, opcode, asmop, FPR32, FPR16>;
-  def dss : NeonI_Scalar3Diff_size<u, 0b10, opcode, asmop, FPR64, FPR32>;
-}
-
-multiclass NeonI_Scalar3Diff_ml_HS_size<bit u, bits<4> opcode, string asmop> {
-  let Constraints = "$Src = $Rd" in {
-    def shh : NeonI_Scalar3Diff<u, 0b01, opcode,
-                       (outs FPR32:$Rd), (ins FPR32:$Src, FPR16:$Rn, FPR16:$Rm),
-                       !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
-                       [],
-                       NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]>;
-    def dss : NeonI_Scalar3Diff<u, 0b10, opcode,
-                       (outs FPR64:$Rd), (ins FPR64:$Src, FPR32:$Rn, FPR32:$Rm),
-                       !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
-                       [],
-                       NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]>;
-  }
-}
-
-multiclass Neon_Scalar3Diff_HS_size_patterns<SDPatternOperator opnode,
-                                             Instruction INSTH,
-                                             Instruction INSTS> {
-  def : Pat<(v1i32 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
-            (INSTH FPR16:$Rn, FPR16:$Rm)>;
-  def : Pat<(v1i64 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
-            (INSTS FPR32:$Rn, FPR32:$Rm)>;
-}
-
-multiclass Neon_Scalar3Diff_ml_HS_size_patterns<SDPatternOperator opnode,
-                                             Instruction INSTH,
-                                             Instruction INSTS> {
-  def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
-            (INSTH FPR32:$Src, FPR16:$Rn, FPR16:$Rm)>;
-  def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
-            (INSTS FPR64:$Src, FPR32:$Rn, FPR32:$Rm)>;
-}
-
-// Scalar Two Registers Miscellaneous
-
-class NeonI_Scalar2SameMisc_size<bit u, bits<2> size, bits<5> opcode, string asmop,
-                             RegisterClass FPRCD, RegisterClass FPRCS>
-  : NeonI_Scalar2SameMisc<u, size, opcode,
-                          (outs FPRCD:$Rd), (ins FPRCS:$Rn),
-                          !strconcat(asmop, "\t$Rd, $Rn"),
-                          [],
-                          NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-multiclass NeonI_Scalar2SameMisc_SD_size<bit u, bit size_high, bits<5> opcode,
-                                         string asmop> {
-  def ss : NeonI_Scalar2SameMisc_size<u, {size_high, 0b0}, opcode, asmop, FPR32,
-                                      FPR32>;
-  def dd : NeonI_Scalar2SameMisc_size<u, {size_high, 0b1}, opcode, asmop, FPR64,
-                                      FPR64>;
-}
-
-multiclass NeonI_Scalar2SameMisc_D_size<bit u, bits<5> opcode, string asmop> {
-  def dd : NeonI_Scalar2SameMisc_size<u, 0b11, opcode, asmop, FPR64, FPR64>;
-}
-
-multiclass NeonI_Scalar2SameMisc_BHSD_size<bit u, bits<5> opcode, string asmop>
-  : NeonI_Scalar2SameMisc_D_size<u, opcode, asmop> {
-  def bb : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR8>;
-  def hh : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR16>;
-  def ss : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR32>;
-}
-
-class NeonI_Scalar2SameMisc_fcvtxn_D_size<bit u, bits<5> opcode, string asmop>
-  : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR32, FPR64>;
-
-multiclass NeonI_Scalar2SameMisc_narrow_HSD_size<bit u, bits<5> opcode,
-                                                 string asmop> {
-  def bh : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR16>;
-  def hs : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR32>;
-  def sd : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR64>;
-}
-
-class NeonI_Scalar2SameMisc_accum_size<bit u, bits<2> size, bits<5> opcode,
-                                       string asmop, RegisterClass FPRC>
-  : NeonI_Scalar2SameMisc<u, size, opcode,
-                          (outs FPRC:$Rd), (ins FPRC:$Src, FPRC:$Rn),
-                          !strconcat(asmop, "\t$Rd, $Rn"),
-                          [],
-                          NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-multiclass NeonI_Scalar2SameMisc_accum_BHSD_size<bit u, bits<5> opcode,
-                                                 string asmop> {
-
-  let Constraints = "$Src = $Rd" in {
-    def bb : NeonI_Scalar2SameMisc_accum_size<u, 0b00, opcode, asmop, FPR8>;
-    def hh : NeonI_Scalar2SameMisc_accum_size<u, 0b01, opcode, asmop, FPR16>;
-    def ss : NeonI_Scalar2SameMisc_accum_size<u, 0b10, opcode, asmop, FPR32>;
-    def dd : NeonI_Scalar2SameMisc_accum_size<u, 0b11, opcode, asmop, FPR64>;
-  }
-}
-
-class Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<SDPatternOperator opnode,
-                                                  Instruction INSTD>
-  : Pat<(f32 (opnode (f64 FPR64:$Rn))),
-        (INSTD FPR64:$Rn)>;
-
-multiclass Neon_Scalar2SameMisc_fcvt_SD_size_patterns<SDPatternOperator opnode,
-                                                      Instruction INSTS,
-                                                      Instruction INSTD> {
-  def : Pat<(v1i32 (opnode (f32 FPR32:$Rn))),
-            (INSTS FPR32:$Rn)>;
-  def : Pat<(v1i64 (opnode (f64 FPR64:$Rn))),
-            (INSTD FPR64:$Rn)>;
-}
-
-class Neon_Scalar2SameMisc_vcvt_D_size_patterns<SDPatternOperator opnode,
-                                                Instruction INSTD>
-  : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn))),
-            (INSTD FPR64:$Rn)>;
-
-multiclass Neon_Scalar2SameMisc_cvt_SD_size_patterns<SDPatternOperator opnode,
-                                                     Instruction INSTS,
-                                                     Instruction INSTD> {
-  def : Pat<(f32 (opnode (v1i32 FPR32:$Rn))),
-            (INSTS FPR32:$Rn)>;
-  def : Pat<(f64 (opnode (v1i64 FPR64:$Rn))),
-            (INSTD FPR64:$Rn)>;
-}
-
-multiclass Neon_Scalar2SameMisc_SD_size_patterns<SDPatternOperator opnode,
-                                                 Instruction INSTS,
-                                                 Instruction INSTD> {
-  def : Pat<(f32 (opnode (f32 FPR32:$Rn))),
-            (INSTS FPR32:$Rn)>;
-  def : Pat<(f64 (opnode (f64 FPR64:$Rn))),
-            (INSTD FPR64:$Rn)>;
-}
-
-class Neon_Scalar2SameMisc_V1_D_size_patterns<SDPatternOperator opnode,
-                                              Instruction INSTD>
-  : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
-        (INSTD FPR64:$Rn)>;
-
-class NeonI_Scalar2SameMisc_cmpz_D_size<bit u, bits<5> opcode, string asmop>
-  : NeonI_Scalar2SameMisc<u, 0b11, opcode,
-                          (outs FPR64:$Rd), (ins FPR64:$Rn, neon_uimm0:$Imm),
-                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
-                          [],
-                          NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-multiclass NeonI_Scalar2SameMisc_cmpz_SD_size<bit u, bits<5> opcode,
-                                              string asmop> {
-  def ssi : NeonI_Scalar2SameMisc<u, 0b10, opcode,
-                           (outs FPR32:$Rd), (ins FPR32:$Rn, fpzz32:$FPImm),
-                           !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
-                           [],
-                           NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-  def ddi : NeonI_Scalar2SameMisc<u, 0b11, opcode,
-                           (outs FPR64:$Rd), (ins FPR64:$Rn, fpzz32:$FPImm),
-                           !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
-                           [],
-                           NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-class Neon_Scalar2SameMisc_cmpz_D_size_patterns<SDPatternOperator opnode,
-                                                Instruction INSTD>
-  : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
-                       (v1i64 (bitconvert (v8i8 Neon_AllZero))))),
-        (INSTD FPR64:$Rn, 0)>;
-
-class Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<CondCode CC,
-                                                   Instruction INSTD>
-  : Pat<(v1i64 (Neon_cmpz (v1i64 FPR64:$Rn),
-                          (i32 neon_uimm0:$Imm), CC)),
-        (INSTD FPR64:$Rn, neon_uimm0:$Imm)>;
-
-multiclass Neon_Scalar2SameMisc_cmpz_SD_size_patterns<SDPatternOperator opnode,
-                                                      CondCode CC,
-                                                      Instruction INSTS,
-                                                      Instruction INSTD> {
-  def : Pat<(v1i32 (opnode (f32 FPR32:$Rn), (f32 fpzz32:$FPImm))),
-            (INSTS FPR32:$Rn, fpzz32:$FPImm)>;
-  def : Pat<(v1i64 (opnode (f64 FPR64:$Rn), (f32 fpzz32:$FPImm))),
-            (INSTD FPR64:$Rn, fpzz32:$FPImm)>;
-  def : Pat<(v1i64 (Neon_cmpz (v1f64 FPR64:$Rn), (f32 fpzz32:$FPImm), CC)),
-            (INSTD FPR64:$Rn, fpzz32:$FPImm)>;
-}
-
-multiclass Neon_Scalar2SameMisc_D_size_patterns<SDPatternOperator opnode,
-                                                Instruction INSTD> {
-  def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn))),
-            (INSTD FPR64:$Rn)>;
-}
-
-multiclass Neon_Scalar2SameMisc_BHSD_size_patterns<SDPatternOperator opnode,
-                                                   Instruction INSTB,
-                                                   Instruction INSTH,
-                                                   Instruction INSTS,
-                                                   Instruction INSTD>
-  : Neon_Scalar2SameMisc_D_size_patterns<opnode, INSTD> {
-  def : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn))),
-            (INSTB FPR8:$Rn)>;
-  def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn))),
-            (INSTH FPR16:$Rn)>;
-  def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn))),
-            (INSTS FPR32:$Rn)>;
-}
-
-multiclass Neon_Scalar2SameMisc_narrow_HSD_size_patterns<
-                                                       SDPatternOperator opnode,
-                                                       Instruction INSTH,
-                                                       Instruction INSTS,
-                                                       Instruction INSTD> {
-  def : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn))),
-            (INSTH FPR16:$Rn)>;
-  def : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn))),
-            (INSTS FPR32:$Rn)>;
-  def : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn))),
-            (INSTD FPR64:$Rn)>;
-
-}
-
-multiclass Neon_Scalar2SameMisc_accum_BHSD_size_patterns<
-                                                       SDPatternOperator opnode,
-                                                       Instruction INSTB,
-                                                       Instruction INSTH,
-                                                       Instruction INSTS,
-                                                       Instruction INSTD> {
-  def : Pat<(v1i8 (opnode (v1i8 FPR8:$Src), (v1i8 FPR8:$Rn))),
-            (INSTB FPR8:$Src, FPR8:$Rn)>;
-  def : Pat<(v1i16 (opnode (v1i16 FPR16:$Src), (v1i16 FPR16:$Rn))),
-            (INSTH FPR16:$Src, FPR16:$Rn)>;
-  def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i32 FPR32:$Rn))),
-            (INSTS FPR32:$Src, FPR32:$Rn)>;
-  def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn))),
-            (INSTD FPR64:$Src, FPR64:$Rn)>;
-}
-
-// Scalar Shift By Immediate
-
-class NeonI_ScalarShiftImm_size<bit u, bits<5> opcode, string asmop,
-                                RegisterClass FPRC, Operand ImmTy>
-  : NeonI_ScalarShiftImm<u, opcode,
-                         (outs FPRC:$Rd), (ins FPRC:$Rn, ImmTy:$Imm),
-                         !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
-                         [], NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-multiclass NeonI_ScalarShiftRightImm_D_size<bit u, bits<5> opcode,
-                                            string asmop> {
-  def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
-    bits<6> Imm;
-    let Inst{22} = 0b1; // immh:immb = 1xxxxxx
-    let Inst{21-16} = Imm;
-  }
-}
-
-multiclass NeonI_ScalarShiftRightImm_BHSD_size<bit u, bits<5> opcode,
-                                               string asmop>
-  : NeonI_ScalarShiftRightImm_D_size<u, opcode, asmop> {
-  def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shr_imm8> {
-    bits<3> Imm;
-    let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
-    let Inst{18-16} = Imm;
-  }
-  def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shr_imm16> {
-    bits<4> Imm;
-    let Inst{22-20} = 0b001; // immh:immb = 001xxxx
-    let Inst{19-16} = Imm;
-  }
-  def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
-    bits<5> Imm;
-    let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
-    let Inst{20-16} = Imm;
-  }
-}
-
-multiclass NeonI_ScalarShiftLeftImm_D_size<bit u, bits<5> opcode,
-                                            string asmop> {
-  def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shl_imm64> {
-    bits<6> Imm;
-    let Inst{22} = 0b1; // immh:immb = 1xxxxxx
-    let Inst{21-16} = Imm;
-  }
-}
-
-multiclass NeonI_ScalarShiftLeftImm_BHSD_size<bit u, bits<5> opcode,
-                                              string asmop>
-  : NeonI_ScalarShiftLeftImm_D_size<u, opcode, asmop> {
-  def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shl_imm8> {
-    bits<3> Imm;
-    let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
-    let Inst{18-16} = Imm;
-  }
-  def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shl_imm16> {
-    bits<4> Imm;
-    let Inst{22-20} = 0b001; // immh:immb = 001xxxx
-    let Inst{19-16} = Imm;
-  }
-  def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shl_imm32> {
-    bits<5> Imm;
-    let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
-    let Inst{20-16} = Imm;
-  }
-}
-
-class NeonI_ScalarShiftRightImm_accum_D_size<bit u, bits<5> opcode, string asmop>
-  : NeonI_ScalarShiftImm<u, opcode,
-                         (outs FPR64:$Rd),
-                         (ins FPR64:$Src, FPR64:$Rn, shr_imm64:$Imm),
-                         !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
-                         [], NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-    bits<6> Imm;
-    let Inst{22} = 0b1; // immh:immb = 1xxxxxx
-    let Inst{21-16} = Imm;
-    let Constraints = "$Src = $Rd";
-}
-
-class NeonI_ScalarShiftLeftImm_accum_D_size<bit u, bits<5> opcode, string asmop>
-  : NeonI_ScalarShiftImm<u, opcode,
-                         (outs FPR64:$Rd),
-                         (ins FPR64:$Src, FPR64:$Rn, shl_imm64:$Imm),
-                         !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
-                         [], NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-    bits<6> Imm;
-    let Inst{22} = 0b1; // immh:immb = 1xxxxxx
-    let Inst{21-16} = Imm;
-    let Constraints = "$Src = $Rd";
-}
-
-class NeonI_ScalarShiftImm_narrow_size<bit u, bits<5> opcode, string asmop,
-                                       RegisterClass FPRCD, RegisterClass FPRCS,
-                                       Operand ImmTy>
-  : NeonI_ScalarShiftImm<u, opcode,
-                         (outs FPRCD:$Rd), (ins FPRCS:$Rn, ImmTy:$Imm),
-                         !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
-                         [], NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-multiclass NeonI_ScalarShiftImm_narrow_HSD_size<bit u, bits<5> opcode,
-                                                string asmop> {
-  def bhi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR8, FPR16,
-                                             shr_imm8> {
-    bits<3> Imm;
-    let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
-    let Inst{18-16} = Imm;
-  }
-  def hsi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR16, FPR32,
-                                             shr_imm16> {
-    bits<4> Imm;
-    let Inst{22-20} = 0b001; // immh:immb = 001xxxx
-    let Inst{19-16} = Imm;
-  }
-  def sdi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR32, FPR64,
-                                             shr_imm32> {
-    bits<5> Imm;
-    let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
-    let Inst{20-16} = Imm;
-  }
-}
-
-multiclass NeonI_ScalarShiftImm_cvt_SD_size<bit u, bits<5> opcode, string asmop> {
-  def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
-    bits<5> Imm;
-    let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
-    let Inst{20-16} = Imm;
-  }
-  def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
-    bits<6> Imm;
-    let Inst{22} = 0b1; // immh:immb = 1xxxxxx
-    let Inst{21-16} = Imm;
-  }
-}
-
-multiclass Neon_ScalarShiftRImm_D_size_patterns<SDPatternOperator opnode,
-                                               Instruction INSTD> {
-  def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
-                (INSTD FPR64:$Rn, imm:$Imm)>;
-}
-
-multiclass Neon_ScalarShiftLImm_D_size_patterns<SDPatternOperator opnode,
-                                               Instruction INSTD> {
-  def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 shl_imm64:$Imm))),
-                (INSTD FPR64:$Rn, imm:$Imm)>;
-}
-
-class Neon_ScalarShiftLImm_V1_D_size_patterns<SDPatternOperator opnode,
-                                             Instruction INSTD>
-  : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
-            (v1i64 (Neon_vdup (i32 shl_imm64:$Imm))))),
-        (INSTD FPR64:$Rn, imm:$Imm)>;
-
-class Neon_ScalarShiftRImm_V1_D_size_patterns<SDPatternOperator opnode,
-                                             Instruction INSTD>
-  : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
-            (v1i64 (Neon_vdup (i32 shr_imm64:$Imm))))),
-        (INSTD FPR64:$Rn, imm:$Imm)>;
-
-multiclass Neon_ScalarShiftLImm_BHSD_size_patterns<SDPatternOperator opnode,
-                                                   Instruction INSTB,
-                                                   Instruction INSTH,
-                                                   Instruction INSTS,
-                                                   Instruction INSTD>
-  : Neon_ScalarShiftLImm_D_size_patterns<opnode, INSTD> {
-  def bbi : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (i32 shl_imm8:$Imm))),
-                (INSTB FPR8:$Rn, imm:$Imm)>;
-  def hhi : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (i32 shl_imm16:$Imm))),
-                (INSTH FPR16:$Rn, imm:$Imm)>;
-  def ssi : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (i32 shl_imm32:$Imm))),
-                (INSTS FPR32:$Rn, imm:$Imm)>;
-}
-
-class Neon_ScalarShiftLImm_accum_D_size_patterns<SDPatternOperator opnode,
-                                                Instruction INSTD>
-  : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn),
-            (i32 shl_imm64:$Imm))),
-        (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
-
-class Neon_ScalarShiftRImm_accum_D_size_patterns<SDPatternOperator opnode,
-                                                Instruction INSTD>
-  : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn),
-            (i32 shr_imm64:$Imm))),
-        (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
-
-multiclass Neon_ScalarShiftImm_narrow_HSD_size_patterns<
-                                                       SDPatternOperator opnode,
-                                                       Instruction INSTH,
-                                                       Instruction INSTS,
-                                                       Instruction INSTD> {
-  def bhi : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn), (i32 shr_imm16:$Imm))),
-                (INSTH FPR16:$Rn, imm:$Imm)>;
-  def hsi : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
-                (INSTS FPR32:$Rn, imm:$Imm)>;
-  def sdi : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
-                (INSTD FPR64:$Rn, imm:$Imm)>;
-}
-
-multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator opnode,
-                                                      Instruction INSTS,
-                                                      Instruction INSTD> {
-  def ssi : Pat<(f32 (opnode (v1i32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
-                (INSTS FPR32:$Rn, imm:$Imm)>;
-  def ddi : Pat<(f64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
-                (INSTD FPR64:$Rn, imm:$Imm)>;
-}
-
-multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator opnode,
-                                                      Instruction INSTS,
-                                                      Instruction INSTD> {
-  def ssi : Pat<(v1i32 (opnode (f32 FPR32:$Rn), (i32 shr_imm32:$Imm))),
-                (INSTS FPR32:$Rn, imm:$Imm)>;
-  def ddi : Pat<(v1i64 (opnode (f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
-                (INSTD FPR64:$Rn, imm:$Imm)>;
-}
-
-// Scalar Signed Shift Right (Immediate)
-defm SSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00000, "sshr">;
-defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrds_n, SSHRddi>;
-// Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftRImm_V1_D_size_patterns<sra, SSHRddi>;
-
-// Scalar Unsigned Shift Right (Immediate)
-defm USHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00000, "ushr">;
-defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrdu_n, USHRddi>;
-// Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftRImm_V1_D_size_patterns<srl, USHRddi>;
-
-// Scalar Signed Rounding Shift Right (Immediate)
-defm SRSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00100, "srshr">;
-defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vsrshr, SRSHRddi>;
-
-// Scalar Unigned Rounding Shift Right (Immediate)
-defm URSHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00100, "urshr">;
-defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vurshr, URSHRddi>;
-
-// Scalar Signed Shift Right and Accumulate (Immediate)
-def SSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00010, "ssra">;
-def : Neon_ScalarShiftRImm_accum_D_size_patterns
-          <int_aarch64_neon_vsrads_n, SSRA>;
-
-// Scalar Unsigned Shift Right and Accumulate (Immediate)
-def USRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00010, "usra">;
-def : Neon_ScalarShiftRImm_accum_D_size_patterns
-          <int_aarch64_neon_vsradu_n, USRA>;
-
-// Scalar Signed Rounding Shift Right and Accumulate (Immediate)
-def SRSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00110, "srsra">;
-def : Neon_ScalarShiftRImm_accum_D_size_patterns
-          <int_aarch64_neon_vrsrads_n, SRSRA>;
-
-// Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
-def URSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00110, "ursra">;
-def : Neon_ScalarShiftRImm_accum_D_size_patterns
-          <int_aarch64_neon_vrsradu_n, URSRA>;
-
-// Scalar Shift Left (Immediate)
-defm SHL : NeonI_ScalarShiftLeftImm_D_size<0b0, 0b01010, "shl">;
-defm : Neon_ScalarShiftLImm_D_size_patterns<int_aarch64_neon_vshld_n, SHLddi>;
-// Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftLImm_V1_D_size_patterns<shl, SHLddi>;
-
-// Signed Saturating Shift Left (Immediate)
-defm SQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b0, 0b01110, "sqshl">;
-defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vqshls_n,
-                                               SQSHLbbi, SQSHLhhi,
-                                               SQSHLssi, SQSHLddi>;
-// Pattern to match llvm.arm.* intrinsic.
-defm : Neon_ScalarShiftLImm_D_size_patterns<Neon_sqrshlImm, SQSHLddi>;
-
-// Unsigned Saturating Shift Left (Immediate)
-defm UQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01110, "uqshl">;
-defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vqshlu_n,
-                                               UQSHLbbi, UQSHLhhi,
-                                               UQSHLssi, UQSHLddi>;
-// Pattern to match llvm.arm.* intrinsic.
-defm : Neon_ScalarShiftLImm_D_size_patterns<Neon_uqrshlImm, UQSHLddi>;
-
-// Signed Saturating Shift Left Unsigned (Immediate)
-defm SQSHLU : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01100, "sqshlu">;
-defm : Neon_ScalarShiftLImm_BHSD_size_patterns<int_aarch64_neon_vsqshlu,
-                                               SQSHLUbbi, SQSHLUhhi,
-                                               SQSHLUssi, SQSHLUddi>;
-
-// Shift Right And Insert (Immediate)
-def SRI : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b01000, "sri">;
-def : Neon_ScalarShiftRImm_accum_D_size_patterns
-          <int_aarch64_neon_vsri, SRI>;
-
-// Shift Left And Insert (Immediate)
-def SLI : NeonI_ScalarShiftLeftImm_accum_D_size<0b1, 0b01010, "sli">;
-def : Neon_ScalarShiftLImm_accum_D_size_patterns
-          <int_aarch64_neon_vsli, SLI>;
-
-// Signed Saturating Shift Right Narrow (Immediate)
-defm SQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10010, "sqshrn">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrn,
-                                                    SQSHRNbhi, SQSHRNhsi,
-                                                    SQSHRNsdi>;
-
-// Unsigned Saturating Shift Right Narrow (Immediate)
-defm UQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10010, "uqshrn">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqshrn,
-                                                    UQSHRNbhi, UQSHRNhsi,
-                                                    UQSHRNsdi>;
-
-// Signed Saturating Rounded Shift Right Narrow (Immediate)
-defm SQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10011, "sqrshrn">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrn,
-                                                    SQRSHRNbhi, SQRSHRNhsi,
-                                                    SQRSHRNsdi>;
-
-// Unsigned Saturating Rounded Shift Right Narrow (Immediate)
-defm UQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10011, "uqrshrn">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqrshrn,
-                                                    UQRSHRNbhi, UQRSHRNhsi,
-                                                    UQRSHRNsdi>;
-
-// Signed Saturating Shift Right Unsigned Narrow (Immediate)
-defm SQSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10000, "sqshrun">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrun,
-                                                    SQSHRUNbhi, SQSHRUNhsi,
-                                                    SQSHRUNsdi>;
-
-// Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
-defm SQRSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10001, "sqrshrun">;
-defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrun,
-                                                    SQRSHRUNbhi, SQRSHRUNhsi,
-                                                    SQRSHRUNsdi>;
-
-// Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
-defm SCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11100, "scvtf">;
-defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtfxs2fp_n,
-                                                  SCVTF_Nssi, SCVTF_Nddi>;
-
-// Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
-defm UCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11100, "ucvtf">;
-defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtfxu2fp_n,
-                                                  UCVTF_Nssi, UCVTF_Nddi>;
-
-// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
-defm FCVTZS_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11111, "fcvtzs">;
-defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvtfp2fxs_n,
-                                                  FCVTZS_Nssi, FCVTZS_Nddi>;
-
-// Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
-defm FCVTZU_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11111, "fcvtzu">;
-defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvtfp2fxu_n,
-                                                  FCVTZU_Nssi, FCVTZU_Nddi>;
-
-// Patterns For Convert Instructions Between v1f64 and v1i64
-class Neon_ScalarShiftImm_cvtf_v1f64_pattern<SDPatternOperator opnode,
-                                             Instruction INST>
-    : Pat<(v1f64 (opnode (v1i64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
-          (INST FPR64:$Rn, imm:$Imm)>;
-
-class Neon_ScalarShiftImm_fcvt_v1f64_pattern<SDPatternOperator opnode,
-                                             Instruction INST>
-    : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (i32 shr_imm64:$Imm))),
-          (INST FPR64:$Rn, imm:$Imm)>;
-
-def : Neon_ScalarShiftImm_cvtf_v1f64_pattern<int_arm_neon_vcvtfxs2fp,
-                                             SCVTF_Nddi>;
-
-def : Neon_ScalarShiftImm_cvtf_v1f64_pattern<int_arm_neon_vcvtfxu2fp,
-                                             UCVTF_Nddi>;
-
-def : Neon_ScalarShiftImm_fcvt_v1f64_pattern<int_arm_neon_vcvtfp2fxs,
-                                             FCVTZS_Nddi>;
-
-def : Neon_ScalarShiftImm_fcvt_v1f64_pattern<int_arm_neon_vcvtfp2fxu,
-                                             FCVTZU_Nddi>;
-
-// Scalar Integer Add
-let isCommutable = 1 in {
-def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
-}
-
-// Scalar Integer Sub
-def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
-
-// Pattern for Scalar Integer Add and Sub with D register only
-defm : Neon_Scalar3Same_D_size_patterns<add, ADDddd>;
-defm : Neon_Scalar3Same_D_size_patterns<sub, SUBddd>;
-
-// Patterns to match llvm.aarch64.* intrinsic for Scalar Add, Sub
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vaddds, ADDddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vadddu, ADDddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubds, SUBddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubdu, SUBddd>;
-
-// Scalar Integer Saturating Add (Signed, Unsigned)
-defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
-defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
-
-// Scalar Integer Saturating Sub (Signed, Unsigned)
-defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
-defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
-
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Integer Saturating Add, Sub  (Signed, Unsigned)
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqadds, SQADDbbb,
-                                           SQADDhhh, SQADDsss, SQADDddd>;
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqaddu, UQADDbbb,
-                                           UQADDhhh, UQADDsss, UQADDddd>;
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubs, SQSUBbbb,
-                                           SQSUBhhh, SQSUBsss, SQSUBddd>;
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_arm_neon_vqsubu, UQSUBbbb,
-                                           UQSUBhhh, UQSUBsss, UQSUBddd>;
-
-// Scalar Integer Saturating Doubling Multiply Half High
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul, ReadFPMul] in
-defm SQDMULH : NeonI_Scalar3Same_HS_sizes<0b0, 0b10110, "sqdmulh", 1>;
-
-// Scalar Integer Saturating Rounding Doubling Multiply Half High
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
-defm SQRDMULH : NeonI_Scalar3Same_HS_sizes<0b1, 0b10110, "sqrdmulh", 1>;
-}
-
-// Patterns to match llvm.arm.* intrinsic for
-// Scalar Integer Saturating Doubling Multiply Half High and
-// Scalar Integer Saturating Rounding Doubling Multiply Half High
-defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqdmulh, SQDMULHhhh,
-                                                               SQDMULHsss>;
-defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqrdmulh, SQRDMULHhhh,
-                                                                SQRDMULHsss>;
-
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul, ReadFPMul] in {
-// Scalar Floating-point Multiply Extended
-defm FMULX : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11011, "fmulx", 1>;
-}
-
-// Scalar Floating-point Reciprocal Step
-defm FRECPS : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11111, "frecps", 0>;
-defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vrecps, f32, f32,
-                                         FRECPSsss, f64, f64, FRECPSddd>;
-def : Pat<(v1f64 (int_arm_neon_vrecps (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
-          (FRECPSddd FPR64:$Rn, FPR64:$Rm)>;
-
-// Scalar Floating-point Reciprocal Square Root Step
-defm FRSQRTS : NeonI_Scalar3Same_SD_sizes<0b0, 0b1, 0b11111, "frsqrts", 0>;
-defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vrsqrts, f32, f32,
-                                         FRSQRTSsss, f64, f64, FRSQRTSddd>;
-def : Pat<(v1f64 (int_arm_neon_vrsqrts (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
-          (FRSQRTSddd FPR64:$Rn, FPR64:$Rm)>;
-def : Pat<(v1f64 (fsqrt (v1f64 FPR64:$Rn))), (FSQRTdd FPR64:$Rn)>;
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Floating-point Multiply Extended,
-multiclass Neon_Scalar3Same_MULX_SD_size_patterns<SDPatternOperator opnode,
-                                                  Instruction INSTS,
-                                                  Instruction INSTD> {
-  def : Pat<(f32 (opnode (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
-            (INSTS FPR32:$Rn, FPR32:$Rm)>;
-  def : Pat<(f64 (opnode (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
-            (INSTD FPR64:$Rn, FPR64:$Rm)>;
-}
-
-defm : Neon_Scalar3Same_MULX_SD_size_patterns<int_aarch64_neon_vmulx,
-                                              FMULXsss, FMULXddd>;
-def : Pat<(v1f64 (int_aarch64_neon_vmulx (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
-          (FMULXddd FPR64:$Rn, FPR64:$Rm)>;
-
-// Scalar Integer Shift Left (Signed, Unsigned)
-def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
-def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
-
-// Patterns to match llvm.arm.* intrinsic for
-// Scalar Integer Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Integer Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshlds, SSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshldu, USHLddd>;
-
-// Scalar Integer Saturating Shift Left (Signed, Unsigned)
-defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
-defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar  Integer Saturating Shift Letf (Signed, Unsigned)
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshls, SQSHLbbb,
-                                           SQSHLhhh, SQSHLsss, SQSHLddd>;
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshlu, UQSHLbbb,
-                                           UQSHLhhh, UQSHLsss, UQSHLddd>;
-
-// Patterns to match llvm.arm.* intrinsic for
-// Scalar  Integer Saturating Shift Letf (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
-
-// Scalar Integer Rounding Shift Left (Signed, Unsigned)
-def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
-def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Integer Rounding Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshlds, SRSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshldu, URSHLddd>;
-
-// Patterns to match llvm.arm.* intrinsic for
-// Scalar Integer Rounding Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
-
-// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
-defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
-defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshls, SQRSHLbbb,
-                                           SQRSHLhhh, SQRSHLsss, SQRSHLddd>;
-defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshlu, UQRSHLbbb,
-                                           UQRSHLhhh, UQRSHLsss, UQRSHLddd>;
-
-// Patterns to match llvm.arm.* intrinsic for
-// Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
-defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
-
-let SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC] in {
-// Signed Saturating Doubling Multiply-Add Long
-defm SQDMLAL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1001, "sqdmlal">;
-}
-defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlal,
-                                            SQDMLALshh, SQDMLALdss>;
-
-// Signed Saturating Doubling Multiply-Subtract Long
-let SchedRW = [WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC] in {
-defm SQDMLSL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1011, "sqdmlsl">;
-}
-defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlsl,
-                                            SQDMLSLshh, SQDMLSLdss>;
-
-// Signed Saturating Doubling Multiply Long
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul, ReadFPMul] in {
-defm SQDMULL : NeonI_Scalar3Diff_HS_size<0b0, 0b1101, "sqdmull">;
-}
-defm : Neon_Scalar3Diff_HS_size_patterns<int_arm_neon_vqdmull,
-                                         SQDMULLshh, SQDMULLdss>;
-
-// Scalar Signed Integer Convert To Floating-point
-defm SCVTF  : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11101, "scvtf">;
-defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtint2fps,
-                                                 SCVTFss, SCVTFdd>;
-
-// Scalar Unsigned Integer Convert To Floating-point
-defm UCVTF  : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11101, "ucvtf">;
-defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtint2fpu,
-                                                 UCVTFss, UCVTFdd>;
-
-// Scalar Floating-point Converts
-def FCVTXN : NeonI_Scalar2SameMisc_fcvtxn_D_size<0b1, 0b10110, "fcvtxn">;
-def : Neon_Scalar2SameMisc_fcvtxn_D_size_patterns<int_aarch64_neon_fcvtxn,
-                                                  FCVTXN>;
-
-defm FCVTNS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11010, "fcvtns">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtns,
-                                                  FCVTNSss, FCVTNSdd>;
-def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtns, FCVTNSdd>;
-
-defm FCVTNU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11010, "fcvtnu">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtnu,
-                                                  FCVTNUss, FCVTNUdd>;
-def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtnu, FCVTNUdd>;
-
-defm FCVTMS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11011, "fcvtms">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtms,
-                                                  FCVTMSss, FCVTMSdd>;
-def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtms, FCVTMSdd>;
-
-defm FCVTMU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11011, "fcvtmu">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtmu,
-                                                  FCVTMUss, FCVTMUdd>;
-def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtmu, FCVTMUdd>;
-
-defm FCVTAS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11100, "fcvtas">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtas,
-                                                  FCVTASss, FCVTASdd>;
-def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtas, FCVTASdd>;
-
-defm FCVTAU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11100, "fcvtau">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtau,
-                                                  FCVTAUss, FCVTAUdd>;
-def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtau, FCVTAUdd>;
-
-defm FCVTPS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11010, "fcvtps">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtps,
-                                                  FCVTPSss, FCVTPSdd>;
-def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtps, FCVTPSdd>;
-
-defm FCVTPU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11010, "fcvtpu">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtpu,
-                                                  FCVTPUss, FCVTPUdd>;
-def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_arm_neon_vcvtpu, FCVTPUdd>;
-
-defm FCVTZS : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11011, "fcvtzs">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzs,
-                                                  FCVTZSss, FCVTZSdd>;
-def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_aarch64_neon_vcvtzs,
-                                                FCVTZSdd>;
-
-defm FCVTZU : NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11011, "fcvtzu">;
-defm : Neon_Scalar2SameMisc_fcvt_SD_size_patterns<int_aarch64_neon_fcvtzu,
-                                                  FCVTZUss, FCVTZUdd>;
-def : Neon_Scalar2SameMisc_vcvt_D_size_patterns<int_aarch64_neon_vcvtzu,
-                                                FCVTZUdd>;
-
-// Patterns For Convert Instructions Between v1f64 and v1i64
-class Neon_Scalar2SameMisc_cvtf_v1f64_pattern<SDPatternOperator opnode,
-                                              Instruction INST>
-    : Pat<(v1f64 (opnode (v1i64 FPR64:$Rn))), (INST FPR64:$Rn)>;
-
-class Neon_Scalar2SameMisc_fcvt_v1f64_pattern<SDPatternOperator opnode,
-                                              Instruction INST>
-    : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
-
-def : Neon_Scalar2SameMisc_cvtf_v1f64_pattern<sint_to_fp, SCVTFdd>;
-def : Neon_Scalar2SameMisc_cvtf_v1f64_pattern<uint_to_fp, UCVTFdd>;
-
-def : Neon_Scalar2SameMisc_fcvt_v1f64_pattern<fp_to_sint, FCVTZSdd>;
-def : Neon_Scalar2SameMisc_fcvt_v1f64_pattern<fp_to_uint, FCVTZUdd>;
-
-// Scalar Floating-point Reciprocal Estimate
-defm FRECPE : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11101, "frecpe">;
-defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpe,
-                                             FRECPEss, FRECPEdd>;
-def : Neon_Scalar2SameMisc_V1_D_size_patterns<int_arm_neon_vrecpe,
-                                              FRECPEdd>;
-
-// Scalar Floating-point Reciprocal Exponent
-defm FRECPX : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11111, "frecpx">;
-defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpx,
-                                             FRECPXss, FRECPXdd>;
-
-// Scalar Floating-point Reciprocal Square Root Estimate
-defm FRSQRTE: NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11101, "frsqrte">;
-defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrsqrte,
-                                                 FRSQRTEss, FRSQRTEdd>;
-def : Neon_Scalar2SameMisc_V1_D_size_patterns<int_arm_neon_vrsqrte,
-                                              FRSQRTEdd>;
-
-// Scalar Floating-point Round
-class Neon_ScalarFloatRound_pattern<SDPatternOperator opnode, Instruction INST>
-    : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
-
-def : Neon_ScalarFloatRound_pattern<fceil, FRINTPdd>;
-def : Neon_ScalarFloatRound_pattern<ffloor, FRINTMdd>;
-def : Neon_ScalarFloatRound_pattern<ftrunc, FRINTZdd>;
-def : Neon_ScalarFloatRound_pattern<frint, FRINTXdd>;
-def : Neon_ScalarFloatRound_pattern<fnearbyint, FRINTIdd>;
-def : Neon_ScalarFloatRound_pattern<frnd, FRINTAdd>;
-def : Neon_ScalarFloatRound_pattern<int_aarch64_neon_frintn, FRINTNdd>;
-
-// Scalar Integer Compare
-
-// Scalar Compare Bitwise Equal
-def CMEQddd: NeonI_Scalar3Same_D_size<0b1, 0b10001, "cmeq">;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vceq, CMEQddd>;
-
-class Neon_Scalar3Same_cmp_D_size_v1_patterns<SDPatternOperator opnode,
-                                              Instruction INSTD,
-                                              CondCode CC>
-  : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm), CC)),
-        (INSTD FPR64:$Rn, FPR64:$Rm)>;
-
-def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMEQddd, SETEQ>;
-
-// Scalar Compare Signed Greather Than Or Equal
-def CMGEddd: NeonI_Scalar3Same_D_size<0b0, 0b00111, "cmge">;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vcge, CMGEddd>;
-def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMGEddd, SETGE>;
-
-// Scalar Compare Unsigned Higher Or Same
-def CMHSddd: NeonI_Scalar3Same_D_size<0b1, 0b00111, "cmhs">;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vchs, CMHSddd>;
-def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMHSddd, SETUGE>;
-
-// Scalar Compare Unsigned Higher
-def CMHIddd: NeonI_Scalar3Same_D_size<0b1, 0b00110, "cmhi">;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vchi, CMHIddd>;
-def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMHIddd, SETUGT>;
-
-// Scalar Compare Signed Greater Than
-def CMGTddd: NeonI_Scalar3Same_D_size<0b0, 0b00110, "cmgt">;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vcgt, CMGTddd>;
-def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMGTddd, SETGT>;
-
-// Scalar Compare Bitwise Test Bits
-def CMTSTddd: NeonI_Scalar3Same_D_size<0b0, 0b10001, "cmtst">;
-defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vtstd, CMTSTddd>;
-defm : Neon_Scalar3Same_D_size_patterns<Neon_tst, CMTSTddd>;
-
-// Scalar Compare Bitwise Equal To Zero
-def CMEQddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01001, "cmeq">;
-def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vceq,
-                                                CMEQddi>;
-def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETEQ, CMEQddi>;
-
-// Scalar Compare Signed Greather Than Or Equal To Zero
-def CMGEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01000, "cmge">;
-def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcge,
-                                                CMGEddi>;
-def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETGE, CMGEddi>;
-
-// Scalar Compare Signed Greater Than Zero
-def CMGTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01000, "cmgt">;
-def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcgt,
-                                                CMGTddi>;
-def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETGT, CMGTddi>;
-
-// Scalar Compare Signed Less Than Or Equal To Zero
-def CMLEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01001, "cmle">;
-def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vclez,
-                                                CMLEddi>;
-def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETLE, CMLEddi>;
-
-// Scalar Compare Less Than Zero
-def CMLTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01010, "cmlt">;
-def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcltz,
-                                                CMLTddi>;
-def : Neon_Scalar2SameMisc_cmpz_D_V1_size_patterns<SETLT, CMLTddi>;
-
-// Scalar Floating-point Compare
-
-// Scalar Floating-point Compare Mask Equal
-defm FCMEQ: NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11100, "fcmeq">;
-defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fceq, v1i32, f32,
-                                         FCMEQsss, v1i64, f64, FCMEQddd>;
-def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETEQ, FCMEQddd>;
-
-// Scalar Floating-point Compare Mask Equal To Zero
-defm FCMEQZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01101, "fcmeq">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fceq, SETEQ,
-                                                  FCMEQZssi, FCMEQZddi>;
-
-// Scalar Floating-point Compare Mask Greater Than Or Equal
-defm FCMGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11100, "fcmge">;
-defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcge, v1i32, f32,
-                                         FCMGEsss, v1i64, f64, FCMGEddd>;
-def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGE, FCMGEddd>;
-
-// Scalar Floating-point Compare Mask Greater Than Or Equal To Zero
-defm FCMGEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01100, "fcmge">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcge, SETGE,
-                                                  FCMGEZssi, FCMGEZddi>;
-
-// Scalar Floating-point Compare Mask Greather Than
-defm FCMGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11100, "fcmgt">;
-defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcgt, v1i32, f32,
-                                         FCMGTsss, v1i64, f64, FCMGTddd>;
-def : Neon_Scalar3Same_cmp_V1_D_size_patterns<SETGT, FCMGTddd>;
-
-// Scalar Floating-point Compare Mask Greather Than Zero
-defm FCMGTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01100, "fcmgt">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcgt, SETGT,
-                                                  FCMGTZssi, FCMGTZddi>;
-
-// Scalar Floating-point Compare Mask Less Than Or Equal To Zero
-defm FCMLEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01101, "fcmle">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fclez, SETLE,
-                                                  FCMLEZssi, FCMLEZddi>;
-
-// Scalar Floating-point Compare Mask Less Than Zero
-defm FCMLTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01110, "fcmlt">;
-defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_fcltz, SETLT,
-                                                  FCMLTZssi, FCMLTZddi>;
-
-// Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
-defm FACGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11101, "facge">;
-defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcage, v1i32, f32,
-                                         FACGEsss, v1i64, f64, FACGEddd>;
-def : Pat<(v1i64 (int_arm_neon_vacge (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
-          (FACGEddd FPR64:$Rn, FPR64:$Rm)>;
-
-// Scalar Floating-point Absolute Compare Mask Greater Than
-defm FACGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11101, "facgt">;
-defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_fcagt, v1i32, f32,
-                                         FACGTsss, v1i64, f64, FACGTddd>;
-def : Pat<(v1i64 (int_arm_neon_vacgt (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
-          (FACGTddd FPR64:$Rn, FPR64:$Rm)>;
-
-// Scalar Floating-point Absolute Difference
-defm FABD: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11010, "fabd">;
-defm : Neon_Scalar3Same_SD_size_patterns<int_aarch64_neon_vabd, f32, f32,
-                                         FABDsss, f64, f64, FABDddd>;
-
-// Scalar Absolute Value
-defm ABS : NeonI_Scalar2SameMisc_D_size<0b0, 0b01011, "abs">;
-defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vabs, ABSdd>;
-
-// Scalar Signed Saturating Absolute Value
-defm SQABS : NeonI_Scalar2SameMisc_BHSD_size<0b0, 0b00111, "sqabs">;
-defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqabs,
-                                               SQABSbb, SQABShh, SQABSss, SQABSdd>;
-
-// Scalar Negate
-defm NEG : NeonI_Scalar2SameMisc_D_size<0b1, 0b01011, "neg">;
-defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vneg, NEGdd>;
-
-// Scalar Signed Saturating Negate
-defm SQNEG : NeonI_Scalar2SameMisc_BHSD_size<0b1, 0b00111, "sqneg">;
-defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqneg,
-                                               SQNEGbb, SQNEGhh, SQNEGss, SQNEGdd>;
-
-// Scalar Signed Saturating Accumulated of Unsigned Value
-defm SUQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b0, 0b00011, "suqadd">;
-defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vuqadd,
-                                                     SUQADDbb, SUQADDhh,
-                                                     SUQADDss, SUQADDdd>;
-
-// Scalar Unsigned Saturating Accumulated of Signed Value
-defm USQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b1, 0b00011, "usqadd">;
-defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vsqadd,
-                                                     USQADDbb, USQADDhh,
-                                                     USQADDss, USQADDdd>;
-
-def : Pat<(v1i64 (int_aarch64_neon_suqadd (v1i64 FPR64:$Src),
-                                          (v1i64 FPR64:$Rn))),
-          (SUQADDdd FPR64:$Src, FPR64:$Rn)>;
-
-def : Pat<(v1i64 (int_aarch64_neon_usqadd (v1i64 FPR64:$Src),
-                                          (v1i64 FPR64:$Rn))),
-          (USQADDdd FPR64:$Src, FPR64:$Rn)>;
-
-def : Pat<(v1i64 (int_arm_neon_vabs (v1i64 FPR64:$Rn))),
-          (ABSdd FPR64:$Rn)>;
-
-def : Pat<(v1i64 (int_arm_neon_vqabs (v1i64 FPR64:$Rn))),
-          (SQABSdd FPR64:$Rn)>;
-
-def : Pat<(v1i64 (int_arm_neon_vqneg (v1i64 FPR64:$Rn))),
-          (SQNEGdd FPR64:$Rn)>;
-
-def : Pat<(v1i64 (sub (v1i64 (bitconvert (v8i8 Neon_AllZero))),
-                      (v1i64 FPR64:$Rn))),
-          (NEGdd FPR64:$Rn)>;
-
-// Scalar Signed Saturating Extract Unsigned Narrow
-defm SQXTUN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10010, "sqxtun">;
-defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnsu,
-                                                     SQXTUNbh, SQXTUNhs,
-                                                     SQXTUNsd>;
-
-// Scalar Signed Saturating Extract Narrow
-defm SQXTN  : NeonI_Scalar2SameMisc_narrow_HSD_size<0b0, 0b10100, "sqxtn">;
-defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovns,
-                                                     SQXTNbh, SQXTNhs,
-                                                     SQXTNsd>;
-
-// Scalar Unsigned Saturating Extract Narrow
-defm UQXTN  : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10100, "uqxtn">;
-defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnu,
-                                                     UQXTNbh, UQXTNhs,
-                                                     UQXTNsd>;
-
-// Scalar Reduce Pairwise
-
-multiclass NeonI_ScalarPair_D_sizes<bit u, bit size, bits<5> opcode,
-                                     string asmop, bit Commutable = 0> {
-  let isCommutable = Commutable in {
-    def _D_2D : NeonI_ScalarPair<u, {size, 0b1}, opcode,
-                                (outs FPR64:$Rd), (ins VPR128:$Rn),
-                                !strconcat(asmop, "\t$Rd, $Rn.2d"),
-                                [],
-                                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-  }
-}
-
-multiclass NeonI_ScalarPair_SD_sizes<bit u, bit size, bits<5> opcode,
-                                     string asmop, bit Commutable = 0>
-  : NeonI_ScalarPair_D_sizes<u, size, opcode, asmop, Commutable> {
-  let isCommutable = Commutable in {
-    def _S_2S : NeonI_ScalarPair<u, {size, 0b0}, opcode,
-                                (outs FPR32:$Rd), (ins VPR64:$Rn),
-                                !strconcat(asmop, "\t$Rd, $Rn.2s"),
-                                [],
-                                NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-  }
-}
-
-// Scalar Reduce Addition Pairwise (Integer) with
-// Pattern to match llvm.arm.* intrinsic
-defm ADDPvv : NeonI_ScalarPair_D_sizes<0b0, 0b1, 0b11011, "addp", 0>;
-
-// Pattern to match llvm.aarch64.* intrinsic for
-// Scalar Reduce Addition Pairwise (Integer)
-def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
-          (ADDPvv_D_2D VPR128:$Rn)>;
-def : Pat<(v1i64 (int_aarch64_neon_vaddv (v2i64 VPR128:$Rn))),
-          (ADDPvv_D_2D VPR128:$Rn)>;
-
-// Scalar Reduce Addition Pairwise (Floating Point)
-defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
-
-// Scalar Reduce Maximum Pairwise (Floating Point)
-defm FMAXPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01111, "fmaxp", 0>;
-
-// Scalar Reduce Minimum Pairwise (Floating Point)
-defm FMINPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01111, "fminp", 0>;
-
-// Scalar Reduce maxNum Pairwise (Floating Point)
-defm FMAXNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01100, "fmaxnmp", 0>;
-
-// Scalar Reduce minNum Pairwise (Floating Point)
-defm FMINNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01100, "fminnmp", 0>;
-
-multiclass Neon_ScalarPair_SD_size_patterns<SDPatternOperator opnode,
-                                            Instruction INSTS,
-                                            Instruction INSTD> {
-  def : Pat<(f32 (opnode (v2f32 VPR64:$Rn))),
-            (INSTS VPR64:$Rn)>;
-  def : Pat<(f64 (opnode (v2f64 VPR128:$Rn))),
-            (INSTD VPR128:$Rn)>;
-}
-
-// Patterns to match llvm.aarch64.* intrinsic for
-// Scalar Reduce Add, Max, Min, MaxiNum, MinNum Pairwise (Floating Point)
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfadd,
-                                        FADDPvv_S_2S, FADDPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmax,
-                                        FMAXPvv_S_2S, FMAXPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmin,
-                                        FMINPvv_S_2S, FMINPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfmaxnm,
-                                        FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm,
-                                        FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
-
-def : Pat<(f32 (int_aarch64_neon_vpfadd (v4f32 VPR128:$Rn))),
-          (FADDPvv_S_2S (v2f32
-               (EXTRACT_SUBREG
-                   (v4f32 (FADDP_4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rn))),
-                   sub_64)))>;
-
-// Scalar by element Arithmetic
-
-class NeonI_ScalarXIndexedElemArith<string asmop, bits<4> opcode,
-                                    string rmlane, bit u, bit szhi, bit szlo,
-                                    RegisterClass ResFPR, RegisterClass OpFPR,
-                                    RegisterOperand OpVPR, Operand OpImm>
-  : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
-                             (outs ResFPR:$Rd),
-                             (ins OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
-                             asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
-                             [],
-                             NoItinerary>,
-    Sched<[WriteFPMul, ReadFPMul, ReadFPMul]> {
-  bits<3> Imm;
-  bits<5> MRm;
-}
-
-class NeonI_ScalarXIndexedElemArith_Constraint_Impl<string asmop, bits<4> opcode,
-                                                    string rmlane,
-                                                    bit u, bit szhi, bit szlo,
-                                                    RegisterClass ResFPR,
-                                                    RegisterClass OpFPR,
-                                                    RegisterOperand OpVPR,
-                                                    Operand OpImm>
-  : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
-                             (outs ResFPR:$Rd),
-                             (ins ResFPR:$src, OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
-                             asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
-                             [],
-                             NoItinerary>,
-    Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
-  let Constraints = "$src = $Rd";
-  bits<3> Imm;
-  bits<5> MRm;
-}
-
-// Scalar Floating Point  multiply (scalar, by element)
-def FMULssv_4S : NeonI_ScalarXIndexedElemArith<"fmul",
-  0b1001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
-  let Inst{11} = Imm{1}; // h
-  let Inst{21} = Imm{0}; // l
-  let Inst{20-16} = MRm;
-}
-def FMULddv_2D : NeonI_ScalarXIndexedElemArith<"fmul",
-  0b1001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
-  let Inst{11} = Imm{0}; // h
-  let Inst{21} = 0b0;    // l
-  let Inst{20-16} = MRm;
-}
-
-// Scalar Floating Point  multiply extended (scalar, by element)
-def FMULXssv_4S : NeonI_ScalarXIndexedElemArith<"fmulx",
-  0b1001, ".s", 0b1, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
-  let Inst{11} = Imm{1}; // h
-  let Inst{21} = Imm{0}; // l
-  let Inst{20-16} = MRm;
-}
-def FMULXddv_2D : NeonI_ScalarXIndexedElemArith<"fmulx",
-  0b1001, ".d", 0b1, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
-  let Inst{11} = Imm{0}; // h
-  let Inst{21} = 0b0;    // l
-  let Inst{20-16} = MRm;
-}
-
-multiclass Neon_ScalarXIndexedElem_MUL_MULX_Patterns<
-  SDPatternOperator opnode,
-  Instruction INST,
-  ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
-  ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
-
-  def  : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
-               (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)))),
-             (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
-  def  : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
-               (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)))),
-             (ResTy (INST (ResTy FPRC:$Rn),
-               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
-               OpNImm:$Imm))>;
-
-  // swapped operands
-  def  : Pat<(ResTy (opnode
-               (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
-               (ResTy FPRC:$Rn))),
-             (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
-  def  : Pat<(ResTy (opnode
-               (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
-               (ResTy FPRC:$Rn))),
-             (ResTy (INST (ResTy FPRC:$Rn),
-               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
-               OpNImm:$Imm))>;
-}
-
-// Patterns for Scalar Floating Point  multiply (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULssv_4S,
-  f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULddv_2D,
-  f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
-
-// Patterns for Scalar Floating Point  multiply extended (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
-  FMULXssv_4S, f32, FPR32, v4f32, neon_uimm2_bare,
-  v2f32, v4f32, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
-  FMULXddv_2D, f64, FPR64, v2f64, neon_uimm1_bare,
-  v1f64, v2f64, neon_uimm0_bare>;
-
-// Scalar Floating Point fused multiply-add (scalar, by element)
-def FMLAssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
-  0b0001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
-  let Inst{11} = Imm{1}; // h
-  let Inst{21} = Imm{0}; // l
-  let Inst{20-16} = MRm;
-}
-def FMLAddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
-  0b0001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
-  let Inst{11} = Imm{0}; // h
-  let Inst{21} = 0b0;    // l
-  let Inst{20-16} = MRm;
-}
-
-// Scalar Floating Point fused multiply-subtract (scalar, by element)
-def FMLSssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
-  0b0101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
-  let Inst{11} = Imm{1}; // h
-  let Inst{21} = Imm{0}; // l
-  let Inst{20-16} = MRm;
-}
-def FMLSddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
-  0b0101, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
-  let Inst{11} = Imm{0}; // h
-  let Inst{21} = 0b0;    // l
-  let Inst{20-16} = MRm;
-}
-// We are allowed to match the fma instruction regardless of compile options.
-multiclass Neon_ScalarXIndexedElem_FMA_Patterns<
-  Instruction FMLAI, Instruction FMLSI,
-  ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
-  ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
-  // fmla
-  def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
-               (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
-               (ResTy FPRC:$Ra))),
-             (ResTy (FMLAI (ResTy FPRC:$Ra),
-               (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
-  def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
-               (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
-               (ResTy FPRC:$Ra))),
-             (ResTy (FMLAI (ResTy FPRC:$Ra),
-               (ResTy FPRC:$Rn),
-               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
-               OpNImm:$Imm))>;
-
-  // swapped fmla operands
-  def  : Pat<(ResTy (fma
-               (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
-               (ResTy FPRC:$Rn),
-               (ResTy FPRC:$Ra))),
-             (ResTy (FMLAI (ResTy FPRC:$Ra),
-               (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
-  def  : Pat<(ResTy (fma
-               (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
-               (ResTy FPRC:$Rn),
-               (ResTy FPRC:$Ra))),
-             (ResTy (FMLAI (ResTy FPRC:$Ra),
-               (ResTy FPRC:$Rn),
-               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
-               OpNImm:$Imm))>;
-
-  // fmls
-  def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
-               (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
-               (ResTy FPRC:$Ra))),
-             (ResTy (FMLSI (ResTy FPRC:$Ra),
-               (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
-  def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
-               (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
-               (ResTy FPRC:$Ra))),
-             (ResTy (FMLSI (ResTy FPRC:$Ra),
-               (ResTy FPRC:$Rn),
-               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
-               OpNImm:$Imm))>;
-
-  // swapped fmls operands
-  def  : Pat<(ResTy (fma
-               (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
-               (ResTy FPRC:$Rn),
-               (ResTy FPRC:$Ra))),
-             (ResTy (FMLSI (ResTy FPRC:$Ra),
-               (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
-
-  def  : Pat<(ResTy (fma
-               (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
-               (ResTy FPRC:$Rn),
-               (ResTy FPRC:$Ra))),
-             (ResTy (FMLSI (ResTy FPRC:$Ra),
-               (ResTy FPRC:$Rn),
-               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
-               OpNImm:$Imm))>;
-}
-
-// Scalar Floating Point fused multiply-add and
-// multiply-subtract (scalar, by element)
-defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAssv_4S, FMLSssv_4S,
-  f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAddv_2D, FMLSddv_2D,
-  f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
-defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAddv_2D, FMLSddv_2D,
-  f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
-
-// Scalar Signed saturating doubling multiply long (scalar, by element)
-def SQDMULLshv_4H : NeonI_ScalarXIndexedElemArith<"sqdmull",
-  0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
-  let Inst{11} = 0b0; // h
-  let Inst{21} = Imm{1}; // l
-  let Inst{20} = Imm{0}; // m
-  let Inst{19-16} = MRm{3-0};
-}
-def SQDMULLshv_8H : NeonI_ScalarXIndexedElemArith<"sqdmull",
-  0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
-  let Inst{11} = Imm{2}; // h
-  let Inst{21} = Imm{1}; // l
-  let Inst{20} = Imm{0}; // m
-  let Inst{19-16} = MRm{3-0};
-}
-def SQDMULLdsv_2S : NeonI_ScalarXIndexedElemArith<"sqdmull",
-  0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
-  let Inst{11} = 0b0;    // h
-  let Inst{21} = Imm{0}; // l
-  let Inst{20-16} = MRm;
-}
-def SQDMULLdsv_4S : NeonI_ScalarXIndexedElemArith<"sqdmull",
-  0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
-  let Inst{11} = Imm{1};    // h
-  let Inst{21} = Imm{0};    // l
-  let Inst{20-16} = MRm;
-}
-
-multiclass Neon_ScalarXIndexedElem_MUL_Patterns<
-  SDPatternOperator opnode,
-  Instruction INST,
-  ValueType ResTy, RegisterClass FPRC,
-  ValueType OpVTy, ValueType OpTy,
-  ValueType VecOpTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
-
-  def  : Pat<(ResTy (opnode (OpVTy FPRC:$Rn),
-               (OpVTy (scalar_to_vector
-                 (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))))),
-             (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
-
-  def  : Pat<(ResTy (opnode (OpVTy FPRC:$Rn),
-               (OpVTy (extract_subvector (VecOpTy VPRC:$MRm), OpImm:$Imm)))),
-             (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
-
-  //swapped operands
-  def  : Pat<(ResTy (opnode
-               (OpVTy (scalar_to_vector
-                 (ExTy (vector_extract (VecOpTy VPRC:$MRm), OpImm:$Imm)))),
-                 (OpVTy FPRC:$Rn))),
-             (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
-
-  def  : Pat<(ResTy (opnode
-               (OpVTy (extract_subvector (VecOpTy VPRC:$MRm), OpImm:$Imm)),
-               (OpVTy FPRC:$Rn))),
-             (ResTy (INST (OpVTy FPRC:$Rn), (VecOpTy VPRC:$MRm), OpImm:$Imm))>;
-}
-
-
-// Patterns for Scalar Signed saturating doubling
-// multiply long (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
-  SQDMULLshv_4H, v1i32, FPR16, v1i16, i16, v4i16,
-  i32, VPR64Lo, neon_uimm2_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
-  SQDMULLshv_8H, v1i32, FPR16, v1i16, i16, v8i16,
-  i32, VPR128Lo, neon_uimm3_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
-  SQDMULLdsv_2S, v1i64, FPR32, v1i32, i32, v2i32,
-  i32, VPR64Lo, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmull,
-  SQDMULLdsv_4S, v1i64, FPR32, v1i32, i32, v4i32,
-  i32, VPR128Lo, neon_uimm2_bare>;
-
-// Scalar Signed saturating doubling multiply-add long (scalar, by element)
-def SQDMLALshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
-  0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
-  let Inst{11} = 0b0; // h
-  let Inst{21} = Imm{1}; // l
-  let Inst{20} = Imm{0}; // m
-  let Inst{19-16} = MRm{3-0};
-}
-def SQDMLALshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
-  0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
-  let Inst{11} = Imm{2}; // h
-  let Inst{21} = Imm{1}; // l
-  let Inst{20} = Imm{0}; // m
-  let Inst{19-16} = MRm{3-0};
-}
-def SQDMLALdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
-  0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
-  let Inst{11} = 0b0;    // h
-  let Inst{21} = Imm{0}; // l
-  let Inst{20-16} = MRm;
-}
-def SQDMLALdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
-  0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
-  let Inst{11} = Imm{1};    // h
-  let Inst{21} = Imm{0};    // l
-  let Inst{20-16} = MRm;
-}
-
-// Scalar Signed saturating doubling
-// multiply-subtract long (scalar, by element)
-def SQDMLSLshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
-  0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
-  let Inst{11} = 0b0; // h
-  let Inst{21} = Imm{1}; // l
-  let Inst{20} = Imm{0}; // m
-  let Inst{19-16} = MRm{3-0};
-}
-def SQDMLSLshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
-  0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
-  let Inst{11} = Imm{2}; // h
-  let Inst{21} = Imm{1}; // l
-  let Inst{20} = Imm{0}; // m
-  let Inst{19-16} = MRm{3-0};
-}
-def SQDMLSLdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
-  0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
-  let Inst{11} = 0b0;    // h
-  let Inst{21} = Imm{0}; // l
-  let Inst{20-16} = MRm;
-}
-def SQDMLSLdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
-  0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
-  let Inst{11} = Imm{1};    // h
-  let Inst{21} = Imm{0};    // l
-  let Inst{20-16} = MRm;
-}
-
-multiclass Neon_ScalarXIndexedElem_MLAL_Patterns<
-  SDPatternOperator opnode,
-  SDPatternOperator coreopnode,
-  Instruction INST,
-  ValueType ResTy, RegisterClass ResFPRC, RegisterClass FPRC,
-  ValueType OpTy,
-  ValueType OpVTy, ValueType ExTy, RegisterOperand VPRC, Operand OpImm> {
-
-  def  : Pat<(ResTy (opnode
-               (ResTy ResFPRC:$Ra),
-               (ResTy (coreopnode (OpTy FPRC:$Rn),
-                 (OpTy (scalar_to_vector
-                   (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))))))),
-             (ResTy (INST (ResTy ResFPRC:$Ra),
-               (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
-
-  def  : Pat<(ResTy (opnode
-               (ResTy ResFPRC:$Ra),
-               (ResTy (coreopnode (OpTy FPRC:$Rn),
-                 (OpTy (extract_subvector (OpVTy VPRC:$MRm), OpImm:$Imm)))))),
-             (ResTy (INST (ResTy ResFPRC:$Ra),
-               (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
-
-  // swapped operands
-  def  : Pat<(ResTy (opnode
-               (ResTy ResFPRC:$Ra),
-               (ResTy (coreopnode
-                 (OpTy (scalar_to_vector
-                   (ExTy (vector_extract (OpVTy VPRC:$MRm), OpImm:$Imm)))),
-                 (OpTy FPRC:$Rn))))),
-             (ResTy (INST (ResTy ResFPRC:$Ra),
-               (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
-
-  def  : Pat<(ResTy (opnode
-               (ResTy ResFPRC:$Ra),
-               (ResTy (coreopnode
-                 (OpTy (extract_subvector (OpVTy VPRC:$MRm), OpImm:$Imm)),
-                 (OpTy FPRC:$Rn))))),
-             (ResTy (INST (ResTy ResFPRC:$Ra),
-               (OpTy FPRC:$Rn), (OpVTy VPRC:$MRm), OpImm:$Imm))>;
-}
-
-// Patterns for Scalar Signed saturating
-// doubling multiply-add long (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
-  int_arm_neon_vqdmull, SQDMLALshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
-  i32, VPR64Lo, neon_uimm2_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
-  int_arm_neon_vqdmull, SQDMLALshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
-  i32, VPR128Lo, neon_uimm3_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
-  int_arm_neon_vqdmull, SQDMLALdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
-  i32, VPR64Lo, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqadds,
-  int_arm_neon_vqdmull, SQDMLALdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
-  i32, VPR128Lo, neon_uimm2_bare>;
-
-// Patterns for Scalar Signed saturating
-// doubling multiply-sub long (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
-  int_arm_neon_vqdmull, SQDMLSLshv_4H, v1i32, FPR32, FPR16, v1i16, v4i16,
-  i32, VPR64Lo, neon_uimm2_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
-  int_arm_neon_vqdmull, SQDMLSLshv_8H, v1i32, FPR32, FPR16, v1i16, v8i16,
-  i32, VPR128Lo, neon_uimm3_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
-  int_arm_neon_vqdmull, SQDMLSLdsv_2S, v1i64, FPR64, FPR32, v1i32, v2i32,
-  i32, VPR64Lo, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MLAL_Patterns<int_arm_neon_vqsubs,
-  int_arm_neon_vqdmull, SQDMLSLdsv_4S, v1i64, FPR64, FPR32, v1i32, v4i32,
-  i32, VPR128Lo, neon_uimm2_bare>;
-
-// Scalar Signed saturating doubling multiply returning
-// high half (scalar, by element)
-def SQDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
-  0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
-  let Inst{11} = 0b0; // h
-  let Inst{21} = Imm{1}; // l
-  let Inst{20} = Imm{0}; // m
-  let Inst{19-16} = MRm{3-0};
-}
-def SQDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
-  0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
-  let Inst{11} = Imm{2}; // h
-  let Inst{21} = Imm{1}; // l
-  let Inst{20} = Imm{0}; // m
-  let Inst{19-16} = MRm{3-0};
-}
-def SQDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
-  0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
-  let Inst{11} = 0b0;    // h
-  let Inst{21} = Imm{0}; // l
-  let Inst{20-16} = MRm;
-}
-def SQDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
-  0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
-  let Inst{11} = Imm{1};    // h
-  let Inst{21} = Imm{0};    // l
-  let Inst{20-16} = MRm;
-}
-
-// Patterns for Scalar Signed saturating doubling multiply returning
-// high half (scalar, by element)
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
-  SQDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16,
-  i32, VPR64Lo, neon_uimm2_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
-  SQDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16,
-  i32, VPR128Lo, neon_uimm3_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
-  SQDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32,
-  i32, VPR64Lo, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqdmulh,
-  SQDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32,
-  i32, VPR128Lo, neon_uimm2_bare>;
-
-// Scalar Signed saturating rounding doubling multiply
-// returning high half (scalar, by element)
-def SQRDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
-  0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
-  let Inst{11} = 0b0; // h
-  let Inst{21} = Imm{1}; // l
-  let Inst{20} = Imm{0}; // m
-  let Inst{19-16} = MRm{3-0};
-}
-def SQRDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
-  0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
-  let Inst{11} = Imm{2}; // h
-  let Inst{21} = Imm{1}; // l
-  let Inst{20} = Imm{0}; // m
-  let Inst{19-16} = MRm{3-0};
-}
-def SQRDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
-  0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
-  let Inst{11} = 0b0;    // h
-  let Inst{21} = Imm{0}; // l
-  let Inst{20-16} = MRm;
-}
-def SQRDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
-  0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
-  let Inst{11} = Imm{1};    // h
-  let Inst{21} = Imm{0};    // l
-  let Inst{20-16} = MRm;
-}
-
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
-  SQRDMULHhhv_4H, v1i16, FPR16, v1i16, i16, v4i16, i32,
-  VPR64Lo, neon_uimm2_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
-  SQRDMULHhhv_8H, v1i16, FPR16, v1i16, i16, v8i16, i32,
-  VPR128Lo, neon_uimm3_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
-  SQRDMULHssv_2S, v1i32, FPR32, v1i32, i32, v2i32, i32,
-  VPR64Lo, neon_uimm1_bare>;
-defm : Neon_ScalarXIndexedElem_MUL_Patterns<int_arm_neon_vqrdmulh,
-  SQRDMULHssv_4S, v1i32, FPR32, v1i32, i32, v4i32, i32,
-  VPR128Lo, neon_uimm2_bare>;
-
-// Scalar general arithmetic operation
-class Neon_Scalar_GeneralMath2D_pattern<SDPatternOperator opnode,
-                                        Instruction INST> 
-    : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))), (INST FPR64:$Rn)>;
-
-class Neon_Scalar_GeneralMath3D_pattern<SDPatternOperator opnode,
-                                        Instruction INST> 
-    : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
-          (INST FPR64:$Rn, FPR64:$Rm)>;
-
-class Neon_Scalar_GeneralMath4D_pattern<SDPatternOperator opnode,
-                                        Instruction INST> 
-    : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm),
-              (v1f64 FPR64:$Ra))),
-          (INST FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
-
-def : Neon_Scalar_GeneralMath3D_pattern<fadd, FADDddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<fmul, FMULddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<fsub, FSUBddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<fdiv, FDIVddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vabds, FABDddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vmaxs, FMAXddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<int_arm_neon_vmins, FMINddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<int_aarch64_neon_vmaxnm, FMAXNMddd>;
-def : Neon_Scalar_GeneralMath3D_pattern<int_aarch64_neon_vminnm, FMINNMddd>;
-
-def : Neon_Scalar_GeneralMath2D_pattern<fabs, FABSdd>;
-def : Neon_Scalar_GeneralMath2D_pattern<fneg, FNEGdd>;
-
-def : Neon_Scalar_GeneralMath4D_pattern<fma, FMADDdddd>;
-def : Neon_Scalar_GeneralMath4D_pattern<fmsub, FMSUBdddd>;
-
-// Scalar Copy - DUP element to scalar
-class NeonI_Scalar_DUP<string asmop, string asmlane,
-                       RegisterClass ResRC, RegisterOperand VPRC,
-                       Operand OpImm>
-  : NeonI_ScalarCopy<(outs ResRC:$Rd), (ins VPRC:$Rn, OpImm:$Imm),
-                     asmop # "\t$Rd, $Rn." # asmlane # "[$Imm]",
-                     [],
-                     NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]> {
-  bits<4> Imm;
-}
-
-def DUPbv_B : NeonI_Scalar_DUP<"dup", "b", FPR8, VPR128, neon_uimm4_bare> {
-  let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-def DUPhv_H : NeonI_Scalar_DUP<"dup", "h", FPR16, VPR128, neon_uimm3_bare> {
-  let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-def DUPsv_S : NeonI_Scalar_DUP<"dup", "s", FPR32, VPR128, neon_uimm2_bare> {
-  let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-def DUPdv_D : NeonI_Scalar_DUP<"dup", "d", FPR64, VPR128, neon_uimm1_bare> {
-  let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
-}
-
-def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 0)),
-          (f32 (EXTRACT_SUBREG (v4f32 VPR128:$Rn), sub_32))>;
-def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 1)),
-          (f32 (DUPsv_S (v4f32 VPR128:$Rn), 1))>;
-def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 2)),
-          (f32 (DUPsv_S (v4f32 VPR128:$Rn), 2))>;
-def : Pat<(f32 (vector_extract (v4f32 VPR128:$Rn), 3)),
-          (f32 (DUPsv_S (v4f32 VPR128:$Rn), 3))>;
-
-def : Pat<(f64 (vector_extract (v2f64 VPR128:$Rn), 0)),
-          (f64 (EXTRACT_SUBREG (v2f64 VPR128:$Rn), sub_64))>;
-def : Pat<(f64 (vector_extract (v2f64 VPR128:$Rn), 1)),
-          (f64 (DUPdv_D (v2f64 VPR128:$Rn), 1))>;
-
-def : Pat<(f32 (vector_extract (v2f32 VPR64:$Rn), 0)),
-          (f32 (EXTRACT_SUBREG (v2f32 VPR64:$Rn), sub_32))>;
-def : Pat<(f32 (vector_extract (v2f32 VPR64:$Rn), 1)),
-          (f32 (DUPsv_S (v4f32 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-            1))>;
-
-def : Pat<(f64 (vector_extract (v1f64 VPR64:$Rn), 0)),
-          (f64 (EXTRACT_SUBREG (v1f64 VPR64:$Rn), sub_64))>;
-
-multiclass NeonI_Scalar_DUP_Ext_Vec_pattern<Instruction DUPI,
-  ValueType ResTy, ValueType OpTy,Operand OpLImm,
-  ValueType NOpTy, ValueType ExTy, Operand OpNImm> {
-
-  def : Pat<(ResTy (extract_subvector (OpTy VPR128:$Rn), OpLImm:$Imm)),
-            (ResTy (DUPI VPR128:$Rn, OpLImm:$Imm))>;
-
-  def : Pat<(ResTy (extract_subvector (NOpTy VPR64:$Rn), OpNImm:$Imm)),
-            (ResTy (DUPI
-              (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-                OpNImm:$Imm))>;
-}
-
-// Patterns for extract subvectors of v1ix data using scalar DUP instructions.
-defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPbv_B, v1i8, v16i8, neon_uimm4_bare,
-                                        v8i8, v16i8, neon_uimm3_bare>;
-defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPhv_H, v1i16, v8i16, neon_uimm3_bare,
-                                        v4i16, v8i16, neon_uimm2_bare>;
-defm : NeonI_Scalar_DUP_Ext_Vec_pattern<DUPsv_S, v1i32, v4i32, neon_uimm2_bare,
-                                        v2i32, v4i32, neon_uimm1_bare>;
-
-multiclass NeonI_Scalar_DUP_Copy_pattern1<Instruction DUPI, ValueType ResTy,
-                                          ValueType OpTy, ValueType ElemTy,
-                                          Operand OpImm, ValueType OpNTy,
-                                          ValueType ExTy, Operand OpNImm> {
-
-  def : Pat<(ResTy (vector_insert (ResTy undef),
-              (ElemTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
-              (neon_uimm0_bare:$Imm))),
-            (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
-
-  def : Pat<(ResTy (vector_insert (ResTy undef),
-              (ElemTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
-              (OpNImm:$Imm))),
-            (ResTy (DUPI
-              (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-              OpNImm:$Imm))>;
-}
-
-multiclass NeonI_Scalar_DUP_Copy_pattern2<Instruction DUPI, ValueType ResTy,
-                                          ValueType OpTy, ValueType ElemTy,
-                                          Operand OpImm, ValueType OpNTy,
-                                          ValueType ExTy, Operand OpNImm> {
-
-  def : Pat<(ResTy (scalar_to_vector
-              (ElemTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)))),
-            (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
-
-  def : Pat<(ResTy (scalar_to_vector
-              (ElemTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)))),
-            (ResTy (DUPI
-              (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-              OpNImm:$Imm))>;
-}
-
-// Patterns for vector copy to v1ix and v1fx vectors using scalar DUP
-// instructions.
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPdv_D,
-  v1i64, v2i64, i64, neon_uimm1_bare,
-  v1i64, v2i64, neon_uimm0_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPsv_S,
-  v1i32, v4i32, i32, neon_uimm2_bare,
-  v2i32, v4i32, neon_uimm1_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPhv_H,
-  v1i16, v8i16, i32, neon_uimm3_bare,
-  v4i16, v8i16, neon_uimm2_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern1<DUPbv_B,
-  v1i8, v16i8, i32, neon_uimm4_bare,
-  v8i8, v16i8, neon_uimm3_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPdv_D,
-  v1i64, v2i64, i64, neon_uimm1_bare,
-  v1i64, v2i64, neon_uimm0_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPsv_S,
-  v1i32, v4i32, i32, neon_uimm2_bare,
-  v2i32, v4i32, neon_uimm1_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPhv_H,
-  v1i16, v8i16, i32, neon_uimm3_bare,
-  v4i16, v8i16, neon_uimm2_bare>;
-defm : NeonI_Scalar_DUP_Copy_pattern2<DUPbv_B,
-  v1i8, v16i8, i32, neon_uimm4_bare,
-  v8i8, v16i8, neon_uimm3_bare>;
-
-multiclass NeonI_Scalar_DUP_alias<string asmop, string asmlane,
-                                  Instruction DUPI, Operand OpImm,
-                                  RegisterClass ResRC> {
-  def : NeonInstAlias<!strconcat(asmop, "$Rd, $Rn" # asmlane # "[$Imm]"),
-          (DUPI ResRC:$Rd, VPR128:$Rn, OpImm:$Imm), 0b0>;
-}
-
-// Aliases for Scalar copy - DUP element (scalar)
-// FIXME: This is actually the preferred syntax but TableGen can't deal with
-// custom printing of aliases.
-defm : NeonI_Scalar_DUP_alias<"mov", ".b", DUPbv_B, neon_uimm4_bare, FPR8>;
-defm : NeonI_Scalar_DUP_alias<"mov", ".h", DUPhv_H, neon_uimm3_bare, FPR16>;
-defm : NeonI_Scalar_DUP_alias<"mov", ".s", DUPsv_S, neon_uimm2_bare, FPR32>;
-defm : NeonI_Scalar_DUP_alias<"mov", ".d", DUPdv_D, neon_uimm1_bare, FPR64>;
-
-multiclass NeonI_SDUP<PatFrag GetLow, PatFrag GetHigh, ValueType ResTy,
-                      ValueType OpTy> {
-  def : Pat<(ResTy (GetLow VPR128:$Rn)),
-            (ResTy (EXTRACT_SUBREG (OpTy VPR128:$Rn), sub_64))>;
-  def : Pat<(ResTy (GetHigh VPR128:$Rn)),
-            (ResTy (DUPdv_D (OpTy VPR128:$Rn), 1))>;
-}
-
-defm : NeonI_SDUP<Neon_Low16B, Neon_High16B, v8i8, v16i8>;
-defm : NeonI_SDUP<Neon_Low8H, Neon_High8H, v4i16, v8i16>;
-defm : NeonI_SDUP<Neon_Low4S, Neon_High4S, v2i32, v4i32>;
-defm : NeonI_SDUP<Neon_Low2D, Neon_High2D, v1i64, v2i64>;
-defm : NeonI_SDUP<Neon_Low4float, Neon_High4float, v2f32, v4f32>;
-defm : NeonI_SDUP<Neon_Low2double, Neon_High2double, v1f64, v2f64>;
-
-// The following is for sext/zext from v1xx to v1xx
-multiclass NeonI_ext<string prefix, SDNode ExtOp> {
-  // v1i32 -> v1i64
-  def : Pat<(v1i64 (ExtOp (v1i32 FPR32:$Rn))),
-            (EXTRACT_SUBREG 
-              (v2i64 (!cast<Instruction>(prefix # "_2S")
-                (v2i32 (SUBREG_TO_REG (i64 0), $Rn, sub_32)), 0)),
-              sub_64)>;
-  
-  // v1i16 -> v1i32
-  def : Pat<(v1i32 (ExtOp (v1i16 FPR16:$Rn))),
-            (EXTRACT_SUBREG 
-              (v4i32 (!cast<Instruction>(prefix # "_4H")
-                (v4i16 (SUBREG_TO_REG (i64 0), $Rn, sub_16)), 0)),
-              sub_32)>;
-  
-  // v1i8 -> v1i16
-  def : Pat<(v1i16 (ExtOp (v1i8 FPR8:$Rn))),
-            (EXTRACT_SUBREG 
-              (v8i16 (!cast<Instruction>(prefix # "_8B")
-                (v8i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)), 0)),
-              sub_16)>;
-}
-
-defm NeonI_zext : NeonI_ext<"USHLLvvi", zext>;
-defm NeonI_sext : NeonI_ext<"SSHLLvvi", sext>;
-
-// zext v1i8 -> v1i32
-def : Pat<(v1i32 (zext (v1i8 FPR8:$Rn))),
-          (v1i32 (EXTRACT_SUBREG
-            (v1i64 (SUBREG_TO_REG (i64 0),
-              (v1i8 (DUPbv_B
-                (v16i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)),
-                0)),
-              sub_8)),
-            sub_32))>;
-
-// zext v1i8 -> v1i64
-def : Pat<(v1i64 (zext (v1i8 FPR8:$Rn))),
-          (v1i64 (SUBREG_TO_REG (i64 0),
-            (v1i8 (DUPbv_B
-              (v16i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)),
-              0)),
-            sub_8))>;
-
-// zext v1i16 -> v1i64
-def : Pat<(v1i64 (zext (v1i16 FPR16:$Rn))),
-          (v1i64 (SUBREG_TO_REG (i64 0),
-            (v1i16 (DUPhv_H
-              (v8i16 (SUBREG_TO_REG (i64 0), $Rn, sub_16)),
-              0)),
-            sub_16))>;
-
-// sext v1i8 -> v1i32
-def : Pat<(v1i32 (sext (v1i8 FPR8:$Rn))),
-          (EXTRACT_SUBREG
-            (v4i32 (SSHLLvvi_4H
-              (v4i16 (SUBREG_TO_REG (i64 0),
-                (v1i16 (EXTRACT_SUBREG 
-                  (v8i16 (SSHLLvvi_8B
-                    (v8i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)), 0)),
-                  sub_16)),
-                sub_16)), 0)),
-            sub_32)>;
-              
-// sext v1i8 -> v1i64
-def : Pat<(v1i64 (sext (v1i8 FPR8:$Rn))),
-          (EXTRACT_SUBREG 
-            (v2i64 (SSHLLvvi_2S
-              (v2i32 (SUBREG_TO_REG (i64 0),
-                (v1i32 (EXTRACT_SUBREG
-                  (v4i32 (SSHLLvvi_4H
-                    (v4i16 (SUBREG_TO_REG (i64 0),
-                      (v1i16 (EXTRACT_SUBREG 
-                        (v8i16 (SSHLLvvi_8B
-                          (v8i8 (SUBREG_TO_REG (i64 0), $Rn, sub_8)), 0)),
-                        sub_16)),
-                      sub_16)), 0)),
-                  sub_32)),
-                sub_32)), 0)),
-            sub_64)>;
-
-  
-// sext v1i16 -> v1i64
-def : Pat<(v1i64 (sext (v1i16 FPR16:$Rn))),
-          (EXTRACT_SUBREG
-            (v2i64 (SSHLLvvi_2S
-              (v2i32 (SUBREG_TO_REG (i64 0),
-                (v1i32 (EXTRACT_SUBREG 
-                  (v4i32 (SSHLLvvi_4H
-                    (v4i16 (SUBREG_TO_REG (i64 0), $Rn, sub_16)), 0)),
-                  sub_32)),
-                sub_32)), 0)),
-            sub_64)>;
-
-//===----------------------------------------------------------------------===//
-// Non-Instruction Patterns
-//===----------------------------------------------------------------------===//
-
-// 64-bit vector bitcasts...
-
-def : Pat<(v1i64 (bitconvert (v8i8  VPR64:$src))), (v1i64 VPR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v8i8  VPR64:$src))), (v2f32 VPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v8i8  VPR64:$src))), (v2i32 VPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v8i8  VPR64:$src))), (v4i16 VPR64:$src)>;
-
-def : Pat<(v1i64 (bitconvert (v4i16  VPR64:$src))), (v1i64 VPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v4i16  VPR64:$src))), (v2i32 VPR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v4i16  VPR64:$src))), (v2f32 VPR64:$src)>;
-def : Pat<(v8i8  (bitconvert (v4i16  VPR64:$src))), (v8i8 VPR64:$src)>;
-
-def : Pat<(v1i64 (bitconvert (v2i32  VPR64:$src))), (v1i64 VPR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v2i32  VPR64:$src))), (v2f32 VPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v2i32  VPR64:$src))), (v4i16 VPR64:$src)>;
-def : Pat<(v8i8  (bitconvert (v2i32  VPR64:$src))), (v8i8 VPR64:$src)>;
-
-def : Pat<(v1i64 (bitconvert (v2f32  VPR64:$src))), (v1i64 VPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v2f32  VPR64:$src))), (v2i32 VPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v2f32  VPR64:$src))), (v4i16 VPR64:$src)>;
-def : Pat<(v8i8  (bitconvert (v2f32  VPR64:$src))), (v8i8 VPR64:$src)>;
-
-def : Pat<(v2f32 (bitconvert (v1i64  VPR64:$src))), (v2f32 VPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
-def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
-
-def : Pat<(v1i64 (bitconvert (v1f64  VPR64:$src))), (v1i64 VPR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v1f64  VPR64:$src))), (v2f32 VPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v1f64  VPR64:$src))), (v2i32 VPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v1f64  VPR64:$src))), (v4i16 VPR64:$src)>;
-def : Pat<(v8i8 (bitconvert (v1f64  VPR64:$src))), (v8i8 VPR64:$src)>;
-def : Pat<(f64   (bitconvert (v1f64  VPR64:$src))), (f64 VPR64:$src)>;
-
-def : Pat<(v1f64 (bitconvert (v1i64  VPR64:$src))), (v1f64 VPR64:$src)>;
-def : Pat<(v1f64 (bitconvert (v2f32  VPR64:$src))), (v1f64 VPR64:$src)>;
-def : Pat<(v1f64 (bitconvert (v2i32  VPR64:$src))), (v1f64 VPR64:$src)>;
-def : Pat<(v1f64 (bitconvert (v4i16  VPR64:$src))), (v1f64 VPR64:$src)>;
-def : Pat<(v1f64 (bitconvert (v8i8  VPR64:$src))), (v1f64 VPR64:$src)>;
-def : Pat<(v1f64 (bitconvert (f64  VPR64:$src))), (v1f64 VPR64:$src)>;
-
-// ..and 128-bit vector bitcasts...
-
-def : Pat<(v2f64 (bitconvert (v16i8  VPR128:$src))), (v2f64 VPR128:$src)>;
-def : Pat<(v2i64 (bitconvert (v16i8  VPR128:$src))), (v2i64 VPR128:$src)>;
-def : Pat<(v4f32 (bitconvert (v16i8  VPR128:$src))), (v4f32 VPR128:$src)>;
-def : Pat<(v4i32 (bitconvert (v16i8  VPR128:$src))), (v4i32 VPR128:$src)>;
-def : Pat<(v8i16 (bitconvert (v16i8  VPR128:$src))), (v8i16 VPR128:$src)>;
-
-def : Pat<(v2f64 (bitconvert (v8i16  VPR128:$src))), (v2f64 VPR128:$src)>;
-def : Pat<(v2i64 (bitconvert (v8i16  VPR128:$src))), (v2i64 VPR128:$src)>;
-def : Pat<(v4i32 (bitconvert (v8i16  VPR128:$src))), (v4i32 VPR128:$src)>;
-def : Pat<(v4f32 (bitconvert (v8i16  VPR128:$src))), (v4f32 VPR128:$src)>;
-def : Pat<(v16i8 (bitconvert (v8i16  VPR128:$src))), (v16i8 VPR128:$src)>;
-
-def : Pat<(v2f64 (bitconvert (v4i32  VPR128:$src))), (v2f64 VPR128:$src)>;
-def : Pat<(v2i64 (bitconvert (v4i32  VPR128:$src))), (v2i64 VPR128:$src)>;
-def : Pat<(v4f32 (bitconvert (v4i32  VPR128:$src))), (v4f32 VPR128:$src)>;
-def : Pat<(v8i16 (bitconvert (v4i32  VPR128:$src))), (v8i16 VPR128:$src)>;
-def : Pat<(v16i8 (bitconvert (v4i32  VPR128:$src))), (v16i8 VPR128:$src)>;
-
-def : Pat<(v2f64 (bitconvert (v4f32  VPR128:$src))), (v2f64 VPR128:$src)>;
-def : Pat<(v2i64 (bitconvert (v4f32  VPR128:$src))), (v2i64 VPR128:$src)>;
-def : Pat<(v4i32 (bitconvert (v4f32  VPR128:$src))), (v4i32 VPR128:$src)>;
-def : Pat<(v8i16 (bitconvert (v4f32  VPR128:$src))), (v8i16 VPR128:$src)>;
-def : Pat<(v16i8 (bitconvert (v4f32  VPR128:$src))), (v16i8 VPR128:$src)>;
-
-def : Pat<(v2f64 (bitconvert (v2i64  VPR128:$src))), (v2f64 VPR128:$src)>;
-def : Pat<(v4f32 (bitconvert (v2i64  VPR128:$src))), (v4f32 VPR128:$src)>;
-def : Pat<(v4i32 (bitconvert (v2i64  VPR128:$src))), (v4i32 VPR128:$src)>;
-def : Pat<(v8i16 (bitconvert (v2i64  VPR128:$src))), (v8i16 VPR128:$src)>;
-def : Pat<(v16i8 (bitconvert (v2i64  VPR128:$src))), (v16i8 VPR128:$src)>;
-
-def : Pat<(v2i64 (bitconvert (v2f64  VPR128:$src))), (v2i64 VPR128:$src)>;
-def : Pat<(v4f32 (bitconvert (v2f64  VPR128:$src))), (v4f32 VPR128:$src)>;
-def : Pat<(v4i32 (bitconvert (v2f64  VPR128:$src))), (v4i32 VPR128:$src)>;
-def : Pat<(v8i16 (bitconvert (v2f64  VPR128:$src))), (v8i16 VPR128:$src)>;
-def : Pat<(v16i8 (bitconvert (v2f64  VPR128:$src))), (v16i8 VPR128:$src)>;
-
-// ...and scalar bitcasts...
-def : Pat<(f16 (bitconvert (v1i16  FPR16:$src))), (f16 FPR16:$src)>;
-def : Pat<(f32 (bitconvert (v1i32  FPR32:$src))), (f32 FPR32:$src)>;
-def : Pat<(f64 (bitconvert (v1i64  FPR64:$src))), (f64 FPR64:$src)>;
-def : Pat<(f64 (bitconvert (v1f64  FPR64:$src))), (f64 FPR64:$src)>;
-
-def : Pat<(i64 (bitconvert (v1i64  FPR64:$src))), (FMOVxd $src)>;
-def : Pat<(i64 (bitconvert (v1f64  FPR64:$src))), (FMOVxd $src)>;
-def : Pat<(i64 (bitconvert (v2i32  FPR64:$src))), (FMOVxd $src)>;
-def : Pat<(i64 (bitconvert (v2f32  FPR64:$src))), (FMOVxd $src)>;
-def : Pat<(i64 (bitconvert (v4i16  FPR64:$src))), (FMOVxd $src)>;
-def : Pat<(i64 (bitconvert (v8i8  FPR64:$src))), (FMOVxd $src)>;
-
-def : Pat<(i32 (bitconvert (v1i32  FPR32:$src))), (FMOVws $src)>;
-
-def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
-
-def : Pat<(f64   (bitconvert (v8i8  VPR64:$src))), (f64 VPR64:$src)>;
-def : Pat<(f64   (bitconvert (v4i16  VPR64:$src))), (f64 VPR64:$src)>;
-def : Pat<(f64   (bitconvert (v2i32  VPR64:$src))), (f64 VPR64:$src)>;
-def : Pat<(f64   (bitconvert (v2f32  VPR64:$src))), (f64 VPR64:$src)>;
-def : Pat<(f64   (bitconvert (v1i64  VPR64:$src))), (f64 VPR64:$src)>;
-
-def : Pat<(f128  (bitconvert (v16i8  VPR128:$src))), (f128 VPR128:$src)>;
-def : Pat<(f128  (bitconvert (v8i16  VPR128:$src))), (f128 VPR128:$src)>;
-def : Pat<(f128  (bitconvert (v4i32  VPR128:$src))), (f128 VPR128:$src)>;
-def : Pat<(f128  (bitconvert (v2i64  VPR128:$src))), (f128 VPR128:$src)>;
-def : Pat<(f128  (bitconvert (v4f32  VPR128:$src))), (f128 VPR128:$src)>;
-def : Pat<(f128  (bitconvert (v2f64  VPR128:$src))), (f128 VPR128:$src)>;
-
-def : Pat<(v1i16 (bitconvert (f16  FPR16:$src))), (v1i16 FPR16:$src)>;
-def : Pat<(v1i32 (bitconvert (f32  FPR32:$src))), (v1i32 FPR32:$src)>;
-def : Pat<(v1i64 (bitconvert (f64  FPR64:$src))), (v1i64 FPR64:$src)>;
-def : Pat<(v1f64 (bitconvert (f64  FPR64:$src))), (v1f64 FPR64:$src)>;
-
-def : Pat<(v1i64 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
-def : Pat<(v1f64 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
-def : Pat<(v2i32 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
-def : Pat<(v2f32 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
-def : Pat<(v4i16 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
-def : Pat<(v8i8 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
-
-def : Pat<(v1i32 (bitconvert (i32  GPR32:$src))), (FMOVsw $src)>;
-
-def : Pat<(v8i8   (bitconvert (f64   FPR64:$src))), (v8i8 FPR64:$src)>;
-def : Pat<(v4i16  (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
-def : Pat<(v2i32  (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
-def : Pat<(v2f32  (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
-def : Pat<(v1i64  (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
-
-def : Pat<(v16i8  (bitconvert (f128   FPR128:$src))), (v16i8 FPR128:$src)>;
-def : Pat<(v8i16  (bitconvert (f128   FPR128:$src))), (v8i16 FPR128:$src)>;
-def : Pat<(v4i32  (bitconvert (f128   FPR128:$src))), (v4i32 FPR128:$src)>;
-def : Pat<(v2i64  (bitconvert (f128   FPR128:$src))), (v2i64 FPR128:$src)>;
-def : Pat<(v4f32  (bitconvert (f128   FPR128:$src))), (v4f32 FPR128:$src)>;
-def : Pat<(v2f64  (bitconvert (f128   FPR128:$src))), (v2f64 FPR128:$src)>;
-
-// Scalar Three Same
-
-def neon_uimm3 : Operand<i64>,
-                   ImmLeaf<i64, [{return Imm < 8;}]> {
-  let ParserMatchClass = uimm3_asmoperand;
-  let PrintMethod = "printUImmHexOperand";
-}
-
-def neon_uimm4 : Operand<i64>,
-                   ImmLeaf<i64, [{return Imm < 16;}]> {
-  let ParserMatchClass = uimm4_asmoperand;
-  let PrintMethod = "printUImmHexOperand";
-}
-
-// Bitwise Extract
-class NeonI_Extract<bit q, bits<2> op2, string asmop,
-                    string OpS, RegisterOperand OpVPR, Operand OpImm>
-  : NeonI_BitExtract<q, op2, (outs OpVPR:$Rd),
-                     (ins OpVPR:$Rn, OpVPR:$Rm, OpImm:$Index),
-                     asmop # "\t$Rd." # OpS # ", $Rn." # OpS #
-                     ", $Rm." # OpS # ", $Index",
-                     [],
-                     NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>{
-  bits<4> Index;
-}
-
-def EXTvvvi_8b : NeonI_Extract<0b0, 0b00, "ext", "8b",
-                               VPR64, neon_uimm3> {
-  let Inst{14-11} = {0b0, Index{2}, Index{1}, Index{0}};
-}
-
-def EXTvvvi_16b: NeonI_Extract<0b1, 0b00, "ext", "16b",
-                               VPR128, neon_uimm4> {
-  let Inst{14-11} = Index;
-}
-
-class NI_Extract<ValueType OpTy, RegisterOperand OpVPR, Instruction INST,
-                 Operand OpImm>
-  : Pat<(OpTy (Neon_vextract (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm),
-                                 (i64 OpImm:$Imm))),
-              (INST OpVPR:$Rn, OpVPR:$Rm, OpImm:$Imm)>;
-
-def : NI_Extract<v8i8,  VPR64,  EXTvvvi_8b,  neon_uimm3>;
-def : NI_Extract<v4i16, VPR64,  EXTvvvi_8b,  neon_uimm3>;
-def : NI_Extract<v2i32, VPR64,  EXTvvvi_8b,  neon_uimm3>;
-def : NI_Extract<v1i64, VPR64,  EXTvvvi_8b,  neon_uimm3>;
-def : NI_Extract<v2f32, VPR64,  EXTvvvi_8b,  neon_uimm3>;
-def : NI_Extract<v1f64, VPR64,  EXTvvvi_8b,  neon_uimm3>;
-def : NI_Extract<v16i8, VPR128, EXTvvvi_16b, neon_uimm4>;
-def : NI_Extract<v8i16, VPR128, EXTvvvi_16b, neon_uimm4>;
-def : NI_Extract<v4i32, VPR128, EXTvvvi_16b, neon_uimm4>;
-def : NI_Extract<v2i64, VPR128, EXTvvvi_16b, neon_uimm4>;
-def : NI_Extract<v4f32, VPR128, EXTvvvi_16b, neon_uimm4>;
-def : NI_Extract<v2f64, VPR128, EXTvvvi_16b, neon_uimm4>;
-
-// Table lookup
-class NI_TBL<bit q, bits<2> op2, bits<2> len, bit op,
-             string asmop, string OpS, RegisterOperand OpVPR,
-             RegisterOperand VecList>
-  : NeonI_TBL<q, op2, len, op,
-              (outs OpVPR:$Rd), (ins VecList:$Rn, OpVPR:$Rm),
-              asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
-              [],
-              NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-// The vectors in look up table are always 16b
-multiclass NI_TBL_pat<bits<2> len, bit op, string asmop, string List> {
-  def _8b  : NI_TBL<0, 0b00, len, op, asmop, "8b", VPR64,
-                    !cast<RegisterOperand>(List # "16B_operand")>;
-
-  def _16b : NI_TBL<1, 0b00, len, op, asmop, "16b", VPR128,
-                    !cast<RegisterOperand>(List # "16B_operand")>;
-}
-
-defm TBL1 : NI_TBL_pat<0b00, 0b0, "tbl", "VOne">;
-defm TBL2 : NI_TBL_pat<0b01, 0b0, "tbl", "VPair">;
-defm TBL3 : NI_TBL_pat<0b10, 0b0, "tbl", "VTriple">;
-defm TBL4 : NI_TBL_pat<0b11, 0b0, "tbl", "VQuad">;
-
-// Table lookup extension
-class NI_TBX<bit q, bits<2> op2, bits<2> len, bit op,
-             string asmop, string OpS, RegisterOperand OpVPR,
-             RegisterOperand VecList>
-  : NeonI_TBL<q, op2, len, op,
-              (outs OpVPR:$Rd), (ins OpVPR:$src, VecList:$Rn, OpVPR:$Rm),
-              asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
-              [],
-              NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-}
-
-// The vectors in look up table are always 16b
-multiclass NI_TBX_pat<bits<2> len, bit op, string asmop, string List> {
-  def _8b  : NI_TBX<0, 0b00, len, op, asmop, "8b", VPR64,
-                    !cast<RegisterOperand>(List # "16B_operand")>;
-
-  def _16b : NI_TBX<1, 0b00, len, op, asmop, "16b", VPR128,
-                    !cast<RegisterOperand>(List # "16B_operand")>;
-}
-
-defm TBX1 : NI_TBX_pat<0b00, 0b1, "tbx", "VOne">;
-defm TBX2 : NI_TBX_pat<0b01, 0b1, "tbx", "VPair">;
-defm TBX3 : NI_TBX_pat<0b10, 0b1, "tbx", "VTriple">;
-defm TBX4 : NI_TBX_pat<0b11, 0b1, "tbx", "VQuad">;
-
-class NeonI_INS_main<string asmop, string Res, ValueType ResTy,
-                     RegisterClass OpGPR, ValueType OpTy, Operand OpImm>
-  : NeonI_copy<0b1, 0b0, 0b0011,
-               (outs VPR128:$Rd), (ins VPR128:$src, OpGPR:$Rn, OpImm:$Imm),
-               asmop # "\t$Rd." # Res # "[$Imm], $Rn",
-               [(set (ResTy VPR128:$Rd),
-                 (ResTy (vector_insert
-                   (ResTy VPR128:$src),
-                   (OpTy OpGPR:$Rn),
-                   (OpImm:$Imm))))],
-               NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-  bits<4> Imm;
-  let Constraints = "$src = $Rd";
-}
-
-//Insert element (vector, from main)
-def INSbw : NeonI_INS_main<"ins", "b", v16i8, GPR32, i32,
-                           neon_uimm4_bare> {
-  let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-def INShw : NeonI_INS_main<"ins", "h", v8i16, GPR32, i32,
-                           neon_uimm3_bare> {
-  let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-def INSsw : NeonI_INS_main<"ins", "s", v4i32, GPR32, i32,
-                           neon_uimm2_bare> {
-  let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-def INSdx : NeonI_INS_main<"ins", "d", v2i64, GPR64, i64,
-                           neon_uimm1_bare> {
-  let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
-}
-
-def : NeonInstAlias<"mov $Rd.b[$Imm], $Rn",
-                    (INSbw VPR128:$Rd, GPR32:$Rn, neon_uimm4_bare:$Imm), 0>;
-def : NeonInstAlias<"mov $Rd.h[$Imm], $Rn",
-                    (INShw VPR128:$Rd, GPR32:$Rn, neon_uimm3_bare:$Imm), 0>;
-def : NeonInstAlias<"mov $Rd.s[$Imm], $Rn",
-                    (INSsw VPR128:$Rd, GPR32:$Rn, neon_uimm2_bare:$Imm), 0>;
-def : NeonInstAlias<"mov $Rd.d[$Imm], $Rn",
-                    (INSdx VPR128:$Rd, GPR64:$Rn, neon_uimm1_bare:$Imm), 0>;
-
-class Neon_INS_main_pattern <ValueType ResTy,ValueType ExtResTy,
-                             RegisterClass OpGPR, ValueType OpTy,
-                             Operand OpImm, Instruction INS>
-  : Pat<(ResTy (vector_insert
-              (ResTy VPR64:$src),
-              (OpTy OpGPR:$Rn),
-              (OpImm:$Imm))),
-        (ResTy (EXTRACT_SUBREG
-          (ExtResTy (INS (ExtResTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
-            OpGPR:$Rn, OpImm:$Imm)), sub_64))>;
-
-def INSbw_pattern : Neon_INS_main_pattern<v8i8, v16i8, GPR32, i32,
-                                          neon_uimm3_bare, INSbw>;
-def INShw_pattern : Neon_INS_main_pattern<v4i16, v8i16, GPR32, i32,
-                                          neon_uimm2_bare, INShw>;
-def INSsw_pattern : Neon_INS_main_pattern<v2i32, v4i32, GPR32, i32,
-                                          neon_uimm1_bare, INSsw>;
-def INSdx_pattern : Neon_INS_main_pattern<v1i64, v2i64, GPR64, i64,
-                                          neon_uimm0_bare, INSdx>;
-
-class NeonI_INS_element<string asmop, string Res, Operand ResImm>
-  : NeonI_insert<0b1, 0b1,
-                 (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn,
-                 ResImm:$Immd, ResImm:$Immn),
-                 asmop # "\t$Rd." # Res # "[$Immd], $Rn." # Res # "[$Immn]",
-                 [],
-                 NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-  bits<4> Immd;
-  bits<4> Immn;
-}
-
-//Insert element (vector, from element)
-def INSELb : NeonI_INS_element<"ins", "b", neon_uimm4_bare> {
-  let Inst{20-16} = {Immd{3}, Immd{2}, Immd{1}, Immd{0}, 0b1};
-  let Inst{14-11} = {Immn{3}, Immn{2}, Immn{1}, Immn{0}};
-}
-def INSELh : NeonI_INS_element<"ins", "h", neon_uimm3_bare> {
-  let Inst{20-16} = {Immd{2}, Immd{1}, Immd{0}, 0b1, 0b0};
-  let Inst{14-11} = {Immn{2}, Immn{1}, Immn{0}, 0b0};
-  // bit 11 is unspecified, but should be set to zero.
-}
-def INSELs : NeonI_INS_element<"ins", "s", neon_uimm2_bare> {
-  let Inst{20-16} = {Immd{1}, Immd{0}, 0b1, 0b0, 0b0};
-  let Inst{14-11} = {Immn{1}, Immn{0}, 0b0, 0b0};
-  // bits 11-12 are unspecified, but should be set to zero.
-}
-def INSELd : NeonI_INS_element<"ins", "d", neon_uimm1_bare> {
-  let Inst{20-16} = {Immd, 0b1, 0b0, 0b0, 0b0};
-  let Inst{14-11} = {Immn{0}, 0b0, 0b0, 0b0};
-  // bits 11-13 are unspecified, but should be set to zero.
-}
-
-def : NeonInstAlias<"mov $Rd.b[$Immd], $Rn.b[$Immn]",
-                    (INSELb VPR128:$Rd, VPR128:$Rn,
-                      neon_uimm4_bare:$Immd, neon_uimm4_bare:$Immn), 0>;
-def : NeonInstAlias<"mov $Rd.h[$Immd], $Rn.h[$Immn]",
-                    (INSELh VPR128:$Rd, VPR128:$Rn,
-                      neon_uimm3_bare:$Immd, neon_uimm3_bare:$Immn), 0>;
-def : NeonInstAlias<"mov $Rd.s[$Immd], $Rn.s[$Immn]",
-                    (INSELs VPR128:$Rd, VPR128:$Rn,
-                      neon_uimm2_bare:$Immd, neon_uimm2_bare:$Immn), 0>;
-def : NeonInstAlias<"mov $Rd.d[$Immd], $Rn.d[$Immn]",
-                    (INSELd VPR128:$Rd, VPR128:$Rn,
-                      neon_uimm1_bare:$Immd, neon_uimm1_bare:$Immn), 0>;
-
-multiclass Neon_INS_elt_pattern<ValueType ResTy, ValueType NaTy,
-                                ValueType MidTy, Operand StImm, Operand NaImm,
-                                Instruction INS> {
-def : Pat<(ResTy (vector_insert
-            (ResTy VPR128:$src),
-            (MidTy (vector_extract
-              (ResTy VPR128:$Rn),
-              (StImm:$Immn))),
-            (StImm:$Immd))),
-          (INS (ResTy VPR128:$src), (ResTy VPR128:$Rn),
-              StImm:$Immd, StImm:$Immn)>;
-
-def : Pat <(ResTy (vector_insert
-             (ResTy VPR128:$src),
-             (MidTy (vector_extract
-               (NaTy VPR64:$Rn),
-               (NaImm:$Immn))),
-             (StImm:$Immd))),
-           (INS (ResTy VPR128:$src),
-             (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
-             StImm:$Immd, NaImm:$Immn)>;
-
-def : Pat <(NaTy (vector_insert
-             (NaTy VPR64:$src),
-             (MidTy (vector_extract
-               (ResTy VPR128:$Rn),
-               (StImm:$Immn))),
-             (NaImm:$Immd))),
-           (NaTy (EXTRACT_SUBREG
-             (ResTy (INS
-               (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
-               (ResTy VPR128:$Rn),
-               NaImm:$Immd, StImm:$Immn)),
-             sub_64))>;
-
-def : Pat <(NaTy (vector_insert
-             (NaTy VPR64:$src),
-             (MidTy (vector_extract
-               (NaTy VPR64:$Rn),
-               (NaImm:$Immn))),
-             (NaImm:$Immd))),
-           (NaTy (EXTRACT_SUBREG
-             (ResTy (INS
-               (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
-               (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
-               NaImm:$Immd, NaImm:$Immn)),
-             sub_64))>;
-}
-
-defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, neon_uimm2_bare,
-                            neon_uimm1_bare, INSELs>;
-defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, neon_uimm1_bare,
-                            neon_uimm0_bare, INSELd>;
-defm : Neon_INS_elt_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
-                            neon_uimm3_bare, INSELb>;
-defm : Neon_INS_elt_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
-                            neon_uimm2_bare, INSELh>;
-defm : Neon_INS_elt_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
-                            neon_uimm1_bare, INSELs>;
-defm : Neon_INS_elt_pattern<v2i64, v1i64, i64, neon_uimm1_bare,
-                            neon_uimm0_bare, INSELd>;
-
-multiclass Neon_INS_elt_float_pattern<ValueType ResTy, ValueType NaTy,
-                                      ValueType MidTy,
-                                      RegisterClass OpFPR, Operand ResImm,
-                                      SubRegIndex SubIndex, Instruction INS> {
-def : Pat <(ResTy (vector_insert
-             (ResTy VPR128:$src),
-             (MidTy OpFPR:$Rn),
-             (ResImm:$Imm))),
-           (INS (ResTy VPR128:$src),
-             (ResTy (SUBREG_TO_REG (i64 0), OpFPR:$Rn, SubIndex)),
-             ResImm:$Imm,
-             (i64 0))>;
-
-def : Pat <(NaTy (vector_insert
-             (NaTy VPR64:$src),
-             (MidTy OpFPR:$Rn),
-             (ResImm:$Imm))),
-           (NaTy (EXTRACT_SUBREG
-             (ResTy (INS
-               (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
-               (ResTy (SUBREG_TO_REG (i64 0), (MidTy OpFPR:$Rn), SubIndex)),
-               ResImm:$Imm,
-               (i64 0))),
-             sub_64))>;
-}
-
-defm : Neon_INS_elt_float_pattern<v4f32, v2f32, f32, FPR32, neon_uimm2_bare,
-                                  sub_32, INSELs>;
-defm : Neon_INS_elt_float_pattern<v2f64, v1f64, f64, FPR64, neon_uimm1_bare,
-                                  sub_64, INSELd>;
-
-class NeonI_SMOV<string asmop, string Res, bit Q,
-                 ValueType OpTy, ValueType eleTy,
-                 Operand OpImm, RegisterClass ResGPR, ValueType ResTy>
-  : NeonI_copy<Q, 0b0, 0b0101,
-               (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
-               asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
-               [(set (ResTy ResGPR:$Rd),
-                 (ResTy (sext_inreg
-                   (ResTy (vector_extract
-                     (OpTy VPR128:$Rn), (OpImm:$Imm))),
-                   eleTy)))],
-               NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]> {
-  bits<4> Imm;
-}
-
-//Signed integer move (main, from element)
-def SMOVwb : NeonI_SMOV<"smov", "b", 0b0, v16i8, i8, neon_uimm4_bare,
-                        GPR32, i32> {
-  let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-def SMOVwh : NeonI_SMOV<"smov", "h", 0b0, v8i16, i16, neon_uimm3_bare,
-                        GPR32, i32> {
-  let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-def SMOVxb : NeonI_SMOV<"smov", "b", 0b1, v16i8, i8, neon_uimm4_bare,
-                        GPR64, i64> {
-  let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-def SMOVxh : NeonI_SMOV<"smov", "h", 0b1, v8i16, i16, neon_uimm3_bare,
-                        GPR64, i64> {
-  let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-def SMOVxs : NeonI_SMOV<"smov", "s", 0b1, v4i32, i32, neon_uimm2_bare,
-                        GPR64, i64> {
-  let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-
-multiclass Neon_SMOVx_pattern <ValueType StTy, ValueType NaTy,
-                               ValueType eleTy, Operand StImm,  Operand NaImm,
-                               Instruction SMOVI> {
-  def : Pat<(i64 (sext_inreg
-              (i64 (anyext
-                (i32 (vector_extract
-                  (StTy VPR128:$Rn), (StImm:$Imm))))),
-              eleTy)),
-            (SMOVI VPR128:$Rn, StImm:$Imm)>;
-
-  def : Pat<(i64 (sext
-              (i32 (vector_extract
-                (StTy VPR128:$Rn), (StImm:$Imm))))),
-            (SMOVI VPR128:$Rn, StImm:$Imm)>;
-
-  def : Pat<(i64 (sext_inreg
-              (i64 (vector_extract
-                (NaTy VPR64:$Rn), (NaImm:$Imm))),
-              eleTy)),
-            (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-              NaImm:$Imm)>;
-
-  def : Pat<(i64 (sext_inreg
-              (i64 (anyext
-                (i32 (vector_extract
-                  (NaTy VPR64:$Rn), (NaImm:$Imm))))),
-              eleTy)),
-            (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-              NaImm:$Imm)>;
-
-  def : Pat<(i64 (sext
-              (i32 (vector_extract
-                (NaTy VPR64:$Rn), (NaImm:$Imm))))),
-            (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-              NaImm:$Imm)>;
-}
-
-defm : Neon_SMOVx_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
-                          neon_uimm3_bare, SMOVxb>;
-defm : Neon_SMOVx_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
-                          neon_uimm2_bare, SMOVxh>;
-defm : Neon_SMOVx_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
-                          neon_uimm1_bare, SMOVxs>;
-
-class Neon_SMOVw_pattern <ValueType StTy, ValueType NaTy,
-                          ValueType eleTy, Operand StImm,  Operand NaImm,
-                          Instruction SMOVI>
-  : Pat<(i32 (sext_inreg
-          (i32 (vector_extract
-            (NaTy VPR64:$Rn), (NaImm:$Imm))),
-          eleTy)),
-        (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-          NaImm:$Imm)>;
-
-def : Neon_SMOVw_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
-                         neon_uimm3_bare, SMOVwb>;
-def : Neon_SMOVw_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
-                         neon_uimm2_bare, SMOVwh>;
-
-class NeonI_UMOV<string asmop, string Res, bit Q,
-                 ValueType OpTy, Operand OpImm,
-                 RegisterClass ResGPR, ValueType ResTy>
-  : NeonI_copy<Q, 0b0, 0b0111,
-               (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
-               asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
-               [(set (ResTy ResGPR:$Rd),
-                  (ResTy (vector_extract
-                    (OpTy VPR128:$Rn), (OpImm:$Imm))))],
-               NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]> {
-  bits<4> Imm;
-}
-
-//Unsigned integer move (main, from element)
-def UMOVwb : NeonI_UMOV<"umov", "b", 0b0, v16i8, neon_uimm4_bare,
-                         GPR32, i32> {
-  let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-def UMOVwh : NeonI_UMOV<"umov", "h", 0b0, v8i16, neon_uimm3_bare,
-                         GPR32, i32> {
-  let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-def UMOVws : NeonI_UMOV<"umov", "s", 0b0, v4i32, neon_uimm2_bare,
-                         GPR32, i32> {
-  let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-def UMOVxd : NeonI_UMOV<"umov", "d", 0b1, v2i64, neon_uimm1_bare,
-                         GPR64, i64> {
-  let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
-}
-
-def : NeonInstAlias<"mov $Rd, $Rn.s[$Imm]",
-                    (UMOVws GPR32:$Rd, VPR128:$Rn, neon_uimm2_bare:$Imm), 0>;
-def : NeonInstAlias<"mov $Rd, $Rn.d[$Imm]",
-                    (UMOVxd GPR64:$Rd, VPR128:$Rn, neon_uimm1_bare:$Imm), 0>;
-
-class Neon_UMOV_pattern <ValueType StTy, ValueType NaTy, ValueType ResTy,
-                         Operand StImm,  Operand NaImm,
-                         Instruction SMOVI>
-  : Pat<(ResTy (vector_extract
-          (NaTy VPR64:$Rn), NaImm:$Imm)),
-        (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-          NaImm:$Imm)>;
-
-def : Neon_UMOV_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
-                        neon_uimm3_bare, UMOVwb>;
-def : Neon_UMOV_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
-                        neon_uimm2_bare, UMOVwh>;
-def : Neon_UMOV_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
-                        neon_uimm1_bare, UMOVws>;
-
-def : Pat<(i32 (and
-            (i32 (vector_extract
-              (v16i8 VPR128:$Rn), (neon_uimm4_bare:$Imm))),
-            255)),
-          (UMOVwb VPR128:$Rn, neon_uimm4_bare:$Imm)>;
-
-def : Pat<(i32 (and
-            (i32 (vector_extract
-              (v8i16 VPR128:$Rn), (neon_uimm3_bare:$Imm))),
-            65535)),
-          (UMOVwh VPR128:$Rn, neon_uimm3_bare:$Imm)>;
-
-def : Pat<(i64 (zext
-            (i32 (vector_extract
-              (v2i64 VPR128:$Rn), (neon_uimm1_bare:$Imm))))),
-          (UMOVxd VPR128:$Rn, neon_uimm1_bare:$Imm)>;
-
-def : Pat<(i32 (and
-            (i32 (vector_extract
-              (v8i8 VPR64:$Rn), (neon_uimm3_bare:$Imm))),
-            255)),
-          (UMOVwb (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
-            neon_uimm3_bare:$Imm)>;
-
-def : Pat<(i32 (and
-            (i32 (vector_extract
-              (v4i16 VPR64:$Rn), (neon_uimm2_bare:$Imm))),
-            65535)),
-          (UMOVwh (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
-            neon_uimm2_bare:$Imm)>;
-
-def : Pat<(i64 (zext
-            (i32 (vector_extract
-              (v1i64 VPR64:$Rn), (neon_uimm0_bare:$Imm))))),
-          (UMOVxd (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
-            neon_uimm0_bare:$Imm)>;
-
-// Additional copy patterns for scalar types
-def : Pat<(i32 (vector_extract (v1i8 FPR8:$Rn), (i64 0))),
-          (UMOVwb (v16i8
-            (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8)), (i64 0))>;
-
-def : Pat<(i32 (vector_extract (v1i16 FPR16:$Rn), (i64 0))),
-          (UMOVwh (v8i16
-            (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16)), (i64 0))>;
-
-def : Pat<(i32 (vector_extract (v1i32 FPR32:$Rn), (i64 0))),
-          (FMOVws FPR32:$Rn)>;
-
-def : Pat<(i64 (vector_extract (v1i64 FPR64:$Rn), (i64 0))),
-          (FMOVxd FPR64:$Rn)>;
-
-def : Pat<(f64 (vector_extract (v1f64 FPR64:$Rn), (i64 0))),
-          (f64 FPR64:$Rn)>;
-
-def : Pat<(v1i8 (scalar_to_vector GPR32:$Rn)),
-          (v1i8 (EXTRACT_SUBREG (v16i8
-            (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
-            sub_8))>;
-
-def : Pat<(v1i16 (scalar_to_vector GPR32:$Rn)),
-          (v1i16 (EXTRACT_SUBREG (v8i16
-            (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
-            sub_16))>;
-
-def : Pat<(v1i32 (scalar_to_vector GPR32:$src)),
-          (FMOVsw $src)>;
-
-def : Pat<(v1i64 (scalar_to_vector GPR64:$src)),
-          (FMOVdx $src)>;
-
-def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
-          (v8i8 (EXTRACT_SUBREG (v16i8
-            (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
-            sub_64))>;
-
-def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
-          (v4i16 (EXTRACT_SUBREG (v8i16
-            (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
-            sub_64))>;
-
-def : Pat<(v2i32 (scalar_to_vector GPR32:$Rn)),
-          (v2i32 (EXTRACT_SUBREG (v16i8
-            (INSsw (v4i32 (IMPLICIT_DEF)), $Rn, (i64 0))),
-            sub_64))>;
-
-def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
-          (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))>;
-
-def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
-          (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))>;
-
-def : Pat<(v4i32 (scalar_to_vector GPR32:$Rn)),
-          (INSsw (v4i32 (IMPLICIT_DEF)), $Rn, (i64 0))>;
-
-def : Pat<(v2i64 (scalar_to_vector GPR64:$Rn)),
-          (INSdx (v2i64 (IMPLICIT_DEF)), $Rn, (i64 0))>;
-
-def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
-          (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32)>;
-def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
-          (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32)>;
-
-def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Rn))),
-          (v1f64 FPR64:$Rn)>;
-
-def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$src))),
-          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
-                         (f64 FPR64:$src), sub_64)>;
-
-class NeonI_DUP_Elt<bit Q, string asmop, string rdlane,  string rnlane,
-                    RegisterOperand ResVPR, Operand OpImm>
-  : NeonI_copy<Q, 0b0, 0b0000, (outs ResVPR:$Rd),
-               (ins VPR128:$Rn, OpImm:$Imm),
-               asmop # "\t$Rd" # rdlane # ", $Rn" # rnlane # "[$Imm]",
-               [],
-               NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]> {
-  bits<4> Imm;
-}
-
-def DUPELT16b : NeonI_DUP_Elt<0b1, "dup", ".16b", ".b", VPR128,
-                              neon_uimm4_bare> {
-  let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-
-def DUPELT8h : NeonI_DUP_Elt<0b1, "dup", ".8h", ".h", VPR128,
-                              neon_uimm3_bare> {
-  let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-
-def DUPELT4s : NeonI_DUP_Elt<0b1, "dup", ".4s", ".s", VPR128,
-                              neon_uimm2_bare> {
-  let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-
-def DUPELT2d : NeonI_DUP_Elt<0b1, "dup", ".2d", ".d", VPR128,
-                              neon_uimm1_bare> {
-  let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
-}
-
-def DUPELT8b : NeonI_DUP_Elt<0b0, "dup", ".8b", ".b", VPR64,
-                              neon_uimm4_bare> {
-  let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
-}
-
-def DUPELT4h : NeonI_DUP_Elt<0b0, "dup", ".4h", ".h", VPR64,
-                              neon_uimm3_bare> {
-  let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
-}
-
-def DUPELT2s : NeonI_DUP_Elt<0b0, "dup", ".2s", ".s", VPR64,
-                              neon_uimm2_bare> {
-  let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
-}
-
-multiclass NeonI_DUP_Elt_pattern<Instruction DUPELT, ValueType ResTy,
-                                       ValueType OpTy,ValueType NaTy,
-                                       ValueType ExTy, Operand OpLImm,
-                                       Operand OpNImm> {
-def  : Pat<(ResTy (Neon_vduplane (OpTy VPR128:$Rn), OpLImm:$Imm)),
-        (ResTy (DUPELT (OpTy VPR128:$Rn), OpLImm:$Imm))>;
-
-def : Pat<(ResTy (Neon_vduplane
-            (NaTy VPR64:$Rn), OpNImm:$Imm)),
-          (ResTy (DUPELT
-            (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)), OpNImm:$Imm))>;
-}
-defm : NeonI_DUP_Elt_pattern<DUPELT16b, v16i8, v16i8, v8i8, v16i8,
-                             neon_uimm4_bare, neon_uimm3_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT8b, v8i8, v16i8, v8i8, v16i8,
-                             neon_uimm4_bare, neon_uimm3_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT8h, v8i16, v8i16, v4i16, v8i16,
-                             neon_uimm3_bare, neon_uimm2_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT4h, v4i16, v8i16, v4i16, v8i16,
-                             neon_uimm3_bare, neon_uimm2_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4i32, v4i32, v2i32, v4i32,
-                             neon_uimm2_bare, neon_uimm1_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2i32, v4i32, v2i32, v4i32,
-                             neon_uimm2_bare, neon_uimm1_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2i64, v2i64, v1i64, v2i64,
-                             neon_uimm1_bare, neon_uimm0_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4f32, v4f32, v2f32, v4f32,
-                             neon_uimm2_bare, neon_uimm1_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2f32, v4f32, v2f32, v4f32,
-                             neon_uimm2_bare, neon_uimm1_bare>;
-defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2f64, v2f64, v1f64, v2f64,
-                             neon_uimm1_bare, neon_uimm0_bare>;
-
-def : Pat<(v2f32 (Neon_vdup (f32 FPR32:$Rn))),
-          (v2f32 (DUPELT2s
-            (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
-            (i64 0)))>;
-def : Pat<(v4f32 (Neon_vdup (f32 FPR32:$Rn))),
-          (v4f32 (DUPELT4s
-            (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
-            (i64 0)))>;
-def : Pat<(v2f64 (Neon_vdup (f64 FPR64:$Rn))),
-          (v2f64 (DUPELT2d
-            (SUBREG_TO_REG (i64 0), FPR64:$Rn, sub_64),
-            (i64 0)))>;
-
-multiclass NeonI_DUP_pattern<Instruction DUPELT, ValueType ResTy,
-                             ValueType OpTy, RegisterClass OpRC,
-                             Operand OpNImm, SubRegIndex SubIndex> {
-def : Pat<(ResTy (Neon_vduplane (OpTy OpRC:$Rn), OpNImm:$Imm)),
-          (ResTy (DUPELT
-            (SUBREG_TO_REG (i64 0), OpRC:$Rn, SubIndex), OpNImm:$Imm))>;
-}
-
-defm : NeonI_DUP_pattern<DUPELT4h, v4i16, v1i16, FPR16, neon_uimm2_bare,sub_16>;
-defm : NeonI_DUP_pattern<DUPELT4s, v4i32, v1i32, FPR32, neon_uimm2_bare,sub_32>;
-defm : NeonI_DUP_pattern<DUPELT8b, v8i8, v1i8, FPR8, neon_uimm3_bare, sub_8>;
-defm : NeonI_DUP_pattern<DUPELT8h, v8i16, v1i16, FPR16, neon_uimm3_bare,sub_16>;
-defm : NeonI_DUP_pattern<DUPELT16b, v16i8, v1i8, FPR8, neon_uimm4_bare, sub_8>;
-
-class NeonI_DUP<bit Q, string asmop, string rdlane,
-                RegisterOperand ResVPR, ValueType ResTy,
-                RegisterClass OpGPR, ValueType OpTy>
-  : NeonI_copy<Q, 0b0, 0b0001, (outs ResVPR:$Rd), (ins OpGPR:$Rn),
-               asmop # "\t$Rd" # rdlane # ", $Rn",
-               [(set (ResTy ResVPR:$Rd),
-                 (ResTy (Neon_vdup (OpTy OpGPR:$Rn))))],
-               NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-def DUP16b : NeonI_DUP<0b1, "dup", ".16b", VPR128, v16i8, GPR32, i32> {
-  let Inst{20-16} = 0b00001;
-  // bits 17-20 are unspecified, but should be set to zero.
-}
-
-def DUP8h : NeonI_DUP<0b1, "dup", ".8h", VPR128, v8i16, GPR32, i32> {
-  let Inst{20-16} = 0b00010;
-  // bits 18-20 are unspecified, but should be set to zero.
-}
-
-def DUP4s : NeonI_DUP<0b1, "dup", ".4s", VPR128, v4i32, GPR32, i32> {
-  let Inst{20-16} = 0b00100;
-  // bits 19-20 are unspecified, but should be set to zero.
-}
-
-def DUP2d : NeonI_DUP<0b1, "dup", ".2d", VPR128, v2i64, GPR64, i64> {
-  let Inst{20-16} = 0b01000;
-  // bit 20 is unspecified, but should be set to zero.
-}
-
-def DUP8b : NeonI_DUP<0b0, "dup", ".8b", VPR64, v8i8, GPR32, i32> {
-  let Inst{20-16} = 0b00001;
-  // bits 17-20 are unspecified, but should be set to zero.
-}
-
-def DUP4h : NeonI_DUP<0b0, "dup", ".4h", VPR64, v4i16, GPR32, i32> {
-  let Inst{20-16} = 0b00010;
-  // bits 18-20 are unspecified, but should be set to zero.
-}
-
-def DUP2s : NeonI_DUP<0b0, "dup", ".2s", VPR64, v2i32, GPR32, i32> {
-  let Inst{20-16} = 0b00100;
-  // bits 19-20 are unspecified, but should be set to zero.
-}
-
-// patterns for CONCAT_VECTORS
-multiclass Concat_Vector_Pattern<ValueType ResTy, ValueType OpTy> {
-def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), undef)),
-          (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)>;
-def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))),
-          (INSELd
-            (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-            (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rm, sub_64)),
-            (i64 1),
-            (i64 0))>;
-def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rn))),
-          (DUPELT2d
-            (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-            (i64 0))> ;
-}
-
-defm : Concat_Vector_Pattern<v16i8, v8i8>;
-defm : Concat_Vector_Pattern<v8i16, v4i16>;
-defm : Concat_Vector_Pattern<v4i32, v2i32>;
-defm : Concat_Vector_Pattern<v2i64, v1i64>;
-defm : Concat_Vector_Pattern<v4f32, v2f32>;
-defm : Concat_Vector_Pattern<v2f64, v1f64>;
-
-def : Pat<(v2i32 (concat_vectors (v1i32 FPR32:$Rn), undef)),
-          (v2i32 (SUBREG_TO_REG(i64 0), $Rn, sub_32))>;
-def : Pat<(v2i32 (concat_vectors (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
-          (EXTRACT_SUBREG 
-            (v4i32 (INSELs
-              (v4i32 (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32)),
-              (v4i32 (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
-              (i64 1),
-              (i64 0))),
-            sub_64)>;
-def : Pat<(v2i32 (concat_vectors (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rn))),
-          (DUPELT2s (v4i32 (SUBREG_TO_REG(i64 0), $Rn, sub_32)), 0)>;
-
-//patterns for EXTRACT_SUBVECTOR
-def : Pat<(v8i8 (extract_subvector (v16i8 VPR128:$Rn), (i64 0))),
-          (v8i8 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-def : Pat<(v4i16 (extract_subvector (v8i16 VPR128:$Rn), (i64 0))),
-          (v4i16 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-def : Pat<(v2i32 (extract_subvector (v4i32 VPR128:$Rn), (i64 0))),
-          (v2i32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-def : Pat<(v1i64 (extract_subvector (v2i64 VPR128:$Rn), (i64 0))),
-          (v1i64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-def : Pat<(v2f32 (extract_subvector (v4f32 VPR128:$Rn), (i64 0))),
-          (v2f32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-def : Pat<(v1f64 (extract_subvector (v2f64 VPR128:$Rn), (i64 0))),
-          (v1f64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
-
-// The followings are for instruction class (3V Elem)
-
-// Variant 1
-
-class NI_2VE<bit q, bit u, bits<2> size, bits<4> opcode,
-             string asmop, string ResS, string OpS, string EleOpS,
-             Operand OpImm, RegisterOperand ResVPR,
-             RegisterOperand OpVPR, RegisterOperand EleOpVPR>
-  : NeonI_2VElem<q, u, size, opcode,
-                 (outs ResVPR:$Rd), (ins ResVPR:$src, OpVPR:$Rn,
-                                         EleOpVPR:$Re, OpImm:$Index),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
-                 ", $Re." # EleOpS # "[$Index]",
-                 [],
-                 NoItinerary>,
-    Sched<[WriteFPMAC, ReadFPMAC, ReadFPMAC, ReadFPMAC]> {
-  bits<3> Index;
-  bits<5> Re;
-
-  let Constraints = "$src = $Rd";
-}
-
-multiclass NI_2VE_v1<bit u, bits<4> opcode, string asmop> {
-  // vector register class for element is always 128-bit to cover the max index
-  def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
-                     neon_uimm2_bare, VPR64, VPR64, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
-                     neon_uimm2_bare, VPR128, VPR128, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  // Index operations on 16-bit(H) elements are restricted to using v0-v15.
-  def _4h8h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
-                     neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
-    let Inst{11} = {Index{2}};
-    let Inst{21} = {Index{1}};
-    let Inst{20} = {Index{0}};
-    let Inst{19-16} = Re{3-0};
-  }
-
-  def _8h8h : NI_2VE<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
-                     neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
-    let Inst{11} = {Index{2}};
-    let Inst{21} = {Index{1}};
-    let Inst{20} = {Index{0}};
-    let Inst{19-16} = Re{3-0};
-  }
-}
-
-defm MLAvve : NI_2VE_v1<0b1, 0b0000, "mla">;
-defm MLSvve : NI_2VE_v1<0b1, 0b0100, "mls">;
-
-// Pattern for lane in 128-bit vector
-class NI_2VE_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
-                   RegisterOperand ResVPR, RegisterOperand OpVPR,
-                   RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
-                   ValueType EleOpTy>
-  : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
-          (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
-        (INST ResVPR:$src, OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VE_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
-                  RegisterOperand ResVPR, RegisterOperand OpVPR,
-                  RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
-                  ValueType EleOpTy>
-  : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
-          (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
-        (INST ResVPR:$src, OpVPR:$Rn,
-          (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
-
-multiclass NI_2VE_v1_pat<string subop, SDPatternOperator op>
-{
-  def : NI_2VE_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
-                     op, VPR64, VPR64, VPR128, v2i32, v2i32, v4i32>;
-
-  def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
-                     op, VPR128, VPR128, VPR128, v4i32, v4i32, v4i32>;
-
-  def : NI_2VE_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
-                     op, VPR64, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
-
-  def : NI_2VE_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
-                     op, VPR128, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
-
-  // Index can only be half of the max value for lane in 64-bit vector
-
-  def : NI_2VE_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
-                    op, VPR64, VPR64, VPR64, v2i32, v2i32, v2i32>;
-
-  def : NI_2VE_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
-                    op, VPR64, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
-}
-
-defm MLA_lane_v1 : NI_2VE_v1_pat<"MLAvve", Neon_mla>;
-defm MLS_lane_v1 : NI_2VE_v1_pat<"MLSvve", Neon_mls>;
-
-class NI_2VE_2op<bit q, bit u, bits<2> size, bits<4> opcode,
-                 string asmop, string ResS, string OpS, string EleOpS,
-                 Operand OpImm, RegisterOperand ResVPR,
-                 RegisterOperand OpVPR, RegisterOperand EleOpVPR>
-  : NeonI_2VElem<q, u, size, opcode,
-                 (outs ResVPR:$Rd), (ins OpVPR:$Rn,
-                                         EleOpVPR:$Re, OpImm:$Index),
-                 asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
-                 ", $Re." # EleOpS # "[$Index]",
-                 [],
-                 NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-  bits<3> Index;
-  bits<5> Re;
-}
-
-multiclass NI_2VE_v1_2op<bit u, bits<4> opcode, string asmop> {
-  // vector register class for element is always 128-bit to cover the max index
-  def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
-                         neon_uimm2_bare, VPR64, VPR64, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
-                         neon_uimm2_bare, VPR128, VPR128, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  // Index operations on 16-bit(H) elements are restricted to using v0-v15.
-  def _4h8h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
-                         neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
-    let Inst{11} = {Index{2}};
-    let Inst{21} = {Index{1}};
-    let Inst{20} = {Index{0}};
-    let Inst{19-16} = Re{3-0};
-  }
-
-  def _8h8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
-                         neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
-    let Inst{11} = {Index{2}};
-    let Inst{21} = {Index{1}};
-    let Inst{20} = {Index{0}};
-    let Inst{19-16} = Re{3-0};
-  }
-}
-
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
-defm MULve : NI_2VE_v1_2op<0b0, 0b1000, "mul">;
-defm SQDMULHve : NI_2VE_v1_2op<0b0, 0b1100, "sqdmulh">;
-defm SQRDMULHve : NI_2VE_v1_2op<0b0, 0b1101, "sqrdmulh">;
-}
-
-// Pattern for lane in 128-bit vector
-class NI_2VE_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
-                       RegisterOperand OpVPR, RegisterOperand EleOpVPR,
-                       ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
-  : Pat<(ResTy (op (OpTy OpVPR:$Rn),
-          (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
-        (INST OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VE_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
-                      RegisterOperand OpVPR, RegisterOperand EleOpVPR,
-                      ValueType ResTy, ValueType OpTy, ValueType EleOpTy>
-  : Pat<(ResTy (op (OpTy OpVPR:$Rn),
-          (OpTy (Neon_vduplane (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
-        (INST OpVPR:$Rn,
-          (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
-
-multiclass NI_2VE_mul_v1_pat<string subop, SDPatternOperator op> {
-  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
-                         op, VPR64, VPR128, v2i32, v2i32, v4i32>;
-
-  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
-                         op, VPR128, VPR128, v4i32, v4i32, v4i32>;
-
-  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
-                         op, VPR64, VPR128Lo, v4i16, v4i16, v8i16>;
-
-  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
-                         op, VPR128, VPR128Lo, v8i16, v8i16, v8i16>;
-
-  // Index can only be half of the max value for lane in 64-bit vector
-
-  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
-                        op, VPR64, VPR64, v2i32, v2i32, v2i32>;
-
-  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
-                        op, VPR64, VPR64Lo, v4i16, v4i16, v4i16>;
-}
-
-defm MUL_lane_v1 : NI_2VE_mul_v1_pat<"MULve", mul>;
-defm SQDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQDMULHve", int_arm_neon_vqdmulh>;
-defm SQRDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQRDMULHve", int_arm_neon_vqrdmulh>;
-
-// Variant 2
-
-multiclass NI_2VE_v2_2op<bit u, bits<4> opcode, string asmop> {
-  // vector register class for element is always 128-bit to cover the max index
-  def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
-                         neon_uimm2_bare, VPR64, VPR64, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
-                         neon_uimm2_bare, VPR128, VPR128, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  // _1d2d doesn't exist!
-
-  def _2d2d : NI_2VE_2op<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
-                         neon_uimm1_bare, VPR128, VPR128, VPR128> {
-    let Inst{11} = {Index{0}};
-    let Inst{21} = 0b0;
-    let Inst{20-16} = Re;
-  }
-}
-
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
-defm FMULve : NI_2VE_v2_2op<0b0, 0b1001, "fmul">;
-defm FMULXve : NI_2VE_v2_2op<0b1, 0b1001, "fmulx">;
-}
-
-class NI_2VE_mul_lane_2d<Instruction INST, Operand OpImm, SDPatternOperator op,
-                         RegisterOperand OpVPR, RegisterOperand EleOpVPR,
-                         ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
-                         SDPatternOperator coreop>
-  : Pat<(ResTy (op (OpTy OpVPR:$Rn),
-          (OpTy (coreop (EleOpTy EleOpVPR:$Re), (EleOpTy EleOpVPR:$Re))))),
-        (INST OpVPR:$Rn,
-          (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), 0)>;
-
-multiclass NI_2VE_mul_v2_pat<string subop, SDPatternOperator op> {
-  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
-                         op, VPR64, VPR128, v2f32, v2f32, v4f32>;
-
-  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
-                         op, VPR128, VPR128, v4f32, v4f32, v4f32>;
-
-  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
-                         op, VPR128, VPR128, v2f64, v2f64, v2f64>;
-
-  // Index can only be half of the max value for lane in 64-bit vector
-
-  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
-                        op, VPR64, VPR64, v2f32, v2f32, v2f32>;
-
-  def : NI_2VE_mul_lane_2d<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
-                           op, VPR128, VPR64, v2f64, v2f64, v1f64,
-                           BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
-}
-
-defm FMUL_lane_v2 : NI_2VE_mul_v2_pat<"FMULve", fmul>;
-defm FMULX_lane_v2 : NI_2VE_mul_v2_pat<"FMULXve", int_aarch64_neon_vmulx>;
-
-def : Pat<(v2f32 (fmul (v2f32 (Neon_vdup (f32 FPR32:$Re))),
-                       (v2f32 VPR64:$Rn))),
-          (FMULve_2s4s VPR64:$Rn, (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
-
-def : Pat<(v4f32 (fmul (v4f32 (Neon_vdup (f32 FPR32:$Re))),
-                       (v4f32 VPR128:$Rn))),
-          (FMULve_4s4s VPR128:$Rn, (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
-
-def : Pat<(v2f64 (fmul (v2f64 (Neon_vdup (f64 FPR64:$Re))),
-                       (v2f64 VPR128:$Rn))),
-          (FMULve_2d2d VPR128:$Rn, (SUBREG_TO_REG (i64 0), $Re, sub_64), 0)>;
-
-// The followings are patterns using fma
-// -ffp-contract=fast generates fma
-
-multiclass NI_2VE_v2<bit u, bits<4> opcode, string asmop> {
-  // vector register class for element is always 128-bit to cover the max index
-  def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
-                     neon_uimm2_bare, VPR64, VPR64, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
-                     neon_uimm2_bare, VPR128, VPR128, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  // _1d2d doesn't exist!
-
-  def _2d2d : NI_2VE<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
-                     neon_uimm1_bare, VPR128, VPR128, VPR128> {
-    let Inst{11} = {Index{0}};
-    let Inst{21} = 0b0;
-    let Inst{20-16} = Re;
-  }
-}
-
-defm FMLAvve : NI_2VE_v2<0b0, 0b0001, "fmla">;
-defm FMLSvve : NI_2VE_v2<0b0, 0b0101, "fmls">;
-
-// Pattern for lane in 128-bit vector
-class NI_2VEswap_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
-                       RegisterOperand ResVPR, RegisterOperand OpVPR,
-                       ValueType ResTy, ValueType OpTy,
-                       SDPatternOperator coreop>
-  : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
-                   (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
-        (INST ResVPR:$src, ResVPR:$Rn, OpVPR:$Re, OpImm:$Index)>;
-
-// Pattern for lane 0
-class NI_2VEfma_lane0<Instruction INST, SDPatternOperator op,
-                      RegisterOperand ResVPR, ValueType ResTy>
-  : Pat<(ResTy (op (ResTy ResVPR:$Rn),
-                   (ResTy (Neon_vdup (f32 FPR32:$Re))),
-                   (ResTy ResVPR:$src))),
-        (INST ResVPR:$src, ResVPR:$Rn,
-              (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VEswap_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
-                      RegisterOperand ResVPR, RegisterOperand OpVPR,
-                      ValueType ResTy, ValueType OpTy,
-                      SDPatternOperator coreop>
-  : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
-                   (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
-        (INST ResVPR:$src, ResVPR:$Rn,
-          (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), OpImm:$Index)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VEswap_lane_2d2d<Instruction INST, Operand OpImm,
-                           SDPatternOperator op,
-                           RegisterOperand ResVPR, RegisterOperand OpVPR,
-                           ValueType ResTy, ValueType OpTy,
-                           SDPatternOperator coreop>
-  : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (OpTy OpVPR:$Re))),
-                   (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
-        (INST ResVPR:$src, ResVPR:$Rn,
-          (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), 0)>;
-
-
-multiclass NI_2VE_fma_v2_pat<string subop, SDPatternOperator op> {
-  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
-                         neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
-                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
-
-  def : NI_2VEfma_lane0<!cast<Instruction>(subop # "_2s4s"),
-                        op, VPR64, v2f32>;
-
-  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
-                         neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
-                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
-
-  def : NI_2VEfma_lane0<!cast<Instruction>(subop # "_4s4s"),
-                        op, VPR128, v4f32>;
-
-  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
-                         neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
-                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
-
-  // Index can only be half of the max value for lane in 64-bit vector
-
-  def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
-                        neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
-                        BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
-
-  def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
-                             neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
-                             BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
-}
-
-defm FMLA_lane_v2_s : NI_2VE_fma_v2_pat<"FMLAvve", fma>;
-
-// Pattern for lane 0
-class NI_2VEfms_lane0<Instruction INST, SDPatternOperator op,
-                      RegisterOperand ResVPR, ValueType ResTy>
-  : Pat<(ResTy (op (ResTy (fneg ResVPR:$Rn)),
-                   (ResTy (Neon_vdup (f32 FPR32:$Re))),
-                   (ResTy ResVPR:$src))),
-        (INST ResVPR:$src, ResVPR:$Rn,
-              (SUBREG_TO_REG (i32 0), $Re, sub_32), 0)>;
-
-multiclass NI_2VE_fms_v2_pat<string subop, SDPatternOperator op>
-{
-  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
-                         neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
-                         BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
-
-  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
-                         neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
-                         BinOpFrag<(Neon_vduplane
-                                     (fneg node:$LHS), node:$RHS)>>;
-
-  def : NI_2VEfms_lane0<!cast<Instruction>(subop # "_2s4s"),
-                        op, VPR64, v2f32>;
-
-  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
-                         neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
-                         BinOpFrag<(fneg (Neon_vduplane
-                                     node:$LHS, node:$RHS))>>;
-
-  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
-                         neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
-                         BinOpFrag<(Neon_vduplane
-                                     (fneg node:$LHS), node:$RHS)>>;
-
-  def : NI_2VEfms_lane0<!cast<Instruction>(subop # "_4s4s"),
-                        op, VPR128, v4f32>;
-
-  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
-                         neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
-                         BinOpFrag<(fneg (Neon_vduplane
-                                     node:$LHS, node:$RHS))>>;
-
-  def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
-                         neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
-                         BinOpFrag<(Neon_vduplane
-                                     (fneg node:$LHS), node:$RHS)>>;
-
-  // Index can only be half of the max value for lane in 64-bit vector
-
-  def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
-                        neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
-                        BinOpFrag<(fneg (Neon_vduplane
-                                    node:$LHS, node:$RHS))>>;
-
-  def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
-                        neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
-                        BinOpFrag<(Neon_vduplane
-                                    (fneg node:$LHS), node:$RHS)>>;
-
-  def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
-                        neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
-                        BinOpFrag<(fneg (Neon_vduplane node:$LHS, node:$RHS))>>;
-
-  def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
-                        neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
-                        BinOpFrag<(Neon_vduplane (fneg node:$LHS), node:$RHS)>>;
-
-  def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
-                             neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
-                             BinOpFrag<(fneg (Neon_combine_2d
-                                         node:$LHS, node:$RHS))>>;
-
-  def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
-                             neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
-                             BinOpFrag<(Neon_combine_2d
-                                         (fneg node:$LHS), (fneg node:$RHS))>>;
-}
-
-defm FMLS_lane_v2_s : NI_2VE_fms_v2_pat<"FMLSvve", fma>;
-
-// Variant 3: Long type
-// E.g. SMLAL : 4S/4H/H (v0-v15), 2D/2S/S
-//      SMLAL2: 4S/8H/H (v0-v15), 2D/4S/S
-
-multiclass NI_2VE_v3<bit u, bits<4> opcode, string asmop> {
-  // vector register class for element is always 128-bit to cover the max index
-  def _2d2s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
-                     neon_uimm2_bare, VPR128, VPR64, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  def _2d4s : NI_2VE<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
-                     neon_uimm2_bare, VPR128, VPR128, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  // Index operations on 16-bit(H) elements are restricted to using v0-v15.
-  def _4s8h : NI_2VE<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
-                     neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
-    let Inst{11} = {Index{2}};
-    let Inst{21} = {Index{1}};
-    let Inst{20} = {Index{0}};
-    let Inst{19-16} = Re{3-0};
-  }
-
-  def _4s4h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
-                     neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
-    let Inst{11} = {Index{2}};
-    let Inst{21} = {Index{1}};
-    let Inst{20} = {Index{0}};
-    let Inst{19-16} = Re{3-0};
-  }
-}
-
-defm SMLALvve : NI_2VE_v3<0b0, 0b0010, "smlal">;
-defm UMLALvve : NI_2VE_v3<0b1, 0b0010, "umlal">;
-defm SMLSLvve : NI_2VE_v3<0b0, 0b0110, "smlsl">;
-defm UMLSLvve : NI_2VE_v3<0b1, 0b0110, "umlsl">;
-defm SQDMLALvve : NI_2VE_v3<0b0, 0b0011, "sqdmlal">;
-defm SQDMLSLvve : NI_2VE_v3<0b0, 0b0111, "sqdmlsl">;
-
-multiclass NI_2VE_v3_2op<bit u, bits<4> opcode, string asmop> {
-  // vector register class for element is always 128-bit to cover the max index
-  def _2d2s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
-                         neon_uimm2_bare, VPR128, VPR64, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  def _2d4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
-                         neon_uimm2_bare, VPR128, VPR128, VPR128> {
-    let Inst{11} = {Index{1}};
-    let Inst{21} = {Index{0}};
-    let Inst{20-16} = Re;
-  }
-
-  // Index operations on 16-bit(H) elements are restricted to using v0-v15.
-  def _4s8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
-                         neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
-    let Inst{11} = {Index{2}};
-    let Inst{21} = {Index{1}};
-    let Inst{20} = {Index{0}};
-    let Inst{19-16} = Re{3-0};
-  }
-
-  def _4s4h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
-                         neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
-    let Inst{11} = {Index{2}};
-    let Inst{21} = {Index{1}};
-    let Inst{20} = {Index{0}};
-    let Inst{19-16} = Re{3-0};
-  }
-}
-
-let SchedRW = [WriteFPMul, ReadFPMul, ReadFPMul] in {
-defm SMULLve : NI_2VE_v3_2op<0b0, 0b1010, "smull">;
-defm UMULLve : NI_2VE_v3_2op<0b1, 0b1010, "umull">;
-defm SQDMULLve : NI_2VE_v3_2op<0b0, 0b1011, "sqdmull">;
-}
-
-def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$src))),
-          (FMOVdd $src)>;
-
-// Pattern for lane in 128-bit vector
-class NI_2VEL2_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
-                     RegisterOperand EleOpVPR, ValueType ResTy,
-                     ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
-                     SDPatternOperator hiop>
-  : Pat<(ResTy (op (ResTy VPR128:$src),
-          (HalfOpTy (hiop (OpTy VPR128:$Rn))),
-          (HalfOpTy (Neon_vduplane
-                      (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
-        (INST VPR128:$src, VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VEL2_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
-                    RegisterOperand EleOpVPR, ValueType ResTy,
-                    ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
-                    SDPatternOperator hiop>
-  : Pat<(ResTy (op (ResTy VPR128:$src),
-          (HalfOpTy (hiop (OpTy VPR128:$Rn))),
-          (HalfOpTy (Neon_vduplane
-                      (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
-        (INST VPR128:$src, VPR128:$Rn,
-          (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
-
-class NI_2VEL2_lane0<Instruction INST, SDPatternOperator op,
-                     ValueType ResTy, ValueType OpTy, ValueType HalfOpTy,
-                     SDPatternOperator hiop, Instruction DupInst>
-  : Pat<(ResTy (op (ResTy VPR128:$src),
-          (HalfOpTy (hiop (OpTy VPR128:$Rn))),
-          (HalfOpTy (Neon_vdup (i32 GPR32:$Re))))),
-        (INST VPR128:$src, VPR128:$Rn, (DupInst $Re), 0)>;
-
-multiclass NI_2VEL_v3_pat<string subop, SDPatternOperator op> {
-  def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
-                     op, VPR128, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
-
-  def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
-                     op, VPR128, VPR64, VPR128, v2i64, v2i32, v4i32>;
-
-  def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
-                       op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
-
-  def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
-                       op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
-
-  def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_4s8h"),
-                       op, v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
-
-  def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_2d4s"),
-                       op, v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
-
-  // Index can only be half of the max value for lane in 64-bit vector
-
-  def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
-                    op, VPR128, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
-
-  def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
-                    op, VPR128, VPR64, VPR64, v2i64, v2i32, v2i32>;
-
-  def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
-                      op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
-
-  def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
-                      op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
-}
-
-defm SMLAL_lane_v3 : NI_2VEL_v3_pat<"SMLALvve", Neon_smlal>;
-defm UMLAL_lane_v3 : NI_2VEL_v3_pat<"UMLALvve", Neon_umlal>;
-defm SMLSL_lane_v3 : NI_2VEL_v3_pat<"SMLSLvve", Neon_smlsl>;
-defm UMLSL_lane_v3 : NI_2VEL_v3_pat<"UMLSLvve", Neon_umlsl>;
-
-// Pattern for lane in 128-bit vector
-class NI_2VEL2_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
-                         RegisterOperand EleOpVPR, ValueType ResTy,
-                         ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
-                         SDPatternOperator hiop>
-  : Pat<(ResTy (op
-          (HalfOpTy (hiop (OpTy VPR128:$Rn))),
-          (HalfOpTy (Neon_vduplane
-                      (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
-        (INST VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
-
-// Pattern for lane in 64-bit vector
-class NI_2VEL2_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
-                        RegisterOperand EleOpVPR, ValueType ResTy,
-                        ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
-                        SDPatternOperator hiop>
-  : Pat<(ResTy (op
-          (HalfOpTy (hiop (OpTy VPR128:$Rn))),
-          (HalfOpTy (Neon_vduplane
-                      (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
-        (INST VPR128:$Rn,
-          (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
-
-// Pattern for fixed lane 0
-class NI_2VEL2_mul_lane0<Instruction INST, SDPatternOperator op,
-                         ValueType ResTy, ValueType OpTy, ValueType HalfOpTy,
-                         SDPatternOperator hiop, Instruction DupInst>
-  : Pat<(ResTy (op
-          (HalfOpTy (hiop (OpTy VPR128:$Rn))),
-          (HalfOpTy (Neon_vdup (i32 GPR32:$Re))))),
-        (INST VPR128:$Rn, (DupInst $Re), 0)>;
-
-multiclass NI_2VEL_mul_v3_pat<string subop, SDPatternOperator op> {
-  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
-                         op, VPR64, VPR128Lo, v4i32, v4i16, v8i16>;
-
-  def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
-                         op, VPR64, VPR128, v2i64, v2i32, v4i32>;
-
-  def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
-                         op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
-
-  def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
-                           op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
-
-  def : NI_2VEL2_mul_lane0<!cast<Instruction>(subop # "_4s8h"),
-                           op, v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
-
-  def : NI_2VEL2_mul_lane0<!cast<Instruction>(subop # "_2d4s"),
-                           op, v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
-
-  // Index can only be half of the max value for lane in 64-bit vector
-
-  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
-                        op, VPR64, VPR64Lo, v4i32, v4i16, v4i16>;
-
-  def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
-                        op, VPR64, VPR64, v2i64, v2i32, v2i32>;
-
-  def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
-                          op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
-
-  def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
-                          op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
-}
-
-defm SMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SMULLve", int_arm_neon_vmulls>;
-defm UMULL_lane_v3 : NI_2VEL_mul_v3_pat<"UMULLve", int_arm_neon_vmullu>;
-defm SQDMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SQDMULLve", int_arm_neon_vqdmull>;
-
-multiclass NI_qdma<SDPatternOperator op> {
-  def _4s : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
-                    (op node:$Ra,
-                      (v4i32 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
-
-  def _2d : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
-                    (op node:$Ra,
-                      (v2i64 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
-}
-
-defm Neon_qdmlal : NI_qdma<int_arm_neon_vqadds>;
-defm Neon_qdmlsl : NI_qdma<int_arm_neon_vqsubs>;
-
-multiclass NI_2VEL_v3_qdma_pat<string subop, string op> {
-  def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
-                     !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR128Lo,
-                     v4i32, v4i16, v8i16>;
-
-  def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
-                     !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR128,
-                     v2i64, v2i32, v4i32>;
-
-  def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
-                       !cast<PatFrag>(op # "_4s"), VPR128Lo,
-                       v4i32, v8i16, v8i16, v4i16, Neon_High8H>;
-
-  def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
-                       !cast<PatFrag>(op # "_2d"), VPR128,
-                       v2i64, v4i32, v4i32, v2i32, Neon_High4S>;
-
-  def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_4s8h"),
-                       !cast<PatFrag>(op # "_4s"),
-                       v4i32, v8i16, v4i16, Neon_High8H, DUP8h>;
-
-  def : NI_2VEL2_lane0<!cast<Instruction>(subop # "_2d4s"),
-                       !cast<PatFrag>(op # "_2d"),
-                       v2i64, v4i32, v2i32, Neon_High4S, DUP4s>;
-
-  // Index can only be half of the max value for lane in 64-bit vector
-
-  def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
-                    !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR64Lo,
-                    v4i32, v4i16, v4i16>;
-
-  def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
-                    !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR64,
-                    v2i64, v2i32, v2i32>;
-
-  def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
-                      !cast<PatFrag>(op # "_4s"), VPR64Lo,
-                      v4i32, v8i16, v4i16, v4i16, Neon_High8H>;
-
-  def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
-                      !cast<PatFrag>(op # "_2d"), VPR64,
-                      v2i64, v4i32, v2i32, v2i32, Neon_High4S>;
-}
-
-defm SQDMLAL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLALvve", "Neon_qdmlal">;
-defm SQDMLSL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLSLvve", "Neon_qdmlsl">;
-
-// End of implementation for instruction class (3V Elem)
-
-class NeonI_REV<string asmop, string Res, bits<2> size, bit Q, bit U,
-                bits<5> opcode, RegisterOperand ResVPR, ValueType ResTy,
-                SDPatternOperator Neon_Rev>
-  : NeonI_2VMisc<Q, U, size, opcode,
-               (outs ResVPR:$Rd), (ins ResVPR:$Rn),
-               asmop # "\t$Rd." # Res # ", $Rn." # Res,
-               [(set (ResTy ResVPR:$Rd),
-                  (ResTy (Neon_Rev (ResTy ResVPR:$Rn))))],
-               NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-def REV64_16b : NeonI_REV<"rev64", "16b", 0b00, 0b1, 0b0, 0b00000, VPR128,
-                          v16i8, Neon_rev64>;
-def REV64_8h : NeonI_REV<"rev64", "8h", 0b01, 0b1, 0b0, 0b00000, VPR128,
-                         v8i16, Neon_rev64>;
-def REV64_4s : NeonI_REV<"rev64", "4s", 0b10, 0b1, 0b0, 0b00000, VPR128,
-                         v4i32, Neon_rev64>;
-def REV64_8b : NeonI_REV<"rev64", "8b", 0b00, 0b0, 0b0, 0b00000, VPR64,
-                         v8i8, Neon_rev64>;
-def REV64_4h : NeonI_REV<"rev64", "4h", 0b01, 0b0, 0b0, 0b00000, VPR64,
-                         v4i16, Neon_rev64>;
-def REV64_2s : NeonI_REV<"rev64", "2s", 0b10, 0b0, 0b0, 0b00000, VPR64,
-                         v2i32, Neon_rev64>;
-
-def : Pat<(v4f32 (Neon_rev64 (v4f32 VPR128:$Rn))), (REV64_4s VPR128:$Rn)>;
-def : Pat<(v2f32 (Neon_rev64 (v2f32 VPR64:$Rn))), (REV64_2s VPR64:$Rn)>;
-
-def REV32_16b : NeonI_REV<"rev32", "16b", 0b00, 0b1, 0b1, 0b00000, VPR128,
-                          v16i8, Neon_rev32>;
-def REV32_8h : NeonI_REV<"rev32", "8h", 0b01, 0b1, 0b1, 0b00000, VPR128,
-                          v8i16, Neon_rev32>;
-def REV32_8b : NeonI_REV<"rev32", "8b", 0b00, 0b0, 0b1, 0b00000, VPR64,
-                         v8i8, Neon_rev32>;
-def REV32_4h : NeonI_REV<"rev32", "4h", 0b01, 0b0, 0b1, 0b00000, VPR64,
-                         v4i16, Neon_rev32>;
-
-def REV16_16b : NeonI_REV<"rev16", "16b", 0b00, 0b1, 0b0, 0b00001, VPR128,
-                          v16i8, Neon_rev16>;
-def REV16_8b : NeonI_REV<"rev16", "8b", 0b00, 0b0, 0b0, 0b00001, VPR64,
-                         v8i8, Neon_rev16>;
-
-multiclass NeonI_PairwiseAdd<string asmop, bit U, bits<5> opcode,
-                             SDPatternOperator Neon_Padd> {
-  def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
-                           (outs VPR128:$Rd), (ins VPR128:$Rn),
-                           asmop # "\t$Rd.8h, $Rn.16b",
-                           [(set (v8i16 VPR128:$Rd),
-                              (v8i16 (Neon_Padd (v16i8 VPR128:$Rn))))],
-                           NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
-                          (outs VPR64:$Rd), (ins VPR64:$Rn),
-                          asmop # "\t$Rd.4h, $Rn.8b",
-                          [(set (v4i16 VPR64:$Rd),
-                             (v4i16 (Neon_Padd (v8i8 VPR64:$Rn))))],
-                          NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
-                           (outs VPR128:$Rd), (ins VPR128:$Rn),
-                           asmop # "\t$Rd.4s, $Rn.8h",
-                           [(set (v4i32 VPR128:$Rd),
-                              (v4i32 (Neon_Padd (v8i16 VPR128:$Rn))))],
-                           NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
-                          (outs VPR64:$Rd), (ins VPR64:$Rn),
-                          asmop # "\t$Rd.2s, $Rn.4h",
-                          [(set (v2i32 VPR64:$Rd),
-                             (v2i32 (Neon_Padd (v4i16 VPR64:$Rn))))],
-                          NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
-                           (outs VPR128:$Rd), (ins VPR128:$Rn),
-                           asmop # "\t$Rd.2d, $Rn.4s",
-                           [(set (v2i64 VPR128:$Rd),
-                              (v2i64 (Neon_Padd (v4i32 VPR128:$Rn))))],
-                           NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
-                          (outs VPR64:$Rd), (ins VPR64:$Rn),
-                          asmop # "\t$Rd.1d, $Rn.2s",
-                          [(set (v1i64 VPR64:$Rd),
-                             (v1i64 (Neon_Padd (v2i32 VPR64:$Rn))))],
-                          NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-defm SADDLP : NeonI_PairwiseAdd<"saddlp", 0b0, 0b00010,
-                                int_arm_neon_vpaddls>;
-defm UADDLP : NeonI_PairwiseAdd<"uaddlp", 0b1, 0b00010,
-                                int_arm_neon_vpaddlu>;
-
-def : Pat<(v1i64 (int_aarch64_neon_saddlv (v2i32 VPR64:$Rn))),
-          (SADDLP2s1d $Rn)>;
-def : Pat<(v1i64 (int_aarch64_neon_uaddlv (v2i32 VPR64:$Rn))),
-          (UADDLP2s1d $Rn)>;
-
-multiclass NeonI_PairwiseAddAcc<string asmop, bit U, bits<5> opcode,
-                             SDPatternOperator Neon_Padd> {
-  let Constraints = "$src = $Rd" in {
-    def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
-                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                             asmop # "\t$Rd.8h, $Rn.16b",
-                             [(set (v8i16 VPR128:$Rd),
-                                (v8i16 (Neon_Padd
-                                  (v8i16 VPR128:$src), (v16i8 VPR128:$Rn))))],
-                             NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
-                            (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
-                            asmop # "\t$Rd.4h, $Rn.8b",
-                            [(set (v4i16 VPR64:$Rd),
-                               (v4i16 (Neon_Padd
-                                 (v4i16 VPR64:$src), (v8i8 VPR64:$Rn))))],
-                            NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
-                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                            asmop # "\t$Rd.4s, $Rn.8h",
-                            [(set (v4i32 VPR128:$Rd),
-                               (v4i32 (Neon_Padd
-                                 (v4i32 VPR128:$src), (v8i16 VPR128:$Rn))))],
-                            NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
-                            (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
-                            asmop # "\t$Rd.2s, $Rn.4h",
-                            [(set (v2i32 VPR64:$Rd),
-                               (v2i32 (Neon_Padd
-                                 (v2i32 VPR64:$src), (v4i16 VPR64:$Rn))))],
-                            NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
-                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                            asmop # "\t$Rd.2d, $Rn.4s",
-                            [(set (v2i64 VPR128:$Rd),
-                               (v2i64 (Neon_Padd
-                                 (v2i64 VPR128:$src), (v4i32 VPR128:$Rn))))],
-                            NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
-                            (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
-                            asmop # "\t$Rd.1d, $Rn.2s",
-                            [(set (v1i64 VPR64:$Rd),
-                               (v1i64 (Neon_Padd
-                                 (v1i64 VPR64:$src), (v2i32 VPR64:$Rn))))],
-                            NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-  }
-}
-
-defm SADALP : NeonI_PairwiseAddAcc<"sadalp", 0b0, 0b00110,
-                                   int_arm_neon_vpadals>;
-defm UADALP : NeonI_PairwiseAddAcc<"uadalp", 0b1, 0b00110,
-                                   int_arm_neon_vpadalu>;
-
-multiclass NeonI_2VMisc_BHSDsize_1Arg<string asmop, bit U, bits<5> opcode> {
-  def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
-                         (outs VPR128:$Rd), (ins VPR128:$Rn),
-                         asmop # "\t$Rd.16b, $Rn.16b",
-                         [], NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
-                        (outs VPR128:$Rd), (ins VPR128:$Rn),
-                        asmop # "\t$Rd.8h, $Rn.8h",
-                        [], NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
-                        (outs VPR128:$Rd), (ins VPR128:$Rn),
-                        asmop # "\t$Rd.4s, $Rn.4s",
-                        [], NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
-                        (outs VPR128:$Rd), (ins VPR128:$Rn),
-                        asmop # "\t$Rd.2d, $Rn.2d",
-                        [], NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
-                         (outs VPR64:$Rd), (ins VPR64:$Rn),
-                         asmop # "\t$Rd.8b, $Rn.8b",
-                         [], NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
-                        (outs VPR64:$Rd), (ins VPR64:$Rn),
-                        asmop # "\t$Rd.4h, $Rn.4h",
-                        [], NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
-                        (outs VPR64:$Rd), (ins VPR64:$Rn),
-                        asmop # "\t$Rd.2s, $Rn.2s",
-                        [], NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-defm SQABS : NeonI_2VMisc_BHSDsize_1Arg<"sqabs", 0b0, 0b00111>;
-defm SQNEG : NeonI_2VMisc_BHSDsize_1Arg<"sqneg", 0b1, 0b00111>;
-defm ABS : NeonI_2VMisc_BHSDsize_1Arg<"abs", 0b0, 0b01011>;
-defm NEG : NeonI_2VMisc_BHSDsize_1Arg<"neg", 0b1, 0b01011>;
-
-multiclass NeonI_2VMisc_BHSD_1Arg_Pattern<string Prefix,
-                                          SDPatternOperator Neon_Op> {
-  def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$Rn))),
-            (v16i8 (!cast<Instruction>(Prefix # 16b) (v16i8 VPR128:$Rn)))>;
-
-  def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$Rn))),
-            (v8i16 (!cast<Instruction>(Prefix # 8h) (v8i16 VPR128:$Rn)))>;
-
-  def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$Rn))),
-            (v4i32 (!cast<Instruction>(Prefix # 4s) (v4i32 VPR128:$Rn)))>;
-
-  def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$Rn))),
-            (v2i64 (!cast<Instruction>(Prefix # 2d) (v2i64 VPR128:$Rn)))>;
-
-  def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$Rn))),
-            (v8i8 (!cast<Instruction>(Prefix # 8b) (v8i8 VPR64:$Rn)))>;
-
-  def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$Rn))),
-            (v4i16 (!cast<Instruction>(Prefix # 4h) (v4i16 VPR64:$Rn)))>;
-
-  def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$Rn))),
-            (v2i32 (!cast<Instruction>(Prefix # 2s) (v2i32 VPR64:$Rn)))>;
-}
-
-defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQABS", int_arm_neon_vqabs>;
-defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQNEG", int_arm_neon_vqneg>;
-defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"ABS", int_arm_neon_vabs>;
-
-def : Pat<(v16i8 (sub
-            (v16i8 Neon_AllZero),
-            (v16i8 VPR128:$Rn))),
-          (v16i8 (NEG16b (v16i8 VPR128:$Rn)))>;
-def : Pat<(v8i8 (sub
-            (v8i8 Neon_AllZero),
-            (v8i8 VPR64:$Rn))),
-          (v8i8 (NEG8b (v8i8 VPR64:$Rn)))>;
-def : Pat<(v8i16 (sub
-            (v8i16 (bitconvert (v16i8 Neon_AllZero))),
-            (v8i16 VPR128:$Rn))),
-          (v8i16 (NEG8h (v8i16 VPR128:$Rn)))>;
-def : Pat<(v4i16 (sub
-            (v4i16 (bitconvert (v8i8 Neon_AllZero))),
-            (v4i16 VPR64:$Rn))),
-          (v4i16 (NEG4h (v4i16 VPR64:$Rn)))>;
-def : Pat<(v4i32 (sub
-            (v4i32 (bitconvert (v16i8 Neon_AllZero))),
-            (v4i32 VPR128:$Rn))),
-          (v4i32 (NEG4s (v4i32 VPR128:$Rn)))>;
-def : Pat<(v2i32 (sub
-            (v2i32 (bitconvert (v8i8 Neon_AllZero))),
-            (v2i32 VPR64:$Rn))),
-          (v2i32 (NEG2s (v2i32 VPR64:$Rn)))>;
-def : Pat<(v2i64 (sub
-            (v2i64 (bitconvert (v16i8 Neon_AllZero))),
-            (v2i64 VPR128:$Rn))),
-          (v2i64 (NEG2d (v2i64 VPR128:$Rn)))>;
-
-multiclass NeonI_2VMisc_BHSDsize_2Args<string asmop, bit U, bits<5> opcode> {
-  let Constraints = "$src = $Rd" in {
-    def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
-                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                           asmop # "\t$Rd.16b, $Rn.16b",
-                           [], NoItinerary>,
-              Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
-                          (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                          asmop # "\t$Rd.8h, $Rn.8h",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
-                          (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                          asmop # "\t$Rd.4s, $Rn.4s",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
-                          (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                          asmop # "\t$Rd.2d, $Rn.2d",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
-                          (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
-                          asmop # "\t$Rd.8b, $Rn.8b",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
-                          (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
-                          asmop # "\t$Rd.4h, $Rn.4h",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
-                          (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
-                          asmop # "\t$Rd.2s, $Rn.2s",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-  }
-}
-
-defm SUQADD : NeonI_2VMisc_BHSDsize_2Args<"suqadd", 0b0, 0b00011>;
-defm USQADD : NeonI_2VMisc_BHSDsize_2Args<"usqadd", 0b1, 0b00011>;
-
-multiclass NeonI_2VMisc_BHSD_2Args_Pattern<string Prefix,
-                                           SDPatternOperator Neon_Op> {
-  def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$src), (v16i8 VPR128:$Rn))),
-            (v16i8 (!cast<Instruction>(Prefix # 16b)
-              (v16i8 VPR128:$src), (v16i8 VPR128:$Rn)))>;
-
-  def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$src), (v8i16 VPR128:$Rn))),
-            (v8i16 (!cast<Instruction>(Prefix # 8h)
-              (v8i16 VPR128:$src), (v8i16 VPR128:$Rn)))>;
-
-  def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$src), (v4i32 VPR128:$Rn))),
-            (v4i32 (!cast<Instruction>(Prefix # 4s)
-              (v4i32 VPR128:$src), (v4i32 VPR128:$Rn)))>;
-
-  def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$src), (v2i64 VPR128:$Rn))),
-            (v2i64 (!cast<Instruction>(Prefix # 2d)
-              (v2i64 VPR128:$src), (v2i64 VPR128:$Rn)))>;
-
-  def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$src), (v8i8 VPR64:$Rn))),
-            (v8i8 (!cast<Instruction>(Prefix # 8b)
-              (v8i8 VPR64:$src), (v8i8 VPR64:$Rn)))>;
-
-  def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$src), (v4i16 VPR64:$Rn))),
-            (v4i16 (!cast<Instruction>(Prefix # 4h)
-              (v4i16 VPR64:$src), (v4i16 VPR64:$Rn)))>;
-
-  def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$src), (v2i32 VPR64:$Rn))),
-            (v2i32 (!cast<Instruction>(Prefix # 2s)
-              (v2i32 VPR64:$src), (v2i32 VPR64:$Rn)))>;
-}
-
-defm : NeonI_2VMisc_BHSD_2Args_Pattern<"SUQADD", int_aarch64_neon_suqadd>;
-defm : NeonI_2VMisc_BHSD_2Args_Pattern<"USQADD", int_aarch64_neon_usqadd>;
-
-multiclass NeonI_2VMisc_BHSsizes<string asmop, bit U,
-                          SDPatternOperator Neon_Op> {
-  def 16b : NeonI_2VMisc<0b1, U, 0b00, 0b00100,
-                         (outs VPR128:$Rd), (ins VPR128:$Rn),
-                         asmop # "\t$Rd.16b, $Rn.16b",
-                         [(set (v16i8 VPR128:$Rd),
-                            (v16i8 (Neon_Op (v16i8 VPR128:$Rn))))],
-                         NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 8h : NeonI_2VMisc<0b1, U, 0b01, 0b00100,
-                        (outs VPR128:$Rd), (ins VPR128:$Rn),
-                        asmop # "\t$Rd.8h, $Rn.8h",
-                        [(set (v8i16 VPR128:$Rd),
-                           (v8i16 (Neon_Op (v8i16 VPR128:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 4s : NeonI_2VMisc<0b1, U, 0b10, 0b00100,
-                        (outs VPR128:$Rd), (ins VPR128:$Rn),
-                        asmop # "\t$Rd.4s, $Rn.4s",
-                        [(set (v4i32 VPR128:$Rd),
-                           (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 8b : NeonI_2VMisc<0b0, U, 0b00, 0b00100,
-                        (outs VPR64:$Rd), (ins VPR64:$Rn),
-                        asmop # "\t$Rd.8b, $Rn.8b",
-                        [(set (v8i8 VPR64:$Rd),
-                           (v8i8 (Neon_Op (v8i8 VPR64:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 4h : NeonI_2VMisc<0b0, U, 0b01, 0b00100,
-                        (outs VPR64:$Rd), (ins VPR64:$Rn),
-                        asmop # "\t$Rd.4h, $Rn.4h",
-                        [(set (v4i16 VPR64:$Rd),
-                           (v4i16 (Neon_Op (v4i16 VPR64:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2s : NeonI_2VMisc<0b0, U, 0b10, 0b00100,
-                        (outs VPR64:$Rd), (ins VPR64:$Rn),
-                        asmop # "\t$Rd.2s, $Rn.2s",
-                        [(set (v2i32 VPR64:$Rd),
-                           (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-defm CLS : NeonI_2VMisc_BHSsizes<"cls", 0b0, int_arm_neon_vcls>;
-defm CLZ : NeonI_2VMisc_BHSsizes<"clz", 0b1, ctlz>;
-
-multiclass NeonI_2VMisc_Bsize<string asmop, bit U, bits<2> size,
-                              bits<5> Opcode> {
-  def 16b : NeonI_2VMisc<0b1, U, size, Opcode,
-                         (outs VPR128:$Rd), (ins VPR128:$Rn),
-                         asmop # "\t$Rd.16b, $Rn.16b",
-                         [], NoItinerary>,
-            Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 8b : NeonI_2VMisc<0b0, U, size, Opcode,
-                        (outs VPR64:$Rd), (ins VPR64:$Rn),
-                        asmop # "\t$Rd.8b, $Rn.8b",
-                        [], NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-defm CNT : NeonI_2VMisc_Bsize<"cnt", 0b0, 0b00, 0b00101>;
-defm NOT : NeonI_2VMisc_Bsize<"not", 0b1, 0b00, 0b00101>;
-defm RBIT : NeonI_2VMisc_Bsize<"rbit", 0b1, 0b01, 0b00101>;
-
-def : NeonInstAlias<"mvn $Rd.16b, $Rn.16b",
-                    (NOT16b VPR128:$Rd, VPR128:$Rn), 0>;
-def : NeonInstAlias<"mvn $Rd.8b, $Rn.8b",
-                    (NOT8b VPR64:$Rd, VPR64:$Rn), 0>;
-
-def : Pat<(v16i8 (ctpop (v16i8 VPR128:$Rn))),
-          (v16i8 (CNT16b (v16i8 VPR128:$Rn)))>;
-def : Pat<(v8i8 (ctpop (v8i8 VPR64:$Rn))),
-          (v8i8 (CNT8b (v8i8 VPR64:$Rn)))>;
-
-def : Pat<(v16i8 (xor
-            (v16i8 VPR128:$Rn),
-            (v16i8 Neon_AllOne))),
-          (v16i8 (NOT16b (v16i8 VPR128:$Rn)))>;
-def : Pat<(v8i8 (xor
-            (v8i8 VPR64:$Rn),
-            (v8i8 Neon_AllOne))),
-          (v8i8 (NOT8b (v8i8 VPR64:$Rn)))>;
-def : Pat<(v8i16 (xor
-            (v8i16 VPR128:$Rn),
-            (v8i16 (bitconvert (v16i8 Neon_AllOne))))),
-          (NOT16b VPR128:$Rn)>;
-def : Pat<(v4i16 (xor
-            (v4i16 VPR64:$Rn),
-            (v4i16 (bitconvert (v8i8 Neon_AllOne))))),
-          (NOT8b VPR64:$Rn)>;
-def : Pat<(v4i32 (xor
-            (v4i32 VPR128:$Rn),
-            (v4i32 (bitconvert (v16i8 Neon_AllOne))))),
-          (NOT16b VPR128:$Rn)>;
-def : Pat<(v2i32 (xor
-            (v2i32 VPR64:$Rn),
-            (v2i32 (bitconvert (v8i8 Neon_AllOne))))),
-          (NOT8b VPR64:$Rn)>;
-def : Pat<(v2i64 (xor
-            (v2i64 VPR128:$Rn),
-            (v2i64 (bitconvert (v16i8 Neon_AllOne))))),
-          (NOT16b VPR128:$Rn)>;
-
-def : Pat<(v16i8 (int_aarch64_neon_rbit (v16i8 VPR128:$Rn))),
-          (v16i8 (RBIT16b (v16i8 VPR128:$Rn)))>;
-def : Pat<(v8i8 (int_aarch64_neon_rbit (v8i8 VPR64:$Rn))),
-          (v8i8 (RBIT8b (v8i8 VPR64:$Rn)))>;
-
-multiclass NeonI_2VMisc_SDsizes<string asmop, bit U, bits<5> opcode,
-                                SDPatternOperator Neon_Op> {
-  def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
-                        (outs VPR128:$Rd), (ins VPR128:$Rn),
-                        asmop # "\t$Rd.4s, $Rn.4s",
-                        [(set (v4f32 VPR128:$Rd),
-                           (v4f32 (Neon_Op (v4f32 VPR128:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
-                        (outs VPR128:$Rd), (ins VPR128:$Rn),
-                        asmop # "\t$Rd.2d, $Rn.2d",
-                        [(set (v2f64 VPR128:$Rd),
-                           (v2f64 (Neon_Op (v2f64 VPR128:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
-                        (outs VPR64:$Rd), (ins VPR64:$Rn),
-                        asmop # "\t$Rd.2s, $Rn.2s",
-                        [(set (v2f32 VPR64:$Rd),
-                           (v2f32 (Neon_Op (v2f32 VPR64:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-defm FABS : NeonI_2VMisc_SDsizes<"fabs", 0b0, 0b01111, fabs>;
-defm FNEG : NeonI_2VMisc_SDsizes<"fneg", 0b1, 0b01111, fneg>;
-
-multiclass NeonI_2VMisc_HSD_Narrow<string asmop, bit U, bits<5> opcode> {
-  def 8h8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
-                          (outs VPR64:$Rd), (ins VPR128:$Rn),
-                          asmop # "\t$Rd.8b, $Rn.8h",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 4s4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
-                          (outs VPR64:$Rd), (ins VPR128:$Rn),
-                          asmop # "\t$Rd.4h, $Rn.4s",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2d2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
-                          (outs VPR64:$Rd), (ins VPR128:$Rn),
-                          asmop # "\t$Rd.2s, $Rn.2d",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  let Constraints = "$Rd = $src" in {
-    def 8h16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
-                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                             asmop # "2\t$Rd.16b, $Rn.8h",
-                             [], NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 4s8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
-                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                            asmop # "2\t$Rd.8h, $Rn.4s",
-                            [], NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 2d4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
-                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                            asmop # "2\t$Rd.4s, $Rn.2d",
-                            [], NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-  }
-}
-
-defm XTN : NeonI_2VMisc_HSD_Narrow<"xtn", 0b0, 0b10010>;
-defm SQXTUN : NeonI_2VMisc_HSD_Narrow<"sqxtun", 0b1, 0b10010>;
-defm SQXTN : NeonI_2VMisc_HSD_Narrow<"sqxtn", 0b0, 0b10100>;
-defm UQXTN : NeonI_2VMisc_HSD_Narrow<"uqxtn", 0b1, 0b10100>;
-
-multiclass NeonI_2VMisc_Narrow_Patterns<string Prefix,
-                                        SDPatternOperator Neon_Op> {
-  def : Pat<(v8i8 (Neon_Op (v8i16 VPR128:$Rn))),
-            (v8i8 (!cast<Instruction>(Prefix # 8h8b) (v8i16 VPR128:$Rn)))>;
-
-  def : Pat<(v4i16 (Neon_Op (v4i32 VPR128:$Rn))),
-            (v4i16 (!cast<Instruction>(Prefix # 4s4h) (v4i32 VPR128:$Rn)))>;
-
-  def : Pat<(v2i32 (Neon_Op (v2i64 VPR128:$Rn))),
-            (v2i32 (!cast<Instruction>(Prefix # 2d2s) (v2i64 VPR128:$Rn)))>;
-
-  def : Pat<(v16i8 (concat_vectors
-              (v8i8 VPR64:$src),
-              (v8i8 (Neon_Op (v8i16 VPR128:$Rn))))),
-            (!cast<Instruction>(Prefix # 8h16b)
-              (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
-              VPR128:$Rn)>;
-
-  def : Pat<(v8i16 (concat_vectors
-              (v4i16 VPR64:$src),
-              (v4i16 (Neon_Op (v4i32 VPR128:$Rn))))),
-            (!cast<Instruction>(Prefix # 4s8h)
-              (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
-              VPR128:$Rn)>;
-
-  def : Pat<(v4i32 (concat_vectors
-              (v2i32 VPR64:$src),
-              (v2i32 (Neon_Op (v2i64 VPR128:$Rn))))),
-            (!cast<Instruction>(Prefix # 2d4s)
-              (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
-              VPR128:$Rn)>;
-}
-
-defm : NeonI_2VMisc_Narrow_Patterns<"XTN", trunc>;
-defm : NeonI_2VMisc_Narrow_Patterns<"SQXTUN", int_arm_neon_vqmovnsu>;
-defm : NeonI_2VMisc_Narrow_Patterns<"SQXTN", int_arm_neon_vqmovns>;
-defm : NeonI_2VMisc_Narrow_Patterns<"UQXTN", int_arm_neon_vqmovnu>;
-
-multiclass NeonI_2VMisc_SHIFT<string asmop, bit U, bits<5> opcode> {
-  let DecoderMethod = "DecodeSHLLInstruction" in {
-    def 8b8h : NeonI_2VMisc<0b0, U, 0b00, opcode,
-                            (outs VPR128:$Rd),
-                            (ins VPR64:$Rn, uimm_exact8:$Imm),
-                            asmop # "\t$Rd.8h, $Rn.8b, $Imm",
-                            [], NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU]>;
-
-    def 4h4s : NeonI_2VMisc<0b0, U, 0b01, opcode,
-                            (outs VPR128:$Rd),
-                            (ins VPR64:$Rn, uimm_exact16:$Imm),
-                            asmop # "\t$Rd.4s, $Rn.4h, $Imm",
-                            [], NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU]>;
-
-    def 2s2d : NeonI_2VMisc<0b0, U, 0b10, opcode,
-                            (outs VPR128:$Rd),
-                            (ins VPR64:$Rn, uimm_exact32:$Imm),
-                            asmop # "\t$Rd.2d, $Rn.2s, $Imm",
-                            [], NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU]>;
-
-    def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
-                            (outs VPR128:$Rd),
-                            (ins VPR128:$Rn, uimm_exact8:$Imm),
-                            asmop # "2\t$Rd.8h, $Rn.16b, $Imm",
-                            [], NoItinerary>,
-                Sched<[WriteFPALU, ReadFPALU]>;
-
-    def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
-                            (outs VPR128:$Rd),
-                            (ins VPR128:$Rn, uimm_exact16:$Imm),
-                            asmop # "2\t$Rd.4s, $Rn.8h, $Imm",
-                            [], NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU]>;
-
-    def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
-                            (outs VPR128:$Rd),
-                            (ins VPR128:$Rn, uimm_exact32:$Imm),
-                            asmop # "2\t$Rd.2d, $Rn.4s, $Imm",
-                            [], NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU]>;
-  }
-}
-
-defm SHLL : NeonI_2VMisc_SHIFT<"shll", 0b1, 0b10011>;
-
-class NeonI_SHLL_Patterns<ValueType OpTy, ValueType DesTy,
-                          SDPatternOperator ExtOp, Operand Neon_Imm,
-                          string suffix>
-  : Pat<(DesTy (shl
-          (DesTy (ExtOp (OpTy VPR64:$Rn))),
-            (DesTy (Neon_vdup
-              (i32 Neon_Imm:$Imm))))),
-        (!cast<Instruction>("SHLL" # suffix) VPR64:$Rn, Neon_Imm:$Imm)>;
-
-class NeonI_SHLL_High_Patterns<ValueType OpTy, ValueType DesTy,
-                               SDPatternOperator ExtOp, Operand Neon_Imm,
-                               string suffix, PatFrag GetHigh>
-  : Pat<(DesTy (shl
-          (DesTy (ExtOp
-            (OpTy (GetHigh VPR128:$Rn)))),
-              (DesTy (Neon_vdup
-                (i32 Neon_Imm:$Imm))))),
-        (!cast<Instruction>("SHLL" # suffix) VPR128:$Rn, Neon_Imm:$Imm)>;
-
-def : NeonI_SHLL_Patterns<v8i8, v8i16, zext, uimm_exact8, "8b8h">;
-def : NeonI_SHLL_Patterns<v8i8, v8i16, sext, uimm_exact8, "8b8h">;
-def : NeonI_SHLL_Patterns<v4i16, v4i32, zext, uimm_exact16, "4h4s">;
-def : NeonI_SHLL_Patterns<v4i16, v4i32, sext, uimm_exact16, "4h4s">;
-def : NeonI_SHLL_Patterns<v2i32, v2i64, zext, uimm_exact32, "2s2d">;
-def : NeonI_SHLL_Patterns<v2i32, v2i64, sext, uimm_exact32, "2s2d">;
-def : NeonI_SHLL_High_Patterns<v8i8, v8i16, zext, uimm_exact8, "16b8h",
-                               Neon_High16B>;
-def : NeonI_SHLL_High_Patterns<v8i8, v8i16, sext, uimm_exact8, "16b8h",
-                               Neon_High16B>;
-def : NeonI_SHLL_High_Patterns<v4i16, v4i32, zext, uimm_exact16, "8h4s",
-                               Neon_High8H>;
-def : NeonI_SHLL_High_Patterns<v4i16, v4i32, sext, uimm_exact16, "8h4s",
-                               Neon_High8H>;
-def : NeonI_SHLL_High_Patterns<v2i32, v2i64, zext, uimm_exact32, "4s2d",
-                               Neon_High4S>;
-def : NeonI_SHLL_High_Patterns<v2i32, v2i64, sext, uimm_exact32, "4s2d",
-                               Neon_High4S>;
-
-multiclass NeonI_2VMisc_SD_Narrow<string asmop, bit U, bits<5> opcode> {
-  def 4s4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
-                          (outs VPR64:$Rd), (ins VPR128:$Rn),
-                          asmop # "\t$Rd.4h, $Rn.4s",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
-                          (outs VPR64:$Rd), (ins VPR128:$Rn),
-                          asmop # "\t$Rd.2s, $Rn.2d",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  let Constraints = "$src = $Rd" in {
-    def 4s8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
-                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                            asmop # "2\t$Rd.8h, $Rn.4s",
-                            [], NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-
-    def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
-                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                            asmop # "2\t$Rd.4s, $Rn.2d",
-                            [], NoItinerary>,
-               Sched<[WriteFPALU, ReadFPALU, ReadFPALU]>;
-  }
-}
-
-defm FCVTN : NeonI_2VMisc_SD_Narrow<"fcvtn", 0b0, 0b10110>;
-
-multiclass NeonI_2VMisc_Narrow_Pattern<string prefix,
-                                       SDPatternOperator f32_to_f16_Op,
-                                       SDPatternOperator f64_to_f32_Op> {
-
-  def : Pat<(v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))),
-              (!cast<Instruction>(prefix # "4s4h") (v4f32 VPR128:$Rn))>;
-
-  def : Pat<(v8i16 (concat_vectors
-                (v4i16 VPR64:$src),
-                (v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))))),
-                  (!cast<Instruction>(prefix # "4s8h")
-                    (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
-                    (v4f32 VPR128:$Rn))>;
-
-  def : Pat<(v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))),
-            (!cast<Instruction>(prefix # "2d2s") (v2f64 VPR128:$Rn))>;
-
-  def : Pat<(v4f32 (concat_vectors
-              (v2f32 VPR64:$src),
-              (v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))))),
-                (!cast<Instruction>(prefix # "2d4s")
-                  (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
-                  (v2f64 VPR128:$Rn))>;
-}
-
-defm : NeonI_2VMisc_Narrow_Pattern<"FCVTN", int_arm_neon_vcvtfp2hf, fround>;
-
-multiclass NeonI_2VMisc_D_Narrow<string asmop, string prefix, bit U,
-                                 bits<5> opcode> {
-  def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
-                          (outs VPR64:$Rd), (ins VPR128:$Rn),
-                          asmop # "\t$Rd.2s, $Rn.2d",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
-                          (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                          asmop # "2\t$Rd.4s, $Rn.2d",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-    let Constraints = "$src = $Rd";
-  }
-
-  def : Pat<(v2f32 (int_aarch64_neon_vcvtxn (v2f64 VPR128:$Rn))),
-            (!cast<Instruction>(prefix # "2d2s") VPR128:$Rn)>;
-
-  def : Pat<(v4f32 (concat_vectors
-              (v2f32 VPR64:$src),
-              (v2f32 (int_aarch64_neon_vcvtxn (v2f64 VPR128:$Rn))))),
-            (!cast<Instruction>(prefix # "2d4s")
-               (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
-               VPR128:$Rn)>;
-}
-
-defm FCVTXN : NeonI_2VMisc_D_Narrow<"fcvtxn","FCVTXN", 0b1, 0b10110>;
-
-def Neon_High4Float : PatFrag<(ops node:$in),
-                              (extract_subvector (v4f32 node:$in), (iPTR 2))>;
-
-multiclass NeonI_2VMisc_HS_Extend<string asmop, bit U, bits<5> opcode> {
-  def 4h4s : NeonI_2VMisc<0b0, U, 0b00, opcode,
-                          (outs VPR128:$Rd), (ins VPR64:$Rn),
-                          asmop # "\t$Rd.4s, $Rn.4h",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2s2d : NeonI_2VMisc<0b0, U, 0b01, opcode,
-                          (outs VPR128:$Rd), (ins VPR64:$Rn),
-                          asmop # "\t$Rd.2d, $Rn.2s",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 8h4s : NeonI_2VMisc<0b1, U, 0b00, opcode,
-                          (outs VPR128:$Rd), (ins VPR128:$Rn),
-                          asmop # "2\t$Rd.4s, $Rn.8h",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 4s2d : NeonI_2VMisc<0b1, U, 0b01, opcode,
-                          (outs VPR128:$Rd), (ins VPR128:$Rn),
-                          asmop # "2\t$Rd.2d, $Rn.4s",
-                          [], NoItinerary>,
-             Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-defm FCVTL : NeonI_2VMisc_HS_Extend<"fcvtl", 0b0, 0b10111>;
-
-multiclass NeonI_2VMisc_Extend_Pattern<string prefix> {
-  def : Pat<(v4f32 (int_arm_neon_vcvthf2fp (v4i16 VPR64:$Rn))),
-            (!cast<Instruction>(prefix # "4h4s") VPR64:$Rn)>;
-
-  def : Pat<(v4f32 (int_arm_neon_vcvthf2fp
-              (v4i16 (Neon_High8H
-                (v8i16 VPR128:$Rn))))),
-            (!cast<Instruction>(prefix # "8h4s") VPR128:$Rn)>;
-
-  def : Pat<(v2f64 (fextend (v2f32 VPR64:$Rn))),
-            (!cast<Instruction>(prefix # "2s2d") VPR64:$Rn)>;
-
-  def : Pat<(v2f64 (fextend
-              (v2f32 (Neon_High4Float
-                (v4f32 VPR128:$Rn))))),
-            (!cast<Instruction>(prefix # "4s2d") VPR128:$Rn)>;
-}
-
-defm : NeonI_2VMisc_Extend_Pattern<"FCVTL">;
-
-multiclass NeonI_2VMisc_SD_Conv<string asmop, bit Size, bit U, bits<5> opcode,
-                                ValueType ResTy4s, ValueType OpTy4s,
-                                ValueType ResTy2d, ValueType OpTy2d,
-                                ValueType ResTy2s, ValueType OpTy2s,
-                                SDPatternOperator Neon_Op> {
-
-  def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
-                        (outs VPR128:$Rd), (ins VPR128:$Rn),
-                        asmop # "\t$Rd.4s, $Rn.4s",
-                        [(set (ResTy4s VPR128:$Rd),
-                           (ResTy4s (Neon_Op (OpTy4s VPR128:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2d : NeonI_2VMisc<0b1, U, {Size, 0b1}, opcode,
-                        (outs VPR128:$Rd), (ins VPR128:$Rn),
-                        asmop # "\t$Rd.2d, $Rn.2d",
-                        [(set (ResTy2d VPR128:$Rd),
-                           (ResTy2d (Neon_Op (OpTy2d VPR128:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
-                        (outs VPR64:$Rd), (ins VPR64:$Rn),
-                        asmop # "\t$Rd.2s, $Rn.2s",
-                        [(set (ResTy2s VPR64:$Rd),
-                           (ResTy2s (Neon_Op (OpTy2s VPR64:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-multiclass NeonI_2VMisc_fp_to_int<string asmop, bit Size, bit U,
-                                  bits<5> opcode, SDPatternOperator Neon_Op> {
-  defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4i32, v4f32, v2i64,
-                                v2f64, v2i32, v2f32, Neon_Op>;
-}
-
-defm FCVTNS : NeonI_2VMisc_fp_to_int<"fcvtns", 0b0, 0b0, 0b11010,
-                                     int_arm_neon_vcvtns>;
-defm FCVTNU : NeonI_2VMisc_fp_to_int<"fcvtnu", 0b0, 0b1, 0b11010,
-                                     int_arm_neon_vcvtnu>;
-defm FCVTPS : NeonI_2VMisc_fp_to_int<"fcvtps", 0b1, 0b0, 0b11010,
-                                     int_arm_neon_vcvtps>;
-defm FCVTPU : NeonI_2VMisc_fp_to_int<"fcvtpu", 0b1, 0b1, 0b11010,
-                                     int_arm_neon_vcvtpu>;
-defm FCVTMS : NeonI_2VMisc_fp_to_int<"fcvtms", 0b0, 0b0, 0b11011,
-                                     int_arm_neon_vcvtms>;
-defm FCVTMU : NeonI_2VMisc_fp_to_int<"fcvtmu", 0b0, 0b1, 0b11011,
-                                     int_arm_neon_vcvtmu>;
-defm FCVTZS : NeonI_2VMisc_fp_to_int<"fcvtzs", 0b1, 0b0, 0b11011, fp_to_sint>;
-defm FCVTZU : NeonI_2VMisc_fp_to_int<"fcvtzu", 0b1, 0b1, 0b11011, fp_to_uint>;
-defm FCVTAS : NeonI_2VMisc_fp_to_int<"fcvtas", 0b0, 0b0, 0b11100,
-                                     int_arm_neon_vcvtas>;
-defm FCVTAU : NeonI_2VMisc_fp_to_int<"fcvtau", 0b0, 0b1, 0b11100,
-                                     int_arm_neon_vcvtau>;
-
-multiclass NeonI_2VMisc_int_to_fp<string asmop, bit Size, bit U,
-                                  bits<5> opcode, SDPatternOperator Neon_Op> {
-  defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4i32, v2f64,
-                                v2i64, v2f32, v2i32, Neon_Op>;
-}
-
-defm SCVTF : NeonI_2VMisc_int_to_fp<"scvtf", 0b0, 0b0, 0b11101, sint_to_fp>;
-defm UCVTF : NeonI_2VMisc_int_to_fp<"ucvtf", 0b0, 0b1, 0b11101, uint_to_fp>;
-
-multiclass NeonI_2VMisc_fp_to_fp<string asmop, bit Size, bit U,
-                                 bits<5> opcode, SDPatternOperator Neon_Op> {
-  defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4f32, v2f64,
-                                v2f64, v2f32, v2f32, Neon_Op>;
-}
-
-defm FRINTN : NeonI_2VMisc_fp_to_fp<"frintn", 0b0, 0b0, 0b11000,
-                                     int_aarch64_neon_frintn>;
-defm FRINTA : NeonI_2VMisc_fp_to_fp<"frinta", 0b0, 0b1, 0b11000, frnd>;
-defm FRINTP : NeonI_2VMisc_fp_to_fp<"frintp", 0b1, 0b0, 0b11000, fceil>;
-defm FRINTM : NeonI_2VMisc_fp_to_fp<"frintm", 0b0, 0b0, 0b11001, ffloor>;
-defm FRINTX : NeonI_2VMisc_fp_to_fp<"frintx", 0b0, 0b1, 0b11001, frint>;
-defm FRINTZ : NeonI_2VMisc_fp_to_fp<"frintz", 0b1, 0b0, 0b11001, ftrunc>;
-defm FRINTI : NeonI_2VMisc_fp_to_fp<"frinti", 0b1, 0b1, 0b11001, fnearbyint>;
-defm FRECPE : NeonI_2VMisc_fp_to_fp<"frecpe", 0b1, 0b0, 0b11101,
-                                    int_arm_neon_vrecpe>;
-defm FRSQRTE : NeonI_2VMisc_fp_to_fp<"frsqrte", 0b1, 0b1, 0b11101,
-                                     int_arm_neon_vrsqrte>;
-let SchedRW = [WriteFPSqrt, ReadFPSqrt] in {
-defm FSQRT : NeonI_2VMisc_fp_to_fp<"fsqrt", 0b1, 0b1, 0b11111, fsqrt>;
-}
-
-multiclass NeonI_2VMisc_S_Conv<string asmop, bit Size, bit U,
-                               bits<5> opcode, SDPatternOperator Neon_Op> {
-  def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
-                        (outs VPR128:$Rd), (ins VPR128:$Rn),
-                        asmop # "\t$Rd.4s, $Rn.4s",
-                        [(set (v4i32 VPR128:$Rd),
-                           (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-
-  def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
-                        (outs VPR64:$Rd), (ins VPR64:$Rn),
-                        asmop # "\t$Rd.2s, $Rn.2s",
-                        [(set (v2i32 VPR64:$Rd),
-                           (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
-                        NoItinerary>,
-           Sched<[WriteFPALU, ReadFPALU]>;
-}
-
-defm URECPE : NeonI_2VMisc_S_Conv<"urecpe", 0b1, 0b0, 0b11100,
-                                  int_arm_neon_vrecpe>;
-defm URSQRTE : NeonI_2VMisc_S_Conv<"ursqrte", 0b1, 0b1, 0b11100,
-                                   int_arm_neon_vrsqrte>;
-
-// Crypto Class
-class NeonI_Cryptoaes_2v<bits<2> size, bits<5> opcode,
-                         string asmop, SDPatternOperator opnode>
-  : NeonI_Crypto_AES<size, opcode,
-                     (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                     asmop # "\t$Rd.16b, $Rn.16b",
-                     [(set (v16i8 VPR128:$Rd),
-                        (v16i8 (opnode (v16i8 VPR128:$src),
-                                       (v16i8 VPR128:$Rn))))],
-                     NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-  let Predicates = [HasNEON, HasCrypto];
-}
-
-def AESE : NeonI_Cryptoaes_2v<0b00, 0b00100, "aese", int_arm_neon_aese>;
-def AESD : NeonI_Cryptoaes_2v<0b00, 0b00101, "aesd", int_arm_neon_aesd>;
-
-class NeonI_Cryptoaes<bits<2> size, bits<5> opcode,
-                      string asmop, SDPatternOperator opnode>
-  : NeonI_Crypto_AES<size, opcode,
-                     (outs VPR128:$Rd), (ins VPR128:$Rn),
-                     asmop # "\t$Rd.16b, $Rn.16b",
-                     [(set (v16i8 VPR128:$Rd),
-                        (v16i8 (opnode (v16i8 VPR128:$Rn))))],
-                     NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]>;
-
-def AESMC : NeonI_Cryptoaes<0b00, 0b00110, "aesmc", int_arm_neon_aesmc>;
-def AESIMC : NeonI_Cryptoaes<0b00, 0b00111, "aesimc", int_arm_neon_aesimc>;
-
-class NeonI_Cryptosha_vv<bits<2> size, bits<5> opcode,
-                         string asmop, SDPatternOperator opnode>
-  : NeonI_Crypto_SHA<size, opcode,
-                     (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
-                     asmop # "\t$Rd.4s, $Rn.4s",
-                     [(set (v4i32 VPR128:$Rd),
-                        (v4i32 (opnode (v4i32 VPR128:$src),
-                                       (v4i32 VPR128:$Rn))))],
-                     NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-  let Predicates = [HasNEON, HasCrypto];
-}
-
-def SHA1SU1 : NeonI_Cryptosha_vv<0b00, 0b00001, "sha1su1",
-                                 int_arm_neon_sha1su1>;
-def SHA256SU0 : NeonI_Cryptosha_vv<0b00, 0b00010, "sha256su0",
-                                   int_arm_neon_sha256su0>;
-
-class NeonI_Cryptosha_ss<bits<2> size, bits<5> opcode,
-                         string asmop, SDPatternOperator opnode>
-  : NeonI_Crypto_SHA<size, opcode,
-                     (outs FPR32:$Rd), (ins FPR32:$Rn),
-                     asmop # "\t$Rd, $Rn",
-                     [], NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU]> {
-  let Predicates = [HasNEON, HasCrypto];
-  let hasSideEffects = 0;
-}
-
-def SHA1H : NeonI_Cryptosha_ss<0b00, 0b00000, "sha1h", int_arm_neon_sha1h>;
-def : Pat<(i32 (int_arm_neon_sha1h i32:$Rn)),
-          (COPY_TO_REGCLASS (SHA1H (COPY_TO_REGCLASS i32:$Rn, FPR32)), GPR32)>;
-
-
-class NeonI_Cryptosha3_vvv<bits<2> size, bits<3> opcode, string asmop,
-                           SDPatternOperator opnode>
-  : NeonI_Crypto_3VSHA<size, opcode,
-                       (outs VPR128:$Rd),
-                       (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
-                       asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
-                       [(set (v4i32 VPR128:$Rd),
-                          (v4i32 (opnode (v4i32 VPR128:$src),
-                                         (v4i32 VPR128:$Rn),
-                                         (v4i32 VPR128:$Rm))))],
-                       NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-  let Predicates = [HasNEON, HasCrypto];
-}
-
-def SHA1SU0 : NeonI_Cryptosha3_vvv<0b00, 0b011, "sha1su0",
-                                   int_arm_neon_sha1su0>;
-def SHA256SU1 : NeonI_Cryptosha3_vvv<0b00, 0b110, "sha256su1",
-                                     int_arm_neon_sha256su1>;
-
-class NeonI_Cryptosha3_qqv<bits<2> size, bits<3> opcode, string asmop,
-                           SDPatternOperator opnode>
-  : NeonI_Crypto_3VSHA<size, opcode,
-                       (outs FPR128:$Rd),
-                       (ins FPR128:$src, FPR128:$Rn, VPR128:$Rm),
-                       asmop # "\t$Rd, $Rn, $Rm.4s",
-                       [(set (v4i32 FPR128:$Rd),
-                          (v4i32 (opnode (v4i32 FPR128:$src),
-                                         (v4i32 FPR128:$Rn),
-                                         (v4i32 VPR128:$Rm))))],
-                       NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-  let Predicates = [HasNEON, HasCrypto];
-}
-
-def SHA256H : NeonI_Cryptosha3_qqv<0b00, 0b100, "sha256h",
-                                   int_arm_neon_sha256h>;
-def SHA256H2 : NeonI_Cryptosha3_qqv<0b00, 0b101, "sha256h2",
-                                    int_arm_neon_sha256h2>;
-
-class NeonI_Cryptosha3_qsv<bits<2> size, bits<3> opcode, string asmop>
-  : NeonI_Crypto_3VSHA<size, opcode,
-                       (outs FPR128:$Rd),
-                       (ins FPR128:$src, FPR32:$Rn, VPR128:$Rm),
-                       asmop # "\t$Rd, $Rn, $Rm.4s",
-                       [], NoItinerary>,
-    Sched<[WriteFPALU, ReadFPALU, ReadFPALU, ReadFPALU]> {
-  let Constraints = "$src = $Rd";
-  let hasSideEffects = 0;
-  let Predicates = [HasNEON, HasCrypto];
-}
-
-def SHA1C : NeonI_Cryptosha3_qsv<0b00, 0b000, "sha1c">;
-def SHA1P : NeonI_Cryptosha3_qsv<0b00, 0b001, "sha1p">;
-def SHA1M : NeonI_Cryptosha3_qsv<0b00, 0b010, "sha1m">;
-
-def : Pat<(int_arm_neon_sha1c v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk),
-          (SHA1C v4i32:$hash_abcd,
-                 (COPY_TO_REGCLASS i32:$hash_e, FPR32), v4i32:$wk)>;
-def : Pat<(int_arm_neon_sha1m v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk),
-          (SHA1M v4i32:$hash_abcd,
-                 (COPY_TO_REGCLASS i32:$hash_e, FPR32), v4i32:$wk)>;
-def : Pat<(int_arm_neon_sha1p v4i32:$hash_abcd, i32:$hash_e, v4i32:$wk),
-          (SHA1P v4i32:$hash_abcd,
-                 (COPY_TO_REGCLASS i32:$hash_e, FPR32), v4i32:$wk)>;
-
-// Additional patterns to match shl to USHL.
-def : Pat<(v8i8 (shl (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
-          (USHLvvv_8B $Rn, $Rm)>;
-def : Pat<(v4i16 (shl (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
-          (USHLvvv_4H $Rn, $Rm)>;
-def : Pat<(v2i32 (shl (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
-          (USHLvvv_2S $Rn, $Rm)>;
-def : Pat<(v1i64 (shl (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
-          (USHLddd $Rn, $Rm)>;
-def : Pat<(v16i8 (shl (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
-          (USHLvvv_16B $Rn, $Rm)>;
-def : Pat<(v8i16 (shl (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
-          (USHLvvv_8H $Rn, $Rm)>;
-def : Pat<(v4i32 (shl (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
-          (USHLvvv_4S $Rn, $Rm)>;
-def : Pat<(v2i64 (shl (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
-          (USHLvvv_2D $Rn, $Rm)>;
-
-def : Pat<(v1i8 (shl (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
-          (EXTRACT_SUBREG
-              (USHLvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
-                          (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8)),
-              sub_8)>;
-def : Pat<(v1i16 (shl (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
-          (EXTRACT_SUBREG
-              (USHLvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
-                          (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16)),
-              sub_16)>;
-def : Pat<(v1i32 (shl (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
-          (EXTRACT_SUBREG
-              (USHLvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
-                          (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32)),
-              sub_32)>;
-
-// Additional patterns to match sra, srl.
-// For a vector right shift by vector, the shift amounts of SSHL/USHL are
-// negative. Negate the vector of shift amount first.
-def : Pat<(v8i8 (srl (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
-          (USHLvvv_8B $Rn, (NEG8b $Rm))>;
-def : Pat<(v4i16 (srl (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
-          (USHLvvv_4H $Rn, (NEG4h $Rm))>;
-def : Pat<(v2i32 (srl (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
-          (USHLvvv_2S $Rn, (NEG2s $Rm))>;
-def : Pat<(v1i64 (srl (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
-          (USHLddd $Rn, (NEGdd $Rm))>;
-def : Pat<(v16i8 (srl (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
-          (USHLvvv_16B $Rn, (NEG16b $Rm))>;
-def : Pat<(v8i16 (srl (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
-          (USHLvvv_8H $Rn, (NEG8h $Rm))>;
-def : Pat<(v4i32 (srl (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
-          (USHLvvv_4S $Rn, (NEG4s $Rm))>;
-def : Pat<(v2i64 (srl (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
-          (USHLvvv_2D $Rn, (NEG2d $Rm))>;
-
-def : Pat<(v1i8 (srl (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
-          (EXTRACT_SUBREG
-              (USHLvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
-                          (NEG8b (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8))),
-              sub_8)>;
-def : Pat<(v1i16 (srl (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
-          (EXTRACT_SUBREG
-              (USHLvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
-                          (NEG4h (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16))),
-              sub_16)>;
-def : Pat<(v1i32 (srl (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
-          (EXTRACT_SUBREG
-              (USHLvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
-                          (NEG2s (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32))),
-              sub_32)>;
-
-def : Pat<(v8i8 (sra (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
-          (SSHLvvv_8B $Rn, (NEG8b $Rm))>;
-def : Pat<(v4i16 (sra (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
-          (SSHLvvv_4H $Rn, (NEG4h $Rm))>;
-def : Pat<(v2i32 (sra (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
-          (SSHLvvv_2S $Rn, (NEG2s $Rm))>;
-def : Pat<(v1i64 (sra (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
-          (SSHLddd $Rn, (NEGdd $Rm))>;
-def : Pat<(v16i8 (sra (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
-          (SSHLvvv_16B $Rn, (NEG16b $Rm))>;
-def : Pat<(v8i16 (sra (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
-          (SSHLvvv_8H $Rn, (NEG8h $Rm))>;
-def : Pat<(v4i32 (sra (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
-          (SSHLvvv_4S $Rn, (NEG4s $Rm))>;
-def : Pat<(v2i64 (sra (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
-          (SSHLvvv_2D $Rn, (NEG2d $Rm))>;
-
-def : Pat<(v1i8 (sra (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
-          (EXTRACT_SUBREG
-              (SSHLvvv_8B (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8),
-                          (NEG8b (SUBREG_TO_REG (i64 0), FPR8:$Rm, sub_8))),
-              sub_8)>;
-def : Pat<(v1i16 (sra (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
-          (EXTRACT_SUBREG
-              (SSHLvvv_4H (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16),
-                          (NEG4h (SUBREG_TO_REG (i64 0), FPR16:$Rm, sub_16))),
-              sub_16)>;
-def : Pat<(v1i32 (sra (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
-          (EXTRACT_SUBREG
-              (SSHLvvv_2S (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
-                          (NEG2s (SUBREG_TO_REG (i64 0), FPR32:$Rm, sub_32))),
-              sub_32)>;
-
-//
-// Patterns for handling half-precision values
-//
-
-// Convert between f16 value and f32 value
-def : Pat<(f32 (f16_to_f32 (i32 GPR32:$Rn))),
-          (FCVTsh (EXTRACT_SUBREG (FMOVsw $Rn), sub_16))>;
-def : Pat<(i32 (f32_to_f16 (f32 FPR32:$Rn))),
-          (FMOVws (SUBREG_TO_REG (i64 0), (f16 (FCVThs $Rn)), sub_16))>;
-
-// Convert f16 value coming in as i16 value to f32
-def : Pat<(f32 (f16_to_f32 (i32 (and (i32 GPR32:$Rn), 65535)))),
-          (FCVTsh (EXTRACT_SUBREG (FMOVsw GPR32:$Rn), sub_16))>;
-def : Pat<(f32 (f16_to_f32 (i32 (assertzext GPR32:$Rn)))),
-          (FCVTsh (EXTRACT_SUBREG (FMOVsw GPR32:$Rn), sub_16))>;
-
-def : Pat<(f32 (f16_to_f32 (i32 (assertzext (i32 (
-            f32_to_f16 (f32 FPR32:$Rn))))))),
-          (f32 FPR32:$Rn)>;
-
-// Patterns for vector extract of half-precision FP value in i16 storage type
-def : Pat<(f32 (f16_to_f32 ( i32 (and (i32 (vector_extract
-            (v4i16 VPR64:$Rn), neon_uimm2_bare:$Imm)), 65535)))),
-          (FCVTsh (f16 (DUPhv_H
-            (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-            neon_uimm2_bare:$Imm)))>;
-
-def : Pat<(f32 (f16_to_f32 ( i32 (and (i32 (vector_extract
-            (v8i16 VPR128:$Rn), neon_uimm3_bare:$Imm)), 65535)))),
-          (FCVTsh (f16 (DUPhv_H (v8i16 VPR128:$Rn), neon_uimm3_bare:$Imm)))>;
-
-// Patterns for vector insert of half-precision FP value 0 in i16 storage type
-def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
-            (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 0))))))),
-            (neon_uimm3_bare:$Imm))),
-          (v8i16 (INSELh (v8i16 VPR128:$Rn),
-            (v8i16 (SUBREG_TO_REG (i64 0),
-              (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 WZR))), sub_16)),
-              sub_16)),
-            neon_uimm3_bare:$Imm, 0))>;
-
-def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
-            (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 0))))))),
-            (neon_uimm2_bare:$Imm))),
-          (v4i16 (EXTRACT_SUBREG
-            (v8i16 (INSELh
-              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-              (v8i16 (SUBREG_TO_REG (i64 0),
-                (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 WZR))), sub_16)),
-                sub_16)),
-              neon_uimm2_bare:$Imm, 0)),
-            sub_64))>;
-
-// Patterns for vector insert of half-precision FP value in i16 storage type
-def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
-            (i32 (assertsext (i32 (fp_to_sint
-              (f32 (f16_to_f32 (i32 (and (i32 GPR32:$src), 65535)))))))),
-            (neon_uimm3_bare:$Imm))),
-          (v8i16 (INSELh (v8i16 VPR128:$Rn),
-            (v8i16 (SUBREG_TO_REG (i64 0),
-              (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 GPR32:$src))), sub_16)),
-              sub_16)),
-            neon_uimm3_bare:$Imm, 0))>;
-
-def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
-            (i32 (assertsext (i32 (fp_to_sint
-              (f32 (f16_to_f32 (i32 (and (i32 GPR32:$src), 65535)))))))),
-            (neon_uimm2_bare:$Imm))),
-          (v4i16 (EXTRACT_SUBREG
-            (v8i16 (INSELh
-              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-              (v8i16 (SUBREG_TO_REG (i64 0),
-                (f16 (EXTRACT_SUBREG (f32 (FMOVsw (i32 GPR32:$src))), sub_16)),
-                sub_16)),
-              neon_uimm2_bare:$Imm, 0)),
-            sub_64))>;
-
-def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
-            (i32 (vector_extract (v8i16 VPR128:$src), neon_uimm3_bare:$Imm2)),
-              (neon_uimm3_bare:$Imm1))),
-          (v8i16 (INSELh (v8i16 VPR128:$Rn), (v8i16 VPR128:$src),
-            neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2))>;
-
-// Patterns for vector copy of half-precision FP value in i16 storage type
-def : Pat<(v8i16 (vector_insert (v8i16 VPR128:$Rn),
-            (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 (and (i32
-              (vector_extract (v8i16 VPR128:$src), neon_uimm3_bare:$Imm2)),
-              65535)))))))),
-            (neon_uimm3_bare:$Imm1))),
-          (v8i16 (INSELh (v8i16 VPR128:$Rn), (v8i16 VPR128:$src),
-            neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2))>;
-
-def : Pat<(v4i16 (vector_insert (v4i16 VPR64:$Rn),
-            (i32 (assertsext (i32 (fp_to_sint(f32 (f16_to_f32 (i32 (and (i32
-              (vector_extract (v4i16 VPR64:$src), neon_uimm3_bare:$Imm2)),
-              65535)))))))),
-            (neon_uimm3_bare:$Imm1))),
-          (v4i16 (EXTRACT_SUBREG
-            (v8i16 (INSELh
-              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
-              (v8i16 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
-              neon_uimm3_bare:$Imm1, neon_uimm3_bare:$Imm2)),
-            sub_64))>;
-
-

Removed: llvm/trunk/lib/Target/AArch64/AArch64MCInstLower.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64MCInstLower.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64MCInstLower.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64MCInstLower.cpp (removed)
@@ -1,157 +0,0 @@
-//===-- AArch64MCInstLower.cpp - Convert AArch64 MachineInstr to an MCInst -==//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains code to lower AArch64 MachineInstrs to their corresponding
-// MCInst records.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AArch64AsmPrinter.h"
-#include "AArch64TargetMachine.h"
-#include "MCTargetDesc/AArch64MCExpr.h"
-#include "Utils/AArch64BaseInfo.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/IR/Mangler.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-
-using namespace llvm;
-
-MCOperand
-AArch64AsmPrinter::lowerSymbolOperand(const MachineOperand &MO,
-                                      const MCSymbol *Sym) const {
-  const MCExpr *Expr = nullptr;
-
-  Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, OutContext);
-
-  switch (MO.getTargetFlags()) {
-  case AArch64II::MO_GOT:
-    Expr = AArch64MCExpr::CreateGOT(Expr, OutContext);
-    break;
-  case AArch64II::MO_GOT_LO12:
-    Expr = AArch64MCExpr::CreateGOTLo12(Expr, OutContext);
-    break;
-  case AArch64II::MO_LO12:
-    Expr = AArch64MCExpr::CreateLo12(Expr, OutContext);
-    break;
-  case AArch64II::MO_DTPREL_G1:
-    Expr = AArch64MCExpr::CreateDTPREL_G1(Expr, OutContext);
-    break;
-  case AArch64II::MO_DTPREL_G0_NC:
-    Expr = AArch64MCExpr::CreateDTPREL_G0_NC(Expr, OutContext);
-    break;
-  case AArch64II::MO_GOTTPREL:
-    Expr = AArch64MCExpr::CreateGOTTPREL(Expr, OutContext);
-    break;
-  case AArch64II::MO_GOTTPREL_LO12:
-    Expr = AArch64MCExpr::CreateGOTTPRELLo12(Expr, OutContext);
-    break;
-  case AArch64II::MO_TLSDESC:
-    Expr = AArch64MCExpr::CreateTLSDesc(Expr, OutContext);
-    break;
-  case AArch64II::MO_TLSDESC_LO12:
-    Expr = AArch64MCExpr::CreateTLSDescLo12(Expr, OutContext);
-    break;
-  case AArch64II::MO_TPREL_G1:
-    Expr = AArch64MCExpr::CreateTPREL_G1(Expr, OutContext);
-    break;
-  case AArch64II::MO_TPREL_G0_NC:
-    Expr = AArch64MCExpr::CreateTPREL_G0_NC(Expr, OutContext);
-    break;
-  case AArch64II::MO_ABS_G3:
-    Expr = AArch64MCExpr::CreateABS_G3(Expr, OutContext);
-    break;
-  case AArch64II::MO_ABS_G2_NC:
-    Expr = AArch64MCExpr::CreateABS_G2_NC(Expr, OutContext);
-    break;
-  case AArch64II::MO_ABS_G1_NC:
-    Expr = AArch64MCExpr::CreateABS_G1_NC(Expr, OutContext);
-    break;
-  case AArch64II::MO_ABS_G0_NC:
-    Expr = AArch64MCExpr::CreateABS_G0_NC(Expr, OutContext);
-    break;
-  case AArch64II::MO_NO_FLAG:
-    // Expr is already correct
-    break;
-  default:
-    llvm_unreachable("Unexpected MachineOperand flag");
-  }
-
-  if (!MO.isJTI() && MO.getOffset())
-    Expr = MCBinaryExpr::CreateAdd(Expr,
-                                   MCConstantExpr::Create(MO.getOffset(),
-                                                          OutContext),
-                                   OutContext);
-
-  return MCOperand::CreateExpr(Expr);
-}
-
-bool AArch64AsmPrinter::lowerOperand(const MachineOperand &MO,
-                                     MCOperand &MCOp) const {
-  switch (MO.getType()) {
-  default: llvm_unreachable("unknown operand type");
-  case MachineOperand::MO_Register:
-    if (MO.isImplicit())
-      return false;
-    assert(!MO.getSubReg() && "Subregs should be eliminated!");
-    MCOp = MCOperand::CreateReg(MO.getReg());
-    break;
-  case MachineOperand::MO_Immediate:
-    MCOp = MCOperand::CreateImm(MO.getImm());
-    break;
-  case MachineOperand::MO_FPImmediate: {
-    assert(MO.getFPImm()->isZero() && "Only fp imm 0.0 is supported");
-    MCOp = MCOperand::CreateFPImm(0.0);
-    break;
-  }
-  case MachineOperand::MO_BlockAddress:
-    MCOp = lowerSymbolOperand(MO, GetBlockAddressSymbol(MO.getBlockAddress()));
-    break;
-  case MachineOperand::MO_ExternalSymbol:
-    MCOp = lowerSymbolOperand(MO, GetExternalSymbolSymbol(MO.getSymbolName()));
-    break;
-  case MachineOperand::MO_GlobalAddress:
-    MCOp = lowerSymbolOperand(MO, getSymbol(MO.getGlobal()));
-    break;
-  case MachineOperand::MO_MachineBasicBlock:
-    MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
-                                   MO.getMBB()->getSymbol(), OutContext));
-    break;
-  case MachineOperand::MO_JumpTableIndex:
-    MCOp = lowerSymbolOperand(MO, GetJTISymbol(MO.getIndex()));
-    break;
-  case MachineOperand::MO_ConstantPoolIndex:
-    MCOp = lowerSymbolOperand(MO, GetCPISymbol(MO.getIndex()));
-    break;
-  case MachineOperand::MO_RegisterMask:
-    // Ignore call clobbers
-    return false;
-
-  }
-
-  return true;
-}
-
-void llvm::LowerAArch64MachineInstrToMCInst(const MachineInstr *MI,
-                                            MCInst &OutMI,
-                                            AArch64AsmPrinter &AP) {
-  OutMI.setOpcode(MI->getOpcode());
-
-  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-    const MachineOperand &MO = MI->getOperand(i);
-
-    MCOperand MCOp;
-    if (AP.lowerOperand(MO, MCOp))
-      OutMI.addOperand(MCOp);
-  }
-}

Removed: llvm/trunk/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp (removed)
@@ -1,18 +0,0 @@
-//===-- AArch64MachineFuctionInfo.cpp - AArch64 machine function info -----===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file just contains the anchor for the AArch64MachineFunctionInfo to
-// force vtable emission.
-//
-//===----------------------------------------------------------------------===//
-#include "AArch64MachineFunctionInfo.h"
-
-using namespace llvm;
-
-void AArch64MachineFunctionInfo::anchor() { }

Removed: llvm/trunk/lib/Target/AArch64/AArch64MachineFunctionInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64MachineFunctionInfo.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64MachineFunctionInfo.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64MachineFunctionInfo.h (removed)
@@ -1,149 +0,0 @@
-//=- AArch64MachineFuctionInfo.h - AArch64 machine function info -*- C++ -*-==//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares AArch64-specific per-machine-function information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef AARCH64MACHINEFUNCTIONINFO_H
-#define AARCH64MACHINEFUNCTIONINFO_H
-
-#include "llvm/CodeGen/MachineFunction.h"
-
-namespace llvm {
-
-/// This class is derived from MachineFunctionInfo and contains private AArch64
-/// target-specific information for each MachineFunction.
-class AArch64MachineFunctionInfo : public MachineFunctionInfo {
-  virtual void anchor();
-
-  /// Number of bytes of arguments this function has on the stack. If the callee
-  /// is expected to restore the argument stack this should be a multiple of 16,
-  /// all usable during a tail call.
-  ///
-  /// The alternative would forbid tail call optimisation in some cases: if we
-  /// want to transfer control from a function with 8-bytes of stack-argument
-  /// space to a function with 16-bytes then misalignment of this value would
-  /// make a stack adjustment necessary, which could not be undone by the
-  /// callee.
-  unsigned BytesInStackArgArea;
-
-  /// The number of bytes to restore to deallocate space for incoming
-  /// arguments. Canonically 0 in the C calling convention, but non-zero when
-  /// callee is expected to pop the args.
-  unsigned ArgumentStackToRestore;
-
-  /// If the stack needs to be adjusted on frame entry in two stages, this
-  /// records the size of the first adjustment just prior to storing
-  /// callee-saved registers. The callee-saved slots are addressed assuming
-  /// SP == <incoming-SP> - InitialStackAdjust.
-  unsigned InitialStackAdjust;
-
-  /// Number of local-dynamic TLS accesses.
-  unsigned NumLocalDynamics;
-
-  /// @see AArch64 Procedure Call Standard, B.3
-  ///
-  /// The Frame index of the area where LowerFormalArguments puts the
-  /// general-purpose registers that might contain variadic parameters.
-  int VariadicGPRIdx;
-
-  /// @see AArch64 Procedure Call Standard, B.3
-  ///
-  /// The size of the frame object used to store the general-purpose registers
-  /// which might contain variadic arguments. This is the offset from
-  /// VariadicGPRIdx to what's stored in __gr_top.
-  unsigned VariadicGPRSize;
-
-  /// @see AArch64 Procedure Call Standard, B.3
-  ///
-  /// The Frame index of the area where LowerFormalArguments puts the
-  /// floating-point registers that might contain variadic parameters.
-  int VariadicFPRIdx;
-
-  /// @see AArch64 Procedure Call Standard, B.3
-  ///
-  /// The size of the frame object used to store the floating-point registers
-  /// which might contain variadic arguments. This is the offset from
-  /// VariadicFPRIdx to what's stored in __vr_top.
-  unsigned VariadicFPRSize;
-
-  /// @see AArch64 Procedure Call Standard, B.3
-  ///
-  /// The Frame index of an object pointing just past the last known stacked
-  /// argument on entry to a variadic function. This goes into the __stack field
-  /// of the va_list type.
-  int VariadicStackIdx;
-
-  /// The offset of the frame pointer from the stack pointer on function
-  /// entry. This is expected to be negative.
-  int FramePointerOffset;
-
-public:
-  AArch64MachineFunctionInfo()
-    : BytesInStackArgArea(0),
-      ArgumentStackToRestore(0),
-      InitialStackAdjust(0),
-      NumLocalDynamics(0),
-      VariadicGPRIdx(0),
-      VariadicGPRSize(0),
-      VariadicFPRIdx(0),
-      VariadicFPRSize(0),
-      VariadicStackIdx(0),
-      FramePointerOffset(0) {}
-
-  explicit AArch64MachineFunctionInfo(MachineFunction &MF)
-    : BytesInStackArgArea(0),
-      ArgumentStackToRestore(0),
-      InitialStackAdjust(0),
-      NumLocalDynamics(0),
-      VariadicGPRIdx(0),
-      VariadicGPRSize(0),
-      VariadicFPRIdx(0),
-      VariadicFPRSize(0),
-      VariadicStackIdx(0),
-      FramePointerOffset(0) {}
-
-  unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
-  void setBytesInStackArgArea (unsigned bytes) { BytesInStackArgArea = bytes;}
-
-  unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
-  void setArgumentStackToRestore(unsigned bytes) {
-    ArgumentStackToRestore = bytes;
-  }
-
-  unsigned getInitialStackAdjust() const { return InitialStackAdjust; }
-  void setInitialStackAdjust(unsigned bytes) { InitialStackAdjust = bytes; }
-
-  unsigned getNumLocalDynamicTLSAccesses() const { return NumLocalDynamics; }
-  void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamics; }
-
-  int getVariadicGPRIdx() const { return VariadicGPRIdx; }
-  void setVariadicGPRIdx(int Idx) { VariadicGPRIdx = Idx; }
-
-  unsigned getVariadicGPRSize() const { return VariadicGPRSize; }
-  void setVariadicGPRSize(unsigned Size) { VariadicGPRSize = Size; }
-
-  int getVariadicFPRIdx() const { return VariadicFPRIdx; }
-  void setVariadicFPRIdx(int Idx) { VariadicFPRIdx = Idx; }
-
-  unsigned getVariadicFPRSize() const { return VariadicFPRSize; }
-  void setVariadicFPRSize(unsigned Size) { VariadicFPRSize = Size; }
-
-  int getVariadicStackIdx() const { return VariadicStackIdx; }
-  void setVariadicStackIdx(int Idx) { VariadicStackIdx = Idx; }
-
-  int getFramePointerOffset() const { return FramePointerOffset; }
-  void setFramePointerOffset(int Idx) { FramePointerOffset = Idx; }
-
-};
-
-} // End llvm namespace
-
-#endif

Removed: llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp (removed)
@@ -1,186 +0,0 @@
-//===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the AArch64 implementation of the TargetRegisterInfo
-// class.
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "AArch64RegisterInfo.h"
-#include "AArch64FrameLowering.h"
-#include "AArch64MachineFunctionInfo.h"
-#include "AArch64TargetMachine.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
-
-using namespace llvm;
-
-#define GET_REGINFO_TARGET_DESC
-#include "AArch64GenRegisterInfo.inc"
-
-AArch64RegisterInfo::AArch64RegisterInfo()
-  : AArch64GenRegisterInfo(AArch64::X30) {
-}
-
-const MCPhysReg *
-AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
-  return CSR_PCS_SaveList;
-}
-
-const uint32_t*
-AArch64RegisterInfo::getCallPreservedMask(CallingConv::ID) const {
-  return CSR_PCS_RegMask;
-}
-
-const uint32_t *AArch64RegisterInfo::getTLSDescCallPreservedMask() const {
-  return TLSDesc_RegMask;
-}
-
-const TargetRegisterClass *
-AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
-  if (RC == &AArch64::FlagClassRegClass)
-    return &AArch64::GPR64RegClass;
-
-  return RC;
-}
-
-
-
-BitVector
-AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
-  BitVector Reserved(getNumRegs());
-  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
-  Reserved.set(AArch64::XSP);
-  Reserved.set(AArch64::WSP);
-
-  Reserved.set(AArch64::XZR);
-  Reserved.set(AArch64::WZR);
-
-  if (TFI->hasFP(MF)) {
-    Reserved.set(AArch64::X29);
-    Reserved.set(AArch64::W29);
-  }
-
-  return Reserved;
-}
-
-static bool hasFrameOffset(int opcode) {
-  return opcode != AArch64::LD1x2_8B  && opcode != AArch64::LD1x3_8B  &&
-         opcode != AArch64::LD1x4_8B  && opcode != AArch64::ST1x2_8B  &&
-         opcode != AArch64::ST1x3_8B  && opcode != AArch64::ST1x4_8B  &&
-         opcode != AArch64::LD1x2_16B && opcode != AArch64::LD1x3_16B &&
-         opcode != AArch64::LD1x4_16B && opcode != AArch64::ST1x2_16B &&
-         opcode != AArch64::ST1x3_16B && opcode != AArch64::ST1x4_16B;
-}
-
-void
-AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MBBI,
-                                         int SPAdj,
-                                         unsigned FIOperandNum,
-                                         RegScavenger *RS) const {
-  assert(SPAdj == 0 && "Cannot deal with nonzero SPAdj yet");
-  MachineInstr &MI = *MBBI;
-  MachineBasicBlock &MBB = *MI.getParent();
-  MachineFunction &MF = *MBB.getParent();
-  MachineFrameInfo *MFI = MF.getFrameInfo();
-  const AArch64FrameLowering *TFI =
-   static_cast<const AArch64FrameLowering *>(MF.getTarget().getFrameLowering());
-
-  // In order to work out the base and offset for addressing, the FrameLowering
-  // code needs to know (sometimes) whether the instruction is storing/loading a
-  // callee-saved register, or whether it's a more generic
-  // operation. Fortunately the frame indices are used *only* for that purpose
-  // and are contiguous, so we can check here.
-  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
-  int MinCSFI = 0;
-  int MaxCSFI = -1;
-
-  if (CSI.size()) {
-    MinCSFI = CSI[0].getFrameIdx();
-    MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
-  }
-
-  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
-  bool IsCalleeSaveOp = FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI;
-
-  unsigned FrameReg;
-  int64_t Offset;
-  Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj,
-                                           IsCalleeSaveOp);
-  // A vector load/store instruction doesn't have an offset operand.
-  bool HasOffsetOp = hasFrameOffset(MI.getOpcode());
-  if (HasOffsetOp)
-    Offset += MI.getOperand(FIOperandNum + 1).getImm();
-
-  // DBG_VALUE instructions have no real restrictions so they can be handled
-  // easily.
-  if (MI.isDebugValue()) {
-    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*isDef=*/ false);
-    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
-    return;
-  }
-
-  const AArch64InstrInfo &TII =
-    *static_cast<const AArch64InstrInfo*>(MF.getTarget().getInstrInfo());
-  int MinOffset, MaxOffset, OffsetScale;
-  if (MI.getOpcode() == AArch64::ADDxxi_lsl0_s || !HasOffsetOp) {
-    MinOffset = 0;
-    MaxOffset = 0xfff;
-    OffsetScale = 1;
-  } else {
-    // Load/store of a stack object
-    TII.getAddressConstraints(MI, OffsetScale, MinOffset, MaxOffset);
-  }
-
-  // There are two situations we don't use frame + offset directly in the
-  // instruction:
-  // (1) The offset can't really be scaled
-  // (2) Can't encode offset as it doesn't have an offset operand
-  if ((Offset % OffsetScale != 0 || Offset < MinOffset || Offset > MaxOffset) ||
-      (!HasOffsetOp && Offset != 0)) {
-    unsigned BaseReg =
-      MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
-    emitRegUpdate(MBB, MBBI, MBBI->getDebugLoc(), TII,
-                  BaseReg, FrameReg, BaseReg, Offset);
-    FrameReg = BaseReg;
-    Offset = 0;
-  }
-
-  // Negative offsets are expected if we address from FP, but for
-  // now this checks nothing has gone horribly wrong.
-  assert(Offset >= 0 && "Unexpected negative offset from SP");
-
-  MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, true);
-  if (HasOffsetOp)
-    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset / OffsetScale);
-}
-
-unsigned
-AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
-  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
-  if (TFI->hasFP(MF))
-    return AArch64::X29;
-  else
-    return AArch64::XSP;
-}
-
-bool
-AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
-  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-  const AArch64FrameLowering *AFI
-    = static_cast<const AArch64FrameLowering*>(TFI);
-  return AFI->useFPForAddressing(MF);
-}

Removed: llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.h (removed)
@@ -1,79 +0,0 @@
-//==- AArch64RegisterInfo.h - AArch64 Register Information Impl -*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the AArch64 implementation of the MCRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_AARCH64REGISTERINFO_H
-#define LLVM_TARGET_AARCH64REGISTERINFO_H
-
-#include "llvm/Target/TargetRegisterInfo.h"
-
-#define GET_REGINFO_HEADER
-#include "AArch64GenRegisterInfo.inc"
-
-namespace llvm {
-
-class AArch64InstrInfo;
-class AArch64Subtarget;
-
-struct AArch64RegisterInfo : public AArch64GenRegisterInfo {
-  AArch64RegisterInfo();
-
-  const MCPhysReg *
-  getCalleeSavedRegs(const MachineFunction *MF =nullptr) const override;
-  const uint32_t *getCallPreservedMask(CallingConv::ID) const override;
-
-  unsigned getCSRFirstUseCost() const override {
-    // The cost will be compared against BlockFrequency where entry has the
-    // value of 1 << 14. A value of 5 will choose to spill or split really
-    // cold path instead of using a callee-saved register.
-    return 5;
-  }
-
-  const uint32_t *getTLSDescCallPreservedMask() const;
-
-  BitVector getReservedRegs(const MachineFunction &MF) const override;
-  unsigned getFrameRegister(const MachineFunction &MF) const override;
-
-  void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
-                           unsigned FIOperandNum,
-                           RegScavenger *Rs = nullptr) const override;
-
-  /// getCrossCopyRegClass - Returns a legal register class to copy a register
-  /// in the specified class to or from. Returns original class if it is
-  /// possible to copy between a two registers of the specified class.
-  const TargetRegisterClass *
-  getCrossCopyRegClass(const TargetRegisterClass *RC) const override;
-
-  /// getLargestLegalSuperClass - Returns the largest super class of RC that is
-  /// legal to use in the current sub-target and has the same spill size.
-  const TargetRegisterClass*
-  getLargestLegalSuperClass(const TargetRegisterClass *RC) const override {
-    if (RC == &AArch64::tcGPR64RegClass)
-      return &AArch64::GPR64RegClass;
-
-    return RC;
-  }
-
-  bool requiresRegisterScavenging(const MachineFunction &MF) const override {
-    return true;
-  }
-
-  bool requiresFrameIndexScavenging(const MachineFunction &MF) const override {
-    return true;
-  }
-
-  bool useFPForScavengingIndex(const MachineFunction &MF) const override;
-};
-
-} // end namespace llvm
-
-#endif // LLVM_TARGET_AARCH64REGISTERINFO_H

Removed: llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.td?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.td (removed)
@@ -1,290 +0,0 @@
-//===- AArch64RegisterInfo.td - ARM Register defs ----------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//  This file contains declarations that describe the AArch64 register file
-//
-//===----------------------------------------------------------------------===//
-
-let Namespace = "AArch64" in {
-def sub_128 : SubRegIndex<128>;
-def sub_64 : SubRegIndex<64>;
-def sub_32 : SubRegIndex<32>;
-def sub_16 : SubRegIndex<16>;
-def sub_8  : SubRegIndex<8>;
-
-// Note: Code depends on these having consecutive numbers.
-def qqsub : SubRegIndex<256, 256>;
-
-def qsub_0 : SubRegIndex<128>;
-def qsub_1 : SubRegIndex<128, 128>;
-def qsub_2 : ComposedSubRegIndex<qqsub, qsub_0>;
-def qsub_3 : ComposedSubRegIndex<qqsub, qsub_1>;
-
-def dsub_0 : SubRegIndex<64>;
-def dsub_1 : SubRegIndex<64, 64>;
-def dsub_2 : ComposedSubRegIndex<qsub_1, dsub_0>;
-def dsub_3 : ComposedSubRegIndex<qsub_1, dsub_1>;
-}
-
-// Registers are identified with 5-bit ID numbers.
-class AArch64Reg<bits<16> enc, string n> : Register<n> {
-  let HWEncoding = enc;
-  let Namespace = "AArch64";
-}
-
-class AArch64RegWithSubs<bits<16> enc, string n, list<Register> subregs = [],
-                         list<SubRegIndex> inds = []>
-      : AArch64Reg<enc, n> {
-  let SubRegs = subregs;
-  let SubRegIndices = inds;
-}
-
-//===----------------------------------------------------------------------===//
-//  Integer registers: w0-w30, wzr, wsp, x0-x30, xzr, sp
-//===----------------------------------------------------------------------===//
-
-foreach Index = 0-30 in {
-  def W#Index : AArch64Reg< Index, "w"#Index>, DwarfRegNum<[Index]>;
-}
-
-def WSP : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
-def WZR : AArch64Reg<31, "wzr">;
-
-// Could be combined with previous loop, but this way leaves w and x registers
-// consecutive as LLVM register numbers, which makes for easier debugging.
-foreach Index = 0-30 in {
-  def X#Index : AArch64RegWithSubs<Index, "x"#Index,
-                                   [!cast<Register>("W"#Index)], [sub_32]>,
-                DwarfRegNum<[Index]>;
-}
-
-def XSP : AArch64RegWithSubs<31, "sp", [WSP], [sub_32]>, DwarfRegNum<[31]>;
-def XZR : AArch64RegWithSubs<31, "xzr", [WZR], [sub_32]>;
-
-// Most instructions treat register 31 as zero for reads and a black-hole for
-// writes.
-
-// Note that the order of registers is important for the Disassembler here:
-// tablegen uses it to form MCRegisterClass::getRegister, which we assume can
-// take an encoding value.
-def GPR32 : RegisterClass<"AArch64", [i32], 32,
-                          (add (sequence "W%u", 0, 30), WZR)> {
-}
-
-def GPR64 : RegisterClass<"AArch64", [i64], 64,
-                          (add (sequence "X%u", 0, 30), XZR)> {
-}
-
-def GPR32nowzr : RegisterClass<"AArch64", [i32], 32,
-                               (sequence "W%u", 0, 30)> {
-}
-
-def GPR64noxzr : RegisterClass<"AArch64", [i64], 64,
-                               (sequence "X%u", 0, 30)> {
-}
-
-// For tail calls, we can't use callee-saved registers or the structure-return
-// register, as they are supposed to be live across function calls and may be
-// clobbered by the epilogue.
-def tcGPR64 : RegisterClass<"AArch64", [i64], 64,
-                            (add (sequence "X%u", 0, 7),
-                                 (sequence "X%u", 9, 18))> {
-}
-
-
-// Certain addressing-useful instructions accept sp directly. Again the order of
-// registers is important to the Disassembler.
-def GPR32wsp : RegisterClass<"AArch64", [i32], 32,
-                             (add (sequence "W%u", 0, 30), WSP)> {
-}
-
-def GPR64xsp : RegisterClass<"AArch64", [i64], 64,
-                             (add (sequence "X%u", 0, 30), XSP)> {
-}
-
-// Some aliases *only* apply to SP (e.g. MOV uses different encoding for SP and
-// non-SP variants). We can't use a bare register in those patterns because
-// TableGen doesn't like it, so we need a class containing just stack registers
-def Rxsp : RegisterClass<"AArch64", [i64], 64,
-                         (add XSP)> {
-}
-
-def Rwsp : RegisterClass<"AArch64", [i32], 32,
-                         (add WSP)> {
-}
-
-//===----------------------------------------------------------------------===//
-//  Scalar registers in the vector unit:
-//  b0-b31, h0-h31, s0-s31, d0-d31, q0-q31
-//===----------------------------------------------------------------------===//
-
-foreach Index = 0-31 in {
-  def B # Index : AArch64Reg< Index, "b" # Index>,
-                  DwarfRegNum<[!add(Index, 64)]>;
-
-  def H # Index : AArch64RegWithSubs<Index, "h" # Index,
-                                     [!cast<Register>("B" # Index)], [sub_8]>,
-                  DwarfRegNum<[!add(Index, 64)]>;
-
-  def S # Index : AArch64RegWithSubs<Index, "s" # Index,
-                                     [!cast<Register>("H" # Index)], [sub_16]>,
-                  DwarfRegNum<[!add(Index, 64)]>;
-
-  def D # Index : AArch64RegWithSubs<Index, "d" # Index,
-                                     [!cast<Register>("S" # Index)], [sub_32]>,
-                  DwarfRegNum<[!add(Index, 64)]>;
-
-  def Q # Index : AArch64RegWithSubs<Index, "q" # Index,
-                                     [!cast<Register>("D" # Index)], [sub_64]>,
-                  DwarfRegNum<[!add(Index, 64)]>;
-}
-
-
-def FPR8 : RegisterClass<"AArch64", [v1i8], 8,
-                          (sequence "B%u", 0, 31)> {
-}
-
-def FPR16 : RegisterClass<"AArch64", [f16, v1i16], 16,
-                          (sequence "H%u", 0, 31)> {
-}
-
-def FPR32 : RegisterClass<"AArch64", [f32, v1i32], 32,
-                          (sequence "S%u", 0, 31)> {
-}
-
-def FPR64 : RegisterClass<"AArch64",
-                          [f64, v2f32, v2i32, v4i16, v8i8, v1i64, v1f64],
-                          64, (sequence "D%u", 0, 31)>;
-
-def FPR128 : RegisterClass<"AArch64",
-                           [f128, v2f64, v2i64, v4f32, v4i32, v8i16, v16i8],
-                           128, (sequence "Q%u", 0, 31)>;
-
-def FPR64Lo : RegisterClass<"AArch64",
-                            [f64, v2f32, v2i32, v4i16, v8i8, v1i64, v1f64],
-                            64, (sequence "D%u", 0, 15)>;
-
-def FPR128Lo : RegisterClass<"AArch64",
-                             [f128, v2f64, v2i64, v4f32, v4i32, v8i16, v16i8],
-                             128, (sequence "Q%u", 0, 15)>;
-
-//===----------------------------------------------------------------------===//
-//  Vector registers:
-//===----------------------------------------------------------------------===//
-
-def VPR64AsmOperand : AsmOperandClass {
-  let Name = "VPR";
-  let PredicateMethod = "isReg";
-  let RenderMethod = "addRegOperands";
-}
-
-def VPR64 : RegisterOperand<FPR64, "printVPRRegister">;
-
-def VPR128 : RegisterOperand<FPR128, "printVPRRegister">;
-
-def VPR64Lo : RegisterOperand<FPR64Lo, "printVPRRegister">;
-
-def VPR128Lo : RegisterOperand<FPR128Lo, "printVPRRegister">;
-
-// Flags register
-def NZCV : Register<"nzcv"> {
-  let Namespace = "AArch64";
-}
-
-def FlagClass : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
-  let CopyCost = -1;
-  let isAllocatable = 0;
-}
-
-//===----------------------------------------------------------------------===//
-//  Consecutive vector registers
-//===----------------------------------------------------------------------===//
-// 2 Consecutive 64-bit registers: D0_D1, D1_D2, ..., D31_D0
-def Tuples2D : RegisterTuples<[dsub_0, dsub_1],
-                              [(rotl FPR64, 0), (rotl FPR64, 1)]>;
-                              
-// 3 Consecutive 64-bit registers: D0_D1_D2, ..., D31_D0_D1
-def Tuples3D : RegisterTuples<[dsub_0, dsub_1, dsub_2],
-                              [(rotl FPR64, 0), (rotl FPR64, 1),
-                               (rotl FPR64, 2)]>;
-                               
-// 4 Consecutive 64-bit registers: D0_D1_D2_D3, ..., D31_D0_D1_D2
-def Tuples4D : RegisterTuples<[dsub_0, dsub_1, dsub_2, dsub_3],
-                              [(rotl FPR64, 0), (rotl FPR64, 1),
-                               (rotl FPR64, 2), (rotl FPR64, 3)]>;
-
-// 2 Consecutive 128-bit registers: Q0_Q1, Q1_Q2, ..., Q30_Q31
-def Tuples2Q : RegisterTuples<[qsub_0, qsub_1],
-                              [(rotl FPR128, 0), (rotl FPR128, 1)]>;
-
-// 3 Consecutive 128-bit registers: Q0_Q1_Q2, ..., Q31_Q0_Q1
-def Tuples3Q : RegisterTuples<[qsub_0, qsub_1, qsub_2],
-                              [(rotl FPR128, 0), (rotl FPR128, 1),
-                               (rotl FPR128, 2)]>;
-                               
-// 4 Consecutive 128-bit registers: Q0_Q1_Q2_Q3, ..., Q31_Q0_Q1_Q2
-def Tuples4Q : RegisterTuples<[qsub_0, qsub_1, qsub_2, qsub_3],
-                              [(rotl FPR128, 0), (rotl FPR128, 1),
-                               (rotl FPR128, 2), (rotl FPR128, 3)]>;
-
-// The followings are super register classes to model 2/3/4 consecutive
-// 64-bit/128-bit registers.
-
-def DPair : RegisterClass<"AArch64", [v2i64], 64, (add Tuples2D)>;
-
-def DTriple : RegisterClass<"AArch64", [untyped], 64, (add Tuples3D)> {
-  let Size = 192; // 3 x 64 bits, we have no predefined type of that size.
-}
-
-def DQuad : RegisterClass<"AArch64", [v4i64], 64, (add Tuples4D)>;
-
-def QPair : RegisterClass<"AArch64", [v4i64], 128, (add Tuples2Q)>;
-
-def QTriple : RegisterClass<"AArch64", [untyped], 128, (add Tuples3Q)> {
-  let Size = 384; // 3 x 128 bits, we have no predefined type of that size.
-}
-
-def QQuad : RegisterClass<"AArch64", [v8i64], 128, (add Tuples4Q)>;
-
-
-// The followings are vector list operands
-multiclass VectorList_operands<string PREFIX, string LAYOUT, int Count,
-                               RegisterClass RegList> {
-  def _asmoperand : AsmOperandClass {
-    let Name = PREFIX # LAYOUT # Count;
-    let RenderMethod = "addVectorListOperands";
-    let PredicateMethod = 
-        "isVectorList<A64Layout::VL_" # LAYOUT # ", " # Count # ">";
-    let ParserMethod = "ParseVectorList";
-  }
-
-  def _operand : RegisterOperand<RegList,
-        "printVectorList<A64Layout::VL_" # LAYOUT # ", " # Count # ">"> {
-    let ParserMatchClass =
-      !cast<AsmOperandClass>(PREFIX # LAYOUT # "_asmoperand");
-  }
-}
-
-multiclass VectorList_BHSD<string PREFIX, int Count, RegisterClass DRegList,
-                           RegisterClass QRegList> {
-  defm 8B : VectorList_operands<PREFIX, "8B", Count, DRegList>;
-  defm 4H : VectorList_operands<PREFIX, "4H", Count, DRegList>;
-  defm 2S : VectorList_operands<PREFIX, "2S", Count, DRegList>;
-  defm 1D : VectorList_operands<PREFIX, "1D", Count, DRegList>;
-  defm 16B : VectorList_operands<PREFIX, "16B", Count, QRegList>;
-  defm 8H : VectorList_operands<PREFIX, "8H", Count, QRegList>;
-  defm 4S : VectorList_operands<PREFIX, "4S", Count, QRegList>;
-  defm 2D : VectorList_operands<PREFIX, "2D", Count, QRegList>;
-}
-
-// Vector list operand with 1/2/3/4 registers: VOne8B_operand,..., VQuad2D_operand
-defm VOne : VectorList_BHSD<"VOne", 1, FPR64, FPR128>;
-defm VPair : VectorList_BHSD<"VPair", 2, DPair, QPair>;
-defm VTriple : VectorList_BHSD<"VTriple", 3, DTriple, QTriple>;
-defm VQuad : VectorList_BHSD<"VQuad", 4, DQuad, QQuad>;

Removed: llvm/trunk/lib/Target/AArch64/AArch64Schedule.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64Schedule.td?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64Schedule.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64Schedule.td (removed)
@@ -1,80 +0,0 @@
-//===- AArch64Schedule.td - AArch64 Scheduling Definitions -*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Generic processor itineraries for legacy compatibility.
-
-def GenericItineraries : ProcessorItineraries<[], [], []>;
-
-
-//===----------------------------------------------------------------------===//
-// Base SchedReadWrite types
-
-// Basic ALU
-def WriteALU : SchedWrite;  // Generic: may contain shift and/or ALU operation
-def WriteALUs : SchedWrite; // Shift only with no ALU operation
-def ReadALU : SchedRead;    // Operand not needed for shifting
-def ReadALUs : SchedRead;   // Operand needed for shifting
-
-// Multiply with optional accumulate
-def WriteMAC : SchedWrite;
-def ReadMAC : SchedRead;
-
-// Compares
-def WriteCMP : SchedWrite;
-def ReadCMP : SchedRead;
-
-// Division
-def WriteDiv : SchedWrite;
-def ReadDiv : SchedRead;
-
-// Loads
-def WriteLd : SchedWrite;
-def WritePreLd : SchedWrite;
-def WriteVecLd : SchedWrite;
-def ReadLd : SchedRead;
-def ReadPreLd : SchedRead;
-def ReadVecLd : SchedRead;
-
-// Stores
-def WriteSt : SchedWrite;
-def WriteVecSt : SchedWrite;
-def ReadSt : SchedRead;
-def ReadVecSt : SchedRead;
-
-// Branches
-def WriteBr : SchedWrite;
-def WriteBrL : SchedWrite;
-def ReadBr : SchedRead;
-
-// Floating Point ALU
-def WriteFPALU : SchedWrite;
-def ReadFPALU : SchedRead;
-
-// Floating Point MAC, Mul, Div, Sqrt
-//   Most processors will simply send all of these down a dedicated pipe, but
-//   they're explicitly separated here for flexibility of modeling later. May
-//   consider consolidating them into a single WriteFPXXXX type in the future.
-def WriteFPMAC : SchedWrite;
-def WriteFPMul : SchedWrite;
-def WriteFPDiv : SchedWrite;
-def WriteFPSqrt : SchedWrite;
-def ReadFPMAC : SchedRead;
-def ReadFPMul : SchedRead;
-def ReadFPDiv : SchedRead;
-def ReadFPSqrt : SchedRead;
-
-// Noop
-def WriteNoop : SchedWrite;
-
-
-//===----------------------------------------------------------------------===//
-// Subtarget specific Machine Models.
-
-include "AArch64ScheduleA53.td"

Removed: llvm/trunk/lib/Target/AArch64/AArch64ScheduleA53.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ScheduleA53.td?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ScheduleA53.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ScheduleA53.td (removed)
@@ -1,144 +0,0 @@
-//=- AArch64ScheduleA53.td - ARM Cortex-A53 Scheduling Definitions -*- tablegen -*-=//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the itinerary class data for the ARM Cortex A53 processors.
-//
-//===----------------------------------------------------------------------===//
-
-// ===---------------------------------------------------------------------===//
-// The following definitions describe the simpler per-operand machine model.
-// This works with MachineScheduler. See MCSchedModel.h for details.
-
-// Cortex-A53 machine model for scheduling and other instruction cost heuristics.
-def CortexA53Model : SchedMachineModel {
-  let IssueWidth = 2;  // 2 micro-ops are dispatched per cycle.
-  let MinLatency = 1 ; // OperandCycles are interpreted as MinLatency.
-  let LoadLatency = 2; // Optimistic load latency assuming bypass.
-                       // This is overriden by OperandCycles if the
-                       // Itineraries are queried instead.
-  let MispredictPenalty = 9; // Based on "Cortex-A53 Software Optimisation
-                             // Specification - Instruction Timings"
-                             // v 1.0 Spreadsheet
-}
-
-
-//===----------------------------------------------------------------------===//
-// Define each kind of processor resource and number available.
-
-// Modeling each pipeline as a ProcResource using the default BufferSize = -1.
-// Cortex-A53 is in-order and therefore should be using BufferSize = 0. The
-// current configuration performs better with the basic latencies provided so
-// far. Will revisit BufferSize once the latency information is more accurate.
-
-let SchedModel = CortexA53Model in {
-
-def A53UnitALU    : ProcResource<2>;                        // Int ALU
-def A53UnitMAC    : ProcResource<1>;                        // Int MAC
-def A53UnitDiv    : ProcResource<1>;                        // Int Division
-def A53UnitLdSt   : ProcResource<1>;                        // Load/Store
-def A53UnitB      : ProcResource<1>;                        // Branch
-def A53UnitFPALU  : ProcResource<1>;                        // FP ALU
-def A53UnitFPMDS  : ProcResource<1>;                        // FP Mult/Div/Sqrt
-
-
-//===----------------------------------------------------------------------===//
-// Subtarget-specific SchedWrite types which both map the ProcResources and
-// set the latency.
-
-// Issue - Every instruction must consume an A53WriteIssue. Optionally,
-//         instructions that cannot be dual-issued will also include the
-//         A53WriteIssue2nd in their SchedRW list. That second WriteRes will
-//         ensure that a second issue slot is consumed.
-def A53WriteIssue : SchedWriteRes<[]>;
-def A53WriteIssue2nd : SchedWriteRes<[]> { let Latency = 0; }
-
-// ALU - These are reduced to 1 despite a true latency of 4 in order to easily
-//       model forwarding logic. Once forwarding is properly modelled, then
-//       they'll be corrected.
-def : WriteRes<WriteALU, [A53UnitALU]> { let Latency = 1; }
-def : WriteRes<WriteALUs, [A53UnitALU]> { let Latency = 1; }
-def : WriteRes<WriteCMP, [A53UnitALU]> { let Latency = 1; }
-
-// MAC
-def : WriteRes<WriteMAC, [A53UnitMAC]> { let Latency = 4; }
-
-// Div
-def : WriteRes<WriteDiv, [A53UnitDiv]> { let Latency = 4; }
-
-// Load - Note: Vector loads take 1-5 cycles to issue. For the WriteVecLd below,
-//        choosing the median of 3 which makes the latency 6. May model this more
-//        carefully in the future.
-def : WriteRes<WriteLd, [A53UnitLdSt]> { let Latency = 4; }
-def : WriteRes<WritePreLd, [A53UnitLdSt]> { let Latency = 4; }
-def : WriteRes<WriteVecLd, [A53UnitLdSt]> { let Latency = 6; }
-
-// Store - Note: Vector stores take 1-3 cycles to issue. For the ReadVecSt below,
-//         choosing the median of 2 which makes the latency 5. May model this more
-//         carefully in the future.
-def : WriteRes<WriteSt, [A53UnitLdSt]> { let Latency = 4; }
-def : WriteRes<WriteVecSt, [A53UnitLdSt]> { let Latency = 5; }
-
-// Branch
-def : WriteRes<WriteBr, [A53UnitB]>;
-def : WriteRes<WriteBrL, [A53UnitB]>;
-
-// FP ALU
-def : WriteRes<WriteFPALU, [A53UnitFPALU]> {let Latency = 6; }
-
-// FP MAC, Mul, Div, Sqrt
-//   Using Double Precision numbers for now as a worst case. Additionally, not
-//   modeling the exact hazard but instead treating the whole pipe as a hazard.
-//   As an example VMUL, VMLA, and others are actually pipelined. VDIV and VSQRT
-//   have a total latency of 33 and 32 respectively but only a hazard of 29 and
-//   28 (double-prescion example).
-def : WriteRes<WriteFPMAC, [A53UnitFPMDS]> { let Latency = 10; }
-def : WriteRes<WriteFPMul, [A53UnitFPMDS]> { let Latency = 6; }
-def : WriteRes<WriteFPDiv, [A53UnitFPMDS]> { let Latency = 33;
-                                             let ResourceCycles = [29]; }
-def : WriteRes<WriteFPSqrt, [A53UnitFPMDS]> { let Latency = 32;
-                                              let ResourceCycles = [28]; }
-
-
-//===----------------------------------------------------------------------===//
-// Subtarget-specific SchedRead types.
-
-// No forwarding defined for ReadALU yet.
-def : ReadAdvance<ReadALU, 0>;
-
-// No forwarding defined for ReadCMP yet.
-def : ReadAdvance<ReadCMP, 0>;
-
-// No forwarding defined for ReadBr yet.
-def : ReadAdvance<ReadBr, 0>;
-
-// No forwarding defined for ReadMAC yet.
-def : ReadAdvance<ReadMAC, 0>;
-
-// No forwarding defined for ReadDiv yet.
-def : ReadAdvance<ReadDiv, 0>;
-
-// No forwarding defined for ReadLd, ReadPreLd, ReadVecLd yet.
-def : ReadAdvance<ReadLd, 0>;
-def : ReadAdvance<ReadPreLd, 0>;
-def : ReadAdvance<ReadVecLd, 0>;
-
-// No forwarding defined for ReadSt and ReadVecSt yet.
-def : ReadAdvance<ReadSt, 0>;
-def : ReadAdvance<ReadVecSt, 0>;
-
-// No forwarding defined for ReadFPALU yet.
-def : ReadAdvance<ReadFPALU, 0>;
-
-// No forwarding defined for ReadFPMAC/Mul/Div/Sqrt yet.
-def : ReadAdvance<ReadFPMAC, 0>;
-def : ReadAdvance<ReadFPMul, 0>;
-def : ReadAdvance<ReadFPDiv, 0>;
-def : ReadAdvance<ReadFPSqrt, 0>;
-
-}

Removed: llvm/trunk/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp (removed)
@@ -1,26 +0,0 @@
-//===-- AArch64SelectionDAGInfo.cpp - AArch64 SelectionDAG Info -----------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the AArch64SelectionDAGInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AArch64TargetMachine.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-using namespace llvm;
-
-#define DEBUG_TYPE "arm-selectiondag-info"
-
-AArch64SelectionDAGInfo::AArch64SelectionDAGInfo(const AArch64TargetMachine &TM)
-  : TargetSelectionDAGInfo(TM),
-    Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {
-}
-
-AArch64SelectionDAGInfo::~AArch64SelectionDAGInfo() {
-}

Removed: llvm/trunk/lib/Target/AArch64/AArch64SelectionDAGInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64SelectionDAGInfo.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64SelectionDAGInfo.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64SelectionDAGInfo.h (removed)
@@ -1,32 +0,0 @@
-//===-- AArch64SelectionDAGInfo.h - AArch64 SelectionDAG Info ---*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the AArch64 subclass for TargetSelectionDAGInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_AARCH64SELECTIONDAGINFO_H
-#define LLVM_AARCH64SELECTIONDAGINFO_H
-
-#include "llvm/Target/TargetSelectionDAGInfo.h"
-
-namespace llvm {
-
-class AArch64TargetMachine;
-
-class AArch64SelectionDAGInfo : public TargetSelectionDAGInfo {
-  const AArch64Subtarget *Subtarget;
-public:
-  explicit AArch64SelectionDAGInfo(const AArch64TargetMachine &TM);
-  ~AArch64SelectionDAGInfo();
-};
-
-}
-
-#endif

Removed: llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp (removed)
@@ -1,99 +0,0 @@
-//===-- AArch64Subtarget.cpp - AArch64 Subtarget Information --------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the AArch64 specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AArch64Subtarget.h"
-#include "AArch64RegisterInfo.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/IR/GlobalValue.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "aarch64-subtarget"
-
-#define GET_SUBTARGETINFO_TARGET_DESC
-#define GET_SUBTARGETINFO_CTOR
-#include "AArch64GenSubtargetInfo.inc"
-
-enum AlignMode {
-  DefaultAlign,
-  StrictAlign,
-  NoStrictAlign
-};
-
-static cl::opt<AlignMode>
-Align(cl::desc("Load/store alignment support"),
-      cl::Hidden, cl::init(DefaultAlign),
-      cl::values(
-          clEnumValN(DefaultAlign,  "aarch64-default-align",
-                     "Generate unaligned accesses only on hardware/OS "
-                     "combinations that are known to support them"),
-          clEnumValN(StrictAlign,   "aarch64-strict-align",
-                     "Disallow all unaligned memory accesses"),
-          clEnumValN(NoStrictAlign, "aarch64-no-strict-align",
-                     "Allow unaligned memory accesses"),
-          clEnumValEnd));
-
-// Pin the vtable to this file.
-void AArch64Subtarget::anchor() {}
-
-AArch64Subtarget::AArch64Subtarget(StringRef TT, StringRef CPU, StringRef FS,
-                                   bool LittleEndian)
-    : AArch64GenSubtargetInfo(TT, CPU, FS), ARMProcFamily(Others),
-      HasFPARMv8(false), HasNEON(false), HasCrypto(false), TargetTriple(TT),
-      CPUString(CPU), IsLittleEndian(LittleEndian) {
-
-  initializeSubtargetFeatures(CPU, FS);
-}
-
-void AArch64Subtarget::initializeSubtargetFeatures(StringRef CPU,
-                                                   StringRef FS) {
-  AllowsUnalignedMem = false;
-
-  if (CPU.empty())
-    CPUString = "generic";
-
-  std::string FullFS = FS;
-  if (CPUString == "generic") {
-    // Enable FP by default.
-    if (FullFS.empty())
-      FullFS = "+fp-armv8";
-    else
-      FullFS = "+fp-armv8," + FullFS;
-  }
-
-  ParseSubtargetFeatures(CPU, FullFS);
-
-  switch (Align) {
-    case DefaultAlign:
-      // Linux targets support unaligned accesses on AARCH64
-      AllowsUnalignedMem = isTargetLinux();
-      break;
-    case StrictAlign:
-      AllowsUnalignedMem = false;
-      break;
-    case NoStrictAlign:
-      AllowsUnalignedMem = true;
-      break;
-  }
-}
-
-bool AArch64Subtarget::GVIsIndirectSymbol(const GlobalValue *GV,
-                                          Reloc::Model RelocM) const {
-  if (RelocM == Reloc::Static)
-    return false;
-
-  return !GV->hasLocalLinkage() && !GV->hasHiddenVisibility();
-}

Removed: llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h (removed)
@@ -1,89 +0,0 @@
-//==-- AArch64Subtarget.h - Define Subtarget for the AArch64 ---*- C++ -*--===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the AArch64 specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_AARCH64_SUBTARGET_H
-#define LLVM_TARGET_AARCH64_SUBTARGET_H
-
-#include "llvm/ADT/Triple.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
-#include <string>
-
-#define GET_SUBTARGETINFO_HEADER
-#include "AArch64GenSubtargetInfo.inc"
-
-namespace llvm {
-class StringRef;
-class GlobalValue;
-
-class AArch64Subtarget : public AArch64GenSubtargetInfo {
-  virtual void anchor();
-protected:
-  enum ARMProcFamilyEnum {Others, CortexA53, CortexA57};
-
-  /// ARMProcFamily - ARM processor family: Cortex-A53, Cortex-A57, and others.
-  ARMProcFamilyEnum ARMProcFamily;
-
-  bool HasFPARMv8;
-  bool HasNEON;
-  bool HasCrypto;
-
-  /// AllowsUnalignedMem - If true, the subtarget allows unaligned memory
-  /// accesses for some types.  For details, see
-  /// AArch64TargetLowering::allowsUnalignedMemoryAccesses().
-  bool AllowsUnalignedMem;
-
-  /// TargetTriple - What processor and OS we're targeting.
-  Triple TargetTriple;
-
-  /// CPUString - String name of used CPU.
-  std::string CPUString;
-
-  /// IsLittleEndian - The target is Little Endian
-  bool IsLittleEndian;
-
-private:
-  void initializeSubtargetFeatures(StringRef CPU, StringRef FS);
-
-public:
-  /// This constructor initializes the data members to match that
-  /// of the specified triple.
-  ///
-  AArch64Subtarget(StringRef TT, StringRef CPU, StringRef FS,
-                   bool LittleEndian);
-
-  bool enableMachineScheduler() const override {
-    return true;
-  }
-
-  /// ParseSubtargetFeatures - Parses features string setting specified
-  /// subtarget options.  Definition of function is auto generated by tblgen.
-  void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
-
-  bool GVIsIndirectSymbol(const GlobalValue *GV, Reloc::Model RelocM) const;
-
-  bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
-  bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
-
-  bool hasFPARMv8() const { return HasFPARMv8; }
-  bool hasNEON() const { return HasNEON; }
-  bool hasCrypto() const { return HasCrypto; }
-
-  bool allowsUnalignedMem() const { return AllowsUnalignedMem; }
-
-  bool isLittle() const { return IsLittleEndian; }
-
-  const std::string & getCPUString() const { return CPUString; }
-};
-} // End llvm namespace
-
-#endif  // LLVM_TARGET_AARCH64_SUBTARGET_H

Removed: llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp (removed)
@@ -1,121 +0,0 @@
-//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the implementation of the AArch64TargetMachine
-// methods. Principally just setting up the passes needed to generate correct
-// code on this architecture.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AArch64.h"
-#include "AArch64TargetMachine.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/PassManager.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Transforms/Scalar.h"
-
-using namespace llvm;
-
-extern "C" void LLVMInitializeAArch64Target() {
-  RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
-  RegisterTargetMachine<AArch64beTargetMachine> Y(TheAArch64beTarget);
-}
-
-AArch64TargetMachine::AArch64TargetMachine(const Target &T, StringRef TT,
-                                           StringRef CPU, StringRef FS,
-                                           const TargetOptions &Options,
-                                           Reloc::Model RM, CodeModel::Model CM,
-                                           CodeGenOpt::Level OL,
-                                           bool LittleEndian)
-  : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
-    Subtarget(TT, CPU, FS, LittleEndian),
-    InstrInfo(Subtarget),
-    DL(LittleEndian ?
-       "e-m:e-i64:64-i128:128-n32:64-S128" :
-       "E-m:e-i64:64-i128:128-n32:64-S128"),
-    TLInfo(*this),
-    TSInfo(*this),
-    FrameLowering(Subtarget) {
-  initAsmInfo();
-}
-
-void AArch64leTargetMachine::anchor() { }
-
-AArch64leTargetMachine::
-AArch64leTargetMachine(const Target &T, StringRef TT,
-                       StringRef CPU, StringRef FS, const TargetOptions &Options,
-                       Reloc::Model RM, CodeModel::Model CM,
-                       CodeGenOpt::Level OL)
-  : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
-
-void AArch64beTargetMachine::anchor() { }
-
-AArch64beTargetMachine::
-AArch64beTargetMachine(const Target &T, StringRef TT,
-                       StringRef CPU, StringRef FS, const TargetOptions &Options,
-                       Reloc::Model RM, CodeModel::Model CM,
-                       CodeGenOpt::Level OL)
-  : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
-
-void AArch64TargetMachine::addAnalysisPasses(PassManagerBase &PM) {
-  // Add first the target-independent BasicTTI pass, then our AArch64 pass. This
-  // allows the AArch64 pass to delegate to the target independent layer when
-  // appropriate.
-  PM.add(createBasicTargetTransformInfoPass(this));
-  PM.add(createAArch64TargetTransformInfoPass(this));
-}
-
-namespace {
-/// AArch64 Code Generator Pass Configuration Options.
-class AArch64PassConfig : public TargetPassConfig {
-public:
-  AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
-    : TargetPassConfig(TM, PM) {}
-
-  AArch64TargetMachine &getAArch64TargetMachine() const {
-    return getTM<AArch64TargetMachine>();
-  }
-
-  const AArch64Subtarget &getAArch64Subtarget() const {
-    return *getAArch64TargetMachine().getSubtargetImpl();
-  }
-
-  bool addPreISel() override;
-  bool addInstSelector() override;
-  bool addPreEmitPass() override;
-};
-} // namespace
-
-bool AArch64PassConfig::addPreISel() {
-  if (TM->getOptLevel() != CodeGenOpt::None)
-    addPass(createGlobalMergePass(TM));
-
-  return false;
-}
-
-TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
-  return new AArch64PassConfig(this, PM);
-}
-
-bool AArch64PassConfig::addPreEmitPass() {
-  addPass(&UnpackMachineBundlesID);
-  addPass(createAArch64BranchFixupPass());
-  return true;
-}
-
-bool AArch64PassConfig::addInstSelector() {
-  addPass(createAArch64ISelDAG(getAArch64TargetMachine(), getOptLevel()));
-
-  // For ELF, cleanup any local-dynamic TLS accesses.
-  if (getAArch64Subtarget().isTargetELF() && getOptLevel() != CodeGenOpt::None)
-    addPass(createAArch64CleanupLocalDynamicTLSPass());
-
-  return false;
-}

Removed: llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.h (removed)
@@ -1,94 +0,0 @@
-//=== AArch64TargetMachine.h - Define TargetMachine for AArch64 -*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the AArch64 specific subclass of TargetMachine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_AARCH64TARGETMACHINE_H
-#define LLVM_AARCH64TARGETMACHINE_H
-
-#include "AArch64FrameLowering.h"
-#include "AArch64ISelLowering.h"
-#include "AArch64InstrInfo.h"
-#include "AArch64SelectionDAGInfo.h"
-#include "AArch64Subtarget.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
-
-class AArch64TargetMachine : public LLVMTargetMachine {
-  AArch64Subtarget          Subtarget;
-  AArch64InstrInfo          InstrInfo;
-  const DataLayout          DL;
-  AArch64TargetLowering     TLInfo;
-  AArch64SelectionDAGInfo   TSInfo;
-  AArch64FrameLowering      FrameLowering;
-
-public:
-  AArch64TargetMachine(const Target &T, StringRef TT, StringRef CPU,
-                       StringRef FS, const TargetOptions &Options,
-                       Reloc::Model RM, CodeModel::Model CM,
-                       CodeGenOpt::Level OL,
-                       bool LittleEndian);
-
-  const AArch64InstrInfo *getInstrInfo() const override {
-    return &InstrInfo;
-  }
-
-  const AArch64FrameLowering *getFrameLowering() const override {
-    return &FrameLowering;
-  }
-
-  const AArch64TargetLowering *getTargetLowering() const override {
-    return &TLInfo;
-  }
-
-  const AArch64SelectionDAGInfo *getSelectionDAGInfo() const override {
-    return &TSInfo;
-  }
-
-  const AArch64Subtarget *getSubtargetImpl() const override { return &Subtarget; }
-
-  const DataLayout *getDataLayout() const override { return &DL; }
-
-  const TargetRegisterInfo *getRegisterInfo() const override {
-    return &InstrInfo.getRegisterInfo();
-  }
-  TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
-
-  void addAnalysisPasses(PassManagerBase &PM) override;
-};
-
-// AArch64leTargetMachine - AArch64 little endian target machine.
-//
-class AArch64leTargetMachine : public AArch64TargetMachine {
-  virtual void anchor();
-public:
-  AArch64leTargetMachine(const Target &T, StringRef TT,
-                         StringRef CPU, StringRef FS, const TargetOptions &Options,
-                         Reloc::Model RM, CodeModel::Model CM,
-                         CodeGenOpt::Level OL);
-};
-
-// AArch64beTargetMachine - AArch64 big endian target machine.
-//
-class AArch64beTargetMachine : public AArch64TargetMachine {
-  virtual void anchor();
-public:
-  AArch64beTargetMachine(const Target &T, StringRef TT,
-                         StringRef CPU, StringRef FS, const TargetOptions &Options,
-                         Reloc::Model RM, CodeModel::Model CM,
-                         CodeGenOpt::Level OL);
-};
-
-} // End llvm namespace
-
-#endif

Removed: llvm/trunk/lib/Target/AArch64/AArch64TargetObjectFile.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64TargetObjectFile.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64TargetObjectFile.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64TargetObjectFile.cpp (removed)
@@ -1,24 +0,0 @@
-//===-- AArch64TargetObjectFile.cpp - AArch64 Object Info -----------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file deals with any AArch64 specific requirements on object files.
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "AArch64TargetObjectFile.h"
-
-using namespace llvm;
-
-void
-AArch64ElfTargetObjectFile::Initialize(MCContext &Ctx,
-                                       const TargetMachine &TM) {
-  TargetLoweringObjectFileELF::Initialize(Ctx, TM);
-  InitializeELF(TM.Options.UseInitArray);
-}

Removed: llvm/trunk/lib/Target/AArch64/AArch64TargetObjectFile.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64TargetObjectFile.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64TargetObjectFile.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64TargetObjectFile.h (removed)
@@ -1,31 +0,0 @@
-//===-- AArch64TargetObjectFile.h - AArch64 Object Info ---------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file deals with any AArch64 specific requirements on object files.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_AARCH64_TARGETOBJECTFILE_H
-#define LLVM_TARGET_AARCH64_TARGETOBJECTFILE_H
-
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
-
-  /// AArch64ElfTargetObjectFile - This implementation is used for ELF
-  /// AArch64 targets.
-  class AArch64ElfTargetObjectFile : public TargetLoweringObjectFileELF {
-    void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
-  };
-
-} // end namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp (removed)
@@ -1,109 +0,0 @@
-//===- AArch64TargetTransformInfo.cpp - AArch64 specific TTI pass ---------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-/// This file implements a TargetTransformInfo analysis pass specific to the
-/// AArch64 target machine. It uses the target's detailed information to provide
-/// more precise answers to certain TTI queries, while letting the target
-/// independent and default TTI implementations handle the rest.
-///
-//===----------------------------------------------------------------------===//
-
-#include "AArch64.h"
-#include "AArch64TargetMachine.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Target/CostTable.h"
-#include "llvm/Target/TargetLowering.h"
-using namespace llvm;
-
-#define DEBUG_TYPE "aarch64tti"
-
-// Declare the pass initialization routine locally as target-specific passes
-// don't have a target-wide initialization entry point, and so we rely on the
-// pass constructor initialization.
-namespace llvm {
-void initializeAArch64TTIPass(PassRegistry &);
-}
-
-namespace {
-
-class AArch64TTI final : public ImmutablePass, public TargetTransformInfo {
-  const AArch64Subtarget *ST;
-  const AArch64TargetLowering *TLI;
-
-public:
-  AArch64TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
-    llvm_unreachable("This pass cannot be directly constructed");
-  }
-
-  AArch64TTI(const AArch64TargetMachine *TM)
-      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
-        TLI(TM->getTargetLowering()) {
-    initializeAArch64TTIPass(*PassRegistry::getPassRegistry());
-  }
-
-  virtual void initializePass() override {
-    pushTTIStack(this);
-  }
-
-  virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
-    TargetTransformInfo::getAnalysisUsage(AU);
-  }
-
-  /// Pass identification.
-  static char ID;
-
-  /// Provide necessary pointer adjustments for the two base classes.
-  virtual void *getAdjustedAnalysisPointer(const void *ID) override {
-    if (ID == &TargetTransformInfo::ID)
-      return (TargetTransformInfo*)this;
-    return this;
-  }
-
-  /// \name Scalar TTI Implementations
-  /// @{
-
-  /// @}
-
-
-  /// \name Vector TTI Implementations
-  /// @{
-
-  unsigned getNumberOfRegisters(bool Vector) const override {
-    if (Vector) {
-      if (ST->hasNEON())
-        return 32;
-      return 0;
-    }
-    return 32;
-  }
-
-  unsigned getRegisterBitWidth(bool Vector) const override {
-    if (Vector) {
-      if (ST->hasNEON())
-        return 128;
-      return 0;
-    }
-    return 64;
-  }
-
-  unsigned getMaximumUnrollFactor() const override { return 2; }
-  /// @}
-};
-
-} // end anonymous namespace
-
-INITIALIZE_AG_PASS(AArch64TTI, TargetTransformInfo, "aarch64tti",
-                   "AArch64 Target Transform Info", true, true, false)
-char AArch64TTI::ID = 0;
-
-ImmutablePass *
-llvm::createAArch64TargetTransformInfoPass(const AArch64TargetMachine *TM) {
-  return new AArch64TTI(TM);
-}

Removed: llvm/trunk/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp (removed)
@@ -1,2677 +0,0 @@
-//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the (GNU-style) assembly parser for the AArch64
-// architecture.
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "MCTargetDesc/AArch64MCExpr.h"
-#include "Utils/AArch64BaseInfo.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCParser/MCAsmLexer.h"
-#include "llvm/MC/MCParser/MCAsmParser.h"
-#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCTargetAsmParser.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-namespace {
-
-class AArch64Operand;
-
-class AArch64AsmParser : public MCTargetAsmParser {
-  MCSubtargetInfo &STI;
-  MCAsmParser &Parser;
-
-#define GET_ASSEMBLER_HEADER
-#include "AArch64GenAsmMatcher.inc"
-
-public:
-  enum AArch64MatchResultTy {
-    Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
-#define GET_OPERAND_DIAGNOSTIC_TYPES
-#include "AArch64GenAsmMatcher.inc"
-  };
-
-  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
-                   const MCInstrInfo &MII,
-                   const MCTargetOptions &Options)
-      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
-    MCAsmParserExtension::Initialize(_Parser);
-
-    // Initialize the set of available features.
-    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
-  }
-
-  // These are the public interface of the MCTargetAsmParser
-  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
-  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
-                       SMLoc NameLoc,
-                       SmallVectorImpl<MCParsedAsmOperand*> &Operands) override;
-
-  bool ParseDirective(AsmToken DirectiveID) override;
-  bool ParseDirectiveTLSDescCall(SMLoc L);
-  bool ParseDirectiveWord(unsigned Size, SMLoc L);
-
-  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
-                               MCStreamer&Out, unsigned &ErrorInfo,
-                               bool MatchingInlineAsm) override;
-
-  // The rest of the sub-parsers have more freedom over interface: they return
-  // an OperandMatchResultTy because it's less ambiguous than true/false or
-  // -1/0/1 even if it is more verbose
-  OperandMatchResultTy
-  ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
-               StringRef Mnemonic);
-
-  OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);
-
-  OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);
-
-  OperandMatchResultTy
-  ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
-                uint32_t NumLanes);
-
-  OperandMatchResultTy
-  ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
-                uint32_t &NumLanes);
-
-  OperandMatchResultTy
-  ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
-  OperandMatchResultTy
-  ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
-  OperandMatchResultTy
-  ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
-  OperandMatchResultTy
-  ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
-  OperandMatchResultTy
-  ParseFPImm0AndImm0Operand( SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
-  template<typename SomeNamedImmMapper> OperandMatchResultTy
-  ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-    return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
-  }
-
-  OperandMatchResultTy
-  ParseNamedImmOperand(const NamedImmMapper &Mapper,
-                       SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
-  OperandMatchResultTy
-  ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
-  OperandMatchResultTy
-  ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
-  OperandMatchResultTy
-  ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
-  bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
-                      SMLoc &LayoutLoc);
-
-  OperandMatchResultTy ParseVectorList(SmallVectorImpl<MCParsedAsmOperand *> &);
-
-  bool validateInstruction(MCInst &Inst,
-                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
-  /// Scan the next token (which had better be an identifier) and determine
-  /// whether it represents a general-purpose or vector register. It returns
-  /// true if an identifier was found and populates its reference arguments. It
-  /// does not consume the token.
-  bool
-  IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
-                   SMLoc &LayoutLoc) const;
-
-};
-
-}
-
-namespace {
-
-/// Instances of this class represent a parsed AArch64 machine instruction.
-class AArch64Operand : public MCParsedAsmOperand {
-private:
-  enum KindTy {
-    k_ImmWithLSL,     // #uimm {, LSL #amt }
-    k_CondCode,       // eq/ne/...
-    k_FPImmediate,    // Limited-precision floating-point imm
-    k_Immediate,      // Including expressions referencing symbols
-    k_Register,
-    k_ShiftExtend,
-    k_VectorList,     // A sequential list of 1 to 4 registers.
-    k_SysReg,         // The register operand of MRS and MSR instructions
-    k_Token,          // The mnemonic; other raw tokens the auto-generated
-    k_WrappedRegister // Load/store exclusive permit a wrapped register.
-  } Kind;
-
-  SMLoc StartLoc, EndLoc;
-
-  struct ImmWithLSLOp {
-    const MCExpr *Val;
-    unsigned ShiftAmount;
-    bool ImplicitAmount;
-  };
-
-  struct CondCodeOp {
-    A64CC::CondCodes Code;
-  };
-
-  struct FPImmOp {
-    double Val;
-  };
-
-  struct ImmOp {
-    const MCExpr *Val;
-  };
-
-  struct RegOp {
-    unsigned RegNum;
-  };
-
-  struct ShiftExtendOp {
-    A64SE::ShiftExtSpecifiers ShiftType;
-    unsigned Amount;
-    bool ImplicitAmount;
-  };
-
-  // A vector register list is a sequential list of 1 to 4 registers.
-  struct VectorListOp {
-    unsigned RegNum;
-    unsigned Count;
-    A64Layout::VectorLayout Layout;
-  };
-
-  struct SysRegOp {
-    const char *Data;
-    unsigned Length;
-  };
-
-  struct TokOp {
-    const char *Data;
-    unsigned Length;
-  };
-
-  union {
-    struct ImmWithLSLOp ImmWithLSL;
-    struct CondCodeOp CondCode;
-    struct FPImmOp FPImm;
-    struct ImmOp Imm;
-    struct RegOp Reg;
-    struct ShiftExtendOp ShiftExtend;
-    struct VectorListOp VectorList;
-    struct SysRegOp SysReg;
-    struct TokOp Tok;
-  };
-
-  AArch64Operand(KindTy K, SMLoc S, SMLoc E)
-    : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}
-
-public:
-  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
-  }
-
-  SMLoc getStartLoc() const override { return StartLoc; }
-  SMLoc getEndLoc() const override { return EndLoc; }
-  void print(raw_ostream&) const override;
-  void dump() const override;
-
-  StringRef getToken() const {
-    assert(Kind == k_Token && "Invalid access!");
-    return StringRef(Tok.Data, Tok.Length);
-  }
-
-  unsigned getReg() const override {
-    assert((Kind == k_Register || Kind == k_WrappedRegister)
-           && "Invalid access!");
-    return Reg.RegNum;
-  }
-
-  const MCExpr *getImm() const {
-    assert(Kind == k_Immediate && "Invalid access!");
-    return Imm.Val;
-  }
-
-  A64CC::CondCodes getCondCode() const {
-    assert(Kind == k_CondCode && "Invalid access!");
-    return CondCode.Code;
-  }
-
-  static bool isNonConstantExpr(const MCExpr *E,
-                                AArch64MCExpr::VariantKind &Variant) {
-    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
-      Variant = A64E->getKind();
-      return true;
-    } else if (!isa<MCConstantExpr>(E)) {
-      Variant = AArch64MCExpr::VK_AARCH64_None;
-      return true;
-    }
-
-    return false;
-  }
-
-  bool isCondCode() const { return Kind == k_CondCode; }
-  bool isToken() const override { return Kind == k_Token; }
-  bool isReg() const override { return Kind == k_Register; }
-  bool isImm() const override { return Kind == k_Immediate; }
-  bool isMem() const override { return false; }
-  bool isFPImm() const { return Kind == k_FPImmediate; }
-  bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
-  bool isSysReg() const { return Kind == k_SysReg; }
-  bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
-  bool isWrappedReg() const { return Kind == k_WrappedRegister; }
-
-  bool isAddSubImmLSL0() const {
-    if (!isImmWithLSL()) return false;
-    if (ImmWithLSL.ShiftAmount != 0) return false;
-
-    AArch64MCExpr::VariantKind Variant;
-    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
-      return Variant == AArch64MCExpr::VK_AARCH64_LO12
-          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
-          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
-          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
-          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
-          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
-    }
-
-    // Otherwise it should be a real immediate in range:
-    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
-    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
-  }
-
-  bool isAddSubImmLSL12() const {
-    if (!isImmWithLSL()) return false;
-    if (ImmWithLSL.ShiftAmount != 12) return false;
-
-    AArch64MCExpr::VariantKind Variant;
-    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
-      return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
-          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
-    }
-
-    // Otherwise it should be a real immediate in range:
-    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
-    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
-  }
-
-  template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
-    if (!isShiftOrExtend()) return false;
-
-    A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
-    if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
-      return false;
-
-    if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
-      return false;
-
-    return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
-  }
-
-  bool isAdrpLabel() const {
-    if (!isImm()) return false;
-
-    AArch64MCExpr::VariantKind Variant;
-    if (isNonConstantExpr(getImm(), Variant)) {
-      return Variant == AArch64MCExpr::VK_AARCH64_None
-        || Variant == AArch64MCExpr::VK_AARCH64_GOT
-        || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
-        || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
-    }
-
-    return isLabel<21, 4096>();
-  }
-
-  template<unsigned RegWidth>  bool isBitfieldWidth() const {
-    if (!isImm()) return false;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE) return false;
-
-    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
-  }
-
-  template<int RegWidth>
-  bool isCVTFixedPos() const {
-    if (!isImm()) return false;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE) return false;
-
-    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
-  }
-
-  bool isFMOVImm() const {
-    if (!isFPImm()) return false;
-
-    APFloat RealVal(FPImm.Val);
-    uint32_t ImmVal;
-    return A64Imms::isFPImm(RealVal, ImmVal);
-  }
-
-  bool isFPZero() const {
-    if (!isFPImm()) return false;
-
-    APFloat RealVal(FPImm.Val);
-    return RealVal.isPosZero();
-  }
-
-  template<unsigned field_width, unsigned scale>
-  bool isLabel() const {
-    if (!isImm()) return false;
-
-    if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
-      return true;
-    } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
-      int64_t Val = CE->getValue();
-      int64_t Min = - (scale * (1LL << (field_width - 1)));
-      int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
-      return (Val % scale) == 0 && Val >= Min && Val <= Max;
-    }
-
-    // N.b. this disallows explicit relocation specifications via an
-    // AArch64MCExpr. Users needing that behaviour
-    return false;
-  }
-
-  bool isLane1() const {
-    if (!isImm()) return false;
-
-    // Because it's come through custom assembly parsing, it must always be a
-    // constant expression.
-    return cast<MCConstantExpr>(getImm())->getValue() == 1;
-  }
-
-  bool isLoadLitLabel() const {
-    if (!isImm()) return false;
-
-    AArch64MCExpr::VariantKind Variant;
-    if (isNonConstantExpr(getImm(), Variant)) {
-      return Variant == AArch64MCExpr::VK_AARCH64_None
-          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
-    }
-
-    return isLabel<19, 4>();
-  }
-
-  template<unsigned RegWidth> bool isLogicalImm() const {
-    if (!isImm()) return false;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
-    if (!CE) return false;
-
-    uint32_t Bits;
-    return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
-  }
-
-  template<unsigned RegWidth> bool isLogicalImmMOV() const {
-    if (!isLogicalImm<RegWidth>()) return false;
-
-    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
-
-    // The move alias for ORR is only valid if the immediate cannot be
-    // represented with a move (immediate) instruction; they take priority.
-    int UImm16, Shift;
-    return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
-      && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
-  }
-
-  template<int MemSize>
-  bool isOffsetUImm12() const {
-    if (!isImm()) return false;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-
-    // Assume they know what they're doing for now if they've given us a
-    // non-constant expression. In principle we could check for ridiculous
-    // things that can't possibly work or relocations that would almost
-    // certainly break resulting code.
-    if (!CE)
-      return true;
-
-    int64_t Val = CE->getValue();
-
-    // Must be a multiple of the access size in bytes.
-    if ((Val & (MemSize - 1)) != 0) return false;
-
-    // Must be 12-bit unsigned
-    return Val >= 0 && Val <= 0xfff * MemSize;
-  }
-
-  template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
-  bool isShift() const {
-    if (!isShiftOrExtend()) return false;
-
-    if (ShiftExtend.ShiftType != SHKind)
-      return false;
-
-    return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
-  }
-
-  bool isMOVN32Imm() const {
-    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
-      AArch64MCExpr::VK_AARCH64_SABS_G0,
-      AArch64MCExpr::VK_AARCH64_SABS_G1,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
-      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
-      AArch64MCExpr::VK_AARCH64_TPREL_G1,
-      AArch64MCExpr::VK_AARCH64_TPREL_G0,
-    };
-    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
-    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
-  }
-
-  bool isMOVN64Imm() const {
-    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
-      AArch64MCExpr::VK_AARCH64_SABS_G0,
-      AArch64MCExpr::VK_AARCH64_SABS_G1,
-      AArch64MCExpr::VK_AARCH64_SABS_G2,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
-      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
-      AArch64MCExpr::VK_AARCH64_TPREL_G2,
-      AArch64MCExpr::VK_AARCH64_TPREL_G1,
-      AArch64MCExpr::VK_AARCH64_TPREL_G0,
-    };
-    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
-    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
-  }
-
-
-  bool isMOVZ32Imm() const {
-    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
-      AArch64MCExpr::VK_AARCH64_ABS_G0,
-      AArch64MCExpr::VK_AARCH64_ABS_G1,
-      AArch64MCExpr::VK_AARCH64_SABS_G0,
-      AArch64MCExpr::VK_AARCH64_SABS_G1,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
-      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
-      AArch64MCExpr::VK_AARCH64_TPREL_G1,
-      AArch64MCExpr::VK_AARCH64_TPREL_G0,
-    };
-    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
-    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
-  }
-
-  bool isMOVZ64Imm() const {
-    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
-      AArch64MCExpr::VK_AARCH64_ABS_G0,
-      AArch64MCExpr::VK_AARCH64_ABS_G1,
-      AArch64MCExpr::VK_AARCH64_ABS_G2,
-      AArch64MCExpr::VK_AARCH64_ABS_G3,
-      AArch64MCExpr::VK_AARCH64_SABS_G0,
-      AArch64MCExpr::VK_AARCH64_SABS_G1,
-      AArch64MCExpr::VK_AARCH64_SABS_G2,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
-      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
-      AArch64MCExpr::VK_AARCH64_TPREL_G2,
-      AArch64MCExpr::VK_AARCH64_TPREL_G1,
-      AArch64MCExpr::VK_AARCH64_TPREL_G0,
-    };
-    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
-    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
-  }
-
-  bool isMOVK32Imm() const {
-    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
-      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
-      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
-      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
-      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
-      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
-    };
-    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
-    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
-  }
-
-  bool isMOVK64Imm() const {
-    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
-      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
-      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
-      AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
-      AArch64MCExpr::VK_AARCH64_ABS_G3,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
-      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
-      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
-      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
-      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
-    };
-    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
-
-    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
-  }
-
-  bool isMoveWideImm(unsigned RegWidth,
-                     const AArch64MCExpr::VariantKind *PermittedModifiers,
-                     unsigned NumModifiers) const {
-    if (!isImmWithLSL()) return false;
-
-    if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
-    if (ImmWithLSL.ShiftAmount >= RegWidth) return false;
-
-    AArch64MCExpr::VariantKind Modifier;
-    if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
-      // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
-      if (!ImmWithLSL.ImplicitAmount) return false;
-
-      for (unsigned i = 0; i < NumModifiers; ++i)
-        if (PermittedModifiers[i] == Modifier) return true;
-
-      return false;
-    }
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
-    return CE && CE->getValue() >= 0  && CE->getValue() <= 0xffff;
-  }
-
-  template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
-  bool isMoveWideMovAlias() const {
-    if (!isImm()) return false;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE) return false;
-
-    int UImm16, Shift;
-    uint64_t Value = CE->getValue();
-
-    // If this is a 32-bit instruction then all bits above 32 should be the
-    // same: either of these is fine because signed/unsigned values should be
-    // permitted.
-    if (RegWidth == 32) {
-      if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
-        return false;
-
-      Value &= 0xffffffffULL;
-    }
-
-    return isValidImm(RegWidth, Value, UImm16, Shift);
-  }
-
-  bool isMSRWithReg() const {
-    if (!isSysReg()) return false;
-
-    bool IsKnownRegister;
-    StringRef Name(SysReg.Data, SysReg.Length);
-    A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);
-
-    return IsKnownRegister;
-  }
-
-  bool isMSRPState() const {
-    if (!isSysReg()) return false;
-
-    bool IsKnownRegister;
-    StringRef Name(SysReg.Data, SysReg.Length);
-    A64PState::PStateMapper().fromString(Name, IsKnownRegister);
-
-    return IsKnownRegister;
-  }
-
-  bool isMRS() const {
-    if (!isSysReg()) return false;
-
-    // First check against specific MSR-only (write-only) registers
-    bool IsKnownRegister;
-    StringRef Name(SysReg.Data, SysReg.Length);
-    A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);
-
-    return IsKnownRegister;
-  }
-
-  bool isPRFM() const {
-    if (!isImm()) return false;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-
-    if (!CE)
-      return false;
-
-    return CE->getValue() >= 0 && CE->getValue() <= 31;
-  }
-
-  template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
-    if (!isShiftOrExtend()) return false;
-
-    if (ShiftExtend.ShiftType != SHKind)
-      return false;
-
-    return ShiftExtend.Amount <= 4;
-  }
-
-  bool isRegExtendLSL() const {
-    if (!isShiftOrExtend()) return false;
-
-    if (ShiftExtend.ShiftType != A64SE::LSL)
-      return false;
-
-    return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
-  }
-
-  // if 0 < value <= w, return true
-  bool isShrFixedWidth(int w) const {
-    if (!isImm())
-      return false;
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE)
-      return false;
-    int64_t Value = CE->getValue();
-    return Value > 0 && Value <= w;
-  }
-
-  bool isShrImm8() const { return isShrFixedWidth(8); }
-
-  bool isShrImm16() const { return isShrFixedWidth(16); }
-
-  bool isShrImm32() const { return isShrFixedWidth(32); }
-
-  bool isShrImm64() const { return isShrFixedWidth(64); }
-
-  // if 0 <= value < w, return true
-  bool isShlFixedWidth(int w) const {
-    if (!isImm())
-      return false;
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE)
-      return false;
-    int64_t Value = CE->getValue();
-    return Value >= 0 && Value < w;
-  }
-
-  bool isShlImm8() const { return isShlFixedWidth(8); }
-
-  bool isShlImm16() const { return isShlFixedWidth(16); }
-
-  bool isShlImm32() const { return isShlFixedWidth(32); }
-
-  bool isShlImm64() const { return isShlFixedWidth(64); }
-
-  bool isNeonMovImmShiftLSL() const {
-    if (!isShiftOrExtend())
-      return false;
-
-    if (ShiftExtend.ShiftType != A64SE::LSL)
-      return false;
-
-    // Valid shift amount is 0, 8, 16 and 24.
-    return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
-  }
-
-  bool isNeonMovImmShiftLSLH() const {
-    if (!isShiftOrExtend())
-      return false;
-
-    if (ShiftExtend.ShiftType != A64SE::LSL)
-      return false;
-
-    // Valid shift amount is 0 and 8.
-    return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
-  }
-
-  bool isNeonMovImmShiftMSL() const {
-    if (!isShiftOrExtend())
-      return false;
-
-    if (ShiftExtend.ShiftType != A64SE::MSL)
-      return false;
-
-    // Valid shift amount is 8 and 16.
-    return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
-  }
-
-  template <A64Layout::VectorLayout Layout, unsigned Count>
-  bool isVectorList() const {
-    return Kind == k_VectorList && VectorList.Layout == Layout &&
-           VectorList.Count == Count;
-  }
-
-  template <int MemSize> bool isSImm7Scaled() const {
-    if (!isImm())
-      return false;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE) return false;
-
-    int64_t Val = CE->getValue();
-    if (Val % MemSize != 0) return false;
-
-    Val /= MemSize;
-
-    return Val >= -64 && Val < 64;
-  }
-
-  template<int BitWidth>
-  bool isSImm() const {
-    if (!isImm()) return false;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE) return false;
-
-    return CE->getValue() >= -(1LL << (BitWidth - 1))
-      && CE->getValue() < (1LL << (BitWidth - 1));
-  }
-
-  template<int bitWidth>
-  bool isUImm() const {
-    if (!isImm()) return false;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE) return false;
-
-    return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
-  }
-
-  bool isUImm() const {
-    if (!isImm()) return false;
-
-    return isa<MCConstantExpr>(getImm());
-  }
-
-  bool isNeonUImm64Mask() const {
-    if (!isImm())
-      return false;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE)
-      return false;
-
-    uint64_t Value = CE->getValue();
-
-    // i64 value with each byte being either 0x00 or 0xff.
-    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
-      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
-        return false;
-    return true;
-  }
-
-  // if value == N, return true
-  template<int N>
-  bool isExactImm() const {
-    if (!isImm()) return false;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE) return false;
-
-    return CE->getValue() == N;
-  }
-
-  bool isFPZeroIZero() const {
-    return isFPZero();
-  }
-
-  static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
-                                          unsigned ShiftAmount,
-                                          bool ImplicitAmount,
-										  SMLoc S,SMLoc E) {
-    AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
-    Op->ImmWithLSL.Val = Val;
-    Op->ImmWithLSL.ShiftAmount = ShiftAmount;
-    Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
-    return Op;
-  }
-
-  static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
-                                        SMLoc S, SMLoc E) {
-    AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
-    Op->CondCode.Code = Code;
-    return Op;
-  }
-
-  static AArch64Operand *CreateFPImm(double Val,
-                                     SMLoc S, SMLoc E) {
-    AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
-    Op->FPImm.Val = Val;
-    return Op;
-  }
-
-  static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
-    AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
-    Op->Imm.Val = Val;
-    return Op;
-  }
-
-  static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
-    AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
-    Op->Reg.RegNum = RegNum;
-    return Op;
-  }
-
-  static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
-    AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
-    Op->Reg.RegNum = RegNum;
-    return Op;
-  }
-
-  static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
-                                           unsigned Amount,
-                                           bool ImplicitAmount,
-                                           SMLoc S, SMLoc E) {
-    AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
-    Op->ShiftExtend.ShiftType = ShiftTyp;
-    Op->ShiftExtend.Amount = Amount;
-    Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
-    return Op;
-  }
-
-  static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
-    AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
-    Op->Tok.Data = Str.data();
-    Op->Tok.Length = Str.size();
-    return Op;
-  }
-
-  static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
-                                          A64Layout::VectorLayout Layout,
-                                          SMLoc S, SMLoc E) {
-    AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E);
-    Op->VectorList.RegNum = RegNum;
-    Op->VectorList.Count = Count;
-    Op->VectorList.Layout = Layout;
-    Op->StartLoc = S;
-    Op->EndLoc = E;
-    return Op;
-  }
-
-  static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
-    AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
-    Op->Tok.Data = Str.data();
-    Op->Tok.Length = Str.size();
-    return Op;
-  }
-
-
-  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
-    // Add as immediates when possible.
-    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
-      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
-    else
-      Inst.addOperand(MCOperand::CreateExpr(Expr));
-  }
-
-  template<unsigned RegWidth>
-  void addBFILSBOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
-    unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
-    Inst.addOperand(MCOperand::CreateImm(EncodedVal));
-  }
-
-  void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
-  }
-
-  void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
-
-    Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
-  }
-
-  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
-  }
-
-  void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
-    Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
-  }
-
-  void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    APFloat RealVal(FPImm.Val);
-    uint32_t ImmVal;
-    A64Imms::isFPImm(RealVal, ImmVal);
-
-    Inst.addOperand(MCOperand::CreateImm(ImmVal));
-  }
-
-  void addFPZeroOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands");
-    Inst.addOperand(MCOperand::CreateImm(0));
-  }
-
-  void addFPZeroIZeroOperands(MCInst &Inst, unsigned N) const {
-    addFPZeroOperands(Inst, N);
-  }
-
-  void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    unsigned Encoded = A64InvertCondCode(getCondCode());
-    Inst.addOperand(MCOperand::CreateImm(Encoded));
-  }
-
-  void addRegOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    Inst.addOperand(MCOperand::CreateReg(getReg()));
-  }
-
-  void addImmOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    addExpr(Inst, getImm());
-  }
-
-  template<int MemSize>
-  void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
-    uint64_t Val = CE->getValue() / MemSize;
-    Inst.addOperand(MCOperand::CreateImm(Val  & 0x7f));
-  }
-
-  template<int BitWidth>
-  void addSImmOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
-    uint64_t Val = CE->getValue();
-    Inst.addOperand(MCOperand::CreateImm(Val  & ((1ULL << BitWidth) - 1)));
-  }
-
-  void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
-    assert (N == 1 && "Invalid number of operands!");
-
-    addExpr(Inst, ImmWithLSL.Val);
-  }
-
-  template<unsigned field_width, unsigned scale>
-  void addLabelOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
-
-    if (!CE) {
-      addExpr(Inst, Imm.Val);
-      return;
-    }
-
-    int64_t Val = CE->getValue();
-    assert(Val % scale == 0 && "Unaligned immediate in instruction");
-    Val /= scale;
-
-    Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
-  }
-
-  template<int MemSize>
-  void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
-      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
-    } else {
-      Inst.addOperand(MCOperand::CreateExpr(getImm()));
-    }
-  }
-
-  template<unsigned RegWidth>
-  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands");
-    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
-
-    uint32_t Bits;
-    A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
-
-    Inst.addOperand(MCOperand::CreateImm(Bits));
-  }
-
-  void addMRSOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    bool Valid;
-    StringRef Name(SysReg.Data, SysReg.Length);
-    uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);
-
-    Inst.addOperand(MCOperand::CreateImm(Bits));
-  }
-
-  void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    bool Valid;
-    StringRef Name(SysReg.Data, SysReg.Length);
-    uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);
-
-    Inst.addOperand(MCOperand::CreateImm(Bits));
-  }
-
-  void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    bool Valid;
-    StringRef Name(SysReg.Data, SysReg.Length);
-    uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);
-
-    Inst.addOperand(MCOperand::CreateImm(Bits));
-  }
-
-  void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 2 && "Invalid number of operands!");
-
-    addExpr(Inst, ImmWithLSL.Val);
-
-    AArch64MCExpr::VariantKind Variant;
-    if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
-      Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
-      return;
-    }
-
-    // We know it's relocated
-    switch (Variant) {
-    case AArch64MCExpr::VK_AARCH64_ABS_G0:
-    case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
-    case AArch64MCExpr::VK_AARCH64_SABS_G0:
-    case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
-    case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
-    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
-    case AArch64MCExpr::VK_AARCH64_TPREL_G0:
-    case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
-      Inst.addOperand(MCOperand::CreateImm(0));
-      break;
-    case AArch64MCExpr::VK_AARCH64_ABS_G1:
-    case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
-    case AArch64MCExpr::VK_AARCH64_SABS_G1:
-    case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
-    case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
-    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
-    case AArch64MCExpr::VK_AARCH64_TPREL_G1:
-    case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
-      Inst.addOperand(MCOperand::CreateImm(1));
-      break;
-    case AArch64MCExpr::VK_AARCH64_ABS_G2:
-    case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
-    case AArch64MCExpr::VK_AARCH64_SABS_G2:
-    case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
-    case AArch64MCExpr::VK_AARCH64_TPREL_G2:
-      Inst.addOperand(MCOperand::CreateImm(2));
-      break;
-    case AArch64MCExpr::VK_AARCH64_ABS_G3:
-      Inst.addOperand(MCOperand::CreateImm(3));
-      break;
-    default: llvm_unreachable("Inappropriate move wide relocation");
-    }
-  }
-
-  template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
-  void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 2 && "Invalid number of operands!");
-    int UImm16, Shift;
-
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
-    uint64_t Value = CE->getValue();
-
-    if (RegWidth == 32) {
-      Value &= 0xffffffffULL;
-    }
-
-    bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
-    (void)Valid;
-    assert(Valid && "Invalid immediates should have been weeded out by now");
-
-    Inst.addOperand(MCOperand::CreateImm(UImm16));
-    Inst.addOperand(MCOperand::CreateImm(Shift));
-  }
-
-  void addPRFMOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
-    assert(CE->getValue() >= 0 && CE->getValue() <= 31
-           && "PRFM operand should be 5-bits");
-
-    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
-  }
-
-  // For Add-sub (extended register) operands.
-  void addRegExtendOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
-  }
-
-  // For Vector Immediates shifted imm operands.
-  void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
-      llvm_unreachable("Invalid shift amount for vector immediate inst.");
-
-    // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
-    int64_t Imm = ShiftExtend.Amount / 8;
-    Inst.addOperand(MCOperand::CreateImm(Imm));
-  }
-
-  void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
-      llvm_unreachable("Invalid shift amount for vector immediate inst.");
-
-    // Encode LSLH shift amount 0, 8  as 0, 1.
-    int64_t Imm = ShiftExtend.Amount / 8;
-    Inst.addOperand(MCOperand::CreateImm(Imm));
-  }
-
-  void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
-      llvm_unreachable("Invalid shift amount for vector immediate inst.");
-
-    // Encode MSL shift amount 8, 16  as 0, 1.
-    int64_t Imm = ShiftExtend.Amount / 8 - 1;
-    Inst.addOperand(MCOperand::CreateImm(Imm));
-  }
-
-  // For the extend in load-store (register offset) instructions.
-  template<unsigned MemSize>
-  void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
-    addAddrRegExtendOperands(Inst, N, MemSize);
-  }
-
-  void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
-                                unsigned MemSize) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    // First bit of Option is set in instruction classes, the high two bits are
-    // as follows:
-    unsigned OptionHi = 0;
-    switch (ShiftExtend.ShiftType) {
-    case A64SE::UXTW:
-    case A64SE::LSL:
-      OptionHi = 1;
-      break;
-    case A64SE::SXTW:
-    case A64SE::SXTX:
-      OptionHi = 3;
-      break;
-    default:
-      llvm_unreachable("Invalid extend type for register offset");
-    }
-
-    unsigned S = 0;
-    if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
-      S = 1;
-    else if (MemSize != 1 && ShiftExtend.Amount != 0)
-      S = 1;
-
-    Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
-  }
-  void addShiftOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
-  }
-
-  void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-
-    // A bit from each byte in the constant forms the encoded immediate
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    uint64_t Value = CE->getValue();
-
-    unsigned Imm = 0;
-    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
-      Imm |= (Value & 1) << i;
-    }
-    Inst.addOperand(MCOperand::CreateImm(Imm));
-  }
-
-  void addVectorListOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
-  }
-};
-
-} // end anonymous namespace.
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
-                               StringRef Mnemonic) {
-
-  // See if the operand has a custom parser
-  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
-
-  // It could either succeed, fail or just not care.
-  if (ResTy != MatchOperand_NoMatch)
-    return ResTy;
-
-  switch (getLexer().getKind()) {
-  default:
-    Error(Parser.getTok().getLoc(), "unexpected token in operand");
-    return MatchOperand_ParseFail;
-  case AsmToken::Identifier: {
-    // It might be in the LSL/UXTB family ...
-    OperandMatchResultTy GotShift = ParseShiftExtend(Operands);
-
-    // We can only continue if no tokens were eaten.
-    if (GotShift != MatchOperand_NoMatch)
-      return GotShift;
-
-    // ... or it might be a register ...
-    uint32_t NumLanes = 0;
-    OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
-    assert(GotReg != MatchOperand_ParseFail
-           && "register parsing shouldn't partially succeed");
-
-    if (GotReg == MatchOperand_Success) {
-      if (Parser.getTok().is(AsmToken::LBrac))
-        return ParseNEONLane(Operands, NumLanes);
-      else
-        return MatchOperand_Success;
-    }
-    // ... or it might be a symbolish thing
-  }
-    // Fall through
-  case AsmToken::LParen:  // E.g. (strcmp-4)
-  case AsmToken::Integer: // 1f, 2b labels
-  case AsmToken::String:  // quoted labels
-  case AsmToken::Dot:     // . is Current location
-  case AsmToken::Dollar:  // $ is PC
-  case AsmToken::Colon: {
-    SMLoc StartLoc  = Parser.getTok().getLoc();
-    SMLoc EndLoc;
-    const MCExpr *ImmVal = nullptr;
-
-    if (ParseImmediate(ImmVal) != MatchOperand_Success)
-      return MatchOperand_ParseFail;
-
-    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
-    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
-    return MatchOperand_Success;
-  }
-  case AsmToken::Hash: {   // Immediates
-    SMLoc StartLoc = Parser.getTok().getLoc();
-    SMLoc EndLoc;
-    const MCExpr *ImmVal = nullptr;
-    Parser.Lex();
-
-    if (ParseImmediate(ImmVal) != MatchOperand_Success)
-      return MatchOperand_ParseFail;
-
-    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
-    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
-    return MatchOperand_Success;
-  }
-  case AsmToken::LBrac: {
-    SMLoc Loc = Parser.getTok().getLoc();
-    Operands.push_back(AArch64Operand::CreateToken("[", Loc));
-    Parser.Lex(); // Eat '['
-
-    // There's no comma after a '[', so we can parse the next operand
-    // immediately.
-    return ParseOperand(Operands, Mnemonic);
-  }
-  // The following will likely be useful later, but not in very early cases
-  case AsmToken::LCurly: // SIMD vector list is not parsed here
-    llvm_unreachable("Don't know how to deal with '{' in operand");
-    return MatchOperand_ParseFail;
-  }
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
-  if (getLexer().is(AsmToken::Colon)) {
-    AArch64MCExpr::VariantKind RefKind;
-
-    OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
-    if (ResTy != MatchOperand_Success)
-      return ResTy;
-
-    const MCExpr *SubExprVal;
-    if (getParser().parseExpression(SubExprVal))
-      return MatchOperand_ParseFail;
-
-    ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
-    return MatchOperand_Success;
-  }
-
-  // No weird AArch64MCExpr prefix
-  return getParser().parseExpression(ExprVal)
-    ? MatchOperand_ParseFail : MatchOperand_Success;
-}
-
-// A lane attached to a NEON register. "[N]", which should yield three tokens:
-// '[', N, ']'. A hash is not allowed to precede the immediate here.
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
-                                uint32_t NumLanes) {
-  SMLoc Loc = Parser.getTok().getLoc();
-
-  assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
-  Operands.push_back(AArch64Operand::CreateToken("[", Loc));
-  Parser.Lex(); // Eat '['
-
-  if (Parser.getTok().isNot(AsmToken::Integer)) {
-    Error(Parser.getTok().getLoc(), "expected lane number");
-    return MatchOperand_ParseFail;
-  }
-
-  if (Parser.getTok().getIntVal() >= NumLanes) {
-    Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
-    return MatchOperand_ParseFail;
-  }
-
-  const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
-                                              getContext());
-  SMLoc S = Parser.getTok().getLoc();
-  Parser.Lex(); // Eat actual lane
-  SMLoc E = Parser.getTok().getLoc();
-  Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));
-
-
-  if (Parser.getTok().isNot(AsmToken::RBrac)) {
-    Error(Parser.getTok().getLoc(), "expected ']' after lane");
-    return MatchOperand_ParseFail;
-  }
-
-  Operands.push_back(AArch64Operand::CreateToken("]", Loc));
-  Parser.Lex(); // Eat ']'
-
-  return MatchOperand_Success;
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
-  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
-  Parser.Lex();
-
-  if (getLexer().isNot(AsmToken::Identifier)) {
-    Error(Parser.getTok().getLoc(),
-          "expected relocation specifier in operand after ':'");
-    return MatchOperand_ParseFail;
-  }
-
-  std::string LowerCase = Parser.getTok().getIdentifier().lower();
-  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
-    .Case("got",              AArch64MCExpr::VK_AARCH64_GOT)
-    .Case("got_lo12",         AArch64MCExpr::VK_AARCH64_GOT_LO12)
-    .Case("lo12",             AArch64MCExpr::VK_AARCH64_LO12)
-    .Case("abs_g0",           AArch64MCExpr::VK_AARCH64_ABS_G0)
-    .Case("abs_g0_nc",        AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
-    .Case("abs_g1",           AArch64MCExpr::VK_AARCH64_ABS_G1)
-    .Case("abs_g1_nc",        AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
-    .Case("abs_g2",           AArch64MCExpr::VK_AARCH64_ABS_G2)
-    .Case("abs_g2_nc",        AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
-    .Case("abs_g3",           AArch64MCExpr::VK_AARCH64_ABS_G3)
-    .Case("abs_g0_s",         AArch64MCExpr::VK_AARCH64_SABS_G0)
-    .Case("abs_g1_s",         AArch64MCExpr::VK_AARCH64_SABS_G1)
-    .Case("abs_g2_s",         AArch64MCExpr::VK_AARCH64_SABS_G2)
-    .Case("dtprel_g2",        AArch64MCExpr::VK_AARCH64_DTPREL_G2)
-    .Case("dtprel_g1",        AArch64MCExpr::VK_AARCH64_DTPREL_G1)
-    .Case("dtprel_g1_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
-    .Case("dtprel_g0",        AArch64MCExpr::VK_AARCH64_DTPREL_G0)
-    .Case("dtprel_g0_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
-    .Case("dtprel_hi12",      AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
-    .Case("dtprel_lo12",      AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
-    .Case("dtprel_lo12_nc",   AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
-    .Case("gottprel_g1",      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
-    .Case("gottprel_g0_nc",   AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
-    .Case("gottprel",         AArch64MCExpr::VK_AARCH64_GOTTPREL)
-    .Case("gottprel_lo12",    AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
-    .Case("tprel_g2",         AArch64MCExpr::VK_AARCH64_TPREL_G2)
-    .Case("tprel_g1",         AArch64MCExpr::VK_AARCH64_TPREL_G1)
-    .Case("tprel_g1_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
-    .Case("tprel_g0",         AArch64MCExpr::VK_AARCH64_TPREL_G0)
-    .Case("tprel_g0_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
-    .Case("tprel_hi12",       AArch64MCExpr::VK_AARCH64_TPREL_HI12)
-    .Case("tprel_lo12",       AArch64MCExpr::VK_AARCH64_TPREL_LO12)
-    .Case("tprel_lo12_nc",    AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
-    .Case("tlsdesc",          AArch64MCExpr::VK_AARCH64_TLSDESC)
-    .Case("tlsdesc_lo12",     AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
-    .Default(AArch64MCExpr::VK_AARCH64_None);
-
-  if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
-    Error(Parser.getTok().getLoc(),
-          "expected relocation specifier in operand after ':'");
-    return MatchOperand_ParseFail;
-  }
-  Parser.Lex(); // Eat identifier
-
-  if (getLexer().isNot(AsmToken::Colon)) {
-    Error(Parser.getTok().getLoc(),
-          "expected ':' after relocation specifier");
-    return MatchOperand_ParseFail;
-  }
-  Parser.Lex();
-  return MatchOperand_Success;
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseImmWithLSLOperand(
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-
-  SMLoc S = Parser.getTok().getLoc();
-
-  if (Parser.getTok().is(AsmToken::Hash))
-    Parser.Lex(); // Eat '#'
-  else if (Parser.getTok().isNot(AsmToken::Integer))
-    // Operand should start with '#' or be an integer; otherwise it is not a
-    // match for this parser (no error is emitted here).
-    return MatchOperand_NoMatch;
-
-  const MCExpr *Imm;
-  if (ParseImmediate(Imm) != MatchOperand_Success)
-    return MatchOperand_ParseFail;
-  else if (Parser.getTok().isNot(AsmToken::Comma)) {
-    SMLoc E = Parser.getTok().getLoc();
-    Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
-    return MatchOperand_Success;
-  }
-
-  // Eat ','
-  Parser.Lex();
-
-  // The optional operand must be "lsl #N" where N is non-negative.
-  if (Parser.getTok().is(AsmToken::Identifier)
-      && Parser.getTok().getIdentifier().equals_lower("lsl")) {
-    Parser.Lex();
-
-    if (Parser.getTok().is(AsmToken::Hash)) {
-      Parser.Lex();
-
-      if (Parser.getTok().isNot(AsmToken::Integer)) {
-        Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
-        return MatchOperand_ParseFail;
-      }
-    }
-  }
-
-  int64_t ShiftAmount = Parser.getTok().getIntVal();
-
-  if (ShiftAmount < 0) {
-    Error(Parser.getTok().getLoc(), "positive shift amount required");
-    return MatchOperand_ParseFail;
-  }
-  Parser.Lex(); // Eat the number
-
-  SMLoc E = Parser.getTok().getLoc();
-  Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
-                                                      false, S, E));
-  return MatchOperand_Success;
-}
-
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseCondCodeOperand(
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  if (Parser.getTok().isNot(AsmToken::Identifier))
-    return MatchOperand_NoMatch;
-
-  StringRef Tok = Parser.getTok().getIdentifier();
-  A64CC::CondCodes CondCode = A64StringToCondCode(Tok);
-
-  if (CondCode == A64CC::Invalid)
-    return MatchOperand_NoMatch;
-
-  SMLoc S = Parser.getTok().getLoc();
-  Parser.Lex(); // Eat condition code
-  SMLoc E = Parser.getTok().getLoc();
-
-  Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
-  return MatchOperand_Success;
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseCRxOperand(
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  SMLoc S = Parser.getTok().getLoc();
-  if (Parser.getTok().isNot(AsmToken::Identifier)) {
-    Error(S, "Expected cN operand where 0 <= N <= 15");
-    return MatchOperand_ParseFail;
-  }
-
-  StringRef Tok = Parser.getTok().getIdentifier();
-  if (Tok[0] != 'c' && Tok[0] != 'C') {
-    Error(S, "Expected cN operand where 0 <= N <= 15");
-    return MatchOperand_ParseFail;
-  }
-
-  uint32_t CRNum;
-  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
-  if (BadNum || CRNum > 15) {
-    Error(S, "Expected cN operand where 0 <= N <= 15");
-    return MatchOperand_ParseFail;
-  }
-
-  const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());
-
-  Parser.Lex();
-  SMLoc E = Parser.getTok().getLoc();
-
-  Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
-  return MatchOperand_Success;
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseFPImmOperand(
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-
-  SMLoc S = Parser.getTok().getLoc();
-
-  bool Hash = false;
-  if (Parser.getTok().is(AsmToken::Hash)) {
-    Parser.Lex(); // Eat '#'
-    Hash = true;
-  }
-
-  bool Negative = false;
-  if (Parser.getTok().is(AsmToken::Minus)) {
-    Negative = true;
-    Parser.Lex(); // Eat '-'
-  } else if (Parser.getTok().is(AsmToken::Plus)) {
-    Parser.Lex(); // Eat '+'
-  }
-
-  if (Parser.getTok().isNot(AsmToken::Real)) {
-    if (!Hash)
-      return MatchOperand_NoMatch;
-    Error(S, "Expected floating-point immediate");
-    return MatchOperand_ParseFail;
-  }
-
-  APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
-  if (Negative) RealVal.changeSign();
-  double DblVal = RealVal.convertToDouble();
-
-  Parser.Lex(); // Eat real number
-  SMLoc E = Parser.getTok().getLoc();
-
-  Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
-  return MatchOperand_Success;
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseFPImm0AndImm0Operand(
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-
-  SMLoc S = Parser.getTok().getLoc();
-
-  bool Hash = false;
-  if (Parser.getTok().is(AsmToken::Hash)) {
-    Parser.Lex(); // Eat '#'
-    Hash = true;
-  }
-
-  APFloat RealVal(0.0);
-  if (Parser.getTok().is(AsmToken::Real)) {
-    if(Parser.getTok().getString() != "0.0") {
-      Error(S, "only #0.0 is acceptable as immediate");
-      return MatchOperand_ParseFail;
-    }
-  }
-  else if (Parser.getTok().is(AsmToken::Integer)) {
-    if(Parser.getTok().getIntVal() != 0) {
-      Error(S, "only #0.0 is acceptable as immediate");
-      return MatchOperand_ParseFail;
-    }
-  }
-  else {
-    if (!Hash)
-      return MatchOperand_NoMatch;
-    Error(S, "only #0.0 is acceptable as immediate");
-    return MatchOperand_ParseFail;
-  }
-
-  Parser.Lex(); // Eat real number
-  SMLoc E = Parser.getTok().getLoc();
-
-  Operands.push_back(AArch64Operand::CreateFPImm(0.0, S, E));
-  return MatchOperand_Success;
-}
-
-// Automatically generated
-static unsigned MatchRegisterName(StringRef Name);
-
-bool
-AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
-                                   StringRef &Layout,
-                                   SMLoc &LayoutLoc) const {
-  const AsmToken &Tok = Parser.getTok();
-
-  if (Tok.isNot(AsmToken::Identifier))
-    return false;
-
-  std::string LowerReg = Tok.getString().lower();
-  size_t DotPos = LowerReg.find('.');
-
-  bool IsVec128 = false;
-  SMLoc S = Tok.getLoc();
-  RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);
-
-  if (DotPos == std::string::npos) {
-    Layout = StringRef();
-  } else {
-    // Everything afterwards needs to be a literal token, expected to be
-    // '.2d','.b' etc for vector registers.
-
-    // This StringSwitch validates the input and (perhaps more importantly)
-    // gives us a permanent string to use in the token (a pointer into LowerReg
-    // would go out of scope when we return).
-    LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
-    StringRef LayoutText = StringRef(LowerReg).substr(DotPos);
-
-    // See if it's a 128-bit layout first.
-    Layout = StringSwitch<const char *>(LayoutText)
-      .Case(".q", ".q").Case(".1q", ".1q")
-      .Case(".d", ".d").Case(".2d", ".2d")
-      .Case(".s", ".s").Case(".4s", ".4s")
-      .Case(".h", ".h").Case(".8h", ".8h")
-      .Case(".b", ".b").Case(".16b", ".16b")
-      .Default("");
-
-    if (Layout.size() != 0)
-      IsVec128 = true;
-    else {
-      Layout = StringSwitch<const char *>(LayoutText)
-                   .Case(".1d", ".1d")
-                   .Case(".2s", ".2s")
-                   .Case(".4h", ".4h")
-                   .Case(".8b", ".8b")
-                   .Default("");
-    }
-
-    if (Layout.size() == 0) {
-      // If we've still not pinned it down the register is malformed.
-      return false;
-    }
-  }
-
-  RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
-  if (RegNum == AArch64::NoRegister) {
-    RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
-      .Case("ip0", AArch64::X16)
-      .Case("ip1", AArch64::X17)
-      .Case("fp", AArch64::X29)
-      .Case("lr", AArch64::X30)
-      .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
-      .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
-      .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
-      .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
-      .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
-      .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
-      .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
-      .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
-      .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
-      .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
-      .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
-      .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
-      .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
-      .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
-      .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
-      .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
-      .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
-      .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
-      .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
-      .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
-      .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
-      .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
-      .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
-      .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
-      .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
-      .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
-      .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
-      .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
-      .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
-      .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
-      .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
-      .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
-      .Default(AArch64::NoRegister);
-  }
-  if (RegNum == AArch64::NoRegister)
-    return false;
-
-  return true;
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
-                                uint32_t &NumLanes) {
-  unsigned RegNum;
-  StringRef Layout;
-  SMLoc RegEndLoc, LayoutLoc;
-  SMLoc S = Parser.getTok().getLoc();
-
-  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
-    return MatchOperand_NoMatch;
-
-  Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));
-
-  if (Layout.size() != 0) {
-    unsigned long long TmpLanes = 0;
-    llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
-    if (TmpLanes != 0) {
-      NumLanes = TmpLanes;
-    } else {
-      // If the number of lanes isn't specified explicitly, a valid instruction
-      // will have an element specifier and be capable of acting on the entire
-      // vector register.
-      switch (Layout.back()) {
-      default: llvm_unreachable("Invalid layout specifier");
-      case 'b': NumLanes = 16; break;
-      case 'h': NumLanes = 8; break;
-      case 's': NumLanes = 4; break;
-      case 'd': NumLanes = 2; break;
-      case 'q': NumLanes = 1; break;
-      }
-    }
-
-    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
-  }
-
-  Parser.Lex();
-  return MatchOperand_Success;
-}
-
-bool
-AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
-                                SMLoc &EndLoc) {
-  // This callback is used for things like DWARF frame directives in
-  // assembly. They don't care about things like NEON layouts or lanes, they
-  // just want to be able to produce the DWARF register number.
-  StringRef LayoutSpec;
-  SMLoc RegEndLoc, LayoutLoc;
-  StartLoc = Parser.getTok().getLoc();
-
-  if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
-    return true;
-
-  Parser.Lex();
-  EndLoc = Parser.getTok().getLoc();
-
-  return false;
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  // Since these operands occur in very limited circumstances, without
-  // alternatives, we actually signal an error if there is no match. If relaxing
-  // this, beware of unintended consequences: an immediate will be accepted
-  // during matching, no matter how it gets into the AArch64Operand.
-  const AsmToken &Tok = Parser.getTok();
-  SMLoc S = Tok.getLoc();
-
-  if (Tok.is(AsmToken::Identifier)) {
-    bool ValidName;
-    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);
-
-    if (!ValidName) {
-      Error(S, "operand specifier not recognised");
-      return MatchOperand_ParseFail;
-    }
-
-    Parser.Lex(); // We're done with the identifier. Eat it
-
-    SMLoc E = Parser.getTok().getLoc();
-    const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
-    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
-    return MatchOperand_Success;
-  } else if (Tok.is(AsmToken::Hash)) {
-    Parser.Lex();
-
-    const MCExpr *ImmVal;
-    if (ParseImmediate(ImmVal) != MatchOperand_Success)
-      return MatchOperand_ParseFail;
-
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
-    if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
-      Error(S, "Invalid immediate for instruction");
-      return MatchOperand_ParseFail;
-    }
-
-    SMLoc E = Parser.getTok().getLoc();
-    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
-    return MatchOperand_Success;
-  }
-
-  Error(S, "unexpected operand for instruction");
-  return MatchOperand_ParseFail;
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseSysRegOperand(
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  const AsmToken &Tok = Parser.getTok();
-
-  // Any MSR/MRS operand will be an identifier, and we want to store it as some
-  // kind of string: SPSel is valid for two different forms of MSR with two
-  // different encodings. There's no collision at the moment, but the potential
-  // is there.
-  if (!Tok.is(AsmToken::Identifier)) {
-    return MatchOperand_NoMatch;
-  }
-
-  SMLoc S = Tok.getLoc();
-  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
-  Parser.Lex(); // Eat identifier
-
-  return MatchOperand_Success;
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseLSXAddressOperand(
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  SMLoc S = Parser.getTok().getLoc();
-
-  unsigned RegNum;
-  SMLoc RegEndLoc, LayoutLoc;
-  StringRef Layout;
-  if(!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
-     || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
-     || Layout.size() != 0) {
-    // Check Layout.size because we don't want to let "x3.4s" or similar
-    // through.
-    return MatchOperand_NoMatch;
-  }
-  Parser.Lex(); // Eat register
-
-  if (Parser.getTok().is(AsmToken::RBrac)) {
-    // We're done
-    SMLoc E = Parser.getTok().getLoc();
-    Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
-    return MatchOperand_Success;
-  }
-
-  // Otherwise, only ", #0" is valid
-
-  if (Parser.getTok().isNot(AsmToken::Comma)) {
-    Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
-    return MatchOperand_ParseFail;
-  }
-  Parser.Lex(); // Eat ','
-
-  if (Parser.getTok().isNot(AsmToken::Hash)) {
-    Error(Parser.getTok().getLoc(), "expected '#0'");
-    return MatchOperand_ParseFail;
-  }
-  Parser.Lex(); // Eat '#'
-
-  if (Parser.getTok().isNot(AsmToken::Integer)
-      || Parser.getTok().getIntVal() != 0 ) {
-    Error(Parser.getTok().getLoc(), "expected '#0'");
-    return MatchOperand_ParseFail;
-  }
-  Parser.Lex(); // Eat '0'
-
-  SMLoc E = Parser.getTok().getLoc();
-  Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
-  return MatchOperand_Success;
-}
-
-AArch64AsmParser::OperandMatchResultTy
-AArch64AsmParser::ParseShiftExtend(
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  StringRef IDVal = Parser.getTok().getIdentifier();
-  std::string LowerID = IDVal.lower();
-
-  A64SE::ShiftExtSpecifiers Spec =
-      StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
-        .Case("lsl", A64SE::LSL)
-	.Case("msl", A64SE::MSL)
-	.Case("lsr", A64SE::LSR)
-	.Case("asr", A64SE::ASR)
-	.Case("ror", A64SE::ROR)
-	.Case("uxtb", A64SE::UXTB)
-	.Case("uxth", A64SE::UXTH)
-	.Case("uxtw", A64SE::UXTW)
-	.Case("uxtx", A64SE::UXTX)
-	.Case("sxtb", A64SE::SXTB)
-	.Case("sxth", A64SE::SXTH)
-	.Case("sxtw", A64SE::SXTW)
-	.Case("sxtx", A64SE::SXTX)
-	.Default(A64SE::Invalid);
-
-  if (Spec == A64SE::Invalid)
-    return MatchOperand_NoMatch;
-
-  // Eat the shift
-  SMLoc S, E;
-  S = Parser.getTok().getLoc();
-  Parser.Lex();
-
-  if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
-      Spec != A64SE::ROR && Spec != A64SE::MSL) {
-    // The shift amount can be omitted for the extending versions, but not real
-    // shifts:
-    //     add x0, x0, x0, uxtb
-    // is valid, and equivalent to
-    //     add x0, x0, x0, uxtb #0
-
-    if (Parser.getTok().is(AsmToken::Comma) ||
-        Parser.getTok().is(AsmToken::EndOfStatement) ||
-        Parser.getTok().is(AsmToken::RBrac)) {
-      Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
-                                                           S, E));
-      return MatchOperand_Success;
-    }
-  }
-
-  // Eat # at beginning of immediate
-  if (!Parser.getTok().is(AsmToken::Hash)) {
-    Error(Parser.getTok().getLoc(),
-          "expected #imm after shift specifier");
-    return MatchOperand_ParseFail;
-  }
-  Parser.Lex();
-
-  // Make sure we do actually have a number
-  if (!Parser.getTok().is(AsmToken::Integer)) {
-    Error(Parser.getTok().getLoc(),
-          "expected integer shift amount");
-    return MatchOperand_ParseFail;
-  }
-  unsigned Amount = Parser.getTok().getIntVal();
-  Parser.Lex();
-  E = Parser.getTok().getLoc();
-
-  Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
-                                                       S, E));
-
-  return MatchOperand_Success;
-}
-
-/// Try to parse a vector register token. If it is a vector register, the
-/// token is eaten and true is returned; otherwise false is returned.
-bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
-                                      StringRef &Layout, SMLoc &LayoutLoc) {
-  bool IsVector = true;
-
-  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
-    IsVector = false;
-  else if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID]
-                .contains(RegNum) &&
-           !AArch64MCRegisterClasses[AArch64::FPR128RegClassID]
-                .contains(RegNum))
-    IsVector = false;
-  else if (Layout.size() == 0)
-    IsVector = false;
-
-  if (!IsVector)
-    Error(Parser.getTok().getLoc(), "expected vector type register");
-
-  Parser.Lex(); // Eat this token.
-  return IsVector;
-}
-
-
-// A vector list contains 1-4 consecutive registers.
-// There are two kinds of vector list when the number of vectors is > 1:
-//   (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
-//   (2) {Vn.layout - Vm.layout}
-// If the layout is like .b/.h/.s/.d, also parse the lane.
-AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
-    SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
-  if (Parser.getTok().isNot(AsmToken::LCurly)) {
-    Error(Parser.getTok().getLoc(), "'{' expected");
-    return MatchOperand_ParseFail;
-  }
-  SMLoc SLoc = Parser.getTok().getLoc();
-  Parser.Lex(); // Eat '{' token.
-
-  unsigned Reg, Count = 1;
-  StringRef LayoutStr;
-  SMLoc RegEndLoc, LayoutLoc;
-  if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
-    return MatchOperand_ParseFail;
-
-  if (Parser.getTok().is(AsmToken::Minus)) {
-    Parser.Lex(); // Eat the minus.
-
-    unsigned Reg2;
-    StringRef LayoutStr2;
-    SMLoc RegEndLoc2, LayoutLoc2;
-    SMLoc RegLoc2 = Parser.getTok().getLoc();
-
-    if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
-      return MatchOperand_ParseFail;
-    unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);
-
-    if (LayoutStr != LayoutStr2) {
-      Error(LayoutLoc2, "expected the same vector layout");
-      return MatchOperand_ParseFail;
-    }
-    if (Space == 0 || Space > 3) {
-      Error(RegLoc2, "invalid number of vectors");
-      return MatchOperand_ParseFail;
-    }
-
-    Count += Space;
-  } else {
-    unsigned LastReg = Reg;
-    while (Parser.getTok().is(AsmToken::Comma)) {
-      Parser.Lex(); // Eat the comma.
-      unsigned Reg2;
-      StringRef LayoutStr2;
-      SMLoc RegEndLoc2, LayoutLoc2;
-      SMLoc RegLoc2 = Parser.getTok().getLoc();
-
-      if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
-        return MatchOperand_ParseFail;
-      unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
-                                        : (Reg2 + 32 - LastReg);
-      Count++;
-
-      // The space between two vectors should be 1, and they should have the
-      // same layout. The total count must not be greater than 4.
-      if (Space != 1) {
-        Error(RegLoc2, "invalid space between two vectors");
-        return MatchOperand_ParseFail;
-      }
-      if (LayoutStr != LayoutStr2) {
-        Error(LayoutLoc2, "expected the same vector layout");
-        return MatchOperand_ParseFail;
-      }
-      if (Count > 4) {
-        Error(RegLoc2, "invalid number of vectors");
-        return MatchOperand_ParseFail;
-      }
-
-      LastReg = Reg2;
-    }
-  }
-
-  if (Parser.getTok().isNot(AsmToken::RCurly)) {
-    Error(Parser.getTok().getLoc(), "'}' expected");
-    return MatchOperand_ParseFail;
-  }
-  SMLoc ELoc = Parser.getTok().getLoc();
-  Parser.Lex(); // Eat '}' token.
-
-  A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
-  if (Count > 1) { // If count > 1, create vector list using super register.
-    bool IsVec64 = (Layout < A64Layout::VL_16B);
-    static unsigned SupRegIDs[3][2] = {
-      { AArch64::QPairRegClassID, AArch64::DPairRegClassID },
-      { AArch64::QTripleRegClassID, AArch64::DTripleRegClassID },
-      { AArch64::QQuadRegClassID, AArch64::DQuadRegClassID }
-    };
-    unsigned SupRegID = SupRegIDs[Count - 2][static_cast<int>(IsVec64)];
-    unsigned Sub0 = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
-    const MCRegisterInfo *MRI = getContext().getRegisterInfo();
-    Reg = MRI->getMatchingSuperReg(Reg, Sub0,
-                                   &AArch64MCRegisterClasses[SupRegID]);
-  }
-  Operands.push_back(
-      AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc));
-
-  if (Parser.getTok().is(AsmToken::LBrac)) {
-    uint32_t NumLanes = 0;
-    switch(Layout) {
-    case A64Layout::VL_B : NumLanes = 16; break;
-    case A64Layout::VL_H : NumLanes = 8; break;
-    case A64Layout::VL_S : NumLanes = 4; break;
-    case A64Layout::VL_D : NumLanes = 2; break;
-    default:
-      SMLoc Loc = getLexer().getLoc();
-      Error(Loc, "expected comma before next operand");
-      return MatchOperand_ParseFail;
-    }
-    return ParseNEONLane(Operands, NumLanes);
-  } else {
-    return MatchOperand_Success;
-  }
-}
-
-// FIXME: We would really like to be able to tablegen'erate this.
-bool AArch64AsmParser::
-validateInstruction(MCInst &Inst,
-                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  switch (Inst.getOpcode()) {
-  case AArch64::BFIwwii:
-  case AArch64::BFIxxii:
-  case AArch64::SBFIZwwii:
-  case AArch64::SBFIZxxii:
-  case AArch64::UBFIZwwii:
-  case AArch64::UBFIZxxii:  {
-    unsigned ImmOps = Inst.getNumOperands() - 2;
-    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
-    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
-
-    if (ImmR != 0 && ImmS >= ImmR) {
-      return Error(Operands[4]->getStartLoc(),
-                   "requested insert overflows register");
-    }
-    return false;
-  }
-  case AArch64::BFXILwwii:
-  case AArch64::BFXILxxii:
-  case AArch64::SBFXwwii:
-  case AArch64::SBFXxxii:
-  case AArch64::UBFXwwii:
-  case AArch64::UBFXxxii: {
-    unsigned ImmOps = Inst.getNumOperands() - 2;
-    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
-    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
-    int64_t RegWidth = 0;
-    switch (Inst.getOpcode()) {
-    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
-      RegWidth = 64;
-      break;
-    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
-      RegWidth = 32;
-      break;
-    }
-
-    if (ImmS >= RegWidth || ImmS < ImmR) {
-      return Error(Operands[4]->getStartLoc(),
-                   "requested extract overflows register");
-    }
-    return false;
-  }
-  case AArch64::ICix: {
-    int64_t ImmVal = Inst.getOperand(0).getImm();
-    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
-    if (!A64IC::NeedsRegister(ICOp)) {
-      return Error(Operands[1]->getStartLoc(),
-                   "specified IC op does not use a register");
-    }
-    return false;
-  }
-  case AArch64::ICi: {
-    int64_t ImmVal = Inst.getOperand(0).getImm();
-    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
-    if (A64IC::NeedsRegister(ICOp)) {
-      return Error(Operands[1]->getStartLoc(),
-                   "specified IC op requires a register");
-    }
-    return false;
-  }
-  case AArch64::TLBIix: {
-    int64_t ImmVal = Inst.getOperand(0).getImm();
-    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
-    if (!A64TLBI::NeedsRegister(TLBIOp)) {
-      return Error(Operands[1]->getStartLoc(),
-                   "specified TLBI op does not use a register");
-    }
-    return false;
-  }
-  case AArch64::TLBIi: {
-    int64_t ImmVal = Inst.getOperand(0).getImm();
-    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
-    if (A64TLBI::NeedsRegister(TLBIOp)) {
-      return Error(Operands[1]->getStartLoc(),
-                   "specified TLBI op requires a register");
-    }
-    return false;
-  }
-  }
-
-  return false;
-}
-
-
-// Parses the instruction *together with* all operands, appending each parsed
-// operand to the "Operands" list
-bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
-                                        StringRef Name, SMLoc NameLoc,
-                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
-  StringRef PatchedName = StringSwitch<StringRef>(Name.lower())
-    .Case("beq", "b.eq")
-    .Case("bne", "b.ne")
-    .Case("bhs", "b.hs")
-    .Case("bcs", "b.cs")
-    .Case("blo", "b.lo")
-    .Case("bcc", "b.cc")
-    .Case("bmi", "b.mi")
-    .Case("bpl", "b.pl")
-    .Case("bvs", "b.vs")
-    .Case("bvc", "b.vc")
-    .Case("bhi", "b.hi")
-    .Case("bls", "b.ls")
-    .Case("bge", "b.ge")
-    .Case("blt", "b.lt")
-    .Case("bgt", "b.gt")
-    .Case("ble", "b.le")
-    .Case("bal", "b.al")
-    .Case("bnv", "b.nv")
-    .Default(Name);
-
-  size_t CondCodePos = PatchedName.find('.');
-
-  StringRef Mnemonic = PatchedName.substr(0, CondCodePos);
-  Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));
-
-  if (CondCodePos != StringRef::npos) {
-    // We have a condition code
-    SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
-    StringRef CondStr = PatchedName.substr(CondCodePos + 1, StringRef::npos);
-    A64CC::CondCodes Code;
-
-    Code = A64StringToCondCode(CondStr);
-
-    if (Code == A64CC::Invalid) {
-      Error(S, "invalid condition code");
-      Parser.eatToEndOfStatement();
-      return true;
-    }
-
-    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);
-
-    Operands.push_back(AArch64Operand::CreateToken(".",  DotL));
-    SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
-    Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
-  }
-
-  // Now we parse the operands of this instruction
-  if (getLexer().isNot(AsmToken::EndOfStatement)) {
-    // Read the first operand.
-    if (ParseOperand(Operands, Mnemonic)) {
-      Parser.eatToEndOfStatement();
-      return true;
-    }
-
-    while (getLexer().is(AsmToken::Comma)) {
-      Parser.Lex();  // Eat the comma.
-
-      // Parse and remember the operand.
-      if (ParseOperand(Operands, Mnemonic)) {
-        Parser.eatToEndOfStatement();
-        return true;
-      }
-
-
-      // After successfully parsing some operands there are two special cases to
-      // consider (i.e. notional operands not separated by commas). Both are due
-      // to memory specifiers:
-      //  + An RBrac will end an address for load/store/prefetch
-      //  + An '!' will indicate a pre-indexed operation.
-      //
-      // It's someone else's responsibility to make sure these tokens are sane
-      // in the given context!
-      if (Parser.getTok().is(AsmToken::RBrac)) {
-        SMLoc Loc = Parser.getTok().getLoc();
-        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
-        Parser.Lex();
-      }
-
-      if (Parser.getTok().is(AsmToken::Exclaim)) {
-        SMLoc Loc = Parser.getTok().getLoc();
-        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
-        Parser.Lex();
-      }
-    }
-  }
-
-  if (getLexer().isNot(AsmToken::EndOfStatement)) {
-    SMLoc Loc = getLexer().getLoc();
-    Parser.eatToEndOfStatement();
-    return Error(Loc, "expected comma before next operand");
-  }
-
-  // Eat the EndOfStatement
-  Parser.Lex();
-
-  return false;
-}
-
-bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
-  StringRef IDVal = DirectiveID.getIdentifier();
-  if (IDVal == ".hword")
-    return ParseDirectiveWord(2, DirectiveID.getLoc());
-  else if (IDVal == ".word")
-    return ParseDirectiveWord(4, DirectiveID.getLoc());
-  else if (IDVal == ".xword")
-    return ParseDirectiveWord(8, DirectiveID.getLoc());
-  else if (IDVal == ".tlsdesccall")
-    return ParseDirectiveTLSDescCall(DirectiveID.getLoc());
-
-  return true;
-}
-
-/// parseDirectiveWord
-///  ::= .word [ expression (, expression)* ]
-bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
-  if (getLexer().isNot(AsmToken::EndOfStatement)) {
-    for (;;) {
-      const MCExpr *Value;
-      if (getParser().parseExpression(Value))
-        return false;
-
-      getParser().getStreamer().EmitValue(Value, Size);
-
-      if (getLexer().is(AsmToken::EndOfStatement))
-        break;
-
-      // FIXME: Improve diagnostic.
-      if (getLexer().isNot(AsmToken::Comma)) {
-        Error(L, "unexpected token in directive");
-        return false;
-      }
-      Parser.Lex();
-    }
-  }
-
-  Parser.Lex();
-  return false;
-}
-
-// parseDirectiveTLSDescCall:
-//   ::= .tlsdesccall symbol
-bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
-  StringRef Name;
-  if (getParser().parseIdentifier(Name)) {
-    Error(L, "expected symbol after directive");
-    return false;
-  }
-
-  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
-  const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
-
-  MCInst Inst;
-  Inst.setOpcode(AArch64::TLSDESCCALL);
-  Inst.addOperand(MCOperand::CreateExpr(Expr));
-
-  getParser().getStreamer().EmitInstruction(Inst, STI);
-  return false;
-}
-
-
-bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
-                                 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
-                                 MCStreamer &Out, unsigned &ErrorInfo,
-                                 bool MatchingInlineAsm) {
-  MCInst Inst;
-  unsigned MatchResult;
-  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
-                                     MatchingInlineAsm);
-
-  if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
-    return Error(IDLoc, "too few operands for instruction");
-
-  switch (MatchResult) {
-  default: break;
-  case Match_Success:
-    if (validateInstruction(Inst, Operands))
-      return true;
-
-    Out.EmitInstruction(Inst, STI);
-    return false;
-  case Match_MissingFeature:
-    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
-    return true;
-  case Match_InvalidOperand: {
-    SMLoc ErrorLoc = IDLoc;
-    if (ErrorInfo != ~0U) {
-      ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
-      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
-    }
-
-    return Error(ErrorLoc, "invalid operand for instruction");
-  }
-  case Match_MnemonicFail:
-    return Error(IDLoc, "invalid instruction");
-
-  case Match_AddSubRegExtendSmall:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
-  case Match_AddSubRegExtendLarge:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
-  case Match_AddSubRegShift32:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
-  case Match_AddSubRegShift64:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
-  case Match_AddSubSecondSource:
-      return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-          "expected compatible register, symbol or integer in range [0, 4095]");
-  case Match_CVTFixedPos32:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [1, 32]");
-  case Match_CVTFixedPos64:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [1, 64]");
-  case Match_CondCode:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected AArch64 condition code");
-  case Match_FPImm:
-    // Any situation which allows a nontrivial floating-point constant also
-    // allows a register.
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected compatible register or floating-point constant");
-  case Match_FPZero:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected floating-point constant #0.0 or invalid register type");
-  case Match_Label:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected label or encodable integer pc offset");
-  case Match_Lane1:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected lane specifier '[1]'");
-  case Match_LoadStoreExtend32_1:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
-  case Match_LoadStoreExtend32_2:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
-  case Match_LoadStoreExtend32_4:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
-  case Match_LoadStoreExtend32_8:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
-  case Match_LoadStoreExtend32_16:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
-  case Match_LoadStoreExtend64_1:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected 'lsl' or 'sxtx' with optional shift of #0");
-  case Match_LoadStoreExtend64_2:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
-  case Match_LoadStoreExtend64_4:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
-  case Match_LoadStoreExtend64_8:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
-  case Match_LoadStoreExtend64_16:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
-  case Match_LoadStoreSImm7_4:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer multiple of 4 in range [-256, 252]");
-  case Match_LoadStoreSImm7_8:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer multiple of 8 in range [-512, 504]");
-  case Match_LoadStoreSImm7_16:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer multiple of 16 in range [-1024, 1008]");
-  case Match_LoadStoreSImm9:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [-256, 255]");
-  case Match_LoadStoreUImm12_1:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected symbolic reference or integer in range [0, 4095]");
-  case Match_LoadStoreUImm12_2:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected symbolic reference or integer in range [0, 8190]");
-  case Match_LoadStoreUImm12_4:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected symbolic reference or integer in range [0, 16380]");
-  case Match_LoadStoreUImm12_8:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected symbolic reference or integer in range [0, 32760]");
-  case Match_LoadStoreUImm12_16:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected symbolic reference or integer in range [0, 65520]");
-  case Match_LogicalSecondSource:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected compatible register or logical immediate");
-  case Match_MOVWUImm16:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected relocated symbol or integer in range [0, 65535]");
-  case Match_MRS:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected readable system register");
-  case Match_MSR:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected writable system register or pstate");
-  case Match_NamedImm_at:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
-  case Match_NamedImm_dbarrier:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-             "expected integer in range [0, 15] or symbolic barrier operand");
-  case Match_NamedImm_dc:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected symbolic 'dc' operand");
-  case Match_NamedImm_ic:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
-  case Match_NamedImm_isb:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [0, 15] or 'sy'");
-  case Match_NamedImm_prefetch:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
-  case Match_NamedImm_tlbi:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected translation buffer invalidation operand");
-  case Match_UImm16:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [0, 65535]");
-  case Match_UImm3:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [0, 7]");
-  case Match_UImm4:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [0, 15]");
-  case Match_UImm5:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [0, 31]");
-  case Match_UImm6:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [0, 63]");
-  case Match_UImm7:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [0, 127]");
-  case Match_Width32:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [<lsb>, 31]");
-  case Match_Width64:
-    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [<lsb>, 63]");
-  case Match_ShrImm8:
-    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [1, 8]");
-  case Match_ShrImm16:
-    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [1, 16]");
-  case Match_ShrImm32:
-    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [1, 32]");
-  case Match_ShrImm64:
-    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [1, 64]");
-  case Match_ShlImm8:
-    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [0, 7]");
-  case Match_ShlImm16:
-    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [0, 15]");
-  case Match_ShlImm32:
-    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [0, 31]");
-  case Match_ShlImm64:
-    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
-                 "expected integer in range [0, 63]");
-  }
-
-  llvm_unreachable("Implement any new match types added!");
-  return true;
-}
-
-void AArch64Operand::print(raw_ostream &OS) const {
-  switch (Kind) {
-  case k_CondCode:
-    OS << "<CondCode: " << CondCode.Code << ">";
-    break;
-  case k_FPImmediate:
-    OS << "<fpimm: " << FPImm.Val << ">";
-    break;
-  case k_ImmWithLSL:
-    OS << "<immwithlsl: imm=" << ImmWithLSL.Val
-       << ", shift=" << ImmWithLSL.ShiftAmount << ">";
-    break;
-  case k_Immediate:
-    getImm()->print(OS);
-    break;
-  case k_Register:
-    OS << "<register " << getReg() << '>';
-    break;
-  case k_Token:
-    OS << '\'' << getToken() << '\'';
-    break;
-  case k_ShiftExtend:
-    OS << "<shift: type=" << ShiftExtend.ShiftType
-       << ", amount=" << ShiftExtend.Amount << ">";
-    break;
-  case k_SysReg: {
-    StringRef Name(SysReg.Data, SysReg.Length);
-    OS << "<sysreg: " << Name << '>';
-    break;
-  }
-  default:
-    llvm_unreachable("No idea how to print this kind of operand");
-    break;
-  }
-}
-
-void AArch64Operand::dump() const {
-  print(errs());
-}
-
-
-/// Force static initialization.
-extern "C" void LLVMInitializeAArch64AsmParser() {
-  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
-  RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
-}
-
-#define GET_REGISTER_MATCHER
-#define GET_MATCHER_IMPLEMENTATION
-#include "AArch64GenAsmMatcher.inc"

Removed: llvm/trunk/lib/Target/AArch64/AsmParser/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AsmParser/CMakeLists.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AsmParser/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/AArch64/AsmParser/CMakeLists.txt (removed)
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMAArch64AsmParser
-  AArch64AsmParser.cpp
-  )

Removed: llvm/trunk/lib/Target/AArch64/AsmParser/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AsmParser/LLVMBuild.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AsmParser/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AArch64/AsmParser/LLVMBuild.txt (removed)
@@ -1,23 +0,0 @@
-;===- ./lib/Target/AArch64/AsmParser/LLVMBuild.txt -------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = AArch64AsmParser
-parent = AArch64
-required_libraries = AArch64Desc AArch64Info AArch64Utils MC MCParser Support
-add_to_library_groups = AArch64

Removed: llvm/trunk/lib/Target/AArch64/AsmParser/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AsmParser/Makefile?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AsmParser/Makefile (original)
+++ llvm/trunk/lib/Target/AArch64/AsmParser/Makefile (removed)
@@ -1,15 +0,0 @@
-##===- lib/Target/AArch64/AsmParser/Makefile ---------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64AsmParser
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common

Removed: llvm/trunk/lib/Target/AArch64/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/CMakeLists.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/AArch64/CMakeLists.txt (removed)
@@ -1,37 +0,0 @@
-set(LLVM_TARGET_DEFINITIONS AArch64.td)
-
-tablegen(LLVM AArch64GenAsmMatcher.inc -gen-asm-matcher)
-tablegen(LLVM AArch64GenAsmWriter.inc -gen-asm-writer)
-tablegen(LLVM AArch64GenCallingConv.inc -gen-callingconv)
-tablegen(LLVM AArch64GenDisassemblerTables.inc -gen-disassembler)
-tablegen(LLVM AArch64GenInstrInfo.inc -gen-instr-info)
-tablegen(LLVM AArch64GenMCCodeEmitter.inc -gen-emitter -mc-emitter)
-tablegen(LLVM AArch64GenMCPseudoLowering.inc -gen-pseudo-lowering)
-tablegen(LLVM AArch64GenRegisterInfo.inc -gen-register-info)
-tablegen(LLVM AArch64GenDAGISel.inc -gen-dag-isel)
-tablegen(LLVM AArch64GenSubtargetInfo.inc -gen-subtarget)
-add_public_tablegen_target(AArch64CommonTableGen)
-
-add_llvm_target(AArch64CodeGen
-  AArch64AsmPrinter.cpp
-  AArch64BranchFixupPass.cpp
-  AArch64FrameLowering.cpp
-  AArch64ISelDAGToDAG.cpp
-  AArch64ISelLowering.cpp
-  AArch64InstrInfo.cpp
-  AArch64MachineFunctionInfo.cpp
-  AArch64MCInstLower.cpp
-  AArch64RegisterInfo.cpp
-  AArch64SelectionDAGInfo.cpp
-  AArch64Subtarget.cpp
-  AArch64TargetMachine.cpp
-  AArch64TargetObjectFile.cpp
-  AArch64TargetTransformInfo.cpp
-  )
-
-add_subdirectory(AsmParser)
-add_subdirectory(Disassembler)
-add_subdirectory(InstPrinter)
-add_subdirectory(MCTargetDesc)
-add_subdirectory(TargetInfo)
-add_subdirectory(Utils)

Removed: llvm/trunk/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp (removed)
@@ -1,1572 +0,0 @@
-//===- AArch64Disassembler.cpp - Disassembler for AArch64 ISA -------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the functions necessary to decode AArch64 instruction
-// bitpatterns into MCInsts (with the help of TableGenerated information from
-// the instruction definitions).
-//
-//===----------------------------------------------------------------------===//
-
-#include "AArch64.h"
-#include "AArch64RegisterInfo.h"
-#include "AArch64Subtarget.h"
-#include "Utils/AArch64BaseInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCDisassembler.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCFixedLenDisassembler.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstrDesc.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MemoryObject.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "arm-disassembler"
-
-typedef MCDisassembler::DecodeStatus DecodeStatus;
-
-namespace {
-/// AArch64 disassembler for all AArch64 platforms.
-class AArch64Disassembler : public MCDisassembler {
-public:
-  /// Initializes the disassembler.
-  ///
-  AArch64Disassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
-    : MCDisassembler(STI, Ctx) {
-  }
-
-  ~AArch64Disassembler() {}
-
-  /// See MCDisassembler.
-  DecodeStatus getInstruction(MCInst &instr,
-                              uint64_t &size,
-                              const MemoryObject &region,
-                              uint64_t address,
-                              raw_ostream &vStream,
-                              raw_ostream &cStream) const override;
-};
-
-}
-
-// Forward-declarations used in the auto-generated files.
-static DecodeStatus DecodeGPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                         uint64_t Address, const void *Decoder);
-static DecodeStatus
-DecodeGPR64xspRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                            uint64_t Address, const void *Decoder);
-
-static DecodeStatus DecodeGPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                         uint64_t Address, const void *Decoder);
-static DecodeStatus
-DecodeGPR32wspRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                            uint64_t Address, const void *Decoder);
-
-static DecodeStatus DecodeFPR8RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                         uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeFPR16RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                         uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeFPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                         uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeFPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                         uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeFPR64LoRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                         uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeFPR128RegisterClass(llvm::MCInst &Inst,
-                                              unsigned RegNo, uint64_t Address,
-                                              const void *Decoder);
-static DecodeStatus DecodeFPR128LoRegisterClass(llvm::MCInst &Inst,
-                                                unsigned RegNo, uint64_t Address,
-                                                const void *Decoder);
-
-static DecodeStatus DecodeGPR64noxzrRegisterClass(llvm::MCInst &Inst,
-                                                  unsigned RegNo,
-                                                  uint64_t Address,
-                                                  const void *Decoder);
-
-static DecodeStatus DecodeDPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                             uint64_t Address,
-                                             const void *Decoder);
-static DecodeStatus DecodeQPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                             uint64_t Address,
-                                             const void *Decoder);
-static DecodeStatus DecodeDTripleRegisterClass(llvm::MCInst &Inst,
-                                               unsigned RegNo, uint64_t Address,
-                                               const void *Decoder);
-static DecodeStatus DecodeQTripleRegisterClass(llvm::MCInst &Inst,
-                                               unsigned RegNo, uint64_t Address,
-                                               const void *Decoder);
-static DecodeStatus DecodeDQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                             uint64_t Address,
-                                             const void *Decoder);
-static DecodeStatus DecodeQQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                             uint64_t Address,
-                                             const void *Decoder);
-
-static DecodeStatus DecodeAddrRegExtendOperand(llvm::MCInst &Inst,
-                                               unsigned OptionHiS,
-                                               uint64_t Address,
-                                               const void *Decoder);
-
-
-static DecodeStatus DecodeBitfield32ImmOperand(llvm::MCInst &Inst,
-                                               unsigned Imm6Bits,
-                                               uint64_t Address,
-                                               const void *Decoder);
-
-static DecodeStatus DecodeCVT32FixedPosOperand(llvm::MCInst &Inst,
-                                               unsigned Imm6Bits,
-                                               uint64_t Address,
-                                               const void *Decoder);
-
-static DecodeStatus DecodeFPZeroOperand(llvm::MCInst &Inst,
-                                        unsigned RmBits,
-                                        uint64_t Address,
-                                        const void *Decoder);
-
-static DecodeStatus DecodeShiftRightImm8(MCInst &Inst, unsigned Val,
-                                         uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeShiftRightImm16(MCInst &Inst, unsigned Val,
-                                          uint64_t Address,
-                                          const void *Decoder);
-static DecodeStatus DecodeShiftRightImm32(MCInst &Inst, unsigned Val,
-                                          uint64_t Address,
-                                          const void *Decoder);
-static DecodeStatus DecodeShiftRightImm64(MCInst &Inst, unsigned Val,
-                                          uint64_t Address,
-                                          const void *Decoder);
-
-static DecodeStatus DecodeShiftLeftImm8(MCInst &Inst, unsigned Val,
-                                        uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeShiftLeftImm16(MCInst &Inst, unsigned Val,
-                                         uint64_t Address,
-                                         const void *Decoder);
-static DecodeStatus DecodeShiftLeftImm32(MCInst &Inst, unsigned Val,
-                                         uint64_t Address,
-                                         const void *Decoder);
-static DecodeStatus DecodeShiftLeftImm64(MCInst &Inst, unsigned Val,
-                                         uint64_t Address,
-                                         const void *Decoder);
-
-template<int RegWidth>
-static DecodeStatus DecodeMoveWideImmOperand(llvm::MCInst &Inst,
-                                             unsigned FullImm,
-                                             uint64_t Address,
-                                             const void *Decoder);
-
-template<int RegWidth>
-static DecodeStatus DecodeLogicalImmOperand(llvm::MCInst &Inst,
-                                            unsigned Bits,
-                                            uint64_t Address,
-                                            const void *Decoder);
-
-static DecodeStatus DecodeRegExtendOperand(llvm::MCInst &Inst,
-                                           unsigned ShiftAmount,
-                                           uint64_t Address,
-                                           const void *Decoder);
-template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf>
-static DecodeStatus
-DecodeNeonMovImmShiftOperand(llvm::MCInst &Inst, unsigned ShiftAmount,
-                             uint64_t Address, const void *Decoder);
-
-static DecodeStatus Decode32BitShiftOperand(llvm::MCInst &Inst,
-                                            unsigned ShiftAmount,
-                                            uint64_t Address,
-                                            const void *Decoder);
-static DecodeStatus DecodeBitfieldInstruction(llvm::MCInst &Inst, unsigned Insn,
-                                              uint64_t Address,
-                                              const void *Decoder);
-
-static DecodeStatus DecodeFMOVLaneInstruction(llvm::MCInst &Inst, unsigned Insn,
-                                              uint64_t Address,
-                                              const void *Decoder);
-
-// Forward declarations of the custom decoder hooks referenced by the
-// auto-generated decoder tables included below.
-static DecodeStatus DecodeLDSTPairInstruction(llvm::MCInst &Inst,
-                                              unsigned Insn,
-                                              uint64_t Address,
-                                              const void *Decoder);
-
-static DecodeStatus DecodeLoadPairExclusiveInstruction(llvm::MCInst &Inst,
-                                                       unsigned Val,
-                                                       uint64_t Address,
-                                                       const void *Decoder);
-
-template<typename SomeNamedImmMapper>
-static DecodeStatus DecodeNamedImmOperand(llvm::MCInst &Inst,
-                                          unsigned Val,
-                                          uint64_t Address,
-                                          const void *Decoder);
-
-static DecodeStatus
-DecodeSysRegOperand(const A64SysReg::SysRegMapper &InstMapper,
-                    llvm::MCInst &Inst, unsigned Val,
-                    uint64_t Address, const void *Decoder);
-
-static DecodeStatus DecodeMRSOperand(llvm::MCInst &Inst,
-                                     unsigned Val,
-                                     uint64_t Address,
-                                     const void *Decoder);
-
-static DecodeStatus DecodeMSROperand(llvm::MCInst &Inst,
-                                     unsigned Val,
-                                     uint64_t Address,
-                                     const void *Decoder);
-
-
-static DecodeStatus DecodeSingleIndexedInstruction(llvm::MCInst &Inst,
-                                                   unsigned Val,
-                                                   uint64_t Address,
-                                                   const void *Decoder);
-
-static DecodeStatus DecodeVLDSTPostInstruction(MCInst &Inst, unsigned Val,
-                                               uint64_t Address,
-                                               const void *Decoder);
-
-static DecodeStatus DecodeVLDSTLanePostInstruction(MCInst &Inst, unsigned Insn,
-                                                   uint64_t Address,
-                                                   const void *Decoder);
-
-static DecodeStatus DecodeSHLLInstruction(MCInst &Inst, unsigned Insn,
-                                          uint64_t Address,
-                                          const void *Decoder);
-
-// Status-accumulation helper; must be declared before the generated tables
-// are included, definition follows them.
-static bool Check(DecodeStatus &Out, DecodeStatus In);
-
-#include "AArch64GenDisassemblerTables.inc"
-
-// Folds a freshly-obtained DecodeStatus (In) into the running status (Out).
-// Returns true when decoding may continue (Success or SoftFail) and false on
-// a hard Fail; Out is overwritten for both SoftFail and Fail.
-static bool Check(DecodeStatus &Out, DecodeStatus In) {
-  switch (In) {
-    case MCDisassembler::Success:
-      // Out stays the same.
-      return true;
-    case MCDisassembler::SoftFail:
-      Out = In;
-      return true;
-    case MCDisassembler::Fail:
-      Out = In;
-      return false;
-  }
-  llvm_unreachable("Invalid DecodeStatus!");
-}
-
-// Decodes one fixed-width 32-bit instruction word from Region at Address
-// into MI via the generated A64 decoder table. Sets Size to 4 on any
-// successful read/decode, and to 0 on failure.
-DecodeStatus AArch64Disassembler::getInstruction(MCInst &MI, uint64_t &Size,
-                                                 const MemoryObject &Region,
-                                                 uint64_t Address,
-                                                 raw_ostream &os,
-                                                 raw_ostream &cs) const {
-  CommentStream = &cs;
-
-  uint8_t bytes[4];
-
-  // We want to read exactly 4 bytes of data.
-  if (Region.readBytes(Address, 4, bytes) == -1) {
-    Size = 0;
-    return MCDisassembler::Fail;
-  }
-
-  // Encoded as a little-endian 32-bit word in the stream.
-  uint32_t insn = (bytes[3] << 24) |
-    (bytes[2] << 16) |
-    (bytes[1] <<  8) |
-    (bytes[0] <<  0);
-
-  // Calling the auto-generated decoder function.
-  DecodeStatus result = decodeInstruction(DecoderTableA6432, MI, insn, Address,
-                                          this, STI);
-  if (result != MCDisassembler::Fail) {
-    Size = 4;
-    return result;
-  }
-
-  MI.clear();
-  Size = 0;
-  return MCDisassembler::Fail;
-}
-
-// Maps an encoded register number to the physical register in register
-// class RC, using the MCRegisterInfo owned by the disassembler passed as
-// the opaque Decoder pointer (D).
-static unsigned getReg(const void *D, unsigned RC, unsigned RegNo) {
-  const AArch64Disassembler *Dis = static_cast<const AArch64Disassembler*>(D);
-  const MCRegisterInfo *RegInfo = Dis->getContext().getRegisterInfo();
-  return RegInfo->getRegClass(RC).getRegister(RegNo);
-}
-
-// Register-class decoders. Each validates the encoded register number
-// against the class's range, maps it through getReg() and appends the
-// physical register to the MCInst as a register operand.
-static DecodeStatus DecodeGPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                        uint64_t Address, const void *Decoder) {
-  if (RegNo > 31)
-    return MCDisassembler::Fail;
-
-  uint16_t Register = getReg(Decoder, AArch64::GPR64RegClassID, RegNo);
-  Inst.addOperand(MCOperand::CreateReg(Register));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus
-DecodeGPR64xspRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                            uint64_t Address, const void *Decoder) {
-  if (RegNo > 31)
-    return MCDisassembler::Fail;
-
-  uint16_t Register = getReg(Decoder, AArch64::GPR64xspRegClassID, RegNo);
-  Inst.addOperand(MCOperand::CreateReg(Register));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus DecodeGPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                             uint64_t Address,
-                                             const void *Decoder) {
-  if (RegNo > 31)
-    return MCDisassembler::Fail;
-
-  uint16_t Register = getReg(Decoder, AArch64::GPR32RegClassID, RegNo);
-  Inst.addOperand(MCOperand::CreateReg(Register));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus
-DecodeGPR32wspRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                            uint64_t Address, const void *Decoder) {
-  if (RegNo > 31)
-    return MCDisassembler::Fail;
-
-  uint16_t Register = getReg(Decoder, AArch64::GPR32wspRegClassID, RegNo);
-  Inst.addOperand(MCOperand::CreateReg(Register));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus
-DecodeFPR8RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                            uint64_t Address, const void *Decoder) {
-  if (RegNo > 31)
-    return MCDisassembler::Fail;
-
-  uint16_t Register = getReg(Decoder, AArch64::FPR8RegClassID, RegNo);
-  Inst.addOperand(MCOperand::CreateReg(Register));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus
-DecodeFPR16RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                            uint64_t Address, const void *Decoder) {
-  if (RegNo > 31)
-    return MCDisassembler::Fail;
-
-  uint16_t Register = getReg(Decoder, AArch64::FPR16RegClassID, RegNo);
-  Inst.addOperand(MCOperand::CreateReg(Register));
-  return MCDisassembler::Success;
-}
-
-
-static DecodeStatus
-DecodeFPR32RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                            uint64_t Address, const void *Decoder) {
-  if (RegNo > 31)
-    return MCDisassembler::Fail;
-
-  uint16_t Register = getReg(Decoder, AArch64::FPR32RegClassID, RegNo);
-  Inst.addOperand(MCOperand::CreateReg(Register));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus
-DecodeFPR64RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                            uint64_t Address, const void *Decoder) {
-  if (RegNo > 31)
-    return MCDisassembler::Fail;
-
-  uint16_t Register = getReg(Decoder, AArch64::FPR64RegClassID, RegNo);
-  Inst.addOperand(MCOperand::CreateReg(Register));
-  return MCDisassembler::Success;
-}
-
-// The "Lo" variants accept only encodings 0-15; higher values fail.
-static DecodeStatus
-DecodeFPR64LoRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                            uint64_t Address, const void *Decoder) {
-  if (RegNo > 15)
-    return MCDisassembler::Fail;
-
-  return DecodeFPR64RegisterClass(Inst, RegNo, Address, Decoder);
-}
-
-static DecodeStatus
-DecodeFPR128RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                            uint64_t Address, const void *Decoder) {
-  if (RegNo > 31)
-    return MCDisassembler::Fail;
-
-  uint16_t Register = getReg(Decoder, AArch64::FPR128RegClassID, RegNo);
-  Inst.addOperand(MCOperand::CreateReg(Register));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus
-DecodeFPR128LoRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                            uint64_t Address, const void *Decoder) {
-  if (RegNo > 15)
-    return MCDisassembler::Fail;
-
-  return DecodeFPR128RegisterClass(Inst, RegNo, Address, Decoder);
-}
-
-// Excludes the zero register: only encodings 0-30 are valid here.
-static DecodeStatus DecodeGPR64noxzrRegisterClass(llvm::MCInst &Inst,
-                                                  unsigned RegNo,
-                                                  uint64_t Address,
-                                                  const void *Decoder) {
-  if (RegNo > 30)
-    return MCDisassembler::Fail;
-
-  uint16_t Register = getReg(Decoder, AArch64::GPR64noxzrRegClassID, RegNo);
-  Inst.addOperand(MCOperand::CreateReg(Register));
-  return MCDisassembler::Success;
-}
-
-// Generic helper shared by the vector-list register classes below. Note its
-// third parameter is a register class ID, not the usual Address.
-static DecodeStatus DecodeRegisterClassByID(llvm::MCInst &Inst, unsigned RegNo,
-                                            unsigned RegID,
-                                            const void *Decoder) {
-  if (RegNo > 31)
-    return MCDisassembler::Fail;
-
-  uint16_t Register = getReg(Decoder, RegID, RegNo);
-  Inst.addOperand(MCOperand::CreateReg(Register));
-  return MCDisassembler::Success;
-}
-
-// D/Q pair, triple and quad classes: consecutive-register lists used by the
-// NEON multi-vector load/store instructions.
-static DecodeStatus DecodeDPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                             uint64_t Address,
-                                             const void *Decoder) {
-  return DecodeRegisterClassByID(Inst, RegNo, AArch64::DPairRegClassID,
-                                 Decoder);
-}
-
-static DecodeStatus DecodeQPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                             uint64_t Address,
-                                             const void *Decoder) {
-  return DecodeRegisterClassByID(Inst, RegNo, AArch64::QPairRegClassID,
-                                 Decoder);
-}
-
-static DecodeStatus DecodeDTripleRegisterClass(llvm::MCInst &Inst,
-                                               unsigned RegNo, uint64_t Address,
-                                               const void *Decoder) {
-  return DecodeRegisterClassByID(Inst, RegNo, AArch64::DTripleRegClassID,
-                                 Decoder);
-}
-
-static DecodeStatus DecodeQTripleRegisterClass(llvm::MCInst &Inst,
-                                               unsigned RegNo, uint64_t Address,
-                                               const void *Decoder) {
-  return DecodeRegisterClassByID(Inst, RegNo, AArch64::QTripleRegClassID,
-                                 Decoder);
-}
-
-static DecodeStatus DecodeDQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                             uint64_t Address,
-                                             const void *Decoder) {
-  return DecodeRegisterClassByID(Inst, RegNo, AArch64::DQuadRegClassID,
-                                 Decoder);
-}
-
-static DecodeStatus DecodeQQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
-                                             uint64_t Address,
-                                             const void *Decoder) {
-  return DecodeRegisterClassByID(Inst, RegNo, AArch64::QQuadRegClassID,
-                                 Decoder);
-}
-
-// Decodes the {Option, S} field of a register-offset addressing mode. The
-// raw value is kept as the operand; only the mandatory Option bit is checked.
-static DecodeStatus DecodeAddrRegExtendOperand(llvm::MCInst &Inst,
-                                               unsigned OptionHiS,
-                                               uint64_t Address,
-                                               const void *Decoder) {
-  // Option{1} must be 1. OptionHiS is made up of {Option{2}, Option{1},
-  // S}. Hence we want to check bit 1.
-  if (!(OptionHiS & 2))
-    return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(OptionHiS));
-  return MCDisassembler::Success;
-}
-
-// Decodes the 6-bit immediate of a 32-bit bitfield instruction; the top bit
-// must be clear for the 32-bit variant.
-static DecodeStatus DecodeBitfield32ImmOperand(llvm::MCInst &Inst,
-                                               unsigned Imm6Bits,
-                                               uint64_t Address,
-                                               const void *Decoder) {
-  // In the 32-bit variant, bit 6 must be zero. I.e. the immediate must be
-  // between 0 and 31.
-  if (Imm6Bits > 31)
-    return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(Imm6Bits));
-  return MCDisassembler::Success;
-}
-
-// Decodes the fixed-point position field of a 32-bit convert. The raw
-// encoding is kept; only its valid range is enforced.
-static DecodeStatus DecodeCVT32FixedPosOperand(llvm::MCInst &Inst,
-                                               unsigned Imm6Bits,
-                                               uint64_t Address,
-                                               const void *Decoder) {
-  // 1 <= Imm <= 32. Encoded as 64 - Imm so: 63 >= Encoded >= 32.
-  if (Imm6Bits < 32)
-    return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(Imm6Bits));
-  return MCDisassembler::Success;
-}
-
-// Decodes the "#0.0" pseudo-operand of FP compare-against-zero forms.
-static DecodeStatus DecodeFPZeroOperand(llvm::MCInst &Inst,
-                                        unsigned RmBits,
-                                        uint64_t Address,
-                                        const void *Decoder) {
-  // Any bits are valid in the instruction (they're architecturally ignored),
-  // but a code generator should insert 0.
-  Inst.addOperand(MCOperand::CreateImm(0));
-  return MCDisassembler::Success;
-}
-
-// Right-shift immediates are encoded as (element size - shift), so the
-// decoders below recover the printed amount by subtracting from the width.
-static DecodeStatus DecodeShiftRightImm8(MCInst &Inst, unsigned Val,
-                                         uint64_t Address,
-                                         const void *Decoder) {
-  Inst.addOperand(MCOperand::CreateImm(8 - Val));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus DecodeShiftRightImm16(MCInst &Inst, unsigned Val,
-                                          uint64_t Address,
-                                          const void *Decoder) {
-  Inst.addOperand(MCOperand::CreateImm(16 - Val));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus DecodeShiftRightImm32(MCInst &Inst, unsigned Val,
-                                          uint64_t Address,
-                                          const void *Decoder) {
-  Inst.addOperand(MCOperand::CreateImm(32 - Val));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus DecodeShiftRightImm64(MCInst &Inst, unsigned Val,
-                                          uint64_t Address,
-                                          const void *Decoder) {
-  Inst.addOperand(MCOperand::CreateImm(64 - Val));
-  return MCDisassembler::Success;
-}
-
-// Left-shift immediates are encoded directly; the decoders below only range
-// check against the element width before appending the operand.
-static DecodeStatus DecodeShiftLeftImm8(MCInst &Inst, unsigned Val,
-                                        uint64_t Address,
-                                        const void *Decoder) {
-  if (Val > 7)
-    return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(Val));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus DecodeShiftLeftImm16(MCInst &Inst, unsigned Val,
-                                         uint64_t Address,
-                                         const void *Decoder) {
-  if (Val > 15)
-    return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(Val));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus DecodeShiftLeftImm32(MCInst &Inst, unsigned Val,
-                                         uint64_t Address,
-                                         const void *Decoder) {
-  if (Val > 31)
-    return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(Val));
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus DecodeShiftLeftImm64(MCInst &Inst, unsigned Val,
-                                         uint64_t Address,
-                                         const void *Decoder) {
-  if (Val > 63)
-    return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(Val));
-  return MCDisassembler::Success;
-}
-
-// Decodes the combined {shift, imm16} field of MOVZ/MOVN/MOVK into two
-// MCInst operands. For 32-bit registers only shifts 0 and 16 are valid.
-template<int RegWidth>
-static DecodeStatus DecodeMoveWideImmOperand(llvm::MCInst &Inst,
-                                             unsigned FullImm,
-                                             uint64_t Address,
-                                             const void *Decoder) {
-  unsigned Imm16 = FullImm & 0xffff;
-  unsigned Shift = FullImm >> 16;
-
-  if (RegWidth == 32 && Shift > 1) return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(Imm16));
-  Inst.addOperand(MCOperand::CreateImm(Shift));
-  return MCDisassembler::Success;
-}
-
-// Validates a logical-immediate encoding for RegWidth and appends the raw
-// encoded bits (not the expanded value) as the operand.
-template<int RegWidth>
-static DecodeStatus DecodeLogicalImmOperand(llvm::MCInst &Inst,
-                                            unsigned Bits,
-                                            uint64_t Address,
-                                            const void *Decoder) {
-  uint64_t Imm;
-  // Imm is only used to check that the encoding is expressible.
-  if (!A64Imms::isLogicalImmBits(RegWidth, Bits, Imm))
-    return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(Bits));
-  return MCDisassembler::Success;
-}
-
-
-// Decodes the shift amount accompanying a register-extend operand.
-static DecodeStatus DecodeRegExtendOperand(llvm::MCInst &Inst,
-                                           unsigned ShiftAmount,
-                                           uint64_t Address,
-                                           const void *Decoder) {
-  // Only values 0-4 are valid for this 3-bit field
-  if (ShiftAmount > 4)
-    return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(ShiftAmount));
-  return MCDisassembler::Success;
-}
-
-// Decodes the shift amount of a shifted-register operand in 32-bit forms.
-static DecodeStatus Decode32BitShiftOperand(llvm::MCInst &Inst,
-                                            unsigned ShiftAmount,
-                                            uint64_t Address,
-                                            const void *Decoder) {
-  // Only values below 32 are valid for a 32-bit register
-  if (ShiftAmount > 31)
-    return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(ShiftAmount));
-  return MCDisassembler::Success;
-}
-
-// Decodes the generic SBFM/BFM/UBFM encodings into the specific alias
-// MCInst that LLVM models (LSL, SBFX/UBFX/BFXIL or SBFIZ/UBFIZ/BFI),
-// choosing the opcode from SF, Opc, ImmR and ImmS.
-static DecodeStatus DecodeBitfieldInstruction(llvm::MCInst &Inst, unsigned Insn,
-                                              uint64_t Address,
-                                              const void *Decoder) {
-  unsigned Rd = fieldFromInstruction(Insn, 0, 5);
-  unsigned Rn = fieldFromInstruction(Insn, 5, 5);
-  unsigned ImmS = fieldFromInstruction(Insn, 10, 6);
-  unsigned ImmR = fieldFromInstruction(Insn, 16, 6);
-  unsigned SF = fieldFromInstruction(Insn, 31, 1);
-
-  // Undef for 0b11 just in case it occurs. Don't want the compiler to optimise
-  // out assertions that it thinks should never be hit.
-  enum OpcTypes { SBFM = 0, BFM, UBFM, Undef } Opc;
-  Opc = (OpcTypes)fieldFromInstruction(Insn, 29, 2);
-
-  if (!SF) {
-    // ImmR and ImmS must be between 0 and 31 for 32-bit instructions.
-    if (ImmR > 31 || ImmS > 31)
-      return MCDisassembler::Fail;
-  }
-
-  if (SF) {
-    DecodeGPR64RegisterClass(Inst, Rd, Address, Decoder);
-    // BFM MCInsts use Rd as a source too.
-    if (Opc == BFM) DecodeGPR64RegisterClass(Inst, Rd, Address, Decoder);
-    DecodeGPR64RegisterClass(Inst, Rn, Address, Decoder);
-  } else {
-    DecodeGPR32RegisterClass(Inst, Rd, Address, Decoder);
-    // BFM MCInsts use Rd as a source too.
-    if (Opc == BFM) DecodeGPR32RegisterClass(Inst, Rd, Address, Decoder);
-    DecodeGPR32RegisterClass(Inst, Rn, Address, Decoder);
-  }
-
-  // ASR and LSR have more specific patterns so they won't get here:
-  assert(!(ImmS == 31 && !SF && Opc != BFM)
-         && "shift should have used auto decode");
-  assert(!(ImmS == 63 && SF && Opc != BFM)
-         && "shift should have used auto decode");
-
-  // Extension instructions similarly:
-  if (Opc == SBFM && ImmR == 0) {
-    assert((ImmS != 7 && ImmS != 15) && "extension got here");
-    assert((ImmS != 31 || SF == 0) && "extension got here");
-  } else if (Opc == UBFM && ImmR == 0) {
-    assert((SF != 0 || (ImmS != 7 && ImmS != 15)) && "extension got here");
-  }
-
-  if (Opc == UBFM) {
-    // It might be a LSL instruction, which actually takes the shift amount
-    // itself as an MCInst operand.
-    if (SF && (ImmS + 1) % 64 == ImmR) {
-      Inst.setOpcode(AArch64::LSLxxi);
-      Inst.addOperand(MCOperand::CreateImm(63 - ImmS));
-      return MCDisassembler::Success;
-    } else if (!SF && (ImmS + 1) % 32 == ImmR) {
-      Inst.setOpcode(AArch64::LSLwwi);
-      Inst.addOperand(MCOperand::CreateImm(31 - ImmS));
-      return MCDisassembler::Success;
-    }
-  }
-
-  // Otherwise it's definitely either an extract or an insert depending on which
-  // of ImmR or ImmS is larger.
-  unsigned ExtractOp, InsertOp;
-  switch (Opc) {
-  default: llvm_unreachable("unexpected instruction trying to decode bitfield");
-  case SBFM:
-    ExtractOp = SF ? AArch64::SBFXxxii : AArch64::SBFXwwii;
-    InsertOp = SF ? AArch64::SBFIZxxii : AArch64::SBFIZwwii;
-    break;
-  case BFM:
-    ExtractOp = SF ? AArch64::BFXILxxii : AArch64::BFXILwwii;
-    InsertOp = SF ? AArch64::BFIxxii : AArch64::BFIwwii;
-    break;
-  case UBFM:
-    ExtractOp = SF ? AArch64::UBFXxxii : AArch64::UBFXwwii;
-    InsertOp = SF ? AArch64::UBFIZxxii : AArch64::UBFIZwwii;
-    break;
-  }
-
-  // Otherwise it's a boring insert or extract
-  Inst.addOperand(MCOperand::CreateImm(ImmR));
-  Inst.addOperand(MCOperand::CreateImm(ImmS));
-
-
-  if (ImmS < ImmR)
-    Inst.setOpcode(InsertOp);
-  else
-    Inst.setOpcode(ExtractOp);
-
-  return MCDisassembler::Success;
-}
-
-// Decodes FMOV to/from vector element: picks source/destination register
-// classes from the direction bit and appends the implicit lane-1 operand.
-static DecodeStatus DecodeFMOVLaneInstruction(llvm::MCInst &Inst, unsigned Insn,
-                                              uint64_t Address,
-                                              const void *Decoder) {
-  // This decoder exists to add the dummy Lane operand to the MCInst, which must
-  // be 1 in assembly but has no other real manifestation.
-  unsigned Rd = fieldFromInstruction(Insn, 0, 5);
-  unsigned Rn = fieldFromInstruction(Insn, 5, 5);
-  unsigned IsToVec = fieldFromInstruction(Insn, 16, 1);
-
-  if (IsToVec) {
-    DecodeFPR128RegisterClass(Inst, Rd, Address, Decoder);
-    DecodeGPR64RegisterClass(Inst, Rn, Address, Decoder);
-  } else {
-    DecodeGPR64RegisterClass(Inst, Rd, Address, Decoder);
-    DecodeFPR128RegisterClass(Inst, Rn, Address, Decoder);
-  }
-
-  // Add the lane
-  Inst.addOperand(MCOperand::CreateImm(1));
-
-  return MCDisassembler::Success;
-}
-
-
-// Decodes load/store-pair instructions (LDP/STP and friends), handling the
-// extra write-back operand for indexed forms and flagging architecturally
-// unpredictable register combinations as SoftFail.
-static DecodeStatus DecodeLDSTPairInstruction(llvm::MCInst &Inst,
-                                              unsigned Insn,
-                                              uint64_t Address,
-                                              const void *Decoder) {
-  DecodeStatus Result = MCDisassembler::Success;
-  unsigned Rt = fieldFromInstruction(Insn, 0, 5);
-  unsigned Rn = fieldFromInstruction(Insn, 5, 5);
-  unsigned Rt2 = fieldFromInstruction(Insn, 10, 5);
-  unsigned SImm7 = fieldFromInstruction(Insn, 15, 7);
-  unsigned L = fieldFromInstruction(Insn, 22, 1);
-  unsigned V = fieldFromInstruction(Insn, 26, 1);
-  unsigned Opc = fieldFromInstruction(Insn, 30, 2);
-
-  // Not an official name, but it turns out that bit 23 distinguishes indexed
-  // from non-indexed operations.
-  unsigned Indexed = fieldFromInstruction(Insn, 23, 1);
-
-  if (Indexed && L == 0) {
-    // The MCInst for an indexed store has an out operand and 4 ins:
-    //    Rn_wb, Rt, Rt2, Rn, Imm
-    DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-  }
-
-  // You shouldn't load to the same register twice in an instruction...
-  if (L && Rt == Rt2)
-    Result = MCDisassembler::SoftFail;
-
-  // ... or do any operation that writes-back to a transfer register. But note
-  // that "stp xzr, xzr, [sp], #4" is fine because xzr and sp are different.
-  if (Indexed && V == 0 && Rn != 31 && (Rt == Rn || Rt2 == Rn))
-    Result = MCDisassembler::SoftFail;
-
-  // Exactly how we decode the MCInst's registers depends on the Opc and V
-  // fields of the instruction. These also obviously determine the size of the
-  // operation so we can fill in that information while we're at it.
-  if (V) {
-    // The instruction operates on the FP/SIMD registers
-    switch (Opc) {
-    default: return MCDisassembler::Fail;
-    case 0:
-      DecodeFPR32RegisterClass(Inst, Rt, Address, Decoder);
-      DecodeFPR32RegisterClass(Inst, Rt2, Address, Decoder);
-      break;
-    case 1:
-      DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
-      DecodeFPR64RegisterClass(Inst, Rt2, Address, Decoder);
-      break;
-    case 2:
-      DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
-      DecodeFPR128RegisterClass(Inst, Rt2, Address, Decoder);
-      break;
-    }
-  } else {
-    switch (Opc) {
-    default: return MCDisassembler::Fail;
-    case 0:
-      DecodeGPR32RegisterClass(Inst, Rt, Address, Decoder);
-      DecodeGPR32RegisterClass(Inst, Rt2, Address, Decoder);
-      break;
-    case 1:
-      assert(L && "unexpected \"store signed\" attempt");
-      DecodeGPR64RegisterClass(Inst, Rt, Address, Decoder);
-      DecodeGPR64RegisterClass(Inst, Rt2, Address, Decoder);
-      break;
-    case 2:
-      DecodeGPR64RegisterClass(Inst, Rt, Address, Decoder);
-      DecodeGPR64RegisterClass(Inst, Rt2, Address, Decoder);
-      break;
-    }
-  }
-
-  if (Indexed && L == 1) {
-    // The MCInst for an indexed load has 3 out operands and its ins:
-    //    Rt, Rt2, Rn_wb, Rn, Imm
-    DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-  }
-
-
-  DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-  Inst.addOperand(MCOperand::CreateImm(SImm7));
-
-  return Result;
-}
-
-// Decodes LDXP/LDAXP: chooses 32/64-bit transfer registers from MemSize and
-// marks Rt == Rt2 as SoftFail (architecturally unpredictable).
-static DecodeStatus DecodeLoadPairExclusiveInstruction(llvm::MCInst &Inst,
-                                                       uint32_t Val,
-                                                       uint64_t Address,
-                                                       const void *Decoder) {
-  unsigned Rt = fieldFromInstruction(Val, 0, 5);
-  unsigned Rn = fieldFromInstruction(Val, 5, 5);
-  unsigned Rt2 = fieldFromInstruction(Val, 10, 5);
-  unsigned MemSize = fieldFromInstruction(Val, 30, 2);
-
-  DecodeStatus S = MCDisassembler::Success;
-  if (Rt == Rt2) S = MCDisassembler::SoftFail;
-
-  switch (MemSize) {
-    case 2:
-      if (!Check(S, DecodeGPR32RegisterClass(Inst, Rt, Address, Decoder)))
-        return MCDisassembler::Fail;
-      if (!Check(S, DecodeGPR32RegisterClass(Inst, Rt2, Address, Decoder)))
-        return MCDisassembler::Fail;
-      break;
-    case 3:
-      if (!Check(S, DecodeGPR64RegisterClass(Inst, Rt, Address, Decoder)))
-        return MCDisassembler::Fail;
-      if (!Check(S, DecodeGPR64RegisterClass(Inst, Rt2, Address, Decoder)))
-        return MCDisassembler::Fail;
-      break;
-    default:
-      llvm_unreachable("Invalid MemSize in DecodeLoadPairExclusiveInstruction");
-  }
-
-  if (!Check(S, DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder)))
-    return MCDisassembler::Fail;
-
-  return S;
-}
-
-// Decodes an immediate that has symbolic names (e.g. barrier or prefetch
-// options): accepted if the mapper knows a name for it or otherwise deems
-// the raw value valid.
-template<typename SomeNamedImmMapper>
-static DecodeStatus DecodeNamedImmOperand(llvm::MCInst &Inst,
-                                          unsigned Val,
-                                          uint64_t Address,
-                                          const void *Decoder) {
-  SomeNamedImmMapper Mapper;
-  bool ValidNamed;
-  // toString is called only for its ValidNamed side-channel here.
-  Mapper.toString(Val, ValidNamed);
-  if (ValidNamed || Mapper.validImm(Val)) {
-    Inst.addOperand(MCOperand::CreateImm(Val));
-    return MCDisassembler::Success;
-  }
-
-  return MCDisassembler::Fail;
-}
-
-// Decodes a system-register encoding; the operand is always appended, but
-// the decode only succeeds if the mapper recognises the register.
-static DecodeStatus DecodeSysRegOperand(const A64SysReg::SysRegMapper &Mapper,
-                                        llvm::MCInst &Inst,
-                                        unsigned Val,
-                                        uint64_t Address,
-                                        const void *Decoder) {
-  bool ValidNamed;
-  Mapper.toString(Val, ValidNamed);
-
-  Inst.addOperand(MCOperand::CreateImm(Val));
-
-  return ValidNamed ? MCDisassembler::Success : MCDisassembler::Fail;
-}
-
-// Thin wrappers selecting the read (MRS) or write (MSR) system-register
-// name table for DecodeSysRegOperand.
-static DecodeStatus DecodeMRSOperand(llvm::MCInst &Inst,
-                                     unsigned Val,
-                                     uint64_t Address,
-                                     const void *Decoder) {
-  return DecodeSysRegOperand(A64SysReg::MRSMapper(), Inst, Val, Address,
-                             Decoder);
-}
-
-static DecodeStatus DecodeMSROperand(llvm::MCInst &Inst,
-                                     unsigned Val,
-                                     uint64_t Address,
-                                     const void *Decoder) {
-  return DecodeSysRegOperand(A64SysReg::MSRMapper(), Inst, Val, Address,
-                             Decoder);
-}
-
-// Decodes pre/post-indexed single-register load/store instructions,
-// inserting the write-back base operand in the position MCInst expects
-// (before the transfer register for stores, after it for loads).
-static DecodeStatus DecodeSingleIndexedInstruction(llvm::MCInst &Inst,
-                                                   unsigned Insn,
-                                                   uint64_t Address,
-                                                   const void *Decoder) {
-  unsigned Rt = fieldFromInstruction(Insn, 0, 5);
-  unsigned Rn = fieldFromInstruction(Insn, 5, 5);
-  unsigned Imm9 = fieldFromInstruction(Insn, 12, 9);
-
-  unsigned Opc = fieldFromInstruction(Insn, 22, 2);
-  unsigned V = fieldFromInstruction(Insn, 26, 1);
-  unsigned Size = fieldFromInstruction(Insn, 30, 2);
-
-  if (Opc == 0 || (V == 1 && Opc == 2)) {
-    // It's a store, the MCInst gets: Rn_wb, Rt, Rn, Imm
-    DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-  }
-
-  if (V == 0 && (Opc == 2 || Size == 3)) {
-    DecodeGPR64RegisterClass(Inst, Rt, Address, Decoder);
-  } else if (V == 0) {
-    DecodeGPR32RegisterClass(Inst, Rt, Address, Decoder);
-  } else if (V == 1 && (Opc & 2)) {
-    DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
-  } else {
-    switch (Size) {
-    case 0:
-      DecodeFPR8RegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 1:
-      DecodeFPR16RegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 2:
-      DecodeFPR32RegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 3:
-      DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    }
-  }
-
-  if (Opc != 0 && (V != 1 || Opc != 2)) {
-    // It's a load, the MCInst gets: Rt, Rn_wb, Rn, Imm
-    DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-  }
-
-  DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-
-  Inst.addOperand(MCOperand::CreateImm(Imm9));
-
-  // N.b. The official documentation says unpredictable if Rt == Rn, but this
-  // takes place at the architectural rather than encoding level:
-  //
-  // "STR xzr, [sp], #4" is perfectly valid.
-  if (V == 0 && Rt == Rn && Rn != 31)
-    return MCDisassembler::SoftFail;
-  else
-    return MCDisassembler::Success;
-}
-
-// Factory used by the target registry below.
-static MCDisassembler *createAArch64Disassembler(const Target &T,
-                                                 const MCSubtargetInfo &STI,
-                                                 MCContext &Ctx) {
-  return new AArch64Disassembler(STI, Ctx);
-}
-
-// Registers the disassembler factory for both the little- and big-endian
-// AArch64 targets.
-extern "C" void LLVMInitializeAArch64Disassembler() {
-  TargetRegistry::RegisterMCDisassembler(TheAArch64leTarget,
-                                         createAArch64Disassembler);
-  TargetRegistry::RegisterMCDisassembler(TheAArch64beTarget,
-                                         createAArch64Disassembler);
-}
-
-// Decodes the shift amount attached to a NEON modified-immediate (MOVI and
-// friends); the template parameters select LSL vs MSL and the half-width
-// (16-bit element) restriction.
-template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf>
-static DecodeStatus
-DecodeNeonMovImmShiftOperand(llvm::MCInst &Inst, unsigned ShiftAmount,
-                             uint64_t Address, const void *Decoder) {
-  bool IsLSL = false;
-  if (Ext == A64SE::LSL)
-    IsLSL = true;
-  else if (Ext != A64SE::MSL)
-    return MCDisassembler::Fail;
-
-  // MSL and LSLH accepts encoded shift amount 0 or 1.
-  if ((!IsLSL || (IsLSL && IsHalf)) && ShiftAmount != 0 && ShiftAmount != 1)
-    return MCDisassembler::Fail;
-
-  // LSL  accepts encoded shift amount 0, 1, 2 or 3.
-  if (IsLSL && ShiftAmount > 3)
-    return MCDisassembler::Fail;
-
-  Inst.addOperand(MCOperand::CreateImm(ShiftAmount));
-  return MCDisassembler::Success;
-}
-
-// Decode post-index vector load/store instructions.
-// This is necessary as we need to decode Rm: if Rm == 0b11111, the last
-// operand is an immediate equal the the length of vector list in bytes,
-// or Rm is decoded to a GPR64noxzr register.
-static DecodeStatus DecodeVLDSTPostInstruction(MCInst &Inst, unsigned Insn,
-                                               uint64_t Address,
-                                               const void *Decoder) {
-  unsigned Rt = fieldFromInstruction(Insn, 0, 5);
-  unsigned Rn = fieldFromInstruction(Insn, 5, 5);
-  unsigned Rm = fieldFromInstruction(Insn, 16, 5);
-  unsigned Opcode = fieldFromInstruction(Insn, 12, 4);
-  unsigned IsLoad = fieldFromInstruction(Insn, 22, 1);
-  // 0 for 64bit vector list, 1 for 128bit vector list
-  unsigned Is128BitVec = fieldFromInstruction(Insn, 30, 1);
-
-  unsigned NumVecs;
-  switch (Opcode) {
-  case 0: // ld4/st4
-  case 2: // ld1/st1 with 4 vectors
-    NumVecs = 4; break;
-  case 4: // ld3/st3
-  case 6: // ld1/st1 with 3 vectors
-    NumVecs = 3; break;
-  case 7: // ld1/st1 with 1 vector
-    NumVecs = 1; break;
-  case 8:  // ld2/st2
-  case 10: // ld1/st1 with 2 vectors
-    NumVecs = 2; break;
-  default:
-    llvm_unreachable("Invalid opcode for post-index load/store instructions");
-  }
-
-  // Decode vector list of 1/2/3/4 vectors for load instructions.
-  if (IsLoad) {
-    switch (NumVecs) {
-    case 1:
-      Is128BitVec ? DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder)
-                  : DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 2:
-      Is128BitVec ? DecodeQPairRegisterClass(Inst, Rt, Address, Decoder)
-                  : DecodeDPairRegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 3:
-      Is128BitVec ? DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder)
-                  : DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 4:
-      Is128BitVec ? DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder)
-                  : DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    }
-  }
-
-  // Decode write back register, which is equal to Rn.
-  DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-  DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-
-  if (Rm == 31) // If Rm is 0b11111, add the vector list length in bytes
-    Inst.addOperand(MCOperand::CreateImm(NumVecs * (Is128BitVec ? 16 : 8)));
-  else // Decode Rm
-    DecodeGPR64noxzrRegisterClass(Inst, Rm, Address, Decoder);
-
-  // Decode vector list of 1/2/3/4 vectors for load instructions.
-  if (!IsLoad) {
-    switch (NumVecs) {
-    case 1:
-      Is128BitVec ? DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder)
-                  : DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 2:
-      Is128BitVec ? DecodeQPairRegisterClass(Inst, Rt, Address, Decoder)
-                  : DecodeDPairRegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 3:
-      Is128BitVec ? DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder)
-                  : DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 4:
-      Is128BitVec ? DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder)
-                  : DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    }
-  }
-
-  return MCDisassembler::Success;
-}
-
-// Decode post-index vector load/store lane instructions.
-// This is necessary as we need to decode Rm: if Rm == 0b11111, the last
-// operand is an immediate equal to the number of changed bytes,
-// or Rm is decoded to a GPR64noxzr register.
-static DecodeStatus DecodeVLDSTLanePostInstruction(MCInst &Inst, unsigned Insn,
-                                                   uint64_t Address,
-                                                   const void *Decoder) {
-  bool Is64bitVec = false;
-  bool IsLoadDup = false;
-  bool IsLoad = false;
-  // The total number of bytes transferred.
-  // TransferBytes = NumVecs * OneLaneBytes
-  unsigned TransferBytes = 0;
-  unsigned NumVecs = 0;
-  unsigned Opc = Inst.getOpcode();
-  switch (Opc) {
-  case AArch64::LD1R_WB_8B_fixed: case AArch64::LD1R_WB_8B_register:
-  case AArch64::LD1R_WB_4H_fixed: case AArch64::LD1R_WB_4H_register:
-  case AArch64::LD1R_WB_2S_fixed: case AArch64::LD1R_WB_2S_register:
-  case AArch64::LD1R_WB_1D_fixed: case AArch64::LD1R_WB_1D_register: {
-    switch (Opc) {
-    case AArch64::LD1R_WB_8B_fixed: case AArch64::LD1R_WB_8B_register:
-      TransferBytes = 1; break;
-    case AArch64::LD1R_WB_4H_fixed: case AArch64::LD1R_WB_4H_register:
-      TransferBytes = 2; break;
-    case AArch64::LD1R_WB_2S_fixed: case AArch64::LD1R_WB_2S_register:
-      TransferBytes = 4; break;
-    case AArch64::LD1R_WB_1D_fixed: case AArch64::LD1R_WB_1D_register:
-      TransferBytes = 8; break;
-    }
-    Is64bitVec = true;
-    IsLoadDup = true;
-    NumVecs = 1;
-    break;
-  }
-
-  case AArch64::LD1R_WB_16B_fixed: case AArch64::LD1R_WB_16B_register:
-  case AArch64::LD1R_WB_8H_fixed: case AArch64::LD1R_WB_8H_register:
-  case AArch64::LD1R_WB_4S_fixed: case AArch64::LD1R_WB_4S_register:
-  case AArch64::LD1R_WB_2D_fixed: case AArch64::LD1R_WB_2D_register: {
-    switch (Opc) {
-    case AArch64::LD1R_WB_16B_fixed: case AArch64::LD1R_WB_16B_register:
-      TransferBytes = 1; break;
-    case AArch64::LD1R_WB_8H_fixed: case AArch64::LD1R_WB_8H_register:
-      TransferBytes = 2; break;
-    case AArch64::LD1R_WB_4S_fixed: case AArch64::LD1R_WB_4S_register:
-      TransferBytes = 4; break;
-    case AArch64::LD1R_WB_2D_fixed: case AArch64::LD1R_WB_2D_register:
-      TransferBytes = 8; break;
-    }
-    IsLoadDup = true;
-    NumVecs = 1;
-    break;
-  }
-
-  case AArch64::LD2R_WB_8B_fixed: case AArch64::LD2R_WB_8B_register:
-  case AArch64::LD2R_WB_4H_fixed: case AArch64::LD2R_WB_4H_register:
-  case AArch64::LD2R_WB_2S_fixed: case AArch64::LD2R_WB_2S_register:
-  case AArch64::LD2R_WB_1D_fixed: case AArch64::LD2R_WB_1D_register: {
-    switch (Opc) {
-    case AArch64::LD2R_WB_8B_fixed: case AArch64::LD2R_WB_8B_register:
-      TransferBytes = 2; break;
-    case AArch64::LD2R_WB_4H_fixed: case AArch64::LD2R_WB_4H_register:
-      TransferBytes = 4; break;
-    case AArch64::LD2R_WB_2S_fixed: case AArch64::LD2R_WB_2S_register:
-      TransferBytes = 8; break;
-    case AArch64::LD2R_WB_1D_fixed: case AArch64::LD2R_WB_1D_register:
-      TransferBytes = 16; break;
-    }
-    Is64bitVec = true;
-    IsLoadDup = true;
-    NumVecs = 2;
-    break;
-  }
-
-  case AArch64::LD2R_WB_16B_fixed: case AArch64::LD2R_WB_16B_register:
-  case AArch64::LD2R_WB_8H_fixed: case AArch64::LD2R_WB_8H_register:
-  case AArch64::LD2R_WB_4S_fixed: case AArch64::LD2R_WB_4S_register:
-  case AArch64::LD2R_WB_2D_fixed: case AArch64::LD2R_WB_2D_register: {
-    switch (Opc) {
-    case AArch64::LD2R_WB_16B_fixed: case AArch64::LD2R_WB_16B_register:
-      TransferBytes = 2; break;
-    case AArch64::LD2R_WB_8H_fixed: case AArch64::LD2R_WB_8H_register:
-      TransferBytes = 4; break;
-    case AArch64::LD2R_WB_4S_fixed: case AArch64::LD2R_WB_4S_register:
-      TransferBytes = 8; break;
-    case AArch64::LD2R_WB_2D_fixed: case AArch64::LD2R_WB_2D_register:
-      TransferBytes = 16; break;
-    }
-    IsLoadDup = true;
-    NumVecs = 2;
-    break;
-  }
-
-  case AArch64::LD3R_WB_8B_fixed: case AArch64::LD3R_WB_8B_register:
-  case AArch64::LD3R_WB_4H_fixed: case AArch64::LD3R_WB_4H_register:
-  case AArch64::LD3R_WB_2S_fixed: case AArch64::LD3R_WB_2S_register:
-  case AArch64::LD3R_WB_1D_fixed: case AArch64::LD3R_WB_1D_register: {
-    switch (Opc) {
-    case AArch64::LD3R_WB_8B_fixed: case AArch64::LD3R_WB_8B_register:
-      TransferBytes = 3; break;
-    case AArch64::LD3R_WB_4H_fixed: case AArch64::LD3R_WB_4H_register:
-      TransferBytes = 6; break;
-    case AArch64::LD3R_WB_2S_fixed: case AArch64::LD3R_WB_2S_register:
-      TransferBytes = 12; break;
-    case AArch64::LD3R_WB_1D_fixed: case AArch64::LD3R_WB_1D_register:
-      TransferBytes = 24; break;
-    }
-    Is64bitVec = true;
-    IsLoadDup = true;
-    NumVecs = 3;
-    break;
-  }
-
-  case AArch64::LD3R_WB_16B_fixed: case AArch64::LD3R_WB_16B_register:
-  case AArch64::LD3R_WB_4S_fixed: case AArch64::LD3R_WB_8H_register:
-  case AArch64::LD3R_WB_8H_fixed: case AArch64::LD3R_WB_4S_register:
-  case AArch64::LD3R_WB_2D_fixed: case AArch64::LD3R_WB_2D_register: {
-    switch (Opc) {
-    case AArch64::LD3R_WB_16B_fixed: case AArch64::LD3R_WB_16B_register:
-      TransferBytes = 3; break;
-    case AArch64::LD3R_WB_8H_fixed: case AArch64::LD3R_WB_8H_register:
-      TransferBytes = 6; break;
-    case AArch64::LD3R_WB_4S_fixed: case AArch64::LD3R_WB_4S_register:
-      TransferBytes = 12; break;
-    case AArch64::LD3R_WB_2D_fixed: case AArch64::LD3R_WB_2D_register:
-      TransferBytes = 24; break;
-    }
-    IsLoadDup = true;
-    NumVecs = 3;
-    break;
-  }
-
-  case AArch64::LD4R_WB_8B_fixed: case AArch64::LD4R_WB_8B_register:
-  case AArch64::LD4R_WB_4H_fixed: case AArch64::LD4R_WB_4H_register:
-  case AArch64::LD4R_WB_2S_fixed: case AArch64::LD4R_WB_2S_register:
-  case AArch64::LD4R_WB_1D_fixed: case AArch64::LD4R_WB_1D_register: {
-    switch (Opc) {
-    case AArch64::LD4R_WB_8B_fixed: case AArch64::LD4R_WB_8B_register:
-      TransferBytes = 4; break;
-    case AArch64::LD4R_WB_4H_fixed: case AArch64::LD4R_WB_4H_register:
-      TransferBytes = 8; break;
-    case AArch64::LD4R_WB_2S_fixed: case AArch64::LD4R_WB_2S_register:
-      TransferBytes = 16; break;
-    case AArch64::LD4R_WB_1D_fixed: case AArch64::LD4R_WB_1D_register:
-      TransferBytes = 32; break;
-    }
-    Is64bitVec = true;
-    IsLoadDup = true;
-    NumVecs = 4;
-    break;
-  }
-
-  case AArch64::LD4R_WB_16B_fixed: case AArch64::LD4R_WB_16B_register:
-  case AArch64::LD4R_WB_4S_fixed: case AArch64::LD4R_WB_8H_register:
-  case AArch64::LD4R_WB_8H_fixed: case AArch64::LD4R_WB_4S_register:
-  case AArch64::LD4R_WB_2D_fixed: case AArch64::LD4R_WB_2D_register: {
-    switch (Opc) {
-    case AArch64::LD4R_WB_16B_fixed: case AArch64::LD4R_WB_16B_register:
-      TransferBytes = 4; break;
-    case AArch64::LD4R_WB_8H_fixed: case AArch64::LD4R_WB_8H_register:
-      TransferBytes = 8; break;
-    case AArch64::LD4R_WB_4S_fixed: case AArch64::LD4R_WB_4S_register:
-      TransferBytes = 16; break;
-    case AArch64::LD4R_WB_2D_fixed: case AArch64::LD4R_WB_2D_register:
-      TransferBytes = 32; break;
-    }
-    IsLoadDup = true;
-    NumVecs = 4;
-    break;
-  }
-
-  case AArch64::LD1LN_WB_B_fixed: case AArch64::LD1LN_WB_B_register:
-  case AArch64::LD1LN_WB_H_fixed: case AArch64::LD1LN_WB_H_register:
-  case AArch64::LD1LN_WB_S_fixed: case AArch64::LD1LN_WB_S_register:
-  case AArch64::LD1LN_WB_D_fixed: case AArch64::LD1LN_WB_D_register: {
-    switch (Opc) {
-    case AArch64::LD1LN_WB_B_fixed: case AArch64::LD1LN_WB_B_register:
-      TransferBytes = 1; break;
-    case AArch64::LD1LN_WB_H_fixed: case AArch64::LD1LN_WB_H_register:
-      TransferBytes = 2; break;
-    case AArch64::LD1LN_WB_S_fixed: case AArch64::LD1LN_WB_S_register:
-      TransferBytes = 4; break;
-    case AArch64::LD1LN_WB_D_fixed: case AArch64::LD1LN_WB_D_register:
-      TransferBytes = 8; break;
-    }
-    IsLoad = true;
-    NumVecs = 1;
-    break;
-  }
-
-  case AArch64::LD2LN_WB_B_fixed: case AArch64::LD2LN_WB_B_register:
-  case AArch64::LD2LN_WB_H_fixed: case AArch64::LD2LN_WB_H_register:
-  case AArch64::LD2LN_WB_S_fixed: case AArch64::LD2LN_WB_S_register:
-  case AArch64::LD2LN_WB_D_fixed: case AArch64::LD2LN_WB_D_register: {
-    switch (Opc) {
-    case AArch64::LD2LN_WB_B_fixed: case AArch64::LD2LN_WB_B_register:
-      TransferBytes = 2; break;
-    case AArch64::LD2LN_WB_H_fixed: case AArch64::LD2LN_WB_H_register:
-      TransferBytes = 4; break;
-    case AArch64::LD2LN_WB_S_fixed: case AArch64::LD2LN_WB_S_register:
-      TransferBytes = 8; break;
-    case AArch64::LD2LN_WB_D_fixed: case AArch64::LD2LN_WB_D_register:
-      TransferBytes = 16; break;
-    }
-    IsLoad = true;
-    NumVecs = 2;
-    break;
-  }
-
-  case AArch64::LD3LN_WB_B_fixed: case AArch64::LD3LN_WB_B_register:
-  case AArch64::LD3LN_WB_H_fixed: case AArch64::LD3LN_WB_H_register:
-  case AArch64::LD3LN_WB_S_fixed: case AArch64::LD3LN_WB_S_register:
-  case AArch64::LD3LN_WB_D_fixed: case AArch64::LD3LN_WB_D_register: {
-    switch (Opc) {
-    case AArch64::LD3LN_WB_B_fixed: case AArch64::LD3LN_WB_B_register:
-      TransferBytes = 3; break;
-    case AArch64::LD3LN_WB_H_fixed: case AArch64::LD3LN_WB_H_register:
-      TransferBytes = 6; break;
-    case AArch64::LD3LN_WB_S_fixed: case AArch64::LD3LN_WB_S_register:
-      TransferBytes = 12; break;
-    case AArch64::LD3LN_WB_D_fixed: case AArch64::LD3LN_WB_D_register:
-      TransferBytes = 24; break;
-    }
-    IsLoad = true;
-    NumVecs = 3;
-    break;
-  }
-
-  case AArch64::LD4LN_WB_B_fixed: case AArch64::LD4LN_WB_B_register:
-  case AArch64::LD4LN_WB_H_fixed: case AArch64::LD4LN_WB_H_register:
-  case AArch64::LD4LN_WB_S_fixed: case AArch64::LD4LN_WB_S_register:
-  case AArch64::LD4LN_WB_D_fixed: case AArch64::LD4LN_WB_D_register: {
-    switch (Opc) {
-    case AArch64::LD4LN_WB_B_fixed: case AArch64::LD4LN_WB_B_register:
-      TransferBytes = 4; break;
-    case AArch64::LD4LN_WB_H_fixed: case AArch64::LD4LN_WB_H_register:
-      TransferBytes = 8; break;
-    case AArch64::LD4LN_WB_S_fixed: case AArch64::LD4LN_WB_S_register:
-      TransferBytes = 16; break;
-    case AArch64::LD4LN_WB_D_fixed: case AArch64::LD4LN_WB_D_register:
-      TransferBytes = 32; break;
-    }
-    IsLoad = true;
-    NumVecs = 4;
-    break;
-  }
-
-  case AArch64::ST1LN_WB_B_fixed: case AArch64::ST1LN_WB_B_register:
-  case AArch64::ST1LN_WB_H_fixed: case AArch64::ST1LN_WB_H_register:
-  case AArch64::ST1LN_WB_S_fixed: case AArch64::ST1LN_WB_S_register:
-  case AArch64::ST1LN_WB_D_fixed: case AArch64::ST1LN_WB_D_register: {
-    switch (Opc) {
-    case AArch64::ST1LN_WB_B_fixed: case AArch64::ST1LN_WB_B_register:
-      TransferBytes = 1; break;
-    case AArch64::ST1LN_WB_H_fixed: case AArch64::ST1LN_WB_H_register:
-      TransferBytes = 2; break;
-    case AArch64::ST1LN_WB_S_fixed: case AArch64::ST1LN_WB_S_register:
-      TransferBytes = 4; break;
-    case AArch64::ST1LN_WB_D_fixed: case AArch64::ST1LN_WB_D_register:
-      TransferBytes = 8; break;
-    }
-    NumVecs = 1;
-    break;
-  }
-
-  case AArch64::ST2LN_WB_B_fixed: case AArch64::ST2LN_WB_B_register:
-  case AArch64::ST2LN_WB_H_fixed: case AArch64::ST2LN_WB_H_register:
-  case AArch64::ST2LN_WB_S_fixed: case AArch64::ST2LN_WB_S_register:
-  case AArch64::ST2LN_WB_D_fixed: case AArch64::ST2LN_WB_D_register: {
-    switch (Opc) {
-    case AArch64::ST2LN_WB_B_fixed: case AArch64::ST2LN_WB_B_register:
-      TransferBytes = 2; break;
-    case AArch64::ST2LN_WB_H_fixed: case AArch64::ST2LN_WB_H_register:
-      TransferBytes = 4; break;
-    case AArch64::ST2LN_WB_S_fixed: case AArch64::ST2LN_WB_S_register:
-      TransferBytes = 8; break;
-    case AArch64::ST2LN_WB_D_fixed: case AArch64::ST2LN_WB_D_register:
-      TransferBytes = 16; break;
-    }
-    NumVecs = 2;
-    break;
-  }
-
-  case AArch64::ST3LN_WB_B_fixed: case AArch64::ST3LN_WB_B_register:
-  case AArch64::ST3LN_WB_H_fixed: case AArch64::ST3LN_WB_H_register:
-  case AArch64::ST3LN_WB_S_fixed: case AArch64::ST3LN_WB_S_register:
-  case AArch64::ST3LN_WB_D_fixed: case AArch64::ST3LN_WB_D_register: {
-    switch (Opc) {
-    case AArch64::ST3LN_WB_B_fixed: case AArch64::ST3LN_WB_B_register:
-      TransferBytes = 3; break;
-    case AArch64::ST3LN_WB_H_fixed: case AArch64::ST3LN_WB_H_register:
-      TransferBytes = 6; break;
-    case AArch64::ST3LN_WB_S_fixed: case AArch64::ST3LN_WB_S_register:
-      TransferBytes = 12; break;
-    case AArch64::ST3LN_WB_D_fixed: case AArch64::ST3LN_WB_D_register:
-      TransferBytes = 24; break;
-    }
-    NumVecs = 3;
-    break;
-  }
-
-  case AArch64::ST4LN_WB_B_fixed: case AArch64::ST4LN_WB_B_register:
-  case AArch64::ST4LN_WB_H_fixed: case AArch64::ST4LN_WB_H_register:
-  case AArch64::ST4LN_WB_S_fixed: case AArch64::ST4LN_WB_S_register:
-  case AArch64::ST4LN_WB_D_fixed: case AArch64::ST4LN_WB_D_register: {
-    switch (Opc) {
-    case AArch64::ST4LN_WB_B_fixed: case AArch64::ST4LN_WB_B_register:
-      TransferBytes = 4; break;
-    case AArch64::ST4LN_WB_H_fixed: case AArch64::ST4LN_WB_H_register:
-      TransferBytes = 8; break;
-    case AArch64::ST4LN_WB_S_fixed: case AArch64::ST4LN_WB_S_register:
-      TransferBytes = 16; break;
-    case AArch64::ST4LN_WB_D_fixed: case AArch64::ST4LN_WB_D_register:
-      TransferBytes = 32; break;
-    }
-    NumVecs = 4;
-    break;
-  }
-
-  default:
-    return MCDisassembler::Fail;
-  } // End of switch (Opc)
-
-  unsigned Rt = fieldFromInstruction(Insn, 0, 5);
-  unsigned Rn = fieldFromInstruction(Insn, 5, 5);
-  unsigned Rm = fieldFromInstruction(Insn, 16, 5);
-
-  // Decode post-index of load duplicate lane
-  if (IsLoadDup) {
-    switch (NumVecs) {
-    case 1:
-      Is64bitVec ? DecodeFPR64RegisterClass(Inst, Rt, Address, Decoder)
-                 : DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 2:
-      Is64bitVec ? DecodeDPairRegisterClass(Inst, Rt, Address, Decoder)
-                 : DecodeQPairRegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 3:
-      Is64bitVec ? DecodeDTripleRegisterClass(Inst, Rt, Address, Decoder)
-                 : DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 4:
-      Is64bitVec ? DecodeDQuadRegisterClass(Inst, Rt, Address, Decoder)
-                 : DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder);
-    }
-
-    // Decode write back register, which is equal to Rn.
-    DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-    DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-
-    if (Rm == 31) // If Rm is 0b11111, add the number of transferred bytes
-      Inst.addOperand(MCOperand::CreateImm(TransferBytes));
-    else // Decode Rm
-      DecodeGPR64noxzrRegisterClass(Inst, Rm, Address, Decoder);
-
-    return MCDisassembler::Success;
-  }
-
-  // Decode post-index of load/store lane
-  // Loads have a vector list as output.
-  if (IsLoad) {
-    switch (NumVecs) {
-    case 1:
-      DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 2:
-      DecodeQPairRegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 3:
-      DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder);
-      break;
-    case 4:
-      DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder);
-    }
-  }
-
-  // Decode write back register, which is equal to Rn.
-  DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-  DecodeGPR64xspRegisterClass(Inst, Rn, Address, Decoder);
-
-  if (Rm == 31) // If Rm is 0b11111, add the number of transferred bytes
-    Inst.addOperand(MCOperand::CreateImm(TransferBytes));
-  else // Decode Rm
-    DecodeGPR64noxzrRegisterClass(Inst, Rm, Address, Decoder);
-
-  // Decode the source vector list.
-  switch (NumVecs) {
-  case 1:
-    DecodeFPR128RegisterClass(Inst, Rt, Address, Decoder);
-    break;
-  case 2:
-    DecodeQPairRegisterClass(Inst, Rt, Address, Decoder);
-    break;
-  case 3:
-    DecodeQTripleRegisterClass(Inst, Rt, Address, Decoder);
-    break;
-  case 4:
-    DecodeQQuadRegisterClass(Inst, Rt, Address, Decoder);
-  }
-
-  // Decode lane
-  unsigned Q = fieldFromInstruction(Insn, 30, 1);
-  unsigned S = fieldFromInstruction(Insn, 10, 3);
-  unsigned lane = 0;
-  // Calculate the number of lanes by number of vectors and transferred bytes.
-  // NumLanes = 16 bytes / bytes of each lane
-  unsigned NumLanes = 16 / (TransferBytes / NumVecs);
-  switch (NumLanes) {
-  case 16: // A vector has 16 lanes, each lane is 1 byte.
-    lane = (Q << 3) | S;
-    break;
-  case 8:
-    lane = (Q << 2) | (S >> 1);
-    break;
-  case 4:
-    lane = (Q << 1) | (S >> 2);
-    break;
-  case 2:
-    lane = Q;
-    break;
-  }
-  Inst.addOperand(MCOperand::CreateImm(lane));
-
-  return MCDisassembler::Success;
-}
-
-static DecodeStatus DecodeSHLLInstruction(MCInst &Inst, unsigned Insn,
-                                          uint64_t Address,
-                                          const void *Decoder) {
-  unsigned Rd = fieldFromInstruction(Insn, 0, 5);
-  unsigned Rn = fieldFromInstruction(Insn, 5, 5);
-  unsigned size = fieldFromInstruction(Insn, 22, 2);
-  unsigned Q = fieldFromInstruction(Insn, 30, 1);
-
-  DecodeFPR128RegisterClass(Inst, Rd, Address, Decoder);
-
-  if(Q)
-    DecodeFPR128RegisterClass(Inst, Rn, Address, Decoder);
-  else
-    DecodeFPR64RegisterClass(Inst, Rn, Address, Decoder);
-
-  switch (size) {
-  case 0:
-    Inst.addOperand(MCOperand::CreateImm(8));
-    break;
-  case 1:
-    Inst.addOperand(MCOperand::CreateImm(16));
-    break;
-  case 2:
-    Inst.addOperand(MCOperand::CreateImm(32));
-    break;
-  default :
-    return MCDisassembler::Fail;
-  }
-  return MCDisassembler::Success;
-}
-

Removed: llvm/trunk/lib/Target/AArch64/Disassembler/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/Disassembler/CMakeLists.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/Disassembler/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/AArch64/Disassembler/CMakeLists.txt (removed)
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMAArch64Disassembler
-  AArch64Disassembler.cpp
-  )

Removed: llvm/trunk/lib/Target/AArch64/Disassembler/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/Disassembler/LLVMBuild.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/Disassembler/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AArch64/Disassembler/LLVMBuild.txt (removed)
@@ -1,23 +0,0 @@
-;===- ./lib/Target/AArch64/Disassembler/LLVMBuild.txt ----------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = AArch64Disassembler
-parent = AArch64
-required_libraries = AArch64Info AArch64Utils MC Support
-add_to_library_groups = AArch64

Removed: llvm/trunk/lib/Target/AArch64/Disassembler/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/Disassembler/Makefile?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/Disassembler/Makefile (original)
+++ llvm/trunk/lib/Target/AArch64/Disassembler/Makefile (removed)
@@ -1,16 +0,0 @@
-##===- lib/Target/AArch64/Disassembler/Makefile ------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64Disassembler
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common

Removed: llvm/trunk/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp (removed)
@@ -1,549 +0,0 @@
-//==-- AArch64InstPrinter.cpp - Convert AArch64 MCInst to assembly syntax --==//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints an AArch64 MCInst to a .s file.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AArch64InstPrinter.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "Utils/AArch64BaseInfo.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/Format.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "asm-printer"
-
-#define GET_INSTRUCTION_NAME
-#define PRINT_ALIAS_INSTR
-#include "AArch64GenAsmWriter.inc"
-
-static int64_t unpackSignedImm(int BitWidth, uint64_t Value) {
-  assert(!(Value & ~((1ULL << BitWidth)-1)) && "immediate not n-bit");
-  if (Value & (1ULL <<  (BitWidth - 1)))
-    return static_cast<int64_t>(Value) - (1LL << BitWidth);
-  else
-    return Value;
-}
-
-AArch64InstPrinter::AArch64InstPrinter(const MCAsmInfo &MAI,
-                                       const MCInstrInfo &MII,
-                                       const MCRegisterInfo &MRI,
-                                       const MCSubtargetInfo &STI) :
-  MCInstPrinter(MAI, MII, MRI) {
-  // Initialize the set of available features.
-  setAvailableFeatures(STI.getFeatureBits());
-}
-
-void AArch64InstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
-  OS << getRegisterName(RegNo);
-}
-
-void
-AArch64InstPrinter::printOffsetSImm9Operand(const MCInst *MI,
-                                              unsigned OpNum, raw_ostream &O) {
-  const MCOperand &MOImm = MI->getOperand(OpNum);
-  int32_t Imm = unpackSignedImm(9, MOImm.getImm());
-
-  O << '#' << Imm;
-}
-
-void
-AArch64InstPrinter::printAddrRegExtendOperand(const MCInst *MI, unsigned OpNum,
-                                          raw_ostream &O, unsigned MemSize,
-                                          unsigned RmSize) {
-  unsigned ExtImm = MI->getOperand(OpNum).getImm();
-  unsigned OptionHi = ExtImm >> 1;
-  unsigned S = ExtImm & 1;
-  bool IsLSL = OptionHi == 1 && RmSize == 64;
-
-  const char *Ext;
-  switch (OptionHi) {
-  case 1:
-    Ext = (RmSize == 32) ? "uxtw" : "lsl";
-    break;
-  case 3:
-    Ext = (RmSize == 32) ? "sxtw" : "sxtx";
-    break;
-  default:
-    llvm_unreachable("Incorrect Option on load/store (reg offset)");
-  }
-  O << Ext;
-
-  if (S) {
-    unsigned ShiftAmt = Log2_32(MemSize);
-    O << " #" << ShiftAmt;
-  } else if (IsLSL) {
-    O << " #0";
-  }
-}
-
-void
-AArch64InstPrinter::printAddSubImmLSL0Operand(const MCInst *MI,
-                                              unsigned OpNum, raw_ostream &O) {
-  const MCOperand &Imm12Op = MI->getOperand(OpNum);
-
-  if (Imm12Op.isImm()) {
-    int64_t Imm12 = Imm12Op.getImm();
-    assert(Imm12 >= 0 && "Invalid immediate for add/sub imm");
-    O << "#" << Imm12;
-  } else {
-    assert(Imm12Op.isExpr() && "Unexpected shift operand type");
-    O << "#" << *Imm12Op.getExpr();
-  }
-}
-
-void
-AArch64InstPrinter::printAddSubImmLSL12Operand(const MCInst *MI, unsigned OpNum,
-                                               raw_ostream &O) {
-
-  printAddSubImmLSL0Operand(MI, OpNum, O);
-
-  O << ", lsl #12";
-}
-
-void
-AArch64InstPrinter::printBareImmOperand(const MCInst *MI, unsigned OpNum,
-                                        raw_ostream &O) {
-  const MCOperand &MO = MI->getOperand(OpNum);
-  O << MO.getImm();
-}
-
-template<unsigned RegWidth> void
-AArch64InstPrinter::printBFILSBOperand(const MCInst *MI, unsigned OpNum,
-                                       raw_ostream &O) {
-  const MCOperand &ImmROp = MI->getOperand(OpNum);
-  unsigned LSB = ImmROp.getImm() == 0 ? 0 : RegWidth - ImmROp.getImm();
-
-  O << '#' << LSB;
-}
-
-void AArch64InstPrinter::printBFIWidthOperand(const MCInst *MI, unsigned OpNum,
-                                              raw_ostream &O) {
-  const MCOperand &ImmSOp = MI->getOperand(OpNum);
-  unsigned Width = ImmSOp.getImm() + 1;
-
-  O << '#' << Width;
-}
-
-void
-AArch64InstPrinter::printBFXWidthOperand(const MCInst *MI, unsigned OpNum,
-                                         raw_ostream &O) {
-  const MCOperand &ImmSOp = MI->getOperand(OpNum);
-  const MCOperand &ImmROp = MI->getOperand(OpNum - 1);
-
-  unsigned ImmR = ImmROp.getImm();
-  unsigned ImmS = ImmSOp.getImm();
-
-  assert(ImmS >= ImmR && "Invalid ImmR, ImmS combination for bitfield extract");
-
-  O << '#' << (ImmS - ImmR + 1);
-}
-
-void
-AArch64InstPrinter::printCRxOperand(const MCInst *MI, unsigned OpNum,
-                                    raw_ostream &O) {
-    const MCOperand &CRx = MI->getOperand(OpNum);
-
-    O << 'c' << CRx.getImm();
-}
-
-
-void
-AArch64InstPrinter::printCVTFixedPosOperand(const MCInst *MI, unsigned OpNum,
-                                            raw_ostream &O) {
-    const MCOperand &ScaleOp = MI->getOperand(OpNum);
-
-    O << '#' << (64 - ScaleOp.getImm());
-}
-
-
-void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
-                                           raw_ostream &o) {
-  const MCOperand &MOImm8 = MI->getOperand(OpNum);
-
-  assert(MOImm8.isImm()
-         && "Immediate operand required for floating-point immediate inst");
-
-  uint32_t Imm8 = MOImm8.getImm();
-  uint32_t Fraction = Imm8 & 0xf;
-  uint32_t Exponent = (Imm8 >> 4) & 0x7;
-  uint32_t Negative = (Imm8 >> 7) & 0x1;
-
-  float Val = 1.0f + Fraction / 16.0f;
-
-  // That is:
-  // 000 -> 2^1,  001 -> 2^2,  010 -> 2^3,  011 -> 2^4,
-  // 100 -> 2^-3, 101 -> 2^-2, 110 -> 2^-1, 111 -> 2^0
-  if (Exponent & 0x4) {
-    Val /= 1 << (7 - Exponent);
-  } else {
-    Val *= 1 << (Exponent + 1);
-  }
-
-  Val = Negative ? -Val : Val;
-
-  o << '#' << format("%.8f", Val);
-}
-
-void AArch64InstPrinter::printFPZeroOperand(const MCInst *MI, unsigned OpNum,
-                                            raw_ostream &o) {
-  o << "#0.0";
-}
-
-void
-AArch64InstPrinter::printCondCodeOperand(const MCInst *MI, unsigned OpNum,
-                                         raw_ostream &O) {
-  const MCOperand &MO = MI->getOperand(OpNum);
-
-  O << A64CondCodeToString(static_cast<A64CC::CondCodes>(MO.getImm()));
-}
-
-void
-AArch64InstPrinter::printInverseCondCodeOperand(const MCInst *MI,
-                                                unsigned OpNum,
-                                                raw_ostream &O) {
-  A64CC::CondCodes CC =
-      static_cast<A64CC::CondCodes>(MI->getOperand(OpNum).getImm());
-  O << A64CondCodeToString(A64InvertCondCode(CC));
-}
-
-template <unsigned field_width, unsigned scale> void
-AArch64InstPrinter::printLabelOperand(const MCInst *MI, unsigned OpNum,
-                                            raw_ostream &O) {
-  const MCOperand &MO = MI->getOperand(OpNum);
-
-  if (!MO.isImm()) {
-    printOperand(MI, OpNum, O);
-    return;
-  }
-
-  // The immediate of LDR (lit) instructions is a signed 19-bit immediate, which
-  // is multiplied by 4 (because all A64 instructions are 32-bits wide).
-  uint64_t UImm = MO.getImm();
-  uint64_t Sign = UImm & (1LL << (field_width - 1));
-  int64_t SImm = scale * ((UImm & ~Sign) - Sign);
-
-  O << "#" << SImm;
-}
-
-template<unsigned RegWidth> void
-AArch64InstPrinter::printLogicalImmOperand(const MCInst *MI, unsigned OpNum,
-                                           raw_ostream &O) {
-  const MCOperand &MO = MI->getOperand(OpNum);
-  uint64_t Val;
-  A64Imms::isLogicalImmBits(RegWidth, MO.getImm(), Val);
-  O << "#0x";
-  O.write_hex(Val);
-}
-
-void
-AArch64InstPrinter::printOffsetUImm12Operand(const MCInst *MI, unsigned OpNum,
-                                               raw_ostream &O, int MemSize) {
-  const MCOperand &MOImm = MI->getOperand(OpNum);
-
-  if (MOImm.isImm()) {
-    uint32_t Imm = MOImm.getImm() * MemSize;
-
-    O << "#" << Imm;
-  } else {
-    O << "#" << *MOImm.getExpr();
-  }
-}
-
-void
-AArch64InstPrinter::printShiftOperand(const MCInst *MI,  unsigned OpNum,
-                                      raw_ostream &O,
-                                      A64SE::ShiftExtSpecifiers Shift) {
-    const MCOperand &MO = MI->getOperand(OpNum);
-
-    // LSL #0 is not printed
-    if (Shift == A64SE::LSL && MO.isImm() && MO.getImm() == 0)
-        return;
-
-    switch (Shift) {
-    case A64SE::LSL: O << "lsl"; break;
-    case A64SE::LSR: O << "lsr"; break;
-    case A64SE::ASR: O << "asr"; break;
-    case A64SE::ROR: O << "ror"; break;
-    default: llvm_unreachable("Invalid shift specifier in logical instruction");
-    }
-
-  O << " #" << MO.getImm();
-}
-
-void
-AArch64InstPrinter::printMoveWideImmOperand(const MCInst *MI,  unsigned OpNum,
-                                            raw_ostream &O) {
-  const MCOperand &UImm16MO = MI->getOperand(OpNum);
-  const MCOperand &ShiftMO = MI->getOperand(OpNum + 1);
-
-  if (UImm16MO.isImm()) {
-    O << '#' << UImm16MO.getImm();
-
-    if (ShiftMO.getImm() != 0)
-      O << ", lsl #" << (ShiftMO.getImm() * 16);
-
-    return;
-  }
-
-  O << "#" << *UImm16MO.getExpr();
-}
-
-void AArch64InstPrinter::printNamedImmOperand(const NamedImmMapper &Mapper,
-                                              const MCInst *MI, unsigned OpNum,
-                                              raw_ostream &O) {
-  bool ValidName;
-  const MCOperand &MO = MI->getOperand(OpNum);
-  StringRef Name = Mapper.toString(MO.getImm(), ValidName);
-
-  if (ValidName)
-    O << Name;
-  else
-    O << '#' << MO.getImm();
-}
-
-void
-AArch64InstPrinter::printSysRegOperand(const A64SysReg::SysRegMapper &Mapper,
-                                       const MCInst *MI, unsigned OpNum,
-                                       raw_ostream &O) {
-  const MCOperand &MO = MI->getOperand(OpNum);
-
-  bool ValidName;
-  std::string Name = Mapper.toString(MO.getImm(), ValidName);
-  if (ValidName) {
-    O << Name;
-    return;
-  }
-}
-
-
-void AArch64InstPrinter::printRegExtendOperand(const MCInst *MI,
-                                               unsigned OpNum,
-                                               raw_ostream &O,
-                                               A64SE::ShiftExtSpecifiers Ext) {
-  // FIXME: In principle TableGen should be able to detect this itself far more
-  // easily. We will only accumulate more of these hacks.
-  unsigned Reg0 = MI->getOperand(0).getReg();
-  unsigned Reg1 = MI->getOperand(1).getReg();
-
-  if (isStackReg(Reg0) || isStackReg(Reg1)) {
-    A64SE::ShiftExtSpecifiers LSLEquiv;
-
-    if (Reg0 == AArch64::XSP || Reg1 == AArch64::XSP)
-      LSLEquiv = A64SE::UXTX;
-    else
-      LSLEquiv = A64SE::UXTW;
-
-    if (Ext == LSLEquiv) {
-      O << "lsl #" << MI->getOperand(OpNum).getImm();
-      return;
-    }
-  }
-
-  switch (Ext) {
-  case A64SE::UXTB: O << "uxtb"; break;
-  case A64SE::UXTH: O << "uxth"; break;
-  case A64SE::UXTW: O << "uxtw"; break;
-  case A64SE::UXTX: O << "uxtx"; break;
-  case A64SE::SXTB: O << "sxtb"; break;
-  case A64SE::SXTH: O << "sxth"; break;
-  case A64SE::SXTW: O << "sxtw"; break;
-  case A64SE::SXTX: O << "sxtx"; break;
-  default: llvm_unreachable("Unexpected shift type for printing");
-  }
-
-  const MCOperand &MO = MI->getOperand(OpNum);
-  if (MO.getImm() != 0)
-    O << " #" << MO.getImm();
-}
-
-template<int MemScale> void
-AArch64InstPrinter::printSImm7ScaledOperand(const MCInst *MI, unsigned OpNum,
-                                      raw_ostream &O) {
-  const MCOperand &MOImm = MI->getOperand(OpNum);
-  int32_t Imm = unpackSignedImm(7, MOImm.getImm());
-
-  O << "#" << (Imm * MemScale);
-}
-
-void AArch64InstPrinter::printVPRRegister(const MCInst *MI, unsigned OpNo,
-                                          raw_ostream &O) {
-  unsigned Reg = MI->getOperand(OpNo).getReg();
-  std::string Name = getRegisterName(Reg);
-  Name[0] = 'v';
-  O << Name;
-}
-
-void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
-                                      raw_ostream &O) {
-  const MCOperand &Op = MI->getOperand(OpNo);
-  if (Op.isReg()) {
-    unsigned Reg = Op.getReg();
-    O << getRegisterName(Reg);
-  } else if (Op.isImm()) {
-    O << '#' << Op.getImm();
-  } else {
-    assert(Op.isExpr() && "unknown operand kind in printOperand");
-    // If a symbolic branch target was added as a constant expression then print
-    // that address in hex.
-    const MCConstantExpr *BranchTarget = dyn_cast<MCConstantExpr>(Op.getExpr());
-    int64_t Address;
-    if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) {
-      O << "0x";
-      O.write_hex(Address);
-    }
-    else {
-      // Otherwise, just print the expression.
-      O << *Op.getExpr();
-    }
-  }
-}
-
-
-void AArch64InstPrinter::printInst(const MCInst *MI, raw_ostream &O,
-                                   StringRef Annot) {
-  if (MI->getOpcode() == AArch64::TLSDESCCALL) {
-    // This is a special assembler directive which applies an
-    // R_AARCH64_TLSDESC_CALL to the following (BLR) instruction. It has a fixed
-    // form outside the normal TableGenerated scheme.
-    O << "\t.tlsdesccall " << *MI->getOperand(0).getExpr();
-  } else if (!printAliasInstr(MI, O))
-    printInstruction(MI, O);
-
-  printAnnotation(O, Annot);
-}
-
-template <A64SE::ShiftExtSpecifiers Ext, bool isHalf>
-void AArch64InstPrinter::printNeonMovImmShiftOperand(const MCInst *MI,
-                                                     unsigned OpNum,
-                                                     raw_ostream &O) {
-  const MCOperand &MO = MI->getOperand(OpNum);
-
-  assert(MO.isImm() &&
-         "Immediate operand required for Neon vector immediate inst.");
-
-  bool IsLSL = false;
-  if (Ext == A64SE::LSL)
-    IsLSL = true;
-  else if (Ext != A64SE::MSL)
-    llvm_unreachable("Invalid shift specifier in movi instruction");
-
-  int64_t Imm = MO.getImm();
-
-  // MSL and LSLH accepts encoded shift amount 0 or 1.
-  if ((!IsLSL || (IsLSL && isHalf)) && Imm != 0 && Imm != 1)
-    llvm_unreachable("Invalid shift amount in movi instruction");
-
-  // LSH accepts encoded shift amount 0, 1, 2 or 3.
-  if (IsLSL && (Imm < 0 || Imm > 3))
-    llvm_unreachable("Invalid shift amount in movi instruction");
-
-  // Print shift amount as multiple of 8 with MSL encoded shift amount
-  // 0 and 1 printed as 8 and 16.
-  if (!IsLSL)
-    Imm++;
-  Imm *= 8;
-
-  // LSL #0 is not printed
-  if (IsLSL) {
-    if (Imm == 0)
-      return;
-    O << ", lsl";
-  } else
-    O << ", msl";
-
-  O << " #" << Imm;
-}
-
-void AArch64InstPrinter::printNeonUImm0Operand(const MCInst *MI, unsigned OpNum,
-                                               raw_ostream &o) {
-  o << "#0x0";
-}
-
-void AArch64InstPrinter::printUImmHexOperand(const MCInst *MI, unsigned OpNum,
-                                             raw_ostream &O) {
-  const MCOperand &MOUImm = MI->getOperand(OpNum);
-
-  assert(MOUImm.isImm() &&
-         "Immediate operand required for Neon vector immediate inst.");
-
-  unsigned Imm = MOUImm.getImm();
-
-  O << "#0x";
-  O.write_hex(Imm);
-}
-
-void AArch64InstPrinter::printUImmBareOperand(const MCInst *MI,
-                                              unsigned OpNum,
-                                              raw_ostream &O) {
-  const MCOperand &MOUImm = MI->getOperand(OpNum);
-
-  assert(MOUImm.isImm()
-         && "Immediate operand required for Neon vector immediate inst.");
-
-  unsigned Imm = MOUImm.getImm();
-  O << Imm;
-}
-
-void AArch64InstPrinter::printNeonUImm64MaskOperand(const MCInst *MI,
-                                                    unsigned OpNum,
-                                                    raw_ostream &O) {
-  const MCOperand &MOUImm8 = MI->getOperand(OpNum);
-
-  assert(MOUImm8.isImm() &&
-         "Immediate operand required for Neon vector immediate bytemask inst.");
-
-  uint32_t UImm8 = MOUImm8.getImm();
-  uint64_t Mask = 0;
-
-  // Replicates 0x00 or 0xff byte in a 64-bit vector
-  for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
-    if ((UImm8 >> ByteNum) & 1)
-      Mask |= (uint64_t)0xff << (8 * ByteNum);
-  }
-
-  O << "#0x";
-  O.write_hex(Mask);
-}
-
-// If Count > 1, there are two valid kinds of vector list:
-//   (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
-//   (2) {Vn.layout - Vm.layout}
-// We choose the first kind as output.
-template <A64Layout::VectorLayout Layout, unsigned Count>
-void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum,
-                                         raw_ostream &O) {
-  assert(Count >= 1 && Count <= 4 && "Invalid Number of Vectors");
-
-  unsigned Reg = MI->getOperand(OpNum).getReg();
-  std::string LayoutStr = A64VectorLayoutToString(Layout);
-  O << "{ ";
-  if (Count > 1) { // Print sub registers separately
-    bool IsVec64 = (Layout < A64Layout::VL_16B);
-    unsigned SubRegIdx = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
-    for (unsigned I = 0; I < Count; I++) {
-      std::string Name = getRegisterName(MRI.getSubReg(Reg, SubRegIdx++));
-      Name[0] = 'v';
-      O << Name << LayoutStr;
-      if (I != Count - 1)
-        O << ", ";
-    }
-  } else { // Print the register directly when NumVecs is 1.
-    std::string Name = getRegisterName(Reg);
-    Name[0] = 'v';
-    O << Name << LayoutStr;
-  }
-  O << " }";
-}

Removed: llvm/trunk/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h (original)
+++ llvm/trunk/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h (removed)
@@ -1,186 +0,0 @@
-//===-- AArch64InstPrinter.h - Convert AArch64 MCInst to assembly syntax --===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints an AArch64 MCInst to a .s file.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_AARCH64INSTPRINTER_H
-#define LLVM_AARCH64INSTPRINTER_H
-
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "Utils/AArch64BaseInfo.h"
-#include "llvm/MC/MCInstPrinter.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-
-namespace llvm {
-
-class MCOperand;
-
-class AArch64InstPrinter : public MCInstPrinter {
-public:
-  AArch64InstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
-                     const MCRegisterInfo &MRI, const MCSubtargetInfo &STI);
-
-  // Autogenerated by tblgen
-  void printInstruction(const MCInst *MI, raw_ostream &O);
-  bool printAliasInstr(const MCInst *MI, raw_ostream &O);
-  void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
-                               unsigned PrintMethodIdx, raw_ostream &O);
-  static const char *getRegisterName(unsigned RegNo);
-  static const char *getInstructionName(unsigned Opcode);
-
-  void printRegName(raw_ostream &O, unsigned RegNum) const override;
-
-  template<unsigned MemSize, unsigned RmSize>
-  void printAddrRegExtendOperand(const MCInst *MI, unsigned OpNum,
-                                 raw_ostream &O) {
-    printAddrRegExtendOperand(MI, OpNum, O, MemSize, RmSize);
-  }
-
-
-  void printAddrRegExtendOperand(const MCInst *MI, unsigned OpNum,
-                                 raw_ostream &O, unsigned MemSize,
-                                 unsigned RmSize);
-
-  void printAddSubImmLSL0Operand(const MCInst *MI,
-                                 unsigned OpNum, raw_ostream &O);
-  void printAddSubImmLSL12Operand(const MCInst *MI,
-                                  unsigned OpNum, raw_ostream &O);
-
-  void printBareImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-
-  template<unsigned RegWidth>
-  void printBFILSBOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-  void printBFIWidthOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-  void printBFXWidthOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-
-
-  void printCondCodeOperand(const MCInst *MI, unsigned OpNum,
-                            raw_ostream &O);
-  void printInverseCondCodeOperand(const MCInst *MI, unsigned OpNum,
-                                   raw_ostream &O);
-
-  void printCRxOperand(const MCInst *MI, unsigned OpNum,
-                       raw_ostream &O);
-
-  void printCVTFixedPosOperand(const MCInst *MI, unsigned OpNum,
-                               raw_ostream &O);
-
-  void printFPImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &o);
-
-  void printFPZeroOperand(const MCInst *MI, unsigned OpNum, raw_ostream &o);
-
-  template<int MemScale>
-  void printOffsetUImm12Operand(const MCInst *MI,
-                                  unsigned OpNum, raw_ostream &o) {
-    printOffsetUImm12Operand(MI, OpNum, o, MemScale);
-  }
-
-  void printOffsetUImm12Operand(const MCInst *MI, unsigned OpNum,
-                                  raw_ostream &o, int MemScale);
-
-  template<unsigned field_width, unsigned scale>
-  void printLabelOperand(const MCInst *MI, unsigned OpNum,
-                         raw_ostream &O);
-
-  template<unsigned RegWidth>
-  void printLogicalImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-
-  template<typename SomeNamedImmMapper>
-  void printNamedImmOperand(const MCInst *MI, unsigned OpNum,
-                            raw_ostream &O) {
-    printNamedImmOperand(SomeNamedImmMapper(), MI, OpNum, O);
-  }
-
-  void printNamedImmOperand(const NamedImmMapper &Mapper,
-                            const MCInst *MI, unsigned OpNum,
-                            raw_ostream &O);
-
-  void printSysRegOperand(const A64SysReg::SysRegMapper &Mapper,
-                          const MCInst *MI, unsigned OpNum,
-                          raw_ostream &O);
-
-  void printMRSOperand(const MCInst *MI, unsigned OpNum,
-                       raw_ostream &O) {
-    printSysRegOperand(A64SysReg::MRSMapper(), MI, OpNum, O);
-  }
-
-  void printMSROperand(const MCInst *MI, unsigned OpNum,
-                       raw_ostream &O) {
-    printSysRegOperand(A64SysReg::MSRMapper(), MI, OpNum, O);
-  }
-
-  void printShiftOperand(const char *name, const MCInst *MI,
-                         unsigned OpIdx, raw_ostream &O);
-
-  void printLSLOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-
-  void printLSROperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
-    printShiftOperand("lsr", MI, OpNum, O);
-  }
-  void printASROperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
-    printShiftOperand("asr", MI, OpNum, O);
-  }
-  void printROROperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
-    printShiftOperand("ror", MI, OpNum, O);
-  }
-
-  template<A64SE::ShiftExtSpecifiers Shift>
-  void printShiftOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) {
-    printShiftOperand(MI, OpNum, O, Shift);
-  }
-
-  void printShiftOperand(const MCInst *MI, unsigned OpNum,
-                         raw_ostream &O, A64SE::ShiftExtSpecifiers Sh);
-
-
-  void printMoveWideImmOperand(const  MCInst *MI, unsigned OpNum,
-                               raw_ostream &O);
-
-  template<int MemSize> void
-  printSImm7ScaledOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-
-  void printOffsetSImm9Operand(const MCInst *MI, unsigned OpNum,
-                               raw_ostream &O);
-
-  void printPRFMOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-
-  template<A64SE::ShiftExtSpecifiers EXT>
-  void printRegExtendOperand(const MCInst *MI, unsigned OpNum,
-                             raw_ostream &O) {
-    printRegExtendOperand(MI, OpNum, O, EXT);
-  }
-
-  void printRegExtendOperand(const MCInst *MI, unsigned OpNum,
-                             raw_ostream &O, A64SE::ShiftExtSpecifiers Ext);
-
-  void printVPRRegister(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
-
-  bool isStackReg(unsigned RegNo) {
-    return RegNo == AArch64::XSP || RegNo == AArch64::WSP;
-  }
-
-  template <A64SE::ShiftExtSpecifiers Ext, bool IsHalf>
-  void printNeonMovImmShiftOperand(const MCInst *MI, unsigned OpNum,
-                                   raw_ostream &O);
-  void printNeonUImm0Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-  void printUImmHexOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-  void printUImmBareOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-  void printNeonUImm64MaskOperand(const MCInst *MI, unsigned OpNum,
-                                  raw_ostream &O);
-
-  template <A64Layout::VectorLayout Layout, unsigned Count>
-  void printVectorList(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-};
-}
-
-#endif

Removed: llvm/trunk/lib/Target/AArch64/InstPrinter/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/InstPrinter/CMakeLists.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/InstPrinter/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/AArch64/InstPrinter/CMakeLists.txt (removed)
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMAArch64AsmPrinter
-  AArch64InstPrinter.cpp
-  )

Removed: llvm/trunk/lib/Target/AArch64/InstPrinter/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/InstPrinter/LLVMBuild.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/InstPrinter/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AArch64/InstPrinter/LLVMBuild.txt (removed)
@@ -1,24 +0,0 @@
-;===- ./lib/Target/AArch64/InstPrinter/LLVMBuild.txt -----------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = AArch64AsmPrinter
-parent = AArch64
-required_libraries = AArch64Utils MC Support
-add_to_library_groups = AArch64
-

Removed: llvm/trunk/lib/Target/AArch64/InstPrinter/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/InstPrinter/Makefile?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/InstPrinter/Makefile (original)
+++ llvm/trunk/lib/Target/AArch64/InstPrinter/Makefile (removed)
@@ -1,15 +0,0 @@
-##===- lib/Target/AArch64/AsmPrinter/Makefile --------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64AsmPrinter
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common

Removed: llvm/trunk/lib/Target/AArch64/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/LLVMBuild.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AArch64/LLVMBuild.txt (removed)
@@ -1,35 +0,0 @@
-;===- ./lib/Target/AArch64/LLVMBuild.txt -----------------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[common]
-subdirectories = AsmParser Disassembler InstPrinter MCTargetDesc TargetInfo Utils
-
-[component_0]
-type = TargetGroup
-name = AArch64
-parent = Target
-has_asmparser = 1
-has_asmprinter = 1
-has_disassembler = 1
-has_jit = 1
-
-[component_1]
-type = Library
-name = AArch64CodeGen
-parent = AArch64
-required_libraries = AArch64AsmPrinter AArch64Desc AArch64Info AArch64Utils Analysis AsmPrinter CodeGen Core MC SelectionDAG Support Target
-add_to_library_groups = AArch64

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp (removed)
@@ -1,593 +0,0 @@
-//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the AArch64 implementation of the MCAsmBackend class,
-// which is principally concerned with relaxation of the various fixup kinds.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/AArch64FixupKinds.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "llvm/MC/MCAsmBackend.h"
-#include "llvm/MC/MCELFObjectWriter.h"
-#include "llvm/MC/MCFixupKindInfo.h"
-#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/ELF.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-namespace {
-class AArch64AsmBackend : public MCAsmBackend {
-  const MCSubtargetInfo* STI;
-public:
-  AArch64AsmBackend(const Target &T, const StringRef TT)
-    : MCAsmBackend(),
-      STI(AArch64_MC::createAArch64MCSubtargetInfo(TT, "", ""))
-    {}
-
-
-  ~AArch64AsmBackend() {
-    delete STI;
-  }
-
-  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
-
-  virtual void processFixupValue(const MCAssembler &Asm,
-                                 const MCAsmLayout &Layout,
-                                 const MCFixup &Fixup, const MCFragment *DF,
-                                 const MCValue &Target, uint64_t &Value,
-                                 bool &IsResolved) override;
-};
-} // end anonymous namespace
-
-void AArch64AsmBackend::processFixupValue(const MCAssembler &Asm,
-                                          const MCAsmLayout &Layout,
-                                          const MCFixup &Fixup,
-                                          const MCFragment *DF,
-                                          const MCValue &Target,
-                                          uint64_t &Value, bool &IsResolved) {
-  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
-  // ~0xfff. This means that the required offset to reach a symbol can vary by
-  // up to one step depending on where the ADRP is in memory. For example:
-  //
-  //     ADRP x0, there
-  //  there:
-  //
-  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
-  // we'll need that as an offset. At any other address "there" will be in the
-  // same page as the ADRP and the instruction should encode 0x0. Assuming the
-  // section isn't 0x1000-aligned, we therefore need to delegate this decision
-  // to the linker -- a relocation!
-  if ((uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_prel_page ||
-      (uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_prel_got_page ||
-      (uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_gottprel_page ||
-      (uint32_t)Fixup.getKind() == AArch64::fixup_a64_tlsdesc_adr_page)
-    IsResolved = false;
-}
-
-
-static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value);
-
-namespace {
-
-class ELFAArch64AsmBackend : public AArch64AsmBackend {
-  uint8_t OSABI;
-  bool IsLittle; // Big or little endian
-public:
-  ELFAArch64AsmBackend(const Target &T, const StringRef TT,
-                       uint8_t _OSABI, bool isLittle)
-    : AArch64AsmBackend(T, TT), OSABI(_OSABI), IsLittle(isLittle) { }
-
-  bool fixupNeedsRelaxation(const MCFixup &Fixup,
-                            uint64_t Value,
-                            const MCRelaxableFragment *DF,
-                            const MCAsmLayout &Layout) const override;
-
-  unsigned int getNumFixupKinds() const override {
-    return AArch64::NumTargetFixupKinds;
-  }
-
-  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
-    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
-// This table *must* be in the order that the fixup_* kinds are defined in
-// AArch64FixupKinds.h.
-//
-// Name                   Offset (bits)    Size (bits)    Flags
-{ "fixup_a64_ld_prel",               0,    32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_adr_prel",              0,    32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_adr_prel_page",         0,    32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_add_lo12",              0,    32,             0 },
-{ "fixup_a64_ldst8_lo12",            0,    32,             0 },
-{ "fixup_a64_ldst16_lo12",           0,    32,             0 },
-{ "fixup_a64_ldst32_lo12",           0,    32,             0 },
-{ "fixup_a64_ldst64_lo12",           0,    32,             0 },
-{ "fixup_a64_ldst128_lo12",          0,    32,             0 },
-{ "fixup_a64_tstbr",                 0,    32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_condbr",                0,    32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_uncondbr",              0,    32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_call",                  0,    32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_movw_uabs_g0",          0,    32,             0 },
-{ "fixup_a64_movw_uabs_g0_nc",       0,    32,             0 },
-{ "fixup_a64_movw_uabs_g1",          0,    32,             0 },
-{ "fixup_a64_movw_uabs_g1_nc",       0,    32,             0 },
-{ "fixup_a64_movw_uabs_g2",          0,    32,             0 },
-{ "fixup_a64_movw_uabs_g2_nc",       0,    32,             0 },
-{ "fixup_a64_movw_uabs_g3",          0,    32,             0 },
-{ "fixup_a64_movw_sabs_g0",          0,    32,             0 },
-{ "fixup_a64_movw_sabs_g1",          0,    32,             0 },
-{ "fixup_a64_movw_sabs_g2",          0,    32,             0 },
-{ "fixup_a64_adr_prel_got_page",     0,    32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_ld64_got_lo12_nc",      0,    32,             0 },
-{ "fixup_a64_movw_dtprel_g2",        0,    32,             0 },
-{ "fixup_a64_movw_dtprel_g1",        0,    32,             0 },
-{ "fixup_a64_movw_dtprel_g1_nc",     0,    32,             0 },
-{ "fixup_a64_movw_dtprel_g0",        0,    32,             0 },
-{ "fixup_a64_movw_dtprel_g0_nc",     0,    32,             0 },
-{ "fixup_a64_add_dtprel_hi12",       0,    32,             0 },
-{ "fixup_a64_add_dtprel_lo12",       0,    32,             0 },
-{ "fixup_a64_add_dtprel_lo12_nc",    0,    32,             0 },
-{ "fixup_a64_ldst8_dtprel_lo12",     0,    32,             0 },
-{ "fixup_a64_ldst8_dtprel_lo12_nc",  0,    32,             0 },
-{ "fixup_a64_ldst16_dtprel_lo12",    0,    32,             0 },
-{ "fixup_a64_ldst16_dtprel_lo12_nc", 0,    32,             0 },
-{ "fixup_a64_ldst32_dtprel_lo12",    0,    32,             0 },
-{ "fixup_a64_ldst32_dtprel_lo12_nc", 0,    32,             0 },
-{ "fixup_a64_ldst64_dtprel_lo12",    0,    32,             0 },
-{ "fixup_a64_ldst64_dtprel_lo12_nc", 0,    32,             0 },
-{ "fixup_a64_movw_gottprel_g1",      0,    32,             0 },
-{ "fixup_a64_movw_gottprel_g0_nc",   0,    32,             0 },
-{ "fixup_a64_adr_gottprel_page",     0,    32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_ld64_gottprel_lo12_nc", 0,    32,             0 },
-{ "fixup_a64_ld_gottprel_prel19",    0,    32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_movw_tprel_g2",         0,    32,             0 },
-{ "fixup_a64_movw_tprel_g1",         0,    32,             0 },
-{ "fixup_a64_movw_tprel_g1_nc",      0,    32,             0 },
-{ "fixup_a64_movw_tprel_g0",         0,    32,             0 },
-{ "fixup_a64_movw_tprel_g0_nc",      0,    32,             0 },
-{ "fixup_a64_add_tprel_hi12",        0,    32,             0 },
-{ "fixup_a64_add_tprel_lo12",        0,    32,             0 },
-{ "fixup_a64_add_tprel_lo12_nc",     0,    32,             0 },
-{ "fixup_a64_ldst8_tprel_lo12",      0,    32,             0 },
-{ "fixup_a64_ldst8_tprel_lo12_nc",   0,    32,             0 },
-{ "fixup_a64_ldst16_tprel_lo12",     0,    32,             0 },
-{ "fixup_a64_ldst16_tprel_lo12_nc",  0,    32,             0 },
-{ "fixup_a64_ldst32_tprel_lo12",     0,    32,             0 },
-{ "fixup_a64_ldst32_tprel_lo12_nc",  0,    32,             0 },
-{ "fixup_a64_ldst64_tprel_lo12",     0,    32,             0 },
-{ "fixup_a64_ldst64_tprel_lo12_nc",  0,    32,             0 },
-{ "fixup_a64_tlsdesc_adr_page",      0,    32, MCFixupKindInfo::FKF_IsPCRel },
-{ "fixup_a64_tlsdesc_ld64_lo12_nc",  0,    32,             0 },
-{ "fixup_a64_tlsdesc_add_lo12_nc",   0,    32,             0 },
-{ "fixup_a64_tlsdesc_call",          0,     0,             0 }
-    };
-    if (Kind < FirstTargetFixupKind)
-      return MCAsmBackend::getFixupKindInfo(Kind);
-
-    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
-           "Invalid kind!");
-    return Infos[Kind - FirstTargetFixupKind];
-  }
-
-  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
-                  uint64_t Value, bool IsPCRel) const override {
-    unsigned NumBytes = getFixupKindInfo(Fixup.getKind()).TargetSize / 8;
-    Value = adjustFixupValue(Fixup.getKind(), Value);
-    if (!Value) return;           // Doesn't change encoding.
-
-    unsigned Offset = Fixup.getOffset();
-    assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
-
-    // For each byte of the fragment that the fixup touches, mask in the bits
-    // from the fixup value.
-    for (unsigned i = 0; i != NumBytes; ++i) {
-      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
-    }
-  }
-
-  bool mayNeedRelaxation(const MCInst&) const override {
-    return false;
-  }
-
-  void relaxInstruction(const MCInst&, llvm::MCInst&) const override {
-    llvm_unreachable("Cannot relax instructions");
-  }
-
-  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
-    return createAArch64ELFObjectWriter(OS, OSABI, IsLittle);
-  }
-};
-
-} // end anonymous namespace
-
-bool
-ELFAArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
-                                           uint64_t Value,
-                                           const MCRelaxableFragment *DF,
-                                           const MCAsmLayout &Layout) const {
-  // Correct for now. With all instructions 32-bit only very low-level
-  // considerations could make you select something which may fail.
-  return false;
-}
-
-
-bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
-  // Can't emit NOP with size not multiple of 32-bits
-  if (Count % 4 != 0)
-    return false;
-
-  uint64_t NumNops = Count / 4;
-  for (uint64_t i = 0; i != NumNops; ++i)
-    OW->Write32(0xd503201f);
-
-  return true;
-}
-
-static unsigned ADRImmBits(unsigned Value) {
-  unsigned lo2 = Value & 0x3;
-  unsigned hi19 = (Value & 0x1fffff) >> 2;
-
-  return (hi19 << 5) | (lo2 << 29);
-}
-
-static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) {
-  switch (Kind) {
-  default:
-    llvm_unreachable("Unknown fixup kind!");
-  case FK_Data_2:
-    assert((int64_t)Value >= -32768 &&
-           (int64_t)Value <= 65536 &&
-           "Out of range ABS16 fixup");
-    return Value;
-  case FK_Data_4:
-    assert((int64_t)Value >= -(1LL << 31) &&
-           (int64_t)Value <= (1LL << 32) - 1 &&
-           "Out of range ABS32 fixup");
-    return Value;
-  case FK_Data_8:
-    return Value;
-
-  case AArch64::fixup_a64_ld_gottprel_prel19:
-    // R_AARCH64_LD_GOTTPREL_PREL19: Set a load-literal immediate to bits 1F
-    // FFFC of G(TPREL(S+A)) - P; check -2^20 <= X < 2^20.
-  case AArch64::fixup_a64_ld_prel:
-    // R_AARCH64_LD_PREL_LO19: Sets a load-literal (immediate) value to bits
-    // 1F FFFC of S+A-P, checking that -2^20 <= S+A-P < 2^20.
-    assert((int64_t)Value >= -(1LL << 20) &&
-           (int64_t)Value < (1LL << 20) && "Out of range LDR (lit) fixup");
-    return (Value & 0x1ffffc) << 3;
-
-  case AArch64::fixup_a64_adr_prel:
-    // R_AARCH64_ADR_PREL_LO21: Sets an ADR immediate value to bits 1F FFFF of
-    // the result of S+A-P, checking that -2^20 <= S+A-P < 2^20.
-    assert((int64_t)Value >= -(1LL << 20) &&
-           (int64_t)Value < (1LL << 20) && "Out of range ADR fixup");
-    return ADRImmBits(Value & 0x1fffff);
-
-  case AArch64::fixup_a64_adr_prel_page:
-    // R_AARCH64_ADR_PREL_PG_HI21: Sets an ADRP immediate value to bits 1 FFFF
-    // F000 of the result of the operation, checking that -2^32 <= result <
-    // 2^32.
-    assert((int64_t)Value >= -(1LL << 32) &&
-           (int64_t)Value < (1LL << 32) && "Out of range ADRP fixup");
-    return ADRImmBits((Value & 0x1fffff000ULL) >> 12);
-
-  case AArch64::fixup_a64_add_dtprel_hi12:
-    // R_AARCH64_TLSLD_ADD_DTPREL_HI12: Set an ADD immediate field to bits
-    // FF F000 of DTPREL(S+A), check 0 <= X < 2^24.
-  case AArch64::fixup_a64_add_tprel_hi12:
-    // R_AARCH64_TLSLE_ADD_TPREL_HI12: Set an ADD immediate field to bits
-    // FF F000 of TPREL(S+A), check 0 <= X < 2^24.
-    assert((int64_t)Value >= 0 &&
-           (int64_t)Value < (1LL << 24) && "Out of range ADD fixup");
-    return (Value & 0xfff000) >> 2;
-
-  case AArch64::fixup_a64_add_dtprel_lo12:
-    // R_AARCH64_TLSLD_ADD_DTPREL_LO12: Set an ADD immediate field to bits
-    // FFF of DTPREL(S+A), check 0 <= X < 2^12.
-  case AArch64::fixup_a64_add_tprel_lo12:
-    // R_AARCH64_TLSLE_ADD_TPREL_LO12: Set an ADD immediate field to bits
-    // FFF of TPREL(S+A), check 0 <= X < 2^12.
-    assert((int64_t)Value >= 0 &&
-           (int64_t)Value < (1LL << 12) && "Out of range ADD fixup");
-    // ... fallthrough to no-checking versions ...
-  case AArch64::fixup_a64_add_dtprel_lo12_nc:
-    // R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: Set an ADD immediate field to bits
-    // FFF of DTPREL(S+A) with no overflow check.
-  case AArch64::fixup_a64_add_tprel_lo12_nc:
-    // R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: Set an ADD immediate field to bits
-    // FFF of TPREL(S+A) with no overflow check.
-  case AArch64::fixup_a64_tlsdesc_add_lo12_nc:
-    // R_AARCH64_TLSDESC_ADD_LO12_NC: Set an ADD immediate field to bits
-    // FFF of G(TLSDESC(S+A)), with no overflow check.
-  case AArch64::fixup_a64_add_lo12:
-    // R_AARCH64_ADD_ABS_LO12_NC: Sets an ADD immediate value to bits FFF of
-    // S+A, with no overflow check.
-    return (Value & 0xfff) << 10;
-
-  case AArch64::fixup_a64_ldst8_dtprel_lo12:
-    // R_AARCH64_TLSLD_LDST8_DTPREL_LO12: Set an LD/ST offset field to bits FFF
-    // of DTPREL(S+A), check 0 <= X < 2^12.
-  case AArch64::fixup_a64_ldst8_tprel_lo12:
-    // R_AARCH64_TLSLE_LDST8_TPREL_LO12: Set an LD/ST offset field to bits FFF
-    // of TPREL(S+A), check 0 <= X < 2^12.
-    assert((int64_t) Value >= 0 &&
-           (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
-    // ... fallthrough to no-checking versions ...
-  case AArch64::fixup_a64_ldst8_dtprel_lo12_nc:
-    // R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC: Set an LD/ST offset field to
-    // bits FFF of DTPREL(S+A), with no overflow check.
-  case AArch64::fixup_a64_ldst8_tprel_lo12_nc:
-    // R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: Set an LD/ST offset field to
-    // bits FFF of TPREL(S+A), with no overflow check.
-  case AArch64::fixup_a64_ldst8_lo12:
-    // R_AARCH64_LDST8_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFF
-    // of S+A, with no overflow check.
-    return (Value & 0xfff) << 10;
-
-  case AArch64::fixup_a64_ldst16_dtprel_lo12:
-    // R_AARCH64_TLSLD_LDST16_DTPREL_LO12: Set an LD/ST offset field to bits FFE
-    // of DTPREL(S+A), check 0 <= X < 2^12.
-  case AArch64::fixup_a64_ldst16_tprel_lo12:
-    // R_AARCH64_TLSLE_LDST16_TPREL_LO12: Set an LD/ST offset field to bits FFE
-    // of TPREL(S+A), check 0 <= X < 2^12.
-    assert((int64_t) Value >= 0 &&
-           (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
-    // ... fallthrough to no-checking versions ...
-  case AArch64::fixup_a64_ldst16_dtprel_lo12_nc:
-    // R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC: Set an LD/ST offset field to
-    // bits FFE of DTPREL(S+A), with no overflow check.
-  case AArch64::fixup_a64_ldst16_tprel_lo12_nc:
-    // R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: Set an LD/ST offset field to
-    // bits FFE of TPREL(S+A), with no overflow check.
-  case AArch64::fixup_a64_ldst16_lo12:
-    // R_AARCH64_LDST16_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFE
-    // of S+A, with no overflow check.
-    return (Value & 0xffe) << 9;
-
-  case AArch64::fixup_a64_ldst32_dtprel_lo12:
-    // R_AARCH64_TLSLD_LDST32_DTPREL_LO12: Set an LD/ST offset field to bits FFC
-    // of DTPREL(S+A), check 0 <= X < 2^12.
-  case AArch64::fixup_a64_ldst32_tprel_lo12:
-    // R_AARCH64_TLSLE_LDST32_TPREL_LO12: Set an LD/ST offset field to bits FFC
-    // of TPREL(S+A), check 0 <= X < 2^12.
-    assert((int64_t) Value >= 0 &&
-           (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
-    // ... fallthrough to no-checking versions ...
-  case AArch64::fixup_a64_ldst32_dtprel_lo12_nc:
-    // R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC: Set an LD/ST offset field to
-    // bits FFC of DTPREL(S+A), with no overflow check.
-  case AArch64::fixup_a64_ldst32_tprel_lo12_nc:
-    // R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: Set an LD/ST offset field to
-    // bits FFC of TPREL(S+A), with no overflow check.
-  case AArch64::fixup_a64_ldst32_lo12:
-    // R_AARCH64_LDST32_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFC
-    // of S+A, with no overflow check.
-    return (Value & 0xffc) << 8;
-
-  case AArch64::fixup_a64_ldst64_dtprel_lo12:
-    // R_AARCH64_TLSLD_LDST64_DTPREL_LO12: Set an LD/ST offset field to bits FF8
-    // of DTPREL(S+A), check 0 <= X < 2^12.
-  case AArch64::fixup_a64_ldst64_tprel_lo12:
-    // R_AARCH64_TLSLE_LDST64_TPREL_LO12: Set an LD/ST offset field to bits FF8
-    // of TPREL(S+A), check 0 <= X < 2^12.
-    assert((int64_t) Value >= 0 &&
-           (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup");
-    // ... fallthrough to no-checking versions ...
-  case AArch64::fixup_a64_ldst64_dtprel_lo12_nc:
-    // R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC: Set an LD/ST offset field to
-    // bits FF8 of DTPREL(S+A), with no overflow check.
-  case AArch64::fixup_a64_ldst64_tprel_lo12_nc:
-    // R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: Set an LD/ST offset field to
-    // bits FF8 of TPREL(S+A), with no overflow check.
-  case AArch64::fixup_a64_ldst64_lo12:
-    // R_AARCH64_LDST64_ABS_LO12_NC: Sets an LD/ST immediate value to bits FF8
-    // of S+A, with no overflow check.
-    return (Value & 0xff8) << 7;
-
-  case AArch64::fixup_a64_ldst128_lo12:
-    // R_AARCH64_LDST128_ABS_LO12_NC: Sets an LD/ST immediate value to bits FF0
-    // of S+A, with no overflow check.
-    return (Value & 0xff0) << 6;
-
-  case AArch64::fixup_a64_movw_uabs_g0:
-    // R_AARCH64_MOVW_UABS_G0: Sets a MOVZ immediate field to bits FFFF of S+A
-    // with a check that S+A < 2^16
-    assert(Value <= 0xffff && "Out of range move wide fixup");
-    return (Value & 0xffff) << 5;
-
-  case AArch64::fixup_a64_movw_dtprel_g0_nc:
-    // R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: Sets a MOVK immediate field to bits
-    // FFFF of DTPREL(S+A) with no overflow check.
-  case AArch64::fixup_a64_movw_gottprel_g0_nc:
-    // R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: Sets a MOVK immediate field to bits
-    // FFFF of G(TPREL(S+A)) - GOT with no overflow check.
-  case AArch64::fixup_a64_movw_tprel_g0_nc:
-    // R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: Sets a MOVK immediate field to bits
-    // FFFF of TPREL(S+A) with no overflow check.
-  case AArch64::fixup_a64_movw_uabs_g0_nc:
-    // R_AARCH64_MOVW_UABS_G0_NC: Sets a MOVK immediate field to bits FFFF of
-    // S+A with no overflow check.
-    return (Value & 0xffff) << 5;
-
-  case AArch64::fixup_a64_movw_uabs_g1:
-    // R_AARCH64_MOVW_UABS_G1: Sets a MOVZ immediate field to bits FFFF0000 of
-    // S+A with a check that S+A < 2^32
-    assert(Value <= 0xffffffffull && "Out of range move wide fixup");
-    return ((Value >> 16) & 0xffff) << 5;
-
-  case AArch64::fixup_a64_movw_dtprel_g1_nc:
-    // R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: Set a MOVK immediate field
-    // to bits FFFF0000 of DTPREL(S+A), with no overflow check.
-  case AArch64::fixup_a64_movw_tprel_g1_nc:
-    // R_AARCH64_TLSLD_MOVW_TPREL_G1_NC: Set a MOVK immediate field
-    // to bits FFFF0000 of TPREL(S+A), with no overflow check.
-  case AArch64::fixup_a64_movw_uabs_g1_nc:
-    // R_AARCH64_MOVW_UABS_G1_NC: Sets a MOVK immediate field to bits
-    // FFFF0000 of S+A with no overflow check.
-    return ((Value >> 16) & 0xffff) << 5;
-
-  case AArch64::fixup_a64_movw_uabs_g2:
-    // R_AARCH64_MOVW_UABS_G2: Sets a MOVZ immediate field to bits FFFF 0000
-    // 0000 of S+A with a check that S+A < 2^48
-    assert(Value <= 0xffffffffffffull && "Out of range move wide fixup");
-    return ((Value >> 32) & 0xffff) << 5;
-
-  case AArch64::fixup_a64_movw_uabs_g2_nc:
-    // R_AARCH64_MOVW_UABS_G2_NC: Sets a MOVK immediate field to bits FFFF 0000
-    // 0000 of S+A with no overflow check.
-    return ((Value >> 32) & 0xffff) << 5;
-
-  case AArch64::fixup_a64_movw_uabs_g3:
-    // R_AARCH64_MOVW_UABS_G3: Sets a MOVZ immediate field to bits FFFF 0000
-    // 0000 0000 of S+A (no overflow check needed)
-    return ((Value >> 48) & 0xffff) << 5;
-
-  case AArch64::fixup_a64_movw_dtprel_g0:
-    // R_AARCH64_TLSLD_MOVW_DTPREL_G0: Set a MOV[NZ] immediate field
-    // to bits FFFF of DTPREL(S+A).
-  case AArch64::fixup_a64_movw_tprel_g0:
-    // R_AARCH64_TLSLE_MOVW_TPREL_G0: Set a MOV[NZ] immediate field to
-    // bits FFFF of TPREL(S+A).
-  case AArch64::fixup_a64_movw_sabs_g0: {
-    // R_AARCH64_MOVW_SABS_G0: Sets MOV[NZ] immediate field using bits FFFF of
-    // S+A (see notes below); check -2^16 <= S+A < 2^16. (notes say that we
-    // should convert between MOVN and MOVZ to achieve our goals).
-    int64_t Signed = Value;
-    assert(Signed >= -(1LL << 16) && Signed < (1LL << 16)
-           && "Out of range move wide fixup");
-    if (Signed >= 0) {
-      Value = (Value & 0xffff) << 5;
-      // Bit 30 converts the MOVN encoding into a MOVZ
-      Value |= 1 << 30;
-    } else {
-      // MCCodeEmitter should have encoded a MOVN, which is fine.
-      Value = (~Value & 0xffff) << 5;
-    }
-    return Value;
-  }
-
-  case AArch64::fixup_a64_movw_dtprel_g1:
-    // R_AARCH64_TLSLD_MOVW_DTPREL_G1: Set a MOV[NZ] immediate field
-    // to bits FFFF0000 of DTPREL(S+A).
-  case AArch64::fixup_a64_movw_gottprel_g1:
-    // R_AARCH64_TLSIE_MOVW_GOTTPREL_G1: Set a MOV[NZ] immediate field
-    // to bits FFFF0000 of G(TPREL(S+A)) - GOT.
-  case AArch64::fixup_a64_movw_tprel_g1:
-    // R_AARCH64_TLSLE_MOVW_TPREL_G1: Set a MOV[NZ] immediate field to
-    // bits FFFF0000 of TPREL(S+A).
-  case AArch64::fixup_a64_movw_sabs_g1: {
-    // R_AARCH64_MOVW_SABS_G1: Sets MOV[NZ] immediate field using bits FFFF 0000
-    // of S+A (see notes below); check -2^32 <= S+A < 2^32. (notes say that we
-    // should convert between MOVN and MOVZ to achieve our goals).
-    int64_t Signed = Value;
-    assert(Signed >= -(1LL << 32) && Signed < (1LL << 32)
-           && "Out of range move wide fixup");
-    if (Signed >= 0) {
-      Value = ((Value >> 16) & 0xffff) << 5;
-      // Bit 30 converts the MOVN encoding into a MOVZ
-      Value |= 1 << 30;
-    } else {
-      Value = ((~Value >> 16) & 0xffff) << 5;
-    }
-    return Value;
-  }
-
-  case AArch64::fixup_a64_movw_dtprel_g2:
-    // R_AARCH64_TLSLD_MOVW_DTPREL_G2: Set a MOV[NZ] immediate field
-    // to bits FFFF 0000 0000 of DTPREL(S+A).
-  case AArch64::fixup_a64_movw_tprel_g2:
-    // R_AARCH64_TLSLE_MOVW_TPREL_G2: Set a MOV[NZ] immediate field to
-    // bits FFFF 0000 0000 of TPREL(S+A).
-  case AArch64::fixup_a64_movw_sabs_g2: {
-    // R_AARCH64_MOVW_SABS_G2: Sets MOV[NZ] immediate field using bits FFFF 0000
-    // 0000 of S+A (see notes below); check -2^48 <= S+A < 2^48. (notes say that
-    // we should convert between MOVN and MOVZ to achieve our goals).
-    int64_t Signed = Value;
-    assert(Signed >= -(1LL << 48) && Signed < (1LL << 48)
-           && "Out of range move wide fixup");
-    if (Signed >= 0) {
-      Value = ((Value >> 32) & 0xffff) << 5;
-      // Bit 30 converts the MOVN encoding into a MOVZ
-      Value |= 1 << 30;
-    } else {
-      Value = ((~Value >> 32) & 0xffff) << 5;
-    }
-    return Value;
-  }
-
-  case AArch64::fixup_a64_tstbr:
-    // R_AARCH64_TSTBR14: Sets the immediate field of a TBZ/TBNZ instruction to
-    // bits FFFC of S+A-P, checking -2^15 <= S+A-P < 2^15.
-    assert((int64_t)Value >= -(1LL << 15) &&
-           (int64_t)Value < (1LL << 15) && "Out of range TBZ/TBNZ fixup");
-    return (Value & 0xfffc) << (5 - 2);
-
-  case AArch64::fixup_a64_condbr:
-    // R_AARCH64_CONDBR19: Sets the immediate field of a conditional branch
-    // instruction to bits 1FFFFC of S+A-P, checking -2^20 <= S+A-P < 2^20.
-    assert((int64_t)Value >= -(1LL << 20) &&
-           (int64_t)Value < (1LL << 20) && "Out of range B.cond fixup");
-    return (Value & 0x1ffffc) << (5 - 2);
-
-  case AArch64::fixup_a64_uncondbr:
-    // R_AARCH64_JUMP26 same as below (except to a linker, possibly).
-  case AArch64::fixup_a64_call:
-    // R_AARCH64_CALL26: Sets a CALL immediate field to bits FFFFFFC of S+A-P,
-    // checking that -2^27 <= S+A-P < 2^27.
-    assert((int64_t)Value >= -(1LL << 27) &&
-           (int64_t)Value < (1LL << 27) && "Out of range branch fixup");
-    return (Value & 0xffffffc) >> 2;
-
-  case AArch64::fixup_a64_adr_gottprel_page:
-    // R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: Set an ADRP immediate field to bits
-    // 1FFFFF000 of Page(G(TPREL(S+A))) - Page(P); check -2^32 <= X < 2^32.
-  case AArch64::fixup_a64_tlsdesc_adr_page:
-    // R_AARCH64_TLSDESC_ADR_PAGE: Set an ADRP immediate field to bits 1FFFFF000
-    // of Page(G(TLSDESC(S+A))) - Page(P); check -2^32 <= X < 2^32.
-  case AArch64::fixup_a64_adr_prel_got_page:
-    // R_AARCH64_ADR_GOT_PAGE: Sets the immediate value of an ADRP to bits
-    // 1FFFFF000 of the operation, checking that -2^32 < Page(G(S))-Page(GOT) <
-    // 2^32.
-    assert((int64_t)Value >= -(1LL << 32) &&
-           (int64_t)Value < (1LL << 32) && "Out of range ADRP fixup");
-    return ADRImmBits((Value & 0x1fffff000ULL) >> 12);
-
-  case AArch64::fixup_a64_ld64_gottprel_lo12_nc:
-    // R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: Set an LD offset field to bits FF8
-    // of X, with no overflow check. Check that X & 7 == 0.
-  case AArch64::fixup_a64_tlsdesc_ld64_lo12_nc:
-    // R_AARCH64_TLSDESC_LD64_LO12_NC: Set an LD offset field to bits FF8 of
-    // G(TLSDESC(S+A)), with no overflow check. Check that X & 7 == 0.
-  case AArch64::fixup_a64_ld64_got_lo12_nc:
-    // R_AARCH64_LD64_GOT_LO12_NC: Sets the LD/ST immediate field to bits FF8 of
-    // G(S) with no overflow check. Check X & 7 == 0
-    assert(((int64_t)Value & 7) == 0 && "Misaligned fixup");
-    return (Value & 0xff8) << 7;
-
-  case AArch64::fixup_a64_tlsdesc_call:
-    // R_AARCH64_TLSDESC_CALL: For relaxation only.
-    return 0;
-  }
-}
-
-MCAsmBackend *
-llvm::createAArch64leAsmBackend(const Target &T, const MCRegisterInfo &MRI,
-                              StringRef TT, StringRef CPU) {
-  Triple TheTriple(TT);
-  return new ELFAArch64AsmBackend(T, TT, TheTriple.getOS(), /*isLittle*/ true);
-}
-
-MCAsmBackend *
-llvm::createAArch64beAsmBackend(const Target &T, const MCRegisterInfo &MRI,
-                              StringRef TT, StringRef CPU) {
-  Triple TheTriple(TT);
-  return new ELFAArch64AsmBackend(T, TT, TheTriple.getOS(), /*isLittle*/ false);
-}

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp (removed)
@@ -1,291 +0,0 @@
-//===-- AArch64ELFObjectWriter.cpp - AArch64 ELF Writer -------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file handles ELF-specific object emission, converting LLVM's internal
-// fixups into the appropriate relocations.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/AArch64FixupKinds.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "llvm/MC/MCELFObjectWriter.h"
-#include "llvm/MC/MCValue.h"
-#include "llvm/Support/ErrorHandling.h"
-
-using namespace llvm;
-
-namespace {
-class AArch64ELFObjectWriter : public MCELFObjectTargetWriter {
-public:
-  AArch64ELFObjectWriter(uint8_t OSABI, bool IsLittleEndian);
-
-  virtual ~AArch64ELFObjectWriter();
-
-protected:
-  unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
-                        bool IsPCRel) const override;
-
-private:
-};
-}
-
-AArch64ELFObjectWriter::AArch64ELFObjectWriter(uint8_t OSABI, bool IsLittleEndian)
-  : MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_AARCH64,
-                            /*HasRelocationAddend*/ true)
-{}
-
-AArch64ELFObjectWriter::~AArch64ELFObjectWriter()
-{}
-
-unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
-                                              const MCFixup &Fixup,
-                                              bool IsPCRel) const {
-  unsigned Type;
-  if (IsPCRel) {
-    switch ((unsigned)Fixup.getKind()) {
-    default:
-      llvm_unreachable("Unimplemented fixup -> relocation");
-    case FK_Data_8:
-      return ELF::R_AARCH64_PREL64;
-    case FK_Data_4:
-      return ELF::R_AARCH64_PREL32;
-    case FK_Data_2:
-      return ELF::R_AARCH64_PREL16;
-    case AArch64::fixup_a64_ld_prel:
-      Type = ELF::R_AARCH64_LD_PREL_LO19;
-      break;
-    case AArch64::fixup_a64_adr_prel:
-      Type = ELF::R_AARCH64_ADR_PREL_LO21;
-      break;
-    case AArch64::fixup_a64_adr_prel_page:
-      Type = ELF::R_AARCH64_ADR_PREL_PG_HI21;
-      break;
-    case AArch64::fixup_a64_adr_prel_got_page:
-      Type = ELF::R_AARCH64_ADR_GOT_PAGE;
-      break;
-    case AArch64::fixup_a64_tstbr:
-      Type = ELF::R_AARCH64_TSTBR14;
-      break;
-    case AArch64::fixup_a64_condbr:
-      Type = ELF::R_AARCH64_CONDBR19;
-      break;
-    case AArch64::fixup_a64_uncondbr:
-      Type = ELF::R_AARCH64_JUMP26;
-      break;
-    case AArch64::fixup_a64_call:
-      Type = ELF::R_AARCH64_CALL26;
-      break;
-    case AArch64::fixup_a64_adr_gottprel_page:
-      Type = ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
-      break;
-    case AArch64::fixup_a64_ld_gottprel_prel19:
-      Type =  ELF::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19;
-      break;
-    case AArch64::fixup_a64_tlsdesc_adr_page:
-      Type = ELF::R_AARCH64_TLSDESC_ADR_PAGE;
-      break;
-    }
-  } else {
-    switch ((unsigned)Fixup.getKind()) {
-    default:
-      llvm_unreachable("Unimplemented fixup -> relocation");
-    case FK_Data_8:
-      return ELF::R_AARCH64_ABS64;
-    case FK_Data_4:
-      return ELF::R_AARCH64_ABS32;
-    case FK_Data_2:
-      return ELF::R_AARCH64_ABS16;
-    case AArch64::fixup_a64_add_lo12:
-      Type = ELF::R_AARCH64_ADD_ABS_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ld64_got_lo12_nc:
-      Type = ELF::R_AARCH64_LD64_GOT_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst8_lo12:
-      Type = ELF::R_AARCH64_LDST8_ABS_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst16_lo12:
-      Type = ELF::R_AARCH64_LDST16_ABS_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst32_lo12:
-      Type = ELF::R_AARCH64_LDST32_ABS_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst64_lo12:
-      Type = ELF::R_AARCH64_LDST64_ABS_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst128_lo12:
-      Type = ELF::R_AARCH64_LDST128_ABS_LO12_NC;
-      break;
-    case AArch64::fixup_a64_movw_uabs_g0:
-      Type = ELF::R_AARCH64_MOVW_UABS_G0;
-      break;
-    case AArch64::fixup_a64_movw_uabs_g0_nc:
-      Type = ELF::R_AARCH64_MOVW_UABS_G0_NC;
-      break;
-    case AArch64::fixup_a64_movw_uabs_g1:
-      Type = ELF::R_AARCH64_MOVW_UABS_G1;
-      break;
-    case AArch64::fixup_a64_movw_uabs_g1_nc:
-      Type = ELF::R_AARCH64_MOVW_UABS_G1_NC;
-      break;
-    case AArch64::fixup_a64_movw_uabs_g2:
-      Type = ELF::R_AARCH64_MOVW_UABS_G2;
-      break;
-    case AArch64::fixup_a64_movw_uabs_g2_nc:
-      Type = ELF::R_AARCH64_MOVW_UABS_G2_NC;
-      break;
-    case AArch64::fixup_a64_movw_uabs_g3:
-      Type = ELF::R_AARCH64_MOVW_UABS_G3;
-      break;
-    case AArch64::fixup_a64_movw_sabs_g0:
-      Type = ELF::R_AARCH64_MOVW_SABS_G0;
-      break;
-    case AArch64::fixup_a64_movw_sabs_g1:
-      Type = ELF::R_AARCH64_MOVW_SABS_G1;
-      break;
-    case AArch64::fixup_a64_movw_sabs_g2:
-      Type = ELF::R_AARCH64_MOVW_SABS_G2;
-      break;
-
-    // TLS Local-dynamic block
-    case AArch64::fixup_a64_movw_dtprel_g2:
-      Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G2;
-      break;
-    case AArch64::fixup_a64_movw_dtprel_g1:
-      Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1;
-      break;
-    case AArch64::fixup_a64_movw_dtprel_g1_nc:
-      Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC;
-      break;
-    case AArch64::fixup_a64_movw_dtprel_g0:
-      Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0;
-      break;
-    case AArch64::fixup_a64_movw_dtprel_g0_nc:
-      Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC;
-      break;
-    case AArch64::fixup_a64_add_dtprel_hi12:
-      Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_HI12;
-      break;
-    case AArch64::fixup_a64_add_dtprel_lo12:
-      Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12;
-      break;
-    case AArch64::fixup_a64_add_dtprel_lo12_nc:
-      Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst8_dtprel_lo12:
-      Type = ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12;
-      break;
-    case AArch64::fixup_a64_ldst8_dtprel_lo12_nc:
-      Type = ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst16_dtprel_lo12:
-      Type = ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12;
-      break;
-    case AArch64::fixup_a64_ldst16_dtprel_lo12_nc:
-      Type = ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst32_dtprel_lo12:
-      Type = ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12;
-      break;
-    case AArch64::fixup_a64_ldst32_dtprel_lo12_nc:
-      Type = ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst64_dtprel_lo12:
-      Type = ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12;
-      break;
-    case AArch64::fixup_a64_ldst64_dtprel_lo12_nc:
-      Type = ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC;
-      break;
-
-    // TLS initial-exec block
-    case AArch64::fixup_a64_movw_gottprel_g1:
-      Type = ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
-      break;
-    case AArch64::fixup_a64_movw_gottprel_g0_nc:
-      Type = ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
-      break;
-    case AArch64::fixup_a64_ld64_gottprel_lo12_nc:
-      Type = ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
-      break;
-
-    // TLS local-exec block
-    case AArch64::fixup_a64_movw_tprel_g2:
-      Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G2;
-      break;
-    case AArch64::fixup_a64_movw_tprel_g1:
-      Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1;
-      break;
-    case AArch64::fixup_a64_movw_tprel_g1_nc:
-      Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC;
-      break;
-    case AArch64::fixup_a64_movw_tprel_g0:
-      Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0;
-      break;
-    case AArch64::fixup_a64_movw_tprel_g0_nc:
-      Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC;
-      break;
-    case AArch64::fixup_a64_add_tprel_hi12:
-      Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_HI12;
-      break;
-    case AArch64::fixup_a64_add_tprel_lo12:
-      Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12;
-      break;
-    case AArch64::fixup_a64_add_tprel_lo12_nc:
-      Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst8_tprel_lo12:
-      Type = ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12;
-      break;
-    case AArch64::fixup_a64_ldst8_tprel_lo12_nc:
-      Type = ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst16_tprel_lo12:
-      Type = ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12;
-      break;
-    case AArch64::fixup_a64_ldst16_tprel_lo12_nc:
-      Type = ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst32_tprel_lo12:
-      Type = ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12;
-      break;
-    case AArch64::fixup_a64_ldst32_tprel_lo12_nc:
-      Type = ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC;
-      break;
-    case AArch64::fixup_a64_ldst64_tprel_lo12:
-      Type = ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12;
-      break;
-    case AArch64::fixup_a64_ldst64_tprel_lo12_nc:
-      Type = ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC;
-      break;
-
-    // TLS general-dynamic block
-    case AArch64::fixup_a64_tlsdesc_adr_page:
-      Type = ELF::R_AARCH64_TLSDESC_ADR_PAGE;
-      break;
-    case AArch64::fixup_a64_tlsdesc_ld64_lo12_nc:
-      Type = ELF::R_AARCH64_TLSDESC_LD64_LO12_NC;
-      break;
-    case AArch64::fixup_a64_tlsdesc_add_lo12_nc:
-      Type = ELF::R_AARCH64_TLSDESC_ADD_LO12_NC;
-      break;
-    case AArch64::fixup_a64_tlsdesc_call:
-      Type = ELF::R_AARCH64_TLSDESC_CALL;
-      break;
-    }
-  }
-
-  return Type;
-}
-
-MCObjectWriter *llvm::createAArch64ELFObjectWriter(raw_ostream &OS,
-                                                   uint8_t OSABI,
-                                                   bool IsLittleEndian) {
-  MCELFObjectTargetWriter *MOTW = new AArch64ELFObjectWriter(OSABI, IsLittleEndian);
-  return createELFObjectWriter(MOTW, OS,  IsLittleEndian);
-}

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp (removed)
@@ -1,161 +0,0 @@
-//===- lib/MC/AArch64ELFStreamer.cpp - ELF Object Output for AArch64 ------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file assembles .s files and emits AArch64 ELF .o object files. Different
-// from generic ELF streamer in emitting mapping symbols ($x and $d) to delimit
-// regions of data and code.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/MC/MCELFStreamer.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/MC/MCAsmBackend.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCCodeEmitter.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCELF.h"
-#include "llvm/MC/MCELFStreamer.h"
-#include "llvm/MC/MCELFSymbolFlags.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCObjectStreamer.h"
-#include "llvm/MC/MCSection.h"
-#include "llvm/MC/MCSectionELF.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCValue.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ELF.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-namespace {
-
-/// Extend the generic ELFStreamer class so that it can emit mapping symbols at
-/// the appropriate points in the object files. These symbols are defined in the
-/// AArch64 ELF ABI:
-///    infocenter.arm.com/help/topic/com.arm.doc.ihi0056a/IHI0056A_aaelf64.pdf
-///
-/// In brief: $x or $d should be emitted at the start of each contiguous region
-/// of A64 code or data in a section. In practice, this emission does not rely
-/// on explicit assembler directives but on inherent properties of the
-/// directives doing the emission (e.g. ".byte" is data, "add x0, x0, x0" an
-/// instruction).
-///
-/// As a result this system is orthogonal to the DataRegion infrastructure used
-/// by MachO. Beware!
-class AArch64ELFStreamer : public MCELFStreamer {
-public:
-  AArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS,
-                     MCCodeEmitter *Emitter)
-      : MCELFStreamer(Context, TAB, OS, Emitter), MappingSymbolCounter(0),
-        LastEMS(EMS_None) {}
-
-  ~AArch64ELFStreamer() {}
-
-  void ChangeSection(const MCSection *Section,
-                     const MCExpr *Subsection) override {
-    // We have to keep track of the mapping symbol state of any sections we
-    // use. Each one should start off as EMS_None, which is provided as the
-    // default constructor by DenseMap::lookup.
-    LastMappingSymbols[getPreviousSection().first] = LastEMS;
-    LastEMS = LastMappingSymbols.lookup(Section);
-
-    MCELFStreamer::ChangeSection(Section, Subsection);
-  }
-
-  /// This function is the one used to emit instruction data into the ELF
-  /// streamer. We override it to add the appropriate mapping symbol if
-  /// necessary.
-  void EmitInstruction(const MCInst& Inst,
-                       const MCSubtargetInfo &STI) override {
-    EmitA64MappingSymbol();
-    MCELFStreamer::EmitInstruction(Inst, STI);
-  }
-
-  /// This is one of the functions used to emit data into an ELF section, so the
-  /// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
-  /// if necessary.
-  void EmitBytes(StringRef Data) override {
-    EmitDataMappingSymbol();
-    MCELFStreamer::EmitBytes(Data);
-  }
-
-  /// This is one of the functions used to emit data into an ELF section, so the
-  /// AArch64 streamer overrides it to add the appropriate mapping symbol ($d)
-  /// if necessary.
-  void EmitValueImpl(const MCExpr *Value, unsigned Size,
-                     const SMLoc &Loc) override {
-    EmitDataMappingSymbol();
-    MCELFStreamer::EmitValueImpl(Value, Size, Loc);
-  }
-
-private:
-  enum ElfMappingSymbol {
-    EMS_None,
-    EMS_A64,
-    EMS_Data
-  };
-
-  void EmitDataMappingSymbol() {
-    if (LastEMS == EMS_Data) return;
-    EmitMappingSymbol("$d");
-    LastEMS = EMS_Data;
-  }
-
-  void EmitA64MappingSymbol() {
-    if (LastEMS == EMS_A64) return;
-    EmitMappingSymbol("$x");
-    LastEMS = EMS_A64;
-  }
-
-  void EmitMappingSymbol(StringRef Name) {
-    MCSymbol *Start = getContext().CreateTempSymbol();
-    EmitLabel(Start);
-
-    MCSymbol *Symbol =
-      getContext().GetOrCreateSymbol(Name + "." +
-                                     Twine(MappingSymbolCounter++));
-
-    MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
-    MCELF::SetType(SD, ELF::STT_NOTYPE);
-    MCELF::SetBinding(SD, ELF::STB_LOCAL);
-    SD.setExternal(false);
-    AssignSection(Symbol, getCurrentSection().first);
-
-    const MCExpr *Value = MCSymbolRefExpr::Create(Start, getContext());
-    Symbol->setVariableValue(Value);
-  }
-
-  int64_t MappingSymbolCounter;
-
-  DenseMap<const MCSection *, ElfMappingSymbol> LastMappingSymbols;
-  ElfMappingSymbol LastEMS;
-
-  /// @}
-};
-}
-
-namespace llvm {
-  MCELFStreamer* createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
-                                      raw_ostream &OS, MCCodeEmitter *Emitter,
-                                      bool RelaxAll, bool NoExecStack) {
-    AArch64ELFStreamer *S = new AArch64ELFStreamer(Context, TAB, OS, Emitter);
-    if (RelaxAll)
-      S->getAssembler().setRelaxAll(true);
-    if (NoExecStack)
-      S->getAssembler().setNoExecStack(true);
-    return S;
-  }
-}
-
-

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h (removed)
@@ -1,27 +0,0 @@
-//===-- AArch64ELFStreamer.h - ELF Streamer for AArch64 ---------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements ELF streamer information for the AArch64 backend.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_AARCH64_ELF_STREAMER_H
-#define LLVM_AARCH64_ELF_STREAMER_H
-
-#include "llvm/MC/MCELFStreamer.h"
-
-namespace llvm {
-
-  MCELFStreamer* createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
-                                          raw_ostream &OS,
-                                          MCCodeEmitter *Emitter,
-                                          bool RelaxAll, bool NoExecStack);
-}
-
-#endif // AArch64_ELF_STREAMER_H

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h (removed)
@@ -1,113 +0,0 @@
-//=- AArch64/AArch64FixupKinds.h - AArch64 Specific Fixup Entries -*- C++ -*-=//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the LLVM fixups applied to MCInsts in the AArch64
-// backend.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_AARCH64_AARCH64FIXUPKINDS_H
-#define LLVM_AARCH64_AARCH64FIXUPKINDS_H
-
-#include "llvm/MC/MCFixup.h"
-
-namespace llvm {
-  namespace AArch64 {
-    enum Fixups {
-      fixup_a64_ld_prel = FirstTargetFixupKind,
-      fixup_a64_adr_prel,
-      fixup_a64_adr_prel_page,
-
-      fixup_a64_add_lo12,
-
-      fixup_a64_ldst8_lo12,
-      fixup_a64_ldst16_lo12,
-      fixup_a64_ldst32_lo12,
-      fixup_a64_ldst64_lo12,
-      fixup_a64_ldst128_lo12,
-
-      fixup_a64_tstbr,
-      fixup_a64_condbr,
-      fixup_a64_uncondbr,
-      fixup_a64_call,
-
-      fixup_a64_movw_uabs_g0,
-      fixup_a64_movw_uabs_g0_nc,
-      fixup_a64_movw_uabs_g1,
-      fixup_a64_movw_uabs_g1_nc,
-      fixup_a64_movw_uabs_g2,
-      fixup_a64_movw_uabs_g2_nc,
-      fixup_a64_movw_uabs_g3,
-
-      fixup_a64_movw_sabs_g0,
-      fixup_a64_movw_sabs_g1,
-      fixup_a64_movw_sabs_g2,
-
-      fixup_a64_adr_prel_got_page,
-      fixup_a64_ld64_got_lo12_nc,
-
-      // Produce offsets relative to the module's dynamic TLS area.
-      fixup_a64_movw_dtprel_g2,
-      fixup_a64_movw_dtprel_g1,
-      fixup_a64_movw_dtprel_g1_nc,
-      fixup_a64_movw_dtprel_g0,
-      fixup_a64_movw_dtprel_g0_nc,
-      fixup_a64_add_dtprel_hi12,
-      fixup_a64_add_dtprel_lo12,
-      fixup_a64_add_dtprel_lo12_nc,
-      fixup_a64_ldst8_dtprel_lo12,
-      fixup_a64_ldst8_dtprel_lo12_nc,
-      fixup_a64_ldst16_dtprel_lo12,
-      fixup_a64_ldst16_dtprel_lo12_nc,
-      fixup_a64_ldst32_dtprel_lo12,
-      fixup_a64_ldst32_dtprel_lo12_nc,
-      fixup_a64_ldst64_dtprel_lo12,
-      fixup_a64_ldst64_dtprel_lo12_nc,
-
-      // Produce the GOT entry containing a variable's address in TLS's
-      // initial-exec mode.
-      fixup_a64_movw_gottprel_g1,
-      fixup_a64_movw_gottprel_g0_nc,
-      fixup_a64_adr_gottprel_page,
-      fixup_a64_ld64_gottprel_lo12_nc,
-      fixup_a64_ld_gottprel_prel19,
-
-      // Produce offsets relative to the thread pointer: TPIDR_EL0.
-      fixup_a64_movw_tprel_g2,
-      fixup_a64_movw_tprel_g1,
-      fixup_a64_movw_tprel_g1_nc,
-      fixup_a64_movw_tprel_g0,
-      fixup_a64_movw_tprel_g0_nc,
-      fixup_a64_add_tprel_hi12,
-      fixup_a64_add_tprel_lo12,
-      fixup_a64_add_tprel_lo12_nc,
-      fixup_a64_ldst8_tprel_lo12,
-      fixup_a64_ldst8_tprel_lo12_nc,
-      fixup_a64_ldst16_tprel_lo12,
-      fixup_a64_ldst16_tprel_lo12_nc,
-      fixup_a64_ldst32_tprel_lo12,
-      fixup_a64_ldst32_tprel_lo12_nc,
-      fixup_a64_ldst64_tprel_lo12,
-      fixup_a64_ldst64_tprel_lo12_nc,
-
-      // Produce the special fixups used by the general-dynamic TLS model.
-      fixup_a64_tlsdesc_adr_page,
-      fixup_a64_tlsdesc_ld64_lo12_nc,
-      fixup_a64_tlsdesc_add_lo12_nc,
-      fixup_a64_tlsdesc_call,
-
-
-      // Marker
-      LastTargetFixupKind,
-      NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
-    };
-  }
-}
-
-#endif

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp (removed)
@@ -1,46 +0,0 @@
-//===-- AArch64MCAsmInfo.cpp - AArch64 asm properties ---------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declarations of the AArch64MCAsmInfo properties.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AArch64MCAsmInfo.h"
-#include "llvm/ADT/Triple.h"
-
-using namespace llvm;
-
-AArch64ELFMCAsmInfo::AArch64ELFMCAsmInfo(StringRef TT) {
-  Triple TheTriple(TT);
-  if (TheTriple.getArch() == Triple::aarch64_be)
-    IsLittleEndian = false;
-
-  PointerSize = 8;
-
-  // ".comm align is in bytes but .align is pow-2."
-  AlignmentIsInBytes = false;
-
-  CommentString = "//";
-  Code32Directive = ".code\t32";
-
-  Data16bitsDirective = "\t.hword\t";
-  Data32bitsDirective = "\t.word\t";
-  Data64bitsDirective = "\t.xword\t";
-
-  HasLEB128 = true;
-  SupportsDebugInformation = true;
-
-  // Exceptions handling
-  ExceptionsType = ExceptionHandling::DwarfCFI;
-
-  UseIntegratedAssembler = true;
-}
-
-// Pin the vtable to this file.
-void AArch64ELFMCAsmInfo::anchor() {}

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h (removed)
@@ -1,29 +0,0 @@
-//==-- AArch64MCAsmInfo.h - AArch64 asm properties -------------*- C++ -*--===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declaration of the AArch64MCAsmInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_AARCH64TARGETASMINFO_H
-#define LLVM_AARCH64TARGETASMINFO_H
-
-#include "llvm/MC/MCAsmInfoELF.h"
-
-namespace llvm {
-
-struct AArch64ELFMCAsmInfo : public MCAsmInfoELF {
-  explicit AArch64ELFMCAsmInfo(StringRef TT);
-private:
-  void anchor() override;
-};
-
-} // namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp (removed)
@@ -1,613 +0,0 @@
-//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the AArch64MCCodeEmitter class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/AArch64FixupKinds.h"
-#include "MCTargetDesc/AArch64MCExpr.h"
-#include "MCTargetDesc/AArch64MCTargetDesc.h"
-#include "Utils/AArch64BaseInfo.h"
-#include "llvm/MC/MCCodeEmitter.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "mccodeemitter"
-
-namespace {
-class AArch64MCCodeEmitter : public MCCodeEmitter {
-  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
-  void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
-  MCContext &Ctx;
-
-public:
-  AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}
-
-  ~AArch64MCCodeEmitter() {}
-
-  unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
-                               SmallVectorImpl<MCFixup> &Fixups,
-                               const MCSubtargetInfo &STI) const;
-
-  unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
-                               SmallVectorImpl<MCFixup> &Fixups,
-                               const MCSubtargetInfo &STI) const;
-
-  template<int MemSize>
-  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
-                                    SmallVectorImpl<MCFixup> &Fixups,
-                                    const MCSubtargetInfo &STI) const {
-    return getOffsetUImm12OpValue(MI, OpIdx, Fixups, STI, MemSize);
-  }
-
-  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
-                                    SmallVectorImpl<MCFixup> &Fixups,
-                                    const MCSubtargetInfo &STI,
-                                    int MemSize) const;
-
-  unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
-                                   SmallVectorImpl<MCFixup> &Fixups,
-                                   const MCSubtargetInfo &STI) const;
-  unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
-                                   SmallVectorImpl<MCFixup> &Fixups,
-                                   const MCSubtargetInfo &STI) const;
-
-  unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
-                             SmallVectorImpl<MCFixup> &Fixups,
-                             const MCSubtargetInfo &STI) const;
-  unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
-                              SmallVectorImpl<MCFixup> &Fixups,
-                              const MCSubtargetInfo &STI) const;
-  unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
-                              SmallVectorImpl<MCFixup> &Fixups,
-                              const MCSubtargetInfo &STI) const;
-  unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
-                              SmallVectorImpl<MCFixup> &Fixups,
-                              const MCSubtargetInfo &STI) const;
-
-  unsigned getShiftLeftImm8(const MCInst &MI, unsigned Op,
-                            SmallVectorImpl<MCFixup> &Fixups,
-                            const MCSubtargetInfo &STI) const;
-  unsigned getShiftLeftImm16(const MCInst &MI, unsigned Op,
-                             SmallVectorImpl<MCFixup> &Fixups,
-                             const MCSubtargetInfo &STI) const;
-  unsigned getShiftLeftImm32(const MCInst &MI, unsigned Op,
-                             SmallVectorImpl<MCFixup> &Fixups,
-                             const MCSubtargetInfo &STI) const;
-  unsigned getShiftLeftImm64(const MCInst &MI, unsigned Op,
-                             SmallVectorImpl<MCFixup> &Fixups,
-                             const MCSubtargetInfo &STI) const;
-
-  // Labels are handled mostly the same way: a symbol is needed, and
-  // just gets some fixup attached.
-  template<AArch64::Fixups fixupDesired>
-  unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
-                           SmallVectorImpl<MCFixup> &Fixups,
-                           const MCSubtargetInfo &STI) const;
-
-  unsigned  getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
-                                   SmallVectorImpl<MCFixup> &Fixups,
-                                   const MCSubtargetInfo &STI) const;
-
-
-  unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
-                                 SmallVectorImpl<MCFixup> &Fixups,
-                                 const MCSubtargetInfo &STI) const;
-
-
-  unsigned getAddressWithFixup(const MCOperand &MO,
-                               unsigned FixupKind,
-                               SmallVectorImpl<MCFixup> &Fixups,
-                               const MCSubtargetInfo &STI) const;
-
-
-  // getBinaryCodeForInstr - TableGen'erated function for getting the
-  // binary encoding for an instruction.
-  uint64_t getBinaryCodeForInstr(const MCInst &MI,
-                                 SmallVectorImpl<MCFixup> &Fixups,
-                                 const MCSubtargetInfo &STI) const;
-
-  /// getMachineOpValue - Return binary encoding of operand. If the machine
-  /// operand requires relocation, record the relocation and return zero.
-  unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
-                             SmallVectorImpl<MCFixup> &Fixups,
-                             const MCSubtargetInfo &STI) const;
-
-
-  void EmitByte(unsigned char C, raw_ostream &OS) const {
-    OS << (char)C;
-  }
-
-  void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
-    // Output the constant in little endian byte order.
-    for (unsigned i = 0; i != 4; ++i) {
-      EmitByte(Val & 0xff, OS);
-      Val >>= 8;
-    }
-  }
-
-
-  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
-                         SmallVectorImpl<MCFixup> &Fixups,
-                         const MCSubtargetInfo &STI) const override;
-
-  template<int hasRs, int hasRt2> unsigned
-  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
-                        const MCSubtargetInfo &STI) const;
-
-  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
-                   const MCSubtargetInfo &STI) const;
-
-  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
-                      const MCSubtargetInfo &STI) const;
-
-
-};
-
-} // end anonymous namespace
-
-unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
-                                       unsigned FixupKind,
-                                       SmallVectorImpl<MCFixup> &Fixups,
-                                       const MCSubtargetInfo &STI) const {
-  if (!MO.isExpr()) {
-    // This can occur for manually decoded or constructed MCInsts, but neither
-    // the assembly-parser nor instruction selection will currently produce an
-    // MCInst that's not a symbol reference.
-    assert(MO.isImm() && "Unexpected address requested");
-    return MO.getImm();
-  }
-
-  const MCExpr *Expr = MO.getExpr();
-  MCFixupKind Kind = MCFixupKind(FixupKind);
-  Fixups.push_back(MCFixup::Create(0, Expr, Kind));
-
-  return 0;
-}
-
-unsigned AArch64MCCodeEmitter::
-getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
-                       SmallVectorImpl<MCFixup> &Fixups,
-                       const MCSubtargetInfo &STI,
-                       int MemSize) const {
-  const MCOperand &ImmOp = MI.getOperand(OpIdx);
-  if (ImmOp.isImm())
-    return ImmOp.getImm();
-
-  assert(ImmOp.isExpr() && "Unexpected operand type");
-  const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
-  unsigned FixupKind;
-
-
-  switch (Expr->getKind()) {
-  default: llvm_unreachable("Unexpected operand modifier");
-  case AArch64MCExpr::VK_AARCH64_LO12: {
-    static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
-                                             AArch64::fixup_a64_ldst16_lo12,
-                                             AArch64::fixup_a64_ldst32_lo12,
-                                             AArch64::fixup_a64_ldst64_lo12,
-                                AArch64::fixup_a64_ldst128_lo12 };
-    assert(MemSize <= 16 && "Invalid fixup for operation");
-    FixupKind = FixupsBySize[Log2_32(MemSize)];
-    break;
-  }
-  case AArch64MCExpr::VK_AARCH64_GOT_LO12:
-    assert(MemSize == 8 && "Invalid fixup for operation");
-    FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
-    break;
-  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:  {
-    static const unsigned FixupsBySize[] = {
-      AArch64::fixup_a64_ldst8_dtprel_lo12,
-      AArch64::fixup_a64_ldst16_dtprel_lo12,
-      AArch64::fixup_a64_ldst32_dtprel_lo12,
-      AArch64::fixup_a64_ldst64_dtprel_lo12
-    };
-    assert(MemSize <= 8 && "Invalid fixup for operation");
-    FixupKind = FixupsBySize[Log2_32(MemSize)];
-    break;
-  }
-  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
-    static const unsigned FixupsBySize[] = {
-      AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
-      AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
-      AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
-      AArch64::fixup_a64_ldst64_dtprel_lo12_nc
-    };
-    assert(MemSize <= 8 && "Invalid fixup for operation");
-    FixupKind = FixupsBySize[Log2_32(MemSize)];
-    break;
-  }
-  case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
-    assert(MemSize == 8 && "Invalid fixup for operation");
-    FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
-    break;
-  case AArch64MCExpr::VK_AARCH64_TPREL_LO12:{
-    static const unsigned FixupsBySize[] = {
-      AArch64::fixup_a64_ldst8_tprel_lo12,
-      AArch64::fixup_a64_ldst16_tprel_lo12,
-      AArch64::fixup_a64_ldst32_tprel_lo12,
-      AArch64::fixup_a64_ldst64_tprel_lo12
-    };
-    assert(MemSize <= 8 && "Invalid fixup for operation");
-    FixupKind = FixupsBySize[Log2_32(MemSize)];
-    break;
-  }
-  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
-    static const unsigned FixupsBySize[] = {
-      AArch64::fixup_a64_ldst8_tprel_lo12_nc,
-      AArch64::fixup_a64_ldst16_tprel_lo12_nc,
-      AArch64::fixup_a64_ldst32_tprel_lo12_nc,
-      AArch64::fixup_a64_ldst64_tprel_lo12_nc
-    };
-    assert(MemSize <= 8 && "Invalid fixup for operation");
-    FixupKind = FixupsBySize[Log2_32(MemSize)];
-    break;
-  }
-  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
-    assert(MemSize == 8 && "Invalid fixup for operation");
-    FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
-    break;
-  }
-
-  return getAddressWithFixup(ImmOp, FixupKind, Fixups, STI);
-}
-
-unsigned
-AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
-                                       SmallVectorImpl<MCFixup> &Fixups,
-                                       const MCSubtargetInfo &STI) const {
-  const MCOperand &MO = MI.getOperand(OpIdx);
-  if (MO.isImm())
-    return static_cast<unsigned>(MO.getImm());
-
-  assert(MO.isExpr());
-
-  unsigned FixupKind = 0;
-  switch(cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
-  default: llvm_unreachable("Invalid expression modifier");
-  case AArch64MCExpr::VK_AARCH64_LO12:
-    FixupKind = AArch64::fixup_a64_add_lo12; break;
-  case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
-    FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
-  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
-    FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
-  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
-    FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
-  case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
-    FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
-  case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
-    FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
-  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
-    FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
-  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
-    FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
-  }
-
-  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
-}
-
-unsigned
-AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
-                                       SmallVectorImpl<MCFixup> &Fixups,
-                                       const MCSubtargetInfo &STI) const {
-
-  const MCOperand &MO = MI.getOperand(OpIdx);
-  if (MO.isImm())
-    return static_cast<unsigned>(MO.getImm());
-
-  assert(MO.isExpr());
-
-  unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
-  if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
-    Modifier = Expr->getKind();
-
-  unsigned FixupKind = 0;
-  switch(Modifier) {
-  case AArch64MCExpr::VK_AARCH64_None:
-    FixupKind = AArch64::fixup_a64_adr_prel_page;
-    break;
-  case AArch64MCExpr::VK_AARCH64_GOT:
-    FixupKind = AArch64::fixup_a64_adr_prel_got_page;
-    break;
-  case AArch64MCExpr::VK_AARCH64_GOTTPREL:
-    FixupKind = AArch64::fixup_a64_adr_gottprel_page;
-    break;
-  case AArch64MCExpr::VK_AARCH64_TLSDESC:
-    FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
-    break;
-  default:
-    llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
-  }
-
-  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
-}
-
-unsigned
-AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
-                                       SmallVectorImpl<MCFixup> &Fixups,
-                                       const MCSubtargetInfo &STI) const {
-
-  const MCOperand &MO = MI.getOperand(OpIdx);
-  assert(MO.isImm() && "Only immediate expected for shift");
-
-  return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
-}
-
-unsigned
-AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
-                                       SmallVectorImpl<MCFixup> &Fixups,
-                                       const MCSubtargetInfo &STI) const {
-
-  const MCOperand &MO = MI.getOperand(OpIdx);
-  assert(MO.isImm() && "Only immediate expected for shift");
-
-  return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
-}
-
-unsigned AArch64MCCodeEmitter::getShiftRightImm8(
-    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
-  return 8 - MI.getOperand(Op).getImm();
-}
-
-unsigned AArch64MCCodeEmitter::getShiftRightImm16(
-    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
-  return 16 - MI.getOperand(Op).getImm();
-}
-
-unsigned AArch64MCCodeEmitter::getShiftRightImm32(
-    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
-  return 32 - MI.getOperand(Op).getImm();
-}
-
-unsigned AArch64MCCodeEmitter::getShiftRightImm64(
-    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
-  return 64 - MI.getOperand(Op).getImm();
-}
-
-unsigned AArch64MCCodeEmitter::getShiftLeftImm8(
-    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
-  return MI.getOperand(Op).getImm() - 8;
-}
-
-unsigned AArch64MCCodeEmitter::getShiftLeftImm16(
-    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
-  return MI.getOperand(Op).getImm() - 16;
-}
-
-unsigned AArch64MCCodeEmitter::getShiftLeftImm32(
-    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
-  return MI.getOperand(Op).getImm() - 32;
-}
-
-unsigned AArch64MCCodeEmitter::getShiftLeftImm64(
-    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
-  return MI.getOperand(Op).getImm() - 64;
-}
-
-template<AArch64::Fixups fixupDesired> unsigned
-AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
-                                      unsigned OpIdx,
-                                      SmallVectorImpl<MCFixup> &Fixups,
-                                      const MCSubtargetInfo &STI) const {
-  const MCOperand &MO = MI.getOperand(OpIdx);
-
-  if (MO.isExpr())
-    return getAddressWithFixup(MO, fixupDesired, Fixups, STI);
-
-  assert(MO.isImm());
-  return MO.getImm();
-}
-
-unsigned
-AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
-                                       unsigned OpIdx,
-                                       SmallVectorImpl<MCFixup> &Fixups,
-                                       const MCSubtargetInfo &STI) const {
-  const MCOperand &MO = MI.getOperand(OpIdx);
-
-  if (MO.isImm())
-    return MO.getImm();
-
-  assert(MO.isExpr());
-
-  unsigned FixupKind;
-  if (isa<AArch64MCExpr>(MO.getExpr())) {
-    assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind()
-           == AArch64MCExpr::VK_AARCH64_GOTTPREL
-           && "Invalid symbol modifier for literal load");
-    FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
-  } else {
-    FixupKind = AArch64::fixup_a64_ld_prel;
-  }
-
-  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
-}
-
-
-unsigned
-AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
-                                       const MCOperand &MO,
-                                       SmallVectorImpl<MCFixup> &Fixups,
-                                       const MCSubtargetInfo &STI) const {
-  if (MO.isReg()) {
-    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
-  } else if (MO.isImm()) {
-    return static_cast<unsigned>(MO.getImm());
-  }
-
-  llvm_unreachable("Unable to encode MCOperand!");
-  return 0;
-}
-
-unsigned
-AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
-                                       SmallVectorImpl<MCFixup> &Fixups,
-                                       const MCSubtargetInfo &STI) const {
-  const MCOperand &UImm16MO = MI.getOperand(OpIdx);
-  const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);
-
-  unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;
-
-  if (UImm16MO.isImm()) {
-    Result |= UImm16MO.getImm();
-    return Result;
-  }
-
-  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
-  AArch64::Fixups requestedFixup;
-  switch (A64E->getKind()) {
-  default: llvm_unreachable("unexpected expression modifier");
-  case AArch64MCExpr::VK_AARCH64_ABS_G0:
-    requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
-  case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
-    requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
-  case AArch64MCExpr::VK_AARCH64_ABS_G1:
-    requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
-  case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
-    requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
-  case AArch64MCExpr::VK_AARCH64_ABS_G2:
-    requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
-  case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
-    requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
-  case AArch64MCExpr::VK_AARCH64_ABS_G3:
-    requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
-  case AArch64MCExpr::VK_AARCH64_SABS_G0:
-    requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
-  case AArch64MCExpr::VK_AARCH64_SABS_G1:
-    requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
-  case AArch64MCExpr::VK_AARCH64_SABS_G2:
-    requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
-  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
-    requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
-  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
-    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
-  case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
-    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
-  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
-    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
-  case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
-    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
-  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
-    requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
-  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
-    requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
-  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
-    requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
-  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
-    requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
-  case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
-    requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
-  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
-    requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
-  case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
-    requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
-  }
-
-  return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups, STI);
-}
-
-template<int hasRs, int hasRt2> unsigned
-AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
-                                            unsigned EncodedValue,
-                                            const MCSubtargetInfo &STI) const {
-  if (!hasRs) EncodedValue |= 0x001F0000;
-  if (!hasRt2) EncodedValue |= 0x00007C00;
-
-  return EncodedValue;
-}
-
-unsigned
-AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
-                              const MCSubtargetInfo &STI) const {
-  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
-  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
-  // job to ensure that any bits possibly affected by this are 0. This means we
-  // must zero out bit 30 (essentially emitting a MOVN).
-  MCOperand UImm16MO = MI.getOperand(1);
-
-  // Nothing to do if there's no fixup.
-  if (UImm16MO.isImm())
-    return EncodedValue;
-
-  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
-  switch (A64E->getKind()) {
-  case AArch64MCExpr::VK_AARCH64_SABS_G0:
-  case AArch64MCExpr::VK_AARCH64_SABS_G1:
-  case AArch64MCExpr::VK_AARCH64_SABS_G2:
-  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
-  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
-  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
-  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
-  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
-  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
-  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
-    return EncodedValue & ~(1u << 30);
-  default:
-    // Nothing to do for an unsigned fixup.
-    return EncodedValue;
-  }
-
-  llvm_unreachable("Should have returned by now");
-}
-
-unsigned
-AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
-                                 unsigned EncodedValue,
-                                 const MCSubtargetInfo &STI) const {
-  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
-  // (i.e. all bits 1) but is ignored by the processor.
-  EncodedValue |= 0x1f << 10;
-  return EncodedValue;
-}
-
-MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
-                                                const MCRegisterInfo &MRI,
-                                                const MCSubtargetInfo &STI,
-                                                MCContext &Ctx) {
-  return new AArch64MCCodeEmitter(Ctx);
-}
-
-void AArch64MCCodeEmitter::
-EncodeInstruction(const MCInst &MI, raw_ostream &OS,
-                  SmallVectorImpl<MCFixup> &Fixups,
-                  const MCSubtargetInfo &STI) const {
-  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
-    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
-    // following (BLR) instruction. It doesn't emit any code itself so it
-    // doesn't go through the normal TableGenerated channels.
-    MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
-    const MCExpr *Expr;
-    Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
-    Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
-    return;
-  }
-
-  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
-
-  EmitInstruction(Binary, OS);
-}
-
-
-#include "AArch64GenMCCodeEmitter.inc"

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp (removed)
@@ -1,179 +0,0 @@
-//===-- AArch64MCExpr.cpp - AArch64 specific MC expression classes --------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the implementation of the assembly expression modifiers
-// accepted by the AArch64 architecture (e.g. ":lo12:", ":gottprel_g1:", ...).
-//
-//===----------------------------------------------------------------------===//
-
-#include "AArch64MCExpr.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCELF.h"
-#include "llvm/Object/ELF.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "aarch64mcexpr"
-
-const AArch64MCExpr*
-AArch64MCExpr::Create(VariantKind Kind, const MCExpr *Expr,
-                      MCContext &Ctx) {
-  return new (Ctx) AArch64MCExpr(Kind, Expr);
-}
-
-void AArch64MCExpr::PrintImpl(raw_ostream &OS) const {
-  switch (Kind) {
-  default: llvm_unreachable("Invalid kind!");
-  case VK_AARCH64_GOT:              OS << ":got:"; break;
-  case VK_AARCH64_GOT_LO12:         OS << ":got_lo12:"; break;
-  case VK_AARCH64_LO12:             OS << ":lo12:"; break;
-  case VK_AARCH64_ABS_G0:           OS << ":abs_g0:"; break;
-  case VK_AARCH64_ABS_G0_NC:        OS << ":abs_g0_nc:"; break;
-  case VK_AARCH64_ABS_G1:           OS << ":abs_g1:"; break;
-  case VK_AARCH64_ABS_G1_NC:        OS << ":abs_g1_nc:"; break;
-  case VK_AARCH64_ABS_G2:           OS << ":abs_g2:"; break;
-  case VK_AARCH64_ABS_G2_NC:        OS << ":abs_g2_nc:"; break;
-  case VK_AARCH64_ABS_G3:           OS << ":abs_g3:"; break;
-  case VK_AARCH64_SABS_G0:          OS << ":abs_g0_s:"; break;
-  case VK_AARCH64_SABS_G1:          OS << ":abs_g1_s:"; break;
-  case VK_AARCH64_SABS_G2:          OS << ":abs_g2_s:"; break;
-  case VK_AARCH64_DTPREL_G2:        OS << ":dtprel_g2:"; break;
-  case VK_AARCH64_DTPREL_G1:        OS << ":dtprel_g1:"; break;
-  case VK_AARCH64_DTPREL_G1_NC:     OS << ":dtprel_g1_nc:"; break;
-  case VK_AARCH64_DTPREL_G0:        OS << ":dtprel_g0:"; break;
-  case VK_AARCH64_DTPREL_G0_NC:     OS << ":dtprel_g0_nc:"; break;
-  case VK_AARCH64_DTPREL_HI12:      OS << ":dtprel_hi12:"; break;
-  case VK_AARCH64_DTPREL_LO12:      OS << ":dtprel_lo12:"; break;
-  case VK_AARCH64_DTPREL_LO12_NC:   OS << ":dtprel_lo12_nc:"; break;
-  case VK_AARCH64_GOTTPREL_G1:      OS << ":gottprel_g1:"; break;
-  case VK_AARCH64_GOTTPREL_G0_NC:   OS << ":gottprel_g0_nc:"; break;
-  case VK_AARCH64_GOTTPREL:         OS << ":gottprel:"; break;
-  case VK_AARCH64_GOTTPREL_LO12:    OS << ":gottprel_lo12:"; break;
-  case VK_AARCH64_TPREL_G2:         OS << ":tprel_g2:"; break;
-  case VK_AARCH64_TPREL_G1:         OS << ":tprel_g1:"; break;
-  case VK_AARCH64_TPREL_G1_NC:      OS << ":tprel_g1_nc:"; break;
-  case VK_AARCH64_TPREL_G0:         OS << ":tprel_g0:"; break;
-  case VK_AARCH64_TPREL_G0_NC:      OS << ":tprel_g0_nc:"; break;
-  case VK_AARCH64_TPREL_HI12:       OS << ":tprel_hi12:"; break;
-  case VK_AARCH64_TPREL_LO12:       OS << ":tprel_lo12:"; break;
-  case VK_AARCH64_TPREL_LO12_NC:    OS << ":tprel_lo12_nc:"; break;
-  case VK_AARCH64_TLSDESC:          OS << ":tlsdesc:"; break;
-  case VK_AARCH64_TLSDESC_LO12:     OS << ":tlsdesc_lo12:"; break;
-
-  }
-
-  const MCExpr *Expr = getSubExpr();
-  if (Expr->getKind() != MCExpr::SymbolRef)
-    OS << '(';
-  Expr->print(OS);
-  if (Expr->getKind() != MCExpr::SymbolRef)
-    OS << ')';
-}
-
-bool
-AArch64MCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
-                                         const MCAsmLayout *Layout) const {
-  return getSubExpr()->EvaluateAsRelocatable(Res, Layout);
-}
-
-static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) {
-  switch (Expr->getKind()) {
-  case MCExpr::Target:
-    llvm_unreachable("Can't handle nested target expression");
-    break;
-  case MCExpr::Constant:
-    break;
-
-  case MCExpr::Binary: {
-    const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr);
-    fixELFSymbolsInTLSFixupsImpl(BE->getLHS(), Asm);
-    fixELFSymbolsInTLSFixupsImpl(BE->getRHS(), Asm);
-    break;
-  }
-
-  case MCExpr::SymbolRef: {
-    // We're known to be under a TLS fixup, so any symbol should be
-    // modified. There should be only one.
-    const MCSymbolRefExpr &SymRef = *cast<MCSymbolRefExpr>(Expr);
-    MCSymbolData &SD = Asm.getOrCreateSymbolData(SymRef.getSymbol());
-    MCELF::SetType(SD, ELF::STT_TLS);
-    break;
-  }
-
-  case MCExpr::Unary:
-    fixELFSymbolsInTLSFixupsImpl(cast<MCUnaryExpr>(Expr)->getSubExpr(), Asm);
-    break;
-  }
-}
-
-void AArch64MCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
-  switch (getKind()) {
-  default:
-    return;
-  case VK_AARCH64_DTPREL_G2:
-  case VK_AARCH64_DTPREL_G1:
-  case VK_AARCH64_DTPREL_G1_NC:
-  case VK_AARCH64_DTPREL_G0:
-  case VK_AARCH64_DTPREL_G0_NC:
-  case VK_AARCH64_DTPREL_HI12:
-  case VK_AARCH64_DTPREL_LO12:
-  case VK_AARCH64_DTPREL_LO12_NC:
-  case VK_AARCH64_GOTTPREL_G1:
-  case VK_AARCH64_GOTTPREL_G0_NC:
-  case VK_AARCH64_GOTTPREL:
-  case VK_AARCH64_GOTTPREL_LO12:
-  case VK_AARCH64_TPREL_G2:
-  case VK_AARCH64_TPREL_G1:
-  case VK_AARCH64_TPREL_G1_NC:
-  case VK_AARCH64_TPREL_G0:
-  case VK_AARCH64_TPREL_G0_NC:
-  case VK_AARCH64_TPREL_HI12:
-  case VK_AARCH64_TPREL_LO12:
-  case VK_AARCH64_TPREL_LO12_NC:
-  case VK_AARCH64_TLSDESC:
-  case VK_AARCH64_TLSDESC_LO12:
-    break;
-  }
-
-  fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm);
-}
-
-// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps
-// that method should be made public?
-// FIXME: really do above: now that two backends are using it.
-static void AddValueSymbolsImpl(const MCExpr *Value, MCAssembler *Asm) {
-  switch (Value->getKind()) {
-  case MCExpr::Target:
-    llvm_unreachable("Can't handle nested target expr!");
-    break;
-
-  case MCExpr::Constant:
-    break;
-
-  case MCExpr::Binary: {
-    const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
-    AddValueSymbolsImpl(BE->getLHS(), Asm);
-    AddValueSymbolsImpl(BE->getRHS(), Asm);
-    break;
-  }
-
-  case MCExpr::SymbolRef:
-    Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
-    break;
-
-  case MCExpr::Unary:
-    AddValueSymbolsImpl(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm);
-    break;
-  }
-}
-
-void AArch64MCExpr::AddValueSymbols(MCAssembler *Asm) const {
-  AddValueSymbolsImpl(getSubExpr(), Asm);
-}

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h (removed)
@@ -1,187 +0,0 @@
-//==- AArch64MCExpr.h - AArch64 specific MC expression classes --*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes AArch64-specific MCExprs, used for modifiers like
-// ":lo12:" or ":gottprel_g1:".
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_AARCH64MCEXPR_H
-#define LLVM_AARCH64MCEXPR_H
-
-#include "llvm/MC/MCExpr.h"
-
-namespace llvm {
-
-class AArch64MCExpr : public MCTargetExpr {
-public:
-  enum VariantKind {
-    VK_AARCH64_None,
-    VK_AARCH64_GOT,      // :got: modifier in assembly
-    VK_AARCH64_GOT_LO12, // :got_lo12:
-    VK_AARCH64_LO12,     // :lo12:
-
-    VK_AARCH64_ABS_G0, // :abs_g0:
-    VK_AARCH64_ABS_G0_NC, // :abs_g0_nc:
-    VK_AARCH64_ABS_G1,
-    VK_AARCH64_ABS_G1_NC,
-    VK_AARCH64_ABS_G2,
-    VK_AARCH64_ABS_G2_NC,
-    VK_AARCH64_ABS_G3,
-
-    VK_AARCH64_SABS_G0, // :abs_g0_s:
-    VK_AARCH64_SABS_G1,
-    VK_AARCH64_SABS_G2,
-
-    VK_AARCH64_DTPREL_G2, // :dtprel_g2:
-    VK_AARCH64_DTPREL_G1,
-    VK_AARCH64_DTPREL_G1_NC,
-    VK_AARCH64_DTPREL_G0,
-    VK_AARCH64_DTPREL_G0_NC,
-    VK_AARCH64_DTPREL_HI12,
-    VK_AARCH64_DTPREL_LO12,
-    VK_AARCH64_DTPREL_LO12_NC,
-
-    VK_AARCH64_GOTTPREL_G1, // :gottprel:
-    VK_AARCH64_GOTTPREL_G0_NC,
-    VK_AARCH64_GOTTPREL,
-    VK_AARCH64_GOTTPREL_LO12,
-
-    VK_AARCH64_TPREL_G2, // :tprel:
-    VK_AARCH64_TPREL_G1,
-    VK_AARCH64_TPREL_G1_NC,
-    VK_AARCH64_TPREL_G0,
-    VK_AARCH64_TPREL_G0_NC,
-    VK_AARCH64_TPREL_HI12,
-    VK_AARCH64_TPREL_LO12,
-    VK_AARCH64_TPREL_LO12_NC,
-
-    VK_AARCH64_TLSDESC, // :tlsdesc:
-    VK_AARCH64_TLSDESC_LO12
-  };
-
-private:
-  const VariantKind Kind;
-  const MCExpr *Expr;
-
-  explicit AArch64MCExpr(VariantKind _Kind, const MCExpr *_Expr)
-    : Kind(_Kind), Expr(_Expr) {}
-
-public:
-  /// @name Construction
-  /// @{
-
-  static const AArch64MCExpr *Create(VariantKind Kind, const MCExpr *Expr,
-                                     MCContext &Ctx);
-
-  static const AArch64MCExpr *CreateLo12(const MCExpr *Expr, MCContext &Ctx) {
-    return Create(VK_AARCH64_LO12, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateGOT(const MCExpr *Expr, MCContext &Ctx) {
-    return Create(VK_AARCH64_GOT, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateGOTLo12(const MCExpr *Expr,
-                                            MCContext &Ctx) {
-    return Create(VK_AARCH64_GOT_LO12, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateDTPREL_G1(const MCExpr *Expr,
-                                             MCContext &Ctx) {
-    return Create(VK_AARCH64_DTPREL_G1, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateDTPREL_G0_NC(const MCExpr *Expr,
-                                                MCContext &Ctx) {
-    return Create(VK_AARCH64_DTPREL_G0_NC, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateGOTTPREL(const MCExpr *Expr,
-                                             MCContext &Ctx) {
-    return Create(VK_AARCH64_GOTTPREL, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateGOTTPRELLo12(const MCExpr *Expr,
-                                                 MCContext &Ctx) {
-    return Create(VK_AARCH64_GOTTPREL_LO12, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateTLSDesc(const MCExpr *Expr,
-                                            MCContext &Ctx) {
-    return Create(VK_AARCH64_TLSDESC, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateTLSDescLo12(const MCExpr *Expr,
-                                                MCContext &Ctx) {
-    return Create(VK_AARCH64_TLSDESC_LO12, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateTPREL_G1(const MCExpr *Expr,
-                                             MCContext &Ctx) {
-    return Create(VK_AARCH64_TPREL_G1, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateTPREL_G0_NC(const MCExpr *Expr,
-                                                MCContext &Ctx) {
-    return Create(VK_AARCH64_TPREL_G0_NC, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateABS_G3(const MCExpr *Expr,
-                                           MCContext &Ctx) {
-    return Create(VK_AARCH64_ABS_G3, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateABS_G2_NC(const MCExpr *Expr,
-                                           MCContext &Ctx) {
-    return Create(VK_AARCH64_ABS_G2_NC, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateABS_G1_NC(const MCExpr *Expr,
-                                           MCContext &Ctx) {
-    return Create(VK_AARCH64_ABS_G1_NC, Expr, Ctx);
-  }
-
-  static const AArch64MCExpr *CreateABS_G0_NC(const MCExpr *Expr,
-                                           MCContext &Ctx) {
-    return Create(VK_AARCH64_ABS_G0_NC, Expr, Ctx);
-  }
-
-  /// @}
-  /// @name Accessors
-  /// @{
-
-  /// getOpcode - Get the kind of this expression.
-  VariantKind getKind() const { return Kind; }
-
-  /// getSubExpr - Get the child of this expression.
-  const MCExpr *getSubExpr() const { return Expr; }
-
-  /// @}
-
-  void PrintImpl(raw_ostream &OS) const override;
-  bool EvaluateAsRelocatableImpl(MCValue &Res,
-                                 const MCAsmLayout *Layout) const override;
-  void AddValueSymbols(MCAssembler *) const override;
-  const MCSection *FindAssociatedSection() const override {
-    return getSubExpr()->FindAssociatedSection();
-  }
-
-  void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override;
-
-  static bool classof(const MCExpr *E) {
-    return E->getKind() == MCExpr::Target;
-  }
-
-  static bool classof(const AArch64MCExpr *) { return true; }
-
-};
-} // end namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp (removed)
@@ -1,221 +0,0 @@
-//===-- AArch64MCTargetDesc.cpp - AArch64 Target Descriptions -------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides AArch64 specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AArch64MCTargetDesc.h"
-#include "AArch64ELFStreamer.h"
-#include "AArch64MCAsmInfo.h"
-#include "InstPrinter/AArch64InstPrinter.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/MC/MCCodeGenInfo.h"
-#include "llvm/MC/MCInstrAnalysis.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-
-using namespace llvm;
-
-#define GET_REGINFO_MC_DESC
-#include "AArch64GenRegisterInfo.inc"
-
-#define GET_INSTRINFO_MC_DESC
-#include "AArch64GenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_MC_DESC
-#include "AArch64GenSubtargetInfo.inc"
-
-MCSubtargetInfo *AArch64_MC::createAArch64MCSubtargetInfo(StringRef TT,
-                                                          StringRef CPU,
-                                                          StringRef FS) {
-  MCSubtargetInfo *X = new MCSubtargetInfo();
-  InitAArch64MCSubtargetInfo(X, TT, CPU, FS);
-  return X;
-}
-
-
-static MCInstrInfo *createAArch64MCInstrInfo() {
-  MCInstrInfo *X = new MCInstrInfo();
-  InitAArch64MCInstrInfo(X);
-  return X;
-}
-
-static MCRegisterInfo *createAArch64MCRegisterInfo(StringRef Triple) {
-  MCRegisterInfo *X = new MCRegisterInfo();
-  InitAArch64MCRegisterInfo(X, AArch64::X30);
-  return X;
-}
-
-static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI,
-                                         StringRef TT) {
-  Triple TheTriple(TT);
-
-  MCAsmInfo *MAI = new AArch64ELFMCAsmInfo(TT);
-  unsigned Reg = MRI.getDwarfRegNum(AArch64::XSP, true);
-  MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0);
-  MAI->addInitialFrameState(Inst);
-
-  return MAI;
-}
-
-static MCCodeGenInfo *createAArch64MCCodeGenInfo(StringRef TT, Reloc::Model RM,
-                                                 CodeModel::Model CM,
-                                                 CodeGenOpt::Level OL) {
-  MCCodeGenInfo *X = new MCCodeGenInfo();
-  if (RM == Reloc::Default || RM == Reloc::DynamicNoPIC) {
-    // On ELF platforms the default static relocation model has a smart enough
-    // linker to cope with referencing external symbols defined in a shared
-    // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
-    RM = Reloc::Static;
-  }
-
-  if (CM == CodeModel::Default)
-    CM = CodeModel::Small;
-  else if (CM == CodeModel::JITDefault) {
-    // The default MCJIT memory managers make no guarantees about where they can
-    // find an executable page; JITed code needs to be able to refer to globals
-    // no matter how far away they are.
-    CM = CodeModel::Large;
-  }
-
-  X->InitMCCodeGenInfo(RM, CM, OL);
-  return X;
-}
-
-static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
-                                    MCContext &Ctx, MCAsmBackend &MAB,
-                                    raw_ostream &OS,
-                                    MCCodeEmitter *Emitter,
-                                    const MCSubtargetInfo &STI,
-                                    bool RelaxAll,
-                                    bool NoExecStack) {
-  Triple TheTriple(TT);
-
-  return createAArch64ELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack);
-}
-
-
-static MCInstPrinter *createAArch64MCInstPrinter(const Target &T,
-                                                 unsigned SyntaxVariant,
-                                                 const MCAsmInfo &MAI,
-                                                 const MCInstrInfo &MII,
-                                                 const MCRegisterInfo &MRI,
-                                                 const MCSubtargetInfo &STI) {
-  if (SyntaxVariant == 0)
-    return new AArch64InstPrinter(MAI, MII, MRI, STI);
-  return nullptr;
-}
-
-namespace {
-
-class AArch64MCInstrAnalysis : public MCInstrAnalysis {
-public:
-  AArch64MCInstrAnalysis(const MCInstrInfo *Info) : MCInstrAnalysis(Info) {}
-
-  bool isUnconditionalBranch(const MCInst &Inst) const override {
-    if (Inst.getOpcode() == AArch64::Bcc
-        && Inst.getOperand(0).getImm() == A64CC::AL)
-      return true;
-    return MCInstrAnalysis::isUnconditionalBranch(Inst);
-  }
-
-  bool isConditionalBranch(const MCInst &Inst) const override {
-    if (Inst.getOpcode() == AArch64::Bcc
-        && Inst.getOperand(0).getImm() == A64CC::AL)
-      return false;
-    return MCInstrAnalysis::isConditionalBranch(Inst);
-  }
-
-  bool evaluateBranch(const MCInst &Inst, uint64_t Addr,
-                      uint64_t Size, uint64_t &Target) const override {
-    unsigned LblOperand = Inst.getOpcode() == AArch64::Bcc ? 1 : 0;
-    // FIXME: We only handle PCRel branches for now.
-    if (Info->get(Inst.getOpcode()).OpInfo[LblOperand].OperandType
-        != MCOI::OPERAND_PCREL)
-      return false;
-
-    int64_t Imm = Inst.getOperand(LblOperand).getImm();
-    Target = Addr + Imm;
-    return true;
-  }
-};
-
-}
-
-static MCInstrAnalysis *createAArch64MCInstrAnalysis(const MCInstrInfo *Info) {
-  return new AArch64MCInstrAnalysis(Info);
-}
-
-
-
-extern "C" void LLVMInitializeAArch64TargetMC() {
-  // Register the MC asm info.
-  RegisterMCAsmInfoFn A(TheAArch64leTarget, createAArch64MCAsmInfo);
-  RegisterMCAsmInfoFn B(TheAArch64beTarget, createAArch64MCAsmInfo);
-
-  // Register the MC codegen info.
-  TargetRegistry::RegisterMCCodeGenInfo(TheAArch64leTarget,
-                                        createAArch64MCCodeGenInfo);
-  TargetRegistry::RegisterMCCodeGenInfo(TheAArch64beTarget,
-                                        createAArch64MCCodeGenInfo);
-
-  // Register the MC instruction info.
-  TargetRegistry::RegisterMCInstrInfo(TheAArch64leTarget,
-                                      createAArch64MCInstrInfo);
-  TargetRegistry::RegisterMCInstrInfo(TheAArch64beTarget,
-                                      createAArch64MCInstrInfo);
-
-  // Register the MC register info.
-  TargetRegistry::RegisterMCRegInfo(TheAArch64leTarget,
-                                    createAArch64MCRegisterInfo);
-  TargetRegistry::RegisterMCRegInfo(TheAArch64beTarget,
-                                    createAArch64MCRegisterInfo);
-
-  // Register the MC subtarget info.
-  using AArch64_MC::createAArch64MCSubtargetInfo;
-  TargetRegistry::RegisterMCSubtargetInfo(TheAArch64leTarget,
-                                          createAArch64MCSubtargetInfo);
-  TargetRegistry::RegisterMCSubtargetInfo(TheAArch64beTarget,
-                                          createAArch64MCSubtargetInfo);
-
-  // Register the MC instruction analyzer.
-  TargetRegistry::RegisterMCInstrAnalysis(TheAArch64leTarget,
-                                          createAArch64MCInstrAnalysis);
-  TargetRegistry::RegisterMCInstrAnalysis(TheAArch64beTarget,
-                                          createAArch64MCInstrAnalysis);
-
-  // Register the MC Code Emitter
-  TargetRegistry::RegisterMCCodeEmitter(TheAArch64leTarget,
-                                        createAArch64MCCodeEmitter);
-  TargetRegistry::RegisterMCCodeEmitter(TheAArch64beTarget,
-                                        createAArch64MCCodeEmitter);
-
-  // Register the asm backend.
-  TargetRegistry::RegisterMCAsmBackend(TheAArch64leTarget,
-                                       createAArch64leAsmBackend);
-  TargetRegistry::RegisterMCAsmBackend(TheAArch64beTarget,
-                                       createAArch64beAsmBackend);
-
-  // Register the object streamer.
-  TargetRegistry::RegisterMCObjectStreamer(TheAArch64leTarget,
-                                           createMCStreamer);
-  TargetRegistry::RegisterMCObjectStreamer(TheAArch64beTarget,
-                                           createMCStreamer);
-
-  // Register the MCInstPrinter.
-  TargetRegistry::RegisterMCInstPrinter(TheAArch64leTarget,
-                                        createAArch64MCInstPrinter);
-  TargetRegistry::RegisterMCInstPrinter(TheAArch64beTarget,
-                                        createAArch64MCInstPrinter);
-}

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h (removed)
@@ -1,72 +0,0 @@
-//===-- AArch64MCTargetDesc.h - AArch64 Target Descriptions -----*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides AArch64 specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_AARCH64MCTARGETDESC_H
-#define LLVM_AARCH64MCTARGETDESC_H
-
-#include "llvm/Support/DataTypes.h"
-
-namespace llvm {
-class MCAsmBackend;
-class MCCodeEmitter;
-class MCContext;
-class MCInstrInfo;
-class MCObjectWriter;
-class MCRegisterInfo;
-class MCSubtargetInfo;
-class StringRef;
-class Target;
-class raw_ostream;
-
-extern Target TheAArch64leTarget;
-extern Target TheAArch64beTarget;
-
-namespace AArch64_MC {
-  MCSubtargetInfo *createAArch64MCSubtargetInfo(StringRef TT, StringRef CPU,
-                                                StringRef FS);
-}
-
-MCCodeEmitter *createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
-                                          const MCRegisterInfo &MRI,
-                                          const MCSubtargetInfo &STI,
-                                          MCContext &Ctx);
-
-MCObjectWriter *createAArch64ELFObjectWriter(raw_ostream &OS,
-                                             uint8_t OSABI,
-                                             bool IsLittleEndian);
-
-MCAsmBackend *createAArch64leAsmBackend(const Target &T,
-                                        const MCRegisterInfo &MRI,
-                                        StringRef TT, StringRef CPU);
-
-MCAsmBackend *createAArch64beAsmBackend(const Target &T,
-                                        const MCRegisterInfo &MRI,
-                                        StringRef TT, StringRef CPU);
-
-} // End llvm namespace
-
-// Defines symbolic names for AArch64 registers.  This defines a mapping from
-// register name to register number.
-//
-#define GET_REGINFO_ENUM
-#include "AArch64GenRegisterInfo.inc"
-
-// Defines symbolic names for the AArch64 instructions.
-//
-#define GET_INSTRINFO_ENUM
-#include "AArch64GenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_ENUM
-#include "AArch64GenSubtargetInfo.inc"
-
-#endif

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/CMakeLists.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/CMakeLists.txt (removed)
@@ -1,9 +0,0 @@
-add_llvm_library(LLVMAArch64Desc
-  AArch64AsmBackend.cpp
-  AArch64ELFObjectWriter.cpp
-  AArch64ELFStreamer.cpp
-  AArch64MCAsmInfo.cpp
-  AArch64MCCodeEmitter.cpp
-  AArch64MCExpr.cpp
-  AArch64MCTargetDesc.cpp
-  )

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt (removed)
@@ -1,24 +0,0 @@
-;===- ./lib/Target/AArch64/MCTargetDesc/LLVMBuild.txt ----------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = AArch64Desc
-parent = AArch64
-required_libraries = AArch64AsmPrinter AArch64Info MC Support
-add_to_library_groups = AArch64
-

Removed: llvm/trunk/lib/Target/AArch64/MCTargetDesc/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/MCTargetDesc/Makefile?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/Makefile (original)
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/Makefile (removed)
@@ -1,16 +0,0 @@
-##===- lib/Target/AArch64/TargetDesc/Makefile --------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64Desc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common

Removed: llvm/trunk/lib/Target/AArch64/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/Makefile?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/Makefile (original)
+++ llvm/trunk/lib/Target/AArch64/Makefile (removed)
@@ -1,30 +0,0 @@
-##===- lib/Target/AArch64/Makefile -------------------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMAArch64CodeGen
-TARGET = AArch64
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = AArch64GenAsmMatcher.inc \
-   AArch64GenAsmWriter.inc \
-   AArch64GenCallingConv.inc \
-   AArch64GenDAGISel.inc \
-   AArch64GenDisassemblerTables.inc \
-   AArch64GenInstrInfo.inc \
-   AArch64GenMCCodeEmitter.inc \
-   AArch64GenMCPseudoLowering.inc \
-   AArch64GenRegisterInfo.inc \
-   AArch64GenSubtargetInfo.inc
-
-DIRS = InstPrinter AsmParser Disassembler TargetInfo MCTargetDesc Utils
-
-include $(LEVEL)/Makefile.common
-
-

Removed: llvm/trunk/lib/Target/AArch64/README.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/README.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/README.txt (original)
+++ llvm/trunk/lib/Target/AArch64/README.txt (removed)
@@ -1,2 +0,0 @@
-This file will contain changes that need to be made before AArch64 can become an
-officially supported target. Currently a placeholder.

Removed: llvm/trunk/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/TargetInfo/AArch64TargetInfo.cpp (removed)
@@ -1,27 +0,0 @@
-//===-- AArch64TargetInfo.cpp - AArch64 Target Implementation -------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the key registration step for the architecture.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AArch64.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Support/TargetRegistry.h"
-using namespace llvm;
-
-Target llvm::TheAArch64leTarget;
-Target llvm::TheAArch64beTarget;
-
-extern "C" void LLVMInitializeAArch64TargetInfo() {
-    RegisterTarget<Triple::aarch64, /*HasJIT=*/true>
-    X(TheAArch64leTarget, "aarch64", "AArch64 (ARM 64-bit little endian target)");
-    RegisterTarget<Triple::aarch64_be, /*HasJIT=*/true>
-    Y(TheAArch64beTarget, "aarch64_be", "AArch64 (ARM 64-bit big endian target)");
-}

Removed: llvm/trunk/lib/Target/AArch64/TargetInfo/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/TargetInfo/CMakeLists.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/TargetInfo/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/AArch64/TargetInfo/CMakeLists.txt (removed)
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMAArch64Info
-  AArch64TargetInfo.cpp
-  )

Removed: llvm/trunk/lib/Target/AArch64/TargetInfo/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/TargetInfo/LLVMBuild.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/TargetInfo/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AArch64/TargetInfo/LLVMBuild.txt (removed)
@@ -1,23 +0,0 @@
-;===- ./lib/Target/AArch64/TargetInfo/LLVMBuild.txt ------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = AArch64Info
-parent = AArch64
-required_libraries = Support
-add_to_library_groups = AArch64

Removed: llvm/trunk/lib/Target/AArch64/TargetInfo/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/TargetInfo/Makefile?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/TargetInfo/Makefile (original)
+++ llvm/trunk/lib/Target/AArch64/TargetInfo/Makefile (removed)
@@ -1,15 +0,0 @@
-##===- lib/Target/AArch64/TargetInfo/Makefile --------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64Info
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common

Removed: llvm/trunk/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp (removed)
@@ -1,1173 +0,0 @@
-//===-- AArch64BaseInfo.cpp - AArch64 Base encoding information------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides basic encoding and assembly information for AArch64.
-//
-//===----------------------------------------------------------------------===//
-#include "AArch64BaseInfo.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/Regex.h"
-
-using namespace llvm;
-
-StringRef NamedImmMapper::toString(uint32_t Value, bool &Valid) const {
-  for (unsigned i = 0; i < NumPairs; ++i) {
-    if (Pairs[i].Value == Value) {
-      Valid = true;
-      return Pairs[i].Name;
-    }
-  }
-
-  Valid = false;
-  return StringRef();
-}
-
-uint32_t NamedImmMapper::fromString(StringRef Name, bool &Valid) const {
-  std::string LowerCaseName = Name.lower();
-  for (unsigned i = 0; i < NumPairs; ++i) {
-    if (Pairs[i].Name == LowerCaseName) {
-      Valid = true;
-      return Pairs[i].Value;
-    }
-  }
-
-  Valid = false;
-  return -1;
-}
-
-bool NamedImmMapper::validImm(uint32_t Value) const {
-  return Value < TooBigImm;
-}
-
-const NamedImmMapper::Mapping A64AT::ATMapper::ATPairs[] = {
-  {"s1e1r", S1E1R},
-  {"s1e2r", S1E2R},
-  {"s1e3r", S1E3R},
-  {"s1e1w", S1E1W},
-  {"s1e2w", S1E2W},
-  {"s1e3w", S1E3W},
-  {"s1e0r", S1E0R},
-  {"s1e0w", S1E0W},
-  {"s12e1r", S12E1R},
-  {"s12e1w", S12E1W},
-  {"s12e0r", S12E0R},
-  {"s12e0w", S12E0W},
-};
-
-A64AT::ATMapper::ATMapper()
-  : NamedImmMapper(ATPairs, 0) {}
-
-const NamedImmMapper::Mapping A64DB::DBarrierMapper::DBarrierPairs[] = {
-  {"oshld", OSHLD},
-  {"oshst", OSHST},
-  {"osh", OSH},
-  {"nshld", NSHLD},
-  {"nshst", NSHST},
-  {"nsh", NSH},
-  {"ishld", ISHLD},
-  {"ishst", ISHST},
-  {"ish", ISH},
-  {"ld", LD},
-  {"st", ST},
-  {"sy", SY}
-};
-
-A64DB::DBarrierMapper::DBarrierMapper()
-  : NamedImmMapper(DBarrierPairs, 16u) {}
-
-const NamedImmMapper::Mapping A64DC::DCMapper::DCPairs[] = {
-  {"zva", ZVA},
-  {"ivac", IVAC},
-  {"isw", ISW},
-  {"cvac", CVAC},
-  {"csw", CSW},
-  {"cvau", CVAU},
-  {"civac", CIVAC},
-  {"cisw", CISW}
-};
-
-A64DC::DCMapper::DCMapper()
-  : NamedImmMapper(DCPairs, 0) {}
-
-const NamedImmMapper::Mapping A64IC::ICMapper::ICPairs[] = {
-  {"ialluis",  IALLUIS},
-  {"iallu", IALLU},
-  {"ivau", IVAU}
-};
-
-A64IC::ICMapper::ICMapper()
-  : NamedImmMapper(ICPairs, 0) {}
-
-const NamedImmMapper::Mapping A64ISB::ISBMapper::ISBPairs[] = {
-  {"sy",  SY},
-};
-
-A64ISB::ISBMapper::ISBMapper()
-  : NamedImmMapper(ISBPairs, 16) {}
-
-const NamedImmMapper::Mapping A64PRFM::PRFMMapper::PRFMPairs[] = {
-  {"pldl1keep", PLDL1KEEP},
-  {"pldl1strm", PLDL1STRM},
-  {"pldl2keep", PLDL2KEEP},
-  {"pldl2strm", PLDL2STRM},
-  {"pldl3keep", PLDL3KEEP},
-  {"pldl3strm", PLDL3STRM},
-  {"plil1keep", PLIL1KEEP},
-  {"plil1strm", PLIL1STRM},
-  {"plil2keep", PLIL2KEEP},
-  {"plil2strm", PLIL2STRM},
-  {"plil3keep", PLIL3KEEP},
-  {"plil3strm", PLIL3STRM},
-  {"pstl1keep", PSTL1KEEP},
-  {"pstl1strm", PSTL1STRM},
-  {"pstl2keep", PSTL2KEEP},
-  {"pstl2strm", PSTL2STRM},
-  {"pstl3keep", PSTL3KEEP},
-  {"pstl3strm", PSTL3STRM}
-};
-
-A64PRFM::PRFMMapper::PRFMMapper()
-  : NamedImmMapper(PRFMPairs, 32) {}
-
-const NamedImmMapper::Mapping A64PState::PStateMapper::PStatePairs[] = {
-  {"spsel", SPSel},
-  {"daifset", DAIFSet},
-  {"daifclr", DAIFClr}
-};
-
-A64PState::PStateMapper::PStateMapper()
-  : NamedImmMapper(PStatePairs, 0) {}
-
-const NamedImmMapper::Mapping A64SysReg::MRSMapper::MRSPairs[] = {
-  {"mdccsr_el0", MDCCSR_EL0},
-  {"dbgdtrrx_el0", DBGDTRRX_EL0},
-  {"mdrar_el1", MDRAR_EL1},
-  {"oslsr_el1", OSLSR_EL1},
-  {"dbgauthstatus_el1", DBGAUTHSTATUS_EL1},
-  {"pmceid0_el0", PMCEID0_EL0},
-  {"pmceid1_el0", PMCEID1_EL0},
-  {"midr_el1", MIDR_EL1},
-  {"ccsidr_el1", CCSIDR_EL1},
-  {"clidr_el1", CLIDR_EL1},
-  {"ctr_el0", CTR_EL0},
-  {"mpidr_el1", MPIDR_EL1},
-  {"revidr_el1", REVIDR_EL1},
-  {"aidr_el1", AIDR_EL1},
-  {"dczid_el0", DCZID_EL0},
-  {"id_pfr0_el1", ID_PFR0_EL1},
-  {"id_pfr1_el1", ID_PFR1_EL1},
-  {"id_dfr0_el1", ID_DFR0_EL1},
-  {"id_afr0_el1", ID_AFR0_EL1},
-  {"id_mmfr0_el1", ID_MMFR0_EL1},
-  {"id_mmfr1_el1", ID_MMFR1_EL1},
-  {"id_mmfr2_el1", ID_MMFR2_EL1},
-  {"id_mmfr3_el1", ID_MMFR3_EL1},
-  {"id_isar0_el1", ID_ISAR0_EL1},
-  {"id_isar1_el1", ID_ISAR1_EL1},
-  {"id_isar2_el1", ID_ISAR2_EL1},
-  {"id_isar3_el1", ID_ISAR3_EL1},
-  {"id_isar4_el1", ID_ISAR4_EL1},
-  {"id_isar5_el1", ID_ISAR5_EL1},
-  {"id_aa64pfr0_el1", ID_AA64PFR0_EL1},
-  {"id_aa64pfr1_el1", ID_AA64PFR1_EL1},
-  {"id_aa64dfr0_el1", ID_AA64DFR0_EL1},
-  {"id_aa64dfr1_el1", ID_AA64DFR1_EL1},
-  {"id_aa64afr0_el1", ID_AA64AFR0_EL1},
-  {"id_aa64afr1_el1", ID_AA64AFR1_EL1},
-  {"id_aa64isar0_el1", ID_AA64ISAR0_EL1},
-  {"id_aa64isar1_el1", ID_AA64ISAR1_EL1},
-  {"id_aa64mmfr0_el1", ID_AA64MMFR0_EL1},
-  {"id_aa64mmfr1_el1", ID_AA64MMFR1_EL1},
-  {"mvfr0_el1", MVFR0_EL1},
-  {"mvfr1_el1", MVFR1_EL1},
-  {"mvfr2_el1", MVFR2_EL1},
-  {"rvbar_el1", RVBAR_EL1},
-  {"rvbar_el2", RVBAR_EL2},
-  {"rvbar_el3", RVBAR_EL3},
-  {"isr_el1", ISR_EL1},
-  {"cntpct_el0", CNTPCT_EL0},
-  {"cntvct_el0", CNTVCT_EL0},
-
-  // Trace registers
-  {"trcstatr", TRCSTATR},
-  {"trcidr8", TRCIDR8},
-  {"trcidr9", TRCIDR9},
-  {"trcidr10", TRCIDR10},
-  {"trcidr11", TRCIDR11},
-  {"trcidr12", TRCIDR12},
-  {"trcidr13", TRCIDR13},
-  {"trcidr0", TRCIDR0},
-  {"trcidr1", TRCIDR1},
-  {"trcidr2", TRCIDR2},
-  {"trcidr3", TRCIDR3},
-  {"trcidr4", TRCIDR4},
-  {"trcidr5", TRCIDR5},
-  {"trcidr6", TRCIDR6},
-  {"trcidr7", TRCIDR7},
-  {"trcoslsr", TRCOSLSR},
-  {"trcpdsr", TRCPDSR},
-  {"trcdevaff0", TRCDEVAFF0},
-  {"trcdevaff1", TRCDEVAFF1},
-  {"trclsr", TRCLSR},
-  {"trcauthstatus", TRCAUTHSTATUS},
-  {"trcdevarch", TRCDEVARCH},
-  {"trcdevid", TRCDEVID},
-  {"trcdevtype", TRCDEVTYPE},
-  {"trcpidr4", TRCPIDR4},
-  {"trcpidr5", TRCPIDR5},
-  {"trcpidr6", TRCPIDR6},
-  {"trcpidr7", TRCPIDR7},
-  {"trcpidr0", TRCPIDR0},
-  {"trcpidr1", TRCPIDR1},
-  {"trcpidr2", TRCPIDR2},
-  {"trcpidr3", TRCPIDR3},
-  {"trccidr0", TRCCIDR0},
-  {"trccidr1", TRCCIDR1},
-  {"trccidr2", TRCCIDR2},
-  {"trccidr3", TRCCIDR3},
-
-  // GICv3 registers
-  {"icc_iar1_el1", ICC_IAR1_EL1},
-  {"icc_iar0_el1", ICC_IAR0_EL1},
-  {"icc_hppir1_el1", ICC_HPPIR1_EL1},
-  {"icc_hppir0_el1", ICC_HPPIR0_EL1},
-  {"icc_rpr_el1", ICC_RPR_EL1},
-  {"ich_vtr_el2", ICH_VTR_EL2},
-  {"ich_eisr_el2", ICH_EISR_EL2},
-  {"ich_elsr_el2", ICH_ELSR_EL2}
-};
-
-A64SysReg::MRSMapper::MRSMapper() {
-    InstPairs = &MRSPairs[0];
-    NumInstPairs = llvm::array_lengthof(MRSPairs);
-}
-
-const NamedImmMapper::Mapping A64SysReg::MSRMapper::MSRPairs[] = {
-  {"dbgdtrtx_el0", DBGDTRTX_EL0},
-  {"oslar_el1", OSLAR_EL1},
-  {"pmswinc_el0", PMSWINC_EL0},
-
-  // Trace registers
-  {"trcoslar", TRCOSLAR},
-  {"trclar", TRCLAR},
-
-  // GICv3 registers
-  {"icc_eoir1_el1", ICC_EOIR1_EL1},
-  {"icc_eoir0_el1", ICC_EOIR0_EL1},
-  {"icc_dir_el1", ICC_DIR_EL1},
-  {"icc_sgi1r_el1", ICC_SGI1R_EL1},
-  {"icc_asgi1r_el1", ICC_ASGI1R_EL1},
-  {"icc_sgi0r_el1", ICC_SGI0R_EL1}
-};
-
-A64SysReg::MSRMapper::MSRMapper() {
-    InstPairs = &MSRPairs[0];
-    NumInstPairs = llvm::array_lengthof(MSRPairs);
-}
-
-
-const NamedImmMapper::Mapping A64SysReg::SysRegMapper::SysRegPairs[] = {
-  {"osdtrrx_el1", OSDTRRX_EL1},
-  {"osdtrtx_el1",  OSDTRTX_EL1},
-  {"teecr32_el1", TEECR32_EL1},
-  {"mdccint_el1", MDCCINT_EL1},
-  {"mdscr_el1", MDSCR_EL1},
-  {"dbgdtr_el0", DBGDTR_EL0},
-  {"oseccr_el1", OSECCR_EL1},
-  {"dbgvcr32_el2", DBGVCR32_EL2},
-  {"dbgbvr0_el1", DBGBVR0_EL1},
-  {"dbgbvr1_el1", DBGBVR1_EL1},
-  {"dbgbvr2_el1", DBGBVR2_EL1},
-  {"dbgbvr3_el1", DBGBVR3_EL1},
-  {"dbgbvr4_el1", DBGBVR4_EL1},
-  {"dbgbvr5_el1", DBGBVR5_EL1},
-  {"dbgbvr6_el1", DBGBVR6_EL1},
-  {"dbgbvr7_el1", DBGBVR7_EL1},
-  {"dbgbvr8_el1", DBGBVR8_EL1},
-  {"dbgbvr9_el1", DBGBVR9_EL1},
-  {"dbgbvr10_el1", DBGBVR10_EL1},
-  {"dbgbvr11_el1", DBGBVR11_EL1},
-  {"dbgbvr12_el1", DBGBVR12_EL1},
-  {"dbgbvr13_el1", DBGBVR13_EL1},
-  {"dbgbvr14_el1", DBGBVR14_EL1},
-  {"dbgbvr15_el1", DBGBVR15_EL1},
-  {"dbgbcr0_el1", DBGBCR0_EL1},
-  {"dbgbcr1_el1", DBGBCR1_EL1},
-  {"dbgbcr2_el1", DBGBCR2_EL1},
-  {"dbgbcr3_el1", DBGBCR3_EL1},
-  {"dbgbcr4_el1", DBGBCR4_EL1},
-  {"dbgbcr5_el1", DBGBCR5_EL1},
-  {"dbgbcr6_el1", DBGBCR6_EL1},
-  {"dbgbcr7_el1", DBGBCR7_EL1},
-  {"dbgbcr8_el1", DBGBCR8_EL1},
-  {"dbgbcr9_el1", DBGBCR9_EL1},
-  {"dbgbcr10_el1", DBGBCR10_EL1},
-  {"dbgbcr11_el1", DBGBCR11_EL1},
-  {"dbgbcr12_el1", DBGBCR12_EL1},
-  {"dbgbcr13_el1", DBGBCR13_EL1},
-  {"dbgbcr14_el1", DBGBCR14_EL1},
-  {"dbgbcr15_el1", DBGBCR15_EL1},
-  {"dbgwvr0_el1", DBGWVR0_EL1},
-  {"dbgwvr1_el1", DBGWVR1_EL1},
-  {"dbgwvr2_el1", DBGWVR2_EL1},
-  {"dbgwvr3_el1", DBGWVR3_EL1},
-  {"dbgwvr4_el1", DBGWVR4_EL1},
-  {"dbgwvr5_el1", DBGWVR5_EL1},
-  {"dbgwvr6_el1", DBGWVR6_EL1},
-  {"dbgwvr7_el1", DBGWVR7_EL1},
-  {"dbgwvr8_el1", DBGWVR8_EL1},
-  {"dbgwvr9_el1", DBGWVR9_EL1},
-  {"dbgwvr10_el1", DBGWVR10_EL1},
-  {"dbgwvr11_el1", DBGWVR11_EL1},
-  {"dbgwvr12_el1", DBGWVR12_EL1},
-  {"dbgwvr13_el1", DBGWVR13_EL1},
-  {"dbgwvr14_el1", DBGWVR14_EL1},
-  {"dbgwvr15_el1", DBGWVR15_EL1},
-  {"dbgwcr0_el1", DBGWCR0_EL1},
-  {"dbgwcr1_el1", DBGWCR1_EL1},
-  {"dbgwcr2_el1", DBGWCR2_EL1},
-  {"dbgwcr3_el1", DBGWCR3_EL1},
-  {"dbgwcr4_el1", DBGWCR4_EL1},
-  {"dbgwcr5_el1", DBGWCR5_EL1},
-  {"dbgwcr6_el1", DBGWCR6_EL1},
-  {"dbgwcr7_el1", DBGWCR7_EL1},
-  {"dbgwcr8_el1", DBGWCR8_EL1},
-  {"dbgwcr9_el1", DBGWCR9_EL1},
-  {"dbgwcr10_el1", DBGWCR10_EL1},
-  {"dbgwcr11_el1", DBGWCR11_EL1},
-  {"dbgwcr12_el1", DBGWCR12_EL1},
-  {"dbgwcr13_el1", DBGWCR13_EL1},
-  {"dbgwcr14_el1", DBGWCR14_EL1},
-  {"dbgwcr15_el1", DBGWCR15_EL1},
-  {"teehbr32_el1", TEEHBR32_EL1},
-  {"osdlr_el1", OSDLR_EL1},
-  {"dbgprcr_el1", DBGPRCR_EL1},
-  {"dbgclaimset_el1", DBGCLAIMSET_EL1},
-  {"dbgclaimclr_el1", DBGCLAIMCLR_EL1},
-  {"csselr_el1", CSSELR_EL1},
-  {"vpidr_el2", VPIDR_EL2},
-  {"vmpidr_el2", VMPIDR_EL2},
-  {"sctlr_el1", SCTLR_EL1},
-  {"sctlr_el2", SCTLR_EL2},
-  {"sctlr_el3", SCTLR_EL3},
-  {"actlr_el1", ACTLR_EL1},
-  {"actlr_el2", ACTLR_EL2},
-  {"actlr_el3", ACTLR_EL3},
-  {"cpacr_el1", CPACR_EL1},
-  {"hcr_el2", HCR_EL2},
-  {"scr_el3", SCR_EL3},
-  {"mdcr_el2", MDCR_EL2},
-  {"sder32_el3", SDER32_EL3},
-  {"cptr_el2", CPTR_EL2},
-  {"cptr_el3", CPTR_EL3},
-  {"hstr_el2", HSTR_EL2},
-  {"hacr_el2", HACR_EL2},
-  {"mdcr_el3", MDCR_EL3},
-  {"ttbr0_el1", TTBR0_EL1},
-  {"ttbr0_el2", TTBR0_EL2},
-  {"ttbr0_el3", TTBR0_EL3},
-  {"ttbr1_el1", TTBR1_EL1},
-  {"tcr_el1", TCR_EL1},
-  {"tcr_el2", TCR_EL2},
-  {"tcr_el3", TCR_EL3},
-  {"vttbr_el2", VTTBR_EL2},
-  {"vtcr_el2", VTCR_EL2},
-  {"dacr32_el2", DACR32_EL2},
-  {"spsr_el1", SPSR_EL1},
-  {"spsr_el2", SPSR_EL2},
-  {"spsr_el3", SPSR_EL3},
-  {"elr_el1", ELR_EL1},
-  {"elr_el2", ELR_EL2},
-  {"elr_el3", ELR_EL3},
-  {"sp_el0", SP_EL0},
-  {"sp_el1", SP_EL1},
-  {"sp_el2", SP_EL2},
-  {"spsel", SPSel},
-  {"nzcv", NZCV},
-  {"daif", DAIF},
-  {"currentel", CurrentEL},
-  {"spsr_irq", SPSR_irq},
-  {"spsr_abt", SPSR_abt},
-  {"spsr_und", SPSR_und},
-  {"spsr_fiq", SPSR_fiq},
-  {"fpcr", FPCR},
-  {"fpsr", FPSR},
-  {"dspsr_el0", DSPSR_EL0},
-  {"dlr_el0", DLR_EL0},
-  {"ifsr32_el2", IFSR32_EL2},
-  {"afsr0_el1", AFSR0_EL1},
-  {"afsr0_el2", AFSR0_EL2},
-  {"afsr0_el3", AFSR0_EL3},
-  {"afsr1_el1", AFSR1_EL1},
-  {"afsr1_el2", AFSR1_EL2},
-  {"afsr1_el3", AFSR1_EL3},
-  {"esr_el1", ESR_EL1},
-  {"esr_el2", ESR_EL2},
-  {"esr_el3", ESR_EL3},
-  {"fpexc32_el2", FPEXC32_EL2},
-  {"far_el1", FAR_EL1},
-  {"far_el2", FAR_EL2},
-  {"far_el3", FAR_EL3},
-  {"hpfar_el2", HPFAR_EL2},
-  {"par_el1", PAR_EL1},
-  {"pmcr_el0", PMCR_EL0},
-  {"pmcntenset_el0", PMCNTENSET_EL0},
-  {"pmcntenclr_el0", PMCNTENCLR_EL0},
-  {"pmovsclr_el0", PMOVSCLR_EL0},
-  {"pmselr_el0", PMSELR_EL0},
-  {"pmccntr_el0", PMCCNTR_EL0},
-  {"pmxevtyper_el0", PMXEVTYPER_EL0},
-  {"pmxevcntr_el0", PMXEVCNTR_EL0},
-  {"pmuserenr_el0", PMUSERENR_EL0},
-  {"pmintenset_el1", PMINTENSET_EL1},
-  {"pmintenclr_el1", PMINTENCLR_EL1},
-  {"pmovsset_el0", PMOVSSET_EL0},
-  {"mair_el1", MAIR_EL1},
-  {"mair_el2", MAIR_EL2},
-  {"mair_el3", MAIR_EL3},
-  {"amair_el1", AMAIR_EL1},
-  {"amair_el2", AMAIR_EL2},
-  {"amair_el3", AMAIR_EL3},
-  {"vbar_el1", VBAR_EL1},
-  {"vbar_el2", VBAR_EL2},
-  {"vbar_el3", VBAR_EL3},
-  {"rmr_el1", RMR_EL1},
-  {"rmr_el2", RMR_EL2},
-  {"rmr_el3", RMR_EL3},
-  {"contextidr_el1", CONTEXTIDR_EL1},
-  {"tpidr_el0", TPIDR_EL0},
-  {"tpidr_el2", TPIDR_EL2},
-  {"tpidr_el3", TPIDR_EL3},
-  {"tpidrro_el0", TPIDRRO_EL0},
-  {"tpidr_el1", TPIDR_EL1},
-  {"cntfrq_el0", CNTFRQ_EL0},
-  {"cntvoff_el2", CNTVOFF_EL2},
-  {"cntkctl_el1", CNTKCTL_EL1},
-  {"cnthctl_el2", CNTHCTL_EL2},
-  {"cntp_tval_el0", CNTP_TVAL_EL0},
-  {"cnthp_tval_el2", CNTHP_TVAL_EL2},
-  {"cntps_tval_el1", CNTPS_TVAL_EL1},
-  {"cntp_ctl_el0", CNTP_CTL_EL0},
-  {"cnthp_ctl_el2", CNTHP_CTL_EL2},
-  {"cntps_ctl_el1", CNTPS_CTL_EL1},
-  {"cntp_cval_el0", CNTP_CVAL_EL0},
-  {"cnthp_cval_el2", CNTHP_CVAL_EL2},
-  {"cntps_cval_el1", CNTPS_CVAL_EL1},
-  {"cntv_tval_el0", CNTV_TVAL_EL0},
-  {"cntv_ctl_el0", CNTV_CTL_EL0},
-  {"cntv_cval_el0", CNTV_CVAL_EL0},
-  {"pmevcntr0_el0", PMEVCNTR0_EL0},
-  {"pmevcntr1_el0", PMEVCNTR1_EL0},
-  {"pmevcntr2_el0", PMEVCNTR2_EL0},
-  {"pmevcntr3_el0", PMEVCNTR3_EL0},
-  {"pmevcntr4_el0", PMEVCNTR4_EL0},
-  {"pmevcntr5_el0", PMEVCNTR5_EL0},
-  {"pmevcntr6_el0", PMEVCNTR6_EL0},
-  {"pmevcntr7_el0", PMEVCNTR7_EL0},
-  {"pmevcntr8_el0", PMEVCNTR8_EL0},
-  {"pmevcntr9_el0", PMEVCNTR9_EL0},
-  {"pmevcntr10_el0", PMEVCNTR10_EL0},
-  {"pmevcntr11_el0", PMEVCNTR11_EL0},
-  {"pmevcntr12_el0", PMEVCNTR12_EL0},
-  {"pmevcntr13_el0", PMEVCNTR13_EL0},
-  {"pmevcntr14_el0", PMEVCNTR14_EL0},
-  {"pmevcntr15_el0", PMEVCNTR15_EL0},
-  {"pmevcntr16_el0", PMEVCNTR16_EL0},
-  {"pmevcntr17_el0", PMEVCNTR17_EL0},
-  {"pmevcntr18_el0", PMEVCNTR18_EL0},
-  {"pmevcntr19_el0", PMEVCNTR19_EL0},
-  {"pmevcntr20_el0", PMEVCNTR20_EL0},
-  {"pmevcntr21_el0", PMEVCNTR21_EL0},
-  {"pmevcntr22_el0", PMEVCNTR22_EL0},
-  {"pmevcntr23_el0", PMEVCNTR23_EL0},
-  {"pmevcntr24_el0", PMEVCNTR24_EL0},
-  {"pmevcntr25_el0", PMEVCNTR25_EL0},
-  {"pmevcntr26_el0", PMEVCNTR26_EL0},
-  {"pmevcntr27_el0", PMEVCNTR27_EL0},
-  {"pmevcntr28_el0", PMEVCNTR28_EL0},
-  {"pmevcntr29_el0", PMEVCNTR29_EL0},
-  {"pmevcntr30_el0", PMEVCNTR30_EL0},
-  {"pmccfiltr_el0", PMCCFILTR_EL0},
-  {"pmevtyper0_el0", PMEVTYPER0_EL0},
-  {"pmevtyper1_el0", PMEVTYPER1_EL0},
-  {"pmevtyper2_el0", PMEVTYPER2_EL0},
-  {"pmevtyper3_el0", PMEVTYPER3_EL0},
-  {"pmevtyper4_el0", PMEVTYPER4_EL0},
-  {"pmevtyper5_el0", PMEVTYPER5_EL0},
-  {"pmevtyper6_el0", PMEVTYPER6_EL0},
-  {"pmevtyper7_el0", PMEVTYPER7_EL0},
-  {"pmevtyper8_el0", PMEVTYPER8_EL0},
-  {"pmevtyper9_el0", PMEVTYPER9_EL0},
-  {"pmevtyper10_el0", PMEVTYPER10_EL0},
-  {"pmevtyper11_el0", PMEVTYPER11_EL0},
-  {"pmevtyper12_el0", PMEVTYPER12_EL0},
-  {"pmevtyper13_el0", PMEVTYPER13_EL0},
-  {"pmevtyper14_el0", PMEVTYPER14_EL0},
-  {"pmevtyper15_el0", PMEVTYPER15_EL0},
-  {"pmevtyper16_el0", PMEVTYPER16_EL0},
-  {"pmevtyper17_el0", PMEVTYPER17_EL0},
-  {"pmevtyper18_el0", PMEVTYPER18_EL0},
-  {"pmevtyper19_el0", PMEVTYPER19_EL0},
-  {"pmevtyper20_el0", PMEVTYPER20_EL0},
-  {"pmevtyper21_el0", PMEVTYPER21_EL0},
-  {"pmevtyper22_el0", PMEVTYPER22_EL0},
-  {"pmevtyper23_el0", PMEVTYPER23_EL0},
-  {"pmevtyper24_el0", PMEVTYPER24_EL0},
-  {"pmevtyper25_el0", PMEVTYPER25_EL0},
-  {"pmevtyper26_el0", PMEVTYPER26_EL0},
-  {"pmevtyper27_el0", PMEVTYPER27_EL0},
-  {"pmevtyper28_el0", PMEVTYPER28_EL0},
-  {"pmevtyper29_el0", PMEVTYPER29_EL0},
-  {"pmevtyper30_el0", PMEVTYPER30_EL0},
-
-  // Trace registers
-  {"trcprgctlr", TRCPRGCTLR},
-  {"trcprocselr", TRCPROCSELR},
-  {"trcconfigr", TRCCONFIGR},
-  {"trcauxctlr", TRCAUXCTLR},
-  {"trceventctl0r", TRCEVENTCTL0R},
-  {"trceventctl1r", TRCEVENTCTL1R},
-  {"trcstallctlr", TRCSTALLCTLR},
-  {"trctsctlr", TRCTSCTLR},
-  {"trcsyncpr", TRCSYNCPR},
-  {"trcccctlr", TRCCCCTLR},
-  {"trcbbctlr", TRCBBCTLR},
-  {"trctraceidr", TRCTRACEIDR},
-  {"trcqctlr", TRCQCTLR},
-  {"trcvictlr", TRCVICTLR},
-  {"trcviiectlr", TRCVIIECTLR},
-  {"trcvissctlr", TRCVISSCTLR},
-  {"trcvipcssctlr", TRCVIPCSSCTLR},
-  {"trcvdctlr", TRCVDCTLR},
-  {"trcvdsacctlr", TRCVDSACCTLR},
-  {"trcvdarcctlr", TRCVDARCCTLR},
-  {"trcseqevr0", TRCSEQEVR0},
-  {"trcseqevr1", TRCSEQEVR1},
-  {"trcseqevr2", TRCSEQEVR2},
-  {"trcseqrstevr", TRCSEQRSTEVR},
-  {"trcseqstr", TRCSEQSTR},
-  {"trcextinselr", TRCEXTINSELR},
-  {"trccntrldvr0", TRCCNTRLDVR0},
-  {"trccntrldvr1", TRCCNTRLDVR1},
-  {"trccntrldvr2", TRCCNTRLDVR2},
-  {"trccntrldvr3", TRCCNTRLDVR3},
-  {"trccntctlr0", TRCCNTCTLR0},
-  {"trccntctlr1", TRCCNTCTLR1},
-  {"trccntctlr2", TRCCNTCTLR2},
-  {"trccntctlr3", TRCCNTCTLR3},
-  {"trccntvr0", TRCCNTVR0},
-  {"trccntvr1", TRCCNTVR1},
-  {"trccntvr2", TRCCNTVR2},
-  {"trccntvr3", TRCCNTVR3},
-  {"trcimspec0", TRCIMSPEC0},
-  {"trcimspec1", TRCIMSPEC1},
-  {"trcimspec2", TRCIMSPEC2},
-  {"trcimspec3", TRCIMSPEC3},
-  {"trcimspec4", TRCIMSPEC4},
-  {"trcimspec5", TRCIMSPEC5},
-  {"trcimspec6", TRCIMSPEC6},
-  {"trcimspec7", TRCIMSPEC7},
-  {"trcrsctlr2", TRCRSCTLR2},
-  {"trcrsctlr3", TRCRSCTLR3},
-  {"trcrsctlr4", TRCRSCTLR4},
-  {"trcrsctlr5", TRCRSCTLR5},
-  {"trcrsctlr6", TRCRSCTLR6},
-  {"trcrsctlr7", TRCRSCTLR7},
-  {"trcrsctlr8", TRCRSCTLR8},
-  {"trcrsctlr9", TRCRSCTLR9},
-  {"trcrsctlr10", TRCRSCTLR10},
-  {"trcrsctlr11", TRCRSCTLR11},
-  {"trcrsctlr12", TRCRSCTLR12},
-  {"trcrsctlr13", TRCRSCTLR13},
-  {"trcrsctlr14", TRCRSCTLR14},
-  {"trcrsctlr15", TRCRSCTLR15},
-  {"trcrsctlr16", TRCRSCTLR16},
-  {"trcrsctlr17", TRCRSCTLR17},
-  {"trcrsctlr18", TRCRSCTLR18},
-  {"trcrsctlr19", TRCRSCTLR19},
-  {"trcrsctlr20", TRCRSCTLR20},
-  {"trcrsctlr21", TRCRSCTLR21},
-  {"trcrsctlr22", TRCRSCTLR22},
-  {"trcrsctlr23", TRCRSCTLR23},
-  {"trcrsctlr24", TRCRSCTLR24},
-  {"trcrsctlr25", TRCRSCTLR25},
-  {"trcrsctlr26", TRCRSCTLR26},
-  {"trcrsctlr27", TRCRSCTLR27},
-  {"trcrsctlr28", TRCRSCTLR28},
-  {"trcrsctlr29", TRCRSCTLR29},
-  {"trcrsctlr30", TRCRSCTLR30},
-  {"trcrsctlr31", TRCRSCTLR31},
-  {"trcssccr0", TRCSSCCR0},
-  {"trcssccr1", TRCSSCCR1},
-  {"trcssccr2", TRCSSCCR2},
-  {"trcssccr3", TRCSSCCR3},
-  {"trcssccr4", TRCSSCCR4},
-  {"trcssccr5", TRCSSCCR5},
-  {"trcssccr6", TRCSSCCR6},
-  {"trcssccr7", TRCSSCCR7},
-  {"trcsscsr0", TRCSSCSR0},
-  {"trcsscsr1", TRCSSCSR1},
-  {"trcsscsr2", TRCSSCSR2},
-  {"trcsscsr3", TRCSSCSR3},
-  {"trcsscsr4", TRCSSCSR4},
-  {"trcsscsr5", TRCSSCSR5},
-  {"trcsscsr6", TRCSSCSR6},
-  {"trcsscsr7", TRCSSCSR7},
-  {"trcsspcicr0", TRCSSPCICR0},
-  {"trcsspcicr1", TRCSSPCICR1},
-  {"trcsspcicr2", TRCSSPCICR2},
-  {"trcsspcicr3", TRCSSPCICR3},
-  {"trcsspcicr4", TRCSSPCICR4},
-  {"trcsspcicr5", TRCSSPCICR5},
-  {"trcsspcicr6", TRCSSPCICR6},
-  {"trcsspcicr7", TRCSSPCICR7},
-  {"trcpdcr", TRCPDCR},
-  {"trcacvr0", TRCACVR0},
-  {"trcacvr1", TRCACVR1},
-  {"trcacvr2", TRCACVR2},
-  {"trcacvr3", TRCACVR3},
-  {"trcacvr4", TRCACVR4},
-  {"trcacvr5", TRCACVR5},
-  {"trcacvr6", TRCACVR6},
-  {"trcacvr7", TRCACVR7},
-  {"trcacvr8", TRCACVR8},
-  {"trcacvr9", TRCACVR9},
-  {"trcacvr10", TRCACVR10},
-  {"trcacvr11", TRCACVR11},
-  {"trcacvr12", TRCACVR12},
-  {"trcacvr13", TRCACVR13},
-  {"trcacvr14", TRCACVR14},
-  {"trcacvr15", TRCACVR15},
-  {"trcacatr0", TRCACATR0},
-  {"trcacatr1", TRCACATR1},
-  {"trcacatr2", TRCACATR2},
-  {"trcacatr3", TRCACATR3},
-  {"trcacatr4", TRCACATR4},
-  {"trcacatr5", TRCACATR5},
-  {"trcacatr6", TRCACATR6},
-  {"trcacatr7", TRCACATR7},
-  {"trcacatr8", TRCACATR8},
-  {"trcacatr9", TRCACATR9},
-  {"trcacatr10", TRCACATR10},
-  {"trcacatr11", TRCACATR11},
-  {"trcacatr12", TRCACATR12},
-  {"trcacatr13", TRCACATR13},
-  {"trcacatr14", TRCACATR14},
-  {"trcacatr15", TRCACATR15},
-  {"trcdvcvr0", TRCDVCVR0},
-  {"trcdvcvr1", TRCDVCVR1},
-  {"trcdvcvr2", TRCDVCVR2},
-  {"trcdvcvr3", TRCDVCVR3},
-  {"trcdvcvr4", TRCDVCVR4},
-  {"trcdvcvr5", TRCDVCVR5},
-  {"trcdvcvr6", TRCDVCVR6},
-  {"trcdvcvr7", TRCDVCVR7},
-  {"trcdvcmr0", TRCDVCMR0},
-  {"trcdvcmr1", TRCDVCMR1},
-  {"trcdvcmr2", TRCDVCMR2},
-  {"trcdvcmr3", TRCDVCMR3},
-  {"trcdvcmr4", TRCDVCMR4},
-  {"trcdvcmr5", TRCDVCMR5},
-  {"trcdvcmr6", TRCDVCMR6},
-  {"trcdvcmr7", TRCDVCMR7},
-  {"trccidcvr0", TRCCIDCVR0},
-  {"trccidcvr1", TRCCIDCVR1},
-  {"trccidcvr2", TRCCIDCVR2},
-  {"trccidcvr3", TRCCIDCVR3},
-  {"trccidcvr4", TRCCIDCVR4},
-  {"trccidcvr5", TRCCIDCVR5},
-  {"trccidcvr6", TRCCIDCVR6},
-  {"trccidcvr7", TRCCIDCVR7},
-  {"trcvmidcvr0", TRCVMIDCVR0},
-  {"trcvmidcvr1", TRCVMIDCVR1},
-  {"trcvmidcvr2", TRCVMIDCVR2},
-  {"trcvmidcvr3", TRCVMIDCVR3},
-  {"trcvmidcvr4", TRCVMIDCVR4},
-  {"trcvmidcvr5", TRCVMIDCVR5},
-  {"trcvmidcvr6", TRCVMIDCVR6},
-  {"trcvmidcvr7", TRCVMIDCVR7},
-  {"trccidcctlr0", TRCCIDCCTLR0},
-  {"trccidcctlr1", TRCCIDCCTLR1},
-  {"trcvmidcctlr0", TRCVMIDCCTLR0},
-  {"trcvmidcctlr1", TRCVMIDCCTLR1},
-  {"trcitctrl", TRCITCTRL},
-  {"trcclaimset", TRCCLAIMSET},
-  {"trcclaimclr", TRCCLAIMCLR},
-
-  // GICv3 registers
-  {"icc_bpr1_el1", ICC_BPR1_EL1},
-  {"icc_bpr0_el1", ICC_BPR0_EL1},
-  {"icc_pmr_el1", ICC_PMR_EL1},
-  {"icc_ctlr_el1", ICC_CTLR_EL1},
-  {"icc_ctlr_el3", ICC_CTLR_EL3},
-  {"icc_sre_el1", ICC_SRE_EL1},
-  {"icc_sre_el2", ICC_SRE_EL2},
-  {"icc_sre_el3", ICC_SRE_EL3},
-  {"icc_igrpen0_el1", ICC_IGRPEN0_EL1},
-  {"icc_igrpen1_el1", ICC_IGRPEN1_EL1},
-  {"icc_igrpen1_el3", ICC_IGRPEN1_EL3},
-  {"icc_seien_el1", ICC_SEIEN_EL1},
-  {"icc_ap0r0_el1", ICC_AP0R0_EL1},
-  {"icc_ap0r1_el1", ICC_AP0R1_EL1},
-  {"icc_ap0r2_el1", ICC_AP0R2_EL1},
-  {"icc_ap0r3_el1", ICC_AP0R3_EL1},
-  {"icc_ap1r0_el1", ICC_AP1R0_EL1},
-  {"icc_ap1r1_el1", ICC_AP1R1_EL1},
-  {"icc_ap1r2_el1", ICC_AP1R2_EL1},
-  {"icc_ap1r3_el1", ICC_AP1R3_EL1},
-  {"ich_ap0r0_el2", ICH_AP0R0_EL2},
-  {"ich_ap0r1_el2", ICH_AP0R1_EL2},
-  {"ich_ap0r2_el2", ICH_AP0R2_EL2},
-  {"ich_ap0r3_el2", ICH_AP0R3_EL2},
-  {"ich_ap1r0_el2", ICH_AP1R0_EL2},
-  {"ich_ap1r1_el2", ICH_AP1R1_EL2},
-  {"ich_ap1r2_el2", ICH_AP1R2_EL2},
-  {"ich_ap1r3_el2", ICH_AP1R3_EL2},
-  {"ich_hcr_el2", ICH_HCR_EL2},
-  {"ich_misr_el2", ICH_MISR_EL2},
-  {"ich_vmcr_el2", ICH_VMCR_EL2},
-  {"ich_vseir_el2", ICH_VSEIR_EL2},
-  {"ich_lr0_el2", ICH_LR0_EL2},
-  {"ich_lr1_el2", ICH_LR1_EL2},
-  {"ich_lr2_el2", ICH_LR2_EL2},
-  {"ich_lr3_el2", ICH_LR3_EL2},
-  {"ich_lr4_el2", ICH_LR4_EL2},
-  {"ich_lr5_el2", ICH_LR5_EL2},
-  {"ich_lr6_el2", ICH_LR6_EL2},
-  {"ich_lr7_el2", ICH_LR7_EL2},
-  {"ich_lr8_el2", ICH_LR8_EL2},
-  {"ich_lr9_el2", ICH_LR9_EL2},
-  {"ich_lr10_el2", ICH_LR10_EL2},
-  {"ich_lr11_el2", ICH_LR11_EL2},
-  {"ich_lr12_el2", ICH_LR12_EL2},
-  {"ich_lr13_el2", ICH_LR13_EL2},
-  {"ich_lr14_el2", ICH_LR14_EL2},
-  {"ich_lr15_el2", ICH_LR15_EL2}
-};
-
-uint32_t
-A64SysReg::SysRegMapper::fromString(StringRef Name, bool &Valid) const {
-  // First search the registers shared by all
-  std::string NameLower = Name.lower();
-  for (unsigned i = 0; i < array_lengthof(SysRegPairs); ++i) {
-    if (SysRegPairs[i].Name == NameLower) {
-      Valid = true;
-      return SysRegPairs[i].Value;
-    }
-  }
-
-  // Now try the instruction-specific registers (either read-only or
-  // write-only).
-  for (unsigned i = 0; i < NumInstPairs; ++i) {
-    if (InstPairs[i].Name == NameLower) {
-      Valid = true;
-      return InstPairs[i].Value;
-    }
-  }
-
-  // Try to parse an S<op0>_<op1>_<Cn>_<Cm>_<op2> register name, where the bits
-  // are: 11 xxx 1x11 xxxx xxx
-  Regex GenericRegPattern("^s3_([0-7])_c(1[15])_c([0-9]|1[0-5])_([0-7])$");
-
-  SmallVector<StringRef, 4> Ops;
-  if (!GenericRegPattern.match(NameLower, &Ops)) {
-    Valid = false;
-    return -1;
-  }
-
-  uint32_t Op0 = 3, Op1 = 0, CRn = 0, CRm = 0, Op2 = 0;
-  uint32_t Bits;
-  Ops[1].getAsInteger(10, Op1);
-  Ops[2].getAsInteger(10, CRn);
-  Ops[3].getAsInteger(10, CRm);
-  Ops[4].getAsInteger(10, Op2);
-  Bits = (Op0 << 14) | (Op1 << 11) | (CRn << 7) | (CRm << 3) | Op2;
-
-  Valid = true;
-  return Bits;
-}
-
-std::string
-A64SysReg::SysRegMapper::toString(uint32_t Bits, bool &Valid) const {
-  for (unsigned i = 0; i < array_lengthof(SysRegPairs); ++i) {
-    if (SysRegPairs[i].Value == Bits) {
-      Valid = true;
-      return SysRegPairs[i].Name;
-    }
-  }
-
-  for (unsigned i = 0; i < NumInstPairs; ++i) {
-    if (InstPairs[i].Value == Bits) {
-      Valid = true;
-      return InstPairs[i].Name;
-    }
-  }
-
-  uint32_t Op0 = (Bits >> 14) & 0x3;
-  uint32_t Op1 = (Bits >> 11) & 0x7;
-  uint32_t CRn = (Bits >> 7) & 0xf;
-  uint32_t CRm = (Bits >> 3) & 0xf;
-  uint32_t Op2 = Bits & 0x7;
-
-  // Only combinations matching: 11 xxx 1x11 xxxx xxx are valid for a generic
-  // name.
-  if (Op0 != 3 || (CRn != 11 && CRn != 15)) {
-      Valid = false;
-      return "";
-  }
-
-  assert(Op0 == 3 && (CRn == 11 || CRn == 15) && "Invalid generic sysreg");
-
-  Valid = true;
-  return "s3_" + utostr(Op1) + "_c" + utostr(CRn)
-               + "_c" + utostr(CRm) + "_" + utostr(Op2);
-}
-
-const NamedImmMapper::Mapping A64TLBI::TLBIMapper::TLBIPairs[] = {
-  {"ipas2e1is", IPAS2E1IS},
-  {"ipas2le1is", IPAS2LE1IS},
-  {"vmalle1is", VMALLE1IS},
-  {"alle2is", ALLE2IS},
-  {"alle3is", ALLE3IS},
-  {"vae1is", VAE1IS},
-  {"vae2is", VAE2IS},
-  {"vae3is", VAE3IS},
-  {"aside1is", ASIDE1IS},
-  {"vaae1is", VAAE1IS},
-  {"alle1is", ALLE1IS},
-  {"vale1is", VALE1IS},
-  {"vale2is", VALE2IS},
-  {"vale3is", VALE3IS},
-  {"vmalls12e1is", VMALLS12E1IS},
-  {"vaale1is", VAALE1IS},
-  {"ipas2e1", IPAS2E1},
-  {"ipas2le1", IPAS2LE1},
-  {"vmalle1", VMALLE1},
-  {"alle2", ALLE2},
-  {"alle3", ALLE3},
-  {"vae1", VAE1},
-  {"vae2", VAE2},
-  {"vae3", VAE3},
-  {"aside1", ASIDE1},
-  {"vaae1", VAAE1},
-  {"alle1", ALLE1},
-  {"vale1", VALE1},
-  {"vale2", VALE2},
-  {"vale3", VALE3},
-  {"vmalls12e1", VMALLS12E1},
-  {"vaale1", VAALE1}
-};
-
-A64TLBI::TLBIMapper::TLBIMapper()
-  : NamedImmMapper(TLBIPairs, 0) {}
-
-bool A64Imms::isFPImm(const APFloat &Val, uint32_t &Imm8Bits) {
-  const fltSemantics &Sem = Val.getSemantics();
-  unsigned FracBits = APFloat::semanticsPrecision(Sem) - 1;
-
-  uint32_t ExpMask;
-  switch (FracBits) {
-  case 10: // IEEE half-precision
-    ExpMask = 0x1f;
-    break;
-  case 23: // IEEE single-precision
-    ExpMask = 0xff;
-    break;
-  case 52: // IEEE double-precision
-    ExpMask = 0x7ff;
-    break;
-  case 112: // IEEE quad-precision
-    // No immediates are valid for double precision.
-    return false;
-  default:
-    llvm_unreachable("Only half, single and double precision supported");
-  }
-
-  uint32_t ExpStart = FracBits;
-  uint64_t FracMask = (1ULL << FracBits) - 1;
-
-  uint32_t Sign = Val.isNegative();
-
-  uint64_t Bits= Val.bitcastToAPInt().getLimitedValue();
-  uint64_t Fraction = Bits & FracMask;
-  int32_t Exponent = ((Bits >> ExpStart) & ExpMask);
-  Exponent -= ExpMask >> 1;
-
-  // S[d] = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>, 5):imm8<5:0>:Zeros(19)
-  // D[d] = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>, 8):imm8<5:0>:Zeros(48)
-  // This translates to: only 4 bits of fraction; -3 <= exp <= 4.
-  uint64_t A64FracStart = FracBits - 4;
-  uint64_t A64FracMask = 0xf;
-
-  // Are there too many fraction bits?
-  if (Fraction & ~(A64FracMask << A64FracStart))
-    return false;
-
-  if (Exponent < -3 || Exponent > 4)
-    return false;
-
-  uint32_t PackedFraction = (Fraction >> A64FracStart) & A64FracMask;
-  uint32_t PackedExp = (Exponent + 7) & 0x7;
-
-  Imm8Bits = (Sign << 7) | (PackedExp << 4) | PackedFraction;
-  return true;
-}
-
-// Encoding of the immediate for logical (immediate) instructions:
-//
-// | N | imms   | immr   | size | R            | S            |
-// |---+--------+--------+------+--------------+--------------|
-// | 1 | ssssss | rrrrrr |   64 | UInt(rrrrrr) | UInt(ssssss) |
-// | 0 | 0sssss | xrrrrr |   32 | UInt(rrrrr)  | UInt(sssss)  |
-// | 0 | 10ssss | xxrrrr |   16 | UInt(rrrr)   | UInt(ssss)   |
-// | 0 | 110sss | xxxrrr |    8 | UInt(rrr)    | UInt(sss)    |
-// | 0 | 1110ss | xxxxrr |    4 | UInt(rr)     | UInt(ss)     |
-// | 0 | 11110s | xxxxxr |    2 | UInt(r)      | UInt(s)      |
-// | 0 | 11111x | -      |      | UNALLOCATED  |              |
-//
-// Columns 'R', 'S' and 'size' specify a "bitmask immediate" of size bits in
-// which the lower S+1 bits are ones and the remaining bits are zero, then
-// rotated right by R bits, which is then replicated across the datapath.
-//
-// + Values of 'N', 'imms' and 'immr' which do not match the above table are
-//   RESERVED.
-// + If all 's' bits in the imms field are set then the instruction is
-//   RESERVED.
-// + The 'x' bits in the 'immr' field are IGNORED.
-
-bool A64Imms::isLogicalImm(unsigned RegWidth, uint64_t Imm, uint32_t &Bits) { // Try to encode Imm as a "bitmask immediate" (see table above); on success Bits = N:immr:imms.
-  int RepeatWidth;
-  int Rotation = 0;
-  int Num1s = 0;
-
-  // Because there are S+1 ones in the replicated mask, an immediate of all
-  // zeros is not allowed. Filtering it here is probably more efficient.
-  if (Imm == 0) return false;
-
-  for (RepeatWidth = RegWidth; RepeatWidth > 1; RepeatWidth /= 2) {
-    uint64_t RepeatMask = RepeatWidth == 64 ? -1 : (1ULL << RepeatWidth) - 1;
-    uint64_t ReplicatedMask = Imm & RepeatMask;
-
-    if (ReplicatedMask == 0) continue;
-
-    // First we have to make sure the mask is actually repeated in each slot for
-    // this width-specifier.
-    bool IsReplicatedMask = true;
-    for (unsigned i = RepeatWidth; i < RegWidth; i += RepeatWidth) {
-      if (((Imm >> i) & RepeatMask) != ReplicatedMask) {
-        IsReplicatedMask = false;
-        break;
-      }
-    }
-    if (!IsReplicatedMask) continue;
-
-    // Now we have to work out the amount of rotation needed. The first part of
-    // this calculation is actually independent of RepeatWidth, but the complex
-    // case will depend on it.
-    Rotation = countTrailingZeros(Imm);
-    if (Rotation == 0) {
-      // There were no leading zeros, which means it's either in place or there
-      // are 1s at each end (e.g. 0x8003 needs rotating).
-      Rotation = RegWidth == 64 ? CountLeadingOnes_64(Imm)
-                                : CountLeadingOnes_32(Imm);
-      Rotation = RepeatWidth - Rotation;
-    }
-
-    uint64_t ReplicatedOnes = ReplicatedMask;
-    if (Rotation != 0 && Rotation != 64)
-      ReplicatedOnes = (ReplicatedMask >> Rotation)
-        | ((ReplicatedMask << (RepeatWidth - Rotation)) & RepeatMask);
-
-    // Of course, they may not actually be ones, so we have to check that:
-    if (!isMask_64(ReplicatedOnes))
-      continue;
-
-    Num1s = CountTrailingOnes_64(ReplicatedOnes);
-
-    // We know we've got an almost valid encoding (certainly, if this is invalid
-    // no other parameters would work).
-    break;
-  }
-
-  // The encodings which would produce all 1s are RESERVED.
-  if (RepeatWidth == 1 || Num1s == RepeatWidth) return false;
-
-  uint32_t N = RepeatWidth == 64; // N bit is set only for a full 64-bit pattern
-  uint32_t ImmR = RepeatWidth - Rotation;
-  uint32_t ImmS = Num1s - 1;
-
-  switch (RepeatWidth) {
-  default: break; // No action required for other valid rotations.
-  case 16: ImmS |= 0x20; break; // 10ssss
-  case 8: ImmS |= 0x30; break;  // 110sss
-  case 4: ImmS |= 0x38; break;  // 1110ss
-  case 2: ImmS |= 0x3c; break;  // 11110s
-  }
-
-  Bits = ImmS | (ImmR << 6) | (N << 12); // pack as N:immr:imms (13 bits)
-
-  return true;
-}
-
-
-bool A64Imms::isLogicalImmBits(unsigned RegWidth, uint32_t Bits,
-                               uint64_t &Imm) { // inverse of isLogicalImm: decode N:immr:imms into the immediate it represents
-  uint32_t N = Bits >> 12;
-  uint32_t ImmR = (Bits >> 6) & 0x3f;
-  uint32_t ImmS = Bits & 0x3f;
-
-  // N=1 encodes a 64-bit replication and is invalid for the 32-bit
-  // instructions.
-  if (RegWidth == 32 && N != 0) return false;
-
-  int Width = 0;
-  if (N == 1)
-    Width = 64;
-  else if ((ImmS & 0x20) == 0)
-    Width = 32;
-  else if ((ImmS & 0x10) == 0)
-    Width = 16;
-  else if ((ImmS & 0x08) == 0)
-    Width = 8;
-  else if ((ImmS & 0x04) == 0)
-    Width = 4;
-  else if ((ImmS & 0x02) == 0)
-    Width = 2;
-  else {
-    // ImmS  is 0b11111x: UNALLOCATED
-    return false;
-  }
-
-  int Num1s = (ImmS & (Width - 1)) + 1;
-
-  // All encodings which would map to -1 (signed) are RESERVED.
-  if (Num1s == Width) return false;
-
-  int Rotation = (ImmR & (Width - 1));
-  uint64_t Mask = (1ULL << Num1s) - 1; // Num1s <= 63 here, so the shift is defined
-  uint64_t WidthMask = Width == 64 ? -1 : (1ULL << Width) - 1;
-  if (Rotation != 0 && Rotation != 64)
-    Mask = (Mask >> Rotation)
-      | ((Mask << (Width - Rotation)) & WidthMask);
-
-  Imm = Mask;
-  for (unsigned i = 1; i < RegWidth / Width; ++i) { // replicate the pattern across the whole register
-    Mask <<= Width;
-    Imm |= Mask;
-  }
-
-  return true;
-}
-
-bool A64Imms::isMOVZImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift) { // true iff Value == UImm16 << (16 * Shift)
-  // If high bits are set then a 32-bit MOVZ can't possibly work.
-  if (RegWidth == 32 && (Value & ~0xffffffffULL))
-    return false;
-
-  for (int i = 0; i < RegWidth; i += 16) {
-    // If the value is 0 when we mask out all the bits that could be set with
-    // the current LSL value then it's representable.
-    if ((Value & ~(0xffffULL << i)) == 0) {
-      Shift = i / 16;
-      UImm16 = (Value >> i) & 0xffff;
-      return true;
-    }
-  }
-  return false;
-}
-
-bool A64Imms::isMOVNImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift) {
-  // MOVN is defined to set its register to NOT(LSL(imm16, shift)).
-
-  // We have to be a little careful about a 32-bit register: 0xffff_1234 *is*
-  // representable, but ~0xffff_1234 == 0xffff_ffff_0000_edcb which is not
-  // a valid input for isMOVZImm.
-  if (RegWidth == 32 && (Value & ~0xffffffffULL))
-    return false;
-
-  uint64_t MOVZEquivalent = RegWidth == 32 ? ~Value & 0xffffffff : ~Value;
-
-  return isMOVZImm(RegWidth, MOVZEquivalent, UImm16, Shift); // MOVN imm is the bitwise-NOT of a MOVZ-representable value
-}
-
-bool A64Imms::isOnlyMOVNImm(int RegWidth, uint64_t Value,
-                            int &UImm16, int &Shift) { // true iff MOVN-representable but *not* MOVZ-representable
-  if (isMOVZImm(RegWidth, Value, UImm16, Shift))
-    return false;
-
-  return isMOVNImm(RegWidth, Value, UImm16, Shift);
-}
-
-// decodeNeonModShiftImm - Decode a Neon OpCmode value into the
-// shift amount and the shift type (shift zeros or ones in) and
-// returns whether the OpCmode value implies a shift operation.
-bool A64Imms::decodeNeonModShiftImm(unsigned OpCmode, unsigned &ShiftImm,
-                                    unsigned &ShiftOnesIn) {
-  ShiftImm = 0;
-  ShiftOnesIn = false;
-  bool HasShift = true;
-
-  if (OpCmode == 0xe) {
-    // movi byte
-    HasShift = false;
-  } else if (OpCmode == 0x1e) {
-    // movi 64-bit bytemask
-    HasShift = false;
-  } else if ((OpCmode & 0xc) == 0x8) {
-    // shift zeros, per halfword
-    ShiftImm = ((OpCmode & 0x2) >> 1);
-  } else if ((OpCmode & 0x8) == 0) {
-    // shift zeros, per word
-    ShiftImm = ((OpCmode & 0x6) >> 1);
-  } else if ((OpCmode & 0xe) == 0xc) {
-    // shift ones, per word
-    ShiftOnesIn = true;
-    ShiftImm = (OpCmode & 0x1);
-  } else {
-    // per byte, per bytemask
-    llvm_unreachable("Unsupported Neon modified immediate");
-  }
-
-  return HasShift;
-}
-
-// decodeNeonModImm - Decode a NEON modified immediate and OpCmode values
-// into the element value and the element size in bits.
-uint64_t A64Imms::decodeNeonModImm(unsigned Val, unsigned OpCmode,
-                                   unsigned &EltBits) { // EltBits receives 8, 16, 32 or 64 on every reachable path
-  uint64_t DecodedVal = Val;
-  EltBits = 0;
-
-  if (OpCmode == 0xe) {
-    // movi byte
-    EltBits = 8;
-  } else if (OpCmode == 0x1e) {
-    // movi 64-bit bytemask
-    DecodedVal = 0;
-    for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) { // each Val bit expands to one 0xff/0x00 byte
-      if ((Val >> ByteNum) & 1)
-        DecodedVal |= (uint64_t)0xff << (8 * ByteNum);
-    }
-    EltBits = 64;
-  } else if ((OpCmode & 0xc) == 0x8) {
-    // shift zeros, per halfword
-    EltBits = 16;
-  } else if ((OpCmode & 0x8) == 0) {
-    // shift zeros, per word
-    EltBits = 32;
-  } else if ((OpCmode & 0xe) == 0xc) {
-    // shift ones, per word
-    EltBits = 32;
-  } else {
-    llvm_unreachable("Unsupported Neon modified immediate");
-  }
-  return DecodedVal;
-}

Removed: llvm/trunk/lib/Target/AArch64/Utils/AArch64BaseInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/Utils/AArch64BaseInfo.h?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/Utils/AArch64BaseInfo.h (original)
+++ llvm/trunk/lib/Target/AArch64/Utils/AArch64BaseInfo.h (removed)
@@ -1,1138 +0,0 @@
-//===-- AArch64BaseInfo.h - Top level definitions for AArch64- --*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains small standalone helper functions and enum definitions for
-// the AArch64 target useful for the compiler back-end and the MC libraries.
-// As such, it deliberately does not include references to LLVM core
-// code gen types, passes, etc..
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_AARCH64_BASEINFO_H
-#define LLVM_AARCH64_BASEINFO_H
-
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/ErrorHandling.h"
-
-namespace llvm {
-
-// Enums corresponding to AArch64 condition codes
-namespace A64CC {
-  // The CondCodes constants map directly to the 4-bit encoding of the
-  // condition field for predicated instructions.
-  enum CondCodes {   // Meaning (integer)          Meaning (floating-point)
-    EQ = 0,        // Equal                      Equal
-    NE,            // Not equal                  Not equal, or unordered
-    HS,            // Unsigned higher or same    >, ==, or unordered
-    LO,            // Unsigned lower             Less than
-    MI,            // Minus, negative            Less than
-    PL,            // Plus, positive or zero     >, ==, or unordered
-    VS,            // Overflow                   Unordered
-    VC,            // No overflow                Ordered
-    HI,            // Unsigned higher            Greater than, or unordered
-    LS,            // Unsigned lower or same     Less than or equal
-    GE,            // Greater than or equal      Greater than or equal
-    LT,            // Less than                  Less than, or unordered
-    GT,            // Signed greater than        Greater than
-    LE,            // Signed less than or equal  <, ==, or unordered
-    AL,            // Always (unconditional)     Always (unconditional)
-    NV,             // Always (unconditional)     Always (unconditional)
-    // Note the NV exists purely to disassemble 0b1111. Execution
-    // is "always".
-    Invalid
-  };
-
-} // namespace A64CC
-
-inline static const char *A64CondCodeToString(A64CC::CondCodes CC) { // lower-case assembly mnemonic for CC
-  switch (CC) {
-  default: llvm_unreachable("Unknown condition code");
-  case A64CC::EQ:  return "eq";
-  case A64CC::NE:  return "ne";
-  case A64CC::HS:  return "hs";
-  case A64CC::LO:  return "lo";
-  case A64CC::MI:  return "mi";
-  case A64CC::PL:  return "pl";
-  case A64CC::VS:  return "vs";
-  case A64CC::VC:  return "vc";
-  case A64CC::HI:  return "hi";
-  case A64CC::LS:  return "ls";
-  case A64CC::GE:  return "ge";
-  case A64CC::LT:  return "lt";
-  case A64CC::GT:  return "gt";
-  case A64CC::LE:  return "le";
-  case A64CC::AL:  return "al";
-  case A64CC::NV:  return "nv";
-  }
-}
-
-// Map a condition-code mnemonic (case-insensitive) to its encoding; "cs" and
-// "cc" are accepted as aliases for "hs" and "lo". Returns A64CC::Invalid for
-// unrecognised strings.
-inline static A64CC::CondCodes A64StringToCondCode(StringRef CondStr) {
-  return StringSwitch<A64CC::CondCodes>(CondStr.lower())
-             .Case("eq", A64CC::EQ)
-             .Case("ne", A64CC::NE)
-             .Case("hs", A64CC::HS)
-             .Case("cs", A64CC::HS)
-             .Case("lo", A64CC::LO)
-             .Case("cc", A64CC::LO)
-             .Case("mi", A64CC::MI)
-             .Case("pl", A64CC::PL)
-             .Case("vs", A64CC::VS)
-             .Case("vc", A64CC::VC)
-             .Case("hi", A64CC::HI)
-             .Case("ls", A64CC::LS)
-             .Case("ge", A64CC::GE)
-             .Case("lt", A64CC::LT)
-             .Case("gt", A64CC::GT)
-             .Case("le", A64CC::LE)
-             .Case("al", A64CC::AL)
-             .Case("nv", A64CC::NV)
-             .Default(A64CC::Invalid);
-}
-
-inline static A64CC::CondCodes A64InvertCondCode(A64CC::CondCodes CC) {
-  // It turns out that the condition codes have been designed so that in order
-  // to reverse the intent of the condition you only have to invert the low bit
-  // (EQ<->NE, HS<->LO, MI<->PL, ...):
-
-  return static_cast<A64CC::CondCodes>(static_cast<unsigned>(CC) ^ 0x1);
-}
-
-/// Instances of this class can perform bidirectional mapping from random
-/// identifier strings to operand encodings. For example "MSR" takes a named
-/// system-register which must be encoded somehow and decoded for printing. This
-/// central location means that the information for those transformations is not
-/// duplicated and remains in sync.
-///
-/// FIXME: currently the algorithm is a completely unoptimised linear
-/// search. Obviously this could be improved, but we would probably want to work
-/// out just how often these instructions are emitted before working on it. It
-/// might even be optimal to just reorder the tables for the common instructions
-/// rather than changing the algorithm.
-struct NamedImmMapper {
-  struct Mapping { // one name<->encoding pair
-    const char *Name;
-    uint32_t Value;
-  };
-
-  template<int N>
-  NamedImmMapper(const Mapping (&Pairs)[N], uint32_t TooBigImm)
-    : Pairs(&Pairs[0]), NumPairs(N), TooBigImm(TooBigImm) {}
-
-  StringRef toString(uint32_t Value, bool &Valid) const;
-  uint32_t fromString(StringRef Name, bool &Valid) const;
-
-  /// Many of the instructions allow an alternative assembly form consisting of
-  /// a simple immediate. Currently the only valid forms are ranges [0, N) where
-  /// N being 0 indicates no immediate syntax-form is allowed.
-  bool validImm(uint32_t Value) const;
-protected:
-  const Mapping *Pairs;   // name<->value table
-  size_t NumPairs;        // number of entries in Pairs
-  uint32_t TooBigImm;     // presumably the exclusive bound for validImm -- confirm in the .cpp
-};
-
-namespace A64AT {
-  enum ATValues { // AT (address translate) operand encodings
-    Invalid = -1,    // Op0 Op1  CRn   CRm   Op2
-    S1E1R = 0x43c0,  // 01  000  0111  1000  000
-    S1E2R = 0x63c0,  // 01  100  0111  1000  000
-    S1E3R = 0x73c0,  // 01  110  0111  1000  000
-    S1E1W = 0x43c1,  // 01  000  0111  1000  001
-    S1E2W = 0x63c1,  // 01  100  0111  1000  001
-    S1E3W = 0x73c1,  // 01  110  0111  1000  001
-    S1E0R = 0x43c2,  // 01  000  0111  1000  010
-    S1E0W = 0x43c3,  // 01  000  0111  1000  011
-    S12E1R = 0x63c4, // 01  100  0111  1000  100
-    S12E1W = 0x63c5, // 01  100  0111  1000  101
-    S12E0R = 0x63c6, // 01  100  0111  1000  110
-    S12E0W = 0x63c7  // 01  100  0111  1000  111
-  };
-
-  struct ATMapper : NamedImmMapper {
-    const static Mapping ATPairs[];
-
-    ATMapper();
-  };
-
-}
-namespace A64DB {
-  enum DBValues { // memory-barrier (DMB/DSB) option encodings
-    Invalid = -1,
-    OSHLD = 0x1,
-    OSHST = 0x2,
-    OSH =   0x3,
-    NSHLD = 0x5,
-    NSHST = 0x6,
-    NSH =   0x7,
-    ISHLD = 0x9,
-    ISHST = 0xa,
-    ISH =   0xb,
-    LD =    0xd,
-    ST =    0xe,
-    SY =    0xf
-  };
-
-  struct DBarrierMapper : NamedImmMapper {
-    const static Mapping DBarrierPairs[];
-
-    DBarrierMapper();
-  };
-}
-
-namespace  A64DC {
-  enum DCValues { // DC (data cache maintenance) operand encodings
-    Invalid = -1,   // Op0 Op1  CRn   CRm   Op2
-    ZVA   = 0x5ba1, // 01  011  0111  0100  001
-    IVAC  = 0x43b1, // 01  000  0111  0110  001
-    ISW   = 0x43b2, // 01  000  0111  0110  010
-    CVAC  = 0x5bd1, // 01  011  0111  1010  001
-    CSW   = 0x43d2, // 01  000  0111  1010  010
-    CVAU  = 0x5bd9, // 01  011  0111  1011  001
-    CIVAC = 0x5bf1, // 01  011  0111  1110  001
-    CISW  = 0x43f2  // 01  000  0111  1110  010
-  };
-
-  struct DCMapper : NamedImmMapper {
-    const static Mapping DCPairs[];
-
-    DCMapper();
-  };
-
-}
-
-namespace  A64IC {
-  enum ICValues { // IC (instruction cache maintenance) operand encodings
-    Invalid = -1,     // Op1  CRn   CRm   Op2
-    IALLUIS = 0x0388, // 000  0111  0001  000
-    IALLU = 0x03a8,   // 000  0111  0101  000
-    IVAU = 0x1ba9     // 011  0111  0101  001
-  };
-
-
-  struct ICMapper : NamedImmMapper {
-    const static Mapping ICPairs[];
-
-    ICMapper();
-  };
-
-  static inline bool NeedsRegister(ICValues Val) { // IVAU is the only IC op that takes a register operand
-    return Val == IVAU;
-  }
-}
-
-namespace  A64ISB {
-  enum ISBValues { // ISB option encodings
-    Invalid = -1,
-    SY = 0xf
-  };
-  struct ISBMapper : NamedImmMapper {
-    const static Mapping ISBPairs[];
-
-    ISBMapper();
-  };
-}
-
-namespace A64PRFM {
-  enum PRFMValues { // PRFM <prfop> operand encodings
-    Invalid = -1,
-    PLDL1KEEP = 0x00,
-    PLDL1STRM = 0x01,
-    PLDL2KEEP = 0x02,
-    PLDL2STRM = 0x03,
-    PLDL3KEEP = 0x04,
-    PLDL3STRM = 0x05,
-    PLIL1KEEP = 0x08,
-    PLIL1STRM = 0x09,
-    PLIL2KEEP = 0x0a,
-    PLIL2STRM = 0x0b,
-    PLIL3KEEP = 0x0c,
-    PLIL3STRM = 0x0d,
-    PSTL1KEEP = 0x10,
-    PSTL1STRM = 0x11,
-    PSTL2KEEP = 0x12,
-    PSTL2STRM = 0x13,
-    PSTL3KEEP = 0x14,
-    PSTL3STRM = 0x15
-  };
-
-  struct PRFMMapper : NamedImmMapper {
-    const static Mapping PRFMPairs[];
-
-    PRFMMapper();
-  };
-}
-
-namespace A64PState {
-  enum PStateValues { // PSTATE field encodings
-    Invalid = -1,
-    SPSel = 0x05,
-    DAIFSet = 0x1e,
-    DAIFClr = 0x1f
-  };
-
-  struct PStateMapper : NamedImmMapper {
-    const static Mapping PStatePairs[];
-
-    PStateMapper();
-  };
-
-}
-
-namespace A64SE {
-    enum ShiftExtSpecifiers { // shift/extend operand specifiers
-        Invalid = -1,
-        LSL,
-        MSL,
-        LSR,
-        ASR,
-        ROR,
-
-        UXTB,
-        UXTH,
-        UXTW,
-        UXTX,
-
-        SXTB,
-        SXTH,
-        SXTW,
-        SXTX
-    };
-}
-
-namespace A64Layout {
-    enum VectorLayout { // NEON vector arrangement specifiers
-        Invalid = -1,
-        VL_8B,
-        VL_4H,
-        VL_2S,
-        VL_1D,
-
-        VL_16B,
-        VL_8H,
-        VL_4S,
-        VL_2D,
-
-        // Bare layout for the 128-bit vector
-        // (only show ".b", ".h", ".s", ".d" without vector number)
-        VL_B,
-        VL_H,
-        VL_S,
-        VL_D
-    };
-}
-
-inline static const char *
-A64VectorLayoutToString(A64Layout::VectorLayout Layout) { // e.g. VL_8B -> ".8b"
-  switch (Layout) {
-  case A64Layout::VL_8B:  return ".8b";
-  case A64Layout::VL_4H:  return ".4h";
-  case A64Layout::VL_2S:  return ".2s";
-  case A64Layout::VL_1D:  return ".1d";
-  case A64Layout::VL_16B:  return ".16b";
-  case A64Layout::VL_8H:  return ".8h";
-  case A64Layout::VL_4S:  return ".4s";
-  case A64Layout::VL_2D:  return ".2d";
-  case A64Layout::VL_B:  return ".b";
-  case A64Layout::VL_H:  return ".h";
-  case A64Layout::VL_S:  return ".s";
-  case A64Layout::VL_D:  return ".d";
-  default: llvm_unreachable("Unknown Vector Layout");
-  }
-}
-
-inline static A64Layout::VectorLayout
-A64StringToVectorLayout(StringRef LayoutStr) { // inverse of A64VectorLayoutToString; Invalid if unrecognised
-  return StringSwitch<A64Layout::VectorLayout>(LayoutStr)
-             .Case(".8b", A64Layout::VL_8B)
-             .Case(".4h", A64Layout::VL_4H)
-             .Case(".2s", A64Layout::VL_2S)
-             .Case(".1d", A64Layout::VL_1D)
-             .Case(".16b", A64Layout::VL_16B)
-             .Case(".8h", A64Layout::VL_8H)
-             .Case(".4s", A64Layout::VL_4S)
-             .Case(".2d", A64Layout::VL_2D)
-             .Case(".b", A64Layout::VL_B)
-             .Case(".h", A64Layout::VL_H)
-             .Case(".s", A64Layout::VL_S)
-             .Case(".d", A64Layout::VL_D)
-             .Default(A64Layout::Invalid);
-}
-
-namespace A64SysReg {
-  enum SysRegROValues {
-    MDCCSR_EL0        = 0x9808, // 10  011  0000  0001  000
-    DBGDTRRX_EL0      = 0x9828, // 10  011  0000  0101  000
-    MDRAR_EL1         = 0x8080, // 10  000  0001  0000  000
-    OSLSR_EL1         = 0x808c, // 10  000  0001  0001  100
-    DBGAUTHSTATUS_EL1 = 0x83f6, // 10  000  0111  1110  110
-    PMCEID0_EL0       = 0xdce6, // 11  011  1001  1100  110
-    PMCEID1_EL0       = 0xdce7, // 11  011  1001  1100  111
-    MIDR_EL1          = 0xc000, // 11  000  0000  0000  000
-    CCSIDR_EL1        = 0xc800, // 11  001  0000  0000  000
-    CLIDR_EL1         = 0xc801, // 11  001  0000  0000  001
-    CTR_EL0           = 0xd801, // 11  011  0000  0000  001
-    MPIDR_EL1         = 0xc005, // 11  000  0000  0000  101
-    REVIDR_EL1        = 0xc006, // 11  000  0000  0000  110
-    AIDR_EL1          = 0xc807, // 11  001  0000  0000  111
-    DCZID_EL0         = 0xd807, // 11  011  0000  0000  111
-    ID_PFR0_EL1       = 0xc008, // 11  000  0000  0001  000
-    ID_PFR1_EL1       = 0xc009, // 11  000  0000  0001  001
-    ID_DFR0_EL1       = 0xc00a, // 11  000  0000  0001  010
-    ID_AFR0_EL1       = 0xc00b, // 11  000  0000  0001  011
-    ID_MMFR0_EL1      = 0xc00c, // 11  000  0000  0001  100
-    ID_MMFR1_EL1      = 0xc00d, // 11  000  0000  0001  101
-    ID_MMFR2_EL1      = 0xc00e, // 11  000  0000  0001  110
-    ID_MMFR3_EL1      = 0xc00f, // 11  000  0000  0001  111
-    ID_ISAR0_EL1      = 0xc010, // 11  000  0000  0010  000
-    ID_ISAR1_EL1      = 0xc011, // 11  000  0000  0010  001
-    ID_ISAR2_EL1      = 0xc012, // 11  000  0000  0010  010
-    ID_ISAR3_EL1      = 0xc013, // 11  000  0000  0010  011
-    ID_ISAR4_EL1      = 0xc014, // 11  000  0000  0010  100
-    ID_ISAR5_EL1      = 0xc015, // 11  000  0000  0010  101
-    ID_AA64PFR0_EL1   = 0xc020, // 11  000  0000  0100  000
-    ID_AA64PFR1_EL1   = 0xc021, // 11  000  0000  0100  001
-    ID_AA64DFR0_EL1   = 0xc028, // 11  000  0000  0101  000
-    ID_AA64DFR1_EL1   = 0xc029, // 11  000  0000  0101  001
-    ID_AA64AFR0_EL1   = 0xc02c, // 11  000  0000  0101  100
-    ID_AA64AFR1_EL1   = 0xc02d, // 11  000  0000  0101  101
-    ID_AA64ISAR0_EL1  = 0xc030, // 11  000  0000  0110  000
-    ID_AA64ISAR1_EL1  = 0xc031, // 11  000  0000  0110  001
-    ID_AA64MMFR0_EL1  = 0xc038, // 11  000  0000  0111  000
-    ID_AA64MMFR1_EL1  = 0xc039, // 11  000  0000  0111  001
-    MVFR0_EL1         = 0xc018, // 11  000  0000  0011  000
-    MVFR1_EL1         = 0xc019, // 11  000  0000  0011  001
-    MVFR2_EL1         = 0xc01a, // 11  000  0000  0011  010
-    RVBAR_EL1         = 0xc601, // 11  000  1100  0000  001
-    RVBAR_EL2         = 0xe601, // 11  100  1100  0000  001
-    RVBAR_EL3         = 0xf601, // 11  110  1100  0000  001
-    ISR_EL1           = 0xc608, // 11  000  1100  0001  000
-    CNTPCT_EL0        = 0xdf01, // 11  011  1110  0000  001
-    CNTVCT_EL0        = 0xdf02,  // 11  011  1110  0000  010
-
-    // Trace registers
-    TRCSTATR          = 0x8818, // 10  001  0000  0011  000
-    TRCIDR8           = 0x8806, // 10  001  0000  0000  110
-    TRCIDR9           = 0x880e, // 10  001  0000  0001  110
-    TRCIDR10          = 0x8816, // 10  001  0000  0010  110
-    TRCIDR11          = 0x881e, // 10  001  0000  0011  110
-    TRCIDR12          = 0x8826, // 10  001  0000  0100  110
-    TRCIDR13          = 0x882e, // 10  001  0000  0101  110
-    TRCIDR0           = 0x8847, // 10  001  0000  1000  111
-    TRCIDR1           = 0x884f, // 10  001  0000  1001  111
-    TRCIDR2           = 0x8857, // 10  001  0000  1010  111
-    TRCIDR3           = 0x885f, // 10  001  0000  1011  111
-    TRCIDR4           = 0x8867, // 10  001  0000  1100  111
-    TRCIDR5           = 0x886f, // 10  001  0000  1101  111
-    TRCIDR6           = 0x8877, // 10  001  0000  1110  111
-    TRCIDR7           = 0x887f, // 10  001  0000  1111  111
-    TRCOSLSR          = 0x888c, // 10  001  0001  0001  100
-    TRCPDSR           = 0x88ac, // 10  001  0001  0101  100
-    TRCDEVAFF0        = 0x8bd6, // 10  001  0111  1010  110
-    TRCDEVAFF1        = 0x8bde, // 10  001  0111  1011  110
-    TRCLSR            = 0x8bee, // 10  001  0111  1101  110
-    TRCAUTHSTATUS     = 0x8bf6, // 10  001  0111  1110  110
-    TRCDEVARCH        = 0x8bfe, // 10  001  0111  1111  110
-    TRCDEVID          = 0x8b97, // 10  001  0111  0010  111
-    TRCDEVTYPE        = 0x8b9f, // 10  001  0111  0011  111
-    TRCPIDR4          = 0x8ba7, // 10  001  0111  0100  111
-    TRCPIDR5          = 0x8baf, // 10  001  0111  0101  111
-    TRCPIDR6          = 0x8bb7, // 10  001  0111  0110  111
-    TRCPIDR7          = 0x8bbf, // 10  001  0111  0111  111
-    TRCPIDR0          = 0x8bc7, // 10  001  0111  1000  111
-    TRCPIDR1          = 0x8bcf, // 10  001  0111  1001  111
-    TRCPIDR2          = 0x8bd7, // 10  001  0111  1010  111
-    TRCPIDR3          = 0x8bdf, // 10  001  0111  1011  111
-    TRCCIDR0          = 0x8be7, // 10  001  0111  1100  111
-    TRCCIDR1          = 0x8bef, // 10  001  0111  1101  111
-    TRCCIDR2          = 0x8bf7, // 10  001  0111  1110  111
-    TRCCIDR3          = 0x8bff, // 10  001  0111  1111  111
-
-    // GICv3 registers
-    ICC_IAR1_EL1      = 0xc660, // 11  000  1100  1100  000
-    ICC_IAR0_EL1      = 0xc640, // 11  000  1100  1000  000
-    ICC_HPPIR1_EL1    = 0xc662, // 11  000  1100  1100  010
-    ICC_HPPIR0_EL1    = 0xc642, // 11  000  1100  1000  010
-    ICC_RPR_EL1       = 0xc65b, // 11  000  1100  1011  011
-    ICH_VTR_EL2       = 0xe659, // 11  100  1100  1011  001
-    ICH_EISR_EL2      = 0xe65b, // 11  100  1100  1011  011
-    ICH_ELSR_EL2      = 0xe65d  // 11  100  1100  1011  101
-  };
-
-  enum SysRegWOValues {
-    DBGDTRTX_EL0      = 0x9828, // 10  011  0000  0101  000
-    OSLAR_EL1         = 0x8084, // 10  000  0001  0000  100
-    PMSWINC_EL0       = 0xdce4,  // 11  011  1001  1100  100
-
-    // Trace Registers
-    TRCOSLAR          = 0x8884, // 10  001  0001  0000  100
-    TRCLAR            = 0x8be6, // 10  001  0111  1100  110
-
-    // GICv3 registers
-    ICC_EOIR1_EL1     = 0xc661, // 11  000  1100  1100  001
-    ICC_EOIR0_EL1     = 0xc641, // 11  000  1100  1000  001
-    ICC_DIR_EL1       = 0xc659, // 11  000  1100  1011  001
-    ICC_SGI1R_EL1     = 0xc65d, // 11  000  1100  1011  101
-    ICC_ASGI1R_EL1    = 0xc65e, // 11  000  1100  1011  110
-    ICC_SGI0R_EL1     = 0xc65f  // 11  000  1100  1011  111
-  };
-
-  enum SysRegValues {
-    Invalid = -1,               // Op0 Op1  CRn   CRm   Op2
-    OSDTRRX_EL1       = 0x8002, // 10  000  0000  0000  010
-    OSDTRTX_EL1       = 0x801a, // 10  000  0000  0011  010
-    TEECR32_EL1       = 0x9000, // 10  010  0000  0000  000
-    MDCCINT_EL1       = 0x8010, // 10  000  0000  0010  000
-    MDSCR_EL1         = 0x8012, // 10  000  0000  0010  010
-    DBGDTR_EL0        = 0x9820, // 10  011  0000  0100  000
-    OSECCR_EL1        = 0x8032, // 10  000  0000  0110  010
-    DBGVCR32_EL2      = 0xa038, // 10  100  0000  0111  000
-    DBGBVR0_EL1       = 0x8004, // 10  000  0000  0000  100
-    DBGBVR1_EL1       = 0x800c, // 10  000  0000  0001  100
-    DBGBVR2_EL1       = 0x8014, // 10  000  0000  0010  100
-    DBGBVR3_EL1       = 0x801c, // 10  000  0000  0011  100
-    DBGBVR4_EL1       = 0x8024, // 10  000  0000  0100  100
-    DBGBVR5_EL1       = 0x802c, // 10  000  0000  0101  100
-    DBGBVR6_EL1       = 0x8034, // 10  000  0000  0110  100
-    DBGBVR7_EL1       = 0x803c, // 10  000  0000  0111  100
-    DBGBVR8_EL1       = 0x8044, // 10  000  0000  1000  100
-    DBGBVR9_EL1       = 0x804c, // 10  000  0000  1001  100
-    DBGBVR10_EL1      = 0x8054, // 10  000  0000  1010  100
-    DBGBVR11_EL1      = 0x805c, // 10  000  0000  1011  100
-    DBGBVR12_EL1      = 0x8064, // 10  000  0000  1100  100
-    DBGBVR13_EL1      = 0x806c, // 10  000  0000  1101  100
-    DBGBVR14_EL1      = 0x8074, // 10  000  0000  1110  100
-    DBGBVR15_EL1      = 0x807c, // 10  000  0000  1111  100
-    DBGBCR0_EL1       = 0x8005, // 10  000  0000  0000  101
-    DBGBCR1_EL1       = 0x800d, // 10  000  0000  0001  101
-    DBGBCR2_EL1       = 0x8015, // 10  000  0000  0010  101
-    DBGBCR3_EL1       = 0x801d, // 10  000  0000  0011  101
-    DBGBCR4_EL1       = 0x8025, // 10  000  0000  0100  101
-    DBGBCR5_EL1       = 0x802d, // 10  000  0000  0101  101
-    DBGBCR6_EL1       = 0x8035, // 10  000  0000  0110  101
-    DBGBCR7_EL1       = 0x803d, // 10  000  0000  0111  101
-    DBGBCR8_EL1       = 0x8045, // 10  000  0000  1000  101
-    DBGBCR9_EL1       = 0x804d, // 10  000  0000  1001  101
-    DBGBCR10_EL1      = 0x8055, // 10  000  0000  1010  101
-    DBGBCR11_EL1      = 0x805d, // 10  000  0000  1011  101
-    DBGBCR12_EL1      = 0x8065, // 10  000  0000  1100  101
-    DBGBCR13_EL1      = 0x806d, // 10  000  0000  1101  101
-    DBGBCR14_EL1      = 0x8075, // 10  000  0000  1110  101
-    DBGBCR15_EL1      = 0x807d, // 10  000  0000  1111  101
-    DBGWVR0_EL1       = 0x8006, // 10  000  0000  0000  110
-    DBGWVR1_EL1       = 0x800e, // 10  000  0000  0001  110
-    DBGWVR2_EL1       = 0x8016, // 10  000  0000  0010  110
-    DBGWVR3_EL1       = 0x801e, // 10  000  0000  0011  110
-    DBGWVR4_EL1       = 0x8026, // 10  000  0000  0100  110
-    DBGWVR5_EL1       = 0x802e, // 10  000  0000  0101  110
-    DBGWVR6_EL1       = 0x8036, // 10  000  0000  0110  110
-    DBGWVR7_EL1       = 0x803e, // 10  000  0000  0111  110
-    DBGWVR8_EL1       = 0x8046, // 10  000  0000  1000  110
-    DBGWVR9_EL1       = 0x804e, // 10  000  0000  1001  110
-    DBGWVR10_EL1      = 0x8056, // 10  000  0000  1010  110
-    DBGWVR11_EL1      = 0x805e, // 10  000  0000  1011  110
-    DBGWVR12_EL1      = 0x8066, // 10  000  0000  1100  110
-    DBGWVR13_EL1      = 0x806e, // 10  000  0000  1101  110
-    DBGWVR14_EL1      = 0x8076, // 10  000  0000  1110  110
-    DBGWVR15_EL1      = 0x807e, // 10  000  0000  1111  110
-    DBGWCR0_EL1       = 0x8007, // 10  000  0000  0000  111
-    DBGWCR1_EL1       = 0x800f, // 10  000  0000  0001  111
-    DBGWCR2_EL1       = 0x8017, // 10  000  0000  0010  111
-    DBGWCR3_EL1       = 0x801f, // 10  000  0000  0011  111
-    DBGWCR4_EL1       = 0x8027, // 10  000  0000  0100  111
-    DBGWCR5_EL1       = 0x802f, // 10  000  0000  0101  111
-    DBGWCR6_EL1       = 0x8037, // 10  000  0000  0110  111
-    DBGWCR7_EL1       = 0x803f, // 10  000  0000  0111  111
-    DBGWCR8_EL1       = 0x8047, // 10  000  0000  1000  111
-    DBGWCR9_EL1       = 0x804f, // 10  000  0000  1001  111
-    DBGWCR10_EL1      = 0x8057, // 10  000  0000  1010  111
-    DBGWCR11_EL1      = 0x805f, // 10  000  0000  1011  111
-    DBGWCR12_EL1      = 0x8067, // 10  000  0000  1100  111
-    DBGWCR13_EL1      = 0x806f, // 10  000  0000  1101  111
-    DBGWCR14_EL1      = 0x8077, // 10  000  0000  1110  111
-    DBGWCR15_EL1      = 0x807f, // 10  000  0000  1111  111
-    TEEHBR32_EL1      = 0x9080, // 10  010  0001  0000  000
-    OSDLR_EL1         = 0x809c, // 10  000  0001  0011  100
-    DBGPRCR_EL1       = 0x80a4, // 10  000  0001  0100  100
-    DBGCLAIMSET_EL1   = 0x83c6, // 10  000  0111  1000  110
-    DBGCLAIMCLR_EL1   = 0x83ce, // 10  000  0111  1001  110
-    CSSELR_EL1        = 0xd000, // 11  010  0000  0000  000
-    VPIDR_EL2         = 0xe000, // 11  100  0000  0000  000
-    VMPIDR_EL2        = 0xe005, // 11  100  0000  0000  101
-    CPACR_EL1         = 0xc082, // 11  000  0001  0000  010
-    SCTLR_EL1         = 0xc080, // 11  000  0001  0000  000
-    SCTLR_EL2         = 0xe080, // 11  100  0001  0000  000
-    SCTLR_EL3         = 0xf080, // 11  110  0001  0000  000
-    ACTLR_EL1         = 0xc081, // 11  000  0001  0000  001
-    ACTLR_EL2         = 0xe081, // 11  100  0001  0000  001
-    ACTLR_EL3         = 0xf081, // 11  110  0001  0000  001
-    HCR_EL2           = 0xe088, // 11  100  0001  0001  000
-    SCR_EL3           = 0xf088, // 11  110  0001  0001  000
-    MDCR_EL2          = 0xe089, // 11  100  0001  0001  001
-    SDER32_EL3        = 0xf089, // 11  110  0001  0001  001
-    CPTR_EL2          = 0xe08a, // 11  100  0001  0001  010
-    CPTR_EL3          = 0xf08a, // 11  110  0001  0001  010
-    HSTR_EL2          = 0xe08b, // 11  100  0001  0001  011
-    HACR_EL2          = 0xe08f, // 11  100  0001  0001  111
-    MDCR_EL3          = 0xf099, // 11  110  0001  0011  001
-    TTBR0_EL1         = 0xc100, // 11  000  0010  0000  000
-    TTBR0_EL2         = 0xe100, // 11  100  0010  0000  000
-    TTBR0_EL3         = 0xf100, // 11  110  0010  0000  000
-    TTBR1_EL1         = 0xc101, // 11  000  0010  0000  001
-    TCR_EL1           = 0xc102, // 11  000  0010  0000  010
-    TCR_EL2           = 0xe102, // 11  100  0010  0000  010
-    TCR_EL3           = 0xf102, // 11  110  0010  0000  010
-    VTTBR_EL2         = 0xe108, // 11  100  0010  0001  000
-    VTCR_EL2          = 0xe10a, // 11  100  0010  0001  010
-    DACR32_EL2        = 0xe180, // 11  100  0011  0000  000
-    SPSR_EL1          = 0xc200, // 11  000  0100  0000  000
-    SPSR_EL2          = 0xe200, // 11  100  0100  0000  000
-    SPSR_EL3          = 0xf200, // 11  110  0100  0000  000
-    ELR_EL1           = 0xc201, // 11  000  0100  0000  001
-    ELR_EL2           = 0xe201, // 11  100  0100  0000  001
-    ELR_EL3           = 0xf201, // 11  110  0100  0000  001
-    SP_EL0            = 0xc208, // 11  000  0100  0001  000
-    SP_EL1            = 0xe208, // 11  100  0100  0001  000
-    SP_EL2            = 0xf208, // 11  110  0100  0001  000
-    SPSel             = 0xc210, // 11  000  0100  0010  000
-    NZCV              = 0xda10, // 11  011  0100  0010  000
-    DAIF              = 0xda11, // 11  011  0100  0010  001
-    CurrentEL         = 0xc212, // 11  000  0100  0010  010
-    SPSR_irq          = 0xe218, // 11  100  0100  0011  000
-    SPSR_abt          = 0xe219, // 11  100  0100  0011  001
-    SPSR_und          = 0xe21a, // 11  100  0100  0011  010
-    SPSR_fiq          = 0xe21b, // 11  100  0100  0011  011
-    FPCR              = 0xda20, // 11  011  0100  0100  000
-    FPSR              = 0xda21, // 11  011  0100  0100  001
-    DSPSR_EL0         = 0xda28, // 11  011  0100  0101  000
-    DLR_EL0           = 0xda29, // 11  011  0100  0101  001
-    IFSR32_EL2        = 0xe281, // 11  100  0101  0000  001
-    AFSR0_EL1         = 0xc288, // 11  000  0101  0001  000
-    AFSR0_EL2         = 0xe288, // 11  100  0101  0001  000
-    AFSR0_EL3         = 0xf288, // 11  110  0101  0001  000
-    AFSR1_EL1         = 0xc289, // 11  000  0101  0001  001
-    AFSR1_EL2         = 0xe289, // 11  100  0101  0001  001
-    AFSR1_EL3         = 0xf289, // 11  110  0101  0001  001
-    ESR_EL1           = 0xc290, // 11  000  0101  0010  000
-    ESR_EL2           = 0xe290, // 11  100  0101  0010  000
-    ESR_EL3           = 0xf290, // 11  110  0101  0010  000
-    FPEXC32_EL2       = 0xe298, // 11  100  0101  0011  000
-    FAR_EL1           = 0xc300, // 11  000  0110  0000  000
-    FAR_EL2           = 0xe300, // 11  100  0110  0000  000
-    FAR_EL3           = 0xf300, // 11  110  0110  0000  000
-    HPFAR_EL2         = 0xe304, // 11  100  0110  0000  100
-    PAR_EL1           = 0xc3a0, // 11  000  0111  0100  000
-    PMCR_EL0          = 0xdce0, // 11  011  1001  1100  000
-    PMCNTENSET_EL0    = 0xdce1, // 11  011  1001  1100  001
-    PMCNTENCLR_EL0    = 0xdce2, // 11  011  1001  1100  010
-    PMOVSCLR_EL0      = 0xdce3, // 11  011  1001  1100  011
-    PMSELR_EL0        = 0xdce5, // 11  011  1001  1100  101
-    PMCCNTR_EL0       = 0xdce8, // 11  011  1001  1101  000
-    PMXEVTYPER_EL0    = 0xdce9, // 11  011  1001  1101  001
-    PMXEVCNTR_EL0     = 0xdcea, // 11  011  1001  1101  010
-    PMUSERENR_EL0     = 0xdcf0, // 11  011  1001  1110  000
-    PMINTENSET_EL1    = 0xc4f1, // 11  000  1001  1110  001
-    PMINTENCLR_EL1    = 0xc4f2, // 11  000  1001  1110  010
-    PMOVSSET_EL0      = 0xdcf3, // 11  011  1001  1110  011
-    MAIR_EL1          = 0xc510, // 11  000  1010  0010  000
-    MAIR_EL2          = 0xe510, // 11  100  1010  0010  000
-    MAIR_EL3          = 0xf510, // 11  110  1010  0010  000
-    AMAIR_EL1         = 0xc518, // 11  000  1010  0011  000
-    AMAIR_EL2         = 0xe518, // 11  100  1010  0011  000
-    AMAIR_EL3         = 0xf518, // 11  110  1010  0011  000
-    VBAR_EL1          = 0xc600, // 11  000  1100  0000  000
-    VBAR_EL2          = 0xe600, // 11  100  1100  0000  000
-    VBAR_EL3          = 0xf600, // 11  110  1100  0000  000
-    RMR_EL1           = 0xc602, // 11  000  1100  0000  010
-    RMR_EL2           = 0xe602, // 11  100  1100  0000  010
-    RMR_EL3           = 0xf602, // 11  110  1100  0000  010
-    CONTEXTIDR_EL1    = 0xc681, // 11  000  1101  0000  001
-    TPIDR_EL0         = 0xde82, // 11  011  1101  0000  010
-    TPIDR_EL2         = 0xe682, // 11  100  1101  0000  010
-    TPIDR_EL3         = 0xf682, // 11  110  1101  0000  010
-    TPIDRRO_EL0       = 0xde83, // 11  011  1101  0000  011
-    TPIDR_EL1         = 0xc684, // 11  000  1101  0000  100
-    CNTFRQ_EL0        = 0xdf00, // 11  011  1110  0000  000
-    CNTVOFF_EL2       = 0xe703, // 11  100  1110  0000  011
-    CNTKCTL_EL1       = 0xc708, // 11  000  1110  0001  000
-    CNTHCTL_EL2       = 0xe708, // 11  100  1110  0001  000
-    CNTP_TVAL_EL0     = 0xdf10, // 11  011  1110  0010  000
-    CNTHP_TVAL_EL2    = 0xe710, // 11  100  1110  0010  000
-    CNTPS_TVAL_EL1    = 0xff10, // 11  111  1110  0010  000
-    CNTP_CTL_EL0      = 0xdf11, // 11  011  1110  0010  001
-    CNTHP_CTL_EL2     = 0xe711, // 11  100  1110  0010  001
-    CNTPS_CTL_EL1     = 0xff11, // 11  111  1110  0010  001
-    CNTP_CVAL_EL0     = 0xdf12, // 11  011  1110  0010  010
-    CNTHP_CVAL_EL2    = 0xe712, // 11  100  1110  0010  010
-    CNTPS_CVAL_EL1    = 0xff12, // 11  111  1110  0010  010
-    CNTV_TVAL_EL0     = 0xdf18, // 11  011  1110  0011  000
-    CNTV_CTL_EL0      = 0xdf19, // 11  011  1110  0011  001
-    CNTV_CVAL_EL0     = 0xdf1a, // 11  011  1110  0011  010
-    PMEVCNTR0_EL0     = 0xdf40, // 11  011  1110  1000  000
-    PMEVCNTR1_EL0     = 0xdf41, // 11  011  1110  1000  001
-    PMEVCNTR2_EL0     = 0xdf42, // 11  011  1110  1000  010
-    PMEVCNTR3_EL0     = 0xdf43, // 11  011  1110  1000  011
-    PMEVCNTR4_EL0     = 0xdf44, // 11  011  1110  1000  100
-    PMEVCNTR5_EL0     = 0xdf45, // 11  011  1110  1000  101
-    PMEVCNTR6_EL0     = 0xdf46, // 11  011  1110  1000  110
-    PMEVCNTR7_EL0     = 0xdf47, // 11  011  1110  1000  111
-    PMEVCNTR8_EL0     = 0xdf48, // 11  011  1110  1001  000
-    PMEVCNTR9_EL0     = 0xdf49, // 11  011  1110  1001  001
-    PMEVCNTR10_EL0    = 0xdf4a, // 11  011  1110  1001  010
-    PMEVCNTR11_EL0    = 0xdf4b, // 11  011  1110  1001  011
-    PMEVCNTR12_EL0    = 0xdf4c, // 11  011  1110  1001  100
-    PMEVCNTR13_EL0    = 0xdf4d, // 11  011  1110  1001  101
-    PMEVCNTR14_EL0    = 0xdf4e, // 11  011  1110  1001  110
-    PMEVCNTR15_EL0    = 0xdf4f, // 11  011  1110  1001  111
-    PMEVCNTR16_EL0    = 0xdf50, // 11  011  1110  1010  000
-    PMEVCNTR17_EL0    = 0xdf51, // 11  011  1110  1010  001
-    PMEVCNTR18_EL0    = 0xdf52, // 11  011  1110  1010  010
-    PMEVCNTR19_EL0    = 0xdf53, // 11  011  1110  1010  011
-    PMEVCNTR20_EL0    = 0xdf54, // 11  011  1110  1010  100
-    PMEVCNTR21_EL0    = 0xdf55, // 11  011  1110  1010  101
-    PMEVCNTR22_EL0    = 0xdf56, // 11  011  1110  1010  110
-    PMEVCNTR23_EL0    = 0xdf57, // 11  011  1110  1010  111
-    PMEVCNTR24_EL0    = 0xdf58, // 11  011  1110  1011  000
-    PMEVCNTR25_EL0    = 0xdf59, // 11  011  1110  1011  001
-    PMEVCNTR26_EL0    = 0xdf5a, // 11  011  1110  1011  010
-    PMEVCNTR27_EL0    = 0xdf5b, // 11  011  1110  1011  011
-    PMEVCNTR28_EL0    = 0xdf5c, // 11  011  1110  1011  100
-    PMEVCNTR29_EL0    = 0xdf5d, // 11  011  1110  1011  101
-    PMEVCNTR30_EL0    = 0xdf5e, // 11  011  1110  1011  110
-    PMCCFILTR_EL0     = 0xdf7f, // 11  011  1110  1111  111
-    PMEVTYPER0_EL0    = 0xdf60, // 11  011  1110  1100  000
-    PMEVTYPER1_EL0    = 0xdf61, // 11  011  1110  1100  001
-    PMEVTYPER2_EL0    = 0xdf62, // 11  011  1110  1100  010
-    PMEVTYPER3_EL0    = 0xdf63, // 11  011  1110  1100  011
-    PMEVTYPER4_EL0    = 0xdf64, // 11  011  1110  1100  100
-    PMEVTYPER5_EL0    = 0xdf65, // 11  011  1110  1100  101
-    PMEVTYPER6_EL0    = 0xdf66, // 11  011  1110  1100  110
-    PMEVTYPER7_EL0    = 0xdf67, // 11  011  1110  1100  111
-    PMEVTYPER8_EL0    = 0xdf68, // 11  011  1110  1101  000
-    PMEVTYPER9_EL0    = 0xdf69, // 11  011  1110  1101  001
-    PMEVTYPER10_EL0   = 0xdf6a, // 11  011  1110  1101  010
-    PMEVTYPER11_EL0   = 0xdf6b, // 11  011  1110  1101  011
-    PMEVTYPER12_EL0   = 0xdf6c, // 11  011  1110  1101  100
-    PMEVTYPER13_EL0   = 0xdf6d, // 11  011  1110  1101  101
-    PMEVTYPER14_EL0   = 0xdf6e, // 11  011  1110  1101  110
-    PMEVTYPER15_EL0   = 0xdf6f, // 11  011  1110  1101  111
-    PMEVTYPER16_EL0   = 0xdf70, // 11  011  1110  1110  000
-    PMEVTYPER17_EL0   = 0xdf71, // 11  011  1110  1110  001
-    PMEVTYPER18_EL0   = 0xdf72, // 11  011  1110  1110  010
-    PMEVTYPER19_EL0   = 0xdf73, // 11  011  1110  1110  011
-    PMEVTYPER20_EL0   = 0xdf74, // 11  011  1110  1110  100
-    PMEVTYPER21_EL0   = 0xdf75, // 11  011  1110  1110  101
-    PMEVTYPER22_EL0   = 0xdf76, // 11  011  1110  1110  110
-    PMEVTYPER23_EL0   = 0xdf77, // 11  011  1110  1110  111
-    PMEVTYPER24_EL0   = 0xdf78, // 11  011  1110  1111  000
-    PMEVTYPER25_EL0   = 0xdf79, // 11  011  1110  1111  001
-    PMEVTYPER26_EL0   = 0xdf7a, // 11  011  1110  1111  010
-    PMEVTYPER27_EL0   = 0xdf7b, // 11  011  1110  1111  011
-    PMEVTYPER28_EL0   = 0xdf7c, // 11  011  1110  1111  100
-    PMEVTYPER29_EL0   = 0xdf7d, // 11  011  1110  1111  101
-    PMEVTYPER30_EL0   = 0xdf7e, // 11  011  1110  1111  110
-
-    // Trace registers
-    TRCPRGCTLR        = 0x8808, // 10  001  0000  0001  000
-    TRCPROCSELR       = 0x8810, // 10  001  0000  0010  000
-    TRCCONFIGR        = 0x8820, // 10  001  0000  0100  000
-    TRCAUXCTLR        = 0x8830, // 10  001  0000  0110  000
-    TRCEVENTCTL0R     = 0x8840, // 10  001  0000  1000  000
-    TRCEVENTCTL1R     = 0x8848, // 10  001  0000  1001  000
-    TRCSTALLCTLR      = 0x8858, // 10  001  0000  1011  000
-    TRCTSCTLR         = 0x8860, // 10  001  0000  1100  000
-    TRCSYNCPR         = 0x8868, // 10  001  0000  1101  000
-    TRCCCCTLR         = 0x8870, // 10  001  0000  1110  000
-    TRCBBCTLR         = 0x8878, // 10  001  0000  1111  000
-    TRCTRACEIDR       = 0x8801, // 10  001  0000  0000  001
-    TRCQCTLR          = 0x8809, // 10  001  0000  0001  001
-    TRCVICTLR         = 0x8802, // 10  001  0000  0000  010
-    TRCVIIECTLR       = 0x880a, // 10  001  0000  0001  010
-    TRCVISSCTLR       = 0x8812, // 10  001  0000  0010  010
-    TRCVIPCSSCTLR     = 0x881a, // 10  001  0000  0011  010
-    TRCVDCTLR         = 0x8842, // 10  001  0000  1000  010
-    TRCVDSACCTLR      = 0x884a, // 10  001  0000  1001  010
-    TRCVDARCCTLR      = 0x8852, // 10  001  0000  1010  010
-    TRCSEQEVR0        = 0x8804, // 10  001  0000  0000  100
-    TRCSEQEVR1        = 0x880c, // 10  001  0000  0001  100
-    TRCSEQEVR2        = 0x8814, // 10  001  0000  0010  100
-    TRCSEQRSTEVR      = 0x8834, // 10  001  0000  0110  100
-    TRCSEQSTR         = 0x883c, // 10  001  0000  0111  100
-    TRCEXTINSELR      = 0x8844, // 10  001  0000  1000  100
-    TRCCNTRLDVR0      = 0x8805, // 10  001  0000  0000  101
-    TRCCNTRLDVR1      = 0x880d, // 10  001  0000  0001  101
-    TRCCNTRLDVR2      = 0x8815, // 10  001  0000  0010  101
-    TRCCNTRLDVR3      = 0x881d, // 10  001  0000  0011  101
-    TRCCNTCTLR0       = 0x8825, // 10  001  0000  0100  101
-    TRCCNTCTLR1       = 0x882d, // 10  001  0000  0101  101
-    TRCCNTCTLR2       = 0x8835, // 10  001  0000  0110  101
-    TRCCNTCTLR3       = 0x883d, // 10  001  0000  0111  101
-    TRCCNTVR0         = 0x8845, // 10  001  0000  1000  101
-    TRCCNTVR1         = 0x884d, // 10  001  0000  1001  101
-    TRCCNTVR2         = 0x8855, // 10  001  0000  1010  101
-    TRCCNTVR3         = 0x885d, // 10  001  0000  1011  101
-    TRCIMSPEC0        = 0x8807, // 10  001  0000  0000  111
-    TRCIMSPEC1        = 0x880f, // 10  001  0000  0001  111
-    TRCIMSPEC2        = 0x8817, // 10  001  0000  0010  111
-    TRCIMSPEC3        = 0x881f, // 10  001  0000  0011  111
-    TRCIMSPEC4        = 0x8827, // 10  001  0000  0100  111
-    TRCIMSPEC5        = 0x882f, // 10  001  0000  0101  111
-    TRCIMSPEC6        = 0x8837, // 10  001  0000  0110  111
-    TRCIMSPEC7        = 0x883f, // 10  001  0000  0111  111
-    TRCRSCTLR2        = 0x8890, // 10  001  0001  0010  000
-    TRCRSCTLR3        = 0x8898, // 10  001  0001  0011  000
-    TRCRSCTLR4        = 0x88a0, // 10  001  0001  0100  000
-    TRCRSCTLR5        = 0x88a8, // 10  001  0001  0101  000
-    TRCRSCTLR6        = 0x88b0, // 10  001  0001  0110  000
-    TRCRSCTLR7        = 0x88b8, // 10  001  0001  0111  000
-    TRCRSCTLR8        = 0x88c0, // 10  001  0001  1000  000
-    TRCRSCTLR9        = 0x88c8, // 10  001  0001  1001  000
-    TRCRSCTLR10       = 0x88d0, // 10  001  0001  1010  000
-    TRCRSCTLR11       = 0x88d8, // 10  001  0001  1011  000
-    TRCRSCTLR12       = 0x88e0, // 10  001  0001  1100  000
-    TRCRSCTLR13       = 0x88e8, // 10  001  0001  1101  000
-    TRCRSCTLR14       = 0x88f0, // 10  001  0001  1110  000
-    TRCRSCTLR15       = 0x88f8, // 10  001  0001  1111  000
-    TRCRSCTLR16       = 0x8881, // 10  001  0001  0000  001
-    TRCRSCTLR17       = 0x8889, // 10  001  0001  0001  001
-    TRCRSCTLR18       = 0x8891, // 10  001  0001  0010  001
-    TRCRSCTLR19       = 0x8899, // 10  001  0001  0011  001
-    TRCRSCTLR20       = 0x88a1, // 10  001  0001  0100  001
-    TRCRSCTLR21       = 0x88a9, // 10  001  0001  0101  001
-    TRCRSCTLR22       = 0x88b1, // 10  001  0001  0110  001
-    TRCRSCTLR23       = 0x88b9, // 10  001  0001  0111  001
-    TRCRSCTLR24       = 0x88c1, // 10  001  0001  1000  001
-    TRCRSCTLR25       = 0x88c9, // 10  001  0001  1001  001
-    TRCRSCTLR26       = 0x88d1, // 10  001  0001  1010  001
-    TRCRSCTLR27       = 0x88d9, // 10  001  0001  1011  001
-    TRCRSCTLR28       = 0x88e1, // 10  001  0001  1100  001
-    TRCRSCTLR29       = 0x88e9, // 10  001  0001  1101  001
-    TRCRSCTLR30       = 0x88f1, // 10  001  0001  1110  001
-    TRCRSCTLR31       = 0x88f9, // 10  001  0001  1111  001
-    TRCSSCCR0         = 0x8882, // 10  001  0001  0000  010
-    TRCSSCCR1         = 0x888a, // 10  001  0001  0001  010
-    TRCSSCCR2         = 0x8892, // 10  001  0001  0010  010
-    TRCSSCCR3         = 0x889a, // 10  001  0001  0011  010
-    TRCSSCCR4         = 0x88a2, // 10  001  0001  0100  010
-    TRCSSCCR5         = 0x88aa, // 10  001  0001  0101  010
-    TRCSSCCR6         = 0x88b2, // 10  001  0001  0110  010
-    TRCSSCCR7         = 0x88ba, // 10  001  0001  0111  010
-    TRCSSCSR0         = 0x88c2, // 10  001  0001  1000  010
-    TRCSSCSR1         = 0x88ca, // 10  001  0001  1001  010
-    TRCSSCSR2         = 0x88d2, // 10  001  0001  1010  010
-    TRCSSCSR3         = 0x88da, // 10  001  0001  1011  010
-    TRCSSCSR4         = 0x88e2, // 10  001  0001  1100  010
-    TRCSSCSR5         = 0x88ea, // 10  001  0001  1101  010
-    TRCSSCSR6         = 0x88f2, // 10  001  0001  1110  010
-    TRCSSCSR7         = 0x88fa, // 10  001  0001  1111  010
-    TRCSSPCICR0       = 0x8883, // 10  001  0001  0000  011
-    TRCSSPCICR1       = 0x888b, // 10  001  0001  0001  011
-    TRCSSPCICR2       = 0x8893, // 10  001  0001  0010  011
-    TRCSSPCICR3       = 0x889b, // 10  001  0001  0011  011
-    TRCSSPCICR4       = 0x88a3, // 10  001  0001  0100  011
-    TRCSSPCICR5       = 0x88ab, // 10  001  0001  0101  011
-    TRCSSPCICR6       = 0x88b3, // 10  001  0001  0110  011
-    TRCSSPCICR7       = 0x88bb, // 10  001  0001  0111  011
-    TRCPDCR           = 0x88a4, // 10  001  0001  0100  100
-    TRCACVR0          = 0x8900, // 10  001  0010  0000  000
-    TRCACVR1          = 0x8910, // 10  001  0010  0010  000
-    TRCACVR2          = 0x8920, // 10  001  0010  0100  000
-    TRCACVR3          = 0x8930, // 10  001  0010  0110  000
-    TRCACVR4          = 0x8940, // 10  001  0010  1000  000
-    TRCACVR5          = 0x8950, // 10  001  0010  1010  000
-    TRCACVR6          = 0x8960, // 10  001  0010  1100  000
-    TRCACVR7          = 0x8970, // 10  001  0010  1110  000
-    TRCACVR8          = 0x8901, // 10  001  0010  0000  001
-    TRCACVR9          = 0x8911, // 10  001  0010  0010  001
-    TRCACVR10         = 0x8921, // 10  001  0010  0100  001
-    TRCACVR11         = 0x8931, // 10  001  0010  0110  001
-    TRCACVR12         = 0x8941, // 10  001  0010  1000  001
-    TRCACVR13         = 0x8951, // 10  001  0010  1010  001
-    TRCACVR14         = 0x8961, // 10  001  0010  1100  001
-    TRCACVR15         = 0x8971, // 10  001  0010  1110  001
-    TRCACATR0         = 0x8902, // 10  001  0010  0000  010
-    TRCACATR1         = 0x8912, // 10  001  0010  0010  010
-    TRCACATR2         = 0x8922, // 10  001  0010  0100  010
-    TRCACATR3         = 0x8932, // 10  001  0010  0110  010
-    TRCACATR4         = 0x8942, // 10  001  0010  1000  010
-    TRCACATR5         = 0x8952, // 10  001  0010  1010  010
-    TRCACATR6         = 0x8962, // 10  001  0010  1100  010
-    TRCACATR7         = 0x8972, // 10  001  0010  1110  010
-    TRCACATR8         = 0x8903, // 10  001  0010  0000  011
-    TRCACATR9         = 0x8913, // 10  001  0010  0010  011
-    TRCACATR10        = 0x8923, // 10  001  0010  0100  011
-    TRCACATR11        = 0x8933, // 10  001  0010  0110  011
-    TRCACATR12        = 0x8943, // 10  001  0010  1000  011
-    TRCACATR13        = 0x8953, // 10  001  0010  1010  011
-    TRCACATR14        = 0x8963, // 10  001  0010  1100  011
-    TRCACATR15        = 0x8973, // 10  001  0010  1110  011
-    TRCDVCVR0         = 0x8904, // 10  001  0010  0000  100
-    TRCDVCVR1         = 0x8924, // 10  001  0010  0100  100
-    TRCDVCVR2         = 0x8944, // 10  001  0010  1000  100
-    TRCDVCVR3         = 0x8964, // 10  001  0010  1100  100
-    TRCDVCVR4         = 0x8905, // 10  001  0010  0000  101
-    TRCDVCVR5         = 0x8925, // 10  001  0010  0100  101
-    TRCDVCVR6         = 0x8945, // 10  001  0010  1000  101
-    TRCDVCVR7         = 0x8965, // 10  001  0010  1100  101
-    TRCDVCMR0         = 0x8906, // 10  001  0010  0000  110
-    TRCDVCMR1         = 0x8926, // 10  001  0010  0100  110
-    TRCDVCMR2         = 0x8946, // 10  001  0010  1000  110
-    TRCDVCMR3         = 0x8966, // 10  001  0010  1100  110
-    TRCDVCMR4         = 0x8907, // 10  001  0010  0000  111
-    TRCDVCMR5         = 0x8927, // 10  001  0010  0100  111
-    TRCDVCMR6         = 0x8947, // 10  001  0010  1000  111
-    TRCDVCMR7         = 0x8967, // 10  001  0010  1100  111
-    TRCCIDCVR0        = 0x8980, // 10  001  0011  0000  000
-    TRCCIDCVR1        = 0x8990, // 10  001  0011  0010  000
-    TRCCIDCVR2        = 0x89a0, // 10  001  0011  0100  000
-    TRCCIDCVR3        = 0x89b0, // 10  001  0011  0110  000
-    TRCCIDCVR4        = 0x89c0, // 10  001  0011  1000  000
-    TRCCIDCVR5        = 0x89d0, // 10  001  0011  1010  000
-    TRCCIDCVR6        = 0x89e0, // 10  001  0011  1100  000
-    TRCCIDCVR7        = 0x89f0, // 10  001  0011  1110  000
-    TRCVMIDCVR0       = 0x8981, // 10  001  0011  0000  001
-    TRCVMIDCVR1       = 0x8991, // 10  001  0011  0010  001
-    TRCVMIDCVR2       = 0x89a1, // 10  001  0011  0100  001
-    TRCVMIDCVR3       = 0x89b1, // 10  001  0011  0110  001
-    TRCVMIDCVR4       = 0x89c1, // 10  001  0011  1000  001
-    TRCVMIDCVR5       = 0x89d1, // 10  001  0011  1010  001
-    TRCVMIDCVR6       = 0x89e1, // 10  001  0011  1100  001
-    TRCVMIDCVR7       = 0x89f1, // 10  001  0011  1110  001
-    TRCCIDCCTLR0      = 0x8982, // 10  001  0011  0000  010
-    TRCCIDCCTLR1      = 0x898a, // 10  001  0011  0001  010
-    TRCVMIDCCTLR0     = 0x8992, // 10  001  0011  0010  010
-    TRCVMIDCCTLR1     = 0x899a, // 10  001  0011  0011  010
-    TRCITCTRL         = 0x8b84, // 10  001  0111  0000  100
-    TRCCLAIMSET       = 0x8bc6, // 10  001  0111  1000  110
-    TRCCLAIMCLR       = 0x8bce, // 10  001  0111  1001  110
-
-    // GICv3 registers
-    ICC_BPR1_EL1      = 0xc663, // 11  000  1100  1100  011
-    ICC_BPR0_EL1      = 0xc643, // 11  000  1100  1000  011
-    ICC_PMR_EL1       = 0xc230, // 11  000  0100  0110  000
-    ICC_CTLR_EL1      = 0xc664, // 11  000  1100  1100  100
-    ICC_CTLR_EL3      = 0xf664, // 11  110  1100  1100  100
-    ICC_SRE_EL1       = 0xc665, // 11  000  1100  1100  101
-    ICC_SRE_EL2       = 0xe64d, // 11  100  1100  1001  101
-    ICC_SRE_EL3       = 0xf665, // 11  110  1100  1100  101
-    ICC_IGRPEN0_EL1   = 0xc666, // 11  000  1100  1100  110
-    ICC_IGRPEN1_EL1   = 0xc667, // 11  000  1100  1100  111
-    ICC_IGRPEN1_EL3   = 0xf667, // 11  110  1100  1100  111
-    ICC_SEIEN_EL1     = 0xc668, // 11  000  1100  1101  000
-    ICC_AP0R0_EL1     = 0xc644, // 11  000  1100  1000  100
-    ICC_AP0R1_EL1     = 0xc645, // 11  000  1100  1000  101
-    ICC_AP0R2_EL1     = 0xc646, // 11  000  1100  1000  110
-    ICC_AP0R3_EL1     = 0xc647, // 11  000  1100  1000  111
-    ICC_AP1R0_EL1     = 0xc648, // 11  000  1100  1001  000
-    ICC_AP1R1_EL1     = 0xc649, // 11  000  1100  1001  001
-    ICC_AP1R2_EL1     = 0xc64a, // 11  000  1100  1001  010
-    ICC_AP1R3_EL1     = 0xc64b, // 11  000  1100  1001  011
-    ICH_AP0R0_EL2     = 0xe640, // 11  100  1100  1000  000
-    ICH_AP0R1_EL2     = 0xe641, // 11  100  1100  1000  001
-    ICH_AP0R2_EL2     = 0xe642, // 11  100  1100  1000  010
-    ICH_AP0R3_EL2     = 0xe643, // 11  100  1100  1000  011
-    ICH_AP1R0_EL2     = 0xe648, // 11  100  1100  1001  000
-    ICH_AP1R1_EL2     = 0xe649, // 11  100  1100  1001  001
-    ICH_AP1R2_EL2     = 0xe64a, // 11  100  1100  1001  010
-    ICH_AP1R3_EL2     = 0xe64b, // 11  100  1100  1001  011
-    ICH_HCR_EL2       = 0xe658, // 11  100  1100  1011  000
-    ICH_MISR_EL2      = 0xe65a, // 11  100  1100  1011  010
-    ICH_VMCR_EL2      = 0xe65f, // 11  100  1100  1011  111
-    ICH_VSEIR_EL2     = 0xe64c, // 11  100  1100  1001  100
-    ICH_LR0_EL2       = 0xe660, // 11  100  1100  1100  000
-    ICH_LR1_EL2       = 0xe661, // 11  100  1100  1100  001
-    ICH_LR2_EL2       = 0xe662, // 11  100  1100  1100  010
-    ICH_LR3_EL2       = 0xe663, // 11  100  1100  1100  011
-    ICH_LR4_EL2       = 0xe664, // 11  100  1100  1100  100
-    ICH_LR5_EL2       = 0xe665, // 11  100  1100  1100  101
-    ICH_LR6_EL2       = 0xe666, // 11  100  1100  1100  110
-    ICH_LR7_EL2       = 0xe667, // 11  100  1100  1100  111
-    ICH_LR8_EL2       = 0xe668, // 11  100  1100  1101  000
-    ICH_LR9_EL2       = 0xe669, // 11  100  1100  1101  001
-    ICH_LR10_EL2      = 0xe66a, // 11  100  1100  1101  010
-    ICH_LR11_EL2      = 0xe66b, // 11  100  1100  1101  011
-    ICH_LR12_EL2      = 0xe66c, // 11  100  1100  1101  100
-    ICH_LR13_EL2      = 0xe66d, // 11  100  1100  1101  101
-    ICH_LR14_EL2      = 0xe66e, // 11  100  1100  1101  110
-    ICH_LR15_EL2      = 0xe66f  // 11  100  1100  1101  111
-  };
-
-  // Note that these do not inherit from NamedImmMapper. This class is
-  // sufficiently different in its behaviour that I don't believe it's worth
-  // burdening the common NamedImmMapper with abstractions only needed in
-  // this one case.
-  struct SysRegMapper {
-    static const NamedImmMapper::Mapping SysRegPairs[];
-
-    const NamedImmMapper::Mapping *InstPairs;
-    size_t NumInstPairs;
-
-    SysRegMapper() {}
-    uint32_t fromString(StringRef Name, bool &Valid) const;
-    std::string toString(uint32_t Bits, bool &Valid) const;
-  };
-
-  struct MSRMapper : SysRegMapper {
-    static const NamedImmMapper::Mapping MSRPairs[];
-    MSRMapper();
-  };
-
-  struct MRSMapper : SysRegMapper {
-    static const NamedImmMapper::Mapping MRSPairs[];
-    MRSMapper();
-  };
-
-  uint32_t ParseGenericRegister(StringRef Name, bool &Valid);
-}
-
-namespace A64TLBI {
-  enum TLBIValues {
-    Invalid = -1,          // Op0 Op1  CRn   CRm   Op2
-    IPAS2E1IS    = 0x6401, // 01  100  1000  0000  001
-    IPAS2LE1IS   = 0x6405, // 01  100  1000  0000  101
-    VMALLE1IS    = 0x4418, // 01  000  1000  0011  000
-    ALLE2IS      = 0x6418, // 01  100  1000  0011  000
-    ALLE3IS      = 0x7418, // 01  110  1000  0011  000
-    VAE1IS       = 0x4419, // 01  000  1000  0011  001
-    VAE2IS       = 0x6419, // 01  100  1000  0011  001
-    VAE3IS       = 0x7419, // 01  110  1000  0011  001
-    ASIDE1IS     = 0x441a, // 01  000  1000  0011  010
-    VAAE1IS      = 0x441b, // 01  000  1000  0011  011
-    ALLE1IS      = 0x641c, // 01  100  1000  0011  100
-    VALE1IS      = 0x441d, // 01  000  1000  0011  101
-    VALE2IS      = 0x641d, // 01  100  1000  0011  101
-    VALE3IS      = 0x741d, // 01  110  1000  0011  101
-    VMALLS12E1IS = 0x641e, // 01  100  1000  0011  110
-    VAALE1IS     = 0x441f, // 01  000  1000  0011  111
-    IPAS2E1      = 0x6421, // 01  100  1000  0100  001
-    IPAS2LE1     = 0x6425, // 01  100  1000  0100  101
-    VMALLE1      = 0x4438, // 01  000  1000  0111  000
-    ALLE2        = 0x6438, // 01  100  1000  0111  000
-    ALLE3        = 0x7438, // 01  110  1000  0111  000
-    VAE1         = 0x4439, // 01  000  1000  0111  001
-    VAE2         = 0x6439, // 01  100  1000  0111  001
-    VAE3         = 0x7439, // 01  110  1000  0111  001
-    ASIDE1       = 0x443a, // 01  000  1000  0111  010
-    VAAE1        = 0x443b, // 01  000  1000  0111  011
-    ALLE1        = 0x643c, // 01  100  1000  0111  100
-    VALE1        = 0x443d, // 01  000  1000  0111  101
-    VALE2        = 0x643d, // 01  100  1000  0111  101
-    VALE3        = 0x743d, // 01  110  1000  0111  101
-    VMALLS12E1   = 0x643e, // 01  100  1000  0111  110
-    VAALE1       = 0x443f  // 01  000  1000  0111  111
-  };
-
-  struct TLBIMapper : NamedImmMapper {
-    const static Mapping TLBIPairs[];
-
-    TLBIMapper();
-  };
-
-  static inline bool NeedsRegister(TLBIValues Val) {
-    switch (Val) {
-    case VMALLE1IS:
-    case ALLE2IS:
-    case ALLE3IS:
-    case ALLE1IS:
-    case VMALLS12E1IS:
-    case VMALLE1:
-    case ALLE2:
-    case ALLE3:
-    case ALLE1:
-    case VMALLS12E1:
-      return false;
-    default:
-      return true;
-    }
-  }
-}
-
-namespace AArch64II {
-
-  enum TOF {
-    //===--------------------------------------------------------------===//
-    // AArch64 Specific MachineOperand flags.
-
-    MO_NO_FLAG,
-
-    // MO_GOT - Represents a relocation referring to the GOT entry of a given
-    // symbol. Used in adrp.
-    MO_GOT,
-
-    // MO_GOT_LO12 - Represents a relocation referring to the low 12 bits of the
-    // GOT entry of a given symbol. Used in ldr only.
-    MO_GOT_LO12,
-
-    // MO_DTPREL_* - Represents a relocation referring to the offset from a
-    // module's dynamic thread pointer. Used in the local-dynamic TLS access
-    // model.
-    MO_DTPREL_G1,
-    MO_DTPREL_G0_NC,
-
-    // MO_GOTTPREL_* - Represents a relocation referring to a GOT entry
-    // providing the offset of a variable from the thread-pointer. Used in
-    // initial-exec TLS model where this offset is assigned in the static thread
-    // block and thus known by the dynamic linker.
-    MO_GOTTPREL,
-    MO_GOTTPREL_LO12,
-
-    // MO_TLSDESC_* - Represents a relocation referring to a GOT entry providing
-    // a TLS descriptor chosen by the dynamic linker. Used for the
-    // general-dynamic and local-dynamic TLS access models where very little is
-    // known at link-time.
-    MO_TLSDESC,
-    MO_TLSDESC_LO12,
-
-    // MO_TPREL_* - Represents a relocation referring to the offset of a
-    // variable from the thread pointer itself. Used in the local-exec TLS
-    // access model.
-    MO_TPREL_G1,
-    MO_TPREL_G0_NC,
-
-    // MO_LO12 - On a symbol operand, this represents a relocation containing
-    // lower 12 bits of the address. Used in add/sub/ldr/str.
-    MO_LO12,
-
-    // MO_ABS_G* - Represent the 16-bit granules of an absolute reference using
-    // movz/movk instructions.
-    MO_ABS_G3,
-    MO_ABS_G2_NC,
-    MO_ABS_G1_NC,
-    MO_ABS_G0_NC
-  };
-}
-
-class APFloat;
-
-namespace A64Imms {
-  bool isFPImm(const APFloat &Val, uint32_t &Imm8Bits);
-
-  inline bool isFPImm(const APFloat &Val) {
-    uint32_t Imm8;
-    return isFPImm(Val, Imm8);
-  }
-
-  bool isLogicalImm(unsigned RegWidth, uint64_t Imm, uint32_t &Bits);
-  bool isLogicalImmBits(unsigned RegWidth, uint32_t Bits, uint64_t &Imm);
-
-  bool isMOVZImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift);
-  bool isMOVNImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift);
-
-  // We sometimes want to know whether the immediate is representable with a
-  // MOVN but *not* with a MOVZ (because that would take priority).
-  bool isOnlyMOVNImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift);
-
-  uint64_t decodeNeonModImm(unsigned Val, unsigned OpCmode, unsigned &EltBits);
-  bool decodeNeonModShiftImm(unsigned OpCmode, unsigned &ShiftImm,
-                             unsigned &ShiftOnesIn);
-  }
-
-} // end namespace llvm;
-
-#endif

Removed: llvm/trunk/lib/Target/AArch64/Utils/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/Utils/CMakeLists.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/Utils/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/AArch64/Utils/CMakeLists.txt (removed)
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMAArch64Utils
-  AArch64BaseInfo.cpp
-  )

Removed: llvm/trunk/lib/Target/AArch64/Utils/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/Utils/LLVMBuild.txt?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/Utils/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AArch64/Utils/LLVMBuild.txt (removed)
@@ -1,23 +0,0 @@
-;===- ./lib/Target/AArch64/Utils/LLVMBuild.txt -----------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = AArch64Utils
-parent = AArch64
-required_libraries = Support
-add_to_library_groups = AArch64

Removed: llvm/trunk/lib/Target/AArch64/Utils/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/Utils/Makefile?rev=209575&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/Utils/Makefile (original)
+++ llvm/trunk/lib/Target/AArch64/Utils/Makefile (removed)
@@ -1,15 +0,0 @@
-##===- lib/Target/AArch64/Utils/Makefile -------------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64Utils
-
-# Hack: we need to include 'main' AArch64 target directory to grab private headers
-#CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common

Modified: llvm/trunk/lib/Target/ARM64/ARM64AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/ARM64AsmPrinter.cpp?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/ARM64AsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/ARM64/ARM64AsmPrinter.cpp Sat May 24 07:42:26 2014
@@ -508,4 +508,7 @@ void ARM64AsmPrinter::EmitInstruction(co
 extern "C" void LLVMInitializeARM64AsmPrinter() {
   RegisterAsmPrinter<ARM64AsmPrinter> X(TheARM64leTarget);
   RegisterAsmPrinter<ARM64AsmPrinter> Y(TheARM64beTarget);
+
+  RegisterAsmPrinter<ARM64AsmPrinter> Z(TheAArch64leTarget);
+  RegisterAsmPrinter<ARM64AsmPrinter> W(TheAArch64beTarget);
 }

Modified: llvm/trunk/lib/Target/ARM64/ARM64TargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/ARM64TargetMachine.cpp?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/ARM64TargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/ARM64/ARM64TargetMachine.cpp Sat May 24 07:42:26 2014
@@ -57,6 +57,9 @@ extern "C" void LLVMInitializeARM64Targe
   // Register the target.
   RegisterTargetMachine<ARM64leTargetMachine> X(TheARM64leTarget);
   RegisterTargetMachine<ARM64beTargetMachine> Y(TheARM64beTarget);
+
+  RegisterTargetMachine<ARM64leTargetMachine> Z(TheAArch64leTarget);
+  RegisterTargetMachine<ARM64beTargetMachine> W(TheAArch64beTarget);
 }
 
 /// TargetMachine ctor - Create an ARM64 architecture model.

Modified: llvm/trunk/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp (original)
+++ llvm/trunk/lib/Target/ARM64/AsmParser/ARM64AsmParser.cpp Sat May 24 07:42:26 2014
@@ -3957,6 +3957,9 @@ ARM64AsmParser::classifySymbolRef(const
 extern "C" void LLVMInitializeARM64AsmParser() {
   RegisterMCAsmParser<ARM64AsmParser> X(TheARM64leTarget);
   RegisterMCAsmParser<ARM64AsmParser> Y(TheARM64beTarget);
+
+  RegisterMCAsmParser<ARM64AsmParser> Z(TheAArch64leTarget);
+  RegisterMCAsmParser<ARM64AsmParser> W(TheAArch64beTarget);
 }
 
 #define GET_REGISTER_MATCHER

Modified: llvm/trunk/lib/Target/ARM64/Disassembler/ARM64Disassembler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/Disassembler/ARM64Disassembler.cpp?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/Disassembler/ARM64Disassembler.cpp (original)
+++ llvm/trunk/lib/Target/ARM64/Disassembler/ARM64Disassembler.cpp Sat May 24 07:42:26 2014
@@ -242,6 +242,15 @@ extern "C" void LLVMInitializeARM64Disas
                                        createARM64ExternalSymbolizer);
   TargetRegistry::RegisterMCSymbolizer(TheARM64beTarget,
                                        createARM64ExternalSymbolizer);
+
+  TargetRegistry::RegisterMCDisassembler(TheAArch64leTarget,
+                                         createARM64Disassembler);
+  TargetRegistry::RegisterMCDisassembler(TheAArch64beTarget,
+                                         createARM64Disassembler);
+  TargetRegistry::RegisterMCSymbolizer(TheAArch64leTarget,
+                                       createARM64ExternalSymbolizer);
+  TargetRegistry::RegisterMCSymbolizer(TheAArch64beTarget,
+                                       createARM64ExternalSymbolizer);
 }
 
 static const unsigned FPR128DecoderTable[] = {

Modified: llvm/trunk/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp (original)
+++ llvm/trunk/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.cpp Sat May 24 07:42:26 2014
@@ -141,44 +141,70 @@ extern "C" void LLVMInitializeARM64Targe
   // Register the MC asm info.
   RegisterMCAsmInfoFn X(TheARM64leTarget, createARM64MCAsmInfo);
   RegisterMCAsmInfoFn Y(TheARM64beTarget, createARM64MCAsmInfo);
+  RegisterMCAsmInfoFn Z(TheAArch64leTarget, createARM64MCAsmInfo);
+  RegisterMCAsmInfoFn W(TheAArch64beTarget, createARM64MCAsmInfo);
 
   // Register the MC codegen info.
   TargetRegistry::RegisterMCCodeGenInfo(TheARM64leTarget,
                                         createARM64MCCodeGenInfo);
   TargetRegistry::RegisterMCCodeGenInfo(TheARM64beTarget,
                                         createARM64MCCodeGenInfo);
+  TargetRegistry::RegisterMCCodeGenInfo(TheAArch64leTarget,
+                                        createARM64MCCodeGenInfo);
+  TargetRegistry::RegisterMCCodeGenInfo(TheAArch64beTarget,
+                                        createARM64MCCodeGenInfo);
 
   // Register the MC instruction info.
   TargetRegistry::RegisterMCInstrInfo(TheARM64leTarget, createARM64MCInstrInfo);
   TargetRegistry::RegisterMCInstrInfo(TheARM64beTarget, createARM64MCInstrInfo);
+  TargetRegistry::RegisterMCInstrInfo(TheAArch64leTarget, createARM64MCInstrInfo);
+  TargetRegistry::RegisterMCInstrInfo(TheAArch64beTarget, createARM64MCInstrInfo);
 
   // Register the MC register info.
   TargetRegistry::RegisterMCRegInfo(TheARM64leTarget, createARM64MCRegisterInfo);
   TargetRegistry::RegisterMCRegInfo(TheARM64beTarget, createARM64MCRegisterInfo);
+  TargetRegistry::RegisterMCRegInfo(TheAArch64leTarget, createARM64MCRegisterInfo);
+  TargetRegistry::RegisterMCRegInfo(TheAArch64beTarget, createARM64MCRegisterInfo);
 
   // Register the MC subtarget info.
   TargetRegistry::RegisterMCSubtargetInfo(TheARM64leTarget,
                                           createARM64MCSubtargetInfo);
   TargetRegistry::RegisterMCSubtargetInfo(TheARM64beTarget,
                                           createARM64MCSubtargetInfo);
+  TargetRegistry::RegisterMCSubtargetInfo(TheAArch64leTarget,
+                                          createARM64MCSubtargetInfo);
+  TargetRegistry::RegisterMCSubtargetInfo(TheAArch64beTarget,
+                                          createARM64MCSubtargetInfo);
 
   // Register the asm backend.
   TargetRegistry::RegisterMCAsmBackend(TheARM64leTarget, createARM64leAsmBackend);
   TargetRegistry::RegisterMCAsmBackend(TheARM64beTarget, createARM64beAsmBackend);
+  TargetRegistry::RegisterMCAsmBackend(TheAArch64leTarget, createARM64leAsmBackend);
+  TargetRegistry::RegisterMCAsmBackend(TheAArch64beTarget, createARM64beAsmBackend);
 
   // Register the MC Code Emitter
   TargetRegistry::RegisterMCCodeEmitter(TheARM64leTarget,
                                         createARM64MCCodeEmitter);
   TargetRegistry::RegisterMCCodeEmitter(TheARM64beTarget,
                                         createARM64MCCodeEmitter);
+  TargetRegistry::RegisterMCCodeEmitter(TheAArch64leTarget,
+                                        createARM64MCCodeEmitter);
+  TargetRegistry::RegisterMCCodeEmitter(TheAArch64beTarget,
+                                        createARM64MCCodeEmitter);
 
   // Register the object streamer.
   TargetRegistry::RegisterMCObjectStreamer(TheARM64leTarget, createMCStreamer);
   TargetRegistry::RegisterMCObjectStreamer(TheARM64beTarget, createMCStreamer);
+  TargetRegistry::RegisterMCObjectStreamer(TheAArch64leTarget, createMCStreamer);
+  TargetRegistry::RegisterMCObjectStreamer(TheAArch64beTarget, createMCStreamer);
 
   // Register the MCInstPrinter.
   TargetRegistry::RegisterMCInstPrinter(TheARM64leTarget,
                                         createARM64MCInstPrinter);
   TargetRegistry::RegisterMCInstPrinter(TheARM64beTarget,
                                         createARM64MCInstPrinter);
+  TargetRegistry::RegisterMCInstPrinter(TheAArch64leTarget,
+                                        createARM64MCInstPrinter);
+  TargetRegistry::RegisterMCInstPrinter(TheAArch64beTarget,
+                                        createARM64MCInstPrinter);
 }

Modified: llvm/trunk/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.h?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.h (original)
+++ llvm/trunk/lib/Target/ARM64/MCTargetDesc/ARM64MCTargetDesc.h Sat May 24 07:42:26 2014
@@ -31,6 +31,8 @@ class raw_ostream;
 
 extern Target TheARM64leTarget;
 extern Target TheARM64beTarget;
+extern Target TheAArch64leTarget;
+extern Target TheAArch64beTarget;
 
 MCCodeEmitter *createARM64MCCodeEmitter(const MCInstrInfo &MCII,
                                         const MCRegisterInfo &MRI,

Modified: llvm/trunk/lib/Target/ARM64/TargetInfo/ARM64TargetInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/TargetInfo/ARM64TargetInfo.cpp?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/TargetInfo/ARM64TargetInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM64/TargetInfo/ARM64TargetInfo.cpp Sat May 24 07:42:26 2014
@@ -14,6 +14,8 @@ using namespace llvm;
 namespace llvm {
 Target TheARM64leTarget;
 Target TheARM64beTarget;
+Target TheAArch64leTarget;
+Target TheAArch64beTarget;
 } // end namespace llvm
 
 extern "C" void LLVMInitializeARM64TargetInfo() {
@@ -21,4 +23,9 @@ extern "C" void LLVMInitializeARM64Targe
                                                    "ARM64 (little endian)");
   RegisterTarget<Triple::arm64_be, /*HasJIT=*/true> Y(TheARM64beTarget, "arm64_be",
                                                       "ARM64 (big endian)");
+
+  RegisterTarget<Triple::aarch64, /*HasJIT=*/true> Z(
+      TheAArch64leTarget, "aarch64", "ARM64 (little endian)");
+  RegisterTarget<Triple::aarch64_be, /*HasJIT=*/true> W(
+      TheAArch64beTarget, "aarch64_be", "ARM64 (big endian)");
 }

Modified: llvm/trunk/lib/Target/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/LLVMBuild.txt?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/lib/Target/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/LLVMBuild.txt Sat May 24 07:42:26 2014
@@ -16,7 +16,7 @@
 ;===------------------------------------------------------------------------===;
 
 [common]
-subdirectories = AArch64 ARM ARM64 CppBackend Hexagon MSP430 NVPTX Mips PowerPC R600 Sparc SystemZ X86 XCore
+subdirectories = ARM ARM64 CppBackend Hexagon MSP430 NVPTX Mips PowerPC R600 Sparc SystemZ X86 XCore
 
 ; This is a special group whose required libraries are extended (by llvm-build)
 ; with the best execution engine (the native JIT, if available, or the

Modified: llvm/trunk/test/CodeGen/AArch64/128bit_load_store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/128bit_load_store.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/128bit_load_store.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/128bit_load_store.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=neon| FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
 ; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=neon | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
 
 define void @test_store_f128(fp128* %ptr, fp128 %val) #0 {
@@ -21,9 +20,6 @@ define void @test_vstrq_p128(i128* %ptr,
 ; CHECK-ARM64-LABEL: test_vstrq_p128
 ; CHECK-ARM64: stp {{x[0-9]+}}, {{x[0-9]+}}, [{{x[0-9]+}}]
 
-; CHECK-AARCH64-LABEL: test_vstrq_p128
-; CHECK-AARCH64: str {{x[0-9]+}}, [{{x[0-9]+}}, #8]
-; CHECK-AARCH64: str {{x[0-9]+}}, [{{x[0-9]+}}]
 entry:
   %0 = bitcast i128* %ptr to fp128*
   %1 = bitcast i128 %val to fp128
@@ -35,9 +31,6 @@ define i128 @test_vldrq_p128(i128* reado
 ; CHECK-ARM64-LABEL: test_vldrq_p128
 ; CHECK-ARM64: ldp {{x[0-9]+}}, {{x[0-9]+}}, [{{x[0-9]+}}]
 
-; CHECK-AARCH64-LABEL: test_vldrq_p128
-; CHECK-AARCH64: ldr {{x[0-9]+}}, [{{x[0-9]+}}]
-; CHECK-AARCH64: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #8]
 entry:
   %0 = bitcast i128* %ptr to fp128*
   %1 = load fp128* %0, align 16

Modified: llvm/trunk/test/CodeGen/AArch64/adc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/adc.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/adc.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/adc.ll Sat May 24 07:42:26 2014
@@ -1,5 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck --check-prefix=CHECK --check-prefix=CHECK-LE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefix=CHECK --check-prefix=CHECK-BE %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefix=CHECK --check-prefix=CHECK-LE %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64_be-none-linux-gnu | FileCheck --check-prefix=CHECK --check-prefix=CHECK-BE %s
 

Modified: llvm/trunk/test/CodeGen/AArch64/addsub-shifted.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/addsub-shifted.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/addsub-shifted.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/addsub-shifted.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
 @var32 = global i32 0

Modified: llvm/trunk/test/CodeGen/AArch64/addsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/addsub.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/addsub.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/addsub.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-linux-gnu | FileCheck %s
 
 ; Note that this should be refactored (for efficiency if nothing else)

Modified: llvm/trunk/test/CodeGen/AArch64/addsub_ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/addsub_ext.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/addsub_ext.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/addsub_ext.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-linux-gnu | FileCheck %s
 
 @var8 = global i8 0

Modified: llvm/trunk/test/CodeGen/AArch64/alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/alloca.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/alloca.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/alloca.ll Sat May 24 07:42:26 2014
@@ -1,6 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
 ; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-NOFP-AARCH64 %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=-fp-armv8 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-NOFP-ARM64 %s
 
 declare void @use_addr(i8*)
@@ -54,8 +52,6 @@ define i64 @test_alloca_with_local(i64 %
 ; CHECK: bl use_addr
 
   %val = load i64* %loc
-; CHECK-AARCH64: sub x[[TMP:[0-9]+]], x29, #[[LOC_FROM_FP]]
-; CHECK-AARCH64: ldr x0, [x[[TMP]]]
 
 ; CHECK-ARM64: ldur x0, [x29, #-[[LOC_FROM_FP]]]
 
@@ -68,13 +64,7 @@ define i64 @test_alloca_with_local(i64 %
 define void @test_variadic_alloca(i64 %n, ...) {
 ; CHECK-LABEL: test_variadic_alloca:
 
-; CHECK-AARCH64: sub     sp, sp, #{{[0-9]+}}
-; CHECK-AARCH64: add     x29, sp, #192
-; CHECK-AARCH64: sub     [[TMP:x[0-9]+]], x29, #192
-; CHECK-AARCH64: add     x8, [[TMP]], #0
-; CHECK-AARCH64-FP: str     q7, [x8, #112]
 ; [...]
-; CHECK-AARCH64-FP: str     q1, [x8, #16]
 
 
 ; CHECK-NOFP-AARCH64: sub     sp, sp, #80
@@ -112,9 +102,6 @@ define void @test_variadic_alloca(i64 %n
 ; CHECK: bl use_addr
 
   ret void
-; CHECK-AARCH64: sub sp, x29, #192
-; CHECK-AARCH64: ldp x29, x30, [sp, #192]
-; CHECK-AARCH64: add sp, sp, #208
 
 ; CHECK-NOFP-AARCH64: sub sp, x29, #64
 ; CHECK-NOFP-AARCH64: ldp x29, x30, [sp, #64]
@@ -127,11 +114,6 @@ define void @test_variadic_alloca(i64 %n
 define void @test_alloca_large_frame(i64 %n) {
 ; CHECK-LABEL: test_alloca_large_frame:
 
-; CHECK-AARCH64: sub sp, sp, #496
-; CHECK-AARCH64: stp x29, x30, [sp, #480]
-; CHECK-AARCH64: add x29, sp, #480
-; CHECK-AARCH64: sub sp, sp, #48
-; CHECK-AARCH64: sub sp, sp, #1953, lsl #12
 
 ; CHECK-ARM64: stp     x20, x19, [sp, #-32]!
 ; CHECK-ARM64: stp     x29, x30, [sp, #16]
@@ -145,9 +127,6 @@ define void @test_alloca_large_frame(i64
   call void @use_addr_loc(i8* %addr1, i64* %addr2)
 
   ret void
-; CHECK-AARCH64: sub sp, x29, #480
-; CHECK-AARCH64: ldp x29, x30, [sp, #480]
-; CHECK-AARCH64: add sp, sp, #496
 
 ; CHECK-ARM64: sub     sp, x29, #16
 ; CHECK-ARM64: ldp     x29, x30, [sp, #16]

Modified: llvm/trunk/test/CodeGen/AArch64/analyze-branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/analyze-branch.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/analyze-branch.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/analyze-branch.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu < %s | FileCheck %s
 
 ; This test checks that LLVM can do basic stripping and reapplying of branches

Removed: llvm/trunk/test/CodeGen/AArch64/andCmpBrToTBZ.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/andCmpBrToTBZ.ll?rev=209575&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/andCmpBrToTBZ.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/andCmpBrToTBZ.ll (removed)
@@ -1,74 +0,0 @@
-; RUN: llc -O1 -march=aarch64 -enable-andcmp-sinking=true < %s | FileCheck %s
-; arm64 has separate copy of this test
-
-; ModuleID = 'and-cbz-extr-mr.bc'
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
-target triple = "aarch64-none-linux-gnu"
-
-define zeroext i1 @foo(i1 %IsEditable, i1 %isTextField, i8* %str1, i8* %str2, i8* %str3, i8* %str4, i8* %str5, i8* %str6, i8* %str7, i8* %str8, i8* %str9, i8* %str10, i8* %str11, i8* %str12, i8* %str13, i32 %int1, i8* %str14) unnamed_addr #0 align 2 {
-; CHECK: foo:
-entry:
-  %tobool = icmp eq i8* %str14, null
-  br i1 %tobool, label %return, label %if.end
-
-; CHECK: %if.end
-; CHECK: tbz
-if.end:                                           ; preds = %entry
-  %and.i.i.i = and i32 %int1, 4
-  %tobool.i.i.i = icmp eq i32 %and.i.i.i, 0
-  br i1 %tobool.i.i.i, label %if.end12, label %land.rhs.i
-
-land.rhs.i:                                       ; preds = %if.end
-  %cmp.i.i.i = icmp eq i8* %str12, %str13
-  br i1 %cmp.i.i.i, label %if.then3, label %lor.rhs.i.i.i
-
-lor.rhs.i.i.i:                                    ; preds = %land.rhs.i
-  %cmp.i13.i.i.i = icmp eq i8* %str10, %str11
-  br i1 %cmp.i13.i.i.i, label %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, label %if.end5
-
-_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit: ; preds = %lor.rhs.i.i.i
-  %cmp.i.i.i.i = icmp eq i8* %str8, %str9
-  br i1 %cmp.i.i.i.i, label %if.then3, label %if.end5
-
-if.then3:                                         ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, %land.rhs.i
-  %tmp11 = load i8* %str14, align 8
-  %tmp12 = and i8 %tmp11, 2
-  %tmp13 = icmp ne i8 %tmp12, 0
-  br label %return
-
-if.end5:                                          ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, %lor.rhs.i.i.i
-; CHECK: %if.end5
-; CHECK: tbz
-  br i1 %tobool.i.i.i, label %if.end12, label %land.rhs.i19
-
-land.rhs.i19:                                     ; preds = %if.end5
-  %cmp.i.i.i18 = icmp eq i8* %str6, %str7
-  br i1 %cmp.i.i.i18, label %if.then7, label %lor.rhs.i.i.i23
-
-lor.rhs.i.i.i23:                                  ; preds = %land.rhs.i19
-  %cmp.i13.i.i.i22 = icmp eq i8* %str3, %str4
-  br i1 %cmp.i13.i.i.i22, label %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28, label %if.end12
-
-_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28: ; preds = %lor.rhs.i.i.i23
-  %cmp.i.i.i.i26 = icmp eq i8* %str1, %str2
-  br i1 %cmp.i.i.i.i26, label %if.then7, label %if.end12
-
-if.then7:                                         ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28, %land.rhs.i19
-  br i1 %isTextField, label %if.then9, label %if.end12
-
-if.then9:                                         ; preds = %if.then7
-  %tmp23 = load i8* %str5, align 8
-  %tmp24 = and i8 %tmp23, 2
-  %tmp25 = icmp ne i8 %tmp24, 0
-  br label %return
-
-if.end12:                                         ; preds = %if.then7, %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28, %lor.rhs.i.i.i23, %if.end5, %if.end
-  %lnot = xor i1 %IsEditable, true
-  br label %return
-
-return:                                           ; preds = %if.end12, %if.then9, %if.then3, %entry
-  %retval.0 = phi i1 [ %tmp13, %if.then3 ], [ %tmp25, %if.then9 ], [ %lnot, %if.end12 ], [ true, %entry ]
-  ret i1 %retval.0
-}
-
-attributes #0 = { nounwind ssp }

Modified: llvm/trunk/test/CodeGen/AArch64/assertion-rc-mismatch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/assertion-rc-mismatch.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/assertion-rc-mismatch.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/assertion-rc-mismatch.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 ; Test case related to <rdar://problem/15633429>.
 

Modified: llvm/trunk/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/atomic-ops-not-barriers.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/atomic-ops-not-barriers.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/atomic-ops-not-barriers.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
 
 define i32 @foo(i32* %var, i1 %cond) {

Modified: llvm/trunk/test/CodeGen/AArch64/atomic-ops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/atomic-ops.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/atomic-ops.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/atomic-ops.ll Sat May 24 07:42:26 2014
@@ -1,5 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-REG %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
 ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-REG
 
@@ -502,8 +500,6 @@ define i8 @test_atomic_load_min_i8(i8 %o
 ; CHECK: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]], sxtb
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
 
 ; CHECK-ARM64-NEXT: sxtb w[[OLD_EXT:[0-9]+]], w[[OLD]]
 ; CHECK-ARM64-NEXT: cmp w[[OLD_EXT]], w0, sxtb
@@ -528,8 +524,6 @@ define i16 @test_atomic_load_min_i16(i16
 ; CHECK: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]], sxth
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
 
 ; CHECK-ARM64-NEXT: sxth w[[OLD_EXT:[0-9]+]], w[[OLD]]
 ; CHECK-ARM64-NEXT: cmp w[[OLD_EXT]], w0, sxth
@@ -555,8 +549,6 @@ define i32 @test_atomic_load_min_i32(i32
 ; CHECK: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]]
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
 
 ; CHECK-ARM64-NEXT: cmp w[[OLD]], w0
 ; CHECK-ARM64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, le
@@ -581,8 +573,6 @@ define i64 @test_atomic_load_min_i64(i64
 ; CHECK: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
   ; x0 below is a reasonable guess but could change: it certainly comes into the
   ; function there.
-; CHECK-AARCH64-NEXT: cmp x0, x[[OLD]]
-; CHECK-AARCH64-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, gt
 
 ; CHECK-ARM64-NEXT: cmp x[[OLD]], x0
 ; CHECK-ARM64-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, le
@@ -607,8 +597,6 @@ define i8 @test_atomic_load_max_i8(i8 %o
 ; CHECK: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]], sxtb
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
 
 ; CHECK-ARM64-NEXT: sxtb w[[OLD_EXT:[0-9]+]], w[[OLD]]
 ; CHECK-ARM64-NEXT: cmp w[[OLD_EXT]], w0, sxtb
@@ -634,8 +622,6 @@ define i16 @test_atomic_load_max_i16(i16
 ; CHECK: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]], sxth
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
 
 ; CHECK-ARM64-NEXT: sxth w[[OLD_EXT:[0-9]+]], w[[OLD]]
 ; CHECK-ARM64-NEXT: cmp w[[OLD_EXT]], w0, sxth
@@ -661,8 +647,6 @@ define i32 @test_atomic_load_max_i32(i32
 ; CHECK: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]]
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
 
 ; CHECK-ARM64-NEXT: cmp w[[OLD]], w0
 ; CHECK-ARM64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
@@ -687,8 +671,6 @@ define i64 @test_atomic_load_max_i64(i64
 ; CHECK: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
   ; x0 below is a reasonable guess but could change: it certainly comes into the
   ; function there.
-; CHECK-AARCH64-NEXT: cmp x0, x[[OLD]]
-; CHECK-AARCH64-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, lt
 
 ; CHECK-ARM64-NEXT: cmp x[[OLD]], x0
 ; CHECK-ARM64-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, gt
@@ -713,8 +695,6 @@ define i8 @test_atomic_load_umin_i8(i8 %
 ; CHECK: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]], uxtb
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
 
 ; CHECK-ARM64-NEXT: cmp w[[OLD]], w0, uxtb
 ; CHECK-ARM64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, ls
@@ -739,8 +719,6 @@ define i16 @test_atomic_load_umin_i16(i1
 ; CHECK: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]], uxth
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
 
 ; CHECK-ARM64-NEXT: cmp w[[OLD]], w0, uxth
 ; CHECK-ARM64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, ls
@@ -765,8 +743,6 @@ define i32 @test_atomic_load_umin_i32(i3
 ; CHECK: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]]
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
 
 ; CHECK-ARM64-NEXT: cmp w[[OLD]], w0
 ; CHECK-ARM64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, ls
@@ -791,8 +767,6 @@ define i64 @test_atomic_load_umin_i64(i6
 ; CHECK: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
   ; x0 below is a reasonable guess but could change: it certainly comes into the
   ; function there.
-; CHECK-AARCH64-NEXT: cmp x0, x[[OLD]]
-; CHECK-AARCH64-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, hi
 
 ; CHECK-ARM64-NEXT: cmp x[[OLD]], x0
 ; CHECK-ARM64-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, ls
@@ -817,8 +791,6 @@ define i8 @test_atomic_load_umax_i8(i8 %
 ; CHECK: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]], uxtb
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
 
 ; CHECK-ARM64-NEXT: cmp w[[OLD]], w0, uxtb
 ; CHECK-ARM64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
@@ -843,8 +815,6 @@ define i16 @test_atomic_load_umax_i16(i1
 ; CHECK: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]], uxth
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
 
 ; CHECK-ARM64-NEXT: cmp w[[OLD]], w0, uxth
 ; CHECK-ARM64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
@@ -869,8 +839,6 @@ define i32 @test_atomic_load_umax_i32(i3
 ; CHECK: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
   ; w0 below is a reasonable guess but could change: it certainly comes into the
   ;  function there.
-; CHECK-AARCH64-NEXT: cmp w0, w[[OLD]]
-; CHECK-AARCH64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
 
 ; CHECK-ARM64-NEXT: cmp w[[OLD]], w0
 ; CHECK-ARM64-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
@@ -895,8 +863,6 @@ define i64 @test_atomic_load_umax_i64(i6
 ; CHECK: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
   ; x0 below is a reasonable guess but could change: it certainly comes into the
   ; function there.
-; CHECK-AARCH64-NEXT: cmp x0, x[[OLD]]
-; CHECK-AARCH64-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, lo
 
 ; CHECK-ARM64-NEXT: cmp x[[OLD]], x0
 ; CHECK-ARM64-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, hi

Modified: llvm/trunk/test/CodeGen/AArch64/basic-pic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/basic-pic.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/basic-pic.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/basic-pic.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic %s -o - | FileCheck %s
 ; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -relocation-model=pic %s -o - | FileCheck %s
 
 @var = global i32 0

Modified: llvm/trunk/test/CodeGen/AArch64/bitfield-insert-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bitfield-insert-0.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bitfield-insert-0.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bitfield-insert-0.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -filetype=obj < %s | llvm-objdump -disassemble - | FileCheck %s
 ; RUN: llc -mtriple=arm64-linux-gnu -filetype=obj -o - %s | llvm-objdump -disassemble - | FileCheck %s
 
 ; The encoding of lsb -> immr in the CGed bitfield instructions was wrong at one

Modified: llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
 ; RUN: llc -mtriple=arm64-none-linux-gnu < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
 
 ; First, a simple example from Clang. The registers could plausibly be
@@ -64,8 +63,6 @@ define void @test_whole64(i64* %existing
 define void @test_whole32_from64(i64* %existing, i64* %new) {
 ; CHECK-LABEL: test_whole32_from64:
 
-; CHECK-AARCH64: bfi {{w[0-9]+}}, {{w[0-9]+}}, #{{0|16}}, #16
-; CHECK-AARCH64-NOT: and
 
 ; CHECK-ARM64: bfxil {{x[0-9]+}}, {{x[0-9]+}}, #0, #16
 
@@ -88,7 +85,6 @@ define void @test_32bit_masked(i32 *%exi
 
 ; CHECK-ARM64: and
 ; CHECK: bfi [[INSERT:w[0-9]+]], {{w[0-9]+}}, #3, #4
-; CHECK-AARCH64: and {{w[0-9]+}}, [[INSERT]], #0xff
 
   %oldval = load volatile i32* %existing
   %oldval_keep = and i32 %oldval, 135 ; = 0x87
@@ -107,7 +103,6 @@ define void @test_64bit_masked(i64 *%exi
 ; CHECK-LABEL: test_64bit_masked:
 ; CHECK-ARM64: and
 ; CHECK: bfi [[INSERT:x[0-9]+]], {{x[0-9]+}}, #40, #8
-; CHECK-AARCH64: and {{x[0-9]+}}, [[INSERT]], #0xffff00000000
 
   %oldval = load volatile i64* %existing
   %oldval_keep = and i64 %oldval, 1095216660480 ; = 0xff_0000_0000
@@ -128,7 +123,6 @@ define void @test_32bit_complexmask(i32
 
 ; CHECK-ARM64: and
 ; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #3, #4
-; CHECK-AARCH64: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 
   %oldval = load volatile i32* %existing
   %oldval_keep = and i32 %oldval, 647 ; = 0x287

Modified: llvm/trunk/test/CodeGen/AArch64/bitfield.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bitfield.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bitfield.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bitfield.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
 
 @var32 = global i32 0
@@ -24,7 +23,6 @@ define void @test_extendb(i8 %var) {
 
   %uxt64 = zext i8 %var to i64
   store volatile i64 %uxt64, i64* @var64
-; CHECK-AARCH64: uxtb {{x[0-9]+}}, {{w[0-9]+}}
 ; CHECK-ARM64: and {{x[0-9]+}}, {{x[0-9]+}}, #0xff
   ret void
 }
@@ -49,7 +47,6 @@ define void @test_extendh(i16 %var) {
 
   %uxt64 = zext i16 %var to i64
   store volatile i64 %uxt64, i64* @var64
-; CHECK-AARCH64: uxth {{x[0-9]+}}, {{w[0-9]+}}
 ; CHECK-ARM64: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffff
   ret void
 }
@@ -63,7 +60,6 @@ define void @test_extendw(i32 %var) {
 
   %uxt64 = zext i32 %var to i64
   store volatile i64 %uxt64, i64* @var64
-; CHECK-AARCH64: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #0, #32
 ; CHECK-ARM64: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #32
   ret void
 }

Modified: llvm/trunk/test/CodeGen/AArch64/blockaddress.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/blockaddress.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/blockaddress.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/blockaddress.ll Sat May 24 07:42:26 2014
@@ -1,5 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -code-model=large -mtriple=arm64-none-linux-gnu -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s
 

Modified: llvm/trunk/test/CodeGen/AArch64/bool-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bool-loads.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bool-loads.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bool-loads.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s
 
 @var = global i1 0

Modified: llvm/trunk/test/CodeGen/AArch64/breg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/breg.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/breg.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/breg.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-linux-gnu | FileCheck %s
 
 @stored_label = global i8* null

Modified: llvm/trunk/test/CodeGen/AArch64/callee-save.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/callee-save.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/callee-save.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/callee-save.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s --check-prefix=CHECK-ARM64
 
 @var = global float 0.0

Modified: llvm/trunk/test/CodeGen/AArch64/code-model-large-abs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/code-model-large-abs.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/code-model-large-abs.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/code-model-large-abs.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -code-model=large < %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-linux-gnu -code-model=large -o - %s | FileCheck %s
 
 @var8 = global i8 0

Modified: llvm/trunk/test/CodeGen/AArch64/compare-branch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/compare-branch.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/compare-branch.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/compare-branch.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-linux-gnu | FileCheck %s
 
 @var32 = global i32 0

Modified: llvm/trunk/test/CodeGen/AArch64/complex-copy-noneon.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/complex-copy-noneon.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/complex-copy-noneon.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/complex-copy-noneon.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=-neon < %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -mattr=-neon < %s
 
 ; The DAG combiner decided to use a vector load/store for this struct copy

Removed: llvm/trunk/test/CodeGen/AArch64/concatvector-bugs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/concatvector-bugs.ll?rev=209575&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/concatvector-bugs.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/concatvector-bugs.ll (removed)
@@ -1,70 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon
-; Bug: i8 type in FRP8 register but not registering with register class causes segmentation fault.
-; Fix: Removed i8 type from FPR8 register class.
-
-; Not relevant to arm64.
-
-define void @test_concatvector_v8i8() {
-entry.split:
-  br i1 undef, label %if.then, label %if.end
-
-if.then:                                          ; preds = %entry.split
-  unreachable
-
-if.end:                                           ; preds = %entry.split
-  br i1 undef, label %if.then9, label %if.end18
-
-if.then9:                                         ; preds = %if.end
-  unreachable
-
-if.end18:                                         ; preds = %if.end
-  br label %for.body
-
-for.body:                                         ; preds = %for.inc, %if.end18
-  br i1 false, label %if.then30, label %for.inc
-
-if.then30:                                        ; preds = %for.body
-  unreachable
-
-for.inc:                                          ; preds = %for.body
-  br i1 undef, label %for.end, label %for.body
-
-for.end:                                          ; preds = %for.inc
-  br label %for.body77
-
-for.body77:                                       ; preds = %for.body77, %for.end
-  br i1 undef, label %for.end106, label %for.body77
-
-for.end106:                                       ; preds = %for.body77
-  br i1 undef, label %for.body130.us.us, label %stmt.for.body130.us.us
-
-stmt.for.body130.us.us:                     ; preds = %stmt.for.body130.us.us, %for.end106
-  %_p_splat.us = shufflevector <1 x i8> zeroinitializer, <1 x i8> undef, <8 x i32> zeroinitializer
-  store <8 x i8> %_p_splat.us, <8 x i8>* undef, align 1
-  br label %stmt.for.body130.us.us
-
-for.body130.us.us:                                ; preds = %for.body130.us.us, %for.end106
-  br label %for.body130.us.us
-}
-
-declare <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32>, i32)
-
-define <8 x i16> @test_splat(i32 %l) nounwind {
-; CHECK-LABEL: test_splat:
-; CHECK: ret
-  %lhs = insertelement <1 x i32> undef, i32 %l, i32 0
-  %shift = tail call <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32> %lhs, i32 11)
-  %vec = shufflevector <1 x i16> %shift, <1 x i16> undef, <8 x i32> zeroinitializer
-  ret <8 x i16> %vec
-}
-
-
-define <8 x i16> @test_notsplat(<8 x i16> %a, <8 x i16> %b, i32 %l) nounwind {
-; CHECK-LABEL: test_notsplat:
-; CHECK: ret
-entry:
-  %lhs = insertelement <1 x i32> undef, i32 %l, i32 0
-  %shift = tail call <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32> %lhs, i32 11)
-  %vec = shufflevector <1 x i16> %shift, <1 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 0, i32 0, i32 0>
-  ret <8 x i16> %vec
-}

Modified: llvm/trunk/test/CodeGen/AArch64/cond-sel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/cond-sel.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/cond-sel.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/cond-sel.ll Sat May 24 07:42:26 2014
@@ -1,6 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu -mcpu=cyclone | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
 
 @var32 = global i32 0
@@ -47,7 +45,6 @@ define void @test_floatcsel(float %lhs32
 ; CHECK-NOFP-NOT: fcmp
   %val2 = select i1 %tst2, i64 9, i64 15
   store i64 %val2, i64* @var64
-; CHECK-AARCH64: movz x[[CONST15:[0-9]+]], #15
 ; CHECK-ARM64: orr w[[CONST15:[0-9]+]], wzr, #0xf
 ; CHECK: movz {{[wx]}}[[CONST9:[0-9]+]], #{{9|0x9}}
 ; CHECK: csel [[MAYBETRUE:x[0-9]+]], x[[CONST9]], x[[CONST15]], eq

Modified: llvm/trunk/test/CodeGen/AArch64/cpus.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/cpus.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/cpus.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/cpus.ll Sat May 24 07:42:26 2014
@@ -1,9 +1,5 @@
 ; This tests that llc accepts all valid AArch64 CPUs
 
-; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mcpu=generic 2>&1 | FileCheck %s
-; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mcpu=cortex-a53 2>&1 | FileCheck %s
-; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mcpu=cortex-a57 2>&1 | FileCheck %s
-; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mcpu=invalidcpu 2>&1 | FileCheck %s --check-prefix=INVALID
 
 ; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=generic 2>&1 | FileCheck %s
 ; RUN: llc < %s -mtriple=arm64-unknown-unknown -mcpu=cortex-a53 2>&1 | FileCheck %s

Modified: llvm/trunk/test/CodeGen/AArch64/directcond.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/directcond.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/directcond.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/directcond.ll Sat May 24 07:42:26 2014
@@ -1,13 +1,9 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
 
 define i32 @test_select_i32(i1 %bit, i32 %a, i32 %b) {
 ; CHECK-LABEL: test_select_i32:
   %val = select i1 %bit, i32 %a, i32 %b
-; CHECK-AARCH64: movz [[ONE:w[0-9]+]], #1
-; CHECK-AARCH64: tst w0, [[ONE]]
 ; CHECK-ARM64: tst w0, #0x1
 ; CHECK-NEXT: csel w0, w1, w2, ne
 
@@ -17,8 +13,6 @@ define i32 @test_select_i32(i1 %bit, i32
 define i64 @test_select_i64(i1 %bit, i64 %a, i64 %b) {
 ; CHECK-LABEL: test_select_i64:
   %val = select i1 %bit, i64 %a, i64 %b
-; CHECK-AARCH64: movz [[ONE:w[0-9]+]], #1
-; CHECK-AARCH64: tst w0, [[ONE]]
 ; CHECK-ARM64: tst w0, #0x1
 ; CHECK-NEXT: csel x0, x1, x2, ne
 
@@ -28,8 +22,6 @@ define i64 @test_select_i64(i1 %bit, i64
 define float @test_select_float(i1 %bit, float %a, float %b) {
 ; CHECK-LABEL: test_select_float:
   %val = select i1 %bit, float %a, float %b
-; CHECK-AARCH64: movz [[ONE:w[0-9]+]], #1
-; CHECK-AARCH64: tst w0, [[ONE]]
 ; CHECK-ARM64: tst w0, #0x1
 ; CHECK-NEXT: fcsel s0, s0, s1, ne
 ; CHECK-NOFP-NOT: fcsel
@@ -39,8 +31,6 @@ define float @test_select_float(i1 %bit,
 define double @test_select_double(i1 %bit, double %a, double %b) {
 ; CHECK-LABEL: test_select_double:
   %val = select i1 %bit, double %a, double %b
-; CHECK-AARCH64: movz [[ONE:w[0-9]+]], #1
-; CHECK-AARCH64: tst w0, [[ONE]]
 ; CHECK-ARM64: tst w0, #0x1
 ; CHECK-NEXT: fcsel d0, d0, d1, ne
 ; CHECK-NOFP-NOT: fcsel

Modified: llvm/trunk/test/CodeGen/AArch64/dp-3source.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/dp-3source.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/dp-3source.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/dp-3source.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
 define i32 @test_madd32(i32 %val0, i32 %val1, i32 %val2) {

Modified: llvm/trunk/test/CodeGen/AArch64/dp1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/dp1.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/dp1.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/dp1.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-linux-gnu | FileCheck %s
 
 @var32 = global i32 0

Modified: llvm/trunk/test/CodeGen/AArch64/dp2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/dp2.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/dp2.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/dp2.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64 | FileCheck %s
 
 @var32_0 = global i32 0

Modified: llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll Sat May 24 07:42:26 2014
@@ -1,11 +1,7 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK-AARCH64
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-apple-ios7.0 -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-ARM64
 
 ; Check  trunc i64 operation is translated as a subregister access
 ; eliminating an i32 induction varible.
-; CHECK-AARCH64: add {{x[0-9]+}}, {{x[0-9]+}}, #1
-; CHECK-AARCH64-NOT: add {{w[0-9]+}}, {{w[0-9]+}}, #1
-; CHECK-AARCH64-NEXT: cmp {{w[0-9]+}}, {{w[0-9]+}}, uxtw
 
 ; CHECK-ARM64-NOT: add {{x[0-9]+}}, {{x[0-9]+}}, #1
 ; CHECK-ARM64: add {{w[0-9]+}}, {{w[0-9]+}}, #1

Modified: llvm/trunk/test/CodeGen/AArch64/extern-weak.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/extern-weak.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/extern-weak.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/extern-weak.ll Sat May 24 07:42:26 2014
@@ -1,5 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -o - < %s | FileCheck %s --check-prefix=CHECK-AARCH64
-; RUN: llc -mtriple=aarch64-none-linux-gnu -code-model=large -o - < %s | FileCheck --check-prefix=CHECK-LARGE %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -o - %s | FileCheck %s --check-prefix=CHECK-ARM64
 ; RUN: llc -mtriple=arm64-none-linux-gnu -code-model=large -o - %s | FileCheck --check-prefix=CHECK-LARGE %s
 
@@ -9,10 +7,7 @@ define i32()* @foo() {
 ; The usual ADRP/ADD pair can't be used for a weak reference because it must
 ; evaluate to 0 if the symbol is undefined. We use a litpool entry.
   ret i32()* @var
-; CHECK-AARCH64: .LCPI0_0:
-; CHECK-AARCH64-NEXT: .xword var
 
-; CHECK-AARCH64: ldr x0, [{{x[0-9]+}}, #:lo12:.LCPI0_0]
 
 ; CHECK-ARM64: adrp x[[ADDRHI:[0-9]+]], :got:var
 ; CHECK-ARM64: ldr x0, [x[[ADDRHI]], :got_lo12:var]
@@ -30,11 +25,7 @@ define i32()* @foo() {
 
 define i32* @bar() {
   %addr = getelementptr [10 x i32]* @arr_var, i32 0, i32 5
-; CHECK-AARCH64: .LCPI1_0:
-; CHECK-AARCH64-NEXT: .xword arr_var
 
-; CHECK-AARCH64: ldr [[BASE:x[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI1_0]
-; CHECK-AARCH64: add x0, [[BASE]], #20
 
 ; CHECK-ARM64: adrp x[[ADDRHI:[0-9]+]], :got:arr_var
 ; CHECK-ARM64: ldr [[BASE:x[0-9]+]], [x[[ADDRHI]], :got_lo12:arr_var]
@@ -54,8 +45,6 @@ define i32* @bar() {
 
 define i32* @wibble() {
   ret i32* @defined_weak_var
-; CHECK-AARCH64: adrp [[BASE:x[0-9]+]], defined_weak_var
-; CHECK-AARCH64: add x0, [[BASE]], #:lo12:defined_weak_var
 
 ; CHECK-ARM64: adrp [[BASE:x[0-9]+]], defined_weak_var
 ; CHECK-ARM64: add x0, [[BASE]], :lo12:defined_weak_var

Modified: llvm/trunk/test/CodeGen/AArch64/extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/extract.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/extract.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/extract.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
 define i64 @ror_i64(i64 %in) {

Modified: llvm/trunk/test/CodeGen/AArch64/fastcc-reserved.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fastcc-reserved.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fastcc-reserved.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fastcc-reserved.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -tailcallopt | FileCheck %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu -tailcallopt | FileCheck %s --check-prefix=CHECK-ARM64
 
 ; This test is designed to be run in the situation where the

Modified: llvm/trunk/test/CodeGen/AArch64/fastcc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fastcc.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fastcc.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fastcc.ll Sat May 24 07:42:26 2014
@@ -1,6 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -tailcallopt | FileCheck %s -check-prefix CHECK-TAIL
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu -tailcallopt | FileCheck %s -check-prefix CHECK-ARM64-TAIL
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu | FileCheck --check-prefix=CHECK-ARM64 %s
 
 ; Without tailcallopt fastcc still means the caller cleans up the

Modified: llvm/trunk/test/CodeGen/AArch64/fcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fcmp.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fcmp.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fcmp.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu | FileCheck %s
 
 declare void @bar(i32)

Modified: llvm/trunk/test/CodeGen/AArch64/fcvt-fixed.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fcvt-fixed.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fcvt-fixed.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fcvt-fixed.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -O0 | FileCheck %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 -O0
 

Modified: llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fcvt-int.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
 define i32 @test_floattoi32(float %in) {

Modified: llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/flags-multiuse.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
 
 ; LLVM should be able to cope with multiple uses of the same flag-setting

Modified: llvm/trunk/test/CodeGen/AArch64/floatdp_1source.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/floatdp_1source.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/floatdp_1source.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/floatdp_1source.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
 @varhalf = global half 0.0

Modified: llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-linux-gnu -mcpu=cyclone | FileCheck %s
 
 @varfloat = global float 0.0

Modified: llvm/trunk/test/CodeGen/AArch64/fp-cond-sel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp-cond-sel.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp-cond-sel.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp-cond-sel.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu -mcpu=cyclone | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
 
 @varfloat = global float 0.0
@@ -13,7 +12,6 @@ define void @test_csel(i32 %lhs32, i32 %
   %tst1 = icmp ugt i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, float 0.0, float 1.0
   store float %val1, float* @varfloat
-; CHECK-AARCH64: ldr s[[FLT0:[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:.LCPI
 ; CHECK-ARM64: movi v[[FLT0:[0-9]+]].2d, #0
 ; CHECK: fmov s[[FLT1:[0-9]+]], #1.0
 ; CHECK: fcsel {{s[0-9]+}}, s[[FLT0]], s[[FLT1]], hi
@@ -22,7 +20,6 @@ define void @test_csel(i32 %lhs32, i32 %
   %tst2 = icmp sle i64 %lhs64, %rhs64
   %val2 = select i1 %tst2, double 1.0, double 0.0
   store double %val2, double* @vardouble
-; CHECK-AARCH64: ldr d[[FLT0:[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:.LCPI
 ; FLT0 is reused from above on ARM64.
 ; CHECK: fmov d[[FLT1:[0-9]+]], #1.0
 ; CHECK: fcsel {{d[0-9]+}}, d[[FLT1]], d[[FLT0]], le

Modified: llvm/trunk/test/CodeGen/AArch64/fp-dp3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp-dp3.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp-dp3.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp-dp3.ll Sat May 24 07:42:26 2014
@@ -1,5 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -fp-contract=fast | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s -check-prefix=CHECK-NOFAST
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu -fp-contract=fast | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s -check-prefix=CHECK-NOFAST
 

Modified: llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
 declare void @bar(i8*, i8*, i32*)
 

Removed: llvm/trunk/test/CodeGen/AArch64/fp128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp128.ll?rev=209575&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp128.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp128.ll (removed)
@@ -1,282 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
-; arm64 has a separate copy of this test.
-@lhs = global fp128 zeroinitializer
-@rhs = global fp128 zeroinitializer
-
-define fp128 @test_add() {
-; CHECK-LABEL: test_add:
-
-  %lhs = load fp128* @lhs
-  %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
-
-  %val = fadd fp128 %lhs, %rhs
-; CHECK: bl __addtf3
-  ret fp128 %val
-}
-
-define fp128 @test_sub() {
-; CHECK-LABEL: test_sub:
-
-  %lhs = load fp128* @lhs
-  %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
-
-  %val = fsub fp128 %lhs, %rhs
-; CHECK: bl __subtf3
-  ret fp128 %val
-}
-
-define fp128 @test_mul() {
-; CHECK-LABEL: test_mul:
-
-  %lhs = load fp128* @lhs
-  %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
-
-  %val = fmul fp128 %lhs, %rhs
-; CHECK: bl __multf3
-  ret fp128 %val
-}
-
-define fp128 @test_div() {
-; CHECK-LABEL: test_div:
-
-  %lhs = load fp128* @lhs
-  %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
-
-  %val = fdiv fp128 %lhs, %rhs
-; CHECK: bl __divtf3
-  ret fp128 %val
-}
-
-@var32 = global i32 0
-@var64 = global i64 0
-
-define void @test_fptosi() {
-; CHECK-LABEL: test_fptosi:
-  %val = load fp128* @lhs
-
-  %val32 = fptosi fp128 %val to i32
-  store i32 %val32, i32* @var32
-; CHECK: bl __fixtfsi
-
-  %val64 = fptosi fp128 %val to i64
-  store i64 %val64, i64* @var64
-; CHECK: bl __fixtfdi
-
-  ret void
-}
-
-define void @test_fptoui() {
-; CHECK-LABEL: test_fptoui:
-  %val = load fp128* @lhs
-
-  %val32 = fptoui fp128 %val to i32
-  store i32 %val32, i32* @var32
-; CHECK: bl __fixunstfsi
-
-  %val64 = fptoui fp128 %val to i64
-  store i64 %val64, i64* @var64
-; CHECK: bl __fixunstfdi
-
-  ret void
-}
-
-define void @test_sitofp() {
-; CHECK-LABEL: test_sitofp:
-
-  %src32 = load i32* @var32
-  %val32 = sitofp i32 %src32 to fp128
-  store volatile fp128 %val32, fp128* @lhs
-; CHECK: bl __floatsitf
-
-  %src64 = load i64* @var64
-  %val64 = sitofp i64 %src64 to fp128
-  store volatile fp128 %val64, fp128* @lhs
-; CHECK: bl __floatditf
-
-  ret void
-}
-
-define void @test_uitofp() {
-; CHECK-LABEL: test_uitofp:
-
-  %src32 = load i32* @var32
-  %val32 = uitofp i32 %src32 to fp128
-  store volatile fp128 %val32, fp128* @lhs
-; CHECK: bl __floatunsitf
-
-  %src64 = load i64* @var64
-  %val64 = uitofp i64 %src64 to fp128
-  store volatile fp128 %val64, fp128* @lhs
-; CHECK: bl __floatunditf
-
-  ret void
-}
-
-define i1 @test_setcc1() {
-; CHECK-LABEL: test_setcc1:
-
-  %lhs = load fp128* @lhs
-  %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
-
-; Technically, everything after the call to __letf2 is redundant, but we'll let
-; LLVM have its fun for now.
-  %val = fcmp ole fp128 %lhs, %rhs
-; CHECK: bl __letf2
-; CHECK: cmp w0, #0
-; CHECK: cset w0, le
-
-  ret i1 %val
-; CHECK: ret
-}
-
-define i1 @test_setcc2() {
-; CHECK-LABEL: test_setcc2:
-
-  %lhs = load fp128* @lhs
-  %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
-
-; Technically, everything after the call to __letf2 is redundant, but we'll let
-; LLVM have its fun for now.
-  %val = fcmp ugt fp128 %lhs, %rhs
-; CHECK: bl      __gttf2
-; CHECK: cmp w0, #0
-; CHECK: cset   [[GT:w[0-9]+]], gt
-
-; CHECK: bl      __unordtf2
-; CHECK: cmp w0, #0
-; CHECK: cset   [[UNORDERED:w[0-9]+]], ne
-
-; CHECK: orr     w0, [[UNORDERED]], [[GT]]
-
-  ret i1 %val
-; CHECK: ret
-}
-
-define i32 @test_br_cc() {
-; CHECK-LABEL: test_br_cc:
-
-  %lhs = load fp128* @lhs
-  %rhs = load fp128* @rhs
-; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
-; CHECK: ldr q1, [{{x[0-9]+}}, {{#?}}:lo12:rhs]
-
-  ; olt == !uge, which LLVM unfortunately "optimizes" this to.
-  %cond = fcmp olt fp128 %lhs, %rhs
-; CHECK: bl      __getf2
-; CHECK: cmp w0, #0
-; CHECK: cset   [[OGE:w[0-9]+]], ge
-
-; CHECK: bl      __unordtf2
-; CHECK: cmp w0, #0
-; CHECK: cset   [[UNORDERED:w[0-9]+]], ne
-
-; CHECK: orr     [[UGE:w[0-9]+]], [[UNORDERED]], [[OGE]]
-; CHECK: cbnz [[UGE]], [[RET29:.LBB[0-9]+_[0-9]+]]
-  br i1 %cond, label %iftrue, label %iffalse
-
-iftrue:
-  ret i32 42
-; CHECK-NEXT: BB#
-; CHECK-NEXT: movz {{x0|w0}}, #42
-; CHECK-NEXT: b [[REALRET:.LBB[0-9]+_[0-9]+]]
-
-iffalse:
-  ret i32 29
-; CHECK: [[RET29]]:
-; CHECK-NEXT: movz {{x0|w0}}, #29
-; CHECK-NEXT: [[REALRET]]:
-; CHECK: ret
-}
-
-define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
-; CHECK-LABEL: test_select:
-
-  %val = select i1 %cond, fp128 %lhs, fp128 %rhs
-  store fp128 %val, fp128* @lhs
-; CHECK-AARCH64: cmp {{w[0-9]+}}, #0
-; CHECK-AARCH64: str q1, [sp]
-; CHECK-ARM64: tst {{w[0-9]+}}, #0x1
-; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: BB#
-; CHECK-AARCH64-NEXT: str q0, [sp]
-; CHECK-ARM64-NEXT: orr v[[DEST:[0-9]+]].16b, v0.16b, v0.16b
-; CHECK-NEXT: [[IFFALSE]]:
-; CHECK-AARCH64-NEXT: ldr q[[DEST:[0-9]+]], [sp]
-; CHECK: str q[[DEST]], [{{x[0-9]+}}, {{#?}}:lo12:lhs]
-  ret void
-; CHECK: ret
-}
-
-@varfloat = global float 0.0
-@vardouble = global double 0.0
-
-define void @test_round() {
-; CHECK-LABEL: test_round:
-
-  %val = load fp128* @lhs
-
-  %float = fptrunc fp128 %val to float
-  store float %float, float* @varfloat
-; CHECK: bl __trunctfsf2
-; CHECK: str s0, [{{x[0-9]+}}, {{#?}}:lo12:varfloat]
-
-  %double = fptrunc fp128 %val to double
-  store double %double, double* @vardouble
-; CHECK: bl __trunctfdf2
-; CHECK: str d0, [{{x[0-9]+}}, {{#?}}:lo12:vardouble]
-
-  ret void
-}
-
-define void @test_extend() {
-; CHECK-LABEL: test_extend:
-
-  %val = load fp128* @lhs
-
-  %float = load float* @varfloat
-  %fromfloat = fpext float %float to fp128
-  store volatile fp128 %fromfloat, fp128* @lhs
-; CHECK: bl __extendsftf2
-; CHECK: str q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
-
-  %double = load double* @vardouble
-  %fromdouble = fpext double %double to fp128
-  store volatile fp128 %fromdouble, fp128* @lhs
-; CHECK: bl __extenddftf2
-; CHECK: str q0, [{{x[0-9]+}}, {{#?}}:lo12:lhs]
-
-  ret void
-; CHECK: ret
-}
-
-define fp128 @test_neg(fp128 %in) {
-; CHECK: [[MINUS0:.LCPI[0-9]+_0]]:
-; Make sure the weird hex constant below *is* -0.0
-; CHECK-NEXT: fp128 -0
-
-; CHECK-LABEL: test_neg:
-
-  ; Could in principle be optimized to fneg which we can't select, this makes
-  ; sure that doesn't happen.
-  %ret = fsub fp128 0xL00000000000000008000000000000000, %in
-; CHECK-AARCH64: str q0, [sp, #-16]
-; CHECK-AARCH64-NEXT: ldr q1, [sp], #16
-; CHECK-ARM64: orr v1.16b, v0.16b, v0.16b
-; CHECK: ldr q0, [{{x[0-9]+}}, {{#?}}:lo12:[[MINUS0]]]
-; CHECK: bl __subtf3
-
-  ret fp128 %ret
-; CHECK: ret
-}

Modified: llvm/trunk/test/CodeGen/AArch64/fpimm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fpimm.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fpimm.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fpimm.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-linux-gnu | FileCheck %s
 
 @varf32 = global float 0.0

Modified: llvm/trunk/test/CodeGen/AArch64/frameaddr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/frameaddr.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/frameaddr.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/frameaddr.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc < %s -mtriple=aarch64-none-linux-gnu  | FileCheck %s
 ; RUN: llc -o - %s -mtriple=arm64-apple-ios7.0  | FileCheck %s
 
 define i8* @t() nounwind {

Modified: llvm/trunk/test/CodeGen/AArch64/free-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/free-zext.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/free-zext.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/free-zext.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
 define i64 @test_free_zext(i8* %a, i16* %b) {

Modified: llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll Sat May 24 07:42:26 2014
@@ -1,7 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck --check-prefix=CHECK --check-prefix=CHECK-AARCH64 --check-prefix=CHECK-LE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefix=CHECK --check-prefix=CHECK-BE-AARCH64 --check-prefix=CHECK-BE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
 
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu | FileCheck --check-prefix=CHECK --check-prefix=CHECK-ARM64 %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
@@ -67,8 +63,6 @@ define void @check_byval_align(i32* byva
 
     %val0 = load volatile i32* %addr0
     ; Some weird move means x0 is used for one access
-; CHECK-AARCH64: add x[[STRUCTVAL_ADDR:[0-9]+]], sp, #16
-; CHECK-AARCH64: ldr [[REG32:w[0-9]+]], [x[[STRUCTVAL_ADDR]], #12]
 ; CHECK-ARM64: ldr [[REG32:w[0-9]+]], [sp, #28]
     store i32 %val0, i32* @var32
 ; CHECK: str [[REG32]], [{{x[0-9]+}}, {{#?}}:lo12:var32]
@@ -166,9 +160,7 @@ define void @stacked_fpu(float %var0, do
     ; Beware as above: the offset would be different on big-endian
     ; machines if the first ldr were changed to use s-registers.
 ; CHECK-ARM64: ldr {{[ds]}}[[VALFLOAT:[0-9]+]], [sp]
-; CHECK-AARCH64: ldr {{[ds]}}[[VALFLOAT:[0-9]+]], [sp]
 ; CHECK-ARM64: str s[[VALFLOAT]], [{{x[0-9]+}}, {{#?}}:lo12:varfloat]
-; CHECK-AARCH64: str s[[VALFLOAT]], [{{x[0-9]+}}, {{#?}}:lo12:varfloat]
 
     ret void
 }
@@ -196,7 +188,6 @@ define void @check_i128_stackalign(i32 %
 ; CHECK-BE-AARCH64: ldr {{x[0-9]+}}, [sp, #24]
 
     ; Important point is that we address sp+24 for second dword
-; CHECK-AARCH64: ldr     {{x[0-9]+}}, [sp, #16]
 
 ; CHECK-ARM64: ldp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
     ret void

Modified: llvm/trunk/test/CodeGen/AArch64/func-calls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/func-calls.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/func-calls.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/func-calls.ll Sat May 24 07:42:26 2014
@@ -1,7 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefix=CHECK --check-prefix=CHECK-BE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-BE --check-prefix=CHECK-NOFP %s
 
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu -mattr=-neon | FileCheck --check-prefix=CHECK --check-prefix=CHECK-ARM64-NONEON %s
@@ -93,10 +89,6 @@ define void @check_stack_args() {
   ; Want to check that the final double is passed in registers and
   ; that varstruct is passed on the stack. Rather dependent on how a
   ; memcpy gets created, but the following works for now.
-; CHECK-AARCH64: mov x[[SPREG:[0-9]+]], sp
-; CHECK-AARCH64-DAG: str {{w[0-9]+}}, [x[[SPREG]]]
-; CHECK-AARCH64-DAG: str {{w[0-9]+}}, [x[[SPREG]], #12]
-; CHECK-AARCH64-DAG: fmov d0,
 
 ; CHECK-ARM64-DAG: str {{q[0-9]+}}, [sp]
 ; CHECK-ARM64-DAG: fmov d[[FINAL_DOUBLE:[0-9]+]], #1.0
@@ -112,9 +104,6 @@ define void @check_stack_args() {
   call void @stacked_fpu(float -1.0, double 1.0, float 4.0, float 2.0,
                          float -2.0, float -8.0, float 16.0, float 1.0,
                          float 64.0)
-; CHECK-AARCH64: ldr s[[STACKEDREG:[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:.LCPI
-; CHECK-AARCH64: mov x0, sp
-; CHECK-AARCH64: str d[[STACKEDREG]], [x0]
 
 ; CHECK-ARM64:  movz [[SIXTY_FOUR:w[0-9]+]], #0x4280, lsl #16
 ; CHECK-ARM64: str [[SIXTY_FOUR]], [sp]
@@ -142,9 +131,6 @@ define void @check_i128_align() {
                                    i32 42, i128 %val)
 ; CHECK: ldr [[I128LO:x[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:var128]
 ; CHECK: ldr [[I128HI:x[0-9]+]], [{{x[0-9]+}}, #8]
-; CHECK-AARCH64: mov x[[SPREG:[0-9]+]], sp
-; CHECK-AARCH64: str [[I128HI]], [x[[SPREG]], #24]
-; CHECK-AARCH64: str [[I128LO]], [x[[SPREG]], #16]
 ; CHECK-ARM64: stp [[I128LO]], [[I128HI]], [sp, #16]
 ; CHECK-ARM64-NONEON: stp [[I128LO]], [[I128HI]], [sp, #16]
 ; CHECK: bl check_i128_stackalign

Modified: llvm/trunk/test/CodeGen/AArch64/global-alignment.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/global-alignment.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/global-alignment.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/global-alignment.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
 
 @var32 = global [3 x i32] zeroinitializer

Removed: llvm/trunk/test/CodeGen/AArch64/global_merge_1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/global_merge_1.ll?rev=209575&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/global_merge_1.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/global_merge_1.ll (removed)
@@ -1,17 +0,0 @@
-; RUN: llc < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-
-@m = internal global i32 0, align 4
-@n = internal global i32 0, align 4
-
-define void @f1(i32 %a1, i32 %a2) {
-; CHECK-LABEL: f1:
-; CHECK: adrp x{{[0-9]+}}, _MergedGlobals
-; CHECK-NOT: adrp
-  store i32 %a1, i32* @m, align 4
-  store i32 %a2, i32* @n, align 4
-  ret void
-}
-
-; CHECK:        .local _MergedGlobals
-; CHECK:        .comm  _MergedGlobals,8,8
-

Modified: llvm/trunk/test/CodeGen/AArch64/got-abuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/got-abuse.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/got-abuse.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/got-abuse.ll Sat May 24 07:42:26 2014
@@ -1,5 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -filetype=obj < %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -o - %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -filetype=obj -o - %s
 

Modified: llvm/trunk/test/CodeGen/AArch64/i128-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/i128-align.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/i128-align.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/i128-align.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-apple-ios7.0 -verify-machineinstrs -o - %s | FileCheck %s
 
 %struct = type { i32, i128, i8 }

Removed: llvm/trunk/test/CodeGen/AArch64/i128-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/i128-shift.ll?rev=209575&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/i128-shift.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/i128-shift.ll (removed)
@@ -1,44 +0,0 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-; arm64 has its own version of this in long-shift.ll. We'll just use that.
-
-define i128 @test_i128_lsl(i128 %a, i32 %shift) {
-; CHECK-LABEL: test_i128_lsl:
-
-  %sh_prom = zext i32 %shift to i128
-  %shl = shl i128 %a, %sh_prom
-
-; CHECK: movz [[SIXTYFOUR:x[0-9]+]], #64
-; CHECK-NEXT: sub [[REVSHAMT:x[0-9]+]], [[SIXTYFOUR]], [[SHAMT_32:w[0-9]+]], uxtw
-; CHECK-NEXT: lsr [[TMP1:x[0-9]+]], [[LO:x[0-9]+]], [[REVSHAMT]]
-; CHECK: lsl [[TMP2:x[0-9]+]], [[HI:x[0-9]+]], [[SHAMT:x[0-9]+]]
-; CHECK-NEXT: orr [[FALSEVAL:x[0-9]+]], [[TMP1]], [[TMP2]]
-; CHECK-NEXT: sub [[EXTRASHAMT:x[0-9]+]], [[SHAMT]], #64
-; CHECK-NEXT: lsl [[TMP3:x[0-9]+]], [[LO]], [[EXTRASHAMT]]
-; CHECK-NEXT: cmp [[EXTRASHAMT]], #0
-; CHECK-NEXT: csel [[RESULTHI:x[0-9]+]], [[TMP3]], [[FALSEVAL]], ge
-; CHECK-NEXT: lsl [[TMP4:x[0-9]+]], [[LO]], [[SHAMT]]
-; CHECK-NEXT: csel [[RESULTLO:x[0-9]+]], xzr, [[TMP4]], ge
-
-  ret i128 %shl
-}
-
-define i128 @test_i128_shr(i128 %a, i32 %shift) {
-; CHECK-LABEL: test_i128_shr:
-
-  %sh_prom = zext i32 %shift to i128
-  %shr = lshr i128 %a, %sh_prom
-
-; CHECK: movz [[SIXTYFOUR]], #64
-; CHECK-NEXT: sub [[REVSHAMT:x[0-9]+]], [[SIXTYFOUR]], [[SHAMT_32:w[0-9]+]], uxtw
-; CHECK-NEXT: lsl [[TMP2:x[0-9]+]], [[HI:x[0-9]+]], [[REVSHAMT]]
-; CHECK: lsr [[TMP1:x[0-9]+]], [[LO:x[0-9]+]], [[SHAMT:x[0-9]+]]
-; CHECK-NEXT: orr [[FALSEVAL:x[0-9]+]], [[TMP1]], [[TMP2]]
-; CHECK-NEXT: sub [[EXTRASHAMT:x[0-9]+]], [[SHAMT]], #64
-; CHECK-NEXT: lsr [[TRUEVAL:x[0-9]+]], [[HI]], [[EXTRASHAMT]]
-; CHECK-NEXT: cmp [[EXTRASHAMT]], #0
-; CHECK-NEXT: csel [[RESULTLO:x[0-9]+]], [[TRUEVAL]], [[FALSEVAL]], ge
-; CHECK-NEXT: lsr [[TMP3:x[0-9]+]], [[HI]], [[SHAMT]]
-; CHECK-NEXT: csel [[RESULTHI:x[0-9]+]], xzr, [[TMP3]], ge
-
-  ret i128 %shr
-}

Modified: llvm/trunk/test/CodeGen/AArch64/illegal-float-ops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/illegal-float-ops.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/illegal-float-ops.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/illegal-float-ops.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
 
 @varfloat = global float 0.0

Modified: llvm/trunk/test/CodeGen/AArch64/init-array.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/init-array.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/init-array.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/init-array.ll Sat May 24 07:42:26 2014
@@ -1,5 +1,3 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -use-init-array < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-none-eabi -verify-machineinstrs -use-init-array < %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -use-init-array -o - %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-none-none-eabi -verify-machineinstrs -use-init-array -o - %s | FileCheck %s
 

Modified: llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badI.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badI.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badI.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badI.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: not llc -mtriple=aarch64-none-linux-gnu < %s
 ; RUN: not llc -mtriple=arm64-none-linux-gnu -o - %s
 
 define void @foo() {

Modified: llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badK.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badK.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badK.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badK.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: not llc -mtriple=aarch64-none-linux-gnu < %s
 ; RUN: not llc -mtriple=arm64-apple-ios7.0 -o - %s
 
 define void @foo() {

Modified: llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: not llc -mtriple=aarch64-none-linux-gnu < %s
 ; RUN: not llc -mtriple=arm64-none-linux-gnu -o - %s
 
 define void @foo() {

Modified: llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badL.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badL.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badL.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints-badL.ll Sat May 24 07:42:26 2014
@@ -1,4 +1,3 @@
-; RUN: not llc -mtriple=aarch64-none-linux-gnu < %s
 ; RUN: not llc -mtriple=arm64-apple-ios7.0 -o - %s
 
 define void @foo() {

Removed: llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints.ll?rev=209575&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/inline-asm-constraints.ll (removed)
@@ -1,137 +0,0 @@
-;RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon -no-integrated-as < %s | FileCheck %s
-
-define i64 @test_inline_constraint_r(i64 %base, i32 %offset) {
-; CHECK-LABEL: test_inline_constraint_r:
-  %val = call i64 asm "add $0, $1, $2, sxtw", "=r,r,r"(i64 %base, i32 %offset)
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw
-  ret i64 %val
-}
-
-define i16 @test_small_reg(i16 %lhs, i16 %rhs) {
-; CHECK-LABEL: test_small_reg:
-  %val = call i16 asm sideeffect "add $0, $1, $2, sxth", "=r,r,r"(i16 %lhs, i16 %rhs)
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxth
-  ret i16 %val
-}
-
-define i64 @test_inline_constraint_r_imm(i64 %base, i32 %offset) {
-; CHECK-LABEL: test_inline_constraint_r_imm:
-  %val = call i64 asm "add $0, $1, $2, sxtw", "=r,r,r"(i64 4, i32 12)
-; CHECK: movz [[FOUR:x[0-9]+]], #4
-; CHECK: movz [[TWELVE:w[0-9]+]], #12
-; CHECK: add {{x[0-9]+}}, [[FOUR]], [[TWELVE]], sxtw
-  ret i64 %val
-}
-
-; m is permitted to have a base/offset form. We don't do that
-; currently though.
-define i32 @test_inline_constraint_m(i32 *%ptr) {
-; CHECK-LABEL: test_inline_constraint_m:
-  %val = call i32 asm "ldr $0, $1", "=r,m"(i32 *%ptr)
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
-  ret i32 %val
-}
-
-@arr = global [8 x i32] zeroinitializer
-
-; Q should *never* have base/offset form even if given the chance.
-define i32 @test_inline_constraint_Q(i32 *%ptr) {
-; CHECK-LABEL: test_inline_constraint_Q:
-  %val = call i32 asm "ldr $0, $1", "=r,Q"(i32* getelementptr([8 x i32]* @arr, i32 0, i32 1))
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
-  ret i32 %val
-}
-
-@dump = global fp128 zeroinitializer
-
-define void @test_inline_constraint_w(<8 x i8> %vec64, <4 x float> %vec128, half %hlf, float %flt, double %dbl, fp128 %quad) {
-; CHECK: test_inline_constraint_w:
-  call <8 x i8> asm sideeffect "add $0.8b, $1.8b, $1.8b", "=w,w"(<8 x i8> %vec64)
-  call <8 x i8> asm sideeffect "fadd $0.4s, $1.4s, $1.4s", "=w,w"(<4 x float> %vec128)
-; CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-; CHECK: fadd {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-
-  ; Arguably semantically dodgy to output "vN", but it's what GCC does
-  ; so purely for compatibility we want vector registers to be output.
-  call float asm sideeffect "fcvt ${0:s}, ${1:h}", "=w,w"(half undef)
-  call float asm sideeffect "fadd $0.2s, $0.2s, $0.2s", "=w,w"(float %flt)
-  call double asm sideeffect "fadd $0.2d, $0.2d, $0.2d", "=w,w"(double %dbl)
-  call fp128 asm sideeffect "fadd $0.2d, $0.2d, $0.2d", "=w,w"(fp128 %quad)
-; CHECK: fcvt {{s[0-9]+}}, {{h[0-9]+}}
-; CHECK: fadd {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-; CHECK: fadd {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-; CHECK: fadd {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-  ret void
-}
-
-define void @test_inline_constraint_I() {
-; CHECK-LABEL: test_inline_constraint_I:
-  call void asm sideeffect "add x0, x0, $0", "I"(i32 0)
-  call void asm sideeffect "add x0, x0, $0", "I"(i64 4095)
-; CHECK: add x0, x0, #0
-; CHECK: add x0, x0, #4095
-
-  ret void
-}
-
-; Skip J because it's useless
-
-define void @test_inline_constraint_K() {
-; CHECK-LABEL: test_inline_constraint_K:
-  call void asm sideeffect "and w0, w0, $0", "K"(i32 2863311530) ; = 0xaaaaaaaa
-  call void asm sideeffect "and w0, w0, $0", "K"(i32 65535)
-; CHECK: and w0, w0, #-1431655766
-; CHECK: and w0, w0, #65535
-
-  ret void
-}
-
-define void @test_inline_constraint_L() {
-; CHECK-LABEL: test_inline_constraint_L:
-  call void asm sideeffect "and x0, x0, $0", "L"(i64 4294967296) ; = 0xaaaaaaaa
-  call void asm sideeffect "and x0, x0, $0", "L"(i64 65535)
-; CHECK: and x0, x0, #4294967296
-; CHECK: and x0, x0, #65535
-
-  ret void
-}
-
-; Skip M and N because we don't support MOV pseudo-instructions yet.
-
-@var = global i32 0
-
-define void @test_inline_constraint_S() {
-; CHECK-LABEL: test_inline_constraint_S:
-  call void asm sideeffect "adrp x0, $0", "S"(i32* @var)
-  call void asm sideeffect "adrp x0, ${0:A}", "S"(i32* @var)
-  call void asm sideeffect "add x0, x0, ${0:L}", "S"(i32* @var)
-; CHECK: adrp x0, var
-; CHECK: adrp x0, var
-; CHECK: add x0, x0, #:lo12:var
-  ret void
-}
-
-define i32 @test_inline_constraint_S_label(i1 %in) {
-; CHECK-LABEL: test_inline_constraint_S_label:
-  call void asm sideeffect "adr x0, $0", "S"(i8* blockaddress(@test_inline_constraint_S_label, %loc))
-; CHECK: adr x0, .Ltmp{{[0-9]+}}
-  br i1 %in, label %loc, label %loc2
-loc:
-  ret i32 0
-loc2:
-  ret i32 42
-}
-
-define void @test_inline_constraint_Y() {
-; CHECK-LABEL: test_inline_constraint_Y:
-  call void asm sideeffect "fcmp s0, $0", "Y"(float 0.0)
-; CHECK: fcmp s0, #0.0
-  ret void
-}
-
-define void @test_inline_constraint_Z() {
-; CHECK-LABEL: test_inline_constraint_Z:
-  call void asm sideeffect "cmp w0, $0", "Z"(i32 0)
-; CHECK: cmp w0, #0
-  ret void
-}

Removed: llvm/trunk/test/CodeGen/AArch64/inline-asm-modifiers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/inline-asm-modifiers.ll?rev=209575&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/inline-asm-modifiers.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/inline-asm-modifiers.ll (removed)
@@ -1,147 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -no-integrated-as < %s | FileCheck %s
-
-@var_simple = hidden global i32 0
-@var_got = global i32 0
-@var_tlsgd = thread_local global i32 0
-@var_tlsld = thread_local(localdynamic) global i32 0
-@var_tlsie = thread_local(initialexec) global i32 0
-@var_tlsle = thread_local(localexec) global i32 0
-
-define void @test_inline_modifier_L() nounwind {
-; CHECK-LABEL: test_inline_modifier_L:
-  call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_simple)
-  call void asm sideeffect "ldr x0, [x0, ${0:L}]", "S,~{x0}"(i32* @var_got)
-  call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_tlsgd)
-  call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_tlsld)
-  call void asm sideeffect "ldr x0, [x0, ${0:L}]", "S,~{x0}"(i32* @var_tlsie)
-  call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_tlsle)
-; CHECK: add x0, x0, #:lo12:var_simple
-; CHECK: ldr x0, [x0, #:got_lo12:var_got]
-; CHECK: add x0, x0, #:tlsdesc_lo12:var_tlsgd
-; CHECK: add x0, x0, #:dtprel_lo12:var_tlsld
-; CHECK: ldr x0, [x0, #:gottprel_lo12:var_tlsie]
-; CHECK: add x0, x0, #:tprel_lo12:var_tlsle
-
-  call void asm sideeffect "add x0, x0, ${0:L}", "Si,~{x0}"(i32 64)
-  call void asm sideeffect "ldr x0, [x0, ${0:L}]", "Si,~{x0}"(i32 64)
-; CHECK: add x0, x0, #64
-; CHECK: ldr x0, [x0, #64]
-
-  ret void
-}
-
-define void @test_inline_modifier_G() nounwind {
-; CHECK-LABEL: test_inline_modifier_G:
-  call void asm sideeffect "add x0, x0, ${0:G}, lsl #12", "S,~{x0}"(i32* @var_tlsld)
-  call void asm sideeffect "add x0, x0, ${0:G}, lsl #12", "S,~{x0}"(i32* @var_tlsle)
-; CHECK: add x0, x0, #:dtprel_hi12:var_tlsld, lsl #12
-; CHECK: add x0, x0, #:tprel_hi12:var_tlsle, lsl #12
-
-  call void asm sideeffect "add x0, x0, ${0:G}", "Si,~{x0}"(i32 42)
-; CHECK: add x0, x0, #42
-  ret void
-}
-
-define void @test_inline_modifier_A() nounwind {
-; CHECK-LABEL: test_inline_modifier_A:
-  call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_simple)
-  call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_got)
-  call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_tlsgd)
-  call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_tlsie)
-  ; N.b. All tprel and dtprel relocs are modified: lo12 or granules.
-; CHECK: adrp x0, var_simple
-; CHECK: adrp x0, :got:var_got
-; CHECK: adrp x0, :tlsdesc:var_tlsgd
-; CHECK: adrp x0, :gottprel:var_tlsie
-
-  call void asm sideeffect "adrp x0, ${0:A}", "Si,~{x0}"(i32 40)
-; CHECK: adrp x0, #40
-
-  ret void
-}
-
-define void @test_inline_modifier_wx(i32 %small, i64 %big) nounwind {
-; CHECK-LABEL: test_inline_modifier_wx:
-  call i32 asm sideeffect "add $0, $0, $0", "=r,0"(i32 %small)
-  call i32 asm sideeffect "add ${0:w}, ${0:w}, ${0:w}", "=r,0"(i32 %small)
-  call i32 asm sideeffect "add ${0:x}, ${0:x}, ${0:x}", "=r,0"(i32 %small)
-; CHECK: //APP
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-
-  call i64 asm sideeffect "add $0, $0, $0", "=r,0"(i64 %big)
-  call i64 asm sideeffect "add ${0:w}, ${0:w}, ${0:w}", "=r,0"(i64 %big)
-  call i64 asm sideeffect "add ${0:x}, ${0:x}, ${0:x}", "=r,0"(i64 %big)
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-
-  call i32 asm sideeffect "add ${0:w}, ${1:w}, ${1:w}", "=r,r"(i32 0)
-  call i32 asm sideeffect "add ${0:x}, ${1:x}, ${1:x}", "=r,r"(i32 0)
-; CHECK: add {{w[0-9]+}}, wzr, wzr
-; CHECK: add {{x[0-9]+}}, xzr, xzr
-
-  call i32 asm sideeffect "add ${0:w}, ${0:w}, ${1:w}", "=r,Ir,0"(i32 123, i32 %small)
-  call i64 asm sideeffect "add ${0:x}, ${0:x}, ${1:x}", "=r,Ir,0"(i32 456, i64 %big)
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #123
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #456
-
-  ret void
-}
-
-define void @test_inline_modifier_bhsdq() nounwind {
-; CHECK-LABEL: test_inline_modifier_bhsdq:
-  call float asm sideeffect "ldr ${0:b}, [sp]", "=w"()
-  call float asm sideeffect "ldr ${0:h}, [sp]", "=w"()
-  call float asm sideeffect "ldr ${0:s}, [sp]", "=w"()
-  call float asm sideeffect "ldr ${0:d}, [sp]", "=w"()
-  call float asm sideeffect "ldr ${0:q}, [sp]", "=w"()
-; CHECK: ldr b0, [sp]
-; CHECK: ldr h0, [sp]
-; CHECK: ldr s0, [sp]
-; CHECK: ldr d0, [sp]
-; CHECK: ldr q0, [sp]
-
-  call double asm sideeffect "ldr ${0:b}, [sp]", "=w"()
-  call double asm sideeffect "ldr ${0:h}, [sp]", "=w"()
-  call double asm sideeffect "ldr ${0:s}, [sp]", "=w"()
-  call double asm sideeffect "ldr ${0:d}, [sp]", "=w"()
-  call double asm sideeffect "ldr ${0:q}, [sp]", "=w"()
-; CHECK: ldr b0, [sp]
-; CHECK: ldr h0, [sp]
-; CHECK: ldr s0, [sp]
-; CHECK: ldr d0, [sp]
-; CHECK: ldr q0, [sp]
-
-  call void asm sideeffect "fcmp b0, ${0:b}", "Yw"(float 0.0)
-  call void asm sideeffect "fcmp h0, ${0:h}", "Yw"(float 0.0)
-  call void asm sideeffect "fcmp s0, ${0:s}", "Yw"(float 0.0)
-  call void asm sideeffect "fcmp d0, ${0:d}", "Yw"(float 0.0)
-  call void asm sideeffect "fcmp q0, ${0:q}", "Yw"(float 0.0)
-; CHECK: fcmp b0, #0
-; CHECK: fcmp h0, #0
-; CHECK: fcmp s0, #0
-; CHECK: fcmp d0, #0
-; CHECK: fcmp q0, #0
-
-  ret void
-}
-
-define void @test_inline_modifier_c() nounwind {
-; CHECK-LABEL: test_inline_modifier_c:
-  call void asm sideeffect "adr x0, ${0:c}", "i"(i32 3)
-; CHECK: adr x0, 3
-
-  ret void
-}
-
-define void @test_inline_modifier_a() nounwind {
-; CHECK-LABEL: test_inline_modifier_a:
-  call void asm sideeffect "prfm pldl1keep, ${0:a}", "r"(i32* @var_simple)
-; CHECK: adrp [[VARHI:x[0-9]+]], var_simple
-; CHECK: add x[[VARADDR:[0-9]+]], [[VARHI]], #:lo12:var_simple
-; CHECK: prfm pldl1keep, [x[[VARADDR]]]
-  ret void
-}
-

Modified: llvm/trunk/test/CodeGen/AArch64/jump-table.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/jump-table.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/jump-table.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/jump-table.ll Sat May 24 07:42:26 2014
@@ -1,6 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-; RUN: llc -code-model=large -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck --check-prefix=CHECK-LARGE %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic <%s | FileCheck --check-prefix=CHECK-PIC %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu | FileCheck %s
 ; RUN: llc -code-model=large -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu | FileCheck --check-prefix=CHECK-LARGE %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -relocation-model=pic -o - %s | FileCheck --check-prefix=CHECK-PIC %s

Modified: llvm/trunk/test/CodeGen/AArch64/large-consts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/large-consts.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/large-consts.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/large-consts.ll Sat May 24 07:42:26 2014
@@ -1,14 +1,9 @@
-; RUN: llc -mtriple=aarch64-linux-gnu -o - %s -code-model=large -show-mc-encoding | FileCheck %s --check-prefix=CHECK-AARCH64
 ; RUN: llc -mtriple=arm64-linux-gnu -o - %s -code-model=large -show-mc-encoding | FileCheck %s --check-prefix=CHECK-ARM64
 
 ; Make sure the shift amount is encoded into the instructions by LLVM because
 ; it's not the linker's job to put it there.
 
 define double @foo() {
-; CHECK-AARCH64: movz [[CPADDR:x[0-9]+]], #:abs_g3:.LCPI0_0   // encoding: [A,A,0xe0'A',0xd2'A']
-; CHECK-AARCH64: movk [[CPADDR]], #:abs_g2_nc:.LCPI0_0 // encoding: [A,A,0xc0'A',0xf2'A']
-; CHECK-AARCH64: movk [[CPADDR]], #:abs_g1_nc:.LCPI0_0 // encoding: [A,A,0xa0'A',0xf2'A']
-; CHECK-AARCH64: movk [[CPADDR]], #:abs_g0_nc:.LCPI0_0 // encoding: [A,A,0x80'A',0xf2'A']
 
 ; CHECK-ARM64: movz [[CPADDR:x[0-9]+]], #:abs_g3:.LCPI0_0   // encoding: [0bAAA01000,A,0b111AAAAA,0xd2]
 ; CHECK-ARM64: movk [[CPADDR]], #:abs_g2_nc:.LCPI0_0 // encoding: [0bAAA01000,A,0b110AAAAA,0xf2]

Removed: llvm/trunk/test/CodeGen/AArch64/large-frame.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/large-frame.ll?rev=209575&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/large-frame.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/large-frame.ll (removed)
@@ -1,120 +0,0 @@
-; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
-; arm64 has a separate copy: aarch64-large-frame.ll (codegen was too different).
-declare void @use_addr(i8*)
-
-@addr = global i8* null
-
-define void @test_bigframe() {
-; CHECK-LABEL: test_bigframe:
-; CHECK: .cfi_startproc
-
-  %var1 = alloca i8, i32 20000000
-  %var2 = alloca i8, i32 16
-  %var3 = alloca i8, i32 20000000
-; CHECK: sub sp, sp, #496
-; CHECK: .cfi_def_cfa sp, 496
-; CHECK: str x30, [sp, #488]
-  ; Total adjust is 39999536
-; CHECK: movz [[SUBCONST:x[0-9]+]], #22576
-; CHECK: movk [[SUBCONST]], #610, lsl #16
-; CHECK: sub sp, sp, [[SUBCONST]]
-; CHECK: .cfi_def_cfa sp, 40000032
-; CHECK: .cfi_offset x30, -8
-
-  ; Total offset is 20000024
-; CHECK: movz [[VAR1OFFSET:x[0-9]+]], #11544
-; CHECK: movk [[VAR1OFFSET]], #305, lsl #16
-; CHECK: add {{x[0-9]+}}, sp, [[VAR1OFFSET]]
-  store volatile i8* %var1, i8** @addr
-
-  %var1plus2 = getelementptr i8* %var1, i32 2
-  store volatile i8* %var1plus2, i8** @addr
-
-; CHECK: movz [[VAR2OFFSET:x[0-9]+]], #11528
-; CHECK: movk [[VAR2OFFSET]], #305, lsl #16
-; CHECK: add {{x[0-9]+}}, sp, [[VAR2OFFSET]]
-  store volatile i8* %var2, i8** @addr
-
-  %var2plus2 = getelementptr i8* %var2, i32 2
-  store volatile i8* %var2plus2, i8** @addr
-
-  store volatile i8* %var3, i8** @addr
-
-  %var3plus2 = getelementptr i8* %var3, i32 2
-  store volatile i8* %var3plus2, i8** @addr
-
-; CHECK: movz [[ADDCONST:x[0-9]+]], #22576
-; CHECK: movk [[ADDCONST]], #610, lsl #16
-; CHECK: add sp, sp, [[ADDCONST]]
-; CHECK: .cfi_endproc
-  ret void
-}
-
-define void @test_mediumframe() {
-; CHECK-LABEL: test_mediumframe:
-  %var1 = alloca i8, i32 1000000
-  %var2 = alloca i8, i32 16
-  %var3 = alloca i8, i32 1000000
-; CHECK: sub sp, sp, #496
-; CHECK: str x30, [sp, #488]
-; CHECK: sub sp, sp, #688
-; CHECK-NEXT: sub sp, sp, #488, lsl #12
-
-  store volatile i8* %var1, i8** @addr
-; CHECK: add [[VAR1ADDR:x[0-9]+]], sp, #600
-; CHECK: add [[VAR1ADDR]], [[VAR1ADDR]], #244, lsl #12
-
-  %var1plus2 = getelementptr i8* %var1, i32 2
-  store volatile i8* %var1plus2, i8** @addr
-; CHECK: add [[VAR1PLUS2:x[0-9]+]], {{x[0-9]+}}, #2
-
-  store volatile i8* %var2, i8** @addr
-; CHECK: add [[VAR2ADDR:x[0-9]+]], sp, #584
-; CHECK: add [[VAR2ADDR]], [[VAR2ADDR]], #244, lsl #12
-
-  %var2plus2 = getelementptr i8* %var2, i32 2
-  store volatile i8* %var2plus2, i8** @addr
-; CHECK: add [[VAR2PLUS2:x[0-9]+]], {{x[0-9]+}}, #2
-
-  store volatile i8* %var3, i8** @addr
-
-  %var3plus2 = getelementptr i8* %var3, i32 2
-  store volatile i8* %var3plus2, i8** @addr
-
-; CHECK: add sp, sp, #688
-; CHECK: add sp, sp, #488, lsl #12
-; CHECK: ldr x30, [sp, #488]
-; CHECK: add sp, sp, #496
-  ret void
-}
-
-
-@bigspace = global [8 x i64] zeroinitializer
-
-; If temporary registers are allocated for adjustment, they should *not* clobber
-; argument registers.
-define void @test_tempallocation([8 x i64] %val) nounwind {
-; CHECK-LABEL: test_tempallocation:
-  %var = alloca i8, i32 1000000
-; CHECK: sub sp, sp,
-
-; Make sure the prologue is reasonably efficient
-; CHECK-NEXT: stp x29, x30, [sp,
-; CHECK-NEXT: stp x25, x26, [sp,
-; CHECK-NEXT: stp x23, x24, [sp,
-; CHECK-NEXT: stp x21, x22, [sp,
-; CHECK-NEXT: stp x19, x20, [sp,
-
-; Make sure we don't trash an argument register
-; CHECK-NOT: movz {{x[0-7],}}
-; CHECK: sub sp, sp,
-
-; CHECK-NOT: movz {{x[0-7],}}
-
-; CHECK: bl use_addr
-  call void @use_addr(i8* %var)
-
-  store [8 x i64] %val, [8 x i64]* @bigspace
-  ret void
-; CHECK: ret
-}

Modified: llvm/trunk/test/CodeGen/AArch64/ldst-regoffset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-regoffset.ll?rev=209576&r1=209575&r2=209576&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-regoffset.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-regoffset.ll Sat May 24 07:42:26 2014
@@ -1,5 +1,3 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-none-linux-gnu | FileCheck %s
 ; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
 





More information about the llvm-commits mailing list