[llvm] r205986 - For the ARM integrated assembler add checking of the
Kevin Enderby
enderby at apple.com
Thu Apr 10 13:18:58 PDT 2014
Author: enderby
Date: Thu Apr 10 15:18:58 2014
New Revision: 205986
URL: http://llvm.org/viewvc/llvm-project?rev=205986&view=rev
Log:
For the ARM integrated assembler add checking of the
alignments on vld/vst instructions. And report errors for
alignments that are not supported.
While this is a large diff and a big test case, the changes
are very straightforward. But pretty much all vld/vst
instructions had to be touched, changing the addrmode to one
of the new ones that were added, which will do the proper
checking for the specific instruction.
FYI, re-committing this with a tweak so MemoryOp's default
constructor is trivial and will work with MSVC 2012. Thanks
to Reid Kleckner and Jim Grosbach for help with the tweak.
rdar://11312406
Added:
llvm/trunk/test/MC/ARM/neon-vld-vst-align.s
Modified:
llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
llvm/trunk/lib/Target/ARM/ARMInstrNEON.td
llvm/trunk/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
Modified: llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrInfo.td?rev=205986&r1=205985&r2=205986&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrInfo.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrInfo.td Thu Apr 10 15:18:58 2014
@@ -991,6 +991,81 @@ def addrmode6oneL32 : Operand<i32>,
let EncoderMethod = "getAddrMode6OneLane32AddressOpValue";
}
+// Base class for addrmode6 with specific alignment restrictions.
+class AddrMode6Align : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
+ let PrintMethod = "printAddrMode6Operand";
+ let MIOperandInfo = (ops GPR:$addr, i32imm:$align);
+ let EncoderMethod = "getAddrMode6AddressOpValue";
+ let DecoderMethod = "DecodeAddrMode6Operand";
+}
+
+// Special version of addrmode6 to handle no allowed alignment encoding for
+// VLD/VST instructions and checking the alignment is not specified.
+def AddrMode6AlignNoneAsmOperand : AsmOperandClass {
+ let Name = "AlignedMemoryNone";
+ let DiagnosticType = "AlignedMemoryRequiresNone";
+}
+def addrmode6alignNone : AddrMode6Align {
+ // The alignment specifier can only be omitted.
+ let ParserMatchClass = AddrMode6AlignNoneAsmOperand;
+}
+
+// Special version of addrmode6 to handle 16-bit alignment encoding for
+// VLD/VST instructions and checking the alignment value.
+def AddrMode6Align16AsmOperand : AsmOperandClass {
+ let Name = "AlignedMemory16";
+ let DiagnosticType = "AlignedMemoryRequires16";
+}
+def addrmode6align16 : AddrMode6Align {
+ // The alignment specifier can only be 16 or omitted.
+ let ParserMatchClass = AddrMode6Align16AsmOperand;
+}
+
+// Special version of addrmode6 to handle 32-bit alignment encoding for
+// VLD/VST instructions and checking the alignment value.
+def AddrMode6Align32AsmOperand : AsmOperandClass {
+ let Name = "AlignedMemory32";
+ let DiagnosticType = "AlignedMemoryRequires32";
+}
+def addrmode6align32 : AddrMode6Align {
+ // The alignment specifier can only be 32 or omitted.
+ let ParserMatchClass = AddrMode6Align32AsmOperand;
+}
+
+// Special version of addrmode6 to handle 64-bit alignment encoding for
+// VLD/VST instructions and checking the alignment value.
+def AddrMode6Align64AsmOperand : AsmOperandClass {
+ let Name = "AlignedMemory64";
+ let DiagnosticType = "AlignedMemoryRequires64";
+}
+def addrmode6align64 : AddrMode6Align {
+ // The alignment specifier can only be 64 or omitted.
+ let ParserMatchClass = AddrMode6Align64AsmOperand;
+}
+
+// Special version of addrmode6 to handle 64-bit or 128-bit alignment encoding
+// for VLD/VST instructions and checking the alignment value.
+def AddrMode6Align64or128AsmOperand : AsmOperandClass {
+ let Name = "AlignedMemory64or128";
+ let DiagnosticType = "AlignedMemoryRequires64or128";
+}
+def addrmode6align64or128 : AddrMode6Align {
+ // The alignment specifier can only be 64, 128 or omitted.
+ let ParserMatchClass = AddrMode6Align64or128AsmOperand;
+}
+
+// Special version of addrmode6 to handle 64-bit, 128-bit or 256-bit alignment
+// encoding for VLD/VST instructions and checking the alignment value.
+def AddrMode6Align64or128or256AsmOperand : AsmOperandClass {
+ let Name = "AlignedMemory64or128or256";
+ let DiagnosticType = "AlignedMemoryRequires64or128or256";
+}
+def addrmode6align64or128or256 : AddrMode6Align {
+ // The alignment specifier can only be 64, 128, 256 or omitted.
+ let ParserMatchClass = AddrMode6Align64or128or256AsmOperand;
+}
+
// Special version of addrmode6 to handle alignment encoding for VLD-dup
// instructions, specifically VLD4-dup.
def addrmode6dup : Operand<i32>,
@@ -1003,6 +1078,69 @@ def addrmode6dup : Operand<i32>,
let ParserMatchClass = AddrMode6AsmOperand;
}
+// Base class for addrmode6dup with specific alignment restrictions.
+class AddrMode6DupAlign : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectAddrMode6", [], [SDNPWantParent]>{
+ let PrintMethod = "printAddrMode6Operand";
+ let MIOperandInfo = (ops GPR:$addr, i32imm);
+ let EncoderMethod = "getAddrMode6DupAddressOpValue";
+}
+
+// Special version of addrmode6 to handle no allowed alignment encoding for
+// VLD-dup instruction and checking the alignment is not specified.
+def AddrMode6dupAlignNoneAsmOperand : AsmOperandClass {
+ let Name = "DupAlignedMemoryNone";
+ let DiagnosticType = "DupAlignedMemoryRequiresNone";
+}
+def addrmode6dupalignNone : AddrMode6DupAlign {
+ // The alignment specifier can only be omitted.
+ let ParserMatchClass = AddrMode6dupAlignNoneAsmOperand;
+}
+
+// Special version of addrmode6 to handle 16-bit alignment encoding for VLD-dup
+// instruction and checking the alignment value.
+def AddrMode6dupAlign16AsmOperand : AsmOperandClass {
+ let Name = "DupAlignedMemory16";
+ let DiagnosticType = "DupAlignedMemoryRequires16";
+}
+def addrmode6dupalign16 : AddrMode6DupAlign {
+ // The alignment specifier can only be 16 or omitted.
+ let ParserMatchClass = AddrMode6dupAlign16AsmOperand;
+}
+
+// Special version of addrmode6 to handle 32-bit alignment encoding for VLD-dup
+// instruction and checking the alignment value.
+def AddrMode6dupAlign32AsmOperand : AsmOperandClass {
+ let Name = "DupAlignedMemory32";
+ let DiagnosticType = "DupAlignedMemoryRequires32";
+}
+def addrmode6dupalign32 : AddrMode6DupAlign {
+ // The alignment specifier can only be 32 or omitted.
+ let ParserMatchClass = AddrMode6dupAlign32AsmOperand;
+}
+
+// Special version of addrmode6 to handle 64-bit alignment encoding for VLD
+// instructions and checking the alignment value.
+def AddrMode6dupAlign64AsmOperand : AsmOperandClass {
+ let Name = "DupAlignedMemory64";
+ let DiagnosticType = "DupAlignedMemoryRequires64";
+}
+def addrmode6dupalign64 : AddrMode6DupAlign {
+ // The alignment specifier can only be 64 or omitted.
+ let ParserMatchClass = AddrMode6dupAlign64AsmOperand;
+}
+
+// Special version of addrmode6 to handle 64-bit or 128-bit alignment encoding
+// for VLD instructions and checking the alignment value.
+def AddrMode6dupAlign64or128AsmOperand : AsmOperandClass {
+ let Name = "DupAlignedMemory64or128";
+ let DiagnosticType = "DupAlignedMemoryRequires64or128";
+}
+def addrmode6dupalign64or128 : AddrMode6DupAlign {
+ // The alignment specifier can only be 64, 128 or omitted.
+ let ParserMatchClass = AddrMode6dupAlign64or128AsmOperand;
+}
+
// addrmodepc := pc + reg
//
def addrmodepc : Operand<i32>,
Modified: llvm/trunk/lib/Target/ARM/ARMInstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrNEON.td?rev=205986&r1=205985&r2=205986&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrNEON.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrNEON.td Thu Apr 10 15:18:58 2014
@@ -617,37 +617,37 @@ class VLDQQQQWBPseudo<InstrItinClass iti
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// VLD1 : Vector Load (multiple single elements)
-class VLD1D<bits<4> op7_4, string Dt>
+class VLD1D<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd),
- (ins addrmode6:$Rn), IIC_VLD1,
+ (ins AddrMode:$Rn), IIC_VLD1,
"vld1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-class VLD1Q<bits<4> op7_4, string Dt>
+class VLD1Q<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd),
- (ins addrmode6:$Rn), IIC_VLD1x2,
+ (ins AddrMode:$Rn), IIC_VLD1x2,
"vld1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-def VLD1d8 : VLD1D<{0,0,0,?}, "8">;
-def VLD1d16 : VLD1D<{0,1,0,?}, "16">;
-def VLD1d32 : VLD1D<{1,0,0,?}, "32">;
-def VLD1d64 : VLD1D<{1,1,0,?}, "64">;
-
-def VLD1q8 : VLD1Q<{0,0,?,?}, "8">;
-def VLD1q16 : VLD1Q<{0,1,?,?}, "16">;
-def VLD1q32 : VLD1Q<{1,0,?,?}, "32">;
-def VLD1q64 : VLD1Q<{1,1,?,?}, "64">;
+def VLD1d8 : VLD1D<{0,0,0,?}, "8", addrmode6align64>;
+def VLD1d16 : VLD1D<{0,1,0,?}, "16", addrmode6align64>;
+def VLD1d32 : VLD1D<{1,0,0,?}, "32", addrmode6align64>;
+def VLD1d64 : VLD1D<{1,1,0,?}, "64", addrmode6align64>;
+
+def VLD1q8 : VLD1Q<{0,0,?,?}, "8", addrmode6align64or128>;
+def VLD1q16 : VLD1Q<{0,1,?,?}, "16", addrmode6align64or128>;
+def VLD1q32 : VLD1Q<{1,0,?,?}, "32", addrmode6align64or128>;
+def VLD1q64 : VLD1Q<{1,1,?,?}, "64", addrmode6align64or128>;
// ...with address register writeback:
-multiclass VLD1DWB<bits<4> op7_4, string Dt> {
+multiclass VLD1DWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b10, 0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn), IIC_VLD1u,
+ (ins AddrMode:$Rn), IIC_VLD1u,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -655,16 +655,16 @@ multiclass VLD1DWB<bits<4> op7_4, string
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1u,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
}
-multiclass VLD1QWB<bits<4> op7_4, string Dt> {
+multiclass VLD1QWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd, GPR:$wb),
- (ins addrmode6:$Rn), IIC_VLD1x2u,
+ (ins AddrMode:$Rn), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -672,7 +672,7 @@ multiclass VLD1QWB<bits<4> op7_4, string
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd, GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
@@ -680,27 +680,27 @@ multiclass VLD1QWB<bits<4> op7_4, string
}
}
-defm VLD1d8wb : VLD1DWB<{0,0,0,?}, "8">;
-defm VLD1d16wb : VLD1DWB<{0,1,0,?}, "16">;
-defm VLD1d32wb : VLD1DWB<{1,0,0,?}, "32">;
-defm VLD1d64wb : VLD1DWB<{1,1,0,?}, "64">;
-defm VLD1q8wb : VLD1QWB<{0,0,?,?}, "8">;
-defm VLD1q16wb : VLD1QWB<{0,1,?,?}, "16">;
-defm VLD1q32wb : VLD1QWB<{1,0,?,?}, "32">;
-defm VLD1q64wb : VLD1QWB<{1,1,?,?}, "64">;
+defm VLD1d8wb : VLD1DWB<{0,0,0,?}, "8", addrmode6align64>;
+defm VLD1d16wb : VLD1DWB<{0,1,0,?}, "16", addrmode6align64>;
+defm VLD1d32wb : VLD1DWB<{1,0,0,?}, "32", addrmode6align64>;
+defm VLD1d64wb : VLD1DWB<{1,1,0,?}, "64", addrmode6align64>;
+defm VLD1q8wb : VLD1QWB<{0,0,?,?}, "8", addrmode6align64or128>;
+defm VLD1q16wb : VLD1QWB<{0,1,?,?}, "16", addrmode6align64or128>;
+defm VLD1q32wb : VLD1QWB<{1,0,?,?}, "32", addrmode6align64or128>;
+defm VLD1q64wb : VLD1QWB<{1,1,?,?}, "64", addrmode6align64or128>;
// ...with 3 registers
-class VLD1D3<bits<4> op7_4, string Dt>
+class VLD1D3<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd),
- (ins addrmode6:$Rn), IIC_VLD1x3, "vld1", Dt,
+ (ins AddrMode:$Rn), IIC_VLD1x3, "vld1", Dt,
"$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-multiclass VLD1D3WB<bits<4> op7_4, string Dt> {
+multiclass VLD1D3WB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b10,0b0110, op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn), IIC_VLD1x2u,
+ (ins AddrMode:$Rn), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -708,7 +708,7 @@ multiclass VLD1D3WB<bits<4> op7_4, strin
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
@@ -716,32 +716,32 @@ multiclass VLD1D3WB<bits<4> op7_4, strin
}
}
-def VLD1d8T : VLD1D3<{0,0,0,?}, "8">;
-def VLD1d16T : VLD1D3<{0,1,0,?}, "16">;
-def VLD1d32T : VLD1D3<{1,0,0,?}, "32">;
-def VLD1d64T : VLD1D3<{1,1,0,?}, "64">;
-
-defm VLD1d8Twb : VLD1D3WB<{0,0,0,?}, "8">;
-defm VLD1d16Twb : VLD1D3WB<{0,1,0,?}, "16">;
-defm VLD1d32Twb : VLD1D3WB<{1,0,0,?}, "32">;
-defm VLD1d64Twb : VLD1D3WB<{1,1,0,?}, "64">;
+def VLD1d8T : VLD1D3<{0,0,0,?}, "8", addrmode6align64>;
+def VLD1d16T : VLD1D3<{0,1,0,?}, "16", addrmode6align64>;
+def VLD1d32T : VLD1D3<{1,0,0,?}, "32", addrmode6align64>;
+def VLD1d64T : VLD1D3<{1,1,0,?}, "64", addrmode6align64>;
+
+defm VLD1d8Twb : VLD1D3WB<{0,0,0,?}, "8", addrmode6align64>;
+defm VLD1d16Twb : VLD1D3WB<{0,1,0,?}, "16", addrmode6align64>;
+defm VLD1d32Twb : VLD1D3WB<{1,0,0,?}, "32", addrmode6align64>;
+defm VLD1d64Twb : VLD1D3WB<{1,1,0,?}, "64", addrmode6align64>;
def VLD1d64TPseudo : VLDQQPseudo<IIC_VLD1x3>;
def VLD1d64TPseudoWB_fixed : VLDQQWBfixedPseudo<IIC_VLD1x3>;
def VLD1d64TPseudoWB_register : VLDQQWBregisterPseudo<IIC_VLD1x3>;
// ...with 4 registers
-class VLD1D4<bits<4> op7_4, string Dt>
+class VLD1D4<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0, 0b10, 0b0010, op7_4, (outs VecListFourD:$Vd),
- (ins addrmode6:$Rn), IIC_VLD1x4, "vld1", Dt,
+ (ins AddrMode:$Rn), IIC_VLD1x4, "vld1", Dt,
"$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-multiclass VLD1D4WB<bits<4> op7_4, string Dt> {
+multiclass VLD1D4WB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b10,0b0010, op7_4, (outs VecListFourD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn), IIC_VLD1x2u,
+ (ins AddrMode:$Rn), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -749,7 +749,7 @@ multiclass VLD1D4WB<bits<4> op7_4, strin
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b10,0b0010,op7_4, (outs VecListFourD:$Vd, GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1x2u,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
@@ -757,15 +757,15 @@ multiclass VLD1D4WB<bits<4> op7_4, strin
}
}
-def VLD1d8Q : VLD1D4<{0,0,?,?}, "8">;
-def VLD1d16Q : VLD1D4<{0,1,?,?}, "16">;
-def VLD1d32Q : VLD1D4<{1,0,?,?}, "32">;
-def VLD1d64Q : VLD1D4<{1,1,?,?}, "64">;
-
-defm VLD1d8Qwb : VLD1D4WB<{0,0,?,?}, "8">;
-defm VLD1d16Qwb : VLD1D4WB<{0,1,?,?}, "16">;
-defm VLD1d32Qwb : VLD1D4WB<{1,0,?,?}, "32">;
-defm VLD1d64Qwb : VLD1D4WB<{1,1,?,?}, "64">;
+def VLD1d8Q : VLD1D4<{0,0,?,?}, "8", addrmode6align64or128or256>;
+def VLD1d16Q : VLD1D4<{0,1,?,?}, "16", addrmode6align64or128or256>;
+def VLD1d32Q : VLD1D4<{1,0,?,?}, "32", addrmode6align64or128or256>;
+def VLD1d64Q : VLD1D4<{1,1,?,?}, "64", addrmode6align64or128or256>;
+
+defm VLD1d8Qwb : VLD1D4WB<{0,0,?,?}, "8", addrmode6align64or128or256>;
+defm VLD1d16Qwb : VLD1D4WB<{0,1,?,?}, "16", addrmode6align64or128or256>;
+defm VLD1d32Qwb : VLD1D4WB<{1,0,?,?}, "32", addrmode6align64or128or256>;
+defm VLD1d64Qwb : VLD1D4WB<{1,1,?,?}, "64", addrmode6align64or128or256>;
def VLD1d64QPseudo : VLDQQPseudo<IIC_VLD1x4>;
def VLD1d64QPseudoWB_fixed : VLDQQWBfixedPseudo<IIC_VLD1x4>;
@@ -773,22 +773,28 @@ def VLD1d64QPseudoWB_register : VLDQQWBr
// VLD2 : Vector Load (multiple 2-element structures)
class VLD2<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy,
- InstrItinClass itin>
+ InstrItinClass itin, Operand AddrMode>
: NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd),
- (ins addrmode6:$Rn), itin,
+ (ins AddrMode:$Rn), itin,
"vld2", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST2Instruction";
}
-def VLD2d8 : VLD2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VLD2>;
-def VLD2d16 : VLD2<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VLD2>;
-def VLD2d32 : VLD2<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VLD2>;
-
-def VLD2q8 : VLD2<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VLD2x2>;
-def VLD2q16 : VLD2<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VLD2x2>;
-def VLD2q32 : VLD2<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VLD2x2>;
+def VLD2d8 : VLD2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VLD2,
+ addrmode6align64or128>;
+def VLD2d16 : VLD2<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VLD2,
+ addrmode6align64or128>;
+def VLD2d32 : VLD2<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VLD2,
+ addrmode6align64or128>;
+
+def VLD2q8 : VLD2<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VLD2x2,
+ addrmode6align64or128or256>;
+def VLD2q16 : VLD2<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VLD2x2,
+ addrmode6align64or128or256>;
+def VLD2q32 : VLD2<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VLD2x2,
+ addrmode6align64or128or256>;
def VLD2q8Pseudo : VLDQQPseudo<IIC_VLD2x2>;
def VLD2q16Pseudo : VLDQQPseudo<IIC_VLD2x2>;
@@ -796,9 +802,9 @@ def VLD2q32Pseudo : VLDQQPseudo<IIC_VLD
// ...with address register writeback:
multiclass VLD2WB<bits<4> op11_8, bits<4> op7_4, string Dt,
- RegisterOperand VdTy, InstrItinClass itin> {
+ RegisterOperand VdTy, InstrItinClass itin, Operand AddrMode> {
def _fixed : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd, GPR:$wb),
- (ins addrmode6:$Rn), itin,
+ (ins AddrMode:$Rn), itin,
"vld2", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -806,7 +812,7 @@ multiclass VLD2WB<bits<4> op11_8, bits<4
let DecoderMethod = "DecodeVLDST2Instruction";
}
def _register : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd, GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm), itin,
+ (ins AddrMode:$Rn, rGPR:$Rm), itin,
"vld2", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
@@ -814,13 +820,19 @@ multiclass VLD2WB<bits<4> op11_8, bits<4
}
}
-defm VLD2d8wb : VLD2WB<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VLD2u>;
-defm VLD2d16wb : VLD2WB<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VLD2u>;
-defm VLD2d32wb : VLD2WB<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VLD2u>;
-
-defm VLD2q8wb : VLD2WB<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VLD2x2u>;
-defm VLD2q16wb : VLD2WB<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VLD2x2u>;
-defm VLD2q32wb : VLD2WB<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VLD2x2u>;
+defm VLD2d8wb : VLD2WB<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VLD2u,
+ addrmode6align64or128>;
+defm VLD2d16wb : VLD2WB<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VLD2u,
+ addrmode6align64or128>;
+defm VLD2d32wb : VLD2WB<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VLD2u,
+ addrmode6align64or128>;
+
+defm VLD2q8wb : VLD2WB<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VLD2x2u,
+ addrmode6align64or128or256>;
+defm VLD2q16wb : VLD2WB<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VLD2x2u,
+ addrmode6align64or128or256>;
+defm VLD2q32wb : VLD2WB<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VLD2x2u,
+ addrmode6align64or128or256>;
def VLD2q8PseudoWB_fixed : VLDQQWBfixedPseudo<IIC_VLD2x2u>;
def VLD2q16PseudoWB_fixed : VLDQQWBfixedPseudo<IIC_VLD2x2u>;
@@ -830,12 +842,18 @@ def VLD2q16PseudoWB_register : VLDQQWBre
def VLD2q32PseudoWB_register : VLDQQWBregisterPseudo<IIC_VLD2x2u>;
// ...with double-spaced registers
-def VLD2b8 : VLD2<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VLD2>;
-def VLD2b16 : VLD2<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VLD2>;
-def VLD2b32 : VLD2<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VLD2>;
-defm VLD2b8wb : VLD2WB<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VLD2u>;
-defm VLD2b16wb : VLD2WB<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VLD2u>;
-defm VLD2b32wb : VLD2WB<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VLD2u>;
+def VLD2b8 : VLD2<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VLD2,
+ addrmode6align64or128>;
+def VLD2b16 : VLD2<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VLD2,
+ addrmode6align64or128>;
+def VLD2b32 : VLD2<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VLD2,
+ addrmode6align64or128>;
+defm VLD2b8wb : VLD2WB<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VLD2u,
+ addrmode6align64or128>;
+defm VLD2b16wb : VLD2WB<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VLD2u,
+ addrmode6align64or128>;
+defm VLD2b32wb : VLD2WB<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VLD2u,
+ addrmode6align64or128>;
// VLD3 : Vector Load (multiple 3-element structures)
class VLD3D<bits<4> op11_8, bits<4> op7_4, string Dt>
@@ -1293,47 +1311,55 @@ def VLD4LNq32Pseudo_UPD : VLDQQQQLNWBPse
} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
// VLD1DUP : Vector Load (single element to all lanes)
-class VLD1DUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp>
+class VLD1DUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp,
+ Operand AddrMode>
: NLdSt<1, 0b10, 0b1100, op7_4, (outs VecListOneDAllLanes:$Vd),
- (ins addrmode6dup:$Rn),
+ (ins AddrMode:$Rn),
IIC_VLD1dup, "vld1", Dt, "$Vd, $Rn", "",
[(set VecListOneDAllLanes:$Vd,
- (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$Rn)))))]> {
+ (Ty (NEONvdup (i32 (LoadOp AddrMode:$Rn)))))]> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
}
-def VLD1DUPd8 : VLD1DUP<{0,0,0,?}, "8", v8i8, extloadi8>;
-def VLD1DUPd16 : VLD1DUP<{0,1,0,?}, "16", v4i16, extloadi16>;
-def VLD1DUPd32 : VLD1DUP<{1,0,0,?}, "32", v2i32, load>;
+def VLD1DUPd8 : VLD1DUP<{0,0,0,?}, "8", v8i8, extloadi8,
+ addrmode6dupalignNone>;
+def VLD1DUPd16 : VLD1DUP<{0,1,0,?}, "16", v4i16, extloadi16,
+ addrmode6dupalign16>;
+def VLD1DUPd32 : VLD1DUP<{1,0,0,?}, "32", v2i32, load,
+ addrmode6dupalign32>;
def : Pat<(v2f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
(VLD1DUPd32 addrmode6:$addr)>;
-class VLD1QDUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp>
+class VLD1QDUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp,
+ Operand AddrMode>
: NLdSt<1, 0b10, 0b1100, op7_4, (outs VecListDPairAllLanes:$Vd),
- (ins addrmode6dup:$Rn), IIC_VLD1dup,
+ (ins AddrMode:$Rn), IIC_VLD1dup,
"vld1", Dt, "$Vd, $Rn", "",
[(set VecListDPairAllLanes:$Vd,
- (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$Rn)))))]> {
+ (Ty (NEONvdup (i32 (LoadOp AddrMode:$Rn)))))]> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
}
-def VLD1DUPq8 : VLD1QDUP<{0,0,1,0}, "8", v16i8, extloadi8>;
-def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16", v8i16, extloadi16>;
-def VLD1DUPq32 : VLD1QDUP<{1,0,1,?}, "32", v4i32, load>;
+def VLD1DUPq8 : VLD1QDUP<{0,0,1,0}, "8", v16i8, extloadi8,
+ addrmode6dupalignNone>;
+def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16", v8i16, extloadi16,
+ addrmode6dupalign16>;
+def VLD1DUPq32 : VLD1QDUP<{1,0,1,?}, "32", v4i32, load,
+ addrmode6dupalign32>;
def : Pat<(v4f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
(VLD1DUPq32 addrmode6:$addr)>;
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// ...with address register writeback:
-multiclass VLD1DUPWB<bits<4> op7_4, string Dt> {
+multiclass VLD1DUPWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<1, 0b10, 0b1100, op7_4,
(outs VecListOneDAllLanes:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn), IIC_VLD1dupu,
+ (ins AddrMode:$Rn), IIC_VLD1dupu,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1342,17 +1368,17 @@ multiclass VLD1DUPWB<bits<4> op7_4, stri
}
def _register : NLdSt<1, 0b10, 0b1100, op7_4,
(outs VecListOneDAllLanes:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn, rGPR:$Rm), IIC_VLD1dupu,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1dupu,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
}
}
-multiclass VLD1QDUPWB<bits<4> op7_4, string Dt> {
+multiclass VLD1QDUPWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<1, 0b10, 0b1100, op7_4,
(outs VecListDPairAllLanes:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn), IIC_VLD1dupu,
+ (ins AddrMode:$Rn), IIC_VLD1dupu,
"vld1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1361,7 +1387,7 @@ multiclass VLD1QDUPWB<bits<4> op7_4, str
}
def _register : NLdSt<1, 0b10, 0b1100, op7_4,
(outs VecListDPairAllLanes:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn, rGPR:$Rm), IIC_VLD1dupu,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD1dupu,
"vld1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
@@ -1369,38 +1395,47 @@ multiclass VLD1QDUPWB<bits<4> op7_4, str
}
}
-defm VLD1DUPd8wb : VLD1DUPWB<{0,0,0,0}, "8">;
-defm VLD1DUPd16wb : VLD1DUPWB<{0,1,0,?}, "16">;
-defm VLD1DUPd32wb : VLD1DUPWB<{1,0,0,?}, "32">;
-
-defm VLD1DUPq8wb : VLD1QDUPWB<{0,0,1,0}, "8">;
-defm VLD1DUPq16wb : VLD1QDUPWB<{0,1,1,?}, "16">;
-defm VLD1DUPq32wb : VLD1QDUPWB<{1,0,1,?}, "32">;
+defm VLD1DUPd8wb : VLD1DUPWB<{0,0,0,0}, "8", addrmode6dupalignNone>;
+defm VLD1DUPd16wb : VLD1DUPWB<{0,1,0,?}, "16", addrmode6dupalign16>;
+defm VLD1DUPd32wb : VLD1DUPWB<{1,0,0,?}, "32", addrmode6dupalign32>;
+
+defm VLD1DUPq8wb : VLD1QDUPWB<{0,0,1,0}, "8", addrmode6dupalignNone>;
+defm VLD1DUPq16wb : VLD1QDUPWB<{0,1,1,?}, "16", addrmode6dupalign16>;
+defm VLD1DUPq32wb : VLD1QDUPWB<{1,0,1,?}, "32", addrmode6dupalign32>;
// VLD2DUP : Vector Load (single 2-element structure to all lanes)
-class VLD2DUP<bits<4> op7_4, string Dt, RegisterOperand VdTy>
+class VLD2DUP<bits<4> op7_4, string Dt, RegisterOperand VdTy, Operand AddrMode>
: NLdSt<1, 0b10, 0b1101, op7_4, (outs VdTy:$Vd),
- (ins addrmode6dup:$Rn), IIC_VLD2dup,
+ (ins AddrMode:$Rn), IIC_VLD2dup,
"vld2", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD2DupInstruction";
}
-def VLD2DUPd8 : VLD2DUP<{0,0,0,?}, "8", VecListDPairAllLanes>;
-def VLD2DUPd16 : VLD2DUP<{0,1,0,?}, "16", VecListDPairAllLanes>;
-def VLD2DUPd32 : VLD2DUP<{1,0,0,?}, "32", VecListDPairAllLanes>;
+def VLD2DUPd8 : VLD2DUP<{0,0,0,?}, "8", VecListDPairAllLanes,
+ addrmode6dupalign16>;
+def VLD2DUPd16 : VLD2DUP<{0,1,0,?}, "16", VecListDPairAllLanes,
+ addrmode6dupalign32>;
+def VLD2DUPd32 : VLD2DUP<{1,0,0,?}, "32", VecListDPairAllLanes,
+ addrmode6dupalign64>;
+// HACK this one, VLD2DUPd8x2 must be changed at the same time with VLD2b8 or
+// "vld2.8 {d0[], d2[]}, [r4:32]" will become "vld2.8 {d0, d2}, [r4:32]".
// ...with double-spaced registers
-def VLD2DUPd8x2 : VLD2DUP<{0,0,1,?}, "8", VecListDPairSpacedAllLanes>;
-def VLD2DUPd16x2 : VLD2DUP<{0,1,1,?}, "16", VecListDPairSpacedAllLanes>;
-def VLD2DUPd32x2 : VLD2DUP<{1,0,1,?}, "32", VecListDPairSpacedAllLanes>;
+def VLD2DUPd8x2 : VLD2DUP<{0,0,1,?}, "8", VecListDPairSpacedAllLanes,
+ addrmode6dupalign16>;
+def VLD2DUPd16x2 : VLD2DUP<{0,1,1,?}, "16", VecListDPairSpacedAllLanes,
+ addrmode6dupalign32>;
+def VLD2DUPd32x2 : VLD2DUP<{1,0,1,?}, "32", VecListDPairSpacedAllLanes,
+ addrmode6dupalign64>;
// ...with address register writeback:
-multiclass VLD2DUPWB<bits<4> op7_4, string Dt, RegisterOperand VdTy> {
+multiclass VLD2DUPWB<bits<4> op7_4, string Dt, RegisterOperand VdTy,
+ Operand AddrMode> {
def _fixed : NLdSt<1, 0b10, 0b1101, op7_4,
(outs VdTy:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn), IIC_VLD2dupu,
+ (ins AddrMode:$Rn), IIC_VLD2dupu,
"vld2", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1409,7 +1444,7 @@ multiclass VLD2DUPWB<bits<4> op7_4, stri
}
def _register : NLdSt<1, 0b10, 0b1101, op7_4,
(outs VdTy:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn, rGPR:$Rm), IIC_VLD2dupu,
+ (ins AddrMode:$Rn, rGPR:$Rm), IIC_VLD2dupu,
"vld2", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
@@ -1417,13 +1452,19 @@ multiclass VLD2DUPWB<bits<4> op7_4, stri
}
}
-defm VLD2DUPd8wb : VLD2DUPWB<{0,0,0,0}, "8", VecListDPairAllLanes>;
-defm VLD2DUPd16wb : VLD2DUPWB<{0,1,0,?}, "16", VecListDPairAllLanes>;
-defm VLD2DUPd32wb : VLD2DUPWB<{1,0,0,?}, "32", VecListDPairAllLanes>;
-
-defm VLD2DUPd8x2wb : VLD2DUPWB<{0,0,1,0}, "8", VecListDPairSpacedAllLanes>;
-defm VLD2DUPd16x2wb : VLD2DUPWB<{0,1,1,?}, "16", VecListDPairSpacedAllLanes>;
-defm VLD2DUPd32x2wb : VLD2DUPWB<{1,0,1,?}, "32", VecListDPairSpacedAllLanes>;
+defm VLD2DUPd8wb : VLD2DUPWB<{0,0,0,0}, "8", VecListDPairAllLanes,
+ addrmode6dupalign16>;
+defm VLD2DUPd16wb : VLD2DUPWB<{0,1,0,?}, "16", VecListDPairAllLanes,
+ addrmode6dupalign32>;
+defm VLD2DUPd32wb : VLD2DUPWB<{1,0,0,?}, "32", VecListDPairAllLanes,
+ addrmode6dupalign64>;
+
+defm VLD2DUPd8x2wb : VLD2DUPWB<{0,0,1,0}, "8", VecListDPairSpacedAllLanes,
+ addrmode6dupalign16>;
+defm VLD2DUPd16x2wb : VLD2DUPWB<{0,1,1,?}, "16", VecListDPairSpacedAllLanes,
+ addrmode6dupalign32>;
+defm VLD2DUPd32x2wb : VLD2DUPWB<{1,0,1,?}, "32", VecListDPairSpacedAllLanes,
+ addrmode6dupalign64>;
// VLD3DUP : Vector Load (single 3-element structure to all lanes)
class VLD3DUP<bits<4> op7_4, string Dt>
@@ -1449,22 +1490,22 @@ def VLD3DUPq16 : VLD3DUP<{0,1,1,?}, "16"
def VLD3DUPq32 : VLD3DUP<{1,0,1,?}, "32">;
// ...with address register writeback:
-class VLD3DUPWB<bits<4> op7_4, string Dt>
+class VLD3DUPWB<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<1, 0b10, 0b1110, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
- (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD3dupu,
+ (ins AddrMode:$Rn, am6offset:$Rm), IIC_VLD3dupu,
"vld3", Dt, "\\{$Vd[], $dst2[], $dst3[]\\}, $Rn$Rm",
"$Rn.addr = $wb", []> {
let Inst{4} = 0;
let DecoderMethod = "DecodeVLD3DupInstruction";
}
-def VLD3DUPd8_UPD : VLD3DUPWB<{0,0,0,0}, "8">;
-def VLD3DUPd16_UPD : VLD3DUPWB<{0,1,0,?}, "16">;
-def VLD3DUPd32_UPD : VLD3DUPWB<{1,0,0,?}, "32">;
-
-def VLD3DUPq8_UPD : VLD3DUPWB<{0,0,1,0}, "8">;
-def VLD3DUPq16_UPD : VLD3DUPWB<{0,1,1,?}, "16">;
-def VLD3DUPq32_UPD : VLD3DUPWB<{1,0,1,?}, "32">;
+def VLD3DUPd8_UPD : VLD3DUPWB<{0,0,0,0}, "8", addrmode6dupalign64>;
+def VLD3DUPd16_UPD : VLD3DUPWB<{0,1,0,?}, "16", addrmode6dupalign64>;
+def VLD3DUPd32_UPD : VLD3DUPWB<{1,0,0,?}, "32", addrmode6dupalign64>;
+
+def VLD3DUPq8_UPD : VLD3DUPWB<{0,0,1,0}, "8", addrmode6dupalign64>;
+def VLD3DUPq16_UPD : VLD3DUPWB<{0,1,1,?}, "16", addrmode6dupalign64>;
+def VLD3DUPq32_UPD : VLD3DUPWB<{1,0,1,?}, "32", addrmode6dupalign64>;
def VLD3DUPd8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
def VLD3DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
@@ -1560,35 +1601,35 @@ class VSTQQQQWBPseudo<InstrItinClass iti
"$addr.addr = $wb">;
// VST1 : Vector Store (multiple single elements)
-class VST1D<bits<4> op7_4, string Dt>
- : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins addrmode6:$Rn, VecListOneD:$Vd),
+class VST1D<bits<4> op7_4, string Dt, Operand AddrMode>
+ : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins AddrMode:$Rn, VecListOneD:$Vd),
IIC_VST1, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-class VST1Q<bits<4> op7_4, string Dt>
- : NLdSt<0,0b00,0b1010,op7_4, (outs), (ins addrmode6:$Rn, VecListDPair:$Vd),
+class VST1Q<bits<4> op7_4, string Dt, Operand AddrMode>
+ : NLdSt<0,0b00,0b1010,op7_4, (outs), (ins AddrMode:$Rn, VecListDPair:$Vd),
IIC_VST1x2, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-def VST1d8 : VST1D<{0,0,0,?}, "8">;
-def VST1d16 : VST1D<{0,1,0,?}, "16">;
-def VST1d32 : VST1D<{1,0,0,?}, "32">;
-def VST1d64 : VST1D<{1,1,0,?}, "64">;
-
-def VST1q8 : VST1Q<{0,0,?,?}, "8">;
-def VST1q16 : VST1Q<{0,1,?,?}, "16">;
-def VST1q32 : VST1Q<{1,0,?,?}, "32">;
-def VST1q64 : VST1Q<{1,1,?,?}, "64">;
+def VST1d8 : VST1D<{0,0,0,?}, "8", addrmode6align64>;
+def VST1d16 : VST1D<{0,1,0,?}, "16", addrmode6align64>;
+def VST1d32 : VST1D<{1,0,0,?}, "32", addrmode6align64>;
+def VST1d64 : VST1D<{1,1,0,?}, "64", addrmode6align64>;
+
+def VST1q8 : VST1Q<{0,0,?,?}, "8", addrmode6align64or128>;
+def VST1q16 : VST1Q<{0,1,?,?}, "16", addrmode6align64or128>;
+def VST1q32 : VST1Q<{1,0,?,?}, "32", addrmode6align64or128>;
+def VST1q64 : VST1Q<{1,1,?,?}, "64", addrmode6align64or128>;
// ...with address register writeback:
-multiclass VST1DWB<bits<4> op7_4, string Dt> {
+multiclass VST1DWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b00, 0b0111,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VecListOneD:$Vd), IIC_VLD1u,
+ (ins AddrMode:$Rn, VecListOneD:$Vd), IIC_VLD1u,
"vst1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1596,7 +1637,7 @@ multiclass VST1DWB<bits<4> op7_4, string
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b0111,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VecListOneD:$Vd),
+ (ins AddrMode:$Rn, rGPR:$Rm, VecListOneD:$Vd),
IIC_VLD1u,
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
@@ -1604,9 +1645,9 @@ multiclass VST1DWB<bits<4> op7_4, string
let DecoderMethod = "DecodeVLDST1Instruction";
}
}
-multiclass VST1QWB<bits<4> op7_4, string Dt> {
+multiclass VST1QWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VecListDPair:$Vd), IIC_VLD1x2u,
+ (ins AddrMode:$Rn, VecListDPair:$Vd), IIC_VLD1x2u,
"vst1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1614,7 +1655,7 @@ multiclass VST1QWB<bits<4> op7_4, string
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VecListDPair:$Vd),
+ (ins AddrMode:$Rn, rGPR:$Rm, VecListDPair:$Vd),
IIC_VLD1x2u,
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
@@ -1623,28 +1664,28 @@ multiclass VST1QWB<bits<4> op7_4, string
}
}
-defm VST1d8wb : VST1DWB<{0,0,0,?}, "8">;
-defm VST1d16wb : VST1DWB<{0,1,0,?}, "16">;
-defm VST1d32wb : VST1DWB<{1,0,0,?}, "32">;
-defm VST1d64wb : VST1DWB<{1,1,0,?}, "64">;
-
-defm VST1q8wb : VST1QWB<{0,0,?,?}, "8">;
-defm VST1q16wb : VST1QWB<{0,1,?,?}, "16">;
-defm VST1q32wb : VST1QWB<{1,0,?,?}, "32">;
-defm VST1q64wb : VST1QWB<{1,1,?,?}, "64">;
+defm VST1d8wb : VST1DWB<{0,0,0,?}, "8", addrmode6align64>;
+defm VST1d16wb : VST1DWB<{0,1,0,?}, "16", addrmode6align64>;
+defm VST1d32wb : VST1DWB<{1,0,0,?}, "32", addrmode6align64>;
+defm VST1d64wb : VST1DWB<{1,1,0,?}, "64", addrmode6align64>;
+
+defm VST1q8wb : VST1QWB<{0,0,?,?}, "8", addrmode6align64or128>;
+defm VST1q16wb : VST1QWB<{0,1,?,?}, "16", addrmode6align64or128>;
+defm VST1q32wb : VST1QWB<{1,0,?,?}, "32", addrmode6align64or128>;
+defm VST1q64wb : VST1QWB<{1,1,?,?}, "64", addrmode6align64or128>;
// ...with 3 registers
-class VST1D3<bits<4> op7_4, string Dt>
+class VST1D3<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0, 0b00, 0b0110, op7_4, (outs),
- (ins addrmode6:$Rn, VecListThreeD:$Vd),
+ (ins AddrMode:$Rn, VecListThreeD:$Vd),
IIC_VST1x3, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-multiclass VST1D3WB<bits<4> op7_4, string Dt> {
+multiclass VST1D3WB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b00,0b0110,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VecListThreeD:$Vd), IIC_VLD1x3u,
+ (ins AddrMode:$Rn, VecListThreeD:$Vd), IIC_VLD1x3u,
"vst1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1652,7 +1693,7 @@ multiclass VST1D3WB<bits<4> op7_4, strin
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b0110,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VecListThreeD:$Vd),
+ (ins AddrMode:$Rn, rGPR:$Rm, VecListThreeD:$Vd),
IIC_VLD1x3u,
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
@@ -1661,33 +1702,33 @@ multiclass VST1D3WB<bits<4> op7_4, strin
}
}
-def VST1d8T : VST1D3<{0,0,0,?}, "8">;
-def VST1d16T : VST1D3<{0,1,0,?}, "16">;
-def VST1d32T : VST1D3<{1,0,0,?}, "32">;
-def VST1d64T : VST1D3<{1,1,0,?}, "64">;
-
-defm VST1d8Twb : VST1D3WB<{0,0,0,?}, "8">;
-defm VST1d16Twb : VST1D3WB<{0,1,0,?}, "16">;
-defm VST1d32Twb : VST1D3WB<{1,0,0,?}, "32">;
-defm VST1d64Twb : VST1D3WB<{1,1,0,?}, "64">;
+def VST1d8T : VST1D3<{0,0,0,?}, "8", addrmode6align64>;
+def VST1d16T : VST1D3<{0,1,0,?}, "16", addrmode6align64>;
+def VST1d32T : VST1D3<{1,0,0,?}, "32", addrmode6align64>;
+def VST1d64T : VST1D3<{1,1,0,?}, "64", addrmode6align64>;
+
+defm VST1d8Twb : VST1D3WB<{0,0,0,?}, "8", addrmode6align64>;
+defm VST1d16Twb : VST1D3WB<{0,1,0,?}, "16", addrmode6align64>;
+defm VST1d32Twb : VST1D3WB<{1,0,0,?}, "32", addrmode6align64>;
+defm VST1d64Twb : VST1D3WB<{1,1,0,?}, "64", addrmode6align64>;
def VST1d64TPseudo : VSTQQPseudo<IIC_VST1x3>;
def VST1d64TPseudoWB_fixed : VSTQQWBfixedPseudo<IIC_VST1x3u>;
def VST1d64TPseudoWB_register : VSTQQWBPseudo<IIC_VST1x3u>;
// ...with 4 registers
-class VST1D4<bits<4> op7_4, string Dt>
+class VST1D4<bits<4> op7_4, string Dt, Operand AddrMode>
: NLdSt<0, 0b00, 0b0010, op7_4, (outs),
- (ins addrmode6:$Rn, VecListFourD:$Vd),
+ (ins AddrMode:$Rn, VecListFourD:$Vd),
IIC_VST1x4, "vst1", Dt, "$Vd, $Rn", "",
[]> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST1Instruction";
}
-multiclass VST1D4WB<bits<4> op7_4, string Dt> {
+multiclass VST1D4WB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0,0b00,0b0010,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VecListFourD:$Vd), IIC_VLD1x4u,
+ (ins AddrMode:$Rn, VecListFourD:$Vd), IIC_VLD1x4u,
"vst1", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1695,7 +1736,7 @@ multiclass VST1D4WB<bits<4> op7_4, strin
let DecoderMethod = "DecodeVLDST1Instruction";
}
def _register : NLdSt<0,0b00,0b0010,op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VecListFourD:$Vd),
+ (ins AddrMode:$Rn, rGPR:$Rm, VecListFourD:$Vd),
IIC_VLD1x4u,
"vst1", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
@@ -1704,15 +1745,15 @@ multiclass VST1D4WB<bits<4> op7_4, strin
}
}
-def VST1d8Q : VST1D4<{0,0,?,?}, "8">;
-def VST1d16Q : VST1D4<{0,1,?,?}, "16">;
-def VST1d32Q : VST1D4<{1,0,?,?}, "32">;
-def VST1d64Q : VST1D4<{1,1,?,?}, "64">;
-
-defm VST1d8Qwb : VST1D4WB<{0,0,?,?}, "8">;
-defm VST1d16Qwb : VST1D4WB<{0,1,?,?}, "16">;
-defm VST1d32Qwb : VST1D4WB<{1,0,?,?}, "32">;
-defm VST1d64Qwb : VST1D4WB<{1,1,?,?}, "64">;
+def VST1d8Q : VST1D4<{0,0,?,?}, "8", addrmode6align64or128or256>;
+def VST1d16Q : VST1D4<{0,1,?,?}, "16", addrmode6align64or128or256>;
+def VST1d32Q : VST1D4<{1,0,?,?}, "32", addrmode6align64or128or256>;
+def VST1d64Q : VST1D4<{1,1,?,?}, "64", addrmode6align64or128or256>;
+
+defm VST1d8Qwb : VST1D4WB<{0,0,?,?}, "8", addrmode6align64or128or256>;
+defm VST1d16Qwb : VST1D4WB<{0,1,?,?}, "16", addrmode6align64or128or256>;
+defm VST1d32Qwb : VST1D4WB<{1,0,?,?}, "32", addrmode6align64or128or256>;
+defm VST1d64Qwb : VST1D4WB<{1,1,?,?}, "64", addrmode6align64or128or256>;
def VST1d64QPseudo : VSTQQPseudo<IIC_VST1x4>;
def VST1d64QPseudoWB_fixed : VSTQQWBfixedPseudo<IIC_VST1x4u>;
@@ -1720,21 +1761,27 @@ def VST1d64QPseudoWB_register : VSTQQWBP
// VST2 : Vector Store (multiple 2-element structures)
class VST2<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy,
- InstrItinClass itin>
- : NLdSt<0, 0b00, op11_8, op7_4, (outs), (ins addrmode6:$Rn, VdTy:$Vd),
+ InstrItinClass itin, Operand AddrMode>
+ : NLdSt<0, 0b00, op11_8, op7_4, (outs), (ins AddrMode:$Rn, VdTy:$Vd),
itin, "vst2", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST2Instruction";
}
-def VST2d8 : VST2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VST2>;
-def VST2d16 : VST2<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VST2>;
-def VST2d32 : VST2<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VST2>;
-
-def VST2q8 : VST2<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VST2x2>;
-def VST2q16 : VST2<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VST2x2>;
-def VST2q32 : VST2<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VST2x2>;
+def VST2d8 : VST2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VST2,
+ addrmode6align64or128>;
+def VST2d16 : VST2<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VST2,
+ addrmode6align64or128>;
+def VST2d32 : VST2<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VST2,
+ addrmode6align64or128>;
+
+def VST2q8 : VST2<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VST2x2,
+ addrmode6align64or128or256>;
+def VST2q16 : VST2<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VST2x2,
+ addrmode6align64or128or256>;
+def VST2q32 : VST2<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VST2x2,
+ addrmode6align64or128or256>;
def VST2q8Pseudo : VSTQQPseudo<IIC_VST2x2>;
def VST2q16Pseudo : VSTQQPseudo<IIC_VST2x2>;
@@ -1742,9 +1789,9 @@ def VST2q32Pseudo : VSTQQPseudo<IIC_VST
// ...with address register writeback:
multiclass VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt,
- RegisterOperand VdTy> {
+ RegisterOperand VdTy, Operand AddrMode> {
def _fixed : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VdTy:$Vd), IIC_VLD1u,
+ (ins AddrMode:$Rn, VdTy:$Vd), IIC_VLD1u,
"vst2", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1752,16 +1799,16 @@ multiclass VST2DWB<bits<4> op11_8, bits<
let DecoderMethod = "DecodeVLDST2Instruction";
}
def _register : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VdTy:$Vd), IIC_VLD1u,
+ (ins AddrMode:$Rn, rGPR:$Rm, VdTy:$Vd), IIC_VLD1u,
"vst2", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDST2Instruction";
}
}
-multiclass VST2QWB<bits<4> op7_4, string Dt> {
+multiclass VST2QWB<bits<4> op7_4, string Dt, Operand AddrMode> {
def _fixed : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, VecListFourD:$Vd), IIC_VLD1u,
+ (ins AddrMode:$Rn, VecListFourD:$Vd), IIC_VLD1u,
"vst2", Dt, "$Vd, $Rn!",
"$Rn.addr = $wb", []> {
let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
@@ -1769,7 +1816,7 @@ multiclass VST2QWB<bits<4> op7_4, string
let DecoderMethod = "DecodeVLDST2Instruction";
}
def _register : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, rGPR:$Rm, VecListFourD:$Vd),
+ (ins AddrMode:$Rn, rGPR:$Rm, VecListFourD:$Vd),
IIC_VLD1u,
"vst2", Dt, "$Vd, $Rn, $Rm",
"$Rn.addr = $wb", []> {
@@ -1778,13 +1825,16 @@ multiclass VST2QWB<bits<4> op7_4, string
}
}
-defm VST2d8wb : VST2DWB<0b1000, {0,0,?,?}, "8", VecListDPair>;
-defm VST2d16wb : VST2DWB<0b1000, {0,1,?,?}, "16", VecListDPair>;
-defm VST2d32wb : VST2DWB<0b1000, {1,0,?,?}, "32", VecListDPair>;
-
-defm VST2q8wb : VST2QWB<{0,0,?,?}, "8">;
-defm VST2q16wb : VST2QWB<{0,1,?,?}, "16">;
-defm VST2q32wb : VST2QWB<{1,0,?,?}, "32">;
+defm VST2d8wb : VST2DWB<0b1000, {0,0,?,?}, "8", VecListDPair,
+ addrmode6align64or128>;
+defm VST2d16wb : VST2DWB<0b1000, {0,1,?,?}, "16", VecListDPair,
+ addrmode6align64or128>;
+defm VST2d32wb : VST2DWB<0b1000, {1,0,?,?}, "32", VecListDPair,
+ addrmode6align64or128>;
+
+defm VST2q8wb : VST2QWB<{0,0,?,?}, "8", addrmode6align64or128or256>;
+defm VST2q16wb : VST2QWB<{0,1,?,?}, "16", addrmode6align64or128or256>;
+defm VST2q32wb : VST2QWB<{1,0,?,?}, "32", addrmode6align64or128or256>;
def VST2q8PseudoWB_fixed : VSTQQWBfixedPseudo<IIC_VST2x2u>;
def VST2q16PseudoWB_fixed : VSTQQWBfixedPseudo<IIC_VST2x2u>;
@@ -1794,12 +1844,18 @@ def VST2q16PseudoWB_register : VSTQQWBre
def VST2q32PseudoWB_register : VSTQQWBregisterPseudo<IIC_VST2x2u>;
// ...with double-spaced registers
-def VST2b8 : VST2<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VST2>;
-def VST2b16 : VST2<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VST2>;
-def VST2b32 : VST2<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VST2>;
-defm VST2b8wb : VST2DWB<0b1001, {0,0,?,?}, "8", VecListDPairSpaced>;
-defm VST2b16wb : VST2DWB<0b1001, {0,1,?,?}, "16", VecListDPairSpaced>;
-defm VST2b32wb : VST2DWB<0b1001, {1,0,?,?}, "32", VecListDPairSpaced>;
+def VST2b8 : VST2<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VST2,
+ addrmode6align64or128>;
+def VST2b16 : VST2<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VST2,
+ addrmode6align64or128>;
+def VST2b32 : VST2<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VST2,
+ addrmode6align64or128>;
+defm VST2b8wb : VST2DWB<0b1001, {0,0,?,?}, "8", VecListDPairSpaced,
+ addrmode6align64or128>;
+defm VST2b16wb : VST2DWB<0b1001, {0,1,?,?}, "16", VecListDPairSpaced,
+ addrmode6align64or128>;
+defm VST2b32wb : VST2DWB<0b1001, {1,0,?,?}, "32", VecListDPairSpaced,
+ addrmode6align64or128>;
// VST3 : Vector Store (multiple 3-element structures)
class VST3D<bits<4> op11_8, bits<4> op7_4, string Dt>
@@ -6311,379 +6367,442 @@ defm : NEONDTAnyInstAlias<"vorr${p}", "$
// VLD1 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD1LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD1LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".16", "$list, $addr",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VLD1LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".32", "$list, $addr",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD1LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr!",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD1LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".16", "$list, $addr!",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VLD1LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".32", "$list, $addr!",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD1LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr, $Rm",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD1LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".16", "$list, $addr, $Rm",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
rGPR:$Rm, pred:$p)>;
def VLD1LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld1${p}", ".32", "$list, $addr, $Rm",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
// VST1 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VST1LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst1${p}", ".8", "$list, $addr",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST1LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst1${p}", ".16", "$list, $addr",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VST1LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst1${p}", ".32", "$list, $addr",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST1LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".8", "$list, $addr!",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST1LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".16", "$list, $addr!",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VST1LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".32", "$list, $addr!",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST1LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".8", "$list, $addr, $Rm",
- (ins VecListOneDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDByteIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VST1LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".16", "$list, $addr, $Rm",
- (ins VecListOneDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDHWordIndexed:$list, addrmode6align16:$addr,
rGPR:$Rm, pred:$p)>;
def VST1LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst1${p}", ".32", "$list, $addr, $Rm",
- (ins VecListOneDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListOneDWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
// VLD2 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD2LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".8", "$list, $addr",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VLD2LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD2LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr, pred:$p)>;
def VLD2LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD2LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD2LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".8", "$list, $addr!",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VLD2LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr!",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD2LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr!",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD2LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr!",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD2LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr!",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD2LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".8", "$list, $addr, $Rm",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
rGPR:$Rm, pred:$p)>;
def VLD2LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr, $Rm",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VLD2LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr, $Rm",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD2LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr, $Rm",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VLD2LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr, $Rm",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
// VST2 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VST2LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".8", "$list, $addr",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VST2LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST2LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST2LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST2LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST2LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".8", "$list, $addr!",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
+ pred:$p)>;
def VST2LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr!",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST2LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr!",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST2LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr!",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST2LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr!",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST2LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".8", "$list, $addr, $Rm",
- (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDByteIndexed:$list, addrmode6align16:$addr,
rGPR:$Rm, pred:$p)>;
def VST2LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".16","$list, $addr, $Rm",
- (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDHWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VST2LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr, $Rm",
- (ins VecListTwoDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoDWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST2LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".16","$list, $addr, $Rm",
- (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoQHWordIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VST2LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr, $Rm",
- (ins VecListTwoQWordIndexed:$list, addrmode6:$addr,
+ (ins VecListTwoQWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
// VLD3 all-lanes pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD3DUPdAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdAsm_16: NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdAsm_32: NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqAsm_16: NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqAsm_32: NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
+ pred:$p)>;
def VLD3DUPdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3DUPdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3DUPdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeDAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeDAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3DUPqWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3DUPqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3DUPqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeQAllLanes:$list, addrmode6:$addr,
+ (ins VecListThreeQAllLanes:$list, addrmode6dupalignNone:$addr,
rGPR:$Rm, pred:$p)>;
// VLD3 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD3LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VLD3LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list,
+ addrmode6alignNone:$addr, rGPR:$Rm, pred:$p)>;
def VLD3LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list,
+ addrmode6alignNone:$addr, rGPR:$Rm, pred:$p)>;
def VLD3LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
// VLD3 multiple structure pseudo-instructions. These need special handling for
// the vector operands that the normal instructions don't yet model.
// FIXME: Remove these when the register classes and instructions are updated.
def VLD3dAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3qWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VLD3dWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3dWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3dWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3qWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3qWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD3qWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
// VST3 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VST3LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr!",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
+ pred:$p)>;
def VST3LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeDByteIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VST3LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListThreeDHWordIndexed:$list,
+ addrmode6alignNone:$addr, rGPR:$Rm, pred:$p)>;
def VST3LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeDWordIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeDWordIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
def VST3LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListThreeQHWordIndexed:$list,
+ addrmode6alignNone:$addr, rGPR:$Rm, pred:$p)>;
def VST3LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeQWordIndexed:$list, addrmode6:$addr,
+ (ins VecListThreeQWordIndexed:$list, addrmode6alignNone:$addr,
rGPR:$Rm, pred:$p)>;
@@ -6691,168 +6810,190 @@ def VST3LNqWB_register_Asm_32 :
// the vector operands that the normal instructions don't yet model.
// FIXME: Remove these when the register classes and instructions are updated.
def VST3dAsm_8 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qAsm_8 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
- (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeD:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3qWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
- (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListThreeQ:$list, addrmode6align64:$addr, pred:$p)>;
def VST3dWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST3dWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST3dWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeD:$list, addrmode6:$addr,
+ (ins VecListThreeD:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST3qWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST3qWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST3qWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
- (ins VecListThreeQ:$list, addrmode6:$addr,
+ (ins VecListThreeQ:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
// VLD4 all-lanes pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD4DUPdAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign32:$addr,
+ pred:$p)>;
def VLD4DUPdAsm_16: NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign64:$addr,
+ pred:$p)>;
def VLD4DUPdAsm_32: NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign64or128:$addr,
+ pred:$p)>;
def VLD4DUPqAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign32:$addr,
+ pred:$p)>;
def VLD4DUPqAsm_16: NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign64:$addr,
+ pred:$p)>;
def VLD4DUPqAsm_32: NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign64or128:$addr,
+ pred:$p)>;
def VLD4DUPdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign32:$addr,
+ pred:$p)>;
def VLD4DUPdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign64:$addr,
+ pred:$p)>;
def VLD4DUPdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign64or128:$addr,
+ pred:$p)>;
def VLD4DUPqWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign32:$addr,
+ pred:$p)>;
def VLD4DUPqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign64:$addr,
+ pred:$p)>;
def VLD4DUPqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign64or128:$addr,
+ pred:$p)>;
def VLD4DUPdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr,
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign32:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4DUPdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr,
+ (ins VecListFourDAllLanes:$list, addrmode6dupalign64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4DUPdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourDAllLanes:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourDAllLanes:$list,
+ addrmode6dupalign64or128:$addr, rGPR:$Rm, pred:$p)>;
def VLD4DUPqWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr,
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign32:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4DUPqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr,
+ (ins VecListFourQAllLanes:$list, addrmode6dupalign64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4DUPqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourQAllLanes:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourQAllLanes:$list,
+ addrmode6dupalign64or128:$addr, rGPR:$Rm, pred:$p)>;
// VLD4 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD4LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD4LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD4LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VLD4LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD4LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VLD4LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VLD4LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD4LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VLD4LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VLD4LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VLD4LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list,
+ addrmode6align64or128:$addr, rGPR:$Rm, pred:$p)>;
def VLD4LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list,
+ addrmode6align64or128:$addr, rGPR:$Rm, pred:$p)>;
@@ -6860,168 +7001,202 @@ def VLD4LNqWB_register_Asm_32 :
// the vector operands that the normal instructions don't yet model.
// FIXME: Remove these when the register classes and instructions are updated.
def VLD4dAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4qWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VLD4dWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4dWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4dWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4qWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4qWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VLD4qWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
// VST4 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VST4LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST4LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST4LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VST4LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST4LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VST4LNdWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr!",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
+ pred:$p)>;
def VST4LNdWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST4LNdWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VST4LNqWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
+ pred:$p)>;
def VST4LNqWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list, addrmode6align64or128:$addr,
+ pred:$p)>;
def VST4LNdWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourDByteIndexed:$list, addrmode6:$addr,
+ (ins VecListFourDByteIndexed:$list, addrmode6align32:$addr,
rGPR:$Rm, pred:$p)>;
def VST4LNdWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourDHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListFourDHWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST4LNdWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourDWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourDWordIndexed:$list,
+ addrmode6align64or128:$addr, rGPR:$Rm, pred:$p)>;
def VST4LNqWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourQHWordIndexed:$list, addrmode6:$addr,
+ (ins VecListFourQHWordIndexed:$list, addrmode6align64:$addr,
rGPR:$Rm, pred:$p)>;
def VST4LNqWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourQWordIndexed:$list, addrmode6:$addr,
- rGPR:$Rm, pred:$p)>;
+ (ins VecListFourQWordIndexed:$list,
+ addrmode6align64or128:$addr, rGPR:$Rm, pred:$p)>;
// VST4 multiple structure pseudo-instructions. These need special handling for
// the vector operands that the normal instructions don't yet model.
// FIXME: Remove these when the register classes and instructions are updated.
def VST4dAsm_8 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qAsm_8 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
- (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qWB_fixed_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qWB_fixed_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4qWB_fixed_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
- (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
+ pred:$p)>;
def VST4dWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VST4dWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VST4dWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourD:$list, addrmode6:$addr,
+ (ins VecListFourD:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VST4qWB_register_Asm_8 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VST4qWB_register_Asm_16 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
def VST4qWB_register_Asm_32 :
NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
- (ins VecListFourQ:$list, addrmode6:$addr,
+ (ins VecListFourQ:$list, addrmode6align64or128or256:$addr,
rGPR:$Rm, pred:$p)>;
// VMOV/VMVN takes an optional datatype suffix
Modified: llvm/trunk/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/AsmParser/ARMAsmParser.cpp?rev=205986&r1=205985&r2=205986&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/AsmParser/ARMAsmParser.cpp (original)
+++ llvm/trunk/lib/Target/ARM/AsmParser/ARMAsmParser.cpp Thu Apr 10 15:18:58 2014
@@ -416,7 +416,7 @@ class ARMOperand : public MCParsedAsmOpe
k_Token
} Kind;
- SMLoc StartLoc, EndLoc;
+ SMLoc StartLoc, EndLoc, AlignmentLoc;
SmallVector<unsigned, 8> Registers;
struct CCOp {
@@ -633,6 +633,12 @@ public:
/// operand.
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
+ /// getAlignmentLoc - Get the location of the Alignment token of this operand.
+ SMLoc getAlignmentLoc() const {
+ assert(Kind == k_Memory && "Invalid access!");
+ return AlignmentLoc;
+ }
+
ARMCC::CondCodes getCondCode() const {
assert(Kind == k_CondCode && "Invalid access!");
return CC.Val;
@@ -1089,12 +1095,12 @@ public:
bool isPostIdxReg() const {
return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
}
- bool isMemNoOffset(bool alignOK = false) const {
+ bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
if (!isMem())
return false;
// No offset of any kind.
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
- (alignOK || Memory.Alignment == 0);
+ (alignOK || Memory.Alignment == Alignment);
}
bool isMemPCRelImm12() const {
if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
@@ -1110,6 +1116,65 @@ public:
bool isAlignedMemory() const {
return isMemNoOffset(true);
}
+ bool isAlignedMemoryNone() const {
+ return isMemNoOffset(false, 0);
+ }
+ bool isDupAlignedMemoryNone() const {
+ return isMemNoOffset(false, 0);
+ }
+ bool isAlignedMemory16() const {
+ if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isDupAlignedMemory16() const {
+ if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isAlignedMemory32() const {
+ if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isDupAlignedMemory32() const {
+ if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isAlignedMemory64() const {
+ if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isDupAlignedMemory64() const {
+ if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isAlignedMemory64or128() const {
+ if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
+ return true;
+ if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isDupAlignedMemory64or128() const {
+ if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
+ return true;
+ if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
+ bool isAlignedMemory64or128or256() const {
+ if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
+ return true;
+ if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
+ return true;
+ if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
+ return true;
+ return isMemNoOffset(false, 0);
+ }
bool isAddrMode2() const {
if (!isMem() || Memory.Alignment != 0) return false;
// Check for register offset.
@@ -1926,6 +1991,50 @@ public:
Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
}
+ void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
+ void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
+ addAlignedMemoryOperands(Inst, N);
+ }
+
void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
@@ -2523,7 +2632,8 @@ public:
unsigned ShiftImm,
unsigned Alignment,
bool isNegative,
- SMLoc S, SMLoc E) {
+ SMLoc S, SMLoc E,
+ SMLoc AlignmentLoc = SMLoc()) {
ARMOperand *Op = new ARMOperand(k_Memory);
Op->Memory.BaseRegNum = BaseRegNum;
Op->Memory.OffsetImm = OffsetImm;
@@ -2534,6 +2644,7 @@ public:
Op->Memory.isNegative = isNegative;
Op->StartLoc = S;
Op->EndLoc = E;
+ Op->AlignmentLoc = AlignmentLoc;
return Op;
}
@@ -4346,6 +4457,7 @@ parseMemory(SmallVectorImpl<MCParsedAsmO
if (Parser.getTok().is(AsmToken::Colon)) {
Parser.Lex(); // Eat the ':'.
E = Parser.getTok().getLoc();
+ SMLoc AlignmentLoc = Tok.getLoc();
const MCExpr *Expr;
if (getParser().parseExpression(Expr))
@@ -4380,7 +4492,7 @@ parseMemory(SmallVectorImpl<MCParsedAsmO
// the is*() predicates.
Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
ARM_AM::no_shift, 0, Align,
- false, S, E));
+ false, S, E, AlignmentLoc));
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand.
@@ -7968,6 +8080,42 @@ MatchAndEmitInstruction(SMLoc IDLoc, uns
if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
}
+ case Match_AlignedMemoryRequiresNone:
+ case Match_DupAlignedMemoryRequiresNone:
+ case Match_AlignedMemoryRequires16:
+ case Match_DupAlignedMemoryRequires16:
+ case Match_AlignedMemoryRequires32:
+ case Match_DupAlignedMemoryRequires32:
+ case Match_AlignedMemoryRequires64:
+ case Match_DupAlignedMemoryRequires64:
+ case Match_AlignedMemoryRequires64or128:
+ case Match_DupAlignedMemoryRequires64or128:
+ case Match_AlignedMemoryRequires64or128or256:
+ {
+ SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getAlignmentLoc();
+ if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
+ switch (MatchResult) {
+ default:
+ llvm_unreachable("Missing Match_Aligned type");
+ case Match_AlignedMemoryRequiresNone:
+ case Match_DupAlignedMemoryRequiresNone:
+ return Error(ErrorLoc, "alignment must be omitted");
+ case Match_AlignedMemoryRequires16:
+ case Match_DupAlignedMemoryRequires16:
+ return Error(ErrorLoc, "alignment must be 16 or omitted");
+ case Match_AlignedMemoryRequires32:
+ case Match_DupAlignedMemoryRequires32:
+ return Error(ErrorLoc, "alignment must be 32 or omitted");
+ case Match_AlignedMemoryRequires64:
+ case Match_DupAlignedMemoryRequires64:
+ return Error(ErrorLoc, "alignment must be 64 or omitted");
+ case Match_AlignedMemoryRequires64or128:
+ case Match_DupAlignedMemoryRequires64or128:
+ return Error(ErrorLoc, "alignment must be 64, 128 or omitted");
+ case Match_AlignedMemoryRequires64or128or256:
+ return Error(ErrorLoc, "alignment must be 64, 128, 256 or omitted");
+ }
+ }
}
llvm_unreachable("Implement any new match types added!");
Added: llvm/trunk/test/MC/ARM/neon-vld-vst-align.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/ARM/neon-vld-vst-align.s?rev=205986&view=auto
==============================================================================
--- llvm/trunk/test/MC/ARM/neon-vld-vst-align.s (added)
+++ llvm/trunk/test/MC/ARM/neon-vld-vst-align.s Thu Apr 10 15:18:58 2014
@@ -0,0 +1,8354 @@
+@ RUN: not llvm-mc -triple=thumbv7-apple-darwin -show-encoding < %s > %t 2> %e
+@ RUN: FileCheck < %t %s
+@ RUN: FileCheck --check-prefix=CHECK-ERRORS < %e %s
+
+ vld1.8 {d0}, [r4]
+ vld1.8 {d0}, [r4:16]
+ vld1.8 {d0}, [r4:32]
+ vld1.8 {d0}, [r4:64]
+ vld1.8 {d0}, [r4:128]
+ vld1.8 {d0}, [r4:256]
+
+@ CHECK: vld1.8 {d0}, [r4] @ encoding: [0x24,0xf9,0x0f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0}, [r4]!
+ vld1.8 {d0}, [r4:16]!
+ vld1.8 {d0}, [r4:32]!
+ vld1.8 {d0}, [r4:64]!
+ vld1.8 {d0}, [r4:128]!
+ vld1.8 {d0}, [r4:256]!
+
+@ CHECK: vld1.8 {d0}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0}, [r4], r6
+ vld1.8 {d0}, [r4:16], r6
+ vld1.8 {d0}, [r4:32], r6
+ vld1.8 {d0}, [r4:64], r6
+ vld1.8 {d0}, [r4:128], r6
+ vld1.8 {d0}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1}, [r4]
+ vld1.8 {d0, d1}, [r4:16]
+ vld1.8 {d0, d1}, [r4:32]
+ vld1.8 {d0, d1}, [r4:64]
+ vld1.8 {d0, d1}, [r4:128]
+ vld1.8 {d0, d1}, [r4:256]
+
+@ CHECK: vld1.8 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x0f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x0a]
+@ CHECK: vld1.8 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1}, [r4]!
+ vld1.8 {d0, d1}, [r4:16]!
+ vld1.8 {d0, d1}, [r4:32]!
+ vld1.8 {d0, d1}, [r4:64]!
+ vld1.8 {d0, d1}, [r4:128]!
+ vld1.8 {d0, d1}, [r4:256]!
+
+@ CHECK: vld1.8 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x0a]
+@ CHECK: vld1.8 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1}, [r4], r6
+ vld1.8 {d0, d1}, [r4:16], r6
+ vld1.8 {d0, d1}, [r4:32], r6
+ vld1.8 {d0, d1}, [r4:64], r6
+ vld1.8 {d0, d1}, [r4:128], r6
+ vld1.8 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x0a]
+@ CHECK: vld1.8 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1, d2}, [r4]
+ vld1.8 {d0, d1, d2}, [r4:16]
+ vld1.8 {d0, d1, d2}, [r4:32]
+ vld1.8 {d0, d1, d2}, [r4:64]
+ vld1.8 {d0, d1, d2}, [r4:128]
+ vld1.8 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld1.8 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x0f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1, d2}, [r4]!
+ vld1.8 {d0, d1, d2}, [r4:16]!
+ vld1.8 {d0, d1, d2}, [r4:32]!
+ vld1.8 {d0, d1, d2}, [r4:64]!
+ vld1.8 {d0, d1, d2}, [r4:128]!
+ vld1.8 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld1.8 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1, d2}, [r4], r6
+ vld1.8 {d0, d1, d2}, [r4:16], r6
+ vld1.8 {d0, d1, d2}, [r4:32], r6
+ vld1.8 {d0, d1, d2}, [r4:64], r6
+ vld1.8 {d0, d1, d2}, [r4:128], r6
+ vld1.8 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0, d1, d2, d3}, [r4]
+ vld1.8 {d0, d1, d2, d3}, [r4:16]
+ vld1.8 {d0, d1, d2, d3}, [r4:32]
+ vld1.8 {d0, d1, d2, d3}, [r4:64]
+ vld1.8 {d0, d1, d2, d3}, [r4:128]
+ vld1.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x0f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x3f,0x02]
+
+ vld1.8 {d0, d1, d2, d3}, [r4]!
+ vld1.8 {d0, d1, d2, d3}, [r4:16]!
+ vld1.8 {d0, d1, d2, d3}, [r4:32]!
+ vld1.8 {d0, d1, d2, d3}, [r4:64]!
+ vld1.8 {d0, d1, d2, d3}, [r4:128]!
+ vld1.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x3d,0x02]
+
+ vld1.8 {d0, d1, d2, d3}, [r4], r6
+ vld1.8 {d0, d1, d2, d3}, [r4:16], r6
+ vld1.8 {d0, d1, d2, d3}, [r4:32], r6
+ vld1.8 {d0, d1, d2, d3}, [r4:64], r6
+ vld1.8 {d0, d1, d2, d3}, [r4:128], r6
+ vld1.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x02]
+@ CHECK: vld1.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x36,0x02]
+
+ vld1.8 {d0[2]}, [r4]
+ vld1.8 {d0[2]}, [r4:16]
+ vld1.8 {d0[2]}, [r4:32]
+ vld1.8 {d0[2]}, [r4:64]
+ vld1.8 {d0[2]}, [r4:128]
+ vld1.8 {d0[2]}, [r4:256]
+
+@ CHECK: vld1.8 {d0[2]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[2]}, [r4]!
+ vld1.8 {d0[2]}, [r4:16]!
+ vld1.8 {d0[2]}, [r4:32]!
+ vld1.8 {d0[2]}, [r4:64]!
+ vld1.8 {d0[2]}, [r4:128]!
+ vld1.8 {d0[2]}, [r4:256]!
+
+@ CHECK: vld1.8 {d0[2]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[2]}, [r4], r6
+ vld1.8 {d0[2]}, [r4:16], r6
+ vld1.8 {d0[2]}, [r4:32], r6
+ vld1.8 {d0[2]}, [r4:64], r6
+ vld1.8 {d0[2]}, [r4:128], r6
+ vld1.8 {d0[2]}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0[2]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[]}, [r4]
+ vld1.8 {d0[]}, [r4:16]
+ vld1.8 {d0[]}, [r4:32]
+ vld1.8 {d0[]}, [r4:64]
+ vld1.8 {d0[]}, [r4:128]
+ vld1.8 {d0[]}, [r4:256]
+
+@ CHECK: vld1.8 {d0[]}, [r4] @ encoding: [0xa4,0xf9,0x0f,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[]}, [r4]!
+ vld1.8 {d0[]}, [r4:16]!
+ vld1.8 {d0[]}, [r4:32]!
+ vld1.8 {d0[]}, [r4:64]!
+ vld1.8 {d0[]}, [r4:128]!
+ vld1.8 {d0[]}, [r4:256]!
+
+@ CHECK: vld1.8 {d0[]}, [r4]! @ encoding: [0xa4,0xf9,0x0d,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[]}, [r4], r6
+ vld1.8 {d0[]}, [r4:16], r6
+ vld1.8 {d0[]}, [r4:32], r6
+ vld1.8 {d0[]}, [r4:64], r6
+ vld1.8 {d0[]}, [r4:128], r6
+ vld1.8 {d0[]}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x06,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[], d1[]}, [r4]
+ vld1.8 {d0[], d1[]}, [r4:16]
+ vld1.8 {d0[], d1[]}, [r4:32]
+ vld1.8 {d0[], d1[]}, [r4:64]
+ vld1.8 {d0[], d1[]}, [r4:128]
+ vld1.8 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld1.8 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[], d1[]}, [r4]!
+ vld1.8 {d0[], d1[]}, [r4:16]!
+ vld1.8 {d0[], d1[]}, [r4:32]!
+ vld1.8 {d0[], d1[]}, [r4:64]!
+ vld1.8 {d0[], d1[]}, [r4:128]!
+ vld1.8 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld1.8 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.8 {d0[], d1[]}, [r4], r6
+ vld1.8 {d0[], d1[]}, [r4:16], r6
+ vld1.8 {d0[], d1[]}, [r4:32], r6
+ vld1.8 {d0[], d1[]}, [r4:64], r6
+ vld1.8 {d0[], d1[]}, [r4:128], r6
+ vld1.8 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld1.8 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x0c]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld1.8 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0}, [r4]
+ vld1.16 {d0}, [r4:16]
+ vld1.16 {d0}, [r4:32]
+ vld1.16 {d0}, [r4:64]
+ vld1.16 {d0}, [r4:128]
+ vld1.16 {d0}, [r4:256]
+
+@ CHECK: vld1.16 {d0}, [r4] @ encoding: [0x24,0xf9,0x4f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0}, [r4]!
+ vld1.16 {d0}, [r4:16]!
+ vld1.16 {d0}, [r4:32]!
+ vld1.16 {d0}, [r4:64]!
+ vld1.16 {d0}, [r4:128]!
+ vld1.16 {d0}, [r4:256]!
+
+@ CHECK: vld1.16 {d0}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0}, [r4], r6
+ vld1.16 {d0}, [r4:16], r6
+ vld1.16 {d0}, [r4:32], r6
+ vld1.16 {d0}, [r4:64], r6
+ vld1.16 {d0}, [r4:128], r6
+ vld1.16 {d0}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1}, [r4]
+ vld1.16 {d0, d1}, [r4:16]
+ vld1.16 {d0, d1}, [r4:32]
+ vld1.16 {d0, d1}, [r4:64]
+ vld1.16 {d0, d1}, [r4:128]
+ vld1.16 {d0, d1}, [r4:256]
+
+@ CHECK: vld1.16 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x4f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x0a]
+@ CHECK: vld1.16 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1}, [r4]!
+ vld1.16 {d0, d1}, [r4:16]!
+ vld1.16 {d0, d1}, [r4:32]!
+ vld1.16 {d0, d1}, [r4:64]!
+ vld1.16 {d0, d1}, [r4:128]!
+ vld1.16 {d0, d1}, [r4:256]!
+
+@ CHECK: vld1.16 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x0a]
+@ CHECK: vld1.16 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1}, [r4], r6
+ vld1.16 {d0, d1}, [r4:16], r6
+ vld1.16 {d0, d1}, [r4:32], r6
+ vld1.16 {d0, d1}, [r4:64], r6
+ vld1.16 {d0, d1}, [r4:128], r6
+ vld1.16 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x0a]
+@ CHECK: vld1.16 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1, d2}, [r4]
+ vld1.16 {d0, d1, d2}, [r4:16]
+ vld1.16 {d0, d1, d2}, [r4:32]
+ vld1.16 {d0, d1, d2}, [r4:64]
+ vld1.16 {d0, d1, d2}, [r4:128]
+ vld1.16 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld1.16 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x4f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1, d2}, [r4]!
+ vld1.16 {d0, d1, d2}, [r4:16]!
+ vld1.16 {d0, d1, d2}, [r4:32]!
+ vld1.16 {d0, d1, d2}, [r4:64]!
+ vld1.16 {d0, d1, d2}, [r4:128]!
+ vld1.16 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld1.16 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1, d2}, [r4], r6
+ vld1.16 {d0, d1, d2}, [r4:16], r6
+ vld1.16 {d0, d1, d2}, [r4:32], r6
+ vld1.16 {d0, d1, d2}, [r4:64], r6
+ vld1.16 {d0, d1, d2}, [r4:128], r6
+ vld1.16 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0, d1, d2, d3}, [r4]
+ vld1.16 {d0, d1, d2, d3}, [r4:16]
+ vld1.16 {d0, d1, d2, d3}, [r4:32]
+ vld1.16 {d0, d1, d2, d3}, [r4:64]
+ vld1.16 {d0, d1, d2, d3}, [r4:128]
+ vld1.16 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x4f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x7f,0x02]
+
+ vld1.16 {d0, d1, d2, d3}, [r4]!
+ vld1.16 {d0, d1, d2, d3}, [r4:16]!
+ vld1.16 {d0, d1, d2, d3}, [r4:32]!
+ vld1.16 {d0, d1, d2, d3}, [r4:64]!
+ vld1.16 {d0, d1, d2, d3}, [r4:128]!
+ vld1.16 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x7d,0x02]
+
+ vld1.16 {d0, d1, d2, d3}, [r4], r6
+ vld1.16 {d0, d1, d2, d3}, [r4:16], r6
+ vld1.16 {d0, d1, d2, d3}, [r4:32], r6
+ vld1.16 {d0, d1, d2, d3}, [r4:64], r6
+ vld1.16 {d0, d1, d2, d3}, [r4:128], r6
+ vld1.16 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.16 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x02]
+@ CHECK: vld1.16 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x76,0x02]
+
+ vld1.16 {d0[2]}, [r4]
+ vld1.16 {d0[2]}, [r4:16]
+ vld1.16 {d0[2]}, [r4:32]
+ vld1.16 {d0[2]}, [r4:64]
+ vld1.16 {d0[2]}, [r4:128]
+ vld1.16 {d0[2]}, [r4:256]
+
+@ CHECK: vld1.16 {d0[2]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x04]
+@ CHECK: vld1.16 {d0[2]}, [r4:16] @ encoding: [0xa4,0xf9,0x9f,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[2]}, [r4]!
+ vld1.16 {d0[2]}, [r4:16]!
+ vld1.16 {d0[2]}, [r4:32]!
+ vld1.16 {d0[2]}, [r4:64]!
+ vld1.16 {d0[2]}, [r4:128]!
+ vld1.16 {d0[2]}, [r4:256]!
+
+@ CHECK: vld1.16 {d0[2]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x04]
+@ CHECK: vld1.16 {d0[2]}, [r4:16]! @ encoding: [0xa4,0xf9,0x9d,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[2]}, [r4], r6
+ vld1.16 {d0[2]}, [r4:16], r6
+ vld1.16 {d0[2]}, [r4:32], r6
+ vld1.16 {d0[2]}, [r4:64], r6
+ vld1.16 {d0[2]}, [r4:128], r6
+ vld1.16 {d0[2]}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0[2]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x04]
+@ CHECK: vld1.16 {d0[2]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x96,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[]}, [r4]
+ vld1.16 {d0[]}, [r4:16]
+ vld1.16 {d0[]}, [r4:32]
+ vld1.16 {d0[]}, [r4:64]
+ vld1.16 {d0[]}, [r4:128]
+ vld1.16 {d0[]}, [r4:256]
+
+@ CHECK: vld1.16 {d0[]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x0c]
+@ CHECK: vld1.16 {d0[]}, [r4:16] @ encoding: [0xa4,0xf9,0x5f,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[]}, [r4]!
+ vld1.16 {d0[]}, [r4:16]!
+ vld1.16 {d0[]}, [r4:32]!
+ vld1.16 {d0[]}, [r4:64]!
+ vld1.16 {d0[]}, [r4:128]!
+ vld1.16 {d0[]}, [r4:256]!
+
+@ CHECK: vld1.16 {d0[]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x0c]
+@ CHECK: vld1.16 {d0[]}, [r4:16]! @ encoding: [0xa4,0xf9,0x5d,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[]}, [r4], r6
+ vld1.16 {d0[]}, [r4:16], r6
+ vld1.16 {d0[]}, [r4:32], r6
+ vld1.16 {d0[]}, [r4:64], r6
+ vld1.16 {d0[]}, [r4:128], r6
+ vld1.16 {d0[]}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x0c]
+@ CHECK: vld1.16 {d0[]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x56,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[], d1[]}, [r4]
+ vld1.16 {d0[], d1[]}, [r4:16]
+ vld1.16 {d0[], d1[]}, [r4:32]
+ vld1.16 {d0[], d1[]}, [r4:64]
+ vld1.16 {d0[], d1[]}, [r4:128]
+ vld1.16 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld1.16 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x0c]
+@ CHECK: vld1.16 {d0[], d1[]}, [r4:16] @ encoding: [0xa4,0xf9,0x7f,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[], d1[]}, [r4]!
+ vld1.16 {d0[], d1[]}, [r4:16]!
+ vld1.16 {d0[], d1[]}, [r4:32]!
+ vld1.16 {d0[], d1[]}, [r4:64]!
+ vld1.16 {d0[], d1[]}, [r4:128]!
+ vld1.16 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld1.16 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x0c]
+@ CHECK: vld1.16 {d0[], d1[]}, [r4:16]! @ encoding: [0xa4,0xf9,0x7d,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.16 {d0[], d1[]}, [r4], r6
+ vld1.16 {d0[], d1[]}, [r4:16], r6
+ vld1.16 {d0[], d1[]}, [r4:32], r6
+ vld1.16 {d0[], d1[]}, [r4:64], r6
+ vld1.16 {d0[], d1[]}, [r4:128], r6
+ vld1.16 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld1.16 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x0c]
+@ CHECK: vld1.16 {d0[], d1[]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x76,0x0c]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld1.16 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0}, [r4]
+ vld1.32 {d0}, [r4:16]
+ vld1.32 {d0}, [r4:32]
+ vld1.32 {d0}, [r4:64]
+ vld1.32 {d0}, [r4:128]
+ vld1.32 {d0}, [r4:256]
+
+@ CHECK: vld1.32 {d0}, [r4] @ encoding: [0x24,0xf9,0x8f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0}, [r4]!
+ vld1.32 {d0}, [r4:16]!
+ vld1.32 {d0}, [r4:32]!
+ vld1.32 {d0}, [r4:64]!
+ vld1.32 {d0}, [r4:128]!
+ vld1.32 {d0}, [r4:256]!
+
+@ CHECK: vld1.32 {d0}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0}, [r4], r6
+ vld1.32 {d0}, [r4:16], r6
+ vld1.32 {d0}, [r4:32], r6
+ vld1.32 {d0}, [r4:64], r6
+ vld1.32 {d0}, [r4:128], r6
+ vld1.32 {d0}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1}, [r4]
+ vld1.32 {d0, d1}, [r4:16]
+ vld1.32 {d0, d1}, [r4:32]
+ vld1.32 {d0, d1}, [r4:64]
+ vld1.32 {d0, d1}, [r4:128]
+ vld1.32 {d0, d1}, [r4:256]
+
+@ CHECK: vld1.32 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x8f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x0a]
+@ CHECK: vld1.32 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1}, [r4]!
+ vld1.32 {d0, d1}, [r4:16]!
+ vld1.32 {d0, d1}, [r4:32]!
+ vld1.32 {d0, d1}, [r4:64]!
+ vld1.32 {d0, d1}, [r4:128]!
+ vld1.32 {d0, d1}, [r4:256]!
+
+@ CHECK: vld1.32 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x0a]
+@ CHECK: vld1.32 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1}, [r4], r6
+ vld1.32 {d0, d1}, [r4:16], r6
+ vld1.32 {d0, d1}, [r4:32], r6
+ vld1.32 {d0, d1}, [r4:64], r6
+ vld1.32 {d0, d1}, [r4:128], r6
+ vld1.32 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x0a]
+@ CHECK: vld1.32 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1, d2}, [r4]
+ vld1.32 {d0, d1, d2}, [r4:16]
+ vld1.32 {d0, d1, d2}, [r4:32]
+ vld1.32 {d0, d1, d2}, [r4:64]
+ vld1.32 {d0, d1, d2}, [r4:128]
+ vld1.32 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld1.32 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x8f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1, d2}, [r4]!
+ vld1.32 {d0, d1, d2}, [r4:16]!
+ vld1.32 {d0, d1, d2}, [r4:32]!
+ vld1.32 {d0, d1, d2}, [r4:64]!
+ vld1.32 {d0, d1, d2}, [r4:128]!
+ vld1.32 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld1.32 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1, d2}, [r4], r6
+ vld1.32 {d0, d1, d2}, [r4:16], r6
+ vld1.32 {d0, d1, d2}, [r4:32], r6
+ vld1.32 {d0, d1, d2}, [r4:64], r6
+ vld1.32 {d0, d1, d2}, [r4:128], r6
+ vld1.32 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0, d1, d2, d3}, [r4]
+ vld1.32 {d0, d1, d2, d3}, [r4:16]
+ vld1.32 {d0, d1, d2, d3}, [r4:32]
+ vld1.32 {d0, d1, d2, d3}, [r4:64]
+ vld1.32 {d0, d1, d2, d3}, [r4:128]
+ vld1.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x8f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0xbf,0x02]
+
+ vld1.32 {d0, d1, d2, d3}, [r4]!
+ vld1.32 {d0, d1, d2, d3}, [r4:16]!
+ vld1.32 {d0, d1, d2, d3}, [r4:32]!
+ vld1.32 {d0, d1, d2, d3}, [r4:64]!
+ vld1.32 {d0, d1, d2, d3}, [r4:128]!
+ vld1.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0xbd,0x02]
+
+ vld1.32 {d0, d1, d2, d3}, [r4], r6
+ vld1.32 {d0, d1, d2, d3}, [r4:16], r6
+ vld1.32 {d0, d1, d2, d3}, [r4:32], r6
+ vld1.32 {d0, d1, d2, d3}, [r4:64], r6
+ vld1.32 {d0, d1, d2, d3}, [r4:128], r6
+ vld1.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x02]
+@ CHECK: vld1.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0xb6,0x02]
+
+ vld1.32 {d0[1]}, [r4]
+ vld1.32 {d0[1]}, [r4:16]
+ vld1.32 {d0[1]}, [r4:32]
+ vld1.32 {d0[1]}, [r4:64]
+ vld1.32 {d0[1]}, [r4:128]
+ vld1.32 {d0[1]}, [r4:256]
+
+@ CHECK: vld1.32 {d0[1]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32] @ encoding: [0xa4,0xf9,0xbf,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[1]}, [r4]!
+ vld1.32 {d0[1]}, [r4:16]!
+ vld1.32 {d0[1]}, [r4:32]!
+ vld1.32 {d0[1]}, [r4:64]!
+ vld1.32 {d0[1]}, [r4:128]!
+ vld1.32 {d0[1]}, [r4:256]!
+
+@ CHECK: vld1.32 {d0[1]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32]! @ encoding: [0xa4,0xf9,0xbd,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[1]}, [r4], r6
+ vld1.32 {d0[1]}, [r4:16], r6
+ vld1.32 {d0[1]}, [r4:32], r6
+ vld1.32 {d0[1]}, [r4:64], r6
+ vld1.32 {d0[1]}, [r4:128], r6
+ vld1.32 {d0[1]}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0xb6,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[]}, [r4]
+ vld1.32 {d0[]}, [r4:16]
+ vld1.32 {d0[]}, [r4:32]
+ vld1.32 {d0[]}, [r4:64]
+ vld1.32 {d0[]}, [r4:128]
+ vld1.32 {d0[]}, [r4:256]
+
+@ CHECK: vld1.32 {d0[]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[]}, [r4:32] @ encoding: [0xa4,0xf9,0x9f,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[]}, [r4]!
+ vld1.32 {d0[]}, [r4:16]!
+ vld1.32 {d0[]}, [r4:32]!
+ vld1.32 {d0[]}, [r4:64]!
+ vld1.32 {d0[]}, [r4:128]!
+ vld1.32 {d0[]}, [r4:256]!
+
+@ CHECK: vld1.32 {d0[]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[]}, [r4:32]! @ encoding: [0xa4,0xf9,0x9d,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[]}, [r4], r6
+ vld1.32 {d0[]}, [r4:16], r6
+ vld1.32 {d0[]}, [r4:32], r6
+ vld1.32 {d0[]}, [r4:64], r6
+ vld1.32 {d0[]}, [r4:128], r6
+ vld1.32 {d0[]}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x96,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[], d1[]}, [r4]
+ vld1.32 {d0[], d1[]}, [r4:16]
+ vld1.32 {d0[], d1[]}, [r4:32]
+ vld1.32 {d0[], d1[]}, [r4:64]
+ vld1.32 {d0[], d1[]}, [r4:128]
+ vld1.32 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld1.32 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0xaf,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[], d1[]}, [r4:32] @ encoding: [0xa4,0xf9,0xbf,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[], d1[]}, [r4]!
+ vld1.32 {d0[], d1[]}, [r4:16]!
+ vld1.32 {d0[], d1[]}, [r4:32]!
+ vld1.32 {d0[], d1[]}, [r4:64]!
+ vld1.32 {d0[], d1[]}, [r4:128]!
+ vld1.32 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld1.32 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0xad,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[], d1[]}, [r4:32]! @ encoding: [0xa4,0xf9,0xbd,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[], d1[]}, [r4], r6
+ vld1.32 {d0[], d1[]}, [r4:16], r6
+ vld1.32 {d0[], d1[]}, [r4:32], r6
+ vld1.32 {d0[], d1[]}, [r4:64], r6
+ vld1.32 {d0[], d1[]}, [r4:128], r6
+ vld1.32 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0xa6,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[], d1[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0xb6,0x0c]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[1]}, [r4]
+ vld1.32 {d0[1]}, [r4:16]
+ vld1.32 {d0[1]}, [r4:32]
+ vld1.32 {d0[1]}, [r4:64]
+ vld1.32 {d0[1]}, [r4:128]
+ vld1.32 {d0[1]}, [r4:256]
+
+@ CHECK: vld1.32 {d0[1]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32] @ encoding: [0xa4,0xf9,0xbf,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[1]}, [r4]!
+ vld1.32 {d0[1]}, [r4:16]!
+ vld1.32 {d0[1]}, [r4:32]!
+ vld1.32 {d0[1]}, [r4:64]!
+ vld1.32 {d0[1]}, [r4:128]!
+ vld1.32 {d0[1]}, [r4:256]!
+
+@ CHECK: vld1.32 {d0[1]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32]! @ encoding: [0xa4,0xf9,0xbd,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.32 {d0[1]}, [r4], r6
+ vld1.32 {d0[1]}, [r4:16], r6
+ vld1.32 {d0[1]}, [r4:32], r6
+ vld1.32 {d0[1]}, [r4:64], r6
+ vld1.32 {d0[1]}, [r4:128], r6
+ vld1.32 {d0[1]}, [r4:256], r6
+
+@ CHECK: vld1.32 {d0[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.32 {d0[1]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0xb6,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld1.32 {d0[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0}, [r4]
+ vld1.64 {d0}, [r4:16]
+ vld1.64 {d0}, [r4:32]
+ vld1.64 {d0}, [r4:64]
+ vld1.64 {d0}, [r4:128]
+ vld1.64 {d0}, [r4:256]
+
+@ CHECK: vld1.64 {d0}, [r4] @ encoding: [0x24,0xf9,0xcf,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0}, [r4:64] @ encoding: [0x24,0xf9,0xdf,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0}, [r4]!
+ vld1.64 {d0}, [r4:16]!
+ vld1.64 {d0}, [r4:32]!
+ vld1.64 {d0}, [r4:64]!
+ vld1.64 {d0}, [r4:128]!
+ vld1.64 {d0}, [r4:256]!
+
+@ CHECK: vld1.64 {d0}, [r4]! @ encoding: [0x24,0xf9,0xcd,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0}, [r4:64]! @ encoding: [0x24,0xf9,0xdd,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0}, [r4], r6
+ vld1.64 {d0}, [r4:16], r6
+ vld1.64 {d0}, [r4:32], r6
+ vld1.64 {d0}, [r4:64], r6
+ vld1.64 {d0}, [r4:128], r6
+ vld1.64 {d0}, [r4:256], r6
+
+@ CHECK: vld1.64 {d0}, [r4], r6 @ encoding: [0x24,0xf9,0xc6,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0}, [r4:64], r6 @ encoding: [0x24,0xf9,0xd6,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1}, [r4]
+ vld1.64 {d0, d1}, [r4:16]
+ vld1.64 {d0, d1}, [r4:32]
+ vld1.64 {d0, d1}, [r4:64]
+ vld1.64 {d0, d1}, [r4:128]
+ vld1.64 {d0, d1}, [r4:256]
+
+@ CHECK: vld1.64 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0xcf,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0xdf,0x0a]
+@ CHECK: vld1.64 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0xef,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1}, [r4]!
+ vld1.64 {d0, d1}, [r4:16]!
+ vld1.64 {d0, d1}, [r4:32]!
+ vld1.64 {d0, d1}, [r4:64]!
+ vld1.64 {d0, d1}, [r4:128]!
+ vld1.64 {d0, d1}, [r4:256]!
+
+@ CHECK: vld1.64 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0xcd,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0xdd,0x0a]
+@ CHECK: vld1.64 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0xed,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1}, [r4], r6
+ vld1.64 {d0, d1}, [r4:16], r6
+ vld1.64 {d0, d1}, [r4:32], r6
+ vld1.64 {d0, d1}, [r4:64], r6
+ vld1.64 {d0, d1}, [r4:128], r6
+ vld1.64 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld1.64 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0xc6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0xd6,0x0a]
+@ CHECK: vld1.64 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0xe6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1, d2}, [r4]
+ vld1.64 {d0, d1, d2}, [r4:16]
+ vld1.64 {d0, d1, d2}, [r4:32]
+ vld1.64 {d0, d1, d2}, [r4:64]
+ vld1.64 {d0, d1, d2}, [r4:128]
+ vld1.64 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld1.64 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0xcf,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0xdf,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1, d2}, [r4]!
+ vld1.64 {d0, d1, d2}, [r4:16]!
+ vld1.64 {d0, d1, d2}, [r4:32]!
+ vld1.64 {d0, d1, d2}, [r4:64]!
+ vld1.64 {d0, d1, d2}, [r4:128]!
+ vld1.64 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld1.64 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0xcd,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0xdd,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1, d2}, [r4], r6
+ vld1.64 {d0, d1, d2}, [r4:16], r6
+ vld1.64 {d0, d1, d2}, [r4:32], r6
+ vld1.64 {d0, d1, d2}, [r4:64], r6
+ vld1.64 {d0, d1, d2}, [r4:128], r6
+ vld1.64 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld1.64 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0xc6,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0xd6,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld1.64 {d0, d1, d2, d3}, [r4]
+ vld1.64 {d0, d1, d2, d3}, [r4:16]
+ vld1.64 {d0, d1, d2, d3}, [r4:32]
+ vld1.64 {d0, d1, d2, d3}, [r4:64]
+ vld1.64 {d0, d1, d2, d3}, [r4:128]
+ vld1.64 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0xcf,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0xdf,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0xef,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0xff,0x02]
+
+ vld1.64 {d0, d1, d2, d3}, [r4]!
+ vld1.64 {d0, d1, d2, d3}, [r4:16]!
+ vld1.64 {d0, d1, d2, d3}, [r4:32]!
+ vld1.64 {d0, d1, d2, d3}, [r4:64]!
+ vld1.64 {d0, d1, d2, d3}, [r4:128]!
+ vld1.64 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0xcd,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0xdd,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0xed,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0xfd,0x02]
+
+ vld1.64 {d0, d1, d2, d3}, [r4], r6
+ vld1.64 {d0, d1, d2, d3}, [r4:16], r6
+ vld1.64 {d0, d1, d2, d3}, [r4:32], r6
+ vld1.64 {d0, d1, d2, d3}, [r4:64], r6
+ vld1.64 {d0, d1, d2, d3}, [r4:128], r6
+ vld1.64 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0xc6,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld1.64 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0xd6,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0xe6,0x02]
+@ CHECK: vld1.64 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0xf6,0x02]
+
+ vld2.8 {d0, d1}, [r4]
+ vld2.8 {d0, d1}, [r4:16]
+ vld2.8 {d0, d1}, [r4:32]
+ vld2.8 {d0, d1}, [r4:64]
+ vld2.8 {d0, d1}, [r4:128]
+ vld2.8 {d0, d1}, [r4:256]
+
+@ CHECK: vld2.8 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x0f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x08]
+@ CHECK: vld2.8 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d1}, [r4]!
+ vld2.8 {d0, d1}, [r4:16]!
+ vld2.8 {d0, d1}, [r4:32]!
+ vld2.8 {d0, d1}, [r4:64]!
+ vld2.8 {d0, d1}, [r4:128]!
+ vld2.8 {d0, d1}, [r4:256]!
+
+@ CHECK: vld2.8 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x08]
+@ CHECK: vld2.8 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d1}, [r4], r6
+ vld2.8 {d0, d1}, [r4:16], r6
+ vld2.8 {d0, d1}, [r4:32], r6
+ vld2.8 {d0, d1}, [r4:64], r6
+ vld2.8 {d0, d1}, [r4:128], r6
+ vld2.8 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x08]
+@ CHECK: vld2.8 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d2}, [r4]
+ vld2.8 {d0, d2}, [r4:16]
+ vld2.8 {d0, d2}, [r4:32]
+ vld2.8 {d0, d2}, [r4:64]
+ vld2.8 {d0, d2}, [r4:128]
+ vld2.8 {d0, d2}, [r4:256]
+
+@ CHECK: vld2.8 {d0, d2}, [r4] @ encoding: [0x24,0xf9,0x0f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d2}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x09]
+@ CHECK: vld2.8 {d0, d2}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d2}, [r4]!
+ vld2.8 {d0, d2}, [r4:16]!
+ vld2.8 {d0, d2}, [r4:32]!
+ vld2.8 {d0, d2}, [r4:64]!
+ vld2.8 {d0, d2}, [r4:128]!
+ vld2.8 {d0, d2}, [r4:256]!
+
+@ CHECK: vld2.8 {d0, d2}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x09]
+@ CHECK: vld2.8 {d0, d2}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d2}, [r4], r6
+ vld2.8 {d0, d2}, [r4:16], r6
+ vld2.8 {d0, d2}, [r4:32], r6
+ vld2.8 {d0, d2}, [r4:64], r6
+ vld2.8 {d0, d2}, [r4:128], r6
+ vld2.8 {d0, d2}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x09]
+@ CHECK: vld2.8 {d0, d2}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0, d1, d2, d3}, [r4]
+ vld2.8 {d0, d1, d2, d3}, [r4:16]
+ vld2.8 {d0, d1, d2, d3}, [r4:32]
+ vld2.8 {d0, d1, d2, d3}, [r4:64]
+ vld2.8 {d0, d1, d2, d3}, [r4:128]
+ vld2.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x0f,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x3f,0x03]
+
+ vld2.8 {d0, d1, d2, d3}, [r4]!
+ vld2.8 {d0, d1, d2, d3}, [r4:16]!
+ vld2.8 {d0, d1, d2, d3}, [r4:32]!
+ vld2.8 {d0, d1, d2, d3}, [r4:64]!
+ vld2.8 {d0, d1, d2, d3}, [r4:128]!
+ vld2.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x3d,0x03]
+
+ vld2.8 {d0, d1, d2, d3}, [r4], r6
+ vld2.8 {d0, d1, d2, d3}, [r4:16], r6
+ vld2.8 {d0, d1, d2, d3}, [r4:32], r6
+ vld2.8 {d0, d1, d2, d3}, [r4:64], r6
+ vld2.8 {d0, d1, d2, d3}, [r4:128], r6
+ vld2.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x03]
+@ CHECK: vld2.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x36,0x03]
+
+ vld2.8 {d0[2], d1[2]}, [r4]
+ vld2.8 {d0[2], d1[2]}, [r4:16]
+ vld2.8 {d0[2], d1[2]}, [r4:32]
+ vld2.8 {d0[2], d1[2]}, [r4:64]
+ vld2.8 {d0[2], d1[2]}, [r4:128]
+ vld2.8 {d0[2], d1[2]}, [r4:256]
+
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x01]
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4:16] @ encoding: [0xa4,0xf9,0x5f,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[2], d1[2]}, [r4]!
+ vld2.8 {d0[2], d1[2]}, [r4:16]!
+ vld2.8 {d0[2], d1[2]}, [r4:32]!
+ vld2.8 {d0[2], d1[2]}, [r4:64]!
+ vld2.8 {d0[2], d1[2]}, [r4:128]!
+ vld2.8 {d0[2], d1[2]}, [r4:256]!
+
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x01]
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4:16]! @ encoding: [0xa4,0xf9,0x5d,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[2], d1[2]}, [r4], r6
+ vld2.8 {d0[2], d1[2]}, [r4:16], r6
+ vld2.8 {d0[2], d1[2]}, [r4:32], r6
+ vld2.8 {d0[2], d1[2]}, [r4:64], r6
+ vld2.8 {d0[2], d1[2]}, [r4:128], r6
+ vld2.8 {d0[2], d1[2]}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x01]
+@ CHECK: vld2.8 {d0[2], d1[2]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x56,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[2], d1[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d1[]}, [r4]
+ vld2.8 {d0[], d1[]}, [r4:16]
+ vld2.8 {d0[], d1[]}, [r4:32]
+ vld2.8 {d0[], d1[]}, [r4:64]
+ vld2.8 {d0[], d1[]}, [r4:128]
+ vld2.8 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld2.8 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0x0f,0x0d]
+@ CHECK: vld2.8 {d0[], d1[]}, [r4:16] @ encoding: [0xa4,0xf9,0x1f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d1[]}, [r4]!
+ vld2.8 {d0[], d1[]}, [r4:16]!
+ vld2.8 {d0[], d1[]}, [r4:32]!
+ vld2.8 {d0[], d1[]}, [r4:64]!
+ vld2.8 {d0[], d1[]}, [r4:128]!
+ vld2.8 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld2.8 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0x0d,0x0d]
+@ CHECK: vld2.8 {d0[], d1[]}, [r4:16]! @ encoding: [0xa4,0xf9,0x1d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d1[]}, [r4], r6
+ vld2.8 {d0[], d1[]}, [r4:16], r6
+ vld2.8 {d0[], d1[]}, [r4:32], r6
+ vld2.8 {d0[], d1[]}, [r4:64], r6
+ vld2.8 {d0[], d1[]}, [r4:128], r6
+ vld2.8 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x06,0x0d]
+@ CHECK: vld2.8 {d0[], d1[]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x16,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d2[]}, [r4]
+ vld2.8 {d0[], d2[]}, [r4:16]
+ vld2.8 {d0[], d2[]}, [r4:32]
+ vld2.8 {d0[], d2[]}, [r4:64]
+ vld2.8 {d0[], d2[]}, [r4:128]
+ vld2.8 {d0[], d2[]}, [r4:256]
+
+@ CHECK: vld2.8 {d0[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x0d]
+@ CHECK: vld2.8 {d0[], d2[]}, [r4:16] @ encoding: [0xa4,0xf9,0x3f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d2[]}, [r4]!
+ vld2.8 {d0[], d2[]}, [r4:16]!
+ vld2.8 {d0[], d2[]}, [r4:32]!
+ vld2.8 {d0[], d2[]}, [r4:64]!
+ vld2.8 {d0[], d2[]}, [r4:128]!
+ vld2.8 {d0[], d2[]}, [r4:256]!
+
+@ CHECK: vld2.8 {d0[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x0d]
+@ CHECK: vld2.8 {d0[], d2[]}, [r4:16]! @ encoding: [0xa4,0xf9,0x3d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.8 {d0[], d2[]}, [r4], r6
+ vld2.8 {d0[], d2[]}, [r4:16], r6
+ vld2.8 {d0[], d2[]}, [r4:32], r6
+ vld2.8 {d0[], d2[]}, [r4:64], r6
+ vld2.8 {d0[], d2[]}, [r4:128], r6
+ vld2.8 {d0[], d2[]}, [r4:256], r6
+
+@ CHECK: vld2.8 {d0[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x0d]
+@ CHECK: vld2.8 {d0[], d2[]}, [r4:16], r6 @ encoding: [0xa4,0xf9,0x36,0x0d]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vld2.8 {d0[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d1}, [r4]
+ vld2.16 {d0, d1}, [r4:16]
+ vld2.16 {d0, d1}, [r4:32]
+ vld2.16 {d0, d1}, [r4:64]
+ vld2.16 {d0, d1}, [r4:128]
+ vld2.16 {d0, d1}, [r4:256]
+
+@ CHECK: vld2.16 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x4f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x08]
+@ CHECK: vld2.16 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d1}, [r4]!
+ vld2.16 {d0, d1}, [r4:16]!
+ vld2.16 {d0, d1}, [r4:32]!
+ vld2.16 {d0, d1}, [r4:64]!
+ vld2.16 {d0, d1}, [r4:128]!
+ vld2.16 {d0, d1}, [r4:256]!
+
+@ CHECK: vld2.16 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x08]
+@ CHECK: vld2.16 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d1}, [r4], r6
+ vld2.16 {d0, d1}, [r4:16], r6
+ vld2.16 {d0, d1}, [r4:32], r6
+ vld2.16 {d0, d1}, [r4:64], r6
+ vld2.16 {d0, d1}, [r4:128], r6
+ vld2.16 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x08]
+@ CHECK: vld2.16 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d2}, [r4]
+ vld2.16 {d0, d2}, [r4:16]
+ vld2.16 {d0, d2}, [r4:32]
+ vld2.16 {d0, d2}, [r4:64]
+ vld2.16 {d0, d2}, [r4:128]
+ vld2.16 {d0, d2}, [r4:256]
+
+@ CHECK: vld2.16 {d0, d2}, [r4] @ encoding: [0x24,0xf9,0x4f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d2}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x09]
+@ CHECK: vld2.16 {d0, d2}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d2}, [r4]!
+ vld2.16 {d0, d2}, [r4:16]!
+ vld2.16 {d0, d2}, [r4:32]!
+ vld2.16 {d0, d2}, [r4:64]!
+ vld2.16 {d0, d2}, [r4:128]!
+ vld2.16 {d0, d2}, [r4:256]!
+
+@ CHECK: vld2.16 {d0, d2}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x09]
+@ CHECK: vld2.16 {d0, d2}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d2}, [r4], r6
+ vld2.16 {d0, d2}, [r4:16], r6
+ vld2.16 {d0, d2}, [r4:32], r6
+ vld2.16 {d0, d2}, [r4:64], r6
+ vld2.16 {d0, d2}, [r4:128], r6
+ vld2.16 {d0, d2}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x09]
+@ CHECK: vld2.16 {d0, d2}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0, d1, d2, d3}, [r4]
+ vld2.16 {d0, d1, d2, d3}, [r4:16]
+ vld2.16 {d0, d1, d2, d3}, [r4:32]
+ vld2.16 {d0, d1, d2, d3}, [r4:64]
+ vld2.16 {d0, d1, d2, d3}, [r4:128]
+ vld2.16 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x4f,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x7f,0x03]
+
+ vld2.16 {d0, d1, d2, d3}, [r4]!
+ vld2.16 {d0, d1, d2, d3}, [r4:16]!
+ vld2.16 {d0, d1, d2, d3}, [r4:32]!
+ vld2.16 {d0, d1, d2, d3}, [r4:64]!
+ vld2.16 {d0, d1, d2, d3}, [r4:128]!
+ vld2.16 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x7d,0x03]
+
+ vld2.16 {d0, d1, d2, d3}, [r4], r6
+ vld2.16 {d0, d1, d2, d3}, [r4:16], r6
+ vld2.16 {d0, d1, d2, d3}, [r4:32], r6
+ vld2.16 {d0, d1, d2, d3}, [r4:64], r6
+ vld2.16 {d0, d1, d2, d3}, [r4:128], r6
+ vld2.16 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.16 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x03]
+@ CHECK: vld2.16 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x76,0x03]
+
+ vld2.16 {d0[2], d1[2]}, [r4]
+ vld2.16 {d0[2], d1[2]}, [r4:16]
+ vld2.16 {d0[2], d1[2]}, [r4:32]
+ vld2.16 {d0[2], d1[2]}, [r4:64]
+ vld2.16 {d0[2], d1[2]}, [r4:128]
+ vld2.16 {d0[2], d1[2]}, [r4:256]
+
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4:32] @ encoding: [0xa4,0xf9,0x9f,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[2], d1[2]}, [r4]!
+ vld2.16 {d0[2], d1[2]}, [r4:16]!
+ vld2.16 {d0[2], d1[2]}, [r4:32]!
+ vld2.16 {d0[2], d1[2]}, [r4:64]!
+ vld2.16 {d0[2], d1[2]}, [r4:128]!
+ vld2.16 {d0[2], d1[2]}, [r4:256]!
+
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4:32]! @ encoding: [0xa4,0xf9,0x9d,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[2], d1[2]}, [r4], r6
+ vld2.16 {d0[2], d1[2]}, [r4:16], r6
+ vld2.16 {d0[2], d1[2]}, [r4:32], r6
+ vld2.16 {d0[2], d1[2]}, [r4:64], r6
+ vld2.16 {d0[2], d1[2]}, [r4:128], r6
+ vld2.16 {d0[2], d1[2]}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d1[2]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x96,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d1[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[2], d2[2]}, [r4]
+ vld2.16 {d0[2], d2[2]}, [r4:16]
+ vld2.16 {d0[2], d2[2]}, [r4:32]
+ vld2.16 {d0[2], d2[2]}, [r4:64]
+ vld2.16 {d0[2], d2[2]}, [r4:128]
+ vld2.16 {d0[2], d2[2]}, [r4:256]
+
+@ CHECK: vld2.16 {d0[2], d2[2]}, [r4] @ encoding: [0xa4,0xf9,0xaf,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d2[2]}, [r4:32] @ encoding: [0xa4,0xf9,0xbf,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[2], d2[2]}, [r4]!
+ vld2.16 {d0[2], d2[2]}, [r4:16]!
+ vld2.16 {d0[2], d2[2]}, [r4:32]!
+ vld2.16 {d0[2], d2[2]}, [r4:64]!
+ vld2.16 {d0[2], d2[2]}, [r4:128]!
+ vld2.16 {d0[2], d2[2]}, [r4:256]!
+
+@ CHECK: vld2.16 {d0[2], d2[2]}, [r4]! @ encoding: [0xa4,0xf9,0xad,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d2[2]}, [r4:32]! @ encoding: [0xa4,0xf9,0xbd,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[2], d2[2]}, [r4], r6
+ vld2.16 {d0[2], d2[2]}, [r4:16], r6
+ vld2.16 {d0[2], d2[2]}, [r4:32], r6
+ vld2.16 {d0[2], d2[2]}, [r4:64], r6
+ vld2.16 {d0[2], d2[2]}, [r4:128], r6
+ vld2.16 {d0[2], d2[2]}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0[2], d2[2]}, [r4], r6 @ encoding: [0xa4,0xf9,0xa6,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[2], d2[2]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0xb6,0x05]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[2], d2[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d1[]}, [r4]
+ vld2.16 {d0[], d1[]}, [r4:16]
+ vld2.16 {d0[], d1[]}, [r4:32]
+ vld2.16 {d0[], d1[]}, [r4:64]
+ vld2.16 {d0[], d1[]}, [r4:128]
+ vld2.16 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld2.16 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d1[]}, [r4:32] @ encoding: [0xa4,0xf9,0x5f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d1[]}, [r4]!
+ vld2.16 {d0[], d1[]}, [r4:16]!
+ vld2.16 {d0[], d1[]}, [r4:32]!
+ vld2.16 {d0[], d1[]}, [r4:64]!
+ vld2.16 {d0[], d1[]}, [r4:128]!
+ vld2.16 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld2.16 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d1[]}, [r4:32]! @ encoding: [0xa4,0xf9,0x5d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d1[]}, [r4], r6
+ vld2.16 {d0[], d1[]}, [r4:16], r6
+ vld2.16 {d0[], d1[]}, [r4:32], r6
+ vld2.16 {d0[], d1[]}, [r4:64], r6
+ vld2.16 {d0[], d1[]}, [r4:128], r6
+ vld2.16 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d1[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x56,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d2[]}, [r4]
+ vld2.16 {d0[], d2[]}, [r4:16]
+ vld2.16 {d0[], d2[]}, [r4:32]
+ vld2.16 {d0[], d2[]}, [r4:64]
+ vld2.16 {d0[], d2[]}, [r4:128]
+ vld2.16 {d0[], d2[]}, [r4:256]
+
+@ CHECK: vld2.16 {d0[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d2[]}, [r4:32] @ encoding: [0xa4,0xf9,0x7f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d2[]}, [r4]!
+ vld2.16 {d0[], d2[]}, [r4:16]!
+ vld2.16 {d0[], d2[]}, [r4:32]!
+ vld2.16 {d0[], d2[]}, [r4:64]!
+ vld2.16 {d0[], d2[]}, [r4:128]!
+ vld2.16 {d0[], d2[]}, [r4:256]!
+
+@ CHECK: vld2.16 {d0[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d2[]}, [r4:32]! @ encoding: [0xa4,0xf9,0x7d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.16 {d0[], d2[]}, [r4], r6
+ vld2.16 {d0[], d2[]}, [r4:16], r6
+ vld2.16 {d0[], d2[]}, [r4:32], r6
+ vld2.16 {d0[], d2[]}, [r4:64], r6
+ vld2.16 {d0[], d2[]}, [r4:128], r6
+ vld2.16 {d0[], d2[]}, [r4:256], r6
+
+@ CHECK: vld2.16 {d0[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.16 {d0[], d2[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x76,0x0d]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld2.16 {d0[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d1}, [r4]
+ vld2.32 {d0, d1}, [r4:16]
+ vld2.32 {d0, d1}, [r4:32]
+ vld2.32 {d0, d1}, [r4:64]
+ vld2.32 {d0, d1}, [r4:128]
+ vld2.32 {d0, d1}, [r4:256]
+
+@ CHECK: vld2.32 {d0, d1}, [r4] @ encoding: [0x24,0xf9,0x8f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x08]
+@ CHECK: vld2.32 {d0, d1}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d1}, [r4]!
+ vld2.32 {d0, d1}, [r4:16]!
+ vld2.32 {d0, d1}, [r4:32]!
+ vld2.32 {d0, d1}, [r4:64]!
+ vld2.32 {d0, d1}, [r4:128]!
+ vld2.32 {d0, d1}, [r4:256]!
+
+@ CHECK: vld2.32 {d0, d1}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x08]
+@ CHECK: vld2.32 {d0, d1}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d1}, [r4], r6
+ vld2.32 {d0, d1}, [r4:16], r6
+ vld2.32 {d0, d1}, [r4:32], r6
+ vld2.32 {d0, d1}, [r4:64], r6
+ vld2.32 {d0, d1}, [r4:128], r6
+ vld2.32 {d0, d1}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0, d1}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x08]
+@ CHECK: vld2.32 {d0, d1}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d2}, [r4]
+ vld2.32 {d0, d2}, [r4:16]
+ vld2.32 {d0, d2}, [r4:32]
+ vld2.32 {d0, d2}, [r4:64]
+ vld2.32 {d0, d2}, [r4:128]
+ vld2.32 {d0, d2}, [r4:256]
+
+@ CHECK: vld2.32 {d0, d2}, [r4] @ encoding: [0x24,0xf9,0x8f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d2}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x09]
+@ CHECK: vld2.32 {d0, d2}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d2}, [r4]!
+ vld2.32 {d0, d2}, [r4:16]!
+ vld2.32 {d0, d2}, [r4:32]!
+ vld2.32 {d0, d2}, [r4:64]!
+ vld2.32 {d0, d2}, [r4:128]!
+ vld2.32 {d0, d2}, [r4:256]!
+
+@ CHECK: vld2.32 {d0, d2}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x09]
+@ CHECK: vld2.32 {d0, d2}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d2}, [r4], r6
+ vld2.32 {d0, d2}, [r4:16], r6
+ vld2.32 {d0, d2}, [r4:32], r6
+ vld2.32 {d0, d2}, [r4:64], r6
+ vld2.32 {d0, d2}, [r4:128], r6
+ vld2.32 {d0, d2}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x09]
+@ CHECK: vld2.32 {d0, d2}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0, d1, d2, d3}, [r4]
+ vld2.32 {d0, d1, d2, d3}, [r4:16]
+ vld2.32 {d0, d1, d2, d3}, [r4:32]
+ vld2.32 {d0, d1, d2, d3}, [r4:64]
+ vld2.32 {d0, d1, d2, d3}, [r4:128]
+ vld2.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x8f,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0xbf,0x03]
+
+ vld2.32 {d0, d1, d2, d3}, [r4]!
+ vld2.32 {d0, d1, d2, d3}, [r4:16]!
+ vld2.32 {d0, d1, d2, d3}, [r4:32]!
+ vld2.32 {d0, d1, d2, d3}, [r4:64]!
+ vld2.32 {d0, d1, d2, d3}, [r4:128]!
+ vld2.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0xbd,0x03]
+
+ vld2.32 {d0, d1, d2, d3}, [r4], r6
+ vld2.32 {d0, d1, d2, d3}, [r4:16], r6
+ vld2.32 {d0, d1, d2, d3}, [r4:32], r6
+ vld2.32 {d0, d1, d2, d3}, [r4:64], r6
+ vld2.32 {d0, d1, d2, d3}, [r4:128], r6
+ vld2.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld2.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x03]
+@ CHECK: vld2.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0xb6,0x03]
+
+ vld2.32 {d0[1], d1[1]}, [r4]
+ vld2.32 {d0[1], d1[1]}, [r4:16]
+ vld2.32 {d0[1], d1[1]}, [r4:32]
+ vld2.32 {d0[1], d1[1]}, [r4:64]
+ vld2.32 {d0[1], d1[1]}, [r4:128]
+ vld2.32 {d0[1], d1[1]}, [r4:256]
+
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4:64] @ encoding: [0xa4,0xf9,0x9f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[1], d1[1]}, [r4]!
+ vld2.32 {d0[1], d1[1]}, [r4:16]!
+ vld2.32 {d0[1], d1[1]}, [r4:32]!
+ vld2.32 {d0[1], d1[1]}, [r4:64]!
+ vld2.32 {d0[1], d1[1]}, [r4:128]!
+ vld2.32 {d0[1], d1[1]}, [r4:256]!
+
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0x9d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[1], d1[1]}, [r4], r6
+ vld2.32 {d0[1], d1[1]}, [r4:16], r6
+ vld2.32 {d0[1], d1[1]}, [r4:32], r6
+ vld2.32 {d0[1], d1[1]}, [r4:64], r6
+ vld2.32 {d0[1], d1[1]}, [r4:128], r6
+ vld2.32 {d0[1], d1[1]}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d1[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x96,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d1[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[1], d2[1]}, [r4]
+ vld2.32 {d0[1], d2[1]}, [r4:16]
+ vld2.32 {d0[1], d2[1]}, [r4:32]
+ vld2.32 {d0[1], d2[1]}, [r4:64]
+ vld2.32 {d0[1], d2[1]}, [r4:128]
+ vld2.32 {d0[1], d2[1]}, [r4:256]
+
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4] @ encoding: [0xa4,0xf9,0xcf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4:64] @ encoding: [0xa4,0xf9,0xdf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[1], d2[1]}, [r4]!
+ vld2.32 {d0[1], d2[1]}, [r4:16]!
+ vld2.32 {d0[1], d2[1]}, [r4:32]!
+ vld2.32 {d0[1], d2[1]}, [r4:64]!
+ vld2.32 {d0[1], d2[1]}, [r4:128]!
+ vld2.32 {d0[1], d2[1]}, [r4:256]!
+
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4]! @ encoding: [0xa4,0xf9,0xcd,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0xdd,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[1], d2[1]}, [r4], r6
+ vld2.32 {d0[1], d2[1]}, [r4:16], r6
+ vld2.32 {d0[1], d2[1]}, [r4:32], r6
+ vld2.32 {d0[1], d2[1]}, [r4:64], r6
+ vld2.32 {d0[1], d2[1]}, [r4:128], r6
+ vld2.32 {d0[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0xc6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[1], d2[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0xd6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d1[]}, [r4]
+ vld2.32 {d0[], d1[]}, [r4:16]
+ vld2.32 {d0[], d1[]}, [r4:32]
+ vld2.32 {d0[], d1[]}, [r4:64]
+ vld2.32 {d0[], d1[]}, [r4:128]
+ vld2.32 {d0[], d1[]}, [r4:256]
+
+@ CHECK: vld2.32 {d0[], d1[]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d1[]}, [r4:64] @ encoding: [0xa4,0xf9,0x9f,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d1[]}, [r4]!
+ vld2.32 {d0[], d1[]}, [r4:16]!
+ vld2.32 {d0[], d1[]}, [r4:32]!
+ vld2.32 {d0[], d1[]}, [r4:64]!
+ vld2.32 {d0[], d1[]}, [r4:128]!
+ vld2.32 {d0[], d1[]}, [r4:256]!
+
+@ CHECK: vld2.32 {d0[], d1[]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d1[]}, [r4:64]! @ encoding: [0xa4,0xf9,0x9d,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d1[]}, [r4], r6
+ vld2.32 {d0[], d1[]}, [r4:16], r6
+ vld2.32 {d0[], d1[]}, [r4:32], r6
+ vld2.32 {d0[], d1[]}, [r4:64], r6
+ vld2.32 {d0[], d1[]}, [r4:128], r6
+ vld2.32 {d0[], d1[]}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0[], d1[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d1[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x96,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d1[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d2[]}, [r4]
+ vld2.32 {d0[], d2[]}, [r4:16]
+ vld2.32 {d0[], d2[]}, [r4:32]
+ vld2.32 {d0[], d2[]}, [r4:64]
+ vld2.32 {d0[], d2[]}, [r4:128]
+ vld2.32 {d0[], d2[]}, [r4:256]
+
+@ CHECK: vld2.32 {d0[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0xaf,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d2[]}, [r4:64] @ encoding: [0xa4,0xf9,0xbf,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d2[]}, [r4]!
+ vld2.32 {d0[], d2[]}, [r4:16]!
+ vld2.32 {d0[], d2[]}, [r4:32]!
+ vld2.32 {d0[], d2[]}, [r4:64]!
+ vld2.32 {d0[], d2[]}, [r4:128]!
+ vld2.32 {d0[], d2[]}, [r4:256]!
+
+@ CHECK: vld2.32 {d0[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0xad,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d2[]}, [r4:64]! @ encoding: [0xa4,0xf9,0xbd,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld2.32 {d0[], d2[]}, [r4], r6
+ vld2.32 {d0[], d2[]}, [r4:16], r6
+ vld2.32 {d0[], d2[]}, [r4:32], r6
+ vld2.32 {d0[], d2[]}, [r4:64], r6
+ vld2.32 {d0[], d2[]}, [r4:128], r6
+ vld2.32 {d0[], d2[]}, [r4:256], r6
+
+@ CHECK: vld2.32 {d0[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0xa6,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld2.32 {d0[], d2[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0xb6,0x0d]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld2.32 {d0[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d1, d2}, [r4]
+ vld3.8 {d0, d1, d2}, [r4:16]
+ vld3.8 {d0, d1, d2}, [r4:32]
+ vld3.8 {d0, d1, d2}, [r4:64]
+ vld3.8 {d0, d1, d2}, [r4:128]
+ vld3.8 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld3.8 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x0f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d1, d2}, [r4]!
+ vld3.8 {d0, d1, d2}, [r4:16]!
+ vld3.8 {d0, d1, d2}, [r4:32]!
+ vld3.8 {d0, d1, d2}, [r4:64]!
+ vld3.8 {d0, d1, d2}, [r4:128]!
+ vld3.8 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld3.8 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d1, d2}, [r4], r6
+ vld3.8 {d0, d1, d2}, [r4:16], r6
+ vld3.8 {d0, d1, d2}, [r4:32], r6
+ vld3.8 {d0, d1, d2}, [r4:64], r6
+ vld3.8 {d0, d1, d2}, [r4:128], r6
+ vld3.8 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld3.8 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d2, d4}, [r4]
+ vld3.8 {d0, d2, d4}, [r4:16]
+ vld3.8 {d0, d2, d4}, [r4:32]
+ vld3.8 {d0, d2, d4}, [r4:64]
+ vld3.8 {d0, d2, d4}, [r4:128]
+ vld3.8 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vld3.8 {d0, d2, d4}, [r4] @ encoding: [0x24,0xf9,0x0f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d2, d4}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d2, d4}, [r4]!
+ vld3.8 {d0, d2, d4}, [r4:16]!
+ vld3.8 {d0, d2, d4}, [r4:32]!
+ vld3.8 {d0, d2, d4}, [r4:64]!
+ vld3.8 {d0, d2, d4}, [r4:128]!
+ vld3.8 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vld3.8 {d0, d2, d4}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d2, d4}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0, d2, d4}, [r4], r6
+ vld3.8 {d0, d2, d4}, [r4:16], r6
+ vld3.8 {d0, d2, d4}, [r4:32], r6
+ vld3.8 {d0, d2, d4}, [r4:64], r6
+ vld3.8 {d0, d2, d4}, [r4:128], r6
+ vld3.8 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vld3.8 {d0, d2, d4}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.8 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.8 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4]
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:16]
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:32]
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:64]
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:128]
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vld3.8 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4]!
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vld3.8 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4], r6
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vld3.8 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vld3.8 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d1[], d2[]}, [r4]
+ vld3.8 {d0[], d1[], d2[]}, [r4:16]
+ vld3.8 {d0[], d1[], d2[]}, [r4:32]
+ vld3.8 {d0[], d1[], d2[]}, [r4:64]
+ vld3.8 {d0[], d1[], d2[]}, [r4:128]
+ vld3.8 {d0[], d1[], d2[]}, [r4:256]
+
+@ CHECK: vld3.8 {d0[], d1[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0x0f,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d1[], d2[]}, [r4]!
+ vld3.8 {d0[], d1[], d2[]}, [r4:16]!
+ vld3.8 {d0[], d1[], d2[]}, [r4:32]!
+ vld3.8 {d0[], d1[], d2[]}, [r4:64]!
+ vld3.8 {d0[], d1[], d2[]}, [r4:128]!
+ vld3.8 {d0[], d1[], d2[]}, [r4:256]!
+
+@ CHECK: vld3.8 {d0[], d1[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0x0d,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d1[], d2[]}, [r4], r6
+ vld3.8 {d0[], d1[], d2[]}, [r4:16], r6
+ vld3.8 {d0[], d1[], d2[]}, [r4:32], r6
+ vld3.8 {d0[], d1[], d2[]}, [r4:64], r6
+ vld3.8 {d0[], d1[], d2[]}, [r4:128], r6
+ vld3.8 {d0[], d1[], d2[]}, [r4:256], r6
+
+@ CHECK: vld3.8 {d0[], d1[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x06,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d1[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d2[], d4[]}, [r4]
+ vld3.8 {d0[], d2[], d4[]}, [r4:16]
+ vld3.8 {d0[], d2[], d4[]}, [r4:32]
+ vld3.8 {d0[], d2[], d4[]}, [r4:64]
+ vld3.8 {d0[], d2[], d4[]}, [r4:128]
+ vld3.8 {d0[], d2[], d4[]}, [r4:256]
+
+@ CHECK: vld3.8 {d0[], d2[], d4[]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d2[], d4[]}, [r4]!
+ vld3.8 {d0[], d2[], d4[]}, [r4:16]!
+ vld3.8 {d0[], d2[], d4[]}, [r4:32]!
+ vld3.8 {d0[], d2[], d4[]}, [r4:64]!
+ vld3.8 {d0[], d2[], d4[]}, [r4:128]!
+ vld3.8 {d0[], d2[], d4[]}, [r4:256]!
+
+@ CHECK: vld3.8 {d0[], d2[], d4[]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.8 {d0[], d2[], d4[]}, [r4], r6
+ vld3.8 {d0[], d2[], d4[]}, [r4:16], r6
+ vld3.8 {d0[], d2[], d4[]}, [r4:32], r6
+ vld3.8 {d0[], d2[], d4[]}, [r4:64], r6
+ vld3.8 {d0[], d2[], d4[]}, [r4:128], r6
+ vld3.8 {d0[], d2[], d4[]}, [r4:256], r6
+
+@ CHECK: vld3.8 {d0[], d2[], d4[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.8 {d0[], d2[], d4[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d1, d2}, [r4]
+ vld3.16 {d0, d1, d2}, [r4:16]
+ vld3.16 {d0, d1, d2}, [r4:32]
+ vld3.16 {d0, d1, d2}, [r4:64]
+ vld3.16 {d0, d1, d2}, [r4:128]
+ vld3.16 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld3.16 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x4f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d1, d2}, [r4]!
+ vld3.16 {d0, d1, d2}, [r4:16]!
+ vld3.16 {d0, d1, d2}, [r4:32]!
+ vld3.16 {d0, d1, d2}, [r4:64]!
+ vld3.16 {d0, d1, d2}, [r4:128]!
+ vld3.16 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld3.16 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d1, d2}, [r4], r6
+ vld3.16 {d0, d1, d2}, [r4:16], r6
+ vld3.16 {d0, d1, d2}, [r4:32], r6
+ vld3.16 {d0, d1, d2}, [r4:64], r6
+ vld3.16 {d0, d1, d2}, [r4:128], r6
+ vld3.16 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d2, d4}, [r4]
+ vld3.16 {d0, d2, d4}, [r4:16]
+ vld3.16 {d0, d2, d4}, [r4:32]
+ vld3.16 {d0, d2, d4}, [r4:64]
+ vld3.16 {d0, d2, d4}, [r4:128]
+ vld3.16 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vld3.16 {d0, d2, d4}, [r4] @ encoding: [0x24,0xf9,0x4f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d2, d4}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d2, d4}, [r4]!
+ vld3.16 {d0, d2, d4}, [r4:16]!
+ vld3.16 {d0, d2, d4}, [r4:32]!
+ vld3.16 {d0, d2, d4}, [r4:64]!
+ vld3.16 {d0, d2, d4}, [r4:128]!
+ vld3.16 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vld3.16 {d0, d2, d4}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d2, d4}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0, d2, d4}, [r4], r6
+ vld3.16 {d0, d2, d4}, [r4:16], r6
+ vld3.16 {d0, d2, d4}, [r4:32], r6
+ vld3.16 {d0, d2, d4}, [r4:64], r6
+ vld3.16 {d0, d2, d4}, [r4:128], r6
+ vld3.16 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0, d2, d4}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.16 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.16 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4]
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:16]
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:32]
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:64]
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:128]
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vld3.16 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4]!
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vld3.16 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4], r6
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vld3.16 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4]
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:16]
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:32]
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:64]
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:128]
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:256]
+
+@ CHECK: vld3.16 {d0[1], d2[1], d4[1]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4]!
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:16]!
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:32]!
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:64]!
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:128]!
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:256]!
+
+@ CHECK: vld3.16 {d0[1], d2[1], d4[1]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4], r6
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:16], r6
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:32], r6
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:64], r6
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:128], r6
+ vld3.16 {d0[1], d2[1], d4[1]}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0[1], d2[1], d4[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[1], d2[1], d4[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d1[], d2[]}, [r4]
+ vld3.16 {d0[], d1[], d2[]}, [r4:16]
+ vld3.16 {d0[], d1[], d2[]}, [r4:32]
+ vld3.16 {d0[], d1[], d2[]}, [r4:64]
+ vld3.16 {d0[], d1[], d2[]}, [r4:128]
+ vld3.16 {d0[], d1[], d2[]}, [r4:256]
+
+@ CHECK: vld3.16 {d0[], d1[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d1[], d2[]}, [r4]!
+ vld3.16 {d0[], d1[], d2[]}, [r4:16]!
+ vld3.16 {d0[], d1[], d2[]}, [r4:32]!
+ vld3.16 {d0[], d1[], d2[]}, [r4:64]!
+ vld3.16 {d0[], d1[], d2[]}, [r4:128]!
+ vld3.16 {d0[], d1[], d2[]}, [r4:256]!
+
+@ CHECK: vld3.16 {d0[], d1[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d1[], d2[]}, [r4], r6
+ vld3.16 {d0[], d1[], d2[]}, [r4:16], r6
+ vld3.16 {d0[], d1[], d2[]}, [r4:32], r6
+ vld3.16 {d0[], d1[], d2[]}, [r4:64], r6
+ vld3.16 {d0[], d1[], d2[]}, [r4:128], r6
+ vld3.16 {d0[], d1[], d2[]}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0[], d1[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d1[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d2[], d4[]}, [r4]
+ vld3.16 {d0[], d2[], d4[]}, [r4:16]
+ vld3.16 {d0[], d2[], d4[]}, [r4:32]
+ vld3.16 {d0[], d2[], d4[]}, [r4:64]
+ vld3.16 {d0[], d2[], d4[]}, [r4:128]
+ vld3.16 {d0[], d2[], d4[]}, [r4:256]
+
+@ CHECK: vld3.16 {d0[], d2[], d4[]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d2[], d4[]}, [r4]!
+ vld3.16 {d0[], d2[], d4[]}, [r4:16]!
+ vld3.16 {d0[], d2[], d4[]}, [r4:32]!
+ vld3.16 {d0[], d2[], d4[]}, [r4:64]!
+ vld3.16 {d0[], d2[], d4[]}, [r4:128]!
+ vld3.16 {d0[], d2[], d4[]}, [r4:256]!
+
+@ CHECK: vld3.16 {d0[], d2[], d4[]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.16 {d0[], d2[], d4[]}, [r4], r6
+ vld3.16 {d0[], d2[], d4[]}, [r4:16], r6
+ vld3.16 {d0[], d2[], d4[]}, [r4:32], r6
+ vld3.16 {d0[], d2[], d4[]}, [r4:64], r6
+ vld3.16 {d0[], d2[], d4[]}, [r4:128], r6
+ vld3.16 {d0[], d2[], d4[]}, [r4:256], r6
+
+@ CHECK: vld3.16 {d0[], d2[], d4[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.16 {d0[], d2[], d4[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0, d1, d2}, [r4]
+ vld3.32 {d0, d1, d2}, [r4:16]
+ vld3.32 {d0, d1, d2}, [r4:32]
+ vld3.32 {d0, d1, d2}, [r4:64]
+ vld3.32 {d0, d1, d2}, [r4:128]
+ vld3.32 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vld3.32 {d0, d1, d2}, [r4] @ encoding: [0x24,0xf9,0x8f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d1, d2}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0, d1, d2}, [r4]!
+ vld3.32 {d0, d1, d2}, [r4:16]!
+ vld3.32 {d0, d1, d2}, [r4:32]!
+ vld3.32 {d0, d1, d2}, [r4:64]!
+ vld3.32 {d0, d1, d2}, [r4:128]!
+ vld3.32 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vld3.32 {d0, d1, d2}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d1, d2}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0, d1, d2}, [r4], r6
+ vld3.32 {d0, d1, d2}, [r4:16], r6
+ vld3.32 {d0, d1, d2}, [r4:32], r6
+ vld3.32 {d0, d1, d2}, [r4:64], r6
+ vld3.32 {d0, d1, d2}, [r4:128], r6
+ vld3.32 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0, d1, d2}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0, d2, d4}, [r4]
+ vld3.32 {d0, d2, d4}, [r4:16]
+ vld3.32 {d0, d2, d4}, [r4:32]
+ vld3.32 {d0, d2, d4}, [r4:64]
+ vld3.32 {d0, d2, d4}, [r4:128]
+ vld3.32 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vld3.32 {d0, d2, d4}, [r4] @ encoding: [0x24,0xf9,0x8f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d2, d4}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0, d2, d4}, [r4]!
+ vld3.32 {d0, d2, d4}, [r4:16]!
+ vld3.32 {d0, d2, d4}, [r4:32]!
+ vld3.32 {d0, d2, d4}, [r4:64]!
+ vld3.32 {d0, d2, d4}, [r4:128]!
+ vld3.32 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vld3.32 {d0, d2, d4}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d2, d4}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0, d2, d4}, [r4], r6
+ vld3.32 {d0, d2, d4}, [r4:16], r6
+ vld3.32 {d0, d2, d4}, [r4:32], r6
+ vld3.32 {d0, d2, d4}, [r4:64], r6
+ vld3.32 {d0, d2, d4}, [r4:128], r6
+ vld3.32 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0, d2, d4}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld3.32 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld3.32 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4]
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:16]
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:32]
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:64]
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:128]
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vld3.32 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4]!
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vld3.32 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4], r6
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vld3.32 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4]
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:16]
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:32]
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:64]
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:128]
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:256]
+
+@ CHECK: vld3.32 {d0[1], d2[1], d4[1]}, [r4] @ encoding: [0xa4,0xf9,0xcf,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4]!
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:16]!
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:32]!
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:64]!
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:128]!
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:256]!
+
+@ CHECK: vld3.32 {d0[1], d2[1], d4[1]}, [r4]! @ encoding: [0xa4,0xf9,0xcd,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4], r6
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:16], r6
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:32], r6
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:64], r6
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:128], r6
+ vld3.32 {d0[1], d2[1], d4[1]}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0[1], d2[1], d4[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0xc6,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[1], d2[1], d4[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d1[], d2[]}, [r4]
+ vld3.32 {d0[], d1[], d2[]}, [r4:16]
+ vld3.32 {d0[], d1[], d2[]}, [r4:32]
+ vld3.32 {d0[], d1[], d2[]}, [r4:64]
+ vld3.32 {d0[], d1[], d2[]}, [r4:128]
+ vld3.32 {d0[], d1[], d2[]}, [r4:256]
+
+@ CHECK: vld3.32 {d0[], d1[], d2[]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d1[], d2[]}, [r4]!
+ vld3.32 {d0[], d1[], d2[]}, [r4:16]!
+ vld3.32 {d0[], d1[], d2[]}, [r4:32]!
+ vld3.32 {d0[], d1[], d2[]}, [r4:64]!
+ vld3.32 {d0[], d1[], d2[]}, [r4:128]!
+ vld3.32 {d0[], d1[], d2[]}, [r4:256]!
+
+@ CHECK: vld3.32 {d0[], d1[], d2[]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d1[], d2[]}, [r4], r6
+ vld3.32 {d0[], d1[], d2[]}, [r4:16], r6
+ vld3.32 {d0[], d1[], d2[]}, [r4:32], r6
+ vld3.32 {d0[], d1[], d2[]}, [r4:64], r6
+ vld3.32 {d0[], d1[], d2[]}, [r4:128], r6
+ vld3.32 {d0[], d1[], d2[]}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0[], d1[], d2[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d1[], d2[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d2[], d4[]}, [r4]
+ vld3.32 {d0[], d2[], d4[]}, [r4:16]
+ vld3.32 {d0[], d2[], d4[]}, [r4:32]
+ vld3.32 {d0[], d2[], d4[]}, [r4:64]
+ vld3.32 {d0[], d2[], d4[]}, [r4:128]
+ vld3.32 {d0[], d2[], d4[]}, [r4:256]
+
+@ CHECK: vld3.32 {d0[], d2[], d4[]}, [r4] @ encoding: [0xa4,0xf9,0xaf,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d2[], d4[]}, [r4]!
+ vld3.32 {d0[], d2[], d4[]}, [r4:16]!
+ vld3.32 {d0[], d2[], d4[]}, [r4:32]!
+ vld3.32 {d0[], d2[], d4[]}, [r4:64]!
+ vld3.32 {d0[], d2[], d4[]}, [r4:128]!
+ vld3.32 {d0[], d2[], d4[]}, [r4:256]!
+
+@ CHECK: vld3.32 {d0[], d2[], d4[]}, [r4]! @ encoding: [0xa4,0xf9,0xad,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld3.32 {d0[], d2[], d4[]}, [r4], r6
+ vld3.32 {d0[], d2[], d4[]}, [r4:16], r6
+ vld3.32 {d0[], d2[], d4[]}, [r4:32], r6
+ vld3.32 {d0[], d2[], d4[]}, [r4:64], r6
+ vld3.32 {d0[], d2[], d4[]}, [r4:128], r6
+ vld3.32 {d0[], d2[], d4[]}, [r4:256], r6
+
+@ CHECK: vld3.32 {d0[], d2[], d4[]}, [r4], r6 @ encoding: [0xa4,0xf9,0xa6,0x0e]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vld3.32 {d0[], d2[], d4[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0, d1, d2, d3}, [r4]
+ vld4.8 {d0, d1, d2, d3}, [r4:16]
+ vld4.8 {d0, d1, d2, d3}, [r4:32]
+ vld4.8 {d0, d1, d2, d3}, [r4:64]
+ vld4.8 {d0, d1, d2, d3}, [r4:128]
+ vld4.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x0f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x3f,0x00]
+
+ vld4.8 {d0, d1, d2, d3}, [r4]!
+ vld4.8 {d0, d1, d2, d3}, [r4:16]!
+ vld4.8 {d0, d1, d2, d3}, [r4:32]!
+ vld4.8 {d0, d1, d2, d3}, [r4:64]!
+ vld4.8 {d0, d1, d2, d3}, [r4:128]!
+ vld4.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x3d,0x00]
+
+ vld4.8 {d0, d1, d2, d3}, [r4], r6
+ vld4.8 {d0, d1, d2, d3}, [r4:16], r6
+ vld4.8 {d0, d1, d2, d3}, [r4:32], r6
+ vld4.8 {d0, d1, d2, d3}, [r4:64], r6
+ vld4.8 {d0, d1, d2, d3}, [r4:128], r6
+ vld4.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x00]
+@ CHECK: vld4.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x36,0x00]
+
+ vld4.8 {d0, d2, d4, d6}, [r4]
+ vld4.8 {d0, d2, d4, d6}, [r4:16]
+ vld4.8 {d0, d2, d4, d6}, [r4:32]
+ vld4.8 {d0, d2, d4, d6}, [r4:64]
+ vld4.8 {d0, d2, d4, d6}, [r4:128]
+ vld4.8 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4] @ encoding: [0x24,0xf9,0x0f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x24,0xf9,0x2f,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x24,0xf9,0x3f,0x01]
+
+ vld4.8 {d0, d2, d4, d6}, [r4]!
+ vld4.8 {d0, d2, d4, d6}, [r4:16]!
+ vld4.8 {d0, d2, d4, d6}, [r4:32]!
+ vld4.8 {d0, d2, d4, d6}, [r4:64]!
+ vld4.8 {d0, d2, d4, d6}, [r4:128]!
+ vld4.8 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4]! @ encoding: [0x24,0xf9,0x0d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x24,0xf9,0x1d,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x24,0xf9,0x2d,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x24,0xf9,0x3d,0x01]
+
+ vld4.8 {d0, d2, d4, d6}, [r4], r6
+ vld4.8 {d0, d2, d4, d6}, [r4:16], r6
+ vld4.8 {d0, d2, d4, d6}, [r4:32], r6
+ vld4.8 {d0, d2, d4, d6}, [r4:64], r6
+ vld4.8 {d0, d2, d4, d6}, [r4:128], r6
+ vld4.8 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x24,0xf9,0x06,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.8 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x24,0xf9,0x16,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x24,0xf9,0x26,0x01]
+@ CHECK: vld4.8 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x24,0xf9,0x36,0x01]
+
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32] @ encoding: [0xa4,0xf9,0x3f,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]! @ encoding: [0xa4,0xf9,0x3d,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x36,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4]
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16]
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32]
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64]
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128]
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256]
+
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4] @ encoding: [0xa4,0xf9,0x0f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32] @ encoding: [0xa4,0xf9,0x1f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4]!
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16]!
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32]!
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64]!
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128]!
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256]!
+
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4]! @ encoding: [0xa4,0xf9,0x0d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32]! @ encoding: [0xa4,0xf9,0x1d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4], r6
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32], r6
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64], r6
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128], r6
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x06,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x16,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4]
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16]
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32]
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64]
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128]
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256]
+
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4] @ encoding: [0xa4,0xf9,0x2f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32] @ encoding: [0xa4,0xf9,0x3f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+	vld4.8 {d0[], d2[], d4[], d6[]}, [r4]!
+	vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16]!
+	vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32]!
+	vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64]!
+	vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128]!
+	vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256]!
+
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4]! @ encoding: [0xa4,0xf9,0x2d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32]! @ encoding: [0xa4,0xf9,0x3d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4], r6
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32], r6
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64], r6
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128], r6
+ vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x26,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:32], r6 @ encoding: [0xa4,0xf9,0x36,0x0f]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vld4.8 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0, d1, d2, d3}, [r4]
+ vld4.16 {d0, d1, d2, d3}, [r4:16]
+ vld4.16 {d0, d1, d2, d3}, [r4:32]
+ vld4.16 {d0, d1, d2, d3}, [r4:64]
+ vld4.16 {d0, d1, d2, d3}, [r4:128]
+ vld4.16 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x4f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0x7f,0x00]
+
+ vld4.16 {d0, d1, d2, d3}, [r4]!
+ vld4.16 {d0, d1, d2, d3}, [r4:16]!
+ vld4.16 {d0, d1, d2, d3}, [r4:32]!
+ vld4.16 {d0, d1, d2, d3}, [r4:64]!
+ vld4.16 {d0, d1, d2, d3}, [r4:128]!
+ vld4.16 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0x7d,0x00]
+
+ vld4.16 {d0, d1, d2, d3}, [r4], r6
+ vld4.16 {d0, d1, d2, d3}, [r4:16], r6
+ vld4.16 {d0, d1, d2, d3}, [r4:32], r6
+ vld4.16 {d0, d1, d2, d3}, [r4:64], r6
+ vld4.16 {d0, d1, d2, d3}, [r4:128], r6
+ vld4.16 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x00]
+@ CHECK: vld4.16 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0x76,0x00]
+
+ vld4.16 {d0, d2, d4, d6}, [r4]
+ vld4.16 {d0, d2, d4, d6}, [r4:16]
+ vld4.16 {d0, d2, d4, d6}, [r4:32]
+ vld4.16 {d0, d2, d4, d6}, [r4:64]
+ vld4.16 {d0, d2, d4, d6}, [r4:128]
+ vld4.16 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4] @ encoding: [0x24,0xf9,0x4f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x24,0xf9,0x5f,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x24,0xf9,0x6f,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x24,0xf9,0x7f,0x01]
+
+ vld4.16 {d0, d2, d4, d6}, [r4]!
+ vld4.16 {d0, d2, d4, d6}, [r4:16]!
+ vld4.16 {d0, d2, d4, d6}, [r4:32]!
+ vld4.16 {d0, d2, d4, d6}, [r4:64]!
+ vld4.16 {d0, d2, d4, d6}, [r4:128]!
+ vld4.16 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4]! @ encoding: [0x24,0xf9,0x4d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x24,0xf9,0x5d,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x24,0xf9,0x6d,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x24,0xf9,0x7d,0x01]
+
+ vld4.16 {d0, d2, d4, d6}, [r4], r6
+ vld4.16 {d0, d2, d4, d6}, [r4:16], r6
+ vld4.16 {d0, d2, d4, d6}, [r4:32], r6
+ vld4.16 {d0, d2, d4, d6}, [r4:64], r6
+ vld4.16 {d0, d2, d4, d6}, [r4:128], r6
+ vld4.16 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x24,0xf9,0x46,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.16 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x24,0xf9,0x56,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x24,0xf9,0x66,0x01]
+@ CHECK: vld4.16 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x24,0xf9,0x76,0x01]
+
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64] @ encoding: [0xa4,0xf9,0x5f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0x5d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x56,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64] @ encoding: [0xa4,0xf9,0x7f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]!
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]!
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0x7d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+ vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x76,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4]
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16]
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32]
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64]
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128]
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256]
+
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4] @ encoding: [0xa4,0xf9,0x4f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64] @ encoding: [0xa4,0xf9,0x5f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4]!
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16]!
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32]!
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64]!
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128]!
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256]!
+
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4]! @ encoding: [0xa4,0xf9,0x4d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64]! @ encoding: [0xa4,0xf9,0x5d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4], r6
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32], r6
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64], r6
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128], r6
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x46,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x56,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4]
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16]
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32]
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64]
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128]
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256]
+
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4] @ encoding: [0xa4,0xf9,0x6f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64] @ encoding: [0xa4,0xf9,0x7f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4]!
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16]!
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32]!
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64]!
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128]!
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256]!
+
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4]! @ encoding: [0xa4,0xf9,0x6d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64]! @ encoding: [0xa4,0xf9,0x7d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4], r6
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32], r6
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64], r6
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128], r6
+ vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x66,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x76,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vld4.16 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0, d1, d2, d3}, [r4]
+ vld4.32 {d0, d1, d2, d3}, [r4:16]
+ vld4.32 {d0, d1, d2, d3}, [r4:32]
+ vld4.32 {d0, d1, d2, d3}, [r4:64]
+ vld4.32 {d0, d1, d2, d3}, [r4:128]
+ vld4.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x24,0xf9,0x8f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x24,0xf9,0xbf,0x00]
+
+ vld4.32 {d0, d1, d2, d3}, [r4]!
+ vld4.32 {d0, d1, d2, d3}, [r4:16]!
+ vld4.32 {d0, d1, d2, d3}, [r4:32]!
+ vld4.32 {d0, d1, d2, d3}, [r4:64]!
+ vld4.32 {d0, d1, d2, d3}, [r4:128]!
+ vld4.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x24,0xf9,0xbd,0x00]
+
+ vld4.32 {d0, d1, d2, d3}, [r4], r6
+ vld4.32 {d0, d1, d2, d3}, [r4:16], r6
+ vld4.32 {d0, d1, d2, d3}, [r4:32], r6
+ vld4.32 {d0, d1, d2, d3}, [r4:64], r6
+ vld4.32 {d0, d1, d2, d3}, [r4:128], r6
+ vld4.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x00]
+@ CHECK: vld4.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x24,0xf9,0xb6,0x00]
+
+ vld4.32 {d0, d2, d4, d6}, [r4]
+ vld4.32 {d0, d2, d4, d6}, [r4:16]
+ vld4.32 {d0, d2, d4, d6}, [r4:32]
+ vld4.32 {d0, d2, d4, d6}, [r4:64]
+ vld4.32 {d0, d2, d4, d6}, [r4:128]
+ vld4.32 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4] @ encoding: [0x24,0xf9,0x8f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x24,0xf9,0x9f,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x24,0xf9,0xaf,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x24,0xf9,0xbf,0x01]
+
+ vld4.32 {d0, d2, d4, d6}, [r4]!
+ vld4.32 {d0, d2, d4, d6}, [r4:16]!
+ vld4.32 {d0, d2, d4, d6}, [r4:32]!
+ vld4.32 {d0, d2, d4, d6}, [r4:64]!
+ vld4.32 {d0, d2, d4, d6}, [r4:128]!
+ vld4.32 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4]! @ encoding: [0x24,0xf9,0x8d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x24,0xf9,0x9d,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x24,0xf9,0xad,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x24,0xf9,0xbd,0x01]
+
+ vld4.32 {d0, d2, d4, d6}, [r4], r6
+ vld4.32 {d0, d2, d4, d6}, [r4:16], r6
+ vld4.32 {d0, d2, d4, d6}, [r4:32], r6
+ vld4.32 {d0, d2, d4, d6}, [r4:64], r6
+ vld4.32 {d0, d2, d4, d6}, [r4:128], r6
+ vld4.32 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x24,0xf9,0x86,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vld4.32 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x24,0xf9,0x96,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x24,0xf9,0xa6,0x01]
+@ CHECK: vld4.32 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x24,0xf9,0xb6,0x01]
+
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64] @ encoding: [0xa4,0xf9,0x9f,0x0b]
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128] @ encoding: [0xa4,0xf9,0xaf,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0x9d,0x0b]
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]! @ encoding: [0xa4,0xf9,0xad,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x96,0x0b]
+@ CHECK: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6 @ encoding: [0xa4,0xf9,0xa6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4] @ encoding: [0xa4,0xf9,0xcf,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64] @ encoding: [0xa4,0xf9,0xdf,0x0b]
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128] @ encoding: [0xa4,0xf9,0xef,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]!
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]!
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]! @ encoding: [0xa4,0xf9,0xcd,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]! @ encoding: [0xa4,0xf9,0xdd,0x0b]
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]! @ encoding: [0xa4,0xf9,0xed,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+ vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6 @ encoding: [0xa4,0xf9,0xc6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0xd6,0x0b]
+@ CHECK: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6 @ encoding: [0xa4,0xf9,0xe6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4]
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16]
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32]
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64]
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128]
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256]
+
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4] @ encoding: [0xa4,0xf9,0x8f,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64] @ encoding: [0xa4,0xf9,0x9f,0x0f]
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128] @ encoding: [0xa4,0xf9,0xdf,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4]!
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16]!
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32]!
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64]!
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128]!
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256]!
+
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4]! @ encoding: [0xa4,0xf9,0x8d,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64]! @ encoding: [0xa4,0xf9,0x9d,0x0f]
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128]! @ encoding: [0xa4,0xf9,0xdd,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4], r6
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32], r6
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64], r6
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128], r6
+ vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4], r6 @ encoding: [0xa4,0xf9,0x86,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0x96,0x0f]
+@ CHECK: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:128], r6 @ encoding: [0xa4,0xf9,0xd6,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d1[], d2[], d3[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4]
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16]
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32]
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64]
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128]
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256]
+
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4] @ encoding: [0xa4,0xf9,0xaf,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64] @ encoding: [0xa4,0xf9,0xbf,0x0f]
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128] @ encoding: [0xa4,0xf9,0xff,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4]!
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16]!
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32]!
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64]!
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128]!
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256]!
+
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4]! @ encoding: [0xa4,0xf9,0xad,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64]! @ encoding: [0xa4,0xf9,0xbd,0x0f]
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128]! @ encoding: [0xa4,0xf9,0xfd,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4], r6
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32], r6
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64], r6
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128], r6
+ vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4], r6 @ encoding: [0xa4,0xf9,0xa6,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:64], r6 @ encoding: [0xa4,0xf9,0xb6,0x0f]
+@ CHECK: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:128], r6 @ encoding: [0xa4,0xf9,0xf6,0x0f]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vld4.32 {d0[], d2[], d4[], d6[]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0}, [r4]
+ vst1.8 {d0}, [r4:16]
+ vst1.8 {d0}, [r4:32]
+ vst1.8 {d0}, [r4:64]
+ vst1.8 {d0}, [r4:128]
+ vst1.8 {d0}, [r4:256]
+
+@ CHECK: vst1.8 {d0}, [r4] @ encoding: [0x04,0xf9,0x0f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0}, [r4]!
+ vst1.8 {d0}, [r4:16]!
+ vst1.8 {d0}, [r4:32]!
+ vst1.8 {d0}, [r4:64]!
+ vst1.8 {d0}, [r4:128]!
+ vst1.8 {d0}, [r4:256]!
+
+@ CHECK: vst1.8 {d0}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0}, [r4], r6
+ vst1.8 {d0}, [r4:16], r6
+ vst1.8 {d0}, [r4:32], r6
+ vst1.8 {d0}, [r4:64], r6
+ vst1.8 {d0}, [r4:128], r6
+ vst1.8 {d0}, [r4:256], r6
+
+@ CHECK: vst1.8 {d0}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1}, [r4]
+ vst1.8 {d0, d1}, [r4:16]
+ vst1.8 {d0, d1}, [r4:32]
+ vst1.8 {d0, d1}, [r4:64]
+ vst1.8 {d0, d1}, [r4:128]
+ vst1.8 {d0, d1}, [r4:256]
+
+@ CHECK: vst1.8 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0x0f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x0a]
+@ CHECK: vst1.8 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1}, [r4]!
+ vst1.8 {d0, d1}, [r4:16]!
+ vst1.8 {d0, d1}, [r4:32]!
+ vst1.8 {d0, d1}, [r4:64]!
+ vst1.8 {d0, d1}, [r4:128]!
+ vst1.8 {d0, d1}, [r4:256]!
+
+@ CHECK: vst1.8 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x0a]
+@ CHECK: vst1.8 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1}, [r4], r6
+ vst1.8 {d0, d1}, [r4:16], r6
+ vst1.8 {d0, d1}, [r4:32], r6
+ vst1.8 {d0, d1}, [r4:64], r6
+ vst1.8 {d0, d1}, [r4:128], r6
+ vst1.8 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst1.8 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x0a]
+@ CHECK: vst1.8 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1, d2}, [r4]
+ vst1.8 {d0, d1, d2}, [r4:16]
+ vst1.8 {d0, d1, d2}, [r4:32]
+ vst1.8 {d0, d1, d2}, [r4:64]
+ vst1.8 {d0, d1, d2}, [r4:128]
+ vst1.8 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst1.8 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x0f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1, d2}, [r4]!
+ vst1.8 {d0, d1, d2}, [r4:16]!
+ vst1.8 {d0, d1, d2}, [r4:32]!
+ vst1.8 {d0, d1, d2}, [r4:64]!
+ vst1.8 {d0, d1, d2}, [r4:128]!
+ vst1.8 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst1.8 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1, d2}, [r4], r6
+ vst1.8 {d0, d1, d2}, [r4:16], r6
+ vst1.8 {d0, d1, d2}, [r4:32], r6
+ vst1.8 {d0, d1, d2}, [r4:64], r6
+ vst1.8 {d0, d1, d2}, [r4:128], r6
+ vst1.8 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst1.8 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0, d1, d2, d3}, [r4]
+ vst1.8 {d0, d1, d2, d3}, [r4:16]
+ vst1.8 {d0, d1, d2, d3}, [r4:32]
+ vst1.8 {d0, d1, d2, d3}, [r4:64]
+ vst1.8 {d0, d1, d2, d3}, [r4:128]
+ vst1.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x0f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0x3f,0x02]
+
+ vst1.8 {d0, d1, d2, d3}, [r4]!
+ vst1.8 {d0, d1, d2, d3}, [r4:16]!
+ vst1.8 {d0, d1, d2, d3}, [r4:32]!
+ vst1.8 {d0, d1, d2, d3}, [r4:64]!
+ vst1.8 {d0, d1, d2, d3}, [r4:128]!
+ vst1.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0x3d,0x02]
+
+ vst1.8 {d0, d1, d2, d3}, [r4], r6
+ vst1.8 {d0, d1, d2, d3}, [r4:16], r6
+ vst1.8 {d0, d1, d2, d3}, [r4:32], r6
+ vst1.8 {d0, d1, d2, d3}, [r4:64], r6
+ vst1.8 {d0, d1, d2, d3}, [r4:128], r6
+ vst1.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x02]
+@ CHECK: vst1.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0x36,0x02]
+
+ vst1.8 {d0[2]}, [r4]
+ vst1.8 {d0[2]}, [r4:16]
+ vst1.8 {d0[2]}, [r4:32]
+ vst1.8 {d0[2]}, [r4:64]
+ vst1.8 {d0[2]}, [r4:128]
+ vst1.8 {d0[2]}, [r4:256]
+
+@ CHECK: vst1.8 {d0[2]}, [r4] @ encoding: [0x84,0xf9,0x4f,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0[2]}, [r4]!
+ vst1.8 {d0[2]}, [r4:16]!
+ vst1.8 {d0[2]}, [r4:32]!
+ vst1.8 {d0[2]}, [r4:64]!
+ vst1.8 {d0[2]}, [r4:128]!
+ vst1.8 {d0[2]}, [r4:256]!
+
+@ CHECK: vst1.8 {d0[2]}, [r4]! @ encoding: [0x84,0xf9,0x4d,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.8 {d0[2]}, [r4], r6
+ vst1.8 {d0[2]}, [r4:16], r6
+ vst1.8 {d0[2]}, [r4:32], r6
+ vst1.8 {d0[2]}, [r4:64], r6
+ vst1.8 {d0[2]}, [r4:128], r6
+ vst1.8 {d0[2]}, [r4:256], r6
+
+@ CHECK: vst1.8 {d0[2]}, [r4], r6 @ encoding: [0x84,0xf9,0x46,0x00]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst1.8 {d0[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0}, [r4]
+ vst1.16 {d0}, [r4:16]
+ vst1.16 {d0}, [r4:32]
+ vst1.16 {d0}, [r4:64]
+ vst1.16 {d0}, [r4:128]
+ vst1.16 {d0}, [r4:256]
+
+@ CHECK: vst1.16 {d0}, [r4] @ encoding: [0x04,0xf9,0x4f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0}, [r4]!
+ vst1.16 {d0}, [r4:16]!
+ vst1.16 {d0}, [r4:32]!
+ vst1.16 {d0}, [r4:64]!
+ vst1.16 {d0}, [r4:128]!
+ vst1.16 {d0}, [r4:256]!
+
+@ CHECK: vst1.16 {d0}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0}, [r4], r6
+ vst1.16 {d0}, [r4:16], r6
+ vst1.16 {d0}, [r4:32], r6
+ vst1.16 {d0}, [r4:64], r6
+ vst1.16 {d0}, [r4:128], r6
+ vst1.16 {d0}, [r4:256], r6
+
+@ CHECK: vst1.16 {d0}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1}, [r4]
+ vst1.16 {d0, d1}, [r4:16]
+ vst1.16 {d0, d1}, [r4:32]
+ vst1.16 {d0, d1}, [r4:64]
+ vst1.16 {d0, d1}, [r4:128]
+ vst1.16 {d0, d1}, [r4:256]
+
+@ CHECK: vst1.16 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0x4f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x0a]
+@ CHECK: vst1.16 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0x6f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1}, [r4]!
+ vst1.16 {d0, d1}, [r4:16]!
+ vst1.16 {d0, d1}, [r4:32]!
+ vst1.16 {d0, d1}, [r4:64]!
+ vst1.16 {d0, d1}, [r4:128]!
+ vst1.16 {d0, d1}, [r4:256]!
+
+@ CHECK: vst1.16 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x0a]
+@ CHECK: vst1.16 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0x6d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1}, [r4], r6
+ vst1.16 {d0, d1}, [r4:16], r6
+ vst1.16 {d0, d1}, [r4:32], r6
+ vst1.16 {d0, d1}, [r4:64], r6
+ vst1.16 {d0, d1}, [r4:128], r6
+ vst1.16 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst1.16 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x0a]
+@ CHECK: vst1.16 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0x66,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1, d2}, [r4]
+ vst1.16 {d0, d1, d2}, [r4:16]
+ vst1.16 {d0, d1, d2}, [r4:32]
+ vst1.16 {d0, d1, d2}, [r4:64]
+ vst1.16 {d0, d1, d2}, [r4:128]
+ vst1.16 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst1.16 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x4f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1, d2}, [r4]!
+ vst1.16 {d0, d1, d2}, [r4:16]!
+ vst1.16 {d0, d1, d2}, [r4:32]!
+ vst1.16 {d0, d1, d2}, [r4:64]!
+ vst1.16 {d0, d1, d2}, [r4:128]!
+ vst1.16 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst1.16 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1, d2}, [r4], r6
+ vst1.16 {d0, d1, d2}, [r4:16], r6
+ vst1.16 {d0, d1, d2}, [r4:32], r6
+ vst1.16 {d0, d1, d2}, [r4:64], r6
+ vst1.16 {d0, d1, d2}, [r4:128], r6
+ vst1.16 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst1.16 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0, d1, d2, d3}, [r4]
+ vst1.16 {d0, d1, d2, d3}, [r4:16]
+ vst1.16 {d0, d1, d2, d3}, [r4:32]
+ vst1.16 {d0, d1, d2, d3}, [r4:64]
+ vst1.16 {d0, d1, d2, d3}, [r4:128]
+ vst1.16 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x4f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0x6f,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0x7f,0x02]
+
+ vst1.16 {d0, d1, d2, d3}, [r4]!
+ vst1.16 {d0, d1, d2, d3}, [r4:16]!
+ vst1.16 {d0, d1, d2, d3}, [r4:32]!
+ vst1.16 {d0, d1, d2, d3}, [r4:64]!
+ vst1.16 {d0, d1, d2, d3}, [r4:128]!
+ vst1.16 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0x6d,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0x7d,0x02]
+
+ vst1.16 {d0, d1, d2, d3}, [r4], r6
+ vst1.16 {d0, d1, d2, d3}, [r4:16], r6
+ vst1.16 {d0, d1, d2, d3}, [r4:32], r6
+ vst1.16 {d0, d1, d2, d3}, [r4:64], r6
+ vst1.16 {d0, d1, d2, d3}, [r4:128], r6
+ vst1.16 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.16 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0x66,0x02]
+@ CHECK: vst1.16 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0x76,0x02]
+
+ vst1.16 {d0[2]}, [r4]
+ vst1.16 {d0[2]}, [r4:16]
+ vst1.16 {d0[2]}, [r4:32]
+ vst1.16 {d0[2]}, [r4:64]
+ vst1.16 {d0[2]}, [r4:128]
+ vst1.16 {d0[2]}, [r4:256]
+
+@ CHECK: vst1.16 {d0[2]}, [r4] @ encoding: [0x84,0xf9,0x8f,0x04]
+@ CHECK: vst1.16 {d0[2]}, [r4:16] @ encoding: [0x84,0xf9,0x9f,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0[2]}, [r4]!
+ vst1.16 {d0[2]}, [r4:16]!
+ vst1.16 {d0[2]}, [r4:32]!
+ vst1.16 {d0[2]}, [r4:64]!
+ vst1.16 {d0[2]}, [r4:128]!
+ vst1.16 {d0[2]}, [r4:256]!
+
+@ CHECK: vst1.16 {d0[2]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x04]
+@ CHECK: vst1.16 {d0[2]}, [r4:16]! @ encoding: [0x84,0xf9,0x9d,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.16 {d0[2]}, [r4], r6
+ vst1.16 {d0[2]}, [r4:16], r6
+ vst1.16 {d0[2]}, [r4:32], r6
+ vst1.16 {d0[2]}, [r4:64], r6
+ vst1.16 {d0[2]}, [r4:128], r6
+ vst1.16 {d0[2]}, [r4:256], r6
+
+@ CHECK: vst1.16 {d0[2]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x04]
+@ CHECK: vst1.16 {d0[2]}, [r4:16], r6 @ encoding: [0x84,0xf9,0x96,0x04]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst1.16 {d0[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0}, [r4]
+ vst1.32 {d0}, [r4:16]
+ vst1.32 {d0}, [r4:32]
+ vst1.32 {d0}, [r4:64]
+ vst1.32 {d0}, [r4:128]
+ vst1.32 {d0}, [r4:256]
+
+@ CHECK: vst1.32 {d0}, [r4] @ encoding: [0x04,0xf9,0x8f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0}, [r4]!
+ vst1.32 {d0}, [r4:16]!
+ vst1.32 {d0}, [r4:32]!
+ vst1.32 {d0}, [r4:64]!
+ vst1.32 {d0}, [r4:128]!
+ vst1.32 {d0}, [r4:256]!
+
+@ CHECK: vst1.32 {d0}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0}, [r4], r6
+ vst1.32 {d0}, [r4:16], r6
+ vst1.32 {d0}, [r4:32], r6
+ vst1.32 {d0}, [r4:64], r6
+ vst1.32 {d0}, [r4:128], r6
+ vst1.32 {d0}, [r4:256], r6
+
+@ CHECK: vst1.32 {d0}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1}, [r4]
+ vst1.32 {d0, d1}, [r4:16]
+ vst1.32 {d0, d1}, [r4:32]
+ vst1.32 {d0, d1}, [r4:64]
+ vst1.32 {d0, d1}, [r4:128]
+ vst1.32 {d0, d1}, [r4:256]
+
+@ CHECK: vst1.32 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0x8f,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x0a]
+@ CHECK: vst1.32 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1}, [r4]!
+ vst1.32 {d0, d1}, [r4:16]!
+ vst1.32 {d0, d1}, [r4:32]!
+ vst1.32 {d0, d1}, [r4:64]!
+ vst1.32 {d0, d1}, [r4:128]!
+ vst1.32 {d0, d1}, [r4:256]!
+
+@ CHECK: vst1.32 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x0a]
+@ CHECK: vst1.32 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1}, [r4], r6
+ vst1.32 {d0, d1}, [r4:16], r6
+ vst1.32 {d0, d1}, [r4:32], r6
+ vst1.32 {d0, d1}, [r4:64], r6
+ vst1.32 {d0, d1}, [r4:128], r6
+ vst1.32 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst1.32 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x0a]
+@ CHECK: vst1.32 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1, d2}, [r4]
+ vst1.32 {d0, d1, d2}, [r4:16]
+ vst1.32 {d0, d1, d2}, [r4:32]
+ vst1.32 {d0, d1, d2}, [r4:64]
+ vst1.32 {d0, d1, d2}, [r4:128]
+ vst1.32 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst1.32 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x8f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1, d2}, [r4]!
+ vst1.32 {d0, d1, d2}, [r4:16]!
+ vst1.32 {d0, d1, d2}, [r4:32]!
+ vst1.32 {d0, d1, d2}, [r4:64]!
+ vst1.32 {d0, d1, d2}, [r4:128]!
+ vst1.32 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst1.32 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1, d2}, [r4], r6
+ vst1.32 {d0, d1, d2}, [r4:16], r6
+ vst1.32 {d0, d1, d2}, [r4:32], r6
+ vst1.32 {d0, d1, d2}, [r4:64], r6
+ vst1.32 {d0, d1, d2}, [r4:128], r6
+ vst1.32 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst1.32 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0, d1, d2, d3}, [r4]
+ vst1.32 {d0, d1, d2, d3}, [r4:16]
+ vst1.32 {d0, d1, d2, d3}, [r4:32]
+ vst1.32 {d0, d1, d2, d3}, [r4:64]
+ vst1.32 {d0, d1, d2, d3}, [r4:128]
+ vst1.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x8f,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0xbf,0x02]
+
+ vst1.32 {d0, d1, d2, d3}, [r4]!
+ vst1.32 {d0, d1, d2, d3}, [r4:16]!
+ vst1.32 {d0, d1, d2, d3}, [r4:32]!
+ vst1.32 {d0, d1, d2, d3}, [r4:64]!
+ vst1.32 {d0, d1, d2, d3}, [r4:128]!
+ vst1.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0xbd,0x02]
+
+ vst1.32 {d0, d1, d2, d3}, [r4], r6
+ vst1.32 {d0, d1, d2, d3}, [r4:16], r6
+ vst1.32 {d0, d1, d2, d3}, [r4:32], r6
+ vst1.32 {d0, d1, d2, d3}, [r4:64], r6
+ vst1.32 {d0, d1, d2, d3}, [r4:128], r6
+ vst1.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x02]
+@ CHECK: vst1.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0xb6,0x02]
+
+ vst1.32 {d0[1]}, [r4]
+ vst1.32 {d0[1]}, [r4:16]
+ vst1.32 {d0[1]}, [r4:32]
+ vst1.32 {d0[1]}, [r4:64]
+ vst1.32 {d0[1]}, [r4:128]
+ vst1.32 {d0[1]}, [r4:256]
+
+@ CHECK: vst1.32 {d0[1]}, [r4] @ encoding: [0x84,0xf9,0x8f,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0[1]}, [r4:32] @ encoding: [0x84,0xf9,0xbf,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0[1]}, [r4]!
+ vst1.32 {d0[1]}, [r4:16]!
+ vst1.32 {d0[1]}, [r4:32]!
+ vst1.32 {d0[1]}, [r4:64]!
+ vst1.32 {d0[1]}, [r4:128]!
+ vst1.32 {d0[1]}, [r4:256]!
+
+@ CHECK: vst1.32 {d0[1]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0[1]}, [r4:32]! @ encoding: [0x84,0xf9,0xbd,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.32 {d0[1]}, [r4], r6
+ vst1.32 {d0[1]}, [r4:16], r6
+ vst1.32 {d0[1]}, [r4:32], r6
+ vst1.32 {d0[1]}, [r4:64], r6
+ vst1.32 {d0[1]}, [r4:128], r6
+ vst1.32 {d0[1]}, [r4:256], r6
+
+@ CHECK: vst1.32 {d0[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.32 {d0[1]}, [r4:32], r6 @ encoding: [0x84,0xf9,0xb6,0x08]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst1.32 {d0[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0}, [r4]
+ vst1.64 {d0}, [r4:16]
+ vst1.64 {d0}, [r4:32]
+ vst1.64 {d0}, [r4:64]
+ vst1.64 {d0}, [r4:128]
+ vst1.64 {d0}, [r4:256]
+
+@ CHECK: vst1.64 {d0}, [r4] @ encoding: [0x04,0xf9,0xcf,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0}, [r4:64] @ encoding: [0x04,0xf9,0xdf,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0}, [r4]!
+ vst1.64 {d0}, [r4:16]!
+ vst1.64 {d0}, [r4:32]!
+ vst1.64 {d0}, [r4:64]!
+ vst1.64 {d0}, [r4:128]!
+ vst1.64 {d0}, [r4:256]!
+
+@ CHECK: vst1.64 {d0}, [r4]! @ encoding: [0x04,0xf9,0xcd,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0}, [r4:64]! @ encoding: [0x04,0xf9,0xdd,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0}, [r4], r6
+ vst1.64 {d0}, [r4:16], r6
+ vst1.64 {d0}, [r4:32], r6
+ vst1.64 {d0}, [r4:64], r6
+ vst1.64 {d0}, [r4:128], r6
+ vst1.64 {d0}, [r4:256], r6
+
+@ CHECK: vst1.64 {d0}, [r4], r6 @ encoding: [0x04,0xf9,0xc6,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0}, [r4:64], r6 @ encoding: [0x04,0xf9,0xd6,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1}, [r4]
+ vst1.64 {d0, d1}, [r4:16]
+ vst1.64 {d0, d1}, [r4:32]
+ vst1.64 {d0, d1}, [r4:64]
+ vst1.64 {d0, d1}, [r4:128]
+ vst1.64 {d0, d1}, [r4:256]
+
+@ CHECK: vst1.64 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0xcf,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0xdf,0x0a]
+@ CHECK: vst1.64 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0xef,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1}, [r4]!
+ vst1.64 {d0, d1}, [r4:16]!
+ vst1.64 {d0, d1}, [r4:32]!
+ vst1.64 {d0, d1}, [r4:64]!
+ vst1.64 {d0, d1}, [r4:128]!
+ vst1.64 {d0, d1}, [r4:256]!
+
+@ CHECK: vst1.64 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0xcd,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0xdd,0x0a]
+@ CHECK: vst1.64 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0xed,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1}, [r4], r6
+ vst1.64 {d0, d1}, [r4:16], r6
+ vst1.64 {d0, d1}, [r4:32], r6
+ vst1.64 {d0, d1}, [r4:64], r6
+ vst1.64 {d0, d1}, [r4:128], r6
+ vst1.64 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst1.64 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0xc6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0xd6,0x0a]
+@ CHECK: vst1.64 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0xe6,0x0a]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1, d2}, [r4]
+ vst1.64 {d0, d1, d2}, [r4:16]
+ vst1.64 {d0, d1, d2}, [r4:32]
+ vst1.64 {d0, d1, d2}, [r4:64]
+ vst1.64 {d0, d1, d2}, [r4:128]
+ vst1.64 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst1.64 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0xcf,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0xdf,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1, d2}, [r4]!
+ vst1.64 {d0, d1, d2}, [r4:16]!
+ vst1.64 {d0, d1, d2}, [r4:32]!
+ vst1.64 {d0, d1, d2}, [r4:64]!
+ vst1.64 {d0, d1, d2}, [r4:128]!
+ vst1.64 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst1.64 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0xcd,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0xdd,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1, d2}, [r4], r6
+ vst1.64 {d0, d1, d2}, [r4:16], r6
+ vst1.64 {d0, d1, d2}, [r4:32], r6
+ vst1.64 {d0, d1, d2}, [r4:64], r6
+ vst1.64 {d0, d1, d2}, [r4:128], r6
+ vst1.64 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst1.64 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0xc6,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0xd6,0x06]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst1.64 {d0, d1, d2, d3}, [r4]
+ vst1.64 {d0, d1, d2, d3}, [r4:16]
+ vst1.64 {d0, d1, d2, d3}, [r4:32]
+ vst1.64 {d0, d1, d2, d3}, [r4:64]
+ vst1.64 {d0, d1, d2, d3}, [r4:128]
+ vst1.64 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0xcf,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0xdf,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0xef,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0xff,0x02]
+
+ vst1.64 {d0, d1, d2, d3}, [r4]!
+ vst1.64 {d0, d1, d2, d3}, [r4:16]!
+ vst1.64 {d0, d1, d2, d3}, [r4:32]!
+ vst1.64 {d0, d1, d2, d3}, [r4:64]!
+ vst1.64 {d0, d1, d2, d3}, [r4:128]!
+ vst1.64 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0xcd,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0xdd,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0xed,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0xfd,0x02]
+
+ vst1.64 {d0, d1, d2, d3}, [r4], r6
+ vst1.64 {d0, d1, d2, d3}, [r4:16], r6
+ vst1.64 {d0, d1, d2, d3}, [r4:32], r6
+ vst1.64 {d0, d1, d2, d3}, [r4:64], r6
+ vst1.64 {d0, d1, d2, d3}, [r4:128], r6
+ vst1.64 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0xc6,0x02]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst1.64 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0xd6,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0xe6,0x02]
+@ CHECK: vst1.64 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0xf6,0x02]
+
+ vst2.8 {d0, d1}, [r4]
+ vst2.8 {d0, d1}, [r4:16]
+ vst2.8 {d0, d1}, [r4:32]
+ vst2.8 {d0, d1}, [r4:64]
+ vst2.8 {d0, d1}, [r4:128]
+ vst2.8 {d0, d1}, [r4:256]
+
+@ CHECK: vst2.8 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0x0f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x08]
+@ CHECK: vst2.8 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d1}, [r4]!
+ vst2.8 {d0, d1}, [r4:16]!
+ vst2.8 {d0, d1}, [r4:32]!
+ vst2.8 {d0, d1}, [r4:64]!
+ vst2.8 {d0, d1}, [r4:128]!
+ vst2.8 {d0, d1}, [r4:256]!
+
+@ CHECK: vst2.8 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x08]
+@ CHECK: vst2.8 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d1}, [r4], r6
+ vst2.8 {d0, d1}, [r4:16], r6
+ vst2.8 {d0, d1}, [r4:32], r6
+ vst2.8 {d0, d1}, [r4:64], r6
+ vst2.8 {d0, d1}, [r4:128], r6
+ vst2.8 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst2.8 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x08]
+@ CHECK: vst2.8 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d2}, [r4]
+ vst2.8 {d0, d2}, [r4:16]
+ vst2.8 {d0, d2}, [r4:32]
+ vst2.8 {d0, d2}, [r4:64]
+ vst2.8 {d0, d2}, [r4:128]
+ vst2.8 {d0, d2}, [r4:256]
+
+@ CHECK: vst2.8 {d0, d2}, [r4] @ encoding: [0x04,0xf9,0x0f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d2}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x09]
+@ CHECK: vst2.8 {d0, d2}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d2}, [r4]!
+ vst2.8 {d0, d2}, [r4:16]!
+ vst2.8 {d0, d2}, [r4:32]!
+ vst2.8 {d0, d2}, [r4:64]!
+ vst2.8 {d0, d2}, [r4:128]!
+ vst2.8 {d0, d2}, [r4:256]!
+
+@ CHECK: vst2.8 {d0, d2}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x09]
+@ CHECK: vst2.8 {d0, d2}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d2}, [r4], r6
+ vst2.8 {d0, d2}, [r4:16], r6
+ vst2.8 {d0, d2}, [r4:32], r6
+ vst2.8 {d0, d2}, [r4:64], r6
+ vst2.8 {d0, d2}, [r4:128], r6
+ vst2.8 {d0, d2}, [r4:256], r6
+
+@ CHECK: vst2.8 {d0, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x09]
+@ CHECK: vst2.8 {d0, d2}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0, d1, d2, d3}, [r4]
+ vst2.8 {d0, d1, d2, d3}, [r4:16]
+ vst2.8 {d0, d1, d2, d3}, [r4:32]
+ vst2.8 {d0, d1, d2, d3}, [r4:64]
+ vst2.8 {d0, d1, d2, d3}, [r4:128]
+ vst2.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x0f,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0x3f,0x03]
+
+ vst2.8 {d0, d1, d2, d3}, [r4]!
+ vst2.8 {d0, d1, d2, d3}, [r4:16]!
+ vst2.8 {d0, d1, d2, d3}, [r4:32]!
+ vst2.8 {d0, d1, d2, d3}, [r4:64]!
+ vst2.8 {d0, d1, d2, d3}, [r4:128]!
+ vst2.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0x3d,0x03]
+
+ vst2.8 {d0, d1, d2, d3}, [r4], r6
+ vst2.8 {d0, d1, d2, d3}, [r4:16], r6
+ vst2.8 {d0, d1, d2, d3}, [r4:32], r6
+ vst2.8 {d0, d1, d2, d3}, [r4:64], r6
+ vst2.8 {d0, d1, d2, d3}, [r4:128], r6
+ vst2.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x03]
+@ CHECK: vst2.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0x36,0x03]
+
+ vst2.8 {d0[2], d1[2]}, [r4]
+ vst2.8 {d0[2], d1[2]}, [r4:16]
+ vst2.8 {d0[2], d1[2]}, [r4:32]
+ vst2.8 {d0[2], d1[2]}, [r4:64]
+ vst2.8 {d0[2], d1[2]}, [r4:128]
+ vst2.8 {d0[2], d1[2]}, [r4:256]
+
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4] @ encoding: [0x84,0xf9,0x4f,0x01]
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4:16] @ encoding: [0x84,0xf9,0x5f,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0[2], d1[2]}, [r4]!
+ vst2.8 {d0[2], d1[2]}, [r4:16]!
+ vst2.8 {d0[2], d1[2]}, [r4:32]!
+ vst2.8 {d0[2], d1[2]}, [r4:64]!
+ vst2.8 {d0[2], d1[2]}, [r4:128]!
+ vst2.8 {d0[2], d1[2]}, [r4:256]!
+
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4]! @ encoding: [0x84,0xf9,0x4d,0x01]
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4:16]! @ encoding: [0x84,0xf9,0x5d,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.8 {d0[2], d1[2]}, [r4], r6
+ vst2.8 {d0[2], d1[2]}, [r4:16], r6
+ vst2.8 {d0[2], d1[2]}, [r4:32], r6
+ vst2.8 {d0[2], d1[2]}, [r4:64], r6
+ vst2.8 {d0[2], d1[2]}, [r4:128], r6
+ vst2.8 {d0[2], d1[2]}, [r4:256], r6
+
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4], r6 @ encoding: [0x84,0xf9,0x46,0x01]
+@ CHECK: vst2.8 {d0[2], d1[2]}, [r4:16], r6 @ encoding: [0x84,0xf9,0x56,0x01]
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 16 or omitted
+@ CHECK-ERRORS: vst2.8 {d0[2], d1[2]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d1}, [r4]
+ vst2.32 {d0, d1}, [r4:16]
+ vst2.32 {d0, d1}, [r4:32]
+ vst2.32 {d0, d1}, [r4:64]
+ vst2.32 {d0, d1}, [r4:128]
+ vst2.32 {d0, d1}, [r4:256]
+
+@ CHECK: vst2.32 {d0, d1}, [r4] @ encoding: [0x04,0xf9,0x8f,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x08]
+@ CHECK: vst2.32 {d0, d1}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d1}, [r4]!
+ vst2.32 {d0, d1}, [r4:16]!
+ vst2.32 {d0, d1}, [r4:32]!
+ vst2.32 {d0, d1}, [r4:64]!
+ vst2.32 {d0, d1}, [r4:128]!
+ vst2.32 {d0, d1}, [r4:256]!
+
+@ CHECK: vst2.32 {d0, d1}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x08]
+@ CHECK: vst2.32 {d0, d1}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d1}, [r4], r6
+ vst2.32 {d0, d1}, [r4:16], r6
+ vst2.32 {d0, d1}, [r4:32], r6
+ vst2.32 {d0, d1}, [r4:64], r6
+ vst2.32 {d0, d1}, [r4:128], r6
+ vst2.32 {d0, d1}, [r4:256], r6
+
+@ CHECK: vst2.32 {d0, d1}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x08]
+@ CHECK: vst2.32 {d0, d1}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x08]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d2}, [r4]
+ vst2.32 {d0, d2}, [r4:16]
+ vst2.32 {d0, d2}, [r4:32]
+ vst2.32 {d0, d2}, [r4:64]
+ vst2.32 {d0, d2}, [r4:128]
+ vst2.32 {d0, d2}, [r4:256]
+
+@ CHECK: vst2.32 {d0, d2}, [r4] @ encoding: [0x04,0xf9,0x8f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d2}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x09]
+@ CHECK: vst2.32 {d0, d2}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d2}, [r4]!
+ vst2.32 {d0, d2}, [r4:16]!
+ vst2.32 {d0, d2}, [r4:32]!
+ vst2.32 {d0, d2}, [r4:64]!
+ vst2.32 {d0, d2}, [r4:128]!
+ vst2.32 {d0, d2}, [r4:256]!
+
+@ CHECK: vst2.32 {d0, d2}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x09]
+@ CHECK: vst2.32 {d0, d2}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d2}, [r4], r6
+ vst2.32 {d0, d2}, [r4:16], r6
+ vst2.32 {d0, d2}, [r4:32], r6
+ vst2.32 {d0, d2}, [r4:64], r6
+ vst2.32 {d0, d2}, [r4:128], r6
+ vst2.32 {d0, d2}, [r4:256], r6
+
+@ CHECK: vst2.32 {d0, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x09]
+@ CHECK: vst2.32 {d0, d2}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0, d1, d2, d3}, [r4]
+ vst2.32 {d0, d1, d2, d3}, [r4:16]
+ vst2.32 {d0, d1, d2, d3}, [r4:32]
+ vst2.32 {d0, d1, d2, d3}, [r4:64]
+ vst2.32 {d0, d1, d2, d3}, [r4:128]
+ vst2.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x8f,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0xbf,0x03]
+
+ vst2.32 {d0, d1, d2, d3}, [r4]!
+ vst2.32 {d0, d1, d2, d3}, [r4:16]!
+ vst2.32 {d0, d1, d2, d3}, [r4:32]!
+ vst2.32 {d0, d1, d2, d3}, [r4:64]!
+ vst2.32 {d0, d1, d2, d3}, [r4:128]!
+ vst2.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0xbd,0x03]
+
+ vst2.32 {d0, d1, d2, d3}, [r4], r6
+ vst2.32 {d0, d1, d2, d3}, [r4:16], r6
+ vst2.32 {d0, d1, d2, d3}, [r4:32], r6
+ vst2.32 {d0, d1, d2, d3}, [r4:64], r6
+ vst2.32 {d0, d1, d2, d3}, [r4:128], r6
+ vst2.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x03]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst2.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x03]
+@ CHECK: vst2.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0xb6,0x03]
+
+ vst2.32 {d0[1], d1[1]}, [r4]
+ vst2.32 {d0[1], d1[1]}, [r4:16]
+ vst2.32 {d0[1], d1[1]}, [r4:32]
+ vst2.32 {d0[1], d1[1]}, [r4:64]
+ vst2.32 {d0[1], d1[1]}, [r4:128]
+ vst2.32 {d0[1], d1[1]}, [r4:256]
+
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4] @ encoding: [0x84,0xf9,0x8f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4:64] @ encoding: [0x84,0xf9,0x9f,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0[1], d1[1]}, [r4]!
+ vst2.32 {d0[1], d1[1]}, [r4:16]!
+ vst2.32 {d0[1], d1[1]}, [r4:32]!
+ vst2.32 {d0[1], d1[1]}, [r4:64]!
+ vst2.32 {d0[1], d1[1]}, [r4:128]!
+ vst2.32 {d0[1], d1[1]}, [r4:256]!
+
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4:64]! @ encoding: [0x84,0xf9,0x9d,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0[1], d1[1]}, [r4], r6
+ vst2.32 {d0[1], d1[1]}, [r4:16], r6
+ vst2.32 {d0[1], d1[1]}, [r4:32], r6
+ vst2.32 {d0[1], d1[1]}, [r4:64], r6
+ vst2.32 {d0[1], d1[1]}, [r4:128], r6
+ vst2.32 {d0[1], d1[1]}, [r4:256], r6
+
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d1[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0x96,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d1[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0[1], d2[1]}, [r4]
+ vst2.32 {d0[1], d2[1]}, [r4:16]
+ vst2.32 {d0[1], d2[1]}, [r4:32]
+ vst2.32 {d0[1], d2[1]}, [r4:64]
+ vst2.32 {d0[1], d2[1]}, [r4:128]
+ vst2.32 {d0[1], d2[1]}, [r4:256]
+
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4] @ encoding: [0x84,0xf9,0xcf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4:64] @ encoding: [0x84,0xf9,0xdf,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0[1], d2[1]}, [r4]!
+ vst2.32 {d0[1], d2[1]}, [r4:16]!
+ vst2.32 {d0[1], d2[1]}, [r4:32]!
+ vst2.32 {d0[1], d2[1]}, [r4:64]!
+ vst2.32 {d0[1], d2[1]}, [r4:128]!
+ vst2.32 {d0[1], d2[1]}, [r4:256]!
+
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4]! @ encoding: [0x84,0xf9,0xcd,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4:64]! @ encoding: [0x84,0xf9,0xdd,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst2.32 {d0[1], d2[1]}, [r4], r6
+ vst2.32 {d0[1], d2[1]}, [r4:16], r6
+ vst2.32 {d0[1], d2[1]}, [r4:32], r6
+ vst2.32 {d0[1], d2[1]}, [r4:64], r6
+ vst2.32 {d0[1], d2[1]}, [r4:128], r6
+ vst2.32 {d0[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4], r6 @ encoding: [0x84,0xf9,0xc6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst2.32 {d0[1], d2[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0xd6,0x09]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst2.32 {d0[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d1, d2}, [r4]
+ vst3.8 {d0, d1, d2}, [r4:16]
+ vst3.8 {d0, d1, d2}, [r4:32]
+ vst3.8 {d0, d1, d2}, [r4:64]
+ vst3.8 {d0, d1, d2}, [r4:128]
+ vst3.8 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst3.8 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x0f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d1, d2}, [r4]!
+ vst3.8 {d0, d1, d2}, [r4:16]!
+ vst3.8 {d0, d1, d2}, [r4:32]!
+ vst3.8 {d0, d1, d2}, [r4:64]!
+ vst3.8 {d0, d1, d2}, [r4:128]!
+ vst3.8 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst3.8 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d1, d2}, [r4], r6
+ vst3.8 {d0, d1, d2}, [r4:16], r6
+ vst3.8 {d0, d1, d2}, [r4:32], r6
+ vst3.8 {d0, d1, d2}, [r4:64], r6
+ vst3.8 {d0, d1, d2}, [r4:128], r6
+ vst3.8 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst3.8 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d2, d4}, [r4]
+ vst3.8 {d0, d2, d4}, [r4:16]
+ vst3.8 {d0, d2, d4}, [r4:32]
+ vst3.8 {d0, d2, d4}, [r4:64]
+ vst3.8 {d0, d2, d4}, [r4:128]
+ vst3.8 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vst3.8 {d0, d2, d4}, [r4] @ encoding: [0x04,0xf9,0x0f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d2, d4}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d2, d4}, [r4]!
+ vst3.8 {d0, d2, d4}, [r4:16]!
+ vst3.8 {d0, d2, d4}, [r4:32]!
+ vst3.8 {d0, d2, d4}, [r4:64]!
+ vst3.8 {d0, d2, d4}, [r4:128]!
+ vst3.8 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vst3.8 {d0, d2, d4}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d2, d4}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0, d2, d4}, [r4], r6
+ vst3.8 {d0, d2, d4}, [r4:16], r6
+ vst3.8 {d0, d2, d4}, [r4:32], r6
+ vst3.8 {d0, d2, d4}, [r4:64], r6
+ vst3.8 {d0, d2, d4}, [r4:128], r6
+ vst3.8 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vst3.8 {d0, d2, d4}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.8 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.8 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4]
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:16]
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:32]
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:64]
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:128]
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vst3.8 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0x84,0xf9,0x2f,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4]!
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vst3.8 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0x84,0xf9,0x2d,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4], r6
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vst3.8 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vst3.8 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x26,0x02]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.8 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d1, d2}, [r4]
+ vst3.16 {d0, d1, d2}, [r4:16]
+ vst3.16 {d0, d1, d2}, [r4:32]
+ vst3.16 {d0, d1, d2}, [r4:64]
+ vst3.16 {d0, d1, d2}, [r4:128]
+ vst3.16 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst3.16 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x4f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d1, d2}, [r4]!
+ vst3.16 {d0, d1, d2}, [r4:16]!
+ vst3.16 {d0, d1, d2}, [r4:32]!
+ vst3.16 {d0, d1, d2}, [r4:64]!
+ vst3.16 {d0, d1, d2}, [r4:128]!
+ vst3.16 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst3.16 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d1, d2}, [r4], r6
+ vst3.16 {d0, d1, d2}, [r4:16], r6
+ vst3.16 {d0, d1, d2}, [r4:32], r6
+ vst3.16 {d0, d1, d2}, [r4:64], r6
+ vst3.16 {d0, d1, d2}, [r4:128], r6
+ vst3.16 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst3.16 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d2, d4}, [r4]
+ vst3.16 {d0, d2, d4}, [r4:16]
+ vst3.16 {d0, d2, d4}, [r4:32]
+ vst3.16 {d0, d2, d4}, [r4:64]
+ vst3.16 {d0, d2, d4}, [r4:128]
+ vst3.16 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vst3.16 {d0, d2, d4}, [r4] @ encoding: [0x04,0xf9,0x4f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d2, d4}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d2, d4}, [r4]!
+ vst3.16 {d0, d2, d4}, [r4:16]!
+ vst3.16 {d0, d2, d4}, [r4:32]!
+ vst3.16 {d0, d2, d4}, [r4:64]!
+ vst3.16 {d0, d2, d4}, [r4:128]!
+ vst3.16 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vst3.16 {d0, d2, d4}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d2, d4}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0, d2, d4}, [r4], r6
+ vst3.16 {d0, d2, d4}, [r4:16], r6
+ vst3.16 {d0, d2, d4}, [r4:32], r6
+ vst3.16 {d0, d2, d4}, [r4:64], r6
+ vst3.16 {d0, d2, d4}, [r4:128], r6
+ vst3.16 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vst3.16 {d0, d2, d4}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.16 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.16 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4]
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:16]
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:32]
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:64]
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:128]
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vst3.16 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0x84,0xf9,0x4f,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4]!
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vst3.16 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0x84,0xf9,0x4d,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4], r6
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vst3.16 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vst3.16 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x46,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4]
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:16]
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:32]
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:64]
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:128]
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:256]
+
+@ CHECK: vst3.16 {d0[1], d2[1], d4[1]}, [r4] @ encoding: [0x84,0xf9,0x6f,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4]!
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:16]!
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:32]!
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:64]!
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:128]!
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:256]!
+
+@ CHECK: vst3.16 {d0[1], d2[1], d4[1]}, [r4]! @ encoding: [0x84,0xf9,0x6d,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4], r6
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:16], r6
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:32], r6
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:64], r6
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:128], r6
+ vst3.16 {d0[1], d2[1], d4[1]}, [r4:256], r6
+
+@ CHECK: vst3.16 {d0[1], d2[1], d4[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x66,0x06]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.16 {d0[1], d2[1], d4[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d1, d2}, [r4]
+ vst3.32 {d0, d1, d2}, [r4:16]
+ vst3.32 {d0, d1, d2}, [r4:32]
+ vst3.32 {d0, d1, d2}, [r4:64]
+ vst3.32 {d0, d1, d2}, [r4:128]
+ vst3.32 {d0, d1, d2}, [r4:256]
+
+@ CHECK: vst3.32 {d0, d1, d2}, [r4] @ encoding: [0x04,0xf9,0x8f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d1, d2}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d1, d2}, [r4]!
+ vst3.32 {d0, d1, d2}, [r4:16]!
+ vst3.32 {d0, d1, d2}, [r4:32]!
+ vst3.32 {d0, d1, d2}, [r4:64]!
+ vst3.32 {d0, d1, d2}, [r4:128]!
+ vst3.32 {d0, d1, d2}, [r4:256]!
+
+@ CHECK: vst3.32 {d0, d1, d2}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d1, d2}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d1, d2}, [r4], r6
+ vst3.32 {d0, d1, d2}, [r4:16], r6
+ vst3.32 {d0, d1, d2}, [r4:32], r6
+ vst3.32 {d0, d1, d2}, [r4:64], r6
+ vst3.32 {d0, d1, d2}, [r4:128], r6
+ vst3.32 {d0, d1, d2}, [r4:256], r6
+
+@ CHECK: vst3.32 {d0, d1, d2}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d1, d2}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x04]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d1, d2}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d2, d4}, [r4]
+ vst3.32 {d0, d2, d4}, [r4:16]
+ vst3.32 {d0, d2, d4}, [r4:32]
+ vst3.32 {d0, d2, d4}, [r4:64]
+ vst3.32 {d0, d2, d4}, [r4:128]
+ vst3.32 {d0, d2, d4}, [r4:256]
+
+@ CHECK: vst3.32 {d0, d2, d4}, [r4] @ encoding: [0x04,0xf9,0x8f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d2, d4}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d2, d4}, [r4]!
+ vst3.32 {d0, d2, d4}, [r4:16]!
+ vst3.32 {d0, d2, d4}, [r4:32]!
+ vst3.32 {d0, d2, d4}, [r4:64]!
+ vst3.32 {d0, d2, d4}, [r4:128]!
+ vst3.32 {d0, d2, d4}, [r4:256]!
+
+@ CHECK: vst3.32 {d0, d2, d4}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d2, d4}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0, d2, d4}, [r4], r6
+ vst3.32 {d0, d2, d4}, [r4:16], r6
+ vst3.32 {d0, d2, d4}, [r4:32], r6
+ vst3.32 {d0, d2, d4}, [r4:64], r6
+ vst3.32 {d0, d2, d4}, [r4:128], r6
+ vst3.32 {d0, d2, d4}, [r4:256], r6
+
+@ CHECK: vst3.32 {d0, d2, d4}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst3.32 {d0, d2, d4}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x05]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst3.32 {d0, d2, d4}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4]
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:16]
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:32]
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:64]
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:128]
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:256]
+
+@ CHECK: vst3.32 {d0[1], d1[1], d2[1]}, [r4] @ encoding: [0x84,0xf9,0x8f,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4]!
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:16]!
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:32]!
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:64]!
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:128]!
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:256]!
+
+@ CHECK: vst3.32 {d0[1], d1[1], d2[1]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4], r6
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:16], r6
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:32], r6
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:64], r6
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:128], r6
+ vst3.32 {d0[1], d1[1], d2[1]}, [r4:256], r6
+
+@ CHECK: vst3.32 {d0[1], d1[1], d2[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d1[1], d2[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4]
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:16]
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:32]
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:64]
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:128]
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:256]
+
+@ CHECK: vst3.32 {d0[1], d2[1], d4[1]}, [r4] @ encoding: [0x84,0xf9,0xcf,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4]!
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:16]!
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:32]!
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:64]!
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:128]!
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:256]!
+
+@ CHECK: vst3.32 {d0[1], d2[1], d4[1]}, [r4]! @ encoding: [0x84,0xf9,0xcd,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4], r6
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:16], r6
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:32], r6
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:64], r6
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:128], r6
+ vst3.32 {d0[1], d2[1], d4[1]}, [r4:256], r6
+
+@ CHECK: vst3.32 {d0[1], d2[1], d4[1]}, [r4], r6 @ encoding: [0x84,0xf9,0xc6,0x0a]
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be omitted
+@ CHECK-ERRORS: vst3.32 {d0[1], d2[1], d4[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.8 {d0, d1, d2, d3}, [r4]
+ vst4.8 {d0, d1, d2, d3}, [r4:16]
+ vst4.8 {d0, d1, d2, d3}, [r4:32]
+ vst4.8 {d0, d1, d2, d3}, [r4:64]
+ vst4.8 {d0, d1, d2, d3}, [r4:128]
+ vst4.8 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x0f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0x3f,0x00]
+
+ vst4.8 {d0, d1, d2, d3}, [r4]!
+ vst4.8 {d0, d1, d2, d3}, [r4:16]!
+ vst4.8 {d0, d1, d2, d3}, [r4:32]!
+ vst4.8 {d0, d1, d2, d3}, [r4:64]!
+ vst4.8 {d0, d1, d2, d3}, [r4:128]!
+ vst4.8 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0x3d,0x00]
+
+ vst4.8 {d0, d1, d2, d3}, [r4], r6
+ vst4.8 {d0, d1, d2, d3}, [r4:16], r6
+ vst4.8 {d0, d1, d2, d3}, [r4:32], r6
+ vst4.8 {d0, d1, d2, d3}, [r4:64], r6
+ vst4.8 {d0, d1, d2, d3}, [r4:128], r6
+ vst4.8 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x00]
+@ CHECK: vst4.8 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0x36,0x00]
+
+ vst4.8 {d0, d2, d4, d6}, [r4]
+ vst4.8 {d0, d2, d4, d6}, [r4:16]
+ vst4.8 {d0, d2, d4, d6}, [r4:32]
+ vst4.8 {d0, d2, d4, d6}, [r4:64]
+ vst4.8 {d0, d2, d4, d6}, [r4:128]
+ vst4.8 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4] @ encoding: [0x04,0xf9,0x0f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x04,0xf9,0x1f,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x04,0xf9,0x2f,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x04,0xf9,0x3f,0x01]
+
+ vst4.8 {d0, d2, d4, d6}, [r4]!
+ vst4.8 {d0, d2, d4, d6}, [r4:16]!
+ vst4.8 {d0, d2, d4, d6}, [r4:32]!
+ vst4.8 {d0, d2, d4, d6}, [r4:64]!
+ vst4.8 {d0, d2, d4, d6}, [r4:128]!
+ vst4.8 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4]! @ encoding: [0x04,0xf9,0x0d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x04,0xf9,0x1d,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x04,0xf9,0x2d,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x04,0xf9,0x3d,0x01]
+
+ vst4.8 {d0, d2, d4, d6}, [r4], r6
+ vst4.8 {d0, d2, d4, d6}, [r4:16], r6
+ vst4.8 {d0, d2, d4, d6}, [r4:32], r6
+ vst4.8 {d0, d2, d4, d6}, [r4:64], r6
+ vst4.8 {d0, d2, d4, d6}, [r4:128], r6
+ vst4.8 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x04,0xf9,0x06,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.8 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x04,0xf9,0x16,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x04,0xf9,0x26,0x01]
+@ CHECK: vst4.8 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x04,0xf9,0x36,0x01]
+
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0x84,0xf9,0x2f,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32] @ encoding: [0x84,0xf9,0x3f,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0x84,0xf9,0x2d,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]! @ encoding: [0x84,0xf9,0x3d,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x26,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6 @ encoding: [0x84,0xf9,0x36,0x03]
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 32 or omitted
+@ CHECK-ERRORS: vst4.8 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0, d1, d2, d3}, [r4]
+ vst4.16 {d0, d1, d2, d3}, [r4:16]
+ vst4.16 {d0, d1, d2, d3}, [r4:32]
+ vst4.16 {d0, d1, d2, d3}, [r4:64]
+ vst4.16 {d0, d1, d2, d3}, [r4:128]
+ vst4.16 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x4f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0x6f,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0x7f,0x00]
+
+ vst4.16 {d0, d1, d2, d3}, [r4]!
+ vst4.16 {d0, d1, d2, d3}, [r4:16]!
+ vst4.16 {d0, d1, d2, d3}, [r4:32]!
+ vst4.16 {d0, d1, d2, d3}, [r4:64]!
+ vst4.16 {d0, d1, d2, d3}, [r4:128]!
+ vst4.16 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0x6d,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0x7d,0x00]
+
+ vst4.16 {d0, d1, d2, d3}, [r4], r6
+ vst4.16 {d0, d1, d2, d3}, [r4:16], r6
+ vst4.16 {d0, d1, d2, d3}, [r4:32], r6
+ vst4.16 {d0, d1, d2, d3}, [r4:64], r6
+ vst4.16 {d0, d1, d2, d3}, [r4:128], r6
+ vst4.16 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0x66,0x00]
+@ CHECK: vst4.16 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0x76,0x00]
+
+ vst4.16 {d0, d2, d4, d6}, [r4]
+ vst4.16 {d0, d2, d4, d6}, [r4:16]
+ vst4.16 {d0, d2, d4, d6}, [r4:32]
+ vst4.16 {d0, d2, d4, d6}, [r4:64]
+ vst4.16 {d0, d2, d4, d6}, [r4:128]
+ vst4.16 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4] @ encoding: [0x04,0xf9,0x4f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x04,0xf9,0x5f,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x04,0xf9,0x6f,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x04,0xf9,0x7f,0x01]
+
+ vst4.16 {d0, d2, d4, d6}, [r4]!
+ vst4.16 {d0, d2, d4, d6}, [r4:16]!
+ vst4.16 {d0, d2, d4, d6}, [r4:32]!
+ vst4.16 {d0, d2, d4, d6}, [r4:64]!
+ vst4.16 {d0, d2, d4, d6}, [r4:128]!
+ vst4.16 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4]! @ encoding: [0x04,0xf9,0x4d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x04,0xf9,0x5d,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x04,0xf9,0x6d,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x04,0xf9,0x7d,0x01]
+
+ vst4.16 {d0, d2, d4, d6}, [r4], r6
+ vst4.16 {d0, d2, d4, d6}, [r4:16], r6
+ vst4.16 {d0, d2, d4, d6}, [r4:32], r6
+ vst4.16 {d0, d2, d4, d6}, [r4:64], r6
+ vst4.16 {d0, d2, d4, d6}, [r4:128], r6
+ vst4.16 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x04,0xf9,0x46,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.16 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x04,0xf9,0x56,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x04,0xf9,0x66,0x01]
+@ CHECK: vst4.16 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x04,0xf9,0x76,0x01]
+
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0x84,0xf9,0x4f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64] @ encoding: [0x84,0xf9,0x5f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0x84,0xf9,0x4d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]! @ encoding: [0x84,0xf9,0x5d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x46,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0x56,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4] @ encoding: [0x84,0xf9,0x6f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64] @ encoding: [0x84,0xf9,0x7f,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]!
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]!
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4]! @ encoding: [0x84,0xf9,0x6d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]! @ encoding: [0x84,0xf9,0x7d,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+ vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x66,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0x76,0x07]
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64 or omitted
+@ CHECK-ERRORS: vst4.16 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0, d1, d2, d3}, [r4]
+ vst4.32 {d0, d1, d2, d3}, [r4:16]
+ vst4.32 {d0, d1, d2, d3}, [r4:32]
+ vst4.32 {d0, d1, d2, d3}, [r4:64]
+ vst4.32 {d0, d1, d2, d3}, [r4:128]
+ vst4.32 {d0, d1, d2, d3}, [r4:256]
+
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4] @ encoding: [0x04,0xf9,0x8f,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:256] @ encoding: [0x04,0xf9,0xbf,0x00]
+
+ vst4.32 {d0, d1, d2, d3}, [r4]!
+ vst4.32 {d0, d1, d2, d3}, [r4:16]!
+ vst4.32 {d0, d1, d2, d3}, [r4:32]!
+ vst4.32 {d0, d1, d2, d3}, [r4:64]!
+ vst4.32 {d0, d1, d2, d3}, [r4:128]!
+ vst4.32 {d0, d1, d2, d3}, [r4:256]!
+
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:256]! @ encoding: [0x04,0xf9,0xbd,0x00]
+
+ vst4.32 {d0, d1, d2, d3}, [r4], r6
+ vst4.32 {d0, d1, d2, d3}, [r4:16], r6
+ vst4.32 {d0, d1, d2, d3}, [r4:32], r6
+ vst4.32 {d0, d1, d2, d3}, [r4:64], r6
+ vst4.32 {d0, d1, d2, d3}, [r4:128], r6
+ vst4.32 {d0, d1, d2, d3}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x00]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d1, d2, d3}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x00]
+@ CHECK: vst4.32 {d0, d1, d2, d3}, [r4:256], r6 @ encoding: [0x04,0xf9,0xb6,0x00]
+
+ vst4.32 {d0, d2, d4, d6}, [r4]
+ vst4.32 {d0, d2, d4, d6}, [r4:16]
+ vst4.32 {d0, d2, d4, d6}, [r4:32]
+ vst4.32 {d0, d2, d4, d6}, [r4:64]
+ vst4.32 {d0, d2, d4, d6}, [r4:128]
+ vst4.32 {d0, d2, d4, d6}, [r4:256]
+
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4] @ encoding: [0x04,0xf9,0x8f,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:64] @ encoding: [0x04,0xf9,0x9f,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:128] @ encoding: [0x04,0xf9,0xaf,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:256] @ encoding: [0x04,0xf9,0xbf,0x01]
+
+ vst4.32 {d0, d2, d4, d6}, [r4]!
+ vst4.32 {d0, d2, d4, d6}, [r4:16]!
+ vst4.32 {d0, d2, d4, d6}, [r4:32]!
+ vst4.32 {d0, d2, d4, d6}, [r4:64]!
+ vst4.32 {d0, d2, d4, d6}, [r4:128]!
+ vst4.32 {d0, d2, d4, d6}, [r4:256]!
+
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4]! @ encoding: [0x04,0xf9,0x8d,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:64]! @ encoding: [0x04,0xf9,0x9d,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:128]! @ encoding: [0x04,0xf9,0xad,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:256]! @ encoding: [0x04,0xf9,0xbd,0x01]
+
+ vst4.32 {d0, d2, d4, d6}, [r4], r6
+ vst4.32 {d0, d2, d4, d6}, [r4:16], r6
+ vst4.32 {d0, d2, d4, d6}, [r4:32], r6
+ vst4.32 {d0, d2, d4, d6}, [r4:64], r6
+ vst4.32 {d0, d2, d4, d6}, [r4:128], r6
+ vst4.32 {d0, d2, d4, d6}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4], r6 @ encoding: [0x04,0xf9,0x86,0x01]
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128, 256 or omitted
+@ CHECK-ERRORS: vst4.32 {d0, d2, d4, d6}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:64], r6 @ encoding: [0x04,0xf9,0x96,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:128], r6 @ encoding: [0x04,0xf9,0xa6,0x01]
+@ CHECK: vst4.32 {d0, d2, d4, d6}, [r4:256], r6 @ encoding: [0x04,0xf9,0xb6,0x01]
+
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4] @ encoding: [0x84,0xf9,0x8f,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64] @ encoding: [0x84,0xf9,0x9f,0x0b]
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128] @ encoding: [0x84,0xf9,0xaf,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]! @ encoding: [0x84,0xf9,0x9d,0x0b]
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]! @ encoding: [0x84,0xf9,0xad,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0x96,0x0b]
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6 @ encoding: [0x84,0xf9,0xa6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4] @ encoding: [0x84,0xf9,0xcf,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64] @ encoding: [0x84,0xf9,0xdf,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128] @ encoding: [0x84,0xf9,0xef,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]! @ encoding: [0x84,0xf9,0xcd,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]! @ encoding: [0x84,0xf9,0xdd,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]! @ encoding: [0x84,0xf9,0xed,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6 @ encoding: [0x84,0xf9,0xc6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0xd6,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6 @ encoding: [0x84,0xf9,0xe6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]!
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4]! @ encoding: [0x84,0xf9,0x8d,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64]! @ encoding: [0x84,0xf9,0x9d,0x0b]
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128]! @ encoding: [0x84,0xf9,0xad,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6
+ vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4], r6 @ encoding: [0x84,0xf9,0x86,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0x96,0x0b]
+@ CHECK: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:128], r6 @ encoding: [0x84,0xf9,0xa6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4] @ encoding: [0x84,0xf9,0xcf,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64] @ encoding: [0x84,0xf9,0xdf,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128] @ encoding: [0x84,0xf9,0xef,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]!
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4]! @ encoding: [0x84,0xf9,0xcd,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16]!
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32]!
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64]! @ encoding: [0x84,0xf9,0xdd,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128]! @ encoding: [0x84,0xf9,0xed,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256]!
+@ CHECK-ERRORS: ^
+
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6
+ vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r6 @ encoding: [0x84,0xf9,0xc6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:16], r6
+@ CHECK-ERRORS: ^
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:32], r6
+@ CHECK-ERRORS: ^
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:64], r6 @ encoding: [0x84,0xf9,0xd6,0x0b]
+@ CHECK: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:128], r6 @ encoding: [0x84,0xf9,0xe6,0x0b]
+@ CHECK-ERRORS: error: alignment must be 64, 128 or omitted
+@ CHECK-ERRORS: vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r4:256], r6
+@ CHECK-ERRORS: ^
More information about the llvm-commits
mailing list