[llvm-commits] [llvm] r137635 - in /llvm/trunk: lib/Target/ARM/ARMInstrNEON.td lib/Target/ARM/Disassembler/ARMDisassembler.cpp test/MC/Disassembler/ARM/neon.txt

Owen Anderson resistor at mac.com
Mon Aug 15 11:44:44 PDT 2011


Author: resistor
Date: Mon Aug 15 13:44:44 2011
New Revision: 137635

URL: http://llvm.org/viewvc/llvm-project?rev=137635&view=rev
Log:
Fix problems decoding the to/from-lane NEON memory instructions, and add a comprehensive NEON decoding testcase.

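The new DecodeVLDnLN/DecodeVSTnLN helpers below all follow the same shape: pull the register and size fields out of the 32-bit encoding with fieldFromInstruction32(), derive the lane index, alignment and register increment from the size field, then append the operands to the MCInst (destination D registers, optional writeback GPR, base register plus alignment, optional index register, tied source registers, lane index). As a rough, standalone illustration of just the bit extraction -- a hypothetical sketch, not LLVM code, with a placeholder instruction word -- the field layout the decoders read is:

#include <cstdint>
#include <cstdio>

// Extract NumBits bits of Insn starting at bit StartBit, mirroring what
// fieldFromInstruction32() does in ARMDisassembler.cpp.
static unsigned field(uint32_t Insn, unsigned StartBit, unsigned NumBits) {
  return (Insn >> StartBit) & ((1u << NumBits) - 1);
}

int main() {
  uint32_t Insn = 0xF4E0080Fu;             // placeholder word, for demonstration only
  unsigned Rn = field(Insn, 16, 4);        // base address register
  unsigned Rm = field(Insn, 0, 4);         // 0xF and 0xD are special-cased by the decoders
  unsigned Rd = field(Insn, 12, 4) |
                (field(Insn, 22, 1) << 4); // D:Vd selects one of d0-d31
  unsigned size = field(Insn, 10, 2);      // drives the lane/alignment switch in each decoder
  printf("Rn=%u Rm=%u Rd=%u size=%u\n", Rn, Rm, Rd, size);
  return 0;
}
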
Added:
    llvm/trunk/test/MC/Disassembler/ARM/neon.txt
Modified:
    llvm/trunk/lib/Target/ARM/ARMInstrNEON.td
    llvm/trunk/lib/Target/ARM/Disassembler/ARMDisassembler.cpp

Modified: llvm/trunk/lib/Target/ARM/ARMInstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrNEON.td?rev=137635&r1=137634&r2=137635&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrNEON.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrNEON.td Mon Aug 15 13:44:44 2011
@@ -547,6 +547,7 @@
                                          (i32 (LoadOp addrmode6:$Rn)),
                                          imm:$lane))]> {
   let Rm = 0b1111;
+  let DecoderMethod = "DecodeVLD1LN";
 }
 class VLD1LN32<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
              PatFrag LoadOp>
@@ -558,6 +559,7 @@
                                          (i32 (LoadOp addrmode6oneL32:$Rn)),
                                          imm:$lane))]> {
   let Rm = 0b1111;
+  let DecoderMethod = "DecodeVLD1LN";
 }
 class VLD1QLNPseudo<ValueType Ty, PatFrag LoadOp> : VLDQLNPseudo<IIC_VLD1ln> {
   let Pattern = [(set QPR:$dst, (vector_insert (Ty QPR:$src),
@@ -597,7 +599,9 @@
           (ins addrmode6:$Rn, am6offset:$Rm,
            DPR:$src, nohash_imm:$lane), IIC_VLD1lnu, "vld1", Dt,
           "\\{$Vd[$lane]\\}, $Rn$Rm",
-          "$src = $Vd, $Rn.addr = $wb", []>;
+          "$src = $Vd, $Rn.addr = $wb", []> {
+  let DecoderMethod = "DecodeVLD1LN";
+}
 
 def VLD1LNd8_UPD  : VLD1LNWB<0b0000, {?,?,?,0}, "8"> {
   let Inst{7-5} = lane{2-0};
@@ -624,6 +628,7 @@
           "$src1 = $Vd, $src2 = $dst2", []> {
   let Rm = 0b1111;
   let Inst{4}   = Rn{4};
+  let DecoderMethod = "DecodeVLD2LN";
 }
 
 def VLD2LNd8  : VLD2LN<0b0001, {?,?,?,?}, "8"> {
@@ -659,6 +664,7 @@
           "\\{$Vd[$lane], $dst2[$lane]\\}, $Rn$Rm",
           "$src1 = $Vd, $src2 = $dst2, $Rn.addr = $wb", []> {
   let Inst{4}   = Rn{4};
+  let DecoderMethod = "DecodeVLD2LN";
 }
 
 def VLD2LNd8_UPD  : VLD2LNWB<0b0001, {?,?,?,?}, "8"> {
@@ -693,6 +699,7 @@
           "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane]\\}, $Rn",
           "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3", []> {
   let Rm = 0b1111;
+  let DecoderMethod = "DecodeVLD3LN";
 }
 
 def VLD3LNd8  : VLD3LN<0b0010, {?,?,?,0}, "8"> {
@@ -729,7 +736,9 @@
           IIC_VLD3lnu, "vld3", Dt,
           "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane]\\}, $Rn$Rm",
           "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $Rn.addr = $wb",
-          []>;
+          []> {
+  let DecoderMethod = "DecodeVLD3LN";
+}
 
 def VLD3LNd8_UPD  : VLD3LNWB<0b0010, {?,?,?,0}, "8"> {
   let Inst{7-5} = lane{2-0};
@@ -765,6 +774,7 @@
           "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4", []> {
   let Rm = 0b1111;
   let Inst{4}   = Rn{4};
+  let DecoderMethod = "DecodeVLD4LN";
 }
 
 def VLD4LNd8  : VLD4LN<0b0011, {?,?,?,?}, "8"> {
@@ -805,6 +815,7 @@
 "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4, $Rn.addr = $wb",
           []> {
   let Inst{4}   = Rn{4};
+  let DecoderMethod = "DecodeVLD4LN";
 }
 
 def VLD4LNd8_UPD  : VLD4LNWB<0b0011, {?,?,?,?}, "8"> {
@@ -1424,6 +1435,7 @@
           IIC_VST1ln, "vst1", Dt, "\\{$Vd[$lane]\\}, $Rn", "",
           [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), addrmode6:$Rn)]> {
   let Rm = 0b1111;
+  let DecoderMethod = "DecodeVST1LN";
 }
 class VST1LN32<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
              PatFrag StoreOp, SDNode ExtractOp>
@@ -1432,6 +1444,7 @@
           IIC_VST1ln, "vst1", Dt, "\\{$Vd[$lane]\\}, $Rn", "",
           [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), addrmode6oneL32:$Rn)]>{
   let Rm = 0b1111;
+  let DecoderMethod = "DecodeVST1LN";
 }
 class VST1QLNPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
   : VSTQLNPseudo<IIC_VST1ln> {
@@ -1472,7 +1485,9 @@
           "\\{$Vd[$lane]\\}, $Rn$Rm",
           "$Rn.addr = $wb",
           [(set GPR:$wb, (StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane),
-                                  addrmode6:$Rn, am6offset:$Rm))]>;
+                                  addrmode6:$Rn, am6offset:$Rm))]> {
+  let DecoderMethod = "DecodeVST1LN";
+}
 class VST1QLNWBPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
   : VSTQLNWBPseudo<IIC_VST1lnu> {
   let Pattern = [(set GPR:$wb, (StoreOp (ExtractOp (Ty QPR:$src), imm:$lane),
@@ -1508,6 +1523,7 @@
           "", []> {
   let Rm = 0b1111;
   let Inst{4}   = Rn{4};
+  let DecoderMethod = "DecodeVST2LN";
 }
 
 def VST2LNd8  : VST2LN<0b0001, {?,?,?,?}, "8"> {
@@ -1545,6 +1561,7 @@
           "\\{$src1[$lane], $src2[$lane]\\}, $addr$offset",
           "$addr.addr = $wb", []> {
   let Inst{4}   = Rn{4};
+  let DecoderMethod = "DecodeVST2LN";
 }
 
 def VST2LNd8_UPD  : VST2LNWB<0b0001, {?,?,?,?}, "8"> {
@@ -1578,6 +1595,7 @@
            nohash_imm:$lane), IIC_VST3ln, "vst3", Dt,
           "\\{$Vd[$lane], $src2[$lane], $src3[$lane]\\}, $Rn", "", []> {
   let Rm = 0b1111;
+  let DecoderMethod = "DecodeVST3LN";
 }
 
 def VST3LNd8  : VST3LN<0b0010, {?,?,?,0}, "8"> {
@@ -1612,7 +1630,9 @@
            DPR:$Vd, DPR:$src2, DPR:$src3, nohash_imm:$lane),
           IIC_VST3lnu, "vst3", Dt,
           "\\{$Vd[$lane], $src2[$lane], $src3[$lane]\\}, $Rn$Rm",
-          "$Rn.addr = $wb", []>;
+          "$Rn.addr = $wb", []> {
+  let DecoderMethod = "DecodeVST3LN";
+}
 
 def VST3LNd8_UPD  : VST3LNWB<0b0010, {?,?,?,0}, "8"> {
   let Inst{7-5} = lane{2-0};
@@ -1647,6 +1667,7 @@
           "", []> {
   let Rm = 0b1111;
   let Inst{4} = Rn{4};
+  let DecoderMethod = "DecodeVST4LN";
 }
 
 def VST4LNd8  : VST4LN<0b0011, {?,?,?,?}, "8"> {
@@ -1685,6 +1706,7 @@
   "\\{$Vd[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $Rn$Rm",
           "$Rn.addr = $wb", []> {
   let Inst{4} = Rn{4};
+  let DecoderMethod = "DecodeVST4LN";
 }
 
 def VST4LNd8_UPD  : VST4LNWB<0b0011, {?,?,?,?}, "8"> {

Modified: llvm/trunk/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/Disassembler/ARMDisassembler.cpp?rev=137635&r1=137634&r2=137635&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/Disassembler/ARMDisassembler.cpp (original)
+++ llvm/trunk/lib/Target/ARM/Disassembler/ARMDisassembler.cpp Mon Aug 15 13:44:44 2011
@@ -141,6 +141,22 @@
                                uint64_t Address, const void *Decoder);
 static bool DecodeSTRPreReg(llvm::MCInst &Inst, unsigned Insn,
                                uint64_t Address, const void *Decoder);
+static bool DecodeVLD1LN(llvm::MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void *Decoder);
+static bool DecodeVLD2LN(llvm::MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void *Decoder);
+static bool DecodeVLD3LN(llvm::MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void *Decoder);
+static bool DecodeVLD4LN(llvm::MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void *Decoder);
+static bool DecodeVST1LN(llvm::MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void *Decoder);
+static bool DecodeVST2LN(llvm::MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void *Decoder);
+static bool DecodeVST3LN(llvm::MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void *Decoder);
+static bool DecodeVST4LN(llvm::MCInst &Inst, unsigned Insn,
+                               uint64_t Address, const void *Decoder);
 
 
 static bool DecodeThumbAddSpecialReg(llvm::MCInst &Inst, uint16_t Insn,
@@ -2560,3 +2576,447 @@
 
   return true;
 }
+
+static bool DecodeVLD1LN(llvm::MCInst &Inst, unsigned Insn,
+                         uint64_t Address, const void *Decoder) {
+  unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
+  unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+  unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
+  Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
+  unsigned size = fieldFromInstruction32(Insn, 10, 2);
+
+  unsigned align = 0;
+  unsigned index = 0;
+  switch (size) {
+    default:
+      return false;
+    case 0:
+      if (fieldFromInstruction32(Insn, 4, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 5, 3);
+      break;
+    case 1:
+      if (fieldFromInstruction32(Insn, 5, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 6, 2);
+      if (fieldFromInstruction32(Insn, 4, 1))
+        align = 2;
+      break;
+    case 2:
+      if (fieldFromInstruction32(Insn, 6, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 7, 1);
+      if (fieldFromInstruction32(Insn, 4, 2) != 0)
+        align = 4;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  if (Rm != 0xF) { // Writeback
+    if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))
+      return false;
+  }
+  if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(align));
+  if (Rm != 0xF && Rm != 0xD) {
+    if (!DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))
+      return false;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(index));
+
+  return true;
+}
+
+static bool DecodeVST1LN(llvm::MCInst &Inst, unsigned Insn,
+                         uint64_t Address, const void *Decoder) {
+  unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
+  unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+  unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
+  Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
+  unsigned size = fieldFromInstruction32(Insn, 10, 2);
+
+  unsigned align = 0;
+  unsigned index = 0;
+  switch (size) {
+    default:
+      return false;
+    case 0:
+      if (fieldFromInstruction32(Insn, 4, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 5, 3);
+      break;
+    case 1:
+      if (fieldFromInstruction32(Insn, 5, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 6, 2);
+      if (fieldFromInstruction32(Insn, 4, 1))
+        align = 2;
+      break;
+    case 2:
+      if (fieldFromInstruction32(Insn, 6, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 7, 1);
+      if (fieldFromInstruction32(Insn, 4, 2) != 0)
+        align = 4;
+  }
+
+  if (Rm != 0xF) { // Writeback
+    if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))
+      return false;
+  }
+  if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(align));
+  if (Rm != 0xF && Rm != 0xD) {
+    if (!DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))
+      return false;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(index));
+
+  return true;
+}
+
+
+static bool DecodeVLD2LN(llvm::MCInst &Inst, unsigned Insn,
+                         uint64_t Address, const void *Decoder) {
+  unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
+  unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+  unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
+  Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
+  unsigned size = fieldFromInstruction32(Insn, 10, 2);
+
+  unsigned align = 0;
+  unsigned index = 0;
+  unsigned inc = 1;
+  switch (size) {
+    default:
+      return false;
+    case 0:
+      index = fieldFromInstruction32(Insn, 5, 3);
+      if (fieldFromInstruction32(Insn, 4, 1))
+        align = 2;
+      break;
+    case 1:
+      index = fieldFromInstruction32(Insn, 6, 2);
+      if (fieldFromInstruction32(Insn, 4, 1))
+        align = 4;
+      if (fieldFromInstruction32(Insn, 5, 1))
+        inc = 2;
+      break;
+    case 2:
+      if (fieldFromInstruction32(Insn, 5, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 7, 1);
+      if (fieldFromInstruction32(Insn, 4, 1) != 0)
+        align = 8;
+      if (fieldFromInstruction32(Insn, 6, 1))
+        inc = 2;
+      break;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+inc, Address, Decoder)) return false;
+  if (Rm != 0xF) { // Writeback
+    if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))
+      return false;
+  }
+  if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(align));
+  if (Rm != 0xF && Rm != 0xD) {
+    if (!DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))
+      return false;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+inc, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(index));
+
+  return true;
+}
+
+static bool DecodeVST2LN(llvm::MCInst &Inst, unsigned Insn,
+                         uint64_t Address, const void *Decoder) {
+  unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
+  unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+  unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
+  Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
+  unsigned size = fieldFromInstruction32(Insn, 10, 2);
+
+  unsigned align = 0;
+  unsigned index = 0;
+  unsigned inc = 1;
+  switch (size) {
+    default:
+      return false;
+    case 0:
+      index = fieldFromInstruction32(Insn, 5, 3);
+      if (fieldFromInstruction32(Insn, 4, 1))
+        align = 2;
+      break;
+    case 1:
+      index = fieldFromInstruction32(Insn, 6, 2);
+      if (fieldFromInstruction32(Insn, 4, 1))
+        align = 4;
+      if (fieldFromInstruction32(Insn, 5, 1))
+        inc = 2;
+      break;
+    case 2:
+      if (fieldFromInstruction32(Insn, 5, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 7, 1);
+      if (fieldFromInstruction32(Insn, 4, 1) != 0)
+        align = 8;
+      if (fieldFromInstruction32(Insn, 6, 1))
+        inc = 2;
+      break;
+  }
+
+  if (Rm != 0xF) { // Writeback
+    if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))
+      return false;
+  }
+  if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(align));
+  if (Rm != 0xF && Rm != 0xD) {
+    if (!DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))
+      return false;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+inc, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(index));
+
+  return true;
+}
+
+
+static bool DecodeVLD3LN(llvm::MCInst &Inst, unsigned Insn,
+                         uint64_t Address, const void *Decoder) {
+  unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
+  unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+  unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
+  Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
+  unsigned size = fieldFromInstruction32(Insn, 10, 2);
+
+  unsigned align = 0;
+  unsigned index = 0;
+  unsigned inc = 1;
+  switch (size) {
+    default:
+      return false;
+    case 0:
+      if (fieldFromInstruction32(Insn, 4, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 5, 3);
+      break;
+    case 1:
+      if (fieldFromInstruction32(Insn, 4, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 6, 2);
+      if (fieldFromInstruction32(Insn, 5, 1))
+        inc = 2;
+      break;
+    case 2:
+      if (fieldFromInstruction32(Insn, 4, 2))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 7, 1);
+      if (fieldFromInstruction32(Insn, 6, 1))
+        inc = 2;
+      break;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+inc, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+2*inc, Address, Decoder)) return false;
+
+  if (Rm != 0xF) { // Writeback
+    if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))
+      return false;
+  }
+  if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(align));
+  if (Rm != 0xF && Rm != 0xD) {
+    if (!DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))
+      return false;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+inc, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+2*inc, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(index));
+
+  return true;
+}
+
+static bool DecodeVST3LN(llvm::MCInst &Inst, unsigned Insn,
+                         uint64_t Address, const void *Decoder) {
+  unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
+  unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+  unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
+  Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
+  unsigned size = fieldFromInstruction32(Insn, 10, 2);
+
+  unsigned align = 0;
+  unsigned index = 0;
+  unsigned inc = 1;
+  switch (size) {
+    default:
+      return false;
+    case 0:
+      if (fieldFromInstruction32(Insn, 4, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 5, 3);
+      break;
+    case 1:
+      if (fieldFromInstruction32(Insn, 4, 1))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 6, 2);
+      if (fieldFromInstruction32(Insn, 5, 1))
+        inc = 2;
+      break;
+    case 2:
+      if (fieldFromInstruction32(Insn, 4, 2))
+        return false; // UNDEFINED
+      index = fieldFromInstruction32(Insn, 7, 1);
+      if (fieldFromInstruction32(Insn, 6, 1))
+        inc = 2;
+      break;
+  }
+
+  if (Rm != 0xF) { // Writeback
+    if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))
+      return false;
+  }
+  if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(align));
+  if (Rm != 0xF && Rm != 0xD) {
+    if (!DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))
+      return false;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+inc, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+2*inc, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(index));
+
+  return true;
+}
+
+
+static bool DecodeVLD4LN(llvm::MCInst &Inst, unsigned Insn,
+                         uint64_t Address, const void *Decoder) {
+  unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
+  unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+  unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
+  Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
+  unsigned size = fieldFromInstruction32(Insn, 10, 2);
+
+  unsigned align = 0;
+  unsigned index = 0;
+  unsigned inc = 1;
+  switch (size) {
+    default:
+      return false;
+    case 0:
+      if (fieldFromInstruction32(Insn, 4, 1))
+        align = 4;
+      index = fieldFromInstruction32(Insn, 5, 3);
+      break;
+    case 1:
+      if (fieldFromInstruction32(Insn, 4, 1))
+        align = 8;
+      index = fieldFromInstruction32(Insn, 6, 2);
+      if (fieldFromInstruction32(Insn, 5, 1))
+        inc = 2;
+      break;
+    case 2:
+      if (fieldFromInstruction32(Insn, 4, 2))
+        align = 4 << fieldFromInstruction32(Insn, 4, 2);
+      index = fieldFromInstruction32(Insn, 7, 1);
+      if (fieldFromInstruction32(Insn, 6, 1))
+        inc = 2;
+      break;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+inc, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+2*inc, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+3*inc, Address, Decoder)) return false;
+
+  if (Rm != 0xF) { // Writeback
+    if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))
+      return false;
+  }
+  if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(align));
+  if (Rm != 0xF && Rm != 0xD) {
+    if (!DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))
+      return false;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+inc, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+2*inc, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+3*inc, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(index));
+
+  return true;
+}
+
+static bool DecodeVST4LN(llvm::MCInst &Inst, unsigned Insn,
+                         uint64_t Address, const void *Decoder) {
+  unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
+  unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
+  unsigned Rd = fieldFromInstruction32(Insn, 12, 4);
+  Rd |= fieldFromInstruction32(Insn, 22, 1) << 4;
+  unsigned size = fieldFromInstruction32(Insn, 10, 2);
+
+  unsigned align = 0;
+  unsigned index = 0;
+  unsigned inc = 1;
+  switch (size) {
+    default:
+      return false;
+    case 0:
+      if (fieldFromInstruction32(Insn, 4, 1))
+        align = 4;
+      index = fieldFromInstruction32(Insn, 5, 3);
+      break;
+    case 1:
+      if (fieldFromInstruction32(Insn, 4, 1))
+        align = 8;
+      index = fieldFromInstruction32(Insn, 6, 2);
+      if (fieldFromInstruction32(Insn, 5, 1))
+        inc = 2;
+      break;
+    case 2:
+      if (fieldFromInstruction32(Insn, 4, 2))
+        align = 4 << fieldFromInstruction32(Insn, 4, 2);
+      index = fieldFromInstruction32(Insn, 7, 1);
+      if (fieldFromInstruction32(Insn, 6, 1))
+        inc = 2;
+      break;
+  }
+
+  if (Rm != 0xF) { // Writeback
+    if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))
+      return false;
+  }
+  if (!DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(align));
+  if (Rm != 0xF && Rm != 0xD) {
+    if (!DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))
+      return false;
+  }
+
+  if (!DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+inc, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+2*inc, Address, Decoder)) return false;
+  if (!DecodeDPRRegisterClass(Inst, Rd+3*inc, Address, Decoder)) return false;
+  Inst.addOperand(MCOperand::CreateImm(index));
+
+  return true;
+}
+

Added: llvm/trunk/test/MC/Disassembler/ARM/neon.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/Disassembler/ARM/neon.txt?rev=137635&view=auto
==============================================================================
--- llvm/trunk/test/MC/Disassembler/ARM/neon.txt (added)
+++ llvm/trunk/test/MC/Disassembler/ARM/neon.txt Mon Aug 15 13:44:44 2011
@@ -0,0 +1,1847 @@
+# RUN: llvm-mc -triple armv7-unknown-unknown -disassemble < %s | FileCheck %s
+
+0x20 0x03 0xf1 0xf3
+# CHECK: vabs.s8	d16, d16
+0x20 0x03 0xf5 0xf3
+# CHECK: vabs.s16	d16, d16
+0x20 0x03 0xf9 0xf3
+# CHECK: vabs.s32	d16, d16
+0x20 0x07 0xf9 0xf3
+# CHECK: vabs.f32	d16, d16
+0x60 0x03 0xf1 0xf3
+# CHECK: vabs.s8	q8, q8
+0x60 0x03 0xf5 0xf3
+# CHECK: vabs.s16	q8, q8
+0x60 0x03 0xf9 0xf3
+# CHECK: vabs.s32	q8, q8
+0x60 0x07 0xf9 0xf3
+# CHECK: vabs.f32	q8, q8
+
+0x20 0x07 0xf0 0xf3
+# CHECK: vqabs.s8	d16, d16
+0x20 0x07 0xf4 0xf3
+# CHECK: vqabs.s16	d16, d16
+0x20 0x07 0xf8 0xf3
+# CHECK: vqabs.s32	d16, d16
+0x60 0x07 0xf0 0xf3
+# CHECK: vqabs.s8	q8, q8
+0x60 0x07 0xf4 0xf3
+# CHECK: vqabs.s16	q8, q8
+0x60 0x07 0xf8 0xf3
+# CHECK: vqabs.s32	q8, q8
+
+0xa1 0x07 0x40 0xf2
+# CHECK: vabd.s8	d16, d16, d17
+0xa1 0x07 0x50 0xf2
+# CHECK: vabd.s16	d16, d16, d17
+0xa1 0x07 0x60 0xf2
+# CHECK: vabd.s32	d16, d16, d17
+0xa1 0x07 0x40 0xf3
+# CHECK: vabd.u8	d16, d16, d17
+0xa1 0x07 0x50 0xf3
+# CHECK: vabd.u16	d16, d16, d17
+0xa1 0x07 0x60 0xf3
+# CHECK: vabd.u32	d16, d16, d17
+0xa1 0x0d 0x60 0xf3
+# CHECK: vabd.f32	d16, d16, d17
+0xe2 0x07 0x40 0xf2
+# CHECK: vabd.s8	q8, q8, q9
+0xe2 0x07 0x50 0xf2
+# CHECK: vabd.s16	q8, q8, q9
+0xe2 0x07 0x60 0xf2
+# CHECK: vabd.s32	q8, q8, q9
+0xe2 0x07 0x40 0xf3
+# CHECK: vabd.u8	q8, q8, q9
+0xe2 0x07 0x50 0xf3
+# CHECK: vabd.u16	q8, q8, q9
+0xe2 0x07 0x60 0xf3
+# CHECK: vabd.u32	q8, q8, q9
+0xe2 0x0d 0x60 0xf3
+# CHECK: vabd.f32	q8, q8, q9
+
+0xa1 0x07 0xc0 0xf2
+# CHECK: vabdl.s8	q8, d16, d17
+0xa1 0x07 0xd0 0xf2
+# CHECK: vabdl.s16	q8, d16, d17
+0xa1 0x07 0xe0 0xf2
+# CHECK: vabdl.s32	q8, d16, d17
+0xa1 0x07 0xc0 0xf3
+# CHECK: vabdl.u8	q8, d16, d17
+0xa1 0x07 0xd0 0xf3
+# CHECK: vabdl.u16	q8, d16, d17
+0xa1 0x07 0xe0 0xf3
+# CHECK: vabdl.u32	q8, d16, d17
+
+0xb1 0x07 0x42 0xf2
+# CHECK: vaba.s8	d16, d18, d17
+0xb1 0x07 0x52 0xf2
+# CHECK: vaba.s16	d16, d18, d17
+0xb1 0x07 0x62 0xf2
+# CHECK: vaba.s32	d16, d18, d17
+0xb1 0x07 0x42 0xf3
+# CHECK: vaba.u8	d16, d18, d17
+0xb1 0x07 0x52 0xf3
+# CHECK: vaba.u16	d16, d18, d17
+0xb1 0x07 0x62 0xf3
+# CHECK: vaba.u32	d16, d18, d17
+0xf4 0x27 0x40 0xf2
+# CHECK: vaba.s8	q9, q8, q10
+0xf4 0x27 0x50 0xf2
+# CHECK: vaba.s16	q9, q8, q10
+0xf4 0x27 0x60 0xf2
+# CHECK: vaba.s32	q9, q8, q10
+0xf4 0x27 0x40 0xf3
+# CHECK: vaba.u8	q9, q8, q10
+0xf4 0x27 0x50 0xf3
+# CHECK: vaba.u16	q9, q8, q10
+0xf4 0x27 0x60 0xf3
+# CHECK: vaba.u32	q9, q8, q10
+
+0xa2 0x05 0xc3 0xf2
+# CHECK: vabal.s8	q8, d19, d18
+0xa2 0x05 0xd3 0xf2
+# CHECK: vabal.s16	q8, d19, d18
+0xa2 0x05 0xe3 0xf2
+# CHECK: vabal.s32	q8, d19, d18
+0xa2 0x05 0xc3 0xf3
+# CHECK: vabal.u8	q8, d19, d18
+0xa2 0x05 0xd3 0xf3
+# CHECK: vabal.u16	q8, d19, d18
+0xa2 0x05 0xe3 0xf3
+# CHECK: vabal.u32	q8, d19, d18
+
+
+
+
+0xa0 0x08 0x41 0xf2
+# CHECK: vadd.i8	d16, d17, d16
+0xa0 0x08 0x51 0xf2
+# CHECK: vadd.i16	d16, d17, d16
+0xa0 0x08 0x71 0xf2
+# CHECK: vadd.i64	d16, d17, d16
+0xa0 0x08 0x61 0xf2
+# CHECK: vadd.i32	d16, d17, d16
+0xa1 0x0d 0x40 0xf2
+# CHECK: vadd.f32	d16, d16, d17
+0xe2 0x0d 0x40 0xf2
+# CHECK: vadd.f32	q8, q8, q9
+
+0xa0 0x00 0xc1 0xf2
+# CHECK: vaddl.s8	q8, d17, d16
+0xa0 0x00 0xd1 0xf2
+# CHECK: vaddl.s16	q8, d17, d16
+0xa0 0x00 0xe1 0xf2
+# CHECK: vaddl.s32	q8, d17, d16
+0xa0 0x00 0xc1 0xf3
+# CHECK: vaddl.u8	q8, d17, d16
+0xa0 0x00 0xd1 0xf3
+# CHECK: vaddl.u16	q8, d17, d16
+0xa0 0x00 0xe1 0xf3
+# CHECK: vaddl.u32	q8, d17, d16
+
+0xa2 0x01 0xc0 0xf2
+# CHECK: vaddw.s8	q8, q8, d18
+0xa2 0x01 0xd0 0xf2
+# CHECK: vaddw.s16	q8, q8, d18
+0xa2 0x01 0xe0 0xf2
+# CHECK: vaddw.s32	q8, q8, d18
+0xa2 0x01 0xc0 0xf3
+# CHECK: vaddw.u8	q8, q8, d18
+0xa2 0x01 0xd0 0xf3
+# CHECK: vaddw.u16	q8, q8, d18
+0xa2 0x01 0xe0 0xf3
+# CHECK: vaddw.u32	q8, q8, d18
+
+0xa1 0x00 0x40 0xf2
+# CHECK: vhadd.s8	d16, d16, d17
+0xa1 0x00 0x50 0xf2
+# CHECK: vhadd.s16	d16, d16, d17
+0xa1 0x00 0x60 0xf2
+# CHECK: vhadd.s32	d16, d16, d17
+0xa1 0x00 0x40 0xf3
+# CHECK: vhadd.u8	d16, d16, d17
+0xa1 0x00 0x50 0xf3
+# CHECK: vhadd.u16	d16, d16, d17
+0xa1 0x00 0x60 0xf3
+# CHECK: vhadd.u32	d16, d16, d17
+0xe2 0x00 0x40 0xf2
+# CHECK: vhadd.s8	q8, q8, q9
+0xe2 0x00 0x50 0xf2
+# CHECK: vhadd.s16	q8, q8, q9
+0xe2 0x00 0x60 0xf2
+# CHECK: vhadd.s32	q8, q8, q9
+0xe2 0x00 0x40 0xf3
+# CHECK: vhadd.u8	q8, q8, q9
+0xe2 0x00 0x50 0xf3
+# CHECK: vhadd.u16	q8, q8, q9
+0xe2 0x00 0x60 0xf3
+# CHECK: vhadd.u32	q8, q8, q9
+
+0xa1 0x01 0x40 0xf2
+# CHECK: vrhadd.s8	d16, d16, d17
+0xa1 0x01 0x50 0xf2
+# CHECK: vrhadd.s16	d16, d16, d17
+0xa1 0x01 0x60 0xf2
+# CHECK: vrhadd.s32	d16, d16, d17
+0xa1 0x01 0x40 0xf3
+# CHECK: vrhadd.u8	d16, d16, d17
+0xa1 0x01 0x50 0xf3
+# CHECK: vrhadd.u16	d16, d16, d17
+0xa1 0x01 0x60 0xf3
+# CHECK: vrhadd.u32	d16, d16, d17
+0xe2 0x01 0x40 0xf2
+# CHECK: vrhadd.s8	q8, q8, q9
+0xe2 0x01 0x50 0xf2
+# CHECK: vrhadd.s16	q8, q8, q9
+0xe2 0x01 0x60 0xf2
+# CHECK: vrhadd.s32	q8, q8, q9
+0xe2 0x01 0x40 0xf3
+# CHECK: vrhadd.u8	q8, q8, q9
+0xe2 0x01 0x50 0xf3
+# CHECK: vrhadd.u16	q8, q8, q9
+0xe2 0x01 0x60 0xf3
+# CHECK: vrhadd.u32	q8, q8, q9
+
+0xb1 0x00 0x40 0xf2
+# CHECK: vqadd.s8	d16, d16, d17
+0xb1 0x00 0x50 0xf2
+# CHECK: vqadd.s16	d16, d16, d17
+0xb1 0x00 0x60 0xf2
+# CHECK: vqadd.s32	d16, d16, d17
+0xb1 0x00 0x70 0xf2
+# CHECK: vqadd.s64	d16, d16, d17
+0xb1 0x00 0x40 0xf3
+# CHECK: vqadd.u8	d16, d16, d17
+0xb1 0x00 0x50 0xf3
+# CHECK: vqadd.u16	d16, d16, d17
+0xb1 0x00 0x60 0xf3
+# CHECK: vqadd.u32	d16, d16, d17
+0xb1 0x00 0x70 0xf3
+# CHECK: vqadd.u64	d16, d16, d17
+0xf2 0x00 0x40 0xf2
+# CHECK: vqadd.s8	q8, q8, q9
+0xf2 0x00 0x50 0xf2
+# CHECK: vqadd.s16	q8, q8, q9
+0xf2 0x00 0x60 0xf2
+# CHECK: vqadd.s32	q8, q8, q9
+0xf2 0x00 0x70 0xf2
+# CHECK: vqadd.s64	q8, q8, q9
+0xf2 0x00 0x40 0xf3
+# CHECK: vqadd.u8	q8, q8, q9
+0xf2 0x00 0x50 0xf3
+# CHECK: vqadd.u16	q8, q8, q9
+0xf2 0x00 0x60 0xf3
+# CHECK: vqadd.u32	q8, q8, q9
+0xf2 0x00 0x70 0xf3
+# CHECK: vqadd.u64	q8, q8, q9
+
+0xa2 0x04 0xc0 0xf2
+# CHECK: vaddhn.i16	d16, q8, q9
+0xa2 0x04 0xd0 0xf2
+# CHECK: vaddhn.i32	d16, q8, q9
+0xa2 0x04 0xe0 0xf2
+# CHECK: vaddhn.i64	d16, q8, q9
+0xa2 0x04 0xc0 0xf3
+# CHECK: vraddhn.i16	d16, q8, q9
+0xa2 0x04 0xd0 0xf3
+# CHECK: vraddhn.i32	d16, q8, q9
+0xa2 0x04 0xe0 0xf3
+# CHECK: vraddhn.i64	d16, q8, q9
+
+
+0x20 0x05 0xf0 0xf3
+# CHECK: vcnt.8	d16, d16
+0x60 0x05 0xf0 0xf3
+# CHECK: vcnt.8	q8, q8
+0xa0 0x04 0xf0 0xf3
+# CHECK: vclz.i8	d16, d16
+0xa0 0x04 0xf4 0xf3
+# CHECK: vclz.i16	d16, d16
+0xa0 0x04 0xf8 0xf3
+# CHECK: vclz.i32	d16, d16
+0xe0 0x04 0xf0 0xf3
+# CHECK: vclz.i8	q8, q8
+0xe0 0x04 0xf4 0xf3
+# CHECK: vclz.i16	q8, q8
+0xe0 0x04 0xf8 0xf3
+# CHECK: vclz.i32	q8, q8
+0x20 0x04 0xf0 0xf3
+# CHECK: vcls.s8	d16, d16
+0x20 0x04 0xf4 0xf3
+# CHECK: vcls.s16	d16, d16
+0x20 0x04 0xf8 0xf3
+# CHECK: vcls.s32	d16, d16
+0x60 0x04 0xf0 0xf3
+# CHECK: vcls.s8	q8, q8
+0x60 0x04 0xf4 0xf3
+# CHECK: vcls.s16	q8, q8
+0x60 0x04 0xf8 0xf3
+# CHECK: vcls.s32	q8, q8
+
+
+
+
+0xb0 0x01 0x41 0xf2
+# CHECK: vand	d16, d17, d16
+0xf2 0x01 0x40 0xf2
+# CHECK: vand	q8, q8, q9
+
+0xb0 0x01 0x41 0xf3
+# CHECK: veor	d16, d17, d16
+0xf2 0x01 0x40 0xf3
+# CHECK: veor	q8, q8, q9
+
+0xb0 0x01 0x61 0xf2
+# CHECK: vorr	d16, d17, d16
+0xf2 0x01 0x60 0xf2
+# CHECK: vorr	q8, q8, q9
+0x11 0x07 0xc0 0xf2
+# CHECK: vorr.i32	d16, #0x1000000
+0x51 0x07 0xc0 0xf2
+# CHECK: vorr.i32	q8, #0x1000000
+0x50 0x01 0xc0 0xf2
+# CHECK: vorr.i32	q8, #0x0
+
+0xb0 0x01 0x51 0xf2
+# CHECK: vbic	d16, d17, d16
+0xf2 0x01 0x50 0xf2
+# CHECK: vbic	q8, q8, q9
+0x3f 0x07 0xc7 0xf3
+# CHECK: vbic.i32	d16, #0xFF000000
+0x7f 0x07 0xc7 0xf3
+# CHECK: vbic.i32	q8, #0xFF000000
+
+0xb0 0x01 0x71 0xf2
+# CHECK: vorn	d16, d17, d16
+0xf2 0x01 0x70 0xf2
+# CHECK: vorn	q8, q8, q9
+
+0xa0 0x05 0xf0 0xf3
+# CHECK: vmvn	d16, d16
+0xe0 0x05 0xf0 0xf3
+# CHECK: vmvn	q8, q8
+
+0xb0 0x21 0x51 0xf3
+# CHECK: vbsl	d18, d17, d16
+0xf2 0x01 0x54 0xf3
+# CHECK: vbsl	q8, q10, q9
+
+
+# CHECK: vceq.i8	d16, d16, d17
+# CHECK: vceq.i16	d16, d16, d17
+# CHECK: vceq.i32	d16, d16, d17
+# CHECK: vceq.f32	d16, d16, d17
+# CHECK: vceq.i8	q8, q8, q9
+# CHECK: vceq.i16	q8, q8, q9
+# CHECK: vceq.i32	q8, q8, q9
+# CHECK: vceq.f32	q8, q8, q9
+
+0xb1 0x08 0x40 0xf3
+0xb1 0x08 0x50 0xf3
+0xb1 0x08 0x60 0xf3
+0xa1 0x0e 0x40 0xf2
+0xf2 0x08 0x40 0xf3
+0xf2 0x08 0x50 0xf3
+0xf2 0x08 0x60 0xf3
+0xe2 0x0e 0x40 0xf2
+
+# CHECK: vcge.s8	d16, d16, d17
+# CHECK: vcge.s16	d16, d16, d17
+# CHECK: vcge.s32	d16, d16, d17
+# CHECK: vcge.u8	d16, d16, d17
+# CHECK: vcge.u16	d16, d16, d17
+# CHECK: vcge.u32	d16, d16, d17
+# CHECK: vcge.f32	d16, d16, d17
+# CHECK: vcge.s8	q8, q8, q9
+# CHECK: vcge.s16	q8, q8, q9
+# CHECK: vcge.s32	q8, q8, q9
+# CHECK: vcge.u8	q8, q8, q9
+# CHECK: vcge.u16	q8, q8, q9
+# CHECK: vcge.u32	q8, q8, q9
+# CHECK: vcge.f32	q8, q8, q9
+# CHECK: vacge.f32	d16, d16, d17
+# CHECK: vacge.f32	q8, q8, q9
+
+0xb1 0x03 0x40 0xf2
+0xb1 0x03 0x50 0xf2
+0xb1 0x03 0x60 0xf2
+0xb1 0x03 0x40 0xf3
+0xb1 0x03 0x50 0xf3
+0xb1 0x03 0x60 0xf3
+0xa1 0x0e 0x40 0xf3
+0xf2 0x03 0x40 0xf2
+0xf2 0x03 0x50 0xf2
+0xf2 0x03 0x60 0xf2
+0xf2 0x03 0x40 0xf3
+0xf2 0x03 0x50 0xf3
+0xf2 0x03 0x60 0xf3
+0xe2 0x0e 0x40 0xf3
+0xb1 0x0e 0x40 0xf3
+0xf2 0x0e 0x40 0xf3
+
+# CHECK: vcgt.s8	d16, d16, d17
+# CHECK: vcgt.s16	d16, d16, d17
+# CHECK: vcgt.s32	d16, d16, d17
+# CHECK: vcgt.u8	d16, d16, d17
+# CHECK: vcgt.u16	d16, d16, d17
+# CHECK: vcgt.u32	d16, d16, d17
+# CHECK: vcgt.f32	d16, d16, d17
+# CHECK: vcgt.s8	q8, q8, q9
+# CHECK: vcgt.s16	q8, q8, q9
+# CHECK: vcgt.s32	q8, q8, q9
+# CHECK: vcgt.u8	q8, q8, q9
+# CHECK: vcgt.u16	q8, q8, q9
+# CHECK: vcgt.u32	q8, q8, q9
+# CHECK: vcgt.f32	q8, q8, q9
+# CHECK: vacgt.f32	d16, d16, d17
+# CHECK: vacgt.f32	q8, q8, q9
+
+0xa1 0x03 0x40 0xf2
+0xa1 0x03 0x50 0xf2
+0xa1 0x03 0x60 0xf2
+0xa1 0x03 0x40 0xf3
+0xa1 0x03 0x50 0xf3
+0xa1 0x03 0x60 0xf3
+0xa1 0x0e 0x60 0xf3
+0xe2 0x03 0x40 0xf2
+0xe2 0x03 0x50 0xf2
+0xe2 0x03 0x60 0xf2
+0xe2 0x03 0x40 0xf3
+0xe2 0x03 0x50 0xf3
+0xe2 0x03 0x60 0xf3
+0xe2 0x0e 0x60 0xf3
+0xb1 0x0e 0x60 0xf3
+0xf2 0x0e 0x60 0xf3
+
+# CHECK: vtst.8	d16, d16, d17
+# CHECK: vtst.16	d16, d16, d17
+# CHECK: vtst.32	d16, d16, d17
+# CHECK: vtst.8	q8, q8, q9
+# CHECK: vtst.16	q8, q8, q9
+# CHECK: vtst.32	q8, q8, q9
+
+0xb1 0x08 0x40 0xf2
+0xb1 0x08 0x50 0xf2
+0xb1 0x08 0x60 0xf2
+0xf2 0x08 0x40 0xf2
+0xf2 0x08 0x50 0xf2
+0xf2 0x08 0x60 0xf2
+
+# CHECK: vceq.i8	d16, d16, #0
+# CHECK: vcge.s8	d16, d16, #0
+# CHECK: vcle.s8	d16, d16, #0
+# CHECK: vcgt.s8	d16, d16, #0
+# CHECK: vclt.s8	d16, d16, #0
+
+0x20 0x01 0xf1 0xf3
+0xa0 0x00 0xf1 0xf3
+0xa0 0x01 0xf1 0xf3
+0x20 0x00 0xf1 0xf3
+0x20 0x02 0xf1 0xf3
+
+
+0x20 0x07 0xfb 0xf3
+# CHECK: vcvt.s32.f32	d16, d16
+0xa0 0x07 0xfb 0xf3
+# CHECK: vcvt.u32.f32	d16, d16
+0x20 0x06 0xfb 0xf3
+# CHECK: vcvt.f32.s32	d16, d16
+0xa0 0x06 0xfb 0xf3
+# CHECK: vcvt.f32.u32	d16, d16
+0x60 0x07 0xfb 0xf3
+# CHECK: vcvt.s32.f32	q8, q8
+0xe0 0x07 0xfb 0xf3
+# CHECK: vcvt.u32.f32	q8, q8
+0x60 0x06 0xfb 0xf3
+# CHECK: vcvt.f32.s32	q8, q8
+0xe0 0x06 0xfb 0xf3
+# CHECK: vcvt.f32.u32	q8, q8
+0x30 0x0f 0xff 0xf2
+# CHECK: vcvt.s32.f32	d16, d16, #1
+0x30 0x0f 0xff 0xf3
+# CHECK: vcvt.u32.f32	d16, d16, #1
+0x30 0x0e 0xff 0xf2
+# CHECK: vcvt.f32.s32	d16, d16, #1
+0x30 0x0e 0xff 0xf3
+# CHECK: vcvt.f32.u32	d16, d16, #1
+0x70 0x0f 0xff 0xf2
+# CHECK: vcvt.s32.f32	q8, q8, #1
+0x70 0x0f 0xff 0xf3
+# CHECK: vcvt.u32.f32	q8, q8, #1
+0x70 0x0e 0xff 0xf2
+# CHECK: vcvt.f32.s32	q8, q8, #1
+0x70 0x0e 0xff 0xf3
+# CHECK: vcvt.f32.u32	q8, q8, #1
+0x20 0x07 0xf6 0xf3
+# CHECK: vcvt.f32.f16	q8, d16
+0x20 0x06 0xf6 0xf3
+# CHECK: vcvt.f16.f32	d16, q8
+
+
+
+
+# CHECK: vdup.8	d16, r0
+# CHECK: vdup.16	d16, r0
+# CHECK: vdup.32	d16, r0
+
+0x90 0x0b 0xc0 0xee
+0xb0 0x0b 0x80 0xee
+0x90 0x0b 0x80 0xee
+
+# CHECK: vdup.8	q8, r0
+# CHECK: vdup.16	q8, r0
+# CHECK: vdup.32	q8, r0
+
+0x90 0x0b 0xe0 0xee
+0xb0 0x0b 0xa0 0xee
+0x90 0x0b 0xa0 0xee
+
+# CHECK: vdup.8	d16, d16[1]
+# CHECK: vdup.16	d16, d16[1]
+# CHECK: vdup.32	d16, d16[1]
+
+0x20 0x0c 0xf3 0xf3
+0x20 0x0c 0xf6 0xf3
+0x20 0x0c 0xfc 0xf3
+
+# CHECK: vdup.8	q8, d16[1]
+# CHECK: vdup.16	q8, d16[1]
+# CHECK: vdup.32	q8, d16[1]
+
+0x60 0x0c 0xf3 0xf3
+0x60 0x0c 0xf6 0xf3
+0x60 0x0c 0xfc 0xf3
+
+
+0xb1 0x06 0x40 0xf2
+# CHECK: vmin.s8	d16, d16, d17
+0xb1 0x06 0x50 0xf2
+# CHECK: vmin.s16	d16, d16, d17
+0xb1 0x06 0x60 0xf2
+# CHECK: vmin.s32	d16, d16, d17
+0xb1 0x06 0x40 0xf3
+# CHECK: vmin.u8	d16, d16, d17
+0xb1 0x06 0x50 0xf3
+# CHECK: vmin.u16	d16, d16, d17
+0xb1 0x06 0x60 0xf3
+# CHECK: vmin.u32	d16, d16, d17
+0xa1 0x0f 0x60 0xf2
+# CHECK: vmin.f32	d16, d16, d17
+0xf2 0x06 0x40 0xf2
+# CHECK: vmin.s8	q8, q8, q9
+0xf2 0x06 0x50 0xf2
+# CHECK: vmin.s16	q8, q8, q9
+0xf2 0x06 0x60 0xf2
+# CHECK: vmin.s32	q8, q8, q9
+0xf2 0x06 0x40 0xf3
+# CHECK: vmin.u8	q8, q8, q9
+0xf2 0x06 0x50 0xf3
+# CHECK: vmin.u16	q8, q8, q9
+0xf2 0x06 0x60 0xf3
+# CHECK: vmin.u32	q8, q8, q9
+0xe2 0x0f 0x60 0xf2
+# CHECK: vmin.f32	q8, q8, q9
+0xa1 0x06 0x40 0xf2
+# CHECK: vmax.s8	d16, d16, d17
+0xa1 0x06 0x50 0xf2
+# CHECK: vmax.s16	d16, d16, d17
+0xa1 0x06 0x60 0xf2
+# CHECK: vmax.s32	d16, d16, d17
+0xa1 0x06 0x40 0xf3
+# CHECK: vmax.u8	d16, d16, d17
+0xa1 0x06 0x50 0xf3
+# CHECK: vmax.u16	d16, d16, d17
+0xa1 0x06 0x60 0xf3
+# CHECK: vmax.u32	d16, d16, d17
+0xa1 0x0f 0x40 0xf2
+# CHECK: vmax.f32	d16, d16, d17
+0xe2 0x06 0x40 0xf2
+# CHECK: vmax.s8	q8, q8, q9
+0xe2 0x06 0x50 0xf2
+# CHECK: vmax.s16	q8, q8, q9
+0xe2 0x06 0x60 0xf2
+# CHECK: vmax.s32	q8, q8, q9
+0xe2 0x06 0x40 0xf3
+# CHECK: vmax.u8	q8, q8, q9
+0xe2 0x06 0x50 0xf3
+# CHECK: vmax.u16	q8, q8, q9
+0xe2 0x06 0x60 0xf3
+# CHECK: vmax.u32	q8, q8, q9
+0xe2 0x0f 0x40 0xf2
+# CHECK: vmax.f32	q8, q8, q9
+
+
+
+0x18 0x0e 0xc0 0xf2
+# CHECK: vmov.i8	d16, #0x8
+0x10 0x08 0xc1 0xf2
+# CHECK: vmov.i16	d16, #0x10
+0x10 0x0a 0xc1 0xf2
+# CHECK: vmov.i16	d16, #0x1000
+0x10 0x00 0xc2 0xf2
+# CHECK: vmov.i32	d16, #0x20
+0x10 0x02 0xc2 0xf2
+# CHECK: vmov.i32	d16, #0x2000
+0x10 0x04 0xc2 0xf2
+# CHECK: vmov.i32	d16, #0x200000
+0x10 0x06 0xc2 0xf2
+# CHECK: vmov.i32	d16, #0x20000000
+0x10 0x0c 0xc2 0xf2
+# CHECK: vmov.i32	d16, #0x20FF
+0x10 0x0d 0xc2 0xf2
+# CHECK: vmov.i32	d16, #0x20FFFF
+0x33 0x0e 0xc1 0xf3
+# CHECK: vmov.i64	d16, #0xFF0000FF0000FFFF
+0x58 0x0e 0xc0 0xf2
+# CHECK: vmov.i8	q8, #0x8
+0x50 0x08 0xc1 0xf2
+# CHECK: vmov.i16	q8, #0x10
+0x50 0x0a 0xc1 0xf2
+# CHECK: vmov.i16	q8, #0x1000
+0x50 0x00 0xc2 0xf2
+# CHECK: vmov.i32	q8, #0x20
+0x50 0x02 0xc2 0xf2
+# CHECK: vmov.i32	q8, #0x2000
+0x50 0x04 0xc2 0xf2
+# CHECK: vmov.i32	q8, #0x200000
+0x50 0x06 0xc2 0xf2
+# CHECK: vmov.i32	q8, #0x20000000
+0x50 0x0c 0xc2 0xf2
+# CHECK: vmov.i32	q8, #0x20FF
+0x50 0x0d 0xc2 0xf2
+# CHECK: vmov.i32	q8, #0x20FFFF
+0x73 0x0e 0xc1 0xf3
+# CHECK: vmov.i64	q8, #0xFF0000FF0000FFFF
+0x30 0x08 0xc1 0xf2
+# CHECK: vmvn.i16	d16, #0x10
+0x30 0x0a 0xc1 0xf2
+# CHECK: vmvn.i16	d16, #0x1000
+0x30 0x00 0xc2 0xf2
+# CHECK: vmvn.i32	d16, #0x20
+0x30 0x02 0xc2 0xf2
+# CHECK: vmvn.i32	d16, #0x2000
+0x30 0x04 0xc2 0xf2
+# CHECK: vmvn.i32	d16, #0x200000
+0x30 0x06 0xc2 0xf2
+# CHECK: vmvn.i32	d16, #0x20000000
+0x30 0x0c 0xc2 0xf2
+# CHECK: vmvn.i32	d16, #0x20FF
+0x30 0x0d 0xc2 0xf2
+# CHECK: vmvn.i32	d16, #0x20FFFF
+0x30 0x0a 0xc8 0xf2
+# CHECK: vmovl.s8	q8, d16
+0x30 0x0a 0xd0 0xf2
+# CHECK: vmovl.s16	q8, d16
+0x30 0x0a 0xe0 0xf2
+# CHECK: vmovl.s32	q8, d16
+0x30 0x0a 0xc8 0xf3
+# CHECK: vmovl.u8	q8, d16
+0x30 0x0a 0xd0 0xf3
+# CHECK: vmovl.u16	q8, d16
+0x30 0x0a 0xe0 0xf3
+# CHECK: vmovl.u32	q8, d16
+0x20 0x02 0xf2 0xf3
+# CHECK: vmovn.i16	d16, q8
+0x20 0x02 0xf6 0xf3
+# CHECK: vmovn.i32	d16, q8
+0x20 0x02 0xfa 0xf3
+# CHECK: vmovn.i64	d16, q8
+0xa0 0x02 0xf2 0xf3
+# CHECK: vqmovn.s16	d16, q8
+0xa0 0x02 0xf6 0xf3
+# CHECK: vqmovn.s32	d16, q8
+0xa0 0x02 0xfa 0xf3
+# CHECK: vqmovn.s64	d16, q8
+0xe0 0x02 0xf2 0xf3
+# CHECK: vqmovn.u16	d16, q8
+0xe0 0x02 0xf6 0xf3
+# CHECK: vqmovn.u32	d16, q8
+0xe0 0x02 0xfa 0xf3
+# CHECK: vqmovn.u64	d16, q8
+0x60 0x02 0xf2 0xf3
+# CHECK: vqmovun.s16	d16, q8
+0x60 0x02 0xf6 0xf3
+# CHECK: vqmovun.s32	d16, q8
+0x60 0x02 0xfa 0xf3
+# CHECK: vqmovun.s64	d16, q8
+0xb0 0x0b 0x50 0xee
+# CHECK: vmov.s8	r0, d16[1]
+0xf0 0x0b 0x10 0xee
+# CHECK: vmov.s16	r0, d16[1]
+0xb0 0x0b 0xd0 0xee
+# CHECK: vmov.u8	r0, d16[1]
+0xf0 0x0b 0x90 0xee
+# CHECK: vmov.u16	r0, d16[1]
+0x90 0x0b 0x30 0xee
+# CHECK: vmov.32	r0, d16[1]
+0xb0 0x1b 0x40 0xee
+# CHECK: vmov.8	d16[1], r1
+0xf0 0x1b 0x00 0xee
+# CHECK: vmov.16	d16[1], r1
+0x90 0x1b 0x20 0xee
+# CHECK: vmov.32	d16[1], r1
+0xb0 0x1b 0x42 0xee
+# CHECK: vmov.8	d18[1], r1
+0xf0 0x1b 0x02 0xee
+# CHECK: vmov.16	d18[1], r1
+0x90 0x1b 0x22 0xee
+# CHECK: vmov.32	d18[1], r1
+
+
+
+0xa1 0x09 0x42 0xf2
+# CHECK: vmla.i8	d16, d18, d17
+0xa1 0x09 0x52 0xf2
+# CHECK: vmla.i16	d16, d18, d17
+0xa1 0x09 0x62 0xf2
+# CHECK: vmla.i32	d16, d18, d17
+0xb1 0x0d 0x42 0xf2
+# CHECK: vmla.f32	d16, d18, d17
+0xe4 0x29 0x40 0xf2
+# CHECK: vmla.i8	q9, q8, q10
+0xe4 0x29 0x50 0xf2
+# CHECK: vmla.i16	q9, q8, q10
+0xe4 0x29 0x60 0xf2
+# CHECK: vmla.i32	q9, q8, q10
+0xf4 0x2d 0x40 0xf2
+# CHECK: vmla.f32	q9, q8, q10
+0xa2 0x08 0xc3 0xf2
+# CHECK: vmlal.s8	q8, d19, d18
+0xa2 0x08 0xd3 0xf2
+# CHECK: vmlal.s16	q8, d19, d18
+0xa2 0x08 0xe3 0xf2
+# CHECK: vmlal.s32	q8, d19, d18
+0xa2 0x08 0xc3 0xf3
+# CHECK: vmlal.u8	q8, d19, d18
+0xa2 0x08 0xd3 0xf3
+# CHECK: vmlal.u16	q8, d19, d18
+0xa2 0x08 0xe3 0xf3
+# CHECK: vmlal.u32	q8, d19, d18
+0xa2 0x09 0xd3 0xf2
+# CHECK: vqdmlal.s16	q8, d19, d18
+0xa2 0x09 0xe3 0xf2
+# CHECK: vqdmlal.s32	q8, d19, d18
+0xa1 0x09 0x42 0xf3
+# CHECK: vmls.i8	d16, d18, d17
+0xa1 0x09 0x52 0xf3
+# CHECK: vmls.i16	d16, d18, d17
+0xa1 0x09 0x62 0xf3
+# CHECK: vmls.i32	d16, d18, d17
+0xb1 0x0d 0x62 0xf2
+# CHECK: vmls.f32	d16, d18, d17
+0xe4 0x29 0x40 0xf3
+# CHECK: vmls.i8	q9, q8, q10
+0xe4 0x29 0x50 0xf3
+# CHECK: vmls.i16	q9, q8, q10
+0xe4 0x29 0x60 0xf3
+# CHECK: vmls.i32	q9, q8, q10
+0xf4 0x2d 0x60 0xf2
+# CHECK: vmls.f32	q9, q8, q10
+0xa2 0x0a 0xc3 0xf2
+# CHECK: vmlsl.s8	q8, d19, d18
+0xa2 0x0a 0xd3 0xf2
+# CHECK: vmlsl.s16	q8, d19, d18
+0xa2 0x0a 0xe3 0xf2
+# CHECK: vmlsl.s32	q8, d19, d18
+0xa2 0x0a 0xc3 0xf3
+# CHECK: vmlsl.u8	q8, d19, d18
+0xa2 0x0a 0xd3 0xf3
+# CHECK: vmlsl.u16	q8, d19, d18
+0xa2 0x0a 0xe3 0xf3
+# CHECK: vmlsl.u32	q8, d19, d18
+0xa2 0x0b 0xd3 0xf2
+# CHECK: vqdmlsl.s16	q8, d19, d18
+0xa2 0x0b 0xe3 0xf2
+# CHECK: vqdmlsl.s32	q8, d19, d18
+
+
+0xb1 0x09 0x40 0xf2
+# CHECK: vmul.i8	d16, d16, d17
+0xb1 0x09 0x50 0xf2
+# CHECK: vmul.i16	d16, d16, d17
+0xb1 0x09 0x60 0xf2
+# CHECK: vmul.i32	d16, d16, d17
+0xb1 0x0d 0x40 0xf3
+# CHECK: vmul.f32	d16, d16, d17
+0xf2 0x09 0x40 0xf2
+# CHECK: vmul.i8	q8, q8, q9
+0xf2 0x09 0x50 0xf2
+# CHECK: vmul.i16	q8, q8, q9
+0xf2 0x09 0x60 0xf2
+# CHECK: vmul.i32	q8, q8, q9
+0xf2 0x0d 0x40 0xf3
+# CHECK: vmul.f32	q8, q8, q9
+0xb1 0x09 0x40 0xf3
+# CHECK: vmul.p8	d16, d16, d17
+0xf2 0x09 0x40 0xf3
+# CHECK: vmul.p8	q8, q8, q9
+0xa1 0x0b 0x50 0xf2
+# CHECK: vqdmulh.s16	d16, d16, d17
+0xa1 0x0b 0x60 0xf2
+# CHECK: vqdmulh.s32	d16, d16, d17
+0xe2 0x0b 0x50 0xf2
+# CHECK: vqdmulh.s16	q8, q8, q9
+0xe2 0x0b 0x60 0xf2
+# CHECK: vqdmulh.s32	q8, q8, q9
+0xa1 0x0b 0x50 0xf3
+# CHECK: vqrdmulh.s16	d16, d16, d17
+0xa1 0x0b 0x60 0xf3
+# CHECK: vqrdmulh.s32	d16, d16, d17
+0xe2 0x0b 0x50 0xf3
+# CHECK: vqrdmulh.s16	q8, q8, q9
+0xe2 0x0b 0x60 0xf3
+# CHECK: vqrdmulh.s32	q8, q8, q9
+0xa1 0x0c 0xc0 0xf2
+# CHECK: vmull.s8	q8, d16, d17
+0xa1 0x0c 0xd0 0xf2
+# CHECK: vmull.s16	q8, d16, d17
+0xa1 0x0c 0xe0 0xf2
+# CHECK: vmull.s32	q8, d16, d17
+0xa1 0x0c 0xc0 0xf3
+# CHECK: vmull.u8	q8, d16, d17
+0xa1 0x0c 0xd0 0xf3
+# CHECK: vmull.u16	q8, d16, d17
+0xa1 0x0c 0xe0 0xf3
+# CHECK: vmull.u32	q8, d16, d17
+0xa1 0x0e 0xc0 0xf2
+# CHECK: vmull.p8	q8, d16, d17
+0xa1 0x0d 0xd0 0xf2
+# CHECK: vqdmull.s16	q8, d16, d17
+0xa1 0x0d 0xe0 0xf2
+# CHECK: vqdmull.s32	q8, d16, d17
+
+
+0xa0 0x03 0xf1 0xf3
+# CHECK: vneg.s8	d16, d16
+0xa0 0x03 0xf5 0xf3
+# CHECK: vneg.s16	d16, d16
+0xa0 0x03 0xf9 0xf3
+# CHECK: vneg.s32	d16, d16
+0xa0 0x07 0xf9 0xf3
+# CHECK: vneg.f32	d16, d16
+0xe0 0x03 0xf1 0xf3
+# CHECK: vneg.s8	q8, q8
+0xe0 0x03 0xf5 0xf3
+# CHECK: vneg.s16	q8, q8
+0xe0 0x03 0xf9 0xf3
+# CHECK: vneg.s32	q8, q8
+0xe0 0x07 0xf9 0xf3
+# CHECK: vneg.f32	q8, q8
+0xa0 0x07 0xf0 0xf3
+# CHECK: vqneg.s8	d16, d16
+0xa0 0x07 0xf4 0xf3
+# CHECK: vqneg.s16	d16, d16
+0xa0 0x07 0xf8 0xf3
+# CHECK: vqneg.s32	d16, d16
+0xe0 0x07 0xf0 0xf3
+# CHECK: vqneg.s8	q8, q8
+0xe0 0x07 0xf4 0xf3
+# CHECK: vqneg.s16	q8, q8
+0xe0 0x07 0xf8 0xf3
+# CHECK: vqneg.s32	q8, q8
+
+
+0xb0 0x0b 0x41 0xf2
+# CHECK: vpadd.i8	d16, d17, d16
+0xb0 0x0b 0x51 0xf2
+# CHECK: vpadd.i16	d16, d17, d16
+0xb0 0x0b 0x61 0xf2
+# CHECK: vpadd.i32	d16, d17, d16
+0xa1 0x0d 0x40 0xf3
+# CHECK: vpadd.f32	d16, d16, d17
+0x20 0x02 0xf0 0xf3
+# CHECK: vpaddl.s8	d16, d16
+0x20 0x02 0xf4 0xf3
+# CHECK: vpaddl.s16	d16, d16
+0x20 0x02 0xf8 0xf3
+# CHECK: vpaddl.s32	d16, d16
+0xa0 0x02 0xf0 0xf3
+# CHECK: vpaddl.u8	d16, d16
+0xa0 0x02 0xf4 0xf3
+# CHECK: vpaddl.u16	d16, d16
+0xa0 0x02 0xf8 0xf3
+# CHECK: vpaddl.u32	d16, d16
+0x60 0x02 0xf0 0xf3
+# CHECK: vpaddl.s8	q8, q8
+0x60 0x02 0xf4 0xf3
+# CHECK: vpaddl.s16	q8, q8
+0x60 0x02 0xf8 0xf3
+# CHECK: vpaddl.s32	q8, q8
+0xe0 0x02 0xf0 0xf3
+# CHECK: vpaddl.u8	q8, q8
+0xe0 0x02 0xf4 0xf3
+# CHECK: vpaddl.u16	q8, q8
+0xe0 0x02 0xf8 0xf3
+# CHECK: vpaddl.u32	q8, q8
+0x21 0x06 0xf0 0xf3
+# CHECK: vpadal.s8	d16, d17
+0x21 0x06 0xf4 0xf3
+# CHECK: vpadal.s16	d16, d17
+0x21 0x06 0xf8 0xf3
+# CHECK: vpadal.s32	d16, d17
+0xa1 0x06 0xf0 0xf3
+# CHECK: vpadal.u8	d16, d17
+0xa1 0x06 0xf4 0xf3
+# CHECK: vpadal.u16	d16, d17
+0xa1 0x06 0xf8 0xf3
+# CHECK: vpadal.u32	d16, d17
+0x60 0x26 0xf0 0xf3
+# CHECK: vpadal.s8	q9, q8
+0x60 0x26 0xf4 0xf3
+# CHECK: vpadal.s16	q9, q8
+0x60 0x26 0xf8 0xf3
+# CHECK: vpadal.s32	q9, q8
+0xe0 0x26 0xf0 0xf3
+# CHECK: vpadal.u8	q9, q8
+0xe0 0x26 0xf4 0xf3
+# CHECK: vpadal.u16	q9, q8
+0xe0 0x26 0xf8 0xf3
+# CHECK: vpadal.u32	q9, q8
+0xb1 0x0a 0x40 0xf2
+# CHECK: vpmin.s8	d16, d16, d17
+0xb1 0x0a 0x50 0xf2
+# CHECK: vpmin.s16	d16, d16, d17
+0xb1 0x0a 0x60 0xf2
+# CHECK: vpmin.s32	d16, d16, d17
+0xb1 0x0a 0x40 0xf3
+# CHECK: vpmin.u8	d16, d16, d17
+0xb1 0x0a 0x50 0xf3
+# CHECK: vpmin.u16	d16, d16, d17
+0xb1 0x0a 0x60 0xf3
+# CHECK: vpmin.u32	d16, d16, d17
+0xa1 0x0f 0x60 0xf3
+# CHECK: vpmin.f32	d16, d16, d17
+0xa1 0x0a 0x40 0xf2
+# CHECK: vpmax.s8	d16, d16, d17
+0xa1 0x0a 0x50 0xf2
+# CHECK: vpmax.s16	d16, d16, d17
+0xa1 0x0a 0x60 0xf2
+# CHECK: vpmax.s32	d16, d16, d17
+0xa1 0x0a 0x40 0xf3
+# CHECK: vpmax.u8	d16, d16, d17
+0xa1 0x0a 0x50 0xf3
+# CHECK: vpmax.u16	d16, d16, d17
+0xa1 0x0a 0x60 0xf3
+# CHECK: vpmax.u32	d16, d16, d17
+0xa1 0x0f 0x40 0xf3
+# CHECK: vpmax.f32	d16, d16, d17
+
+
+0x20 0x04 0xfb 0xf3
+# CHECK: vrecpe.u32	d16, d16
+0x60 0x04 0xfb 0xf3
+# CHECK: vrecpe.u32	q8, q8
+0x20 0x05 0xfb 0xf3
+# CHECK: vrecpe.f32	d16, d16
+0x60 0x05 0xfb 0xf3
+# CHECK: vrecpe.f32	q8, q8
+0xb1 0x0f 0x40 0xf2
+# CHECK: vrecps.f32	d16, d16, d17
+0xf2 0x0f 0x40 0xf2
+# CHECK: vrecps.f32	q8, q8, q9
+0xa0 0x04 0xfb 0xf3
+# CHECK: vrsqrte.u32	d16, d16
+0xe0 0x04 0xfb 0xf3
+# CHECK: vrsqrte.u32	q8, q8
+0xa0 0x05 0xfb 0xf3
+# CHECK: vrsqrte.f32	d16, d16
+0xe0 0x05 0xfb 0xf3
+# CHECK: vrsqrte.f32	q8, q8
+0xb1 0x0f 0x60 0xf2
+# CHECK: vrsqrts.f32	d16, d16, d17
+0xf2 0x0f 0x60 0xf2
+# CHECK: vrsqrts.f32	q8, q8, q9
+
+
+0x20 0x00 0xf0 0xf3
+# CHECK: vrev64.8	d16, d16
+0x20 0x00 0xf4 0xf3
+# CHECK: vrev64.16	d16, d16
+0x20 0x00 0xf8 0xf3
+# CHECK: vrev64.32	d16, d16
+0x60 0x00 0xf0 0xf3
+# CHECK: vrev64.8	q8, q8
+0x60 0x00 0xf4 0xf3
+# CHECK: vrev64.16	q8, q8
+0x60 0x00 0xf8 0xf3
+# CHECK: vrev64.32	q8, q8
+0xa0 0x00 0xf0 0xf3
+# CHECK: vrev32.8	d16, d16
+0xa0 0x00 0xf4 0xf3
+# CHECK: vrev32.16	d16, d16
+0xe0 0x00 0xf0 0xf3
+# CHECK: vrev32.8	q8, q8
+0xe0 0x00 0xf4 0xf3
+# CHECK: vrev32.16	q8, q8
+0x20 0x01 0xf0 0xf3
+# CHECK: vrev16.8	d16, d16
+0x60 0x01 0xf0 0xf3
+# CHECK: vrev16.8	q8, q8
+
+
+0xb0 0x04 0x41 0xf2
+# CHECK: vqshl.s8	d16, d16, d17
+0xb0 0x04 0x51 0xf2
+# CHECK: vqshl.s16	d16, d16, d17
+0xb0 0x04 0x61 0xf2
+# CHECK: vqshl.s32	d16, d16, d17
+0xb0 0x04 0x71 0xf2
+# CHECK: vqshl.s64	d16, d16, d17
+0xb0 0x04 0x41 0xf3
+# CHECK: vqshl.u8	d16, d16, d17
+0xb0 0x04 0x51 0xf3
+# CHECK: vqshl.u16	d16, d16, d17
+0xb0 0x04 0x61 0xf3
+# CHECK: vqshl.u32	d16, d16, d17
+0xb0 0x04 0x71 0xf3
+# CHECK: vqshl.u64	d16, d16, d17
+0xf0 0x04 0x42 0xf2
+# CHECK: vqshl.s8	q8, q8, q9
+0xf0 0x04 0x52 0xf2
+# CHECK: vqshl.s16	q8, q8, q9
+0xf0 0x04 0x62 0xf2
+# CHECK: vqshl.s32	q8, q8, q9
+0xf0 0x04 0x72 0xf2
+# CHECK: vqshl.s64	q8, q8, q9
+0xf0 0x04 0x42 0xf3
+# CHECK: vqshl.u8	q8, q8, q9
+0xf0 0x04 0x52 0xf3
+# CHECK: vqshl.u16	q8, q8, q9
+0xf0 0x04 0x62 0xf3
+# CHECK: vqshl.u32	q8, q8, q9
+0xf0 0x04 0x72 0xf3
+# CHECK: vqshl.u64	q8, q8, q9
+0x30 0x07 0xcf 0xf2
+# CHECK: vqshl.s8	d16, d16, #7
+0x30 0x07 0xdf 0xf2
+# CHECK: vqshl.s16	d16, d16, #15
+0x30 0x07 0xff 0xf2
+# CHECK: vqshl.s32	d16, d16, #31
+0xb0 0x07 0xff 0xf2
+# CHECK: vqshl.s64	d16, d16, #63
+0x30 0x07 0xcf 0xf3
+# CHECK: vqshl.u8	d16, d16, #7
+0x30 0x07 0xdf 0xf3
+# CHECK: vqshl.u16	d16, d16, #15
+0x30 0x07 0xff 0xf3
+# CHECK: vqshl.u32	d16, d16, #31
+0xb0 0x07 0xff 0xf3
+# CHECK: vqshl.u64	d16, d16, #63
+0x30 0x06 0xcf 0xf3
+# CHECK: vqshlu.s8	d16, d16, #7
+0x30 0x06 0xdf 0xf3
+# CHECK: vqshlu.s16	d16, d16, #15
+0x30 0x06 0xff 0xf3
+# CHECK: vqshlu.s32	d16, d16, #31
+0xb0 0x06 0xff 0xf3
+# CHECK: vqshlu.s64	d16, d16, #63
+0x70 0x07 0xcf 0xf2
+# CHECK: vqshl.s8	q8, q8, #7
+0x70 0x07 0xdf 0xf2
+# CHECK: vqshl.s16	q8, q8, #15
+0x70 0x07 0xff 0xf2
+# CHECK: vqshl.s32	q8, q8, #31
+0xf0 0x07 0xff 0xf2
+# CHECK: vqshl.s64	q8, q8, #63
+0x70 0x07 0xcf 0xf3
+# CHECK: vqshl.u8	q8, q8, #7
+0x70 0x07 0xdf 0xf3
+# CHECK: vqshl.u16	q8, q8, #15
+0x70 0x07 0xff 0xf3
+# CHECK: vqshl.u32	q8, q8, #31
+0xf0 0x07 0xff 0xf3
+# CHECK: vqshl.u64	q8, q8, #63
+0x70 0x06 0xcf 0xf3
+# CHECK: vqshlu.s8	q8, q8, #7
+0x70 0x06 0xdf 0xf3
+# CHECK: vqshlu.s16	q8, q8, #15
+0x70 0x06 0xff 0xf3
+# CHECK: vqshlu.s32	q8, q8, #31
+0xf0 0x06 0xff 0xf3
+# CHECK: vqshlu.s64	q8, q8, #63
+0xb0 0x05 0x41 0xf2
+# CHECK: vqrshl.s8	d16, d16, d17
+0xb0 0x05 0x51 0xf2
+# CHECK: vqrshl.s16	d16, d16, d17
+0xb0 0x05 0x61 0xf2
+# CHECK: vqrshl.s32	d16, d16, d17
+0xb0 0x05 0x71 0xf2
+# CHECK: vqrshl.s64	d16, d16, d17
+0xb0 0x05 0x41 0xf3
+# CHECK: vqrshl.u8	d16, d16, d17
+0xb0 0x05 0x51 0xf3
+# CHECK: vqrshl.u16	d16, d16, d17
+0xb0 0x05 0x61 0xf3
+# CHECK: vqrshl.u32	d16, d16, d17
+0xb0 0x05 0x71 0xf3
+# CHECK: vqrshl.u64	d16, d16, d17
+0xf0 0x05 0x42 0xf2
+# CHECK: vqrshl.s8	q8, q8, q9
+0xf0 0x05 0x52 0xf2
+# CHECK: vqrshl.s16	q8, q8, q9
+0xf0 0x05 0x62 0xf2
+# CHECK: vqrshl.s32	q8, q8, q9
+0xf0 0x05 0x72 0xf2
+# CHECK: vqrshl.s64	q8, q8, q9
+0xf0 0x05 0x42 0xf3
+# CHECK: vqrshl.u8	q8, q8, q9
+0xf0 0x05 0x52 0xf3
+# CHECK: vqrshl.u16	q8, q8, q9
+0xf0 0x05 0x62 0xf3
+# CHECK: vqrshl.u32	q8, q8, q9
+0xf0 0x05 0x72 0xf3
+# CHECK: vqrshl.u64	q8, q8, q9
+0x30 0x09 0xc8 0xf2
+# CHECK: vqshrn.s16	d16, q8, #8
+0x30 0x09 0xd0 0xf2
+# CHECK: vqshrn.s32	d16, q8, #16
+0x30 0x09 0xe0 0xf2
+# CHECK: vqshrn.s64	d16, q8, #32
+0x30 0x09 0xc8 0xf3
+# CHECK: vqshrn.u16	d16, q8, #8
+0x30 0x09 0xd0 0xf3
+# CHECK: vqshrn.u32	d16, q8, #16
+0x30 0x09 0xe0 0xf3
+# CHECK: vqshrn.u64	d16, q8, #32
+0x30 0x08 0xc8 0xf3
+# CHECK: vqshrun.s16	d16, q8, #8
+0x30 0x08 0xd0 0xf3
+# CHECK: vqshrun.s32	d16, q8, #16
+0x30 0x08 0xe0 0xf3
+# CHECK: vqshrun.s64	d16, q8, #32
+0x70 0x09 0xc8 0xf2
+# CHECK: vqrshrn.s16	d16, q8, #8
+0x70 0x09 0xd0 0xf2
+# CHECK: vqrshrn.s32	d16, q8, #16
+0x70 0x09 0xe0 0xf2
+# CHECK: vqrshrn.s64	d16, q8, #32
+0x70 0x09 0xc8 0xf3
+# CHECK: vqrshrn.u16	d16, q8, #8
+0x70 0x09 0xd0 0xf3
+# CHECK: vqrshrn.u32	d16, q8, #16
+0x70 0x09 0xe0 0xf3
+# CHECK: vqrshrn.u64	d16, q8, #32
+0x70 0x08 0xc8 0xf3
+# CHECK: vqrshrun.s16	d16, q8, #8
+0x70 0x08 0xd0 0xf3
+# CHECK: vqrshrun.s32	d16, q8, #16
+0x70 0x08 0xe0 0xf3
+# CHECK: vqrshrun.s64	d16, q8, #32
+
+
+0xa1 0x04 0x40 0xf3
+# CHECK: vshl.u8	d16, d17, d16
+0xa1 0x04 0x50 0xf3
+# CHECK: vshl.u16	d16, d17, d16
+0xa1 0x04 0x60 0xf3
+# CHECK: vshl.u32	d16, d17, d16
+0xa1 0x04 0x70 0xf3
+# CHECK: vshl.u64	d16, d17, d16
+0x30 0x05 0xcf 0xf2
+# CHECK: vshl.i8	d16, d16, #7
+0x30 0x05 0xdf 0xf2
+# CHECK: vshl.i16	d16, d16, #15
+0x30 0x05 0xff 0xf2
+# CHECK: vshl.i32	d16, d16, #31
+0xb0 0x05 0xff 0xf2
+# CHECK: vshl.i64	d16, d16, #63
+0xe2 0x04 0x40 0xf3
+# CHECK: vshl.u8	q8, q9, q8
+0xe2 0x04 0x50 0xf3
+# CHECK: vshl.u16	q8, q9, q8
+0xe2 0x04 0x60 0xf3
+# CHECK: vshl.u32	q8, q9, q8
+0xe2 0x04 0x70 0xf3
+# CHECK: vshl.u64	q8, q9, q8
+0x70 0x05 0xcf 0xf2
+# CHECK: vshl.i8	q8, q8, #7
+0x70 0x05 0xdf 0xf2
+# CHECK: vshl.i16	q8, q8, #15
+0x70 0x05 0xff 0xf2
+# CHECK: vshl.i32	q8, q8, #31
+0xf0 0x05 0xff 0xf2
+# CHECK: vshl.i64	q8, q8, #63
+0x30 0x00 0xc9 0xf3
+# CHECK: vshr.u8	d16, d16, #7
+0x30 0x00 0xd1 0xf3
+# CHECK: vshr.u16	d16, d16, #15
+0x30 0x00 0xe1 0xf3
+# CHECK: vshr.u32	d16, d16, #31
+0xb0 0x00 0xc1 0xf3
+# CHECK: vshr.u64	d16, d16, #63
+0x70 0x00 0xc9 0xf3
+# CHECK: vshr.u8	q8, q8, #7
+0x70 0x00 0xd1 0xf3
+# CHECK: vshr.u16	q8, q8, #15
+0x70 0x00 0xe1 0xf3
+# CHECK: vshr.u32	q8, q8, #31
+0xf0 0x00 0xc1 0xf3
+# CHECK: vshr.u64	q8, q8, #63
+0x30 0x00 0xc9 0xf2
+# CHECK: vshr.s8	d16, d16, #7
+0x30 0x00 0xd1 0xf2
+# CHECK: vshr.s16	d16, d16, #15
+0x30 0x00 0xe1 0xf2
+# CHECK: vshr.s32	d16, d16, #31
+0xb0 0x00 0xc1 0xf2
+# CHECK: vshr.s64	d16, d16, #63
+0x70 0x00 0xc9 0xf2
+# CHECK: vshr.s8	q8, q8, #7
+0x70 0x00 0xd1 0xf2
+# CHECK: vshr.s16	q8, q8, #15
+0x70 0x00 0xe1 0xf2
+# CHECK: vshr.s32	q8, q8, #31
+0xf0 0x00 0xc1 0xf2
+# CHECK: vshr.s64	q8, q8, #63
+0x30 0x01 0xc9 0xf3
+# CHECK: vsra.u8   d16, d16, #7
+0x30 0x01 0xd1 0xf3
+# CHECK: vsra.u16  d16, d16, #15
+0x30 0x01 0xe1 0xf3
+# CHECK: vsra.u32  d16, d16, #31
+0xb0 0x01 0xc1 0xf3
+# CHECK: vsra.u64  d16, d16, #63
+0x70 0x01 0xc9 0xf3
+# CHECK: vsra.u8   q8, q8, #7
+0x70 0x01 0xd1 0xf3
+# CHECK: vsra.u16  q8, q8, #15
+0x70 0x01 0xe1 0xf3
+# CHECK: vsra.u32  q8, q8, #31
+0xf0 0x01 0xc1 0xf3
+# CHECK: vsra.u64  q8, q8, #63
+0x30 0x01 0xc9 0xf2
+# CHECK: vsra.s8   d16, d16, #7
+0x30 0x01 0xd1 0xf2
+# CHECK: vsra.s16  d16, d16, #15
+0x30 0x01 0xe1 0xf2
+# CHECK: vsra.s32  d16, d16, #31
+0xb0 0x01 0xc1 0xf2
+# CHECK: vsra.s64  d16, d16, #63
+0x70 0x01 0xc9 0xf2
+# CHECK: vsra.s8   q8, q8, #7
+0x70 0x01 0xd1 0xf2
+# CHECK: vsra.s16  q8, q8, #15
+0x70 0x01 0xe1 0xf2
+# CHECK: vsra.s32  q8, q8, #31
+0xf0 0x01 0xc1 0xf2
+# CHECK: vsra.s64  q8, q8, #63
+0x30 0x04 0xc9 0xf3
+# CHECK: vsri.8   d16, d16, #7
+0x30 0x04 0xd1 0xf3
+# CHECK: vsri.16  d16, d16, #15
+0x30 0x04 0xe1 0xf3
+# CHECK: vsri.32  d16, d16, #31
+0xb0 0x04 0xc1 0xf3
+# CHECK: vsri.64  d16, d16, #63
+0x70 0x04 0xc9 0xf3
+# CHECK: vsri.8   q8, q8, #7
+0x70 0x04 0xd1 0xf3
+# CHECK: vsri.16  q8, q8, #15
+0x70 0x04 0xe1 0xf3
+# CHECK: vsri.32  q8, q8, #31
+0xf0 0x04 0xc1 0xf3
+# CHECK: vsri.64  q8, q8, #63
+0x30 0x05 0xcf 0xf3
+# CHECK: vsli.8   d16, d16, #7
+0x30 0x05 0xdf 0xf3
+# CHECK: vsli.16  d16, d16, #15
+0x30 0x05 0xff 0xf3
+# CHECK: vsli.32  d16, d16, #31
+0xb0 0x05 0xff 0xf3
+# CHECK: vsli.64  d16, d16, #63
+0x70 0x05 0xcf 0xf3
+# CHECK: vsli.8   q8, q8, #7
+0x70 0x05 0xdf 0xf3
+# CHECK: vsli.16  q8, q8, #15
+0x70 0x05 0xff 0xf3
+# CHECK: vsli.32  q8, q8, #31
+0xf0 0x05 0xff 0xf3
+# CHECK: vsli.64  q8, q8, #63
+0x30 0x0a 0xcf 0xf2
+# CHECK: vshll.s8	q8, d16, #7
+0x30 0x0a 0xdf 0xf2
+# CHECK: vshll.s16	q8, d16, #15
+0x30 0x0a 0xff 0xf2
+# CHECK: vshll.s32	q8, d16, #31
+0x30 0x0a 0xcf 0xf3
+# CHECK: vshll.u8	q8, d16, #7
+0x30 0x0a 0xdf 0xf3
+# CHECK: vshll.u16	q8, d16, #15
+0x30 0x0a 0xff 0xf3
+# CHECK: vshll.u32	q8, d16, #31
+0x20 0x03 0xf2 0xf3
+# CHECK: vshll.i8	q8, d16, #8
+0x20 0x03 0xf6 0xf3
+# CHECK: vshll.i16	q8, d16, #16
+0x20 0x03 0xfa 0xf3
+# CHECK: vshll.i32	q8, d16, #32
+0x30 0x08 0xc8 0xf2
+# CHECK: vshrn.i16	d16, q8, #8
+0x30 0x08 0xd0 0xf2
+# CHECK: vshrn.i32	d16, q8, #16
+0x30 0x08 0xe0 0xf2
+# CHECK: vshrn.i64	d16, q8, #32
+0xa1 0x05 0x40 0xf2
+# CHECK: vrshl.s8	d16, d17, d16
+0xa1 0x05 0x50 0xf2
+# CHECK: vrshl.s16	d16, d17, d16
+0xa1 0x05 0x60 0xf2
+# CHECK: vrshl.s32	d16, d17, d16
+0xa1 0x05 0x70 0xf2
+# CHECK: vrshl.s64	d16, d17, d16
+0xa1 0x05 0x40 0xf3
+# CHECK: vrshl.u8	d16, d17, d16
+0xa1 0x05 0x50 0xf3
+# CHECK: vrshl.u16	d16, d17, d16
+0xa1 0x05 0x60 0xf3
+# CHECK: vrshl.u32	d16, d17, d16
+0xa1 0x05 0x70 0xf3
+# CHECK: vrshl.u64	d16, d17, d16
+0xe2 0x05 0x40 0xf2
+# CHECK: vrshl.s8	q8, q9, q8
+0xe2 0x05 0x50 0xf2
+# CHECK: vrshl.s16	q8, q9, q8
+0xe2 0x05 0x60 0xf2
+# CHECK: vrshl.s32	q8, q9, q8
+0xe2 0x05 0x70 0xf2
+# CHECK: vrshl.s64	q8, q9, q8
+0xe2 0x05 0x40 0xf3
+# CHECK: vrshl.u8	q8, q9, q8
+0xe2 0x05 0x50 0xf3
+# CHECK: vrshl.u16	q8, q9, q8
+0xe2 0x05 0x60 0xf3
+# CHECK: vrshl.u32	q8, q9, q8
+0xe2 0x05 0x70 0xf3
+# CHECK: vrshl.u64	q8, q9, q8
+0x30 0x02 0xc8 0xf2
+# CHECK: vrshr.s8	d16, d16, #8
+0x30 0x02 0xd0 0xf2
+# CHECK: vrshr.s16	d16, d16, #16
+0x30 0x02 0xe0 0xf2
+# CHECK: vrshr.s32	d16, d16, #32
+0xb0 0x02 0xc0 0xf2
+# CHECK: vrshr.s64	d16, d16, #64
+0x30 0x02 0xc8 0xf3
+# CHECK: vrshr.u8	d16, d16, #8
+0x30 0x02 0xd0 0xf3
+# CHECK: vrshr.u16	d16, d16, #16
+0x30 0x02 0xe0 0xf3
+# CHECK: vrshr.u32	d16, d16, #32
+0xb0 0x02 0xc0 0xf3
+# CHECK: vrshr.u64	d16, d16, #64
+0x70 0x02 0xc8 0xf2
+# CHECK: vrshr.s8	q8, q8, #8
+0x70 0x02 0xd0 0xf2
+# CHECK: vrshr.s16	q8, q8, #16
+0x70 0x02 0xe0 0xf2
+# CHECK: vrshr.s32	q8, q8, #32
+0xf0 0x02 0xc0 0xf2
+# CHECK: vrshr.s64	q8, q8, #64
+0x70 0x02 0xc8 0xf3
+# CHECK: vrshr.u8	q8, q8, #8
+0x70 0x02 0xd0 0xf3
+# CHECK: vrshr.u16	q8, q8, #16
+0x70 0x02 0xe0 0xf3
+# CHECK: vrshr.u32	q8, q8, #32
+0xf0 0x02 0xc0 0xf3
+# CHECK: vrshr.u64	q8, q8, #64
+0x70 0x08 0xc8 0xf2
+# CHECK: vrshrn.i16	d16, q8, #8
+0x70 0x08 0xd0 0xf2
+# CHECK: vrshrn.i32	d16, q8, #16
+0x70 0x08 0xe0 0xf2
+# CHECK: vrshrn.i64	d16, q8, #32
+0x70 0x09 0xcc 0xf2
+# CHECK: vqrshrn.s16	d16, q8, #4
+0x70 0x09 0xd3 0xf2
+# CHECK: vqrshrn.s32	d16, q8, #13
+0x70 0x09 0xf3 0xf2
+# CHECK: vqrshrn.s64	d16, q8, #13
+0x70 0x09 0xcc 0xf3
+# CHECK: vqrshrn.u16	d16, q8, #4
+0x70 0x09 0xd3 0xf3
+# CHECK: vqrshrn.u32	d16, q8, #13
+0x70 0x09 0xf3 0xf3
+# CHECK: vqrshrn.u64	d16, q8, #13
+
+
+0x30 0x11 0xc8 0xf2
+# CHECK: vsra.s8	d17, d16, #8
+0x30 0x11 0xd0 0xf2
+# CHECK: vsra.s16	d17, d16, #16
+0x30 0x11 0xe0 0xf2
+# CHECK: vsra.s32	d17, d16, #32
+0xb0 0x11 0xc0 0xf2
+# CHECK: vsra.s64	d17, d16, #64
+0x72 0x01 0xc8 0xf2
+# CHECK: vsra.s8	q8, q9, #8
+0x72 0x01 0xd0 0xf2
+# CHECK: vsra.s16	q8, q9, #16
+0x72 0x01 0xe0 0xf2
+# CHECK: vsra.s32	q8, q9, #32
+0xf2 0x01 0xc0 0xf2
+# CHECK: vsra.s64	q8, q9, #64
+0x30 0x11 0xc8 0xf3
+# CHECK: vsra.u8	d17, d16, #8
+0x30 0x11 0xd0 0xf3
+# CHECK: vsra.u16	d17, d16, #16
+0x30 0x11 0xe0 0xf3
+# CHECK: vsra.u32	d17, d16, #32
+0xb0 0x11 0xc0 0xf3
+# CHECK: vsra.u64	d17, d16, #64
+0x72 0x01 0xc8 0xf3
+# CHECK: vsra.u8	q8, q9, #8
+0x72 0x01 0xd0 0xf3
+# CHECK: vsra.u16	q8, q9, #16
+0x72 0x01 0xe0 0xf3
+# CHECK: vsra.u32	q8, q9, #32
+0xf2 0x01 0xc0 0xf3
+# CHECK: vsra.u64	q8, q9, #64
+0x30 0x13 0xc8 0xf2
+# CHECK: vrsra.s8	d17, d16, #8
+0x30 0x13 0xd0 0xf2
+# CHECK: vrsra.s16	d17, d16, #16
+0x30 0x13 0xe0 0xf2
+# CHECK: vrsra.s32	d17, d16, #32
+0xb0 0x13 0xc0 0xf2
+# CHECK: vrsra.s64	d17, d16, #64
+0x30 0x13 0xc8 0xf3
+# CHECK: vrsra.u8	d17, d16, #8
+0x30 0x13 0xd0 0xf3
+# CHECK: vrsra.u16	d17, d16, #16
+0x30 0x13 0xe0 0xf3
+# CHECK: vrsra.u32	d17, d16, #32
+0xb0 0x13 0xc0 0xf3
+# CHECK: vrsra.u64	d17, d16, #64
+0x72 0x03 0xc8 0xf2
+# CHECK: vrsra.s8	q8, q9, #8
+0x72 0x03 0xd0 0xf2
+# CHECK: vrsra.s16	q8, q9, #16
+0x72 0x03 0xe0 0xf2
+# CHECK: vrsra.s32	q8, q9, #32
+0xf2 0x03 0xc0 0xf2
+# CHECK: vrsra.s64	q8, q9, #64
+0x72 0x03 0xc8 0xf3
+# CHECK: vrsra.u8	q8, q9, #8
+0x72 0x03 0xd0 0xf3
+# CHECK: vrsra.u16	q8, q9, #16
+0x72 0x03 0xe0 0xf3
+# CHECK: vrsra.u32	q8, q9, #32
+0xf2 0x03 0xc0 0xf3
+# CHECK: vrsra.u64	q8, q9, #64
+0x30 0x15 0xcf 0xf3
+# CHECK: vsli.8	d17, d16, #7
+0x30 0x15 0xdf 0xf3
+# CHECK: vsli.16	d17, d16, #15
+0x30 0x15 0xff 0xf3
+# CHECK: vsli.32	d17, d16, #31
+0xb0 0x15 0xff 0xf3
+# CHECK: vsli.64	d17, d16, #63
+0x70 0x25 0xcf 0xf3
+# CHECK: vsli.8	q9, q8, #7
+0x70 0x25 0xdf 0xf3
+# CHECK: vsli.16	q9, q8, #15
+0x70 0x25 0xff 0xf3
+# CHECK: vsli.32	q9, q8, #31
+0xf0 0x25 0xff 0xf3
+# CHECK: vsli.64	q9, q8, #63
+0x30 0x14 0xc8 0xf3
+# CHECK: vsri.8	d17, d16, #8
+0x30 0x14 0xd0 0xf3
+# CHECK: vsri.16	d17, d16, #16
+0x30 0x14 0xe0 0xf3
+# CHECK: vsri.32	d17, d16, #32
+0xb0 0x14 0xc0 0xf3
+# CHECK: vsri.64	d17, d16, #64
+0x70 0x24 0xc8 0xf3
+# CHECK: vsri.8	q9, q8, #8
+0x70 0x24 0xd0 0xf3
+# CHECK: vsri.16	q9, q8, #16
+0x70 0x24 0xe0 0xf3
+# CHECK: vsri.32	q9, q8, #32
+0xf0 0x24 0xc0 0xf3
+# CHECK: vsri.64	q9, q8, #64
+
+
+0xa0 0x03 0xf1 0xf2
+# CHECK: vext.8	d16, d17, d16, #3
+0xa0 0x05 0xf1 0xf2
+# CHECK: vext.8	d16, d17, d16, #5
+0xe0 0x03 0xf2 0xf2
+# CHECK: vext.8	q8, q9, q8, #3
+0xe0 0x07 0xf2 0xf2
+# CHECK: vext.8	q8, q9, q8, #7
+0xa0 0x06 0xf1 0xf2
+# CHECK: vext.16	d16, d17, d16, #3
+0xe0 0x0c 0xf2 0xf2
+# CHECK: vext.32	q8, q9, q8, #3
+0xa0 0x10 0xf2 0xf3
+# CHECK: vtrn.8	d17, d16
+0xa0 0x10 0xf6 0xf3
+# CHECK: vtrn.16	d17, d16
+0xa0 0x10 0xfa 0xf3
+# CHECK: vtrn.32	d17, d16
+0xe0 0x20 0xf2 0xf3
+# CHECK: vtrn.8	q9, q8
+0xe0 0x20 0xf6 0xf3
+# CHECK: vtrn.16	q9, q8
+0xe0 0x20 0xfa 0xf3
+# CHECK: vtrn.32	q9, q8
+0x20 0x11 0xf2 0xf3
+# CHECK: vuzp.8	d17, d16
+0x20 0x11 0xf6 0xf3
+# CHECK: vuzp.16	d17, d16
+0x60 0x21 0xf2 0xf3
+# CHECK: vuzp.8	q9, q8
+0x60 0x21 0xf6 0xf3
+# CHECK: vuzp.16	q9, q8
+0x60 0x21 0xfa 0xf3
+# CHECK: vuzp.32	q9, q8
+0xa0 0x11 0xf2 0xf3
+# CHECK: vzip.8	d17, d16
+0xa0 0x11 0xf6 0xf3
+# CHECK: vzip.16	d17, d16
+0xe0 0x21 0xf2 0xf3
+# CHECK: vzip.8	q9, q8
+0xe0 0x21 0xf6 0xf3
+# CHECK: vzip.16	q9, q8
+0xe0 0x21 0xfa 0xf3
+# CHECK: vzip.32	q9, q8
+
+
+0xa0 0x08 0x41 0xf3
+# CHECK: vsub.i8	d16, d17, d16
+0xa0 0x08 0x51 0xf3
+# CHECK: vsub.i16	d16, d17, d16
+0xa0 0x08 0x61 0xf3
+# CHECK: vsub.i32	d16, d17, d16
+0xa0 0x08 0x71 0xf3
+# CHECK: vsub.i64	d16, d17, d16
+0xa1 0x0d 0x60 0xf2
+# CHECK: vsub.f32	d16, d16, d17
+0xe2 0x08 0x40 0xf3
+# CHECK: vsub.i8	q8, q8, q9
+0xe2 0x08 0x50 0xf3
+# CHECK: vsub.i16	q8, q8, q9
+0xe2 0x08 0x60 0xf3
+# CHECK: vsub.i32	q8, q8, q9
+0xe2 0x08 0x70 0xf3
+# CHECK: vsub.i64	q8, q8, q9
+0xe2 0x0d 0x60 0xf2
+# CHECK: vsub.f32	q8, q8, q9
+0xa0 0x02 0xc1 0xf2
+# CHECK: vsubl.s8	q8, d17, d16
+0xa0 0x02 0xd1 0xf2
+# CHECK: vsubl.s16	q8, d17, d16
+0xa0 0x02 0xe1 0xf2
+# CHECK: vsubl.s32	q8, d17, d16
+0xa0 0x02 0xc1 0xf3
+# CHECK: vsubl.u8	q8, d17, d16
+0xa0 0x02 0xd1 0xf3
+# CHECK: vsubl.u16	q8, d17, d16
+0xa0 0x02 0xe1 0xf3
+# CHECK: vsubl.u32	q8, d17, d16
+0xa2 0x03 0xc0 0xf2
+# CHECK: vsubw.s8	q8, q8, d18
+0xa2 0x03 0xd0 0xf2
+# CHECK: vsubw.s16	q8, q8, d18
+0xa2 0x03 0xe0 0xf2
+# CHECK: vsubw.s32	q8, q8, d18
+0xa2 0x03 0xc0 0xf3
+# CHECK: vsubw.u8	q8, q8, d18
+0xa2 0x03 0xd0 0xf3
+# CHECK: vsubw.u16	q8, q8, d18
+0xa2 0x03 0xe0 0xf3
+# CHECK: vsubw.u32	q8, q8, d18
+0xa1 0x02 0x40 0xf2
+# CHECK: vhsub.s8	d16, d16, d17
+0xa1 0x02 0x50 0xf2
+# CHECK: vhsub.s16	d16, d16, d17
+0xa1 0x02 0x60 0xf2
+# CHECK: vhsub.s32	d16, d16, d17
+0xa1 0x02 0x40 0xf3
+# CHECK: vhsub.u8	d16, d16, d17
+0xa1 0x02 0x50 0xf3
+# CHECK: vhsub.u16	d16, d16, d17
+0xa1 0x02 0x60 0xf3
+# CHECK: vhsub.u32	d16, d16, d17
+0xe2 0x02 0x40 0xf2
+# CHECK: vhsub.s8	q8, q8, q9
+0xe2 0x02 0x50 0xf2
+# CHECK: vhsub.s16	q8, q8, q9
+0xe2 0x02 0x60 0xf2
+# CHECK: vhsub.s32	q8, q8, q9
+0xb1 0x02 0x40 0xf2
+# CHECK: vqsub.s8	d16, d16, d17
+0xb1 0x02 0x50 0xf2
+# CHECK: vqsub.s16	d16, d16, d17
+0xb1 0x02 0x60 0xf2
+# CHECK: vqsub.s32	d16, d16, d17
+0xb1 0x02 0x70 0xf2
+# CHECK: vqsub.s64	d16, d16, d17
+0xb1 0x02 0x40 0xf3
+# CHECK: vqsub.u8	d16, d16, d17
+0xb1 0x02 0x50 0xf3
+# CHECK: vqsub.u16	d16, d16, d17
+0xb1 0x02 0x60 0xf3
+# CHECK: vqsub.u32	d16, d16, d17
+0xb1 0x02 0x70 0xf3
+# CHECK: vqsub.u64	d16, d16, d17
+0xf2 0x02 0x40 0xf2
+# CHECK: vqsub.s8	q8, q8, q9
+0xf2 0x02 0x50 0xf2
+# CHECK: vqsub.s16	q8, q8, q9
+0xf2 0x02 0x60 0xf2
+# CHECK: vqsub.s32	q8, q8, q9
+0xf2 0x02 0x70 0xf2
+# CHECK: vqsub.s64	q8, q8, q9
+0xf2 0x02 0x40 0xf3
+# CHECK: vqsub.u8	q8, q8, q9
+0xf2 0x02 0x50 0xf3
+# CHECK: vqsub.u16	q8, q8, q9
+0xf2 0x02 0x60 0xf3
+# CHECK: vqsub.u32	q8, q8, q9
+0xf2 0x02 0x70 0xf3
+# CHECK: vqsub.u64	q8, q8, q9
+0xa2 0x06 0xc0 0xf2
+# CHECK: vsubhn.i16	d16, q8, q9
+0xa2 0x06 0xd0 0xf2
+# CHECK: vsubhn.i32	d16, q8, q9
+0xa2 0x06 0xe0 0xf2
+# CHECK: vsubhn.i64	d16, q8, q9
+0xa2 0x06 0xc0 0xf3
+# CHECK: vrsubhn.i16	d16, q8, q9
+0xa2 0x06 0xd0 0xf3
+# CHECK: vrsubhn.i32	d16, q8, q9
+0xa2 0x06 0xe0 0xf3
+# CHECK: vrsubhn.i64	d16, q8, q9
+
+
+
+0xa0 0x08 0xf1 0xf3
+# CHECK: vtbl.8	d16, {d17}, d16
+0xa2 0x09 0xf0 0xf3
+# CHECK: vtbl.8	d16, {d16, d17}, d18
+0xa4 0x0a 0xf0 0xf3
+# CHECK: vtbl.8	d16, {d16, d17, d18}, d20
+0xa4 0x0b 0xf0 0xf3
+# CHECK: vtbl.8	d16, {d16, d17, d18, d19}, d20
+0xe1 0x28 0xf0 0xf3
+# CHECK: vtbx.8	d18, {d16}, d17
+0xe2 0x39 0xf0 0xf3
+# CHECK: vtbx.8	d19, {d16, d17}, d18
+0xe5 0x4a 0xf0 0xf3
+# CHECK: vtbx.8	d20, {d16, d17, d18}, d21
+0xe5 0x4b 0xf0 0xf3
+# CHECK: vtbx.8	d20, {d16, d17, d18, d19}, d21
+
+
+
+0x1f 0x07 0x60 0xf4
+# CHECK: vld1.8	{d16}, [r0, :64]
+0x4f 0x07 0x60 0xf4
+# CHECK: vld1.16	{d16}, [r0]
+0x8f 0x07 0x60 0xf4
+# CHECK: vld1.32	{d16}, [r0]
+0xcf 0x07 0x60 0xf4
+# CHECK: vld1.64	{d16}, [r0]
+0x1f 0x0a 0x60 0xf4
+# CHECK: vld1.8	{d16, d17}, [r0, :64]
+0x6f 0x0a 0x60 0xf4
+# CHECK: vld1.16	{d16, d17}, [r0, :128]
+0x8f 0x0a 0x60 0xf4
+# CHECK: vld1.32	{d16, d17}, [r0]
+0xcf 0x0a 0x60 0xf4
+# CHECK: vld1.64	{d16, d17}, [r0]
+
+0x1f 0x08 0x60 0xf4
+# CHECK: vld2.8	{d16, d17}, [r0, :64]
+0x6f 0x08 0x60 0xf4
+# CHECK: vld2.16	{d16, d17}, [r0, :128]
+0x8f 0x08 0x60 0xf4
+# CHECK: vld2.32	{d16, d17}, [r0]
+0x1f 0x03 0x60 0xf4
+# CHECK: vld2.8	{d16, d17, d18, d19}, [r0, :64]
+0x6f 0x03 0x60 0xf4
+# CHECK: vld2.16	{d16, d17, d18, d19}, [r0, :128]
+0xbf 0x03 0x60 0xf4
+# CHECK: vld2.32	{d16, d17, d18, d19}, [r0, :256]
+
+0x1f 0x04 0x60 0xf4
+# CHECK: vld3.8	{d16, d17, d18}, [r0, :64]
+0x4f 0x04 0x60 0xf4
+# CHECK: vld3.16	{d16, d17, d18}, [r0]
+0x8f 0x04 0x60 0xf4
+# CHECK: vld3.32	{d16, d17, d18}, [r0]
+0x1d 0x05 0x60 0xf4
+# CHECK: vld3.8	{d16, d18, d20}, [r0, :64]!
+0x1d 0x15 0x60 0xf4
+# CHECK: vld3.8	{d17, d19, d21}, [r0, :64]!
+0x4d 0x05 0x60 0xf4
+# CHECK: vld3.16	{d16, d18, d20}, [r0]!
+0x4d 0x15 0x60 0xf4
+# CHECK: vld3.16	{d17, d19, d21}, [r0]!
+0x8d 0x05 0x60 0xf4
+# CHECK: vld3.32	{d16, d18, d20}, [r0]!
+0x8d 0x15 0x60 0xf4
+# CHECK: vld3.32	{d17, d19, d21}, [r0]!
+
+0x1f 0x00 0x60 0xf4
+# CHECK: vld4.8	{d16, d17, d18, d19}, [r0, :64]
+0x6f 0x00 0x60 0xf4
+# CHECK: vld4.16	{d16, d17, d18, d19}, [r0, :128]
+0xbf 0x00 0x60 0xf4
+# CHECK: vld4.32	{d16, d17, d18, d19}, [r0, :256]
+0x3d 0x01 0x60 0xf4
+# CHECK: vld4.8	{d16, d18, d20, d22}, [r0, :256]!
+0x3d 0x11 0x60 0xf4
+# CHECK: vld4.8	{d17, d19, d21, d23}, [r0, :256]!
+0x4d 0x01 0x60 0xf4
+# CHECK: vld4.16	{d16, d18, d20, d22}, [r0]!
+0x4d 0x11 0x60 0xf4
+# CHECK: vld4.16	{d17, d19, d21, d23}, [r0]!
+0x8d 0x01 0x60 0xf4
+# CHECK: vld4.32	{d16, d18, d20, d22}, [r0]!
+0x8d 0x11 0x60 0xf4
+# CHECK: vld4.32	{d17, d19, d21, d23}, [r0]!
+
+0x6f 0x00 0xe0 0xf4
+# CHECK: vld1.8	{d16[3]}, [r0]
+0x9f 0x04 0xe0 0xf4
+# CHECK: vld1.16	{d16[2]}, [r0, :16]
+0xbf 0x08 0xe0 0xf4
+# CHECK: vld1.32	{d16[1]}, [r0, :32]
+
+0x3f 0x01 0xe0 0xf4
+# CHECK: vld2.8	{d16[1], d17[1]}, [r0, :16]
+0x5f 0x05 0xe0 0xf4
+# CHECK: vld2.16	{d16[1], d17[1]}, [r0, :32]
+0x8f 0x09 0xe0 0xf4
+# CHECK: vld2.32	{d16[1], d17[1]}, [r0]
+0x6f 0x15 0xe0 0xf4
+# CHECK: vld2.16	{d17[1], d19[1]}, [r0]
+0x5f 0x19 0xe0 0xf4
+# CHECK: vld2.32	{d17[0], d19[0]}, [r0, :64]
+
+0x2f 0x02 0xe0 0xf4
+# CHECK: vld3.8	{d16[1], d17[1], d18[1]}, [r0]
+0x4f 0x06 0xe0 0xf4
+# CHECK: vld3.16	{d16[1], d17[1], d18[1]}, [r0]
+0x8f 0x0a 0xe0 0xf4
+# CHECK: vld3.32	{d16[1], d17[1], d18[1]}, [r0]
+0x6f 0x06 0xe0 0xf4
+# CHECK: vld3.16	{d16[1], d18[1], d20[1]}, [r0]
+0xcf 0x1a 0xe0 0xf4
+# CHECK: vld3.32	{d17[1], d19[1], d21[1]}, [r0]
+
+0x3f 0x03 0xe0 0xf4
+# CHECK: vld4.8	{d16[1], d17[1], d18[1], d19[1]}, [r0, :32]
+0x4f 0x07 0xe0 0xf4
+# CHECK: vld4.16	{d16[1], d17[1], d18[1], d19[1]}, [r0]
+0xaf 0x0b 0xe0 0xf4
+# CHECK: vld4.32	{d16[1], d17[1], d18[1], d19[1]}, [r0, :128]
+0x7f 0x07 0xe0 0xf4
+# CHECK: vld4.16	{d16[1], d18[1], d20[1], d22[1]}, [r0, :64]
+0x4f 0x1b 0xe0 0xf4
+# CHECK: vld4.32	{d17[0], d19[0], d21[0], d23[0]}, [r0]
+
+
+
+0x1f 0x07 0x40 0xf4
+# CHECK: vst1.8	{d16}, [r0, :64]
+0x4f 0x07 0x40 0xf4
+# CHECK: vst1.16	{d16}, [r0]
+0x8f 0x07 0x40 0xf4
+# CHECK: vst1.32	{d16}, [r0]
+0xcf 0x07 0x40 0xf4
+# CHECK: vst1.64	{d16}, [r0]
+0x1f 0x0a 0x40 0xf4
+# CHECK: vst1.8	{d16, d17}, [r0, :64]
+0x6f 0x0a 0x40 0xf4
+# CHECK: vst1.16	{d16, d17}, [r0, :128]
+0x8f 0x0a 0x40 0xf4
+# CHECK: vst1.32	{d16, d17}, [r0]
+0xcf 0x0a 0x40 0xf4
+# CHECK: vst1.64	{d16, d17}, [r0]
+
+0x1f 0x08 0x40 0xf4
+# CHECK: vst2.8	{d16, d17}, [r0, :64]
+0x6f 0x08 0x40 0xf4
+# CHECK: vst2.16	{d16, d17}, [r0, :128]
+0x8f 0x08 0x40 0xf4
+# CHECK: vst2.32	{d16, d17}, [r0]
+0x1f 0x03 0x40 0xf4
+# CHECK: vst2.8	{d16, d17, d18, d19}, [r0, :64]
+0x6f 0x03 0x40 0xf4
+# CHECK: vst2.16	{d16, d17, d18, d19}, [r0, :128]
+0xbf 0x03 0x40 0xf4
+# CHECK: vst2.32	{d16, d17, d18, d19}, [r0, :256]
+
+0x1f 0x04 0x40 0xf4
+# CHECK: vst3.8	{d16, d17, d18}, [r0, :64]
+0x4f 0x04 0x40 0xf4
+# CHECK: vst3.16	{d16, d17, d18}, [r0]
+0x8f 0x04 0x40 0xf4
+# CHECK: vst3.32	{d16, d17, d18}, [r0]
+0x1d 0x05 0x40 0xf4
+# CHECK: vst3.8	{d16, d18, d20}, [r0, :64]!
+0x1d 0x15 0x40 0xf4
+# CHECK: vst3.8	{d17, d19, d21}, [r0, :64]!
+0x4d 0x05 0x40 0xf4
+# CHECK: vst3.16	{d16, d18, d20}, [r0]!
+0x4d 0x15 0x40 0xf4
+# CHECK: vst3.16	{d17, d19, d21}, [r0]!
+0x8d 0x05 0x40 0xf4
+# CHECK: vst3.32	{d16, d18, d20}, [r0]!
+0x8d 0x15 0x40 0xf4
+# CHECK: vst3.32	{d17, d19, d21}, [r0]!
+
+0x1f 0x00 0x40 0xf4
+# CHECK: vst4.8	{d16, d17, d18, d19}, [r0, :64]
+0x6f 0x00 0x40 0xf4
+# CHECK: vst4.16	{d16, d17, d18, d19}, [r0, :128]
+0x3d 0x01 0x40 0xf4
+# CHECK: vst4.8	{d16, d18, d20, d22}, [r0, :256]!
+0x3d 0x11 0x40 0xf4
+# CHECK: vst4.8	{d17, d19, d21, d23}, [r0, :256]!
+0x4d 0x01 0x40 0xf4
+# CHECK: vst4.16	{d16, d18, d20, d22}, [r0]!
+0x4d 0x11 0x40 0xf4
+# CHECK: vst4.16	{d17, d19, d21, d23}, [r0]!
+0x8d 0x01 0x40 0xf4
+# CHECK: vst4.32	{d16, d18, d20, d22}, [r0]!
+0x8d 0x11 0x40 0xf4
+# CHECK: vst4.32	{d17, d19, d21, d23}, [r0]!
+
+0x3f 0x01 0xc0 0xf4
+# CHECK: vst2.8	{d16[1], d17[1]}, [r0, :16]
+0x5f 0x05 0xc0 0xf4
+# CHECK: vst2.16	{d16[1], d17[1]}, [r0, :32]
+0x8f 0x09 0xc0 0xf4
+# CHECK: vst2.32	{d16[1], d17[1]}, [r0]
+0x6f 0x15 0xc0 0xf4
+# CHECK: vst2.16	{d17[1], d19[1]}, [r0]
+0x5f 0x19 0xc0 0xf4
+# CHECK: vst2.32	{d17[0], d19[0]}, [r0, :64]
+
+0x2f 0x02 0xc0 0xf4
+# CHECK: vst3.8	{d16[1], d17[1], d18[1]}, [r0]
+0x4f 0x06 0xc0 0xf4
+# CHECK: vst3.16	{d16[1], d17[1], d18[1]}, [r0]
+0x8f 0x0a 0xc0 0xf4
+# CHECK: vst3.32	{d16[1], d17[1], d18[1]}, [r0]
+0xaf 0x16 0xc0 0xf4
+# CHECK: vst3.16	{d17[2], d19[2], d21[2]}, [r0]
+0x4f 0x0a 0xc0 0xf4
+# CHECK: vst3.32	{d16[0], d18[0], d20[0]}, [r0]
+
+0x3f 0x03 0xc0 0xf4
+# CHECK: vst4.8	{d16[1], d17[1], d18[1], d19[1]}, [r0, :32]
+0x4f 0x07 0xc0 0xf4
+# CHECK: vst4.16	{d16[1], d17[1], d18[1], d19[1]}, [r0]
+0xaf 0x0b 0xc0 0xf4
+# CHECK: vst4.32	{d16[1], d17[1], d18[1], d19[1]}, [r0, :128]
+0xff 0x17 0xc0 0xf4
+# CHECK: vst4.16	{d17[3], d19[3], d21[3], d23[3]}, [r0, :64]
+0x4f 0x1b 0xc0 0xf4
+# CHECK: vst4.32	{d17[0], d19[0], d21[0], d23[0]}, [r0]
