[llvm-commits] [llvm] r146605 - in /llvm/trunk: lib/Target/ARM/ARMInstrNEON.td lib/Target/ARM/AsmParser/ARMAsmParser.cpp test/MC/ARM/neon-vld-encoding.s test/MC/ARM/neon-vst-encoding.s
Jim Grosbach
grosbach at apple.com
Wed Dec 14 15:25:47 PST 2011
Author: grosbach
Date: Wed Dec 14 17:25:46 2011
New Revision: 146605
URL: http://llvm.org/viewvc/llvm-project?rev=146605&view=rev
Log:
ARM NEON VLD2/VST2 lane indexed assembly parsing and encoding.
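
For reference, the newly accepted forms cover a two-D-register list with a lane
index and all three addressing variants (no writeback, post-increment writeback,
register writeback); these examples are taken verbatim from the updated tests below:

    vld2.32 {d16[1], d17[1]}, [r0]
    vld2.8  {d2[4], d3[4]}, [r2], r3
    vst2.8  {d2[4], d3[4]}, [r2]!
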
Modified:
llvm/trunk/lib/Target/ARM/ARMInstrNEON.td
llvm/trunk/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
llvm/trunk/test/MC/ARM/neon-vld-encoding.s
llvm/trunk/test/MC/ARM/neon-vst-encoding.s
Modified: llvm/trunk/lib/Target/ARM/ARMInstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrNEON.td?rev=146605&r1=146604&r2=146605&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrNEON.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrNEON.td Wed Dec 14 17:25:46 2011
@@ -149,6 +149,16 @@
let ParserMatchClass = VecListOneDByteIndexAsmOperand;
let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
}
+// Register list of two D registers, with byte lane subscripting.
+def VecListTwoDByteIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListTwoDByteIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListTwoDByteIndexed : Operand<i32> {
+ let ParserMatchClass = VecListTwoDByteIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
//===----------------------------------------------------------------------===//
// NEON-specific DAG Nodes.
@@ -1866,10 +1876,10 @@
// ...with address register writeback:
class VST2LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
: NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
- DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VST2lnu, "vst2", Dt,
- "\\{$src1[$lane], $src2[$lane]\\}, $addr$offset",
- "$addr.addr = $wb", []> {
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$Vd, DPR:$src2, nohash_imm:$lane), IIC_VST2lnu, "vst2", Dt,
+ "\\{$Vd[$lane], $src2[$lane]\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVST2LN";
}
@@ -5673,6 +5683,63 @@
(ins VecListOneDByteIndexed:$list, addrmode6:$addr,
rGPR:$Rm, pred:$p)>;
+// VLD2 single-lane pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+defm VLD2LNdAsm : NEONDT8AsmPseudoInst<"vld2${p}", "$list, $addr",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+defm VLD2LNdAsm : NEONDT16AsmPseudoInst<"vld2${p}", "$list, $addr",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+defm VLD2LNdAsm : NEONDT32AsmPseudoInst<"vld2${p}", "$list, $addr",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+
+defm VLD2LNdWB_fixed_Asm : NEONDT8AsmPseudoInst<"vld2${p}", "$list, $addr!",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+defm VLD2LNdWB_fixed_Asm : NEONDT16AsmPseudoInst<"vld2${p}", "$list, $addr!",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+defm VLD2LNdWB_fixed_Asm : NEONDT32AsmPseudoInst<"vld2${p}", "$list, $addr!",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+defm VLD2LNdWB_register_Asm :
+ NEONDT8AsmPseudoInst<"vld2${p}", "$list, $addr, $Rm",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+defm VLD2LNdWB_register_Asm :
+ NEONDT16AsmPseudoInst<"vld2${p}", "$list, $addr, $Rm",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+defm VLD2LNdWB_register_Asm :
+ NEONDT32AsmPseudoInst<"vld2${p}", "$list, $addr, $Rm",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+
+// VST2 single-lane pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+defm VST2LNdAsm : NEONDT8AsmPseudoInst<"vst2${p}", "$list, $addr",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+defm VST2LNdAsm : NEONDT16AsmPseudoInst<"vst2${p}", "$list, $addr",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+defm VST2LNdAsm : NEONDT32AsmPseudoInst<"vst2${p}", "$list, $addr",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+
+defm VST2LNdWB_fixed_Asm : NEONDT8AsmPseudoInst<"vst2${p}", "$list, $addr!",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+defm VST2LNdWB_fixed_Asm : NEONDT16AsmPseudoInst<"vst2${p}", "$list, $addr!",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+defm VST2LNdWB_fixed_Asm : NEONDT32AsmPseudoInst<"vst2${p}", "$list, $addr!",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+defm VST2LNdWB_register_Asm :
+ NEONDT8AsmPseudoInst<"vst2${p}", "$list, $addr, $Rm",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+defm VST2LNdWB_register_Asm :
+ NEONDT16AsmPseudoInst<"vst2${p}", "$list, $addr, $Rm",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+defm VST2LNdWB_register_Asm :
+ NEONDT32AsmPseudoInst<"vst2${p}", "$list, $addr, $Rm",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
// VMOV takes an optional datatype suffix
defm : VFPDTAnyInstAlias<"vmov${p}", "$Vd, $Vm",
(VORRd DPR:$Vd, DPR:$Vm, DPR:$Vm, pred:$p)>;
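
Note that each NEONDT{8,16,32}AsmPseudoInst multiclass above expands to one pseudo
per accepted NEON datatype suffix (the _8/_P8/_I8/_S8/_U8, _16, and _32 families
handled in the asm parser below), so the various suffix spellings are all expected
to land on the same real instruction. A sketch, assuming the usual NEON datatype
aliasing:

    vld2.8  {d2[4], d3[4]}, [r2]     @ VLD2LNdAsm_8
    vld2.u8 {d2[4], d3[4]}, [r2]     @ VLD2LNdAsm_U8, same encoding as .8
    vld2.p8 {d2[4], d3[4]}, [r2]!    @ VLD2LNdWB_fixed_Asm_P8 (post-increment writeback)
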
Modified: llvm/trunk/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/AsmParser/ARMAsmParser.cpp?rev=146605&r1=146604&r2=146605&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/AsmParser/ARMAsmParser.cpp (original)
+++ llvm/trunk/lib/Target/ARM/AsmParser/ARMAsmParser.cpp Wed Dec 14 17:25:46 2011
@@ -1116,6 +1116,11 @@
return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
}
+ bool isVecListTwoDByteIndexed() const {
+ if (Kind != k_VectorListIndexed) return false;
+ return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
+ }
+
bool isVectorIndex8() const {
if (Kind != k_VectorIndex) return false;
return VectorIndex.Val < 8;
@@ -4977,108 +4982,162 @@
static unsigned getRealVSTLNOpcode(unsigned Opc) {
switch(Opc) {
default: assert(0 && "unexpected opcode!");
- case ARM::VST1LNdWB_fixed_Asm_8: return ARM::VST1LNd8_UPD;
- case ARM::VST1LNdWB_fixed_Asm_P8: return ARM::VST1LNd8_UPD;
- case ARM::VST1LNdWB_fixed_Asm_I8: return ARM::VST1LNd8_UPD;
- case ARM::VST1LNdWB_fixed_Asm_S8: return ARM::VST1LNd8_UPD;
- case ARM::VST1LNdWB_fixed_Asm_U8: return ARM::VST1LNd8_UPD;
- case ARM::VST1LNdWB_fixed_Asm_16: return ARM::VST1LNd16_UPD;
- case ARM::VST1LNdWB_fixed_Asm_P16: return ARM::VST1LNd16_UPD;
- case ARM::VST1LNdWB_fixed_Asm_I16: return ARM::VST1LNd16_UPD;
- case ARM::VST1LNdWB_fixed_Asm_S16: return ARM::VST1LNd16_UPD;
- case ARM::VST1LNdWB_fixed_Asm_U16: return ARM::VST1LNd16_UPD;
- case ARM::VST1LNdWB_fixed_Asm_32: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdWB_fixed_Asm_F: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdWB_fixed_Asm_F32: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdWB_fixed_Asm_I32: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdWB_fixed_Asm_S32: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdWB_fixed_Asm_U32: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdWB_register_Asm_8: return ARM::VST1LNd8_UPD;
- case ARM::VST1LNdWB_register_Asm_P8: return ARM::VST1LNd8_UPD;
- case ARM::VST1LNdWB_register_Asm_I8: return ARM::VST1LNd8_UPD;
- case ARM::VST1LNdWB_register_Asm_S8: return ARM::VST1LNd8_UPD;
- case ARM::VST1LNdWB_register_Asm_U8: return ARM::VST1LNd8_UPD;
- case ARM::VST1LNdWB_register_Asm_16: return ARM::VST1LNd16_UPD;
- case ARM::VST1LNdWB_register_Asm_P16: return ARM::VST1LNd16_UPD;
- case ARM::VST1LNdWB_register_Asm_I16: return ARM::VST1LNd16_UPD;
- case ARM::VST1LNdWB_register_Asm_S16: return ARM::VST1LNd16_UPD;
- case ARM::VST1LNdWB_register_Asm_U16: return ARM::VST1LNd16_UPD;
- case ARM::VST1LNdWB_register_Asm_32: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdWB_register_Asm_F: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdWB_register_Asm_F32: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdWB_register_Asm_I32: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdWB_register_Asm_S32: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdWB_register_Asm_U32: return ARM::VST1LNd32_UPD;
- case ARM::VST1LNdAsm_8: return ARM::VST1LNd8;
- case ARM::VST1LNdAsm_P8: return ARM::VST1LNd8;
- case ARM::VST1LNdAsm_I8: return ARM::VST1LNd8;
- case ARM::VST1LNdAsm_S8: return ARM::VST1LNd8;
- case ARM::VST1LNdAsm_U8: return ARM::VST1LNd8;
- case ARM::VST1LNdAsm_16: return ARM::VST1LNd16;
- case ARM::VST1LNdAsm_P16: return ARM::VST1LNd16;
- case ARM::VST1LNdAsm_I16: return ARM::VST1LNd16;
- case ARM::VST1LNdAsm_S16: return ARM::VST1LNd16;
- case ARM::VST1LNdAsm_U16: return ARM::VST1LNd16;
- case ARM::VST1LNdAsm_32: return ARM::VST1LNd32;
- case ARM::VST1LNdAsm_F: return ARM::VST1LNd32;
- case ARM::VST1LNdAsm_F32: return ARM::VST1LNd32;
- case ARM::VST1LNdAsm_I32: return ARM::VST1LNd32;
- case ARM::VST1LNdAsm_S32: return ARM::VST1LNd32;
- case ARM::VST1LNdAsm_U32: return ARM::VST1LNd32;
+ // VST1LN
+ case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
+ case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
+ case ARM::VST1LNdWB_fixed_Asm_U8:
+ return ARM::VST1LNd8_UPD;
+ case ARM::VST1LNdWB_fixed_Asm_16: case ARM::VST1LNdWB_fixed_Asm_P16:
+ case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
+ case ARM::VST1LNdWB_fixed_Asm_U16:
+ return ARM::VST1LNd16_UPD;
+ case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
+ case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
+ case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
+ return ARM::VST1LNd32_UPD;
+ case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
+ case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
+ case ARM::VST1LNdWB_register_Asm_U8:
+ return ARM::VST1LNd8_UPD;
+ case ARM::VST1LNdWB_register_Asm_16: case ARM::VST1LNdWB_register_Asm_P16:
+ case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
+ case ARM::VST1LNdWB_register_Asm_U16:
+ return ARM::VST1LNd16_UPD;
+ case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
+ case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
+ case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
+ return ARM::VST1LNd32_UPD;
+ case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8:
+ case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
+ case ARM::VST1LNdAsm_U8:
+ return ARM::VST1LNd8;
+ case ARM::VST1LNdAsm_16: case ARM::VST1LNdAsm_P16:
+ case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
+ case ARM::VST1LNdAsm_U16:
+ return ARM::VST1LNd16;
+ case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
+ case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
+ case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
+ return ARM::VST1LNd32;
+
+ // VST2LN
+ case ARM::VST2LNdWB_fixed_Asm_8: case ARM::VST2LNdWB_fixed_Asm_P8:
+ case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
+ case ARM::VST2LNdWB_fixed_Asm_U8:
+ return ARM::VST2LNd8_UPD;
+ case ARM::VST2LNdWB_fixed_Asm_16: case ARM::VST2LNdWB_fixed_Asm_P16:
+ case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
+ case ARM::VST2LNdWB_fixed_Asm_U16:
+ return ARM::VST2LNd16_UPD;
+ case ARM::VST2LNdWB_fixed_Asm_32: case ARM::VST2LNdWB_fixed_Asm_F:
+ case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
+ case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
+ return ARM::VST2LNd32_UPD;
+ case ARM::VST2LNdWB_register_Asm_8: case ARM::VST2LNdWB_register_Asm_P8:
+ case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
+ case ARM::VST2LNdWB_register_Asm_U8:
+ return ARM::VST2LNd8_UPD;
+ case ARM::VST2LNdWB_register_Asm_16: case ARM::VST2LNdWB_register_Asm_P16:
+ case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
+ case ARM::VST2LNdWB_register_Asm_U16:
+ return ARM::VST2LNd16_UPD;
+ case ARM::VST2LNdWB_register_Asm_32: case ARM::VST2LNdWB_register_Asm_F:
+ case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
+ case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
+ return ARM::VST2LNd32_UPD;
+ case ARM::VST2LNdAsm_8: case ARM::VST2LNdAsm_P8:
+ case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
+ case ARM::VST2LNdAsm_U8:
+ return ARM::VST2LNd8;
+ case ARM::VST2LNdAsm_16: case ARM::VST2LNdAsm_P16:
+ case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
+ case ARM::VST2LNdAsm_U16:
+ return ARM::VST2LNd16;
+ case ARM::VST2LNdAsm_32: case ARM::VST2LNdAsm_F:
+ case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
+ case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
+ return ARM::VST2LNd32;
}
}
static unsigned getRealVLDLNOpcode(unsigned Opc) {
switch(Opc) {
default: assert(0 && "unexpected opcode!");
- case ARM::VLD1LNdWB_fixed_Asm_8: return ARM::VLD1LNd8_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_P8: return ARM::VLD1LNd8_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_I8: return ARM::VLD1LNd8_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_S8: return ARM::VLD1LNd8_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_U8: return ARM::VLD1LNd8_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_16: return ARM::VLD1LNd16_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_P16: return ARM::VLD1LNd16_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_I16: return ARM::VLD1LNd16_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_S16: return ARM::VLD1LNd16_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_U16: return ARM::VLD1LNd16_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_32: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_F: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_F32: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_I32: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_S32: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdWB_fixed_Asm_U32: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdWB_register_Asm_8: return ARM::VLD1LNd8_UPD;
- case ARM::VLD1LNdWB_register_Asm_P8: return ARM::VLD1LNd8_UPD;
- case ARM::VLD1LNdWB_register_Asm_I8: return ARM::VLD1LNd8_UPD;
- case ARM::VLD1LNdWB_register_Asm_S8: return ARM::VLD1LNd8_UPD;
- case ARM::VLD1LNdWB_register_Asm_U8: return ARM::VLD1LNd8_UPD;
- case ARM::VLD1LNdWB_register_Asm_16: return ARM::VLD1LNd16_UPD;
- case ARM::VLD1LNdWB_register_Asm_P16: return ARM::VLD1LNd16_UPD;
- case ARM::VLD1LNdWB_register_Asm_I16: return ARM::VLD1LNd16_UPD;
- case ARM::VLD1LNdWB_register_Asm_S16: return ARM::VLD1LNd16_UPD;
- case ARM::VLD1LNdWB_register_Asm_U16: return ARM::VLD1LNd16_UPD;
- case ARM::VLD1LNdWB_register_Asm_32: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdWB_register_Asm_F: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdWB_register_Asm_F32: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdWB_register_Asm_I32: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdWB_register_Asm_S32: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdWB_register_Asm_U32: return ARM::VLD1LNd32_UPD;
- case ARM::VLD1LNdAsm_8: return ARM::VLD1LNd8;
- case ARM::VLD1LNdAsm_P8: return ARM::VLD1LNd8;
- case ARM::VLD1LNdAsm_I8: return ARM::VLD1LNd8;
- case ARM::VLD1LNdAsm_S8: return ARM::VLD1LNd8;
- case ARM::VLD1LNdAsm_U8: return ARM::VLD1LNd8;
- case ARM::VLD1LNdAsm_16: return ARM::VLD1LNd16;
- case ARM::VLD1LNdAsm_P16: return ARM::VLD1LNd16;
- case ARM::VLD1LNdAsm_I16: return ARM::VLD1LNd16;
- case ARM::VLD1LNdAsm_S16: return ARM::VLD1LNd16;
- case ARM::VLD1LNdAsm_U16: return ARM::VLD1LNd16;
- case ARM::VLD1LNdAsm_32: return ARM::VLD1LNd32;
- case ARM::VLD1LNdAsm_F: return ARM::VLD1LNd32;
- case ARM::VLD1LNdAsm_F32: return ARM::VLD1LNd32;
- case ARM::VLD1LNdAsm_I32: return ARM::VLD1LNd32;
- case ARM::VLD1LNdAsm_S32: return ARM::VLD1LNd32;
- case ARM::VLD1LNdAsm_U32: return ARM::VLD1LNd32;
+ // VLD1LN
+ case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
+ case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
+ case ARM::VLD1LNdWB_fixed_Asm_U8:
+ return ARM::VLD1LNd8_UPD;
+ case ARM::VLD1LNdWB_fixed_Asm_16: case ARM::VLD1LNdWB_fixed_Asm_P16:
+ case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
+ case ARM::VLD1LNdWB_fixed_Asm_U16:
+ return ARM::VLD1LNd16_UPD;
+ case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
+ case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
+ case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
+ return ARM::VLD1LNd32_UPD;
+ case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
+ case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
+ case ARM::VLD1LNdWB_register_Asm_U8:
+ return ARM::VLD1LNd8_UPD;
+ case ARM::VLD1LNdWB_register_Asm_16: case ARM::VLD1LNdWB_register_Asm_P16:
+ case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
+ case ARM::VLD1LNdWB_register_Asm_U16:
+ return ARM::VLD1LNd16_UPD;
+ case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
+ case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
+ case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
+ return ARM::VLD1LNd32_UPD;
+ case ARM::VLD1LNdAsm_8: case ARM::VLD1LNdAsm_P8:
+ case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
+ case ARM::VLD1LNdAsm_U8:
+ return ARM::VLD1LNd8;
+ case ARM::VLD1LNdAsm_16: case ARM::VLD1LNdAsm_P16:
+ case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
+ case ARM::VLD1LNdAsm_U16:
+ return ARM::VLD1LNd16;
+ case ARM::VLD1LNdAsm_32: case ARM::VLD1LNdAsm_F:
+ case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
+ case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
+ return ARM::VLD1LNd32;
+
+ // VLD2LN
+ case ARM::VLD2LNdWB_fixed_Asm_8: case ARM::VLD2LNdWB_fixed_Asm_P8:
+ case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
+ case ARM::VLD2LNdWB_fixed_Asm_U8:
+ return ARM::VLD2LNd8_UPD;
+ case ARM::VLD2LNdWB_fixed_Asm_16: case ARM::VLD2LNdWB_fixed_Asm_P16:
+ case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
+ case ARM::VLD2LNdWB_fixed_Asm_U16:
+ return ARM::VLD2LNd16_UPD;
+ case ARM::VLD2LNdWB_fixed_Asm_32: case ARM::VLD2LNdWB_fixed_Asm_F:
+ case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
+ case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
+ return ARM::VLD2LNd32_UPD;
+ case ARM::VLD2LNdWB_register_Asm_8: case ARM::VLD2LNdWB_register_Asm_P8:
+ case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
+ case ARM::VLD2LNdWB_register_Asm_U8:
+ return ARM::VLD2LNd8_UPD;
+ case ARM::VLD2LNdWB_register_Asm_16: case ARM::VLD2LNdWB_register_Asm_P16:
+ case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
+ case ARM::VLD2LNdWB_register_Asm_U16:
+ return ARM::VLD2LNd16_UPD;
+ case ARM::VLD2LNdWB_register_Asm_32: case ARM::VLD2LNdWB_register_Asm_F:
+ case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
+ case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
+ return ARM::VLD2LNd32_UPD;
+ case ARM::VLD2LNdAsm_8: case ARM::VLD2LNdAsm_P8:
+ case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
+ case ARM::VLD2LNdAsm_U8:
+ return ARM::VLD2LNd8;
+ case ARM::VLD2LNdAsm_16: case ARM::VLD2LNdAsm_P16:
+ case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
+ case ARM::VLD2LNdAsm_U16:
+ return ARM::VLD2LNd16;
+ case ARM::VLD2LNdAsm_32: case ARM::VLD2LNdAsm_F:
+ case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
+ case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
+ return ARM::VLD2LNd32;
}
}
@@ -5086,23 +5145,15 @@
processInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
switch (Inst.getOpcode()) {
- // Handle NEON VST1 complex aliases.
- case ARM::VST1LNdWB_register_Asm_8:
- case ARM::VST1LNdWB_register_Asm_P8:
- case ARM::VST1LNdWB_register_Asm_I8:
- case ARM::VST1LNdWB_register_Asm_S8:
- case ARM::VST1LNdWB_register_Asm_U8:
- case ARM::VST1LNdWB_register_Asm_16:
- case ARM::VST1LNdWB_register_Asm_P16:
- case ARM::VST1LNdWB_register_Asm_I16:
- case ARM::VST1LNdWB_register_Asm_S16:
- case ARM::VST1LNdWB_register_Asm_U16:
- case ARM::VST1LNdWB_register_Asm_32:
- case ARM::VST1LNdWB_register_Asm_F:
- case ARM::VST1LNdWB_register_Asm_F32:
- case ARM::VST1LNdWB_register_Asm_I32:
- case ARM::VST1LNdWB_register_Asm_S32:
- case ARM::VST1LNdWB_register_Asm_U32: {
+ // Handle NEON VST complex aliases.
+ case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
+ case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
+ case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
+ case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
+ case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
+ case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
+ case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
+ case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
@@ -5118,22 +5169,39 @@
Inst = TmpInst;
return true;
}
- case ARM::VST1LNdWB_fixed_Asm_8:
- case ARM::VST1LNdWB_fixed_Asm_P8:
- case ARM::VST1LNdWB_fixed_Asm_I8:
- case ARM::VST1LNdWB_fixed_Asm_S8:
- case ARM::VST1LNdWB_fixed_Asm_U8:
- case ARM::VST1LNdWB_fixed_Asm_16:
- case ARM::VST1LNdWB_fixed_Asm_P16:
- case ARM::VST1LNdWB_fixed_Asm_I16:
- case ARM::VST1LNdWB_fixed_Asm_S16:
- case ARM::VST1LNdWB_fixed_Asm_U16:
- case ARM::VST1LNdWB_fixed_Asm_32:
- case ARM::VST1LNdWB_fixed_Asm_F:
- case ARM::VST1LNdWB_fixed_Asm_F32:
- case ARM::VST1LNdWB_fixed_Asm_I32:
- case ARM::VST1LNdWB_fixed_Asm_S32:
- case ARM::VST1LNdWB_fixed_Asm_U32: {
+
+ case ARM::VST2LNdWB_register_Asm_8: case ARM::VST2LNdWB_register_Asm_P8:
+ case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
+ case ARM::VST2LNdWB_register_Asm_U8: case ARM::VST2LNdWB_register_Asm_16:
+ case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
+ case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
+ case ARM::VST2LNdWB_register_Asm_32: case ARM::VST2LNdWB_register_Asm_F:
+ case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
+ case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(4)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(5)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(6));
+ Inst = TmpInst;
+ return true;
+ }
+ case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
+ case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
+ case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
+ case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
+ case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
+ case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
+ case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
+ case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
@@ -5149,21 +5217,36 @@
Inst = TmpInst;
return true;
}
- case ARM::VST1LNdAsm_8:
- case ARM::VST1LNdAsm_P8:
- case ARM::VST1LNdAsm_I8:
- case ARM::VST1LNdAsm_S8:
- case ARM::VST1LNdAsm_U8:
- case ARM::VST1LNdAsm_16:
- case ARM::VST1LNdAsm_P16:
- case ARM::VST1LNdAsm_I16:
- case ARM::VST1LNdAsm_S16:
- case ARM::VST1LNdAsm_U16:
- case ARM::VST1LNdAsm_32:
- case ARM::VST1LNdAsm_F:
- case ARM::VST1LNdAsm_F32:
- case ARM::VST1LNdAsm_I32:
- case ARM::VST1LNdAsm_S32:
+
+ case ARM::VST2LNdWB_fixed_Asm_8: case ARM::VST2LNdWB_fixed_Asm_P8:
+ case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
+ case ARM::VST2LNdWB_fixed_Asm_U8: case ARM::VST2LNdWB_fixed_Asm_16:
+ case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
+ case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
+ case ARM::VST2LNdWB_fixed_Asm_32: case ARM::VST2LNdWB_fixed_Asm_F:
+ case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
+ case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+ case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
+ case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
+ case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
+ case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
+ case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
case ARM::VST1LNdAsm_U32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
@@ -5178,23 +5261,36 @@
Inst = TmpInst;
return true;
}
- // Handle NEON VLD1 complex aliases.
- case ARM::VLD1LNdWB_register_Asm_8:
- case ARM::VLD1LNdWB_register_Asm_P8:
- case ARM::VLD1LNdWB_register_Asm_I8:
- case ARM::VLD1LNdWB_register_Asm_S8:
- case ARM::VLD1LNdWB_register_Asm_U8:
- case ARM::VLD1LNdWB_register_Asm_16:
- case ARM::VLD1LNdWB_register_Asm_P16:
- case ARM::VLD1LNdWB_register_Asm_I16:
- case ARM::VLD1LNdWB_register_Asm_S16:
- case ARM::VLD1LNdWB_register_Asm_U16:
- case ARM::VLD1LNdWB_register_Asm_32:
- case ARM::VLD1LNdWB_register_Asm_F:
- case ARM::VLD1LNdWB_register_Asm_F32:
- case ARM::VLD1LNdWB_register_Asm_I32:
- case ARM::VLD1LNdWB_register_Asm_S32:
- case ARM::VLD1LNdWB_register_Asm_U32: {
+
+ case ARM::VST2LNdAsm_8: case ARM::VST2LNdAsm_P8: case ARM::VST2LNdAsm_I8:
+ case ARM::VST2LNdAsm_S8: case ARM::VST2LNdAsm_U8: case ARM::VST2LNdAsm_16:
+ case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
+ case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32: case ARM::VST2LNdAsm_F:
+ case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
+ case ARM::VST2LNdAsm_U32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+ // Handle NEON VLD complex aliases.
+ case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
+ case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
+ case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
+ case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
+ case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
+ case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
+ case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
+ case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
@@ -5211,22 +5307,42 @@
Inst = TmpInst;
return true;
}
- case ARM::VLD1LNdWB_fixed_Asm_8:
- case ARM::VLD1LNdWB_fixed_Asm_P8:
- case ARM::VLD1LNdWB_fixed_Asm_I8:
- case ARM::VLD1LNdWB_fixed_Asm_S8:
- case ARM::VLD1LNdWB_fixed_Asm_U8:
- case ARM::VLD1LNdWB_fixed_Asm_16:
- case ARM::VLD1LNdWB_fixed_Asm_P16:
- case ARM::VLD1LNdWB_fixed_Asm_I16:
- case ARM::VLD1LNdWB_fixed_Asm_S16:
- case ARM::VLD1LNdWB_fixed_Asm_U16:
- case ARM::VLD1LNdWB_fixed_Asm_32:
- case ARM::VLD1LNdWB_fixed_Asm_F:
- case ARM::VLD1LNdWB_fixed_Asm_F32:
- case ARM::VLD1LNdWB_fixed_Asm_I32:
- case ARM::VLD1LNdWB_fixed_Asm_S32:
- case ARM::VLD1LNdWB_fixed_Asm_U32: {
+
+ case ARM::VLD2LNdWB_register_Asm_8: case ARM::VLD2LNdWB_register_Asm_P8:
+ case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
+ case ARM::VLD2LNdWB_register_Asm_U8: case ARM::VLD2LNdWB_register_Asm_16:
+ case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
+ case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
+ case ARM::VLD2LNdWB_register_Asm_32: case ARM::VLD2LNdWB_register_Asm_F:
+ case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
+ case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(4)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(5)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(6));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
+ case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
+ case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
+ case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
+ case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
+ case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
+ case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
+ case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
// right place.
@@ -5243,21 +5359,39 @@
Inst = TmpInst;
return true;
}
- case ARM::VLD1LNdAsm_8:
- case ARM::VLD1LNdAsm_P8:
- case ARM::VLD1LNdAsm_I8:
- case ARM::VLD1LNdAsm_S8:
- case ARM::VLD1LNdAsm_U8:
- case ARM::VLD1LNdAsm_16:
- case ARM::VLD1LNdAsm_P16:
- case ARM::VLD1LNdAsm_I16:
- case ARM::VLD1LNdAsm_S16:
- case ARM::VLD1LNdAsm_U16:
- case ARM::VLD1LNdAsm_32:
- case ARM::VLD1LNdAsm_F:
- case ARM::VLD1LNdAsm_F32:
- case ARM::VLD1LNdAsm_I32:
- case ARM::VLD1LNdAsm_S32:
+
+ case ARM::VLD2LNdWB_fixed_Asm_8: case ARM::VLD2LNdWB_fixed_Asm_P8:
+ case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
+ case ARM::VLD2LNdWB_fixed_Asm_U8: case ARM::VLD2LNdWB_fixed_Asm_16:
+ case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
+ case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
+ case ARM::VLD2LNdWB_fixed_Asm_32: case ARM::VLD2LNdWB_fixed_Asm_F:
+ case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
+ case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD1LNdAsm_8: case ARM::VLD1LNdAsm_P8: case ARM::VLD1LNdAsm_I8:
+ case ARM::VLD1LNdAsm_S8: case ARM::VLD1LNdAsm_U8: case ARM::VLD1LNdAsm_16:
+ case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
+ case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32: case ARM::VLD1LNdAsm_F:
+ case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
case ARM::VLD1LNdAsm_U32: {
MCInst TmpInst;
// Shuffle the operands around so the lane index operand is in the
@@ -5273,6 +5407,29 @@
Inst = TmpInst;
return true;
}
+
+ case ARM::VLD2LNdAsm_8: case ARM::VLD2LNdAsm_P8: case ARM::VLD2LNdAsm_I8:
+ case ARM::VLD2LNdAsm_S8: case ARM::VLD2LNdAsm_U8: case ARM::VLD2LNdAsm_16:
+ case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
+ case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32: case ARM::VLD2LNdAsm_F:
+ case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
+ case ARM::VLD2LNdAsm_U32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
// Handle the Thumb2 mode MOV complex aliases.
case ARM::t2MOVsi:
case ARM::t2MOVSsi: {
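
As a sketch of what the expansion above produces for the VLD2 lane forms: the
pseudo's vector-list operand carries only the first D register plus the lane index,
and processInstruction synthesizes the rest, e.g. for the non-writeback case already
covered by the tests:

    vld2.32 {d16[1], d17[1]}, [r0]   @ encoding: [0x8f,0x09,0xe0,0xf4]
                                     @ pseudo carries d16 and lane 1; the expansion
                                     @ adds d17 as Vd+1, duplicates both as the tied
                                     @ source operands, and appends lane and predicate
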
Modified: llvm/trunk/test/MC/ARM/neon-vld-encoding.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/ARM/neon-vld-encoding.s?rev=146605&r1=146604&r2=146605&view=diff
==============================================================================
--- llvm/trunk/test/MC/ARM/neon-vld-encoding.s (original)
+++ llvm/trunk/test/MC/ARM/neon-vld-encoding.s Wed Dec 14 17:25:46 2011
@@ -229,15 +229,22 @@
@ vld2.8 {d16[1], d17[1]}, [r0, :16]
@ vld2.16 {d16[1], d17[1]}, [r0, :32]
-@ vld2.32 {d16[1], d17[1]}, [r0]
+ vld2.32 {d16[1], d17[1]}, [r0]
@ vld2.16 {d17[1], d19[1]}, [r0]
@ vld2.32 {d17[0], d19[0]}, [r0, :64]
+ vld2.8 {d2[4], d3[4]}, [r2], r3
+ vld2.8 {d2[4], d3[4]}, [r2]!
+ vld2.8 {d2[4], d3[4]}, [r2]
@ FIXME: vld2.8 {d16[1], d17[1]}, [r0, :16] @ encoding: [0x3f,0x01,0xe0,0xf4]
@ FIXME: vld2.16 {d16[1], d17[1]}, [r0, :32] @ encoding: [0x5f,0x05,0xe0,0xf4]
-@ FIXME: vld2.32 {d16[1], d17[1]}, [r0] @ encoding: [0x8f,0x09,0xe0,0xf4]
+@ CHECK: vld2.32 {d16[1], d17[1]}, [r0] @ encoding: [0x8f,0x09,0xe0,0xf4]
@ FIXME: vld2.16 {d17[1], d19[1]}, [r0] @ encoding: [0x6f,0x15,0xe0,0xf4]
@ FIXME: vld2.32 {d17[0], d19[0]}, [r0, :64] @ encoding: [0x5f,0x19,0xe0,0xf4]
+@ CHECK: vld2.8 {d2[4], d3[4]}, [r2], r3 @ encoding: [0x83,0x21,0xa2,0xf4]
+@ CHECK: vld2.8 {d2[4], d3[4]}, [r2]! @ encoding: [0x8d,0x21,0xa2,0xf4]
+@ CHECK: vld2.8 {d2[4], d3[4]}, [r2] @ encoding: [0x8f,0x21,0xa2,0xf4]
+
@ vld3.8 {d16[1], d17[1], d18[1]}, [r0]
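
A quick note on the writeback variants in the new encodings above (a sketch of the
Rm field convention for the single-lane loads/stores): the low nibble of the first
encoding byte is Rm, with 0b1111 meaning no writeback and 0b1101 meaning
post-increment writeback:

    vld2.8 {d2[4], d3[4]}, [r2], r3   @ 0x83: Rm = r3, register writeback
    vld2.8 {d2[4], d3[4]}, [r2]!      @ 0x8d: Rm = 0b1101, fixed post-increment
    vld2.8 {d2[4], d3[4]}, [r2]       @ 0x8f: Rm = 0b1111, no writeback
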
Modified: llvm/trunk/test/MC/ARM/neon-vst-encoding.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/ARM/neon-vst-encoding.s?rev=146605&r1=146604&r2=146605&view=diff
==============================================================================
--- llvm/trunk/test/MC/ARM/neon-vst-encoding.s (original)
+++ llvm/trunk/test/MC/ARM/neon-vst-encoding.s Wed Dec 14 17:25:46 2011
@@ -100,16 +100,24 @@
@ vst2.8 {d16[1], d17[1]}, [r0, :16]
@ vst2.16 {d16[1], d17[1]}, [r0, :32]
-@ vst2.32 {d16[1], d17[1]}, [r0]
+ vst2.32 {d16[1], d17[1]}, [r0]
@ vst2.16 {d17[1], d19[1]}, [r0]
@ vst2.32 {d17[0], d19[0]}, [r0, :64]
+ vst2.8 {d2[4], d3[4]}, [r2], r3
+ vst2.8 {d2[4], d3[4]}, [r2]!
+ vst2.8 {d2[4], d3[4]}, [r2]
+
@ FIXME: vst2.8 {d16[1], d17[1]}, [r0, :16] @ encoding: [0x3f,0x01,0xc0,0xf4]
@ FIXME: vst2.16 {d16[1], d17[1]}, [r0, :32] @ encoding: [0x5f,0x05,0xc0,0xf4]
-@ FIXME: vst2.32 {d16[1], d17[1]}, [r0] @ encoding: [0x8f,0x09,0xc0,0xf4]
+@ CHECK: vst2.32 {d16[1], d17[1]}, [r0] @ encoding: [0x8f,0x09,0xc0,0xf4]
@ FIXME: vst2.16 {d17[1], d19[1]}, [r0] @ encoding: [0x6f,0x15,0xc0,0xf4]
@ FIXME: vst2.32 {d17[0], d19[0]}, [r0, :64] @ encoding: [0x5f,0x19,0xc0,0xf4]
+@ CHECK: vst2.8 {d2[4], d3[4]}, [r2], r3 @ encoding: [0x83,0x21,0x82,0xf4]
+@ CHECK: vst2.8 {d2[4], d3[4]}, [r2]! @ encoding: [0x8d,0x21,0x82,0xf4]
+@ CHECK: vst2.8 {d2[4], d3[4]}, [r2] @ encoding: [0x8f,0x21,0x82,0xf4]
+
@ vst3.8 {d16[1], d17[1], d18[1]}, [r0]
@ vst3.16 {d16[1], d17[1], d18[1]}, [r0]