[llvm-branch-commits] [llvm-branch] r84879 - in /llvm/branches/Apple/Leela: lib/Target/ARM/ARMISelDAGToDAG.cpp lib/Target/ARM/ARMISelLowering.cpp lib/Target/ARM/ARMInstrInfo.td lib/Target/ARM/ARMInstrThumb2.td lib/Target/ARM/ARMTargetMachine.cpp test/CodeGen/ARM/ifcvt5.ll test/CodeGen/ARM/sbfx.ll

Bill Wendling isanbard at gmail.com
Thu Oct 22 10:48:30 PDT 2009


Author: void
Date: Thu Oct 22 12:48:30 2009
New Revision: 84879

URL: http://llvm.org/viewvc/llvm-project?rev=84879&view=rev
Log:
$ svn merge -c 84009 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84009 into '.':
A    test/CodeGen/ARM/sbfx.ll
U    lib/Target/ARM/ARMInstrThumb2.td
U    lib/Target/ARM/ARMInstrInfo.td
U    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 84017 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84017 into '.':
G    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 84036 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84036 into '.':
U    lib/Target/ARM/ARMISelLowering.cpp
$ svn merge -c 84042 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84042 into '.':
G    lib/Target/ARM/ARMISelLowering.cpp
G    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 84109 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84109 into '.':
G    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 84110 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84110 into '.':
G    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 84117 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84117 into '.':
G    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 84122 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84122 into '.':
G    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 84144 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84144 into '.':
G    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 84218 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84218 into '.':
G    lib/Target/ARM/ARMISelLowering.cpp
$ svn merge -c 84785 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84785 into '.':
G    lib/Target/ARM/ARMISelLowering.cpp
$ svn merge -c 84813 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84813 into '.':
U    test/CodeGen/ARM/sbfx.ll
G    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 84868 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r84868 into '.':
U    test/CodeGen/ARM/ifcvt5.ll
U    lib/Target/ARM/ARMTargetMachine.cpp


Added:
    llvm/branches/Apple/Leela/test/CodeGen/ARM/sbfx.ll
      - copied, changed from r84009, llvm/trunk/test/CodeGen/ARM/sbfx.ll
Modified:
    llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp
    llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.cpp
    llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrInfo.td
    llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrThumb2.td
    llvm/branches/Apple/Leela/lib/Target/ARM/ARMTargetMachine.cpp
    llvm/branches/Apple/Leela/test/CodeGen/ARM/ifcvt5.ll

Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp?rev=84879&r1=84878&r2=84879&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp Thu Oct 22 12:48:30 2009
@@ -126,6 +126,30 @@
   /// SelectDYN_ALLOC - Select dynamic alloc for Thumb.
   SDNode *SelectDYN_ALLOC(SDValue Op);
 
+  /// SelectVLD - Select NEON load intrinsics.  NumVecs should
+  /// be 2, 3 or 4.  The opcode arrays specify the instructions used for
+  /// loads of D registers and for the even and odd subregs of Q registers.
+  /// For NumVecs == 2, QOpcodes1 is not used.
+  SDNode *SelectVLD(SDValue Op, unsigned NumVecs, unsigned *DOpcodes,
+                    unsigned *QOpcodes0, unsigned *QOpcodes1);
+
+  /// SelectVST - Select NEON store intrinsics.  NumVecs should
+  /// be 2, 3 or 4.  The opcode arrays specify the instructions used for
+  /// stores of D registers and for the even and odd subregs of Q registers.
+  /// For NumVecs == 2, QOpcodes1 is not used.
+  SDNode *SelectVST(SDValue Op, unsigned NumVecs, unsigned *DOpcodes,
+                    unsigned *QOpcodes0, unsigned *QOpcodes1);
+
+  /// SelectVLDSTLane - Select NEON load/store lane intrinsics.  NumVecs should
+  /// be 2, 3 or 4.  The opcode arrays specify the instructions used for
+  /// loads/stores of D registers and for the even and odd subregs of Q registers.
+  SDNode *SelectVLDSTLane(SDValue Op, bool IsLoad, unsigned NumVecs,
+                          unsigned *DOpcodes, unsigned *QOpcodes0,
+                          unsigned *QOpcodes1);
+
+  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
+  SDNode *SelectV6T2BitfieldExtractOp(SDValue Op, unsigned Opc);
+
   /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
   /// inline asm expressions.
   virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
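These helpers are driven by small per-type opcode tables supplied at each call
site.  A minimal sketch of the call shape, taken from the arm_neon_vld2
intrinsic case later in this patch (for VLD2 only one Q opcode table is
needed, so QOpcodes1 is null):

  // Opcode tables, indexed by element size (8, 16, 32, 64 for D registers).
  unsigned DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
                          ARM::VLD2d32, ARM::VLD2d64 };
  unsigned QOpcodes[] = { ARM::VLD2q8, ARM::VLD2q16, ARM::VLD2q32 };
  return SelectVLD(Op, 2, DOpcodes, QOpcodes, 0);
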
@@ -138,6 +162,31 @@
 };
 }
 
+/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
+/// operand. If so, Imm will receive the 32-bit value.
+static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
+  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
+    Imm = cast<ConstantSDNode>(N)->getZExtValue();
+    return true;
+  }
+  return false;
+}
+
+// isInt32Immediate - This method tests to see if the operand is a 32-bit
+// constant. If so, Imm will receive the 32-bit value.
+static bool isInt32Immediate(SDValue N, unsigned &Imm) {
+  return isInt32Immediate(N.getNode(), Imm);
+}
+
+// isOpcWithIntImmediate - This method tests to see if the node has a specific
+// opcode and an immediate integer right operand.
+// If so, Imm will receive the 32-bit value.
+static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
+  return N->getOpcode() == Opc &&
+         isInt32Immediate(N->getOperand(1).getNode(), Imm);
+}
+
+
 void ARMDAGToDAGISel::InstructionSelect() {
   DEBUG(BB->dump());
 
@@ -942,6 +991,315 @@
                                 VT, SDValue(Pair, 0), V1, SubReg1);
 }
 
+/// GetNEONSubregVT - Given a type for a 128-bit NEON vector, return the type
+/// for a 64-bit subregister of the vector.
+static EVT GetNEONSubregVT(EVT VT) {
+  switch (VT.getSimpleVT().SimpleTy) {
+  default: llvm_unreachable("unhandled NEON type");
+  case MVT::v16i8: return MVT::v8i8;
+  case MVT::v8i16: return MVT::v4i16;
+  case MVT::v4f32: return MVT::v2f32;
+  case MVT::v4i32: return MVT::v2i32;
+  case MVT::v2i64: return MVT::v1i64;
+  }
+}
+
+SDNode *ARMDAGToDAGISel::SelectVLD(SDValue Op, unsigned NumVecs,
+                                   unsigned *DOpcodes, unsigned *QOpcodes0,
+                                   unsigned *QOpcodes1) {
+  assert(NumVecs >=2 && NumVecs <= 4 && "VLD NumVecs out-of-range");
+  SDNode *N = Op.getNode();
+  DebugLoc dl = N->getDebugLoc();
+
+  SDValue MemAddr, MemUpdate, MemOpc;
+  if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
+    return NULL;
+
+  SDValue Chain = N->getOperand(0);
+  EVT VT = N->getValueType(0);
+  bool is64BitVector = VT.is64BitVector();
+
+  unsigned OpcodeIndex;
+  switch (VT.getSimpleVT().SimpleTy) {
+  default: llvm_unreachable("unhandled vld type");
+    // Double-register operations:
+  case MVT::v8i8:  OpcodeIndex = 0; break;
+  case MVT::v4i16: OpcodeIndex = 1; break;
+  case MVT::v2f32:
+  case MVT::v2i32: OpcodeIndex = 2; break;
+  case MVT::v1i64: OpcodeIndex = 3; break;
+    // Quad-register operations:
+  case MVT::v16i8: OpcodeIndex = 0; break;
+  case MVT::v8i16: OpcodeIndex = 1; break;
+  case MVT::v4f32:
+  case MVT::v4i32: OpcodeIndex = 2; break;
+  }
+
+  if (is64BitVector) {
+    unsigned Opc = DOpcodes[OpcodeIndex];
+    const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
+    std::vector<EVT> ResTys(NumVecs, VT);
+    ResTys.push_back(MVT::Other);
+    return CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 4);
+  }
+
+  EVT RegVT = GetNEONSubregVT(VT);
+  if (NumVecs == 2) {
+    // Quad registers are directly supported for VLD2,
+    // loading 2 pairs of D regs.
+    unsigned Opc = QOpcodes0[OpcodeIndex];
+    const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
+    std::vector<EVT> ResTys(4, VT);
+    ResTys.push_back(MVT::Other);
+    SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 4);
+    Chain = SDValue(VLd, 4);
+
+    // Combine the even and odd subregs to produce the result.
+    for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
+      SDNode *Q = PairDRegs(VT, SDValue(VLd, 2*Vec), SDValue(VLd, 2*Vec+1));
+      ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
+    }
+  } else {
+    // Otherwise, quad registers are loaded with two separate instructions,
+    // where one loads the even registers and the other loads the odd registers.
+
+    // Enable writeback to the address register.
+    MemOpc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(true), MVT::i32);
+
+    std::vector<EVT> ResTys(NumVecs, RegVT);
+    ResTys.push_back(MemAddr.getValueType());
+    ResTys.push_back(MVT::Other);
+
+    // Load the even subregs.
+    unsigned Opc = QOpcodes0[OpcodeIndex];
+    const SDValue OpsA[] = { MemAddr, MemUpdate, MemOpc, Chain };
+    SDNode *VLdA = CurDAG->getMachineNode(Opc, dl, ResTys, OpsA, 4);
+    Chain = SDValue(VLdA, NumVecs+1);
+
+    // Load the odd subregs.
+    Opc = QOpcodes1[OpcodeIndex];
+    const SDValue OpsB[] = { SDValue(VLdA, NumVecs), MemUpdate, MemOpc, Chain };
+    SDNode *VLdB = CurDAG->getMachineNode(Opc, dl, ResTys, OpsB, 4);
+    Chain = SDValue(VLdB, NumVecs+1);
+
+    // Combine the even and odd subregs to produce the result.
+    for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
+      SDNode *Q = PairDRegs(VT, SDValue(VLdA, Vec), SDValue(VLdB, Vec));
+      ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
+    }
+  }
+  ReplaceUses(SDValue(N, NumVecs), Chain);
+  return NULL;
+}
+
+SDNode *ARMDAGToDAGISel::SelectVST(SDValue Op, unsigned NumVecs,
+                                   unsigned *DOpcodes, unsigned *QOpcodes0,
+                                   unsigned *QOpcodes1) {
+  assert(NumVecs >=2 && NumVecs <= 4 && "VST NumVecs out-of-range");
+  SDNode *N = Op.getNode();
+  DebugLoc dl = N->getDebugLoc();
+
+  SDValue MemAddr, MemUpdate, MemOpc;
+  if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
+    return NULL;
+
+  SDValue Chain = N->getOperand(0);
+  EVT VT = N->getOperand(3).getValueType();
+  bool is64BitVector = VT.is64BitVector();
+
+  unsigned OpcodeIndex;
+  switch (VT.getSimpleVT().SimpleTy) {
+  default: llvm_unreachable("unhandled vst type");
+    // Double-register operations:
+  case MVT::v8i8:  OpcodeIndex = 0; break;
+  case MVT::v4i16: OpcodeIndex = 1; break;
+  case MVT::v2f32:
+  case MVT::v2i32: OpcodeIndex = 2; break;
+  case MVT::v1i64: OpcodeIndex = 3; break;
+    // Quad-register operations:
+  case MVT::v16i8: OpcodeIndex = 0; break;
+  case MVT::v8i16: OpcodeIndex = 1; break;
+  case MVT::v4f32:
+  case MVT::v4i32: OpcodeIndex = 2; break;
+  }
+
+  SmallVector<SDValue, 8> Ops;
+  Ops.push_back(MemAddr);
+  Ops.push_back(MemUpdate);
+  Ops.push_back(MemOpc);
+
+  if (is64BitVector) {
+    unsigned Opc = DOpcodes[OpcodeIndex];
+    for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+      Ops.push_back(N->getOperand(Vec+3));
+    Ops.push_back(Chain);
+    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), NumVecs+4);
+  }
+
+  EVT RegVT = GetNEONSubregVT(VT);
+  if (NumVecs == 2) {
+    // Quad registers are directly supported for VST2,
+    // storing 2 pairs of D regs.
+    unsigned Opc = QOpcodes0[OpcodeIndex];
+    for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
+      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
+                                                   N->getOperand(Vec+3)));
+      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
+                                                   N->getOperand(Vec+3)));
+    }
+    Ops.push_back(Chain);
+    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), 8);
+  }
+
+  // Otherwise, quad registers are stored with two separate instructions,
+  // where one stores the even registers and the other stores the odd registers.
+
+  // Enable writeback to the address register.
+  MemOpc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(true), MVT::i32);
+
+  // Store the even subregs.
+  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+    Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
+                                                 N->getOperand(Vec+3)));
+  Ops.push_back(Chain);
+  unsigned Opc = QOpcodes0[OpcodeIndex];
+  SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
+                                        MVT::Other, Ops.data(), NumVecs+4);
+  Chain = SDValue(VStA, 1);
+
+  // Store the odd subregs.
+  Ops[0] = SDValue(VStA, 0); // MemAddr
+  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+    Ops[Vec+3] = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
+                                                N->getOperand(Vec+3));
+  Ops[NumVecs+3] = Chain;
+  Opc = QOpcodes1[OpcodeIndex];
+  SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
+                                        MVT::Other, Ops.data(), NumVecs+4);
+  Chain = SDValue(VStB, 1);
+  ReplaceUses(SDValue(N, 0), Chain);
+  return NULL;
+}
+
+SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDValue Op, bool IsLoad,
+                                         unsigned NumVecs, unsigned *DOpcodes,
+                                         unsigned *QOpcodes0,
+                                         unsigned *QOpcodes1) {
+  assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
+  SDNode *N = Op.getNode();
+  DebugLoc dl = N->getDebugLoc();
+
+  SDValue MemAddr, MemUpdate, MemOpc;
+  if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
+    return NULL;
+
+  SDValue Chain = N->getOperand(0);
+  unsigned Lane =
+    cast<ConstantSDNode>(N->getOperand(NumVecs+3))->getZExtValue();
+  EVT VT = IsLoad ? N->getValueType(0) : N->getOperand(3).getValueType();
+  bool is64BitVector = VT.is64BitVector();
+
+  // Quad registers are handled by load/store of subregs. Find the subreg info.
+  unsigned NumElts = 0;
+  int SubregIdx = 0;
+  EVT RegVT = VT;
+  if (!is64BitVector) {
+    RegVT = GetNEONSubregVT(VT);
+    NumElts = RegVT.getVectorNumElements();
+    SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
+  }
+
+  unsigned OpcodeIndex;
+  switch (VT.getSimpleVT().SimpleTy) {
+  default: llvm_unreachable("unhandled vld/vst lane type");
+    // Double-register operations:
+  case MVT::v8i8:  OpcodeIndex = 0; break;
+  case MVT::v4i16: OpcodeIndex = 1; break;
+  case MVT::v2f32:
+  case MVT::v2i32: OpcodeIndex = 2; break;
+    // Quad-register operations:
+  case MVT::v8i16: OpcodeIndex = 0; break;
+  case MVT::v4f32:
+  case MVT::v4i32: OpcodeIndex = 1; break;
+  }
+
+  SmallVector<SDValue, 9> Ops;
+  Ops.push_back(MemAddr);
+  Ops.push_back(MemUpdate);
+  Ops.push_back(MemOpc);
+
+  unsigned Opc = 0;
+  if (is64BitVector) {
+    Opc = DOpcodes[OpcodeIndex];
+    for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+      Ops.push_back(N->getOperand(Vec+3));
+  } else {
+    // Check if this is loading the even or odd subreg of a Q register.
+    if (Lane < NumElts) {
+      Opc = QOpcodes0[OpcodeIndex];
+    } else {
+      Lane -= NumElts;
+      Opc = QOpcodes1[OpcodeIndex];
+    }
+    // Extract the subregs of the input vector.
+    for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+      Ops.push_back(CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                   N->getOperand(Vec+3)));
+  }
+  Ops.push_back(getI32Imm(Lane));
+  Ops.push_back(Chain);
+
+  if (!IsLoad)
+    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), NumVecs+5);
+
+  std::vector<EVT> ResTys(NumVecs, RegVT);
+  ResTys.push_back(MVT::Other);
+  SDNode *VLdLn =
+    CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), NumVecs+5);
+  // For a 64-bit vector load to D registers, nothing more needs to be done.
+  if (is64BitVector)
+    return VLdLn;
+
+  // For 128-bit vectors, take the 64-bit results of the load and insert them
+  // as subregs into the result.
+  for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
+    SDValue QuadVec = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                                    N->getOperand(Vec+3),
+                                                    SDValue(VLdLn, Vec));
+    ReplaceUses(SDValue(N, Vec), QuadVec);
+  }
+
+  Chain = SDValue(VLdLn, NumVecs);
+  ReplaceUses(SDValue(N, NumVecs), Chain);
+  return NULL;
+}
+
+SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDValue Op,
+                                                     unsigned Opc) {
+  if (!Subtarget->hasV6T2Ops())
+    return NULL;
+
+  unsigned Shl_imm = 0;
+  if (isOpcWithIntImmediate(Op.getOperand(0).getNode(), ISD::SHL, Shl_imm)){
+    assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
+    unsigned Srl_imm = 0;
+    if (isInt32Immediate(Op.getOperand(1), Srl_imm)) {
+      assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
+      unsigned Width = 32 - Srl_imm;
+      int LSB = Srl_imm - Shl_imm;
+      if (LSB < 0)
+        return NULL;
+      SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
+      SDValue Ops[] = { Op.getOperand(0).getOperand(0),
+                        CurDAG->getTargetConstant(LSB, MVT::i32),
+                        CurDAG->getTargetConstant(Width, MVT::i32),
+                        getAL(CurDAG), Reg0 };
+      return CurDAG->SelectNodeTo(Op.getNode(), Opc, MVT::i32, Ops, 5);
+    }
+  }
+  return NULL;
+}
+
 SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
   SDNode *N = Op.getNode();
   DebugLoc dl = N->getDebugLoc();
@@ -1019,6 +1377,16 @@
   }
   case ARMISD::DYN_ALLOC:
     return SelectDYN_ALLOC(Op);
+  case ISD::SRL:
+    if (SDNode *I = SelectV6T2BitfieldExtractOp(Op,
+                      Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX))
+      return I;
+    break;
+  case ISD::SRA:
+    if (SDNode *I = SelectV6T2BitfieldExtractOp(Op,
+                      Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX))
+      return I;
+    break;
   case ISD::MUL:
     if (Subtarget->isThumb1Only())
       break;
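
The new ISD::SRL/ISD::SRA cases fold a right shift whose operand is a left
shift by a constant into a single UBFX/SBFX bitfield extract when the target
has v6T2 ops.  As a rough illustration (assuming an armv6t2 or Thumb2 target;
this snippet is not part of the patch), C source like the following is
expected to select one sbfx instead of an lsl/asr pair:

  // Sign-extract the low 12 bits of a: shl by 20, then ashr by 20.
  // With Shl_imm == Srl_imm == 20, SelectV6T2BitfieldExtractOp computes
  // Width = 32 - 20 = 12 and LSB = 20 - 20 = 0, i.e. "sbfx r0, r0, #0, #12".
  int low12(int a) {
    return (a << 20) >> 20;
  }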
@@ -1340,746 +1708,96 @@
   case ISD::INTRINSIC_VOID:
   case ISD::INTRINSIC_W_CHAIN: {
     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
-    EVT VT = N->getValueType(0);
-    unsigned Opc = 0;
-
     switch (IntNo) {
     default:
       break;
 
     case Intrinsic::arm_neon_vld2: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vld2 type");
-        case MVT::v8i8:  Opc = ARM::VLD2d8; break;
-        case MVT::v4i16: Opc = ARM::VLD2d16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VLD2d32; break;
-        case MVT::v1i64: Opc = ARM::VLD2d64; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
-        return CurDAG->getMachineNode(Opc, dl, VT, VT, MVT::Other, Ops, 4);
-      }
-      // Quad registers are loaded as pairs of double registers.
-      EVT RegVT;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vld2 type");
-      case MVT::v16i8: Opc = ARM::VLD2q8; RegVT = MVT::v8i8; break;
-      case MVT::v8i16: Opc = ARM::VLD2q16; RegVT = MVT::v4i16; break;
-      case MVT::v4f32: Opc = ARM::VLD2q32; RegVT = MVT::v2f32; break;
-      case MVT::v4i32: Opc = ARM::VLD2q32; RegVT = MVT::v2i32; break;
-      }
-      SDValue Chain = N->getOperand(0);
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
-      std::vector<EVT> ResTys(4, RegVT);
-      ResTys.push_back(MVT::Other);
-      SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 4);
-      SDNode *Q0 = PairDRegs(VT, SDValue(VLd, 0), SDValue(VLd, 1));
-      SDNode *Q1 = PairDRegs(VT, SDValue(VLd, 2), SDValue(VLd, 3));
-      ReplaceUses(SDValue(N, 0), SDValue(Q0, 0));
-      ReplaceUses(SDValue(N, 1), SDValue(Q1, 0));
-      ReplaceUses(SDValue(N, 2), SDValue(VLd, 4));
-      return NULL;
+      unsigned DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
+                              ARM::VLD2d32, ARM::VLD2d64 };
+      unsigned QOpcodes[] = { ARM::VLD2q8, ARM::VLD2q16, ARM::VLD2q32 };
+      return SelectVLD(Op, 2, DOpcodes, QOpcodes, 0);
     }
 
     case Intrinsic::arm_neon_vld3: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vld3 type");
-        case MVT::v8i8:  Opc = ARM::VLD3d8; break;
-        case MVT::v4i16: Opc = ARM::VLD3d16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VLD3d32; break;
-        case MVT::v1i64: Opc = ARM::VLD3d64; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
-        return CurDAG->getMachineNode(Opc, dl, VT, VT, VT, MVT::Other, Ops, 4);
-      }
-      // Quad registers are loaded with two separate instructions, where one
-      // loads the even registers and the other loads the odd registers.
-      EVT RegVT;
-      unsigned Opc2 = 0;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vld3 type");
-      case MVT::v16i8:
-        Opc = ARM::VLD3q8a;  Opc2 = ARM::VLD3q8b;  RegVT = MVT::v8i8; break;
-      case MVT::v8i16:
-        Opc = ARM::VLD3q16a; Opc2 = ARM::VLD3q16b; RegVT = MVT::v4i16; break;
-      case MVT::v4f32:
-        Opc = ARM::VLD3q32a; Opc2 = ARM::VLD3q32b; RegVT = MVT::v2f32; break;
-      case MVT::v4i32:
-        Opc = ARM::VLD3q32a; Opc2 = ARM::VLD3q32b; RegVT = MVT::v2i32; break;
-      }
-      SDValue Chain = N->getOperand(0);
-      // Enable writeback to the address register.
-      MemOpc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(true), MVT::i32);
-
-      std::vector<EVT> ResTys(3, RegVT);
-      ResTys.push_back(MemAddr.getValueType());
-      ResTys.push_back(MVT::Other);
-
-      const SDValue OpsA[] = { MemAddr, MemUpdate, MemOpc, Chain };
-      SDNode *VLdA = CurDAG->getMachineNode(Opc, dl, ResTys, OpsA, 4);
-      Chain = SDValue(VLdA, 4);
-
-      const SDValue OpsB[] = { SDValue(VLdA, 3), MemUpdate, MemOpc, Chain };
-      SDNode *VLdB = CurDAG->getMachineNode(Opc2, dl, ResTys, OpsB, 4);
-      Chain = SDValue(VLdB, 4);
-
-      SDNode *Q0 = PairDRegs(VT, SDValue(VLdA, 0), SDValue(VLdB, 0));
-      SDNode *Q1 = PairDRegs(VT, SDValue(VLdA, 1), SDValue(VLdB, 1));
-      SDNode *Q2 = PairDRegs(VT, SDValue(VLdA, 2), SDValue(VLdB, 2));
-      ReplaceUses(SDValue(N, 0), SDValue(Q0, 0));
-      ReplaceUses(SDValue(N, 1), SDValue(Q1, 0));
-      ReplaceUses(SDValue(N, 2), SDValue(Q2, 0));
-      ReplaceUses(SDValue(N, 3), Chain);
-      return NULL;
+      unsigned DOpcodes[] = { ARM::VLD3d8, ARM::VLD3d16,
+                              ARM::VLD3d32, ARM::VLD3d64 };
+      unsigned QOpcodes0[] = { ARM::VLD3q8a, ARM::VLD3q16a, ARM::VLD3q32a };
+      unsigned QOpcodes1[] = { ARM::VLD3q8b, ARM::VLD3q16b, ARM::VLD3q32b };
+      return SelectVLD(Op, 3, DOpcodes, QOpcodes0, QOpcodes1);
     }
 
     case Intrinsic::arm_neon_vld4: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vld4 type");
-        case MVT::v8i8:  Opc = ARM::VLD4d8; break;
-        case MVT::v4i16: Opc = ARM::VLD4d16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VLD4d32; break;
-        case MVT::v1i64: Opc = ARM::VLD4d64; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Chain };
-        std::vector<EVT> ResTys(4, VT);
-        ResTys.push_back(MVT::Other);
-        return CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 4);
-      }
-      // Quad registers are loaded with two separate instructions, where one
-      // loads the even registers and the other loads the odd registers.
-      EVT RegVT;
-      unsigned Opc2 = 0;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vld4 type");
-      case MVT::v16i8:
-        Opc = ARM::VLD4q8a;  Opc2 = ARM::VLD4q8b;  RegVT = MVT::v8i8; break;
-      case MVT::v8i16:
-        Opc = ARM::VLD4q16a; Opc2 = ARM::VLD4q16b; RegVT = MVT::v4i16; break;
-      case MVT::v4f32:
-        Opc = ARM::VLD4q32a; Opc2 = ARM::VLD4q32b; RegVT = MVT::v2f32; break;
-      case MVT::v4i32:
-        Opc = ARM::VLD4q32a; Opc2 = ARM::VLD4q32b; RegVT = MVT::v2i32; break;
-      }
-      SDValue Chain = N->getOperand(0);
-      // Enable writeback to the address register.
-      MemOpc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(true), MVT::i32);
-
-      std::vector<EVT> ResTys(4, RegVT);
-      ResTys.push_back(MemAddr.getValueType());
-      ResTys.push_back(MVT::Other);
-
-      const SDValue OpsA[] = { MemAddr, MemUpdate, MemOpc, Chain };
-      SDNode *VLdA = CurDAG->getMachineNode(Opc, dl, ResTys, OpsA, 4);
-      Chain = SDValue(VLdA, 5);
-
-      const SDValue OpsB[] = { SDValue(VLdA, 4), MemUpdate, MemOpc, Chain };
-      SDNode *VLdB = CurDAG->getMachineNode(Opc2, dl, ResTys, OpsB, 4);
-      Chain = SDValue(VLdB, 5);
-
-      SDNode *Q0 = PairDRegs(VT, SDValue(VLdA, 0), SDValue(VLdB, 0));
-      SDNode *Q1 = PairDRegs(VT, SDValue(VLdA, 1), SDValue(VLdB, 1));
-      SDNode *Q2 = PairDRegs(VT, SDValue(VLdA, 2), SDValue(VLdB, 2));
-      SDNode *Q3 = PairDRegs(VT, SDValue(VLdA, 3), SDValue(VLdB, 3));
-      ReplaceUses(SDValue(N, 0), SDValue(Q0, 0));
-      ReplaceUses(SDValue(N, 1), SDValue(Q1, 0));
-      ReplaceUses(SDValue(N, 2), SDValue(Q2, 0));
-      ReplaceUses(SDValue(N, 3), SDValue(Q3, 0));
-      ReplaceUses(SDValue(N, 4), Chain);
-      return NULL;
+      unsigned DOpcodes[] = { ARM::VLD4d8, ARM::VLD4d16,
+                              ARM::VLD4d32, ARM::VLD4d64 };
+      unsigned QOpcodes0[] = { ARM::VLD4q8a, ARM::VLD4q16a, ARM::VLD4q32a };
+      unsigned QOpcodes1[] = { ARM::VLD4q8b, ARM::VLD4q16b, ARM::VLD4q32b };
+      return SelectVLD(Op, 4, DOpcodes, QOpcodes0, QOpcodes1);
     }
 
     case Intrinsic::arm_neon_vld2lane: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vld2lane type");
-        case MVT::v8i8:  Opc = ARM::VLD2LNd8; break;
-        case MVT::v4i16: Opc = ARM::VLD2LNd16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VLD2LNd32; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                                N->getOperand(3), N->getOperand(4),
-                                N->getOperand(5), Chain };
-        return CurDAG->getMachineNode(Opc, dl, VT, VT, MVT::Other, Ops, 7);
-      }
-      // Quad registers are handled by extracting subregs, doing the load,
-      // and then inserting the results as subregs.
-      EVT RegVT;
-      unsigned Opc2 = 0;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vld2lane type");
-      case MVT::v8i16:
-        Opc = ARM::VLD2LNq16a;
-        Opc2 = ARM::VLD2LNq16b;
-        RegVT = MVT::v4i16;
-        break;
-      case MVT::v4f32:
-        Opc = ARM::VLD2LNq32a;
-        Opc2 = ARM::VLD2LNq32b;
-        RegVT = MVT::v2f32;
-        break;
-      case MVT::v4i32:
-        Opc = ARM::VLD2LNq32a;
-        Opc2 = ARM::VLD2LNq32b;
-        RegVT = MVT::v2i32;
-        break;
-      }
-      SDValue Chain = N->getOperand(0);
-      unsigned Lane = cast<ConstantSDNode>(N->getOperand(5))->getZExtValue();
-      unsigned NumElts = RegVT.getVectorNumElements();
-      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
-
-      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(4));
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1,
-                              getI32Imm(Lane % NumElts), Chain };
-      SDNode *VLdLn = CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
-                                             dl, RegVT, RegVT, MVT::Other,
-                                             Ops, 7);
-      SDValue Q0 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
-                                                 N->getOperand(3),
-                                                 SDValue(VLdLn, 0));
-      SDValue Q1 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
-                                                 N->getOperand(4),
-                                                 SDValue(VLdLn, 1));
-      Chain = SDValue(VLdLn, 2);
-      ReplaceUses(SDValue(N, 0), Q0);
-      ReplaceUses(SDValue(N, 1), Q1);
-      ReplaceUses(SDValue(N, 2), Chain);
-      return NULL;
+      unsigned DOpcodes[] = { ARM::VLD2LNd8, ARM::VLD2LNd16, ARM::VLD2LNd32 };
+      unsigned QOpcodes0[] = { ARM::VLD2LNq16a, ARM::VLD2LNq32a };
+      unsigned QOpcodes1[] = { ARM::VLD2LNq16b, ARM::VLD2LNq32b };
+      return SelectVLDSTLane(Op, true, 2, DOpcodes, QOpcodes0, QOpcodes1);
     }
 
     case Intrinsic::arm_neon_vld3lane: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vld3lane type");
-        case MVT::v8i8:  Opc = ARM::VLD3LNd8; break;
-        case MVT::v4i16: Opc = ARM::VLD3LNd16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VLD3LNd32; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                                N->getOperand(3), N->getOperand(4),
-                                N->getOperand(5), N->getOperand(6), Chain };
-        return CurDAG->getMachineNode(Opc, dl, VT, VT, VT, MVT::Other, Ops, 8);
-      }
-      // Quad registers are handled by extracting subregs, doing the load,
-      // and then inserting the results as subregs.
-      EVT RegVT;
-      unsigned Opc2 = 0;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vld3lane type");
-      case MVT::v8i16:
-        Opc = ARM::VLD3LNq16a;
-        Opc2 = ARM::VLD3LNq16b;
-        RegVT = MVT::v4i16;
-        break;
-      case MVT::v4f32:
-        Opc = ARM::VLD3LNq32a;
-        Opc2 = ARM::VLD3LNq32b;
-        RegVT = MVT::v2f32;
-        break;
-      case MVT::v4i32:
-        Opc = ARM::VLD3LNq32a;
-        Opc2 = ARM::VLD3LNq32b;
-        RegVT = MVT::v2i32;
-        break;
-      }
-      SDValue Chain = N->getOperand(0);
-      unsigned Lane = cast<ConstantSDNode>(N->getOperand(6))->getZExtValue();
-      unsigned NumElts = RegVT.getVectorNumElements();
-      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
-
-      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(4));
-      SDValue D2 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(5));
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1, D2,
-                              getI32Imm(Lane % NumElts), Chain };
-      SDNode *VLdLn = CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
-                                             dl, RegVT, RegVT, RegVT,
-                                             MVT::Other, Ops, 8);
-      SDValue Q0 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
-                                                 N->getOperand(3),
-                                                 SDValue(VLdLn, 0));
-      SDValue Q1 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
-                                                 N->getOperand(4),
-                                                 SDValue(VLdLn, 1));
-      SDValue Q2 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
-                                                 N->getOperand(5),
-                                                 SDValue(VLdLn, 2));
-      Chain = SDValue(VLdLn, 3);
-      ReplaceUses(SDValue(N, 0), Q0);
-      ReplaceUses(SDValue(N, 1), Q1);
-      ReplaceUses(SDValue(N, 2), Q2);
-      ReplaceUses(SDValue(N, 3), Chain);
-      return NULL;
+      unsigned DOpcodes[] = { ARM::VLD3LNd8, ARM::VLD3LNd16, ARM::VLD3LNd32 };
+      unsigned QOpcodes0[] = { ARM::VLD3LNq16a, ARM::VLD3LNq32a };
+      unsigned QOpcodes1[] = { ARM::VLD3LNq16b, ARM::VLD3LNq32b };
+      return SelectVLDSTLane(Op, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
     }
 
     case Intrinsic::arm_neon_vld4lane: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vld4lane type");
-        case MVT::v8i8:  Opc = ARM::VLD4LNd8; break;
-        case MVT::v4i16: Opc = ARM::VLD4LNd16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VLD4LNd32; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                                N->getOperand(3), N->getOperand(4),
-                                N->getOperand(5), N->getOperand(6),
-                                N->getOperand(7), Chain };
-        std::vector<EVT> ResTys(4, VT);
-        ResTys.push_back(MVT::Other);
-        return CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 9);
-      }
-      // Quad registers are handled by extracting subregs, doing the load,
-      // and then inserting the results as subregs.
-      EVT RegVT;
-      unsigned Opc2 = 0;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vld4lane type");
-      case MVT::v8i16:
-        Opc = ARM::VLD4LNq16a;
-        Opc2 = ARM::VLD4LNq16b;
-        RegVT = MVT::v4i16;
-        break;
-      case MVT::v4f32:
-        Opc = ARM::VLD4LNq32a;
-        Opc2 = ARM::VLD4LNq32b;
-        RegVT = MVT::v2f32;
-        break;
-      case MVT::v4i32:
-        Opc = ARM::VLD4LNq32a;
-        Opc2 = ARM::VLD4LNq32b;
-        RegVT = MVT::v2i32;
-        break;
-      }
-      SDValue Chain = N->getOperand(0);
-      unsigned Lane = cast<ConstantSDNode>(N->getOperand(7))->getZExtValue();
-      unsigned NumElts = RegVT.getVectorNumElements();
-      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
-
-      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(4));
-      SDValue D2 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(5));
-      SDValue D3 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(6));
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1, D2, D3,
-                              getI32Imm(Lane % NumElts), Chain };
-      std::vector<EVT> ResTys(4, RegVT);
-      ResTys.push_back(MVT::Other);
-      SDNode *VLdLn = CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
-                                             dl, ResTys, Ops, 9);
-      SDValue Q0 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
-                                                 N->getOperand(3),
-                                                 SDValue(VLdLn, 0));
-      SDValue Q1 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
-                                                 N->getOperand(4),
-                                                 SDValue(VLdLn, 1));
-      SDValue Q2 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
-                                                 N->getOperand(5),
-                                                 SDValue(VLdLn, 2));
-      SDValue Q3 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
-                                                 N->getOperand(6),
-                                                 SDValue(VLdLn, 3));
-      Chain = SDValue(VLdLn, 4);
-      ReplaceUses(SDValue(N, 0), Q0);
-      ReplaceUses(SDValue(N, 1), Q1);
-      ReplaceUses(SDValue(N, 2), Q2);
-      ReplaceUses(SDValue(N, 3), Q3);
-      ReplaceUses(SDValue(N, 4), Chain);
-      return NULL;
+      unsigned DOpcodes[] = { ARM::VLD4LNd8, ARM::VLD4LNd16, ARM::VLD4LNd32 };
+      unsigned QOpcodes0[] = { ARM::VLD4LNq16a, ARM::VLD4LNq32a };
+      unsigned QOpcodes1[] = { ARM::VLD4LNq16b, ARM::VLD4LNq32b };
+      return SelectVLDSTLane(Op, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
     }
 
     case Intrinsic::arm_neon_vst2: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      VT = N->getOperand(3).getValueType();
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vst2 type");
-        case MVT::v8i8:  Opc = ARM::VST2d8; break;
-        case MVT::v4i16: Opc = ARM::VST2d16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VST2d32; break;
-        case MVT::v1i64: Opc = ARM::VST2d64; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                                N->getOperand(3), N->getOperand(4), Chain };
-        return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6);
-      }
-      // Quad registers are stored as pairs of double registers.
-      EVT RegVT;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vst2 type");
-      case MVT::v16i8: Opc = ARM::VST2q8; RegVT = MVT::v8i8; break;
-      case MVT::v8i16: Opc = ARM::VST2q16; RegVT = MVT::v4i16; break;
-      case MVT::v4f32: Opc = ARM::VST2q32; RegVT = MVT::v2f32; break;
-      case MVT::v4i32: Opc = ARM::VST2q32; RegVT = MVT::v2i32; break;
-      }
-      SDValue Chain = N->getOperand(0);
-      SDValue D0 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D1 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D2 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
-                                                  N->getOperand(4));
-      SDValue D3 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
-                                                  N->getOperand(4));
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                              D0, D1, D2, D3, Chain };
-      return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 8);
+      unsigned DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
+                              ARM::VST2d32, ARM::VST2d64 };
+      unsigned QOpcodes[] = { ARM::VST2q8, ARM::VST2q16, ARM::VST2q32 };
+      return SelectVST(Op, 2, DOpcodes, QOpcodes, 0);
     }
 
     case Intrinsic::arm_neon_vst3: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      VT = N->getOperand(3).getValueType();
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vst3 type");
-        case MVT::v8i8:  Opc = ARM::VST3d8; break;
-        case MVT::v4i16: Opc = ARM::VST3d16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VST3d32; break;
-        case MVT::v1i64: Opc = ARM::VST3d64; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                                N->getOperand(3), N->getOperand(4),
-                                N->getOperand(5), Chain };
-        return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7);
-      }
-      // Quad registers are stored with two separate instructions, where one
-      // stores the even registers and the other stores the odd registers.
-      EVT RegVT;
-      unsigned Opc2 = 0;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vst3 type");
-      case MVT::v16i8:
-        Opc = ARM::VST3q8a;  Opc2 = ARM::VST3q8b;  RegVT = MVT::v8i8; break;
-      case MVT::v8i16:
-        Opc = ARM::VST3q16a; Opc2 = ARM::VST3q16b; RegVT = MVT::v4i16; break;
-      case MVT::v4f32:
-        Opc = ARM::VST3q32a; Opc2 = ARM::VST3q32b; RegVT = MVT::v2f32; break;
-      case MVT::v4i32:
-        Opc = ARM::VST3q32a; Opc2 = ARM::VST3q32b; RegVT = MVT::v2i32; break;
-      }
-      SDValue Chain = N->getOperand(0);
-      // Enable writeback to the address register.
-      MemOpc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(true), MVT::i32);
-
-      SDValue D0 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D2 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
-                                                  N->getOperand(4));
-      SDValue D4 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
-                                                  N->getOperand(5));
-      const SDValue OpsA[] = { MemAddr, MemUpdate, MemOpc, D0, D2, D4, Chain };
-      SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
-                                            MVT::Other, OpsA, 7);
-      Chain = SDValue(VStA, 1);
-
-      SDValue D1 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D3 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
-                                                  N->getOperand(4));
-      SDValue D5 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
-                                                  N->getOperand(5));
-      MemAddr = SDValue(VStA, 0);
-      const SDValue OpsB[] = { MemAddr, MemUpdate, MemOpc, D1, D3, D5, Chain };
-      SDNode *VStB = CurDAG->getMachineNode(Opc2, dl, MemAddr.getValueType(),
-                                            MVT::Other, OpsB, 7);
-      Chain = SDValue(VStB, 1);
-      ReplaceUses(SDValue(N, 0), Chain);
-      return NULL;
+      unsigned DOpcodes[] = { ARM::VST3d8, ARM::VST3d16,
+                              ARM::VST3d32, ARM::VST3d64 };
+      unsigned QOpcodes0[] = { ARM::VST3q8a, ARM::VST3q16a, ARM::VST3q32a };
+      unsigned QOpcodes1[] = { ARM::VST3q8b, ARM::VST3q16b, ARM::VST3q32b };
+      return SelectVST(Op, 3, DOpcodes, QOpcodes0, QOpcodes1);
     }
 
     case Intrinsic::arm_neon_vst4: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      VT = N->getOperand(3).getValueType();
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vst4 type");
-        case MVT::v8i8:  Opc = ARM::VST4d8; break;
-        case MVT::v4i16: Opc = ARM::VST4d16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VST4d32; break;
-        case MVT::v1i64: Opc = ARM::VST4d64; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                                N->getOperand(3), N->getOperand(4),
-                                N->getOperand(5), N->getOperand(6), Chain };
-        return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 8);
-      }
-      // Quad registers are stored with two separate instructions, where one
-      // stores the even registers and the other stores the odd registers.
-      EVT RegVT;
-      unsigned Opc2 = 0;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vst4 type");
-      case MVT::v16i8:
-        Opc = ARM::VST4q8a;  Opc2 = ARM::VST4q8b;  RegVT = MVT::v8i8; break;
-      case MVT::v8i16:
-        Opc = ARM::VST4q16a; Opc2 = ARM::VST4q16b; RegVT = MVT::v4i16; break;
-      case MVT::v4f32:
-        Opc = ARM::VST4q32a; Opc2 = ARM::VST4q32b; RegVT = MVT::v2f32; break;
-      case MVT::v4i32:
-        Opc = ARM::VST4q32a; Opc2 = ARM::VST4q32b; RegVT = MVT::v2i32; break;
-      }
-      SDValue Chain = N->getOperand(0);
-      // Enable writeback to the address register.
-      MemOpc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(true), MVT::i32);
-
-      SDValue D0 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D2 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
-                                                  N->getOperand(4));
-      SDValue D4 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
-                                                  N->getOperand(5));
-      SDValue D6 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
-                                                  N->getOperand(6));
-      const SDValue OpsA[] = { MemAddr, MemUpdate, MemOpc,
-                               D0, D2, D4, D6, Chain };
-      SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
-                                            MVT::Other, OpsA, 8);
-      Chain = SDValue(VStA, 1);
-
-      SDValue D1 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D3 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
-                                                  N->getOperand(4));
-      SDValue D5 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
-                                                  N->getOperand(5));
-      SDValue D7 = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
-                                                  N->getOperand(6));
-      MemAddr = SDValue(VStA, 0);
-      const SDValue OpsB[] = { MemAddr, MemUpdate, MemOpc,
-                               D1, D3, D5, D7, Chain };
-      SDNode *VStB = CurDAG->getMachineNode(Opc2, dl, MemAddr.getValueType(),
-                                            MVT::Other, OpsB, 8);
-      Chain = SDValue(VStB, 1);
-      ReplaceUses(SDValue(N, 0), Chain);
-      return NULL;
+      unsigned DOpcodes[] = { ARM::VST4d8, ARM::VST4d16,
+                              ARM::VST4d32, ARM::VST4d64 };
+      unsigned QOpcodes0[] = { ARM::VST4q8a, ARM::VST4q16a, ARM::VST4q32a };
+      unsigned QOpcodes1[] = { ARM::VST4q8b, ARM::VST4q16b, ARM::VST4q32b };
+      return SelectVST(Op, 4, DOpcodes, QOpcodes0, QOpcodes1);
     }
 
     case Intrinsic::arm_neon_vst2lane: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      VT = N->getOperand(3).getValueType();
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vst2lane type");
-        case MVT::v8i8:  Opc = ARM::VST2LNd8; break;
-        case MVT::v4i16: Opc = ARM::VST2LNd16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VST2LNd32; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                                N->getOperand(3), N->getOperand(4),
-                                N->getOperand(5), Chain };
-        return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7);
-      }
-      // Quad registers are handled by extracting subregs and then doing
-      // the store.
-      EVT RegVT;
-      unsigned Opc2 = 0;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vst2lane type");
-      case MVT::v8i16:
-        Opc = ARM::VST2LNq16a;
-        Opc2 = ARM::VST2LNq16b;
-        RegVT = MVT::v4i16;
-        break;
-      case MVT::v4f32:
-        Opc = ARM::VST2LNq32a;
-        Opc2 = ARM::VST2LNq32b;
-        RegVT = MVT::v2f32;
-        break;
-      case MVT::v4i32:
-        Opc = ARM::VST2LNq32a;
-        Opc2 = ARM::VST2LNq32b;
-        RegVT = MVT::v2i32;
-        break;
-      }
-      SDValue Chain = N->getOperand(0);
-      unsigned Lane = cast<ConstantSDNode>(N->getOperand(5))->getZExtValue();
-      unsigned NumElts = RegVT.getVectorNumElements();
-      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
-
-      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(4));
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1,
-                              getI32Imm(Lane % NumElts), Chain };
-      return CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
-                                    dl, MVT::Other, Ops, 7);
+      unsigned DOpcodes[] = { ARM::VST2LNd8, ARM::VST2LNd16, ARM::VST2LNd32 };
+      unsigned QOpcodes0[] = { ARM::VST2LNq16a, ARM::VST2LNq32a };
+      unsigned QOpcodes1[] = { ARM::VST2LNq16b, ARM::VST2LNq32b };
+      return SelectVLDSTLane(Op, false, 2, DOpcodes, QOpcodes0, QOpcodes1);
     }
 
     case Intrinsic::arm_neon_vst3lane: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      VT = N->getOperand(3).getValueType();
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vst3lane type");
-        case MVT::v8i8:  Opc = ARM::VST3LNd8; break;
-        case MVT::v4i16: Opc = ARM::VST3LNd16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VST3LNd32; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                                N->getOperand(3), N->getOperand(4),
-                                N->getOperand(5), N->getOperand(6), Chain };
-        return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 8);
-      }
-      // Quad registers are handled by extracting subregs and then doing
-      // the store.
-      EVT RegVT;
-      unsigned Opc2 = 0;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vst3lane type");
-      case MVT::v8i16:
-        Opc = ARM::VST3LNq16a;
-        Opc2 = ARM::VST3LNq16b;
-        RegVT = MVT::v4i16;
-        break;
-      case MVT::v4f32:
-        Opc = ARM::VST3LNq32a;
-        Opc2 = ARM::VST3LNq32b;
-        RegVT = MVT::v2f32;
-        break;
-      case MVT::v4i32:
-        Opc = ARM::VST3LNq32a;
-        Opc2 = ARM::VST3LNq32b;
-        RegVT = MVT::v2i32;
-        break;
-      }
-      SDValue Chain = N->getOperand(0);
-      unsigned Lane = cast<ConstantSDNode>(N->getOperand(6))->getZExtValue();
-      unsigned NumElts = RegVT.getVectorNumElements();
-      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
-
-      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(4));
-      SDValue D2 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(5));
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1, D2,
-                              getI32Imm(Lane % NumElts), Chain };
-      return CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
-                                    dl, MVT::Other, Ops, 8);
+      unsigned DOpcodes[] = { ARM::VST3LNd8, ARM::VST3LNd16, ARM::VST3LNd32 };
+      unsigned QOpcodes0[] = { ARM::VST3LNq16a, ARM::VST3LNq32a };
+      unsigned QOpcodes1[] = { ARM::VST3LNq16b, ARM::VST3LNq32b };
+      return SelectVLDSTLane(Op, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
     }
 
     case Intrinsic::arm_neon_vst4lane: {
-      SDValue MemAddr, MemUpdate, MemOpc;
-      if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
-        return NULL;
-      VT = N->getOperand(3).getValueType();
-      if (VT.is64BitVector()) {
-        switch (VT.getSimpleVT().SimpleTy) {
-        default: llvm_unreachable("unhandled vst4lane type");
-        case MVT::v8i8:  Opc = ARM::VST4LNd8; break;
-        case MVT::v4i16: Opc = ARM::VST4LNd16; break;
-        case MVT::v2f32:
-        case MVT::v2i32: Opc = ARM::VST4LNd32; break;
-        }
-        SDValue Chain = N->getOperand(0);
-        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                                N->getOperand(3), N->getOperand(4),
-                                N->getOperand(5), N->getOperand(6),
-                                N->getOperand(7), Chain };
-        return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 9);
-      }
-      // Quad registers are handled by extracting subregs and then doing
-      // the store.
-      EVT RegVT;
-      unsigned Opc2 = 0;
-      switch (VT.getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled vst4lane type");
-      case MVT::v8i16:
-        Opc = ARM::VST4LNq16a;
-        Opc2 = ARM::VST4LNq16b;
-        RegVT = MVT::v4i16;
-        break;
-      case MVT::v4f32:
-        Opc = ARM::VST4LNq32a;
-        Opc2 = ARM::VST4LNq32b;
-        RegVT = MVT::v2f32;
-        break;
-      case MVT::v4i32:
-        Opc = ARM::VST4LNq32a;
-        Opc2 = ARM::VST4LNq32b;
-        RegVT = MVT::v2i32;
-        break;
-      }
-      SDValue Chain = N->getOperand(0);
-      unsigned Lane = cast<ConstantSDNode>(N->getOperand(7))->getZExtValue();
-      unsigned NumElts = RegVT.getVectorNumElements();
-      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
-
-      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(3));
-      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(4));
-      SDValue D2 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(5));
-      SDValue D3 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
-                                                  N->getOperand(6));
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1, D2, D3,
-                              getI32Imm(Lane % NumElts), Chain };
-      return CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
-                                    dl, MVT::Other, Ops, 9);
+      unsigned DOpcodes[] = { ARM::VST4LNd8, ARM::VST4LNd16, ARM::VST4LNd32 };
+      unsigned QOpcodes0[] = { ARM::VST4LNq16a, ARM::VST4LNq32a };
+      unsigned QOpcodes1[] = { ARM::VST4LNq16b, ARM::VST4LNq32b };
+      return SelectVLDSTLane(Op, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
     }
     }
   }

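The vst3lane and vst4lane cases above are now funneled through the common SelectVLDSTLane helper, driven by small tables of D-register and Q-register opcodes instead of per-type switch statements. The operand layout that code reads from the node (the address at operand 2, the vectors at operands 3 onward, the constant lane index last) corresponds to an intrinsic call along the lines of the sketch below; the overloaded intrinsic name, the element type, and the function name are illustrative assumptions, not taken from this commit.

declare void @llvm.arm.neon.vst4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)

define void @store_lane1(i8* %p, <8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
entry:
; stores lane 1 of each of the four vectors to %p; the lane index must be a constant
    call void @llvm.arm.neon.vst4lane.v8i8(i8* %p, <8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d, i32 1)
    ret void
}
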
Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.cpp?rev=84879&r1=84878&r2=84879&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.cpp Thu Oct 22 12:48:30 2009
@@ -392,8 +392,6 @@
 
   // We want to custom lower some of our intrinsics.
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
-  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
-  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
 
   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
@@ -1369,102 +1367,6 @@
   return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
 }
 
-static SDValue LowerNeonVLDIntrinsic(SDValue Op, SelectionDAG &DAG,
-                                     unsigned NumVecs) {
-  SDNode *Node = Op.getNode();
-  EVT VT = Node->getValueType(0);
-
-  // No expansion needed for 64-bit vectors.
-  if (VT.is64BitVector())
-    return SDValue();
-
-  // FIXME: We need to expand VLD3 and VLD4 of 128-bit vectors into separate
-  // operations to load the even and odd registers.
-  return SDValue();
-}
-
-static SDValue LowerNeonVSTIntrinsic(SDValue Op, SelectionDAG &DAG,
-                                     unsigned NumVecs) {
-  SDNode *Node = Op.getNode();
-  EVT VT = Node->getOperand(3).getValueType();
-
-  // No expansion needed for 64-bit vectors.
-  if (VT.is64BitVector())
-    return SDValue();
-
-  // FIXME: We need to expand VST3 and VST4 of 128-bit vectors into separate
-  // operations to store the even and odd registers.
-  return SDValue();
-}
-
-static SDValue LowerNeonVLDLaneIntrinsic(SDValue Op, SelectionDAG &DAG,
-                                         unsigned NumVecs) {
-  SDNode *Node = Op.getNode();
-  EVT VT = Node->getValueType(0);
-
-  if (!VT.is64BitVector())
-    return SDValue(); // unimplemented
-
-  // Change the lane number operand to be a TargetConstant; otherwise it
-  // will be legalized into a register.
-  ConstantSDNode *Lane = dyn_cast<ConstantSDNode>(Node->getOperand(NumVecs+3));
-  if (!Lane) {
-    assert(false && "vld lane number must be a constant");
-    return SDValue();
-  }
-  SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
-  Ops[NumVecs+3] = DAG.getTargetConstant(Lane->getZExtValue(), MVT::i32);
-  return DAG.UpdateNodeOperands(Op, &Ops[0], Ops.size());
-}
-
-static SDValue LowerNeonVSTLaneIntrinsic(SDValue Op, SelectionDAG &DAG,
-                                         unsigned NumVecs) {
-  SDNode *Node = Op.getNode();
-  EVT VT = Node->getOperand(3).getValueType();
-
-  if (!VT.is64BitVector())
-    return SDValue(); // unimplemented
-
-  // Change the lane number operand to be a TargetConstant; otherwise it
-  // will be legalized into a register.
-  ConstantSDNode *Lane = dyn_cast<ConstantSDNode>(Node->getOperand(NumVecs+3));
-  if (!Lane) {
-    assert(false && "vst lane number must be a constant");
-    return SDValue();
-  }
-  SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
-  Ops[NumVecs+3] = DAG.getTargetConstant(Lane->getZExtValue(), MVT::i32);
-  return DAG.UpdateNodeOperands(Op, &Ops[0], Ops.size());
-}
-
-SDValue
-ARMTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) {
-  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
-  switch (IntNo) {
-  case Intrinsic::arm_neon_vld3:
-    return LowerNeonVLDIntrinsic(Op, DAG, 3);
-  case Intrinsic::arm_neon_vld4:
-    return LowerNeonVLDIntrinsic(Op, DAG, 4);
-  case Intrinsic::arm_neon_vld2lane:
-    return LowerNeonVLDLaneIntrinsic(Op, DAG, 2);
-  case Intrinsic::arm_neon_vld3lane:
-    return LowerNeonVLDLaneIntrinsic(Op, DAG, 3);
-  case Intrinsic::arm_neon_vld4lane:
-    return LowerNeonVLDLaneIntrinsic(Op, DAG, 4);
-  case Intrinsic::arm_neon_vst3:
-    return LowerNeonVSTIntrinsic(Op, DAG, 3);
-  case Intrinsic::arm_neon_vst4:
-    return LowerNeonVSTIntrinsic(Op, DAG, 4);
-  case Intrinsic::arm_neon_vst2lane:
-    return LowerNeonVSTLaneIntrinsic(Op, DAG, 2);
-  case Intrinsic::arm_neon_vst3lane:
-    return LowerNeonVSTLaneIntrinsic(Op, DAG, 3);
-  case Intrinsic::arm_neon_vst4lane:
-    return LowerNeonVSTLaneIntrinsic(Op, DAG, 4);
-  default: return SDValue();    // Don't custom lower most intrinsics.
-  }
-}
-
 SDValue
 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
@@ -2458,8 +2360,11 @@
   assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
          "Only possible block sizes for VREV are: 16, 32, 64");
 
-  unsigned NumElts = VT.getVectorNumElements();
   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  if (EltSz == 64)
+    return false;
+
+  unsigned NumElts = VT.getVectorNumElements();
   unsigned BlockElts = M[0] + 1;
 
   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
@@ -2476,6 +2381,10 @@
 
 static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT,
                        unsigned &WhichResult) {
+  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  if (EltSz == 64)
+    return false;
+
   unsigned NumElts = VT.getVectorNumElements();
   WhichResult = (M[0] == 0 ? 0 : 1);
   for (unsigned i = 0; i < NumElts; i += 2) {
@@ -2488,6 +2397,10 @@
 
 static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT,
                        unsigned &WhichResult) {
+  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  if (EltSz == 64)
+    return false;
+
   unsigned NumElts = VT.getVectorNumElements();
   WhichResult = (M[0] == 0 ? 0 : 1);
   for (unsigned i = 0; i != NumElts; ++i) {
@@ -2496,7 +2409,7 @@
   }
 
   // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
-  if (VT.is64BitVector() && VT.getVectorElementType().getSizeInBits() == 32)
+  if (VT.is64BitVector() && EltSz == 32)
     return false;
 
   return true;
@@ -2504,6 +2417,10 @@
 
 static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT,
                        unsigned &WhichResult) {
+  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+  if (EltSz == 64)
+    return false;
+
   unsigned NumElts = VT.getVectorNumElements();
   WhichResult = (M[0] == 0 ? 0 : 1);
   unsigned Idx = WhichResult * NumElts / 2;
@@ -2515,7 +2432,7 @@
   }
 
   // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
-  if (VT.is64BitVector() && VT.getVectorElementType().getSizeInBits() == 32)
+  if (VT.is64BitVector() && EltSz == 32)
     return false;
 
   return true;
@@ -2793,18 +2710,10 @@
   DebugLoc dl = Op.getDebugLoc();
   SDValue Vec = Op.getOperand(0);
   SDValue Lane = Op.getOperand(1);
-
-  // FIXME: This is invalid for 8 and 16-bit elements - the information about
-  // sign / zero extension is lost!
-  Op = DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
-  Op = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Op, DAG.getValueType(VT));
-
-  if (VT.bitsLT(MVT::i32))
-    Op = DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
-  else if (VT.bitsGT(MVT::i32))
-    Op = DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op);
-
-  return Op;
+  assert(VT == MVT::i32 &&
+         Vec.getValueType().getVectorElementType().getSizeInBits() < 32 &&
+         "unexpected type for custom-lowering vector extract");
+  return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
 }
 
 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
@@ -2848,8 +2757,6 @@
   case ISD::RETURNADDR:    break;
   case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
   case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
-  case ISD::INTRINSIC_VOID:
-  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
   case ISD::BIT_CONVERT:   return ExpandBIT_CONVERT(Op.getNode(), DAG);
   case ISD::SHL:

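The EXTRACT_VECTOR_ELT change above drops the truncate/extend fixups and instead asserts that the custom hook is only reached when a sub-32-bit lane is being read out as an i32, which is what VGETLANEu produces directly. An illustrative IR case that extracts a narrow lane and widens it (not from this commit's tests; the function name is made up):

define i32 @lane_u8(<8 x i8> %v) {
entry:
; extract lane 3 of an i8 vector and zero-extend it to i32
    %e = extractelement <8 x i8> %v, i32 3
    %z = zext i8 %e to i32
    ret i32 %z
}
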
Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrInfo.td?rev=84879&r1=84878&r2=84879&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrInfo.td (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrInfo.td Thu Oct 22 12:48:30 2009
@@ -284,6 +284,10 @@
   return CurDAG->getTargetConstant(V, MVT::i32);
 }]>;
 
+/// imm0_31 predicate - True if the 32-bit immediate is in the range [0,31].
+def imm0_31 : Operand<i32>, PatLeaf<(imm), [{
+  return (int32_t)N->getZExtValue() < 32;
+}]>;
 
 // Define ARM specific addressing modes.
 
@@ -1009,6 +1013,24 @@
 
 // TODO: UXT(A){B|H}16
 
+def SBFX  : I<(outs GPR:$dst),
+              (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
+               AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iALUi,
+               "sbfx", " $dst, $src, $lsb, $width", "", []>,
+               Requires<[IsARM, HasV6T2]> {
+  let Inst{27-21} = 0b0111101;
+  let Inst{6-4}   = 0b101;
+}
+
+def UBFX  : I<(outs GPR:$dst),
+              (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
+               AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iALUi,
+               "ubfx", " $dst, $src, $lsb, $width", "", []>,
+               Requires<[IsARM, HasV6T2]> {
+  let Inst{27-21} = 0b0111111;
+  let Inst{6-4}   = 0b101;
+}
+
 //===----------------------------------------------------------------------===//
 //  Arithmetic Instructions.
 //

Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrThumb2.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrThumb2.td?rev=84879&r1=84878&r2=84879&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrThumb2.td (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrThumb2.td Thu Oct 22 12:48:30 2009
@@ -786,6 +786,12 @@
                 IIC_iALUi, "bfc", " $dst, $imm",
                 [(set GPR:$dst, (and GPR:$src, bf_inv_mask_imm:$imm))]>;
 
+def t2SBFX : T2I<(outs GPR:$dst), (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
+                 IIC_iALUi, "sbfx", " $dst, $src, $lsb, $width", []>;
+
+def t2UBFX : T2I<(outs GPR:$dst), (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
+                 IIC_iALUi, "ubfx", " $dst, $src, $lsb, $width", []>;
+
 // FIXME: A8.6.18  BFI - Bitfield insert (Encoding T1)
 
 defm t2ORN  : T2I_bin_irs<"orn", BinOpFrag<(or  node:$LHS, (not node:$RHS))>>;

Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMTargetMachine.cpp?rev=84879&r1=84878&r2=84879&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMTargetMachine.cpp (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMTargetMachine.cpp Thu Oct 22 12:48:30 2009
@@ -104,16 +104,18 @@
 bool ARMBaseTargetMachine::addPreSched2(PassManagerBase &PM,
                                         CodeGenOpt::Level OptLevel) {
   // FIXME: temporarily disabling load / store optimization pass for Thumb1.
-  if (OptLevel != CodeGenOpt::None && !Subtarget.isThumb1Only()) {
+  if (OptLevel != CodeGenOpt::None && !Subtarget.isThumb1Only())
     PM.add(createARMLoadStoreOptimizationPass());
-    PM.add(createIfConverterPass());
-  }
 
   return true;
 }
 
 bool ARMBaseTargetMachine::addPreEmitPass(PassManagerBase &PM,
                                           CodeGenOpt::Level OptLevel) {
+  // FIXME: temporarily disabling load / store optimization pass for Thumb1.
+  if (OptLevel != CodeGenOpt::None && !Subtarget.isThumb1Only())
+    PM.add(createIfConverterPass());
+
   if (Subtarget.isThumb2()) {
     PM.add(createThumb2ITBlockPass());
     PM.add(createThumb2SizeReductionPass());

Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/ifcvt5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/ifcvt5.ll?rev=84879&r1=84878&r2=84879&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/ifcvt5.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/ifcvt5.ll Thu Oct 22 12:48:30 2009
@@ -11,8 +11,7 @@
 
 define void @t1(i32 %a, i32 %b) {
 ; CHECK: t1:
-; CHECK: movge
-; CHECK: blge _foo
+; CHECK: ldmltfd sp!, {r7, pc}
 entry:
 	%tmp1 = icmp sgt i32 %a, 10		; <i1> [#uses=1]
 	br i1 %tmp1, label %cond_true, label %UnifiedReturnBlock

Copied: llvm/branches/Apple/Leela/test/CodeGen/ARM/sbfx.ll (from r84009, llvm/trunk/test/CodeGen/ARM/sbfx.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/sbfx.ll?p2=llvm/branches/Apple/Leela/test/CodeGen/ARM/sbfx.ll&p1=llvm/trunk/test/CodeGen/ARM/sbfx.ll&r1=84009&r2=84879&rev=84879&view=diff

==============================================================================
--- llvm/trunk/test/CodeGen/ARM/sbfx.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/sbfx.ll Thu Oct 22 12:48:30 2009
@@ -35,3 +35,13 @@
     %tmp2 = lshr i32 %tmp, 29
     ret i32 %tmp2
 }
+
+define i32 @f5(i32 %a) {
+entry:
+; CHECK: f5:
+; CHECK-NOT: sbfx
+; CHECK: bx
+    %tmp = shl i32 %a, 3
+    %tmp2 = ashr i32 %tmp, 1
+    ret i32 %tmp2
+}
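The new f5 case is a negative test: the left shift by 3 is larger than the arithmetic right shift by 1, so the sign-extended field ends up shifted left by 2 rather than sitting at bit 0, and a single sbfx cannot produce that value. For contrast, a shift pair whose field does land at bit 0 would be expected to select sbfx; a sketch, not part of the committed test:

define i32 @extract_low8(i32 %a) {
entry:
; shl 24 followed by ashr 24 sign-extends bits [7:0],
; i.e. sbfx r0, r0, #0, #8 on ARMv6T2 / Thumb2
    %tmp = shl i32 %a, 24
    %tmp2 = ashr i32 %tmp, 24
    ret i32 %tmp2
}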

More information about the llvm-branch-commits mailing list