[llvm-branch-commits] [llvm-branch] r83604 - in /llvm/branches/Apple/Leela: lib/Target/ARM/ARMISelDAGToDAG.cpp lib/Target/ARM/ARMInstrNEON.td lib/Target/ARM/NEONPreAllocPass.cpp test/CodeGen/ARM/vldlane.ll test/CodeGen/ARM/vraddhn.ll test/CodeGen/ARM/vrecpe.ll test/CodeGen/ARM/vrecps.ll test/CodeGen/ARM/vrhadd.ll test/CodeGen/ARM/vrshl.ll test/CodeGen/ARM/vrshrn.ll test/CodeGen/ARM/vrsqrte.ll test/CodeGen/ARM/vrsqrts.ll test/CodeGen/ARM/vrsubhn.ll test/CodeGen/ARM/vstlane.ll

Bill Wendling <isanbard at gmail.com>
Thu Oct 8 17:14:14 PDT 2009


Author: void
Date: Thu Oct  8 19:14:14 2009
New Revision: 83604

URL: http://llvm.org/viewvc/llvm-project?rev=83604&view=rev
Log:
$ svn merge -c 83585 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r83585 into '.':
U    test/CodeGen/ARM/vldlane.ll
U    lib/Target/ARM/ARMInstrNEON.td
U    lib/Target/ARM/NEONPreAllocPass.cpp
U    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 83590 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r83590 into '.':
G    test/CodeGen/ARM/vldlane.ll
G    lib/Target/ARM/ARMInstrNEON.td
G    lib/Target/ARM/NEONPreAllocPass.cpp
G    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 83595 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r83595 into '.':
U    test/CodeGen/ARM/vrsqrte.ll
U    test/CodeGen/ARM/vrhadd.ll
U    test/CodeGen/ARM/vrsubhn.ll
U    test/CodeGen/ARM/vrecps.ll
U    test/CodeGen/ARM/vraddhn.ll
U    test/CodeGen/ARM/vrshl.ll
U    test/CodeGen/ARM/vrsqrts.ll
U    test/CodeGen/ARM/vrecpe.ll
U    test/CodeGen/ARM/vrshrn.ll
$ svn merge -c 83596 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r83596 into '.':
U    test/CodeGen/ARM/vstlane.ll
G    lib/Target/ARM/ARMInstrNEON.td
G    lib/Target/ARM/NEONPreAllocPass.cpp
G    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 83598 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r83598 into '.':
G    test/CodeGen/ARM/vstlane.ll
G    lib/Target/ARM/ARMInstrNEON.td
G    lib/Target/ARM/NEONPreAllocPass.cpp
G    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 83600 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r83600 into '.':
G    test/CodeGen/ARM/vstlane.ll
G    lib/Target/ARM/ARMInstrNEON.td
G    lib/Target/ARM/NEONPreAllocPass.cpp
G    lib/Target/ARM/ARMISelDAGToDAG.cpp


Modified:
    llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp
    llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrNEON.td
    llvm/branches/Apple/Leela/lib/Target/ARM/NEONPreAllocPass.cpp
    llvm/branches/Apple/Leela/test/CodeGen/ARM/vldlane.ll
    llvm/branches/Apple/Leela/test/CodeGen/ARM/vraddhn.ll
    llvm/branches/Apple/Leela/test/CodeGen/ARM/vrecpe.ll
    llvm/branches/Apple/Leela/test/CodeGen/ARM/vrecps.ll
    llvm/branches/Apple/Leela/test/CodeGen/ARM/vrhadd.ll
    llvm/branches/Apple/Leela/test/CodeGen/ARM/vrshl.ll
    llvm/branches/Apple/Leela/test/CodeGen/ARM/vrshrn.ll
    llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsqrte.ll
    llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsqrts.ll
    llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsubhn.ll
    llvm/branches/Apple/Leela/test/CodeGen/ARM/vstlane.ll

Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp Thu Oct  8 19:14:14 2009
@@ -1576,39 +1576,156 @@
       SDValue MemAddr, MemUpdate, MemOpc;
       if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
         return NULL;
+      if (VT.is64BitVector()) {
+        switch (VT.getSimpleVT().SimpleTy) {
+        default: llvm_unreachable("unhandled vld3lane type");
+        case MVT::v8i8:  Opc = ARM::VLD3LNd8; break;
+        case MVT::v4i16: Opc = ARM::VLD3LNd16; break;
+        case MVT::v2f32:
+        case MVT::v2i32: Opc = ARM::VLD3LNd32; break;
+        }
+        SDValue Chain = N->getOperand(0);
+        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
+                                N->getOperand(3), N->getOperand(4),
+                                N->getOperand(5), N->getOperand(6), Chain };
+        return CurDAG->getMachineNode(Opc, dl, VT, VT, VT, MVT::Other, Ops, 8);
+      }
+      // Quad registers are handled by extracting subregs, doing the load,
+      // and then inserting the results as subregs.
+      EVT RegVT;
+      unsigned Opc2 = 0;
       switch (VT.getSimpleVT().SimpleTy) {
       default: llvm_unreachable("unhandled vld3lane type");
-      case MVT::v8i8:  Opc = ARM::VLD3LNd8; break;
-      case MVT::v4i16: Opc = ARM::VLD3LNd16; break;
-      case MVT::v2f32:
-      case MVT::v2i32: Opc = ARM::VLD3LNd32; break;
+      case MVT::v8i16:
+        Opc = ARM::VLD3LNq16a;
+        Opc2 = ARM::VLD3LNq16b;
+        RegVT = MVT::v4i16;
+        break;
+      case MVT::v4f32:
+        Opc = ARM::VLD3LNq32a;
+        Opc2 = ARM::VLD3LNq32b;
+        RegVT = MVT::v2f32;
+        break;
+      case MVT::v4i32:
+        Opc = ARM::VLD3LNq32a;
+        Opc2 = ARM::VLD3LNq32b;
+        RegVT = MVT::v2i32;
+        break;
       }
       SDValue Chain = N->getOperand(0);
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                              N->getOperand(3), N->getOperand(4),
-                              N->getOperand(5), N->getOperand(6), Chain };
-      return CurDAG->getMachineNode(Opc, dl, VT, VT, VT, MVT::Other, Ops, 8);
+      unsigned Lane = cast<ConstantSDNode>(N->getOperand(6))->getZExtValue();
+      unsigned NumElts = RegVT.getVectorNumElements();
+      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
+
+      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(3));
+      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(4));
+      SDValue D2 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(5));
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1, D2,
+                              getI32Imm(Lane % NumElts), Chain };
+      SDNode *VLdLn = CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
+                                             dl, RegVT, RegVT, RegVT,
+                                             MVT::Other, Ops, 8);
+      SDValue Q0 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                                 N->getOperand(3),
+                                                 SDValue(VLdLn, 0));
+      SDValue Q1 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                                 N->getOperand(4),
+                                                 SDValue(VLdLn, 1));
+      SDValue Q2 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                                 N->getOperand(5),
+                                                 SDValue(VLdLn, 2));
+      Chain = SDValue(VLdLn, 3);
+      ReplaceUses(SDValue(N, 0), Q0);
+      ReplaceUses(SDValue(N, 1), Q1);
+      ReplaceUses(SDValue(N, 2), Q2);
+      ReplaceUses(SDValue(N, 3), Chain);
+      return NULL;
     }
 
     case Intrinsic::arm_neon_vld4lane: {
       SDValue MemAddr, MemUpdate, MemOpc;
       if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
         return NULL;
+      if (VT.is64BitVector()) {
+        switch (VT.getSimpleVT().SimpleTy) {
+        default: llvm_unreachable("unhandled vld4lane type");
+        case MVT::v8i8:  Opc = ARM::VLD4LNd8; break;
+        case MVT::v4i16: Opc = ARM::VLD4LNd16; break;
+        case MVT::v2f32:
+        case MVT::v2i32: Opc = ARM::VLD4LNd32; break;
+        }
+        SDValue Chain = N->getOperand(0);
+        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
+                                N->getOperand(3), N->getOperand(4),
+                                N->getOperand(5), N->getOperand(6),
+                                N->getOperand(7), Chain };
+        std::vector<EVT> ResTys(4, VT);
+        ResTys.push_back(MVT::Other);
+        return CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 9);
+      }
+      // Quad registers are handled by extracting subregs, doing the load,
+      // and then inserting the results as subregs.
+      EVT RegVT;
+      unsigned Opc2 = 0;
       switch (VT.getSimpleVT().SimpleTy) {
       default: llvm_unreachable("unhandled vld4lane type");
-      case MVT::v8i8:  Opc = ARM::VLD4LNd8; break;
-      case MVT::v4i16: Opc = ARM::VLD4LNd16; break;
-      case MVT::v2f32:
-      case MVT::v2i32: Opc = ARM::VLD4LNd32; break;
+      case MVT::v8i16:
+        Opc = ARM::VLD4LNq16a;
+        Opc2 = ARM::VLD4LNq16b;
+        RegVT = MVT::v4i16;
+        break;
+      case MVT::v4f32:
+        Opc = ARM::VLD4LNq32a;
+        Opc2 = ARM::VLD4LNq32b;
+        RegVT = MVT::v2f32;
+        break;
+      case MVT::v4i32:
+        Opc = ARM::VLD4LNq32a;
+        Opc2 = ARM::VLD4LNq32b;
+        RegVT = MVT::v2i32;
+        break;
       }
       SDValue Chain = N->getOperand(0);
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                              N->getOperand(3), N->getOperand(4),
-                              N->getOperand(5), N->getOperand(6),
-                              N->getOperand(7), Chain };
-      std::vector<EVT> ResTys(4, VT);
+      unsigned Lane = cast<ConstantSDNode>(N->getOperand(7))->getZExtValue();
+      unsigned NumElts = RegVT.getVectorNumElements();
+      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
+
+      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(3));
+      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(4));
+      SDValue D2 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(5));
+      SDValue D3 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(6));
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1, D2, D3,
+                              getI32Imm(Lane % NumElts), Chain };
+      std::vector<EVT> ResTys(4, RegVT);
       ResTys.push_back(MVT::Other);
-      return CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 9);
+      SDNode *VLdLn = CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
+                                             dl, ResTys, Ops, 9);
+      SDValue Q0 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                                 N->getOperand(3),
+                                                 SDValue(VLdLn, 0));
+      SDValue Q1 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                                 N->getOperand(4),
+                                                 SDValue(VLdLn, 1));
+      SDValue Q2 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                                 N->getOperand(5),
+                                                 SDValue(VLdLn, 2));
+      SDValue Q3 = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
+                                                 N->getOperand(6),
+                                                 SDValue(VLdLn, 3));
+      Chain = SDValue(VLdLn, 4);
+      ReplaceUses(SDValue(N, 0), Q0);
+      ReplaceUses(SDValue(N, 1), Q1);
+      ReplaceUses(SDValue(N, 2), Q2);
+      ReplaceUses(SDValue(N, 3), Q3);
+      ReplaceUses(SDValue(N, 4), Chain);
+      return NULL;
     }
 
     case Intrinsic::arm_neon_vst2: {
@@ -1793,55 +1910,175 @@
       SDValue MemAddr, MemUpdate, MemOpc;
       if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
         return NULL;
-      switch (N->getOperand(3).getValueType().getSimpleVT().SimpleTy) {
+      VT = N->getOperand(3).getValueType();
+      if (VT.is64BitVector()) {
+        switch (VT.getSimpleVT().SimpleTy) {
+        default: llvm_unreachable("unhandled vst2lane type");
+        case MVT::v8i8:  Opc = ARM::VST2LNd8; break;
+        case MVT::v4i16: Opc = ARM::VST2LNd16; break;
+        case MVT::v2f32:
+        case MVT::v2i32: Opc = ARM::VST2LNd32; break;
+        }
+        SDValue Chain = N->getOperand(0);
+        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
+                                N->getOperand(3), N->getOperand(4),
+                                N->getOperand(5), Chain };
+        return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7);
+      }
+      // Quad registers are handled by extracting subregs and then doing
+      // the store.
+      EVT RegVT;
+      unsigned Opc2 = 0;
+      switch (VT.getSimpleVT().SimpleTy) {
       default: llvm_unreachable("unhandled vst2lane type");
-      case MVT::v8i8:  Opc = ARM::VST2LNd8; break;
-      case MVT::v4i16: Opc = ARM::VST2LNd16; break;
-      case MVT::v2f32:
-      case MVT::v2i32: Opc = ARM::VST2LNd32; break;
+      case MVT::v8i16:
+        Opc = ARM::VST2LNq16a;
+        Opc2 = ARM::VST2LNq16b;
+        RegVT = MVT::v4i16;
+        break;
+      case MVT::v4f32:
+        Opc = ARM::VST2LNq32a;
+        Opc2 = ARM::VST2LNq32b;
+        RegVT = MVT::v2f32;
+        break;
+      case MVT::v4i32:
+        Opc = ARM::VST2LNq32a;
+        Opc2 = ARM::VST2LNq32b;
+        RegVT = MVT::v2i32;
+        break;
       }
       SDValue Chain = N->getOperand(0);
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                              N->getOperand(3), N->getOperand(4),
-                              N->getOperand(5), Chain };
-      return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7);
+      unsigned Lane = cast<ConstantSDNode>(N->getOperand(5))->getZExtValue();
+      unsigned NumElts = RegVT.getVectorNumElements();
+      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
+
+      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(3));
+      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(4));
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1,
+                              getI32Imm(Lane % NumElts), Chain };
+      return CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
+                                    dl, MVT::Other, Ops, 7);
     }
 
     case Intrinsic::arm_neon_vst3lane: {
       SDValue MemAddr, MemUpdate, MemOpc;
       if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
         return NULL;
-      switch (N->getOperand(3).getValueType().getSimpleVT().SimpleTy) {
+      VT = N->getOperand(3).getValueType();
+      if (VT.is64BitVector()) {
+        switch (VT.getSimpleVT().SimpleTy) {
+        default: llvm_unreachable("unhandled vst3lane type");
+        case MVT::v8i8:  Opc = ARM::VST3LNd8; break;
+        case MVT::v4i16: Opc = ARM::VST3LNd16; break;
+        case MVT::v2f32:
+        case MVT::v2i32: Opc = ARM::VST3LNd32; break;
+        }
+        SDValue Chain = N->getOperand(0);
+        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
+                                N->getOperand(3), N->getOperand(4),
+                                N->getOperand(5), N->getOperand(6), Chain };
+        return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 8);
+      }
+      // Quad registers are handled by extracting subregs and then doing
+      // the store.
+      EVT RegVT;
+      unsigned Opc2 = 0;
+      switch (VT.getSimpleVT().SimpleTy) {
       default: llvm_unreachable("unhandled vst3lane type");
-      case MVT::v8i8:  Opc = ARM::VST3LNd8; break;
-      case MVT::v4i16: Opc = ARM::VST3LNd16; break;
-      case MVT::v2f32:
-      case MVT::v2i32: Opc = ARM::VST3LNd32; break;
+      case MVT::v8i16:
+        Opc = ARM::VST3LNq16a;
+        Opc2 = ARM::VST3LNq16b;
+        RegVT = MVT::v4i16;
+        break;
+      case MVT::v4f32:
+        Opc = ARM::VST3LNq32a;
+        Opc2 = ARM::VST3LNq32b;
+        RegVT = MVT::v2f32;
+        break;
+      case MVT::v4i32:
+        Opc = ARM::VST3LNq32a;
+        Opc2 = ARM::VST3LNq32b;
+        RegVT = MVT::v2i32;
+        break;
       }
       SDValue Chain = N->getOperand(0);
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                              N->getOperand(3), N->getOperand(4),
-                              N->getOperand(5), N->getOperand(6), Chain };
-      return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 8);
+      unsigned Lane = cast<ConstantSDNode>(N->getOperand(6))->getZExtValue();
+      unsigned NumElts = RegVT.getVectorNumElements();
+      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
+
+      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(3));
+      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(4));
+      SDValue D2 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(5));
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1, D2,
+                              getI32Imm(Lane % NumElts), Chain };
+      return CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
+                                    dl, MVT::Other, Ops, 8);
     }
 
     case Intrinsic::arm_neon_vst4lane: {
       SDValue MemAddr, MemUpdate, MemOpc;
       if (!SelectAddrMode6(Op, N->getOperand(2), MemAddr, MemUpdate, MemOpc))
         return NULL;
-      switch (N->getOperand(3).getValueType().getSimpleVT().SimpleTy) {
+      VT = N->getOperand(3).getValueType();
+      if (VT.is64BitVector()) {
+        switch (VT.getSimpleVT().SimpleTy) {
+        default: llvm_unreachable("unhandled vst4lane type");
+        case MVT::v8i8:  Opc = ARM::VST4LNd8; break;
+        case MVT::v4i16: Opc = ARM::VST4LNd16; break;
+        case MVT::v2f32:
+        case MVT::v2i32: Opc = ARM::VST4LNd32; break;
+        }
+        SDValue Chain = N->getOperand(0);
+        const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
+                                N->getOperand(3), N->getOperand(4),
+                                N->getOperand(5), N->getOperand(6),
+                                N->getOperand(7), Chain };
+        return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 9);
+      }
+      // Quad registers are handled by extracting subregs and then doing
+      // the store.
+      EVT RegVT;
+      unsigned Opc2 = 0;
+      switch (VT.getSimpleVT().SimpleTy) {
       default: llvm_unreachable("unhandled vst4lane type");
-      case MVT::v8i8:  Opc = ARM::VST4LNd8; break;
-      case MVT::v4i16: Opc = ARM::VST4LNd16; break;
-      case MVT::v2f32:
-      case MVT::v2i32: Opc = ARM::VST4LNd32; break;
+      case MVT::v8i16:
+        Opc = ARM::VST4LNq16a;
+        Opc2 = ARM::VST4LNq16b;
+        RegVT = MVT::v4i16;
+        break;
+      case MVT::v4f32:
+        Opc = ARM::VST4LNq32a;
+        Opc2 = ARM::VST4LNq32b;
+        RegVT = MVT::v2f32;
+        break;
+      case MVT::v4i32:
+        Opc = ARM::VST4LNq32a;
+        Opc2 = ARM::VST4LNq32b;
+        RegVT = MVT::v2i32;
+        break;
       }
       SDValue Chain = N->getOperand(0);
-      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
-                              N->getOperand(3), N->getOperand(4),
-                              N->getOperand(5), N->getOperand(6),
-                              N->getOperand(7), Chain };
-      return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 9);
+      unsigned Lane = cast<ConstantSDNode>(N->getOperand(7))->getZExtValue();
+      unsigned NumElts = RegVT.getVectorNumElements();
+      int SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
+
+      SDValue D0 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(3));
+      SDValue D1 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(4));
+      SDValue D2 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(5));
+      SDValue D3 = CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
+                                                  N->getOperand(6));
+      const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, D0, D1, D2, D3,
+                              getI32Imm(Lane % NumElts), Chain };
+      return CurDAG->getMachineNode((Lane < NumElts) ? Opc : Opc2,
+                                    dl, MVT::Other, Ops, 9);
     }
     }
   }
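
For orientation: the quad-register paths added above (vld3/vld4/vst2/vst3/vst4 lane) all hinge on one small computation to choose between the "a" and "b" instruction forms and the two D subregisters of a Q register. A minimal standalone sketch, with illustrative values (not part of the commit):

    // Sketch of the lane split used above; Lane/NumElts values are hypothetical.
    unsigned Lane = 5;                  // lane into the Q register (e.g. v8i16)
    unsigned NumElts = 4;               // RegVT.getVectorNumElements() for v4i16
    bool Odd = Lane >= NumElts;         // true: use ARM::DSUBREG_1 and Opc2 (the "b" form)
    unsigned SubLane = Lane % NumElts;  // lane handed to the D-register instruction
    // Here Odd is true and SubLane is 1: lane 5 of the Q register becomes
    // lane 1 of the odd D subregister, so e.g. VLD3LNq16b would be selected.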

Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrNEON.td?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrNEON.td (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrNEON.td Thu Oct  8 19:14:14 2009
@@ -286,7 +286,7 @@
 def VLD2LNq32b: VLD2LN<0b1001, "vld2.32">;
 
 //   VLD3LN   : Vector Load (single 3-element structure to one lane)
-class VLD3LND<bits<4> op11_8, string OpcodeStr>
+class VLD3LN<bits<4> op11_8, string OpcodeStr>
   : NLdSt<1,0b10,op11_8,0b0000, (outs DPR:$dst1, DPR:$dst2, DPR:$dst3),
           (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3,
           nohash_imm:$lane), IIC_VLD3,
@@ -294,12 +294,20 @@
           "\t\\{$dst1[$lane],$dst2[$lane],$dst3[$lane]\\}, $addr"),
           "$src1 = $dst1, $src2 = $dst2, $src3 = $dst3", []>;
 
-def VLD3LNd8  : VLD3LND<0b0010, "vld3.8">;
-def VLD3LNd16 : VLD3LND<0b0110, "vld3.16">;
-def VLD3LNd32 : VLD3LND<0b1010, "vld3.32">;
+def VLD3LNd8  : VLD3LN<0b0010, "vld3.8">;
+def VLD3LNd16 : VLD3LN<0b0110, "vld3.16">;
+def VLD3LNd32 : VLD3LN<0b1010, "vld3.32">;
+
+// vld3 to double-spaced even registers.
+def VLD3LNq16a: VLD3LN<0b0110, "vld3.16">;
+def VLD3LNq32a: VLD3LN<0b1010, "vld3.32">;
+
+// vld3 to double-spaced odd registers.
+def VLD3LNq16b: VLD3LN<0b0110, "vld3.16">;
+def VLD3LNq32b: VLD3LN<0b1010, "vld3.32">;
 
 //   VLD4LN   : Vector Load (single 4-element structure to one lane)
-class VLD4LND<bits<4> op11_8, string OpcodeStr>
+class VLD4LN<bits<4> op11_8, string OpcodeStr>
   : NLdSt<1,0b10,op11_8,0b0000,
           (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
           (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
@@ -308,9 +316,17 @@
           "\t\\{$dst1[$lane],$dst2[$lane],$dst3[$lane],$dst4[$lane]\\}, $addr"),
           "$src1 = $dst1, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4", []>;
 
-def VLD4LNd8  : VLD4LND<0b0011, "vld4.8">;
-def VLD4LNd16 : VLD4LND<0b0111, "vld4.16">;
-def VLD4LNd32 : VLD4LND<0b1011, "vld4.32">;
+def VLD4LNd8  : VLD4LN<0b0011, "vld4.8">;
+def VLD4LNd16 : VLD4LN<0b0111, "vld4.16">;
+def VLD4LNd32 : VLD4LN<0b1011, "vld4.32">;
+
+// vld4 to double-spaced even registers.
+def VLD4LNq16a: VLD4LN<0b0111, "vld4.16">;
+def VLD4LNq32a: VLD4LN<0b1011, "vld4.32">;
+
+// vld4 to double-spaced odd registers.
+def VLD4LNq16b: VLD4LN<0b0111, "vld4.16">;
+def VLD4LNq32b: VLD4LN<0b1011, "vld4.32">;
 
 //   VLD1DUP  : Vector Load (single element to all lanes)
 //   VLD2DUP  : Vector Load (single 2-element structure to all lanes)
@@ -433,31 +449,47 @@
 //   FIXME: Not yet implemented.
 
 //   VST2LN   : Vector Store (single 2-element structure from one lane)
-class VST2LND<bits<4> op11_8, string OpcodeStr>
+class VST2LN<bits<4> op11_8, string OpcodeStr>
   : NLdSt<1,0b00,op11_8,0b0000, (outs),
           (ins addrmode6:$addr, DPR:$src1, DPR:$src2, nohash_imm:$lane),
           IIC_VST,
           !strconcat(OpcodeStr, "\t\\{$src1[$lane],$src2[$lane]\\}, $addr"),
           "", []>;
 
-def VST2LNd8  : VST2LND<0b0000, "vst2.8">;
-def VST2LNd16 : VST2LND<0b0100, "vst2.16">;
-def VST2LNd32 : VST2LND<0b1000, "vst2.32">;
+def VST2LNd8  : VST2LN<0b0000, "vst2.8">;
+def VST2LNd16 : VST2LN<0b0100, "vst2.16">;
+def VST2LNd32 : VST2LN<0b1000, "vst2.32">;
+
+// vst2 to double-spaced even registers.
+def VST2LNq16a: VST2LN<0b0100, "vst2.16">;
+def VST2LNq32a: VST2LN<0b1000, "vst2.32">;
+
+// vst2 to double-spaced odd registers.
+def VST2LNq16b: VST2LN<0b0100, "vst2.16">;
+def VST2LNq32b: VST2LN<0b1000, "vst2.32">;
 
 //   VST3LN   : Vector Store (single 3-element structure from one lane)
-class VST3LND<bits<4> op11_8, string OpcodeStr>
+class VST3LN<bits<4> op11_8, string OpcodeStr>
   : NLdSt<1,0b00,op11_8,0b0000, (outs),
           (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3,
            nohash_imm:$lane), IIC_VST,
           !strconcat(OpcodeStr,
           "\t\\{$src1[$lane],$src2[$lane],$src3[$lane]\\}, $addr"), "", []>;
 
-def VST3LNd8  : VST3LND<0b0010, "vst3.8">;
-def VST3LNd16 : VST3LND<0b0110, "vst3.16">;
-def VST3LNd32 : VST3LND<0b1010, "vst3.32">;
+def VST3LNd8  : VST3LN<0b0010, "vst3.8">;
+def VST3LNd16 : VST3LN<0b0110, "vst3.16">;
+def VST3LNd32 : VST3LN<0b1010, "vst3.32">;
+
+// vst3 to double-spaced even registers.
+def VST3LNq16a: VST3LN<0b0110, "vst3.16">;
+def VST3LNq32a: VST3LN<0b1010, "vst3.32">;
+
+// vst3 to double-spaced odd registers.
+def VST3LNq16b: VST3LN<0b0110, "vst3.16">;
+def VST3LNq32b: VST3LN<0b1010, "vst3.32">;
 
 //   VST4LN   : Vector Store (single 4-element structure from one lane)
-class VST4LND<bits<4> op11_8, string OpcodeStr>
+class VST4LN<bits<4> op11_8, string OpcodeStr>
   : NLdSt<1,0b00,op11_8,0b0000, (outs),
           (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
            nohash_imm:$lane), IIC_VST,
@@ -465,9 +497,18 @@
           "\t\\{$src1[$lane],$src2[$lane],$src3[$lane],$src4[$lane]\\}, $addr"),
           "", []>;
 
-def VST4LNd8  : VST4LND<0b0011, "vst4.8">;
-def VST4LNd16 : VST4LND<0b0111, "vst4.16">;
-def VST4LNd32 : VST4LND<0b1011, "vst4.32">;
+def VST4LNd8  : VST4LN<0b0011, "vst4.8">;
+def VST4LNd16 : VST4LN<0b0111, "vst4.16">;
+def VST4LNd32 : VST4LN<0b1011, "vst4.32">;
+
+// vst4 to double-spaced even registers.
+def VST4LNq16a: VST4LN<0b0111, "vst4.16">;
+def VST4LNq32a: VST4LN<0b1011, "vst4.32">;
+
+// vst4 to double-spaced odd registers.
+def VST4LNq16b: VST4LN<0b0111, "vst4.16">;
+def VST4LNq32b: VST4LN<0b1011, "vst4.32">;
+
 } // mayStore = 1, hasExtraSrcRegAllocReq = 1
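
For orientation: the new q16a/q16b (and q32a/q32b) defs reuse the encodings of the existing D-register forms; what distinguishes them is only the register-allocation constraint applied later by NEONPreAllocPass. As an illustrative rendering, derived from the asm string in the VLD3LN class above (registers and base address hypothetical), a VLD3LNq16b chosen for lane 5 of a v8i16 would print roughly as:

    vld3.16 {d1[1], d3[1], d5[1]}, [r0]  @ odd halves of q0-q2; lane 5 % 4 == 1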
 
 

Modified: llvm/branches/Apple/Leela/lib/Target/ARM/NEONPreAllocPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/NEONPreAllocPass.cpp?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/NEONPreAllocPass.cpp (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/NEONPreAllocPass.cpp Thu Oct  8 19:14:14 2009
@@ -57,6 +57,13 @@
     NumRegs = 2;
     return true;
 
+  case ARM::VLD2q8:
+  case ARM::VLD2q16:
+  case ARM::VLD2q32:
+    FirstOpnd = 0;
+    NumRegs = 4;
+    return true;
+
   case ARM::VLD2LNq16a:
   case ARM::VLD2LNq32a:
     FirstOpnd = 0;
@@ -73,13 +80,6 @@
     Stride = 2;
     return true;
 
-  case ARM::VLD2q8:
-  case ARM::VLD2q16:
-  case ARM::VLD2q32:
-    FirstOpnd = 0;
-    NumRegs = 4;
-    return true;
-
   case ARM::VLD3d8:
   case ARM::VLD3d16:
   case ARM::VLD3d32:
@@ -109,6 +109,22 @@
     Stride = 2;
     return true;
 
+  case ARM::VLD3LNq16a:
+  case ARM::VLD3LNq32a:
+    FirstOpnd = 0;
+    NumRegs = 3;
+    Offset = 0;
+    Stride = 2;
+    return true;
+
+  case ARM::VLD3LNq16b:
+  case ARM::VLD3LNq32b:
+    FirstOpnd = 0;
+    NumRegs = 3;
+    Offset = 1;
+    Stride = 2;
+    return true;
+
   case ARM::VLD4d8:
   case ARM::VLD4d16:
   case ARM::VLD4d32:
@@ -138,6 +154,22 @@
     Stride = 2;
     return true;
 
+  case ARM::VLD4LNq16a:
+  case ARM::VLD4LNq32a:
+    FirstOpnd = 0;
+    NumRegs = 4;
+    Offset = 0;
+    Stride = 2;
+    return true;
+
+  case ARM::VLD4LNq16b:
+  case ARM::VLD4LNq32b:
+    FirstOpnd = 0;
+    NumRegs = 4;
+    Offset = 1;
+    Stride = 2;
+    return true;
+
   case ARM::VST2d8:
   case ARM::VST2d16:
   case ARM::VST2d32:
@@ -156,6 +188,22 @@
     NumRegs = 4;
     return true;
 
+  case ARM::VST2LNq16a:
+  case ARM::VST2LNq32a:
+    FirstOpnd = 3;
+    NumRegs = 2;
+    Offset = 0;
+    Stride = 2;
+    return true;
+
+  case ARM::VST2LNq16b:
+  case ARM::VST2LNq32b:
+    FirstOpnd = 3;
+    NumRegs = 2;
+    Offset = 1;
+    Stride = 2;
+    return true;
+
   case ARM::VST3d8:
   case ARM::VST3d16:
   case ARM::VST3d32:
@@ -185,6 +233,22 @@
     Stride = 2;
     return true;
 
+  case ARM::VST3LNq16a:
+  case ARM::VST3LNq32a:
+    FirstOpnd = 3;
+    NumRegs = 3;
+    Offset = 0;
+    Stride = 2;
+    return true;
+
+  case ARM::VST3LNq16b:
+  case ARM::VST3LNq32b:
+    FirstOpnd = 3;
+    NumRegs = 3;
+    Offset = 1;
+    Stride = 2;
+    return true;
+
   case ARM::VST4d8:
   case ARM::VST4d16:
   case ARM::VST4d32:
@@ -214,6 +278,22 @@
     Stride = 2;
     return true;
 
+  case ARM::VST4LNq16a:
+  case ARM::VST4LNq32a:
+    FirstOpnd = 3;
+    NumRegs = 4;
+    Offset = 0;
+    Stride = 2;
+    return true;
+
+  case ARM::VST4LNq16b:
+  case ARM::VST4LNq32b:
+    FirstOpnd = 3;
+    NumRegs = 4;
+    Offset = 1;
+    Stride = 2;
+    return true;
+
   case ARM::VTBL2:
     FirstOpnd = 1;
     NumRegs = 2;
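
A minimal sketch of how the fields set above translate into concrete D-register assignments (the surrounding pass logic is elided, and "BaseDReg" is a hypothetical name for the first register the pass picks):

    // NEONPreAllocPass constraint, e.g. for the VLD3LNq*b cases above:
    unsigned FirstOpnd = 0, NumRegs = 3, Offset = 1, Stride = 2;
    // Operand FirstOpnd+i must be assigned D(BaseDReg + Offset + i*Stride);
    // with a base of D0, Offset=1, Stride=2 that is D1, D3, D5 -- the odd
    // halves of Q0-Q2, matching the "b" (double-spaced odd) instruction forms.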

Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/vldlane.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/vldlane.ll?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/vldlane.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/vldlane.ll Thu Oct  8 19:14:14 2009
@@ -100,6 +100,10 @@
 %struct.__neon_int32x2x3_t = type { <2 x i32>, <2 x i32>, <2 x i32> }
 %struct.__neon_float32x2x3_t = type { <2 x float>, <2 x float>, <2 x float> }
 
+%struct.__neon_int16x8x3_t = type { <8 x i16>, <8 x i16>, <8 x i16> }
+%struct.__neon_int32x4x3_t = type { <4 x i32>, <4 x i32>, <4 x i32> }
+%struct.__neon_float32x4x3_t = type { <4 x float>, <4 x float>, <4 x float> }
+
 define <8 x i8> @vld3lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vld3lanei8:
 ;CHECK: vld3.8
@@ -152,16 +156,63 @@
 	ret <2 x float> %tmp7
 }
 
+define <8 x i16> @vld3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+;CHECK: vld3laneQi16:
+;CHECK: vld3.16
+	%tmp1 = load <8 x i16>* %B
+	%tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
+        %tmp3 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 0
+        %tmp4 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 1
+        %tmp5 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 2
+        %tmp6 = add <8 x i16> %tmp3, %tmp4
+        %tmp7 = add <8 x i16> %tmp5, %tmp6
+	ret <8 x i16> %tmp7
+}
+
+define <4 x i32> @vld3laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+;CHECK: vld3laneQi32:
+;CHECK: vld3.32
+	%tmp1 = load <4 x i32>* %B
+	%tmp2 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 3)
+        %tmp3 = extractvalue %struct.__neon_int32x4x3_t %tmp2, 0
+        %tmp4 = extractvalue %struct.__neon_int32x4x3_t %tmp2, 1
+        %tmp5 = extractvalue %struct.__neon_int32x4x3_t %tmp2, 2
+        %tmp6 = add <4 x i32> %tmp3, %tmp4
+        %tmp7 = add <4 x i32> %tmp5, %tmp6
+	ret <4 x i32> %tmp7
+}
+
+define <4 x float> @vld3laneQf(float* %A, <4 x float>* %B) nounwind {
+;CHECK: vld3laneQf:
+;CHECK: vld3.32
+	%tmp1 = load <4 x float>* %B
+	%tmp2 = call %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+        %tmp3 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 0
+        %tmp4 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 1
+        %tmp5 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 2
+        %tmp6 = add <4 x float> %tmp3, %tmp4
+        %tmp7 = add <4 x float> %tmp5, %tmp6
+	ret <4 x float> %tmp7
+}
+
 declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind readonly
 declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind readonly
 declare %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind readonly
 declare %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32) nounwind readonly
 
+declare %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind readonly
+declare %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind readonly
+declare %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32) nounwind readonly
+
 %struct.__neon_int8x8x4_t = type { <8 x i8>,  <8 x i8>,  <8 x i8>,  <8 x i8> }
 %struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
 %struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
 %struct.__neon_float32x2x4_t = type { <2 x float>, <2 x float>, <2 x float>, <2 x float> }
 
+%struct.__neon_int16x8x4_t = type { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }
+%struct.__neon_int32x4x4_t = type { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> }
+%struct.__neon_float32x4x4_t = type { <4 x float>, <4 x float>, <4 x float>, <4 x float> }
+
 define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vld4lanei8:
 ;CHECK: vld4.8
@@ -222,7 +273,56 @@
 	ret <2 x float> %tmp9
 }
 
+define <8 x i16> @vld4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+;CHECK: vld4laneQi16:
+;CHECK: vld4.16
+	%tmp1 = load <8 x i16>* %B
+	%tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
+        %tmp3 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 0
+        %tmp4 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 1
+        %tmp5 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 2
+        %tmp6 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 3
+        %tmp7 = add <8 x i16> %tmp3, %tmp4
+        %tmp8 = add <8 x i16> %tmp5, %tmp6
+        %tmp9 = add <8 x i16> %tmp7, %tmp8
+	ret <8 x i16> %tmp9
+}
+
+define <4 x i32> @vld4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+;CHECK: vld4laneQi32:
+;CHECK: vld4.32
+	%tmp1 = load <4 x i32>* %B
+	%tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 1)
+        %tmp3 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 0
+        %tmp4 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 1
+        %tmp5 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 2
+        %tmp6 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 3
+        %tmp7 = add <4 x i32> %tmp3, %tmp4
+        %tmp8 = add <4 x i32> %tmp5, %tmp6
+        %tmp9 = add <4 x i32> %tmp7, %tmp8
+	ret <4 x i32> %tmp9
+}
+
+define <4 x float> @vld4laneQf(float* %A, <4 x float>* %B) nounwind {
+;CHECK: vld4laneQf:
+;CHECK: vld4.32
+	%tmp1 = load <4 x float>* %B
+	%tmp2 = call %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+        %tmp3 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 0
+        %tmp4 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 1
+        %tmp5 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 2
+        %tmp6 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 3
+        %tmp7 = add <4 x float> %tmp3, %tmp4
+        %tmp8 = add <4 x float> %tmp5, %tmp6
+        %tmp9 = add <4 x float> %tmp7, %tmp8
+	ret <4 x float> %tmp9
+}
+
 declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind readonly
 declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind readonly
 declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind readonly
 declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind readonly
+
+declare %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind readonly
+declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) nounwind readonly

Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/vraddhn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/vraddhn.ll?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/vraddhn.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/vraddhn.ll Thu Oct  8 19:14:14 2009
@@ -1,9 +1,8 @@
-; RUN: llc < %s -march=arm -mattr=+neon > %t
-; RUN: grep {vraddhn\\.i16} %t | count 1
-; RUN: grep {vraddhn\\.i32} %t | count 1
-; RUN: grep {vraddhn\\.i64} %t | count 1
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
 define <8 x i8> @vraddhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK: vraddhni16:
+;CHECK: vraddhn.i16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
@@ -11,6 +10,8 @@
 }
 
 define <4 x i16> @vraddhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK: vraddhni32:
+;CHECK: vraddhn.i32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
@@ -18,6 +19,8 @@
 }
 
 define <2 x i32> @vraddhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK: vraddhni64:
+;CHECK: vraddhn.i64
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i64>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
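
The RUN-line changes in this and the following tests replace pattern-grepping with FileCheck: each ;CHECK function label pins the subsequent instruction check to that function's output, rather than merely counting occurrences file-wide. The RUN line above corresponds to an invocation along these lines (filename substituted for %s):

    llc < vraddhn.ll -march=arm -mattr=+neon | FileCheck vraddhn.ll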

Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/vrecpe.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/vrecpe.ll?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/vrecpe.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/vrecpe.ll Thu Oct  8 19:14:14 2009
@@ -1,26 +1,32 @@
-; RUN: llc < %s -march=arm -mattr=+neon > %t
-; RUN: grep {vrecpe\\.u32} %t | count 2
-; RUN: grep {vrecpe\\.f32} %t | count 2
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
 define <2 x i32> @vrecpei32(<2 x i32>* %A) nounwind {
+;CHECK: vrecpei32:
+;CHECK: vrecpe.u32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp2
 }
 
 define <4 x i32> @vrecpeQi32(<4 x i32>* %A) nounwind {
+;CHECK: vrecpeQi32:
+;CHECK: vrecpe.u32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp2
 }
 
 define <2 x float> @vrecpef32(<2 x float>* %A) nounwind {
+;CHECK: vrecpef32:
+;CHECK: vrecpe.f32
 	%tmp1 = load <2 x float>* %A
 	%tmp2 = call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %tmp1)
 	ret <2 x float> %tmp2
 }
 
 define <4 x float> @vrecpeQf32(<4 x float>* %A) nounwind {
+;CHECK: vrecpeQf32:
+;CHECK: vrecpe.f32
 	%tmp1 = load <4 x float>* %A
 	%tmp2 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %tmp1)
 	ret <4 x float> %tmp2

Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/vrecps.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/vrecps.ll?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/vrecps.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/vrecps.ll Thu Oct  8 19:14:14 2009
@@ -1,7 +1,8 @@
-; RUN: llc < %s -march=arm -mattr=+neon > %t
-; RUN: grep {vrecps\\.f32} %t | count 2
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
 define <2 x float> @vrecpsf32(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK: vrecpsf32:
+;CHECK: vrecps.f32
 	%tmp1 = load <2 x float>* %A
 	%tmp2 = load <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
@@ -9,6 +10,8 @@
 }
 
 define <4 x float> @vrecpsQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK: vrecpsQf32:
+;CHECK: vrecps.f32
 	%tmp1 = load <4 x float>* %A
 	%tmp2 = load <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)

Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/vrhadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/vrhadd.ll?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/vrhadd.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/vrhadd.ll Thu Oct  8 19:14:14 2009
@@ -1,12 +1,8 @@
-; RUN: llc < %s -march=arm -mattr=+neon > %t
-; RUN: grep {vrhadd\\.s8} %t | count 2
-; RUN: grep {vrhadd\\.s16} %t | count 2
-; RUN: grep {vrhadd\\.s32} %t | count 2
-; RUN: grep {vrhadd\\.u8} %t | count 2
-; RUN: grep {vrhadd\\.u16} %t | count 2
-; RUN: grep {vrhadd\\.u32} %t | count 2
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
 define <8 x i8> @vrhadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vrhadds8:
+;CHECK: vrhadd.s8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -14,6 +10,8 @@
 }
 
 define <4 x i16> @vrhadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK: vrhadds16:
+;CHECK: vrhadd.s16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -21,6 +19,8 @@
 }
 
 define <2 x i32> @vrhadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK: vrhadds32:
+;CHECK: vrhadd.s32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrhadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -28,6 +28,8 @@
 }
 
 define <8 x i8> @vrhaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vrhaddu8:
+;CHECK: vrhadd.u8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -35,6 +37,8 @@
 }
 
 define <4 x i16> @vrhaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK: vrhaddu16:
+;CHECK: vrhadd.u16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -42,6 +46,8 @@
 }
 
 define <2 x i32> @vrhaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK: vrhaddu32:
+;CHECK: vrhadd.u32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrhaddu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -49,6 +55,8 @@
 }
 
 define <16 x i8> @vrhaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK: vrhaddQs8:
+;CHECK: vrhadd.s8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
@@ -56,6 +64,8 @@
 }
 
 define <8 x i16> @vrhaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK: vrhaddQs16:
+;CHECK: vrhadd.s16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
@@ -63,6 +73,8 @@
 }
 
 define <4 x i32> @vrhaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK: vrhaddQs32:
+;CHECK: vrhadd.s32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrhadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
@@ -70,6 +82,8 @@
 }
 
 define <16 x i8> @vrhaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK: vrhaddQu8:
+;CHECK: vrhadd.u8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
@@ -77,6 +91,8 @@
 }
 
 define <8 x i16> @vrhaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK: vrhaddQu16:
+;CHECK: vrhadd.u16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
@@ -84,6 +100,8 @@
 }
 
 define <4 x i32> @vrhaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK: vrhaddQu32:
+;CHECK: vrhadd.u32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrhaddu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)

Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/vrshl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/vrshl.ll?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/vrshl.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/vrshl.ll Thu Oct  8 19:14:14 2009
@@ -1,22 +1,8 @@
-; RUN: llc < %s -march=arm -mattr=+neon > %t
-; RUN: grep {vrshl\\.s8} %t | count 2
-; RUN: grep {vrshl\\.s16} %t | count 2
-; RUN: grep {vrshl\\.s32} %t | count 2
-; RUN: grep {vrshl\\.s64} %t | count 2
-; RUN: grep {vrshl\\.u8} %t | count 2
-; RUN: grep {vrshl\\.u16} %t | count 2
-; RUN: grep {vrshl\\.u32} %t | count 2
-; RUN: grep {vrshl\\.u64} %t | count 2
-; RUN: grep {vrshr\\.s8} %t | count 2
-; RUN: grep {vrshr\\.s16} %t | count 2
-; RUN: grep {vrshr\\.s32} %t | count 2
-; RUN: grep {vrshr\\.s64} %t | count 2
-; RUN: grep {vrshr\\.u8} %t | count 2
-; RUN: grep {vrshr\\.u16} %t | count 2
-; RUN: grep {vrshr\\.u32} %t | count 2
-; RUN: grep {vrshr\\.u64} %t | count 2
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
 define <8 x i8> @vrshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vrshls8:
+;CHECK: vrshl.s8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -24,6 +10,8 @@
 }
 
 define <4 x i16> @vrshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK: vrshls16:
+;CHECK: vrshl.s16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -31,6 +19,8 @@
 }
 
 define <2 x i32> @vrshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK: vrshls32:
+;CHECK: vrshl.s32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -38,6 +28,8 @@
 }
 
 define <1 x i64> @vrshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+;CHECK: vrshls64:
+;CHECK: vrshl.s64
 	%tmp1 = load <1 x i64>* %A
 	%tmp2 = load <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
@@ -45,6 +37,8 @@
 }
 
 define <8 x i8> @vrshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vrshlu8:
+;CHECK: vrshl.u8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -52,6 +46,8 @@
 }
 
 define <4 x i16> @vrshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK: vrshlu16:
+;CHECK: vrshl.u16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -59,6 +55,8 @@
 }
 
 define <2 x i32> @vrshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK: vrshlu32:
+;CHECK: vrshl.u32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -66,6 +64,8 @@
 }
 
 define <1 x i64> @vrshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+;CHECK: vrshlu64:
+;CHECK: vrshl.u64
 	%tmp1 = load <1 x i64>* %A
 	%tmp2 = load <1 x i64>* %B
 	%tmp3 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
@@ -73,6 +73,8 @@
 }
 
 define <16 x i8> @vrshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK: vrshlQs8:
+;CHECK: vrshl.s8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
@@ -80,6 +82,8 @@
 }
 
 define <8 x i16> @vrshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK: vrshlQs16:
+;CHECK: vrshl.s16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
@@ -87,6 +91,8 @@
 }
 
 define <4 x i32> @vrshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK: vrshlQs32:
+;CHECK: vrshl.s32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
@@ -94,6 +100,8 @@
 }
 
 define <2 x i64> @vrshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK: vrshlQs64:
+;CHECK: vrshl.s64
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
@@ -101,6 +109,8 @@
 }
 
 define <16 x i8> @vrshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK: vrshlQu8:
+;CHECK: vrshl.u8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
@@ -108,6 +118,8 @@
 }
 
 define <8 x i16> @vrshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK: vrshlQu16:
+;CHECK: vrshl.u16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
 	%tmp3 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
@@ -115,6 +127,8 @@
 }
 
 define <4 x i32> @vrshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK: vrshlQu32:
+;CHECK: vrshl.u32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
 	%tmp3 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
@@ -122,6 +136,8 @@
 }
 
 define <2 x i64> @vrshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK: vrshlQu64:
+;CHECK: vrshl.u64
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i64>* %B
 	%tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
@@ -129,96 +145,128 @@
 }
 
 define <8 x i8> @vrshrs8(<8 x i8>* %A) nounwind {
+;CHECK: vrshrs8:
+;CHECK: vrshr.s8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <8 x i8> %tmp2
 }
 
 define <4 x i16> @vrshrs16(<4 x i16>* %A) nounwind {
+;CHECK: vrshrs16:
+;CHECK: vrshr.s16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <4 x i16> %tmp2
 }
 
 define <2 x i32> @vrshrs32(<2 x i32>* %A) nounwind {
+;CHECK: vrshrs32:
+;CHECK: vrshr.s32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
 	ret <2 x i32> %tmp2
 }
 
 define <1 x i64> @vrshrs64(<1 x i64>* %A) nounwind {
+;CHECK: vrshrs64:
+;CHECK: vrshr.s64
 	%tmp1 = load <1 x i64>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
 	ret <1 x i64> %tmp2
 }
 
 define <8 x i8> @vrshru8(<8 x i8>* %A) nounwind {
+;CHECK: vrshru8:
+;CHECK: vrshr.u8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <8 x i8> %tmp2
 }
 
 define <4 x i16> @vrshru16(<4 x i16>* %A) nounwind {
+;CHECK: vrshru16:
+;CHECK: vrshr.u16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <4 x i16> %tmp2
 }
 
 define <2 x i32> @vrshru32(<2 x i32>* %A) nounwind {
+;CHECK: vrshru32:
+;CHECK: vrshr.u32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
 	ret <2 x i32> %tmp2
 }
 
 define <1 x i64> @vrshru64(<1 x i64>* %A) nounwind {
+;CHECK: vrshru64:
+;CHECK: vrshr.u64
 	%tmp1 = load <1 x i64>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
 	ret <1 x i64> %tmp2
 }
 
 define <16 x i8> @vrshrQs8(<16 x i8>* %A) nounwind {
+;CHECK: vrshrQs8:
+;CHECK: vrshr.s8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <16 x i8> %tmp2
 }
 
 define <8 x i16> @vrshrQs16(<8 x i16>* %A) nounwind {
+;CHECK: vrshrQs16:
+;CHECK: vrshr.s16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <8 x i16> %tmp2
 }
 
 define <4 x i32> @vrshrQs32(<4 x i32>* %A) nounwind {
+;CHECK: vrshrQs32:
+;CHECK: vrshr.s32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
 	ret <4 x i32> %tmp2
 }
 
 define <2 x i64> @vrshrQs64(<2 x i64>* %A) nounwind {
+;CHECK: vrshrQs64:
+;CHECK: vrshr.s64
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
 	ret <2 x i64> %tmp2
 }
 
 define <16 x i8> @vrshrQu8(<16 x i8>* %A) nounwind {
+;CHECK: vrshrQu8:
+;CHECK: vrshr.u8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
 	ret <16 x i8> %tmp2
 }
 
 define <8 x i16> @vrshrQu16(<8 x i16>* %A) nounwind {
+;CHECK: vrshrQu16:
+;CHECK: vrshr.u16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
 	ret <8 x i16> %tmp2
 }
 
 define <4 x i32> @vrshrQu32(<4 x i32>* %A) nounwind {
+;CHECK: vrshrQu32:
+;CHECK: vrshr.u32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
 	ret <4 x i32> %tmp2
 }
 
 define <2 x i64> @vrshrQu64(<2 x i64>* %A) nounwind {
+;CHECK: vrshrQu64:
+;CHECK: vrshr.u64
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
 	ret <2 x i64> %tmp2

Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/vrshrn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/vrshrn.ll?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/vrshrn.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/vrshrn.ll Thu Oct  8 19:14:14 2009
@@ -1,21 +1,24 @@
-; RUN: llc < %s -march=arm -mattr=+neon > %t
-; RUN: grep {vrshrn\\.i16} %t | count 1
-; RUN: grep {vrshrn\\.i32} %t | count 1
-; RUN: grep {vrshrn\\.i64} %t | count 1
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
 define <8 x i8> @vrshrns8(<8 x i16>* %A) nounwind {
+;CHECK: vrshrns8:
+;CHECK: vrshrn.i16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = call <8 x i8> @llvm.arm.neon.vrshiftn.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
 	ret <8 x i8> %tmp2
 }
 
 define <4 x i16> @vrshrns16(<4 x i32>* %A) nounwind {
+;CHECK: vrshrns16:
+;CHECK: vrshrn.i32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vrshiftn.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
 	ret <4 x i16> %tmp2
 }
 
 define <2 x i32> @vrshrns32(<2 x i64>* %A) nounwind {
+;CHECK: vrshrns32:
+;CHECK: vrshrn.i64
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vrshiftn.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
 	ret <2 x i32> %tmp2

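The RUN-line rewrite repeated throughout this commit replaces output counting
with FileCheck. A grep count only proves that a mnemonic appears somewhere in
the module's assembly; FileCheck matches its directives in order, so a
function-label check followed by a mnemonic check ties each instruction to the
function expected to emit it. The resulting shape, on a hypothetical
two-function test (the names narrow16/narrow32 are illustrative):

; hypothetical test skeleton, not part of this commit
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
;CHECK: narrow16:
;CHECK: vrshrn.i16
;CHECK: narrow32:
;CHECK: vrshrn.i32

Here vrshrn.i16 must appear after the narrow16: label and before narrow32:,
an ordering the old grep-and-count pipeline could not express.
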
Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsqrte.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsqrte.ll?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsqrte.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsqrte.ll Thu Oct  8 19:14:14 2009
@@ -1,26 +1,32 @@
-; RUN: llc < %s -march=arm -mattr=+neon > %t
-; RUN: grep {vrsqrte\\.u32} %t | count 2
-; RUN: grep {vrsqrte\\.f32} %t | count 2
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
 define <2 x i32> @vrsqrtei32(<2 x i32>* %A) nounwind {
+;CHECK: vrsqrtei32:
+;CHECK: vrsqrte.u32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vrsqrte.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp2
 }
 
 define <4 x i32> @vrsqrteQi32(<4 x i32>* %A) nounwind {
+;CHECK: vrsqrteQi32:
+;CHECK: vrsqrte.u32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vrsqrte.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp2
 }
 
 define <2 x float> @vrsqrtef32(<2 x float>* %A) nounwind {
+;CHECK: vrsqrtef32:
+;CHECK: vrsqrte.f32
 	%tmp1 = load <2 x float>* %A
 	%tmp2 = call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %tmp1)
 	ret <2 x float> %tmp2
 }
 
 define <4 x float> @vrsqrteQf32(<4 x float>* %A) nounwind {
+;CHECK: vrsqrteQf32:
+;CHECK: vrsqrte.f32
 	%tmp1 = load <4 x float>* %A
 	%tmp2 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %tmp1)
 	ret <4 x float> %tmp2

Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsqrts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsqrts.ll?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsqrts.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsqrts.ll Thu Oct  8 19:14:14 2009
@@ -1,7 +1,8 @@
-; RUN: llc < %s -march=arm -mattr=+neon > %t
-; RUN: grep {vrsqrts\\.f32} %t | count 2
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
 define <2 x float> @vrsqrtsf32(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK: vrsqrtsf32:
+;CHECK: vrsqrts.f32
 	%tmp1 = load <2 x float>* %A
 	%tmp2 = load <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.arm.neon.vrsqrts.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
@@ -9,6 +10,8 @@
 }
 
 define <4 x float> @vrsqrtsQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK: vrsqrtsQf32:
+;CHECK: vrsqrts.f32
 	%tmp1 = load <4 x float>* %A
 	%tmp2 = load <4 x float>* %B
 	%tmp3 = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)

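For context on the pair of files above: vrsqrte produces only a rough
reciprocal-square-root estimate, and vrsqrts computes the Newton-Raphson step
(3 - a*b)/2 used to refine it. One refinement iteration, sketched with the
same intrinsics (the function itself is illustrative, not part of this
commit):

; illustrative sketch, not part of this commit
define <2 x float> @rsqrt_refine(<2 x float> %a) nounwind {
	%est = call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %a)
	%sq = fmul <2 x float> %est, %est
	%step = call <2 x float> @llvm.arm.neon.vrsqrts.v2f32(<2 x float> %a, <2 x float> %sq)
	%ref = fmul <2 x float> %est, %step
	ret <2 x float> %ref
}

declare <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float>) nounwind readnone
declare <2 x float> @llvm.arm.neon.vrsqrts.v2f32(<2 x float>, <2 x float>) nounwind readnone
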
Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsubhn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsubhn.ll?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsubhn.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/vrsubhn.ll Thu Oct  8 19:14:14 2009
@@ -1,9 +1,8 @@
-; RUN: llc < %s -march=arm -mattr=+neon > %t
-; RUN: grep {vrsubhn\\.i16} %t | count 1
-; RUN: grep {vrsubhn\\.i32} %t | count 1
-; RUN: grep {vrsubhn\\.i64} %t | count 1
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
 define <8 x i8> @vrsubhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK: vrsubhni16:
+;CHECK: vrsubhn.i16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
@@ -11,6 +10,8 @@
 }
 
 define <4 x i16> @vrsubhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK: vrsubhni32:
+;CHECK: vrsubhn.i32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
@@ -18,6 +19,8 @@
 }
 
 define <2 x i32> @vrsubhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+;CHECK: vrsubhni64:
+;CHECK: vrsubhn.i64
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i64>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)

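For reference, vrsubhn narrows by keeping the rounded high half of the
difference: with 2n-bit elements, each result lane is
trunc((a - b + 2^(n-1)) >> n). Assuming those semantics, a worked i16 -> i8
lane: a = 0x1234 and b = 0x0134 give a - b = 0x1100; adding the rounding
constant 0x80 yields 0x1180, whose high byte 0x11 is the result.
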
Modified: llvm/branches/Apple/Leela/test/CodeGen/ARM/vstlane.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/test/CodeGen/ARM/vstlane.ll?rev=83604&r1=83603&r2=83604&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/test/CodeGen/ARM/vstlane.ll (original)
+++ llvm/branches/Apple/Leela/test/CodeGen/ARM/vstlane.ll Thu Oct  8 19:14:14 2009
@@ -32,11 +32,39 @@
 	ret void
 }
 
+define void @vst2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+;CHECK: vst2laneQi16:
+;CHECK: vst2.16
+	%tmp1 = load <8 x i16>* %B
+	call void @llvm.arm.neon.vst2lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
+	ret void
+}
+
+define void @vst2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+;CHECK: vst2laneQi32:
+;CHECK: vst2.32
+	%tmp1 = load <4 x i32>* %B
+	call void @llvm.arm.neon.vst2lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2)
+	ret void
+}
+
+define void @vst2laneQf(float* %A, <4 x float>* %B) nounwind {
+;CHECK: vst2laneQf:
+;CHECK: vst2.32
+	%tmp1 = load <4 x float>* %B
+	call void @llvm.arm.neon.vst2lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, i32 3)
+	ret void
+}
+
 declare void @llvm.arm.neon.vst2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind
 declare void @llvm.arm.neon.vst2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32) nounwind
 declare void @llvm.arm.neon.vst2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32) nounwind
 declare void @llvm.arm.neon.vst2lane.v2f32(i8*, <2 x float>, <2 x float>, i32) nounwind
 
+declare void @llvm.arm.neon.vst2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind
+
 define void @vst3lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vst3lanei8:
 ;CHECK: vst3.8
@@ -69,11 +97,39 @@
 	ret void
 }
 
+define void @vst3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+;CHECK: vst3laneQi16:
+;CHECK: vst3.16
+	%tmp1 = load <8 x i16>* %B
+	call void @llvm.arm.neon.vst3lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 6)
+	ret void
+}
+
+define void @vst3laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+;CHECK: vst3laneQi32:
+;CHECK: vst3.32
+	%tmp1 = load <4 x i32>* %B
+	call void @llvm.arm.neon.vst3lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 0)
+	ret void
+}
+
+define void @vst3laneQf(float* %A, <4 x float>* %B) nounwind {
+;CHECK: vst3laneQf:
+;CHECK: vst3.32
+	%tmp1 = load <4 x float>* %B
+	call void @llvm.arm.neon.vst3lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+	ret void
+}
+
 declare void @llvm.arm.neon.vst3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
 declare void @llvm.arm.neon.vst3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
 declare void @llvm.arm.neon.vst3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
 declare void @llvm.arm.neon.vst3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
 
+declare void @llvm.arm.neon.vst3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32) nounwind
+
 
 define void @vst4lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vst4lanei8:
@@ -107,7 +163,35 @@
 	ret void
 }
 
+define void @vst4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
+;CHECK: vst4laneQi16:
+;CHECK: vst4.16
+	%tmp1 = load <8 x i16>* %B
+	call void @llvm.arm.neon.vst4lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 7)
+	ret void
+}
+
+define void @vst4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
+;CHECK: vst4laneQi32:
+;CHECK: vst4.32
+	%tmp1 = load <4 x i32>* %B
+	call void @llvm.arm.neon.vst4lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2)
+	ret void
+}
+
+define void @vst4laneQf(float* %A, <4 x float>* %B) nounwind {
+;CHECK: vst4laneQf:
+;CHECK: vst4.32
+	%tmp1 = load <4 x float>* %B
+	call void @llvm.arm.neon.vst4lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+	ret void
+}
+
 declare void @llvm.arm.neon.vst4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
 declare void @llvm.arm.neon.vst4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
 declare void @llvm.arm.neon.vst4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
 declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
+
+declare void @llvm.arm.neon.vst4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) nounwind

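One point worth noting about the new Q-register cases: the lane indices used
(0, 1, 2, 3, 6, 7) deliberately cover both halves of the vector, since
instruction selection must map a Q lane onto the right D subregister: lane 6
of an <8 x i16> Q register, for instance, is lane 2 of the high D half. If
tighter checking were ever wanted, FileCheck's regex syntax could pin the
remapped lane index while leaving register allocation free. A hypothetical
strengthening of the vst3laneQi16 checks (exact assembly formatting may
differ):

; hypothetical stricter checks, not part of this commit
;CHECK: vst3laneQi16:
;CHECK: vst3.16 {d{{[0-9]+}}[2], d{{[0-9]+}}[2], d{{[0-9]+}}[2]}
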