[llvm-branch-commits] [llvm-branch] r78998 - in /llvm/branches/Apple/Leela/lib/Target/ARM: ARMISelDAGToDAG.cpp ARMISelLowering.cpp ARMISelLowering.h ARMInstrNEON.td

Bill Wendling isanbard at gmail.com
Fri Aug 14 00:45:45 PDT 2009


Author: void
Date: Fri Aug 14 02:45:45 2009
New Revision: 78998

URL: http://llvm.org/viewvc/llvm-project?rev=78998&view=rev
Log:
$ svn merge -c 78993 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r78993 into '.':
U    lib/Target/ARM/ARMInstrNEON.td
U    lib/Target/ARM/ARMISelLowering.h
U    lib/Target/ARM/ARMISelLowering.cpp
U    lib/Target/ARM/ARMISelDAGToDAG.cpp
$ svn merge -c 78994 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r78994 into '.':
G    lib/Target/ARM/ARMInstrNEON.td
G    lib/Target/ARM/ARMISelLowering.h
G    lib/Target/ARM/ARMISelLowering.cpp
$ svn merge -c 78995 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r78995 into '.':
G    lib/Target/ARM/ARMISelLowering.cpp


Modified:
    llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp
    llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.cpp
    llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.h
    llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrNEON.td

Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp?rev=78998&r1=78997&r2=78998&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelDAGToDAG.cpp Fri Aug 14 02:45:45 2009
@@ -36,9 +36,6 @@
 
 using namespace llvm;
 
-static const unsigned arm_dsubreg_0 = 5;
-static const unsigned arm_dsubreg_1 = 6;
-
 //===--------------------------------------------------------------------===//
 /// ARMDAGToDAGISel - ARM specific code to select ARM machine
 /// instructions for SelectionDAG operations.
@@ -1306,44 +1303,6 @@
                                  MVT::Other, Ops, 3);
   }
 
-  case ISD::VECTOR_SHUFFLE: {
-    EVT VT = Op.getValueType();
-
-    // Match 128-bit splat to VDUPLANEQ.  (This could be done with a Pat in
-    // ARMInstrNEON.td but it is awkward because the shuffle mask needs to be
-    // transformed first into a lane number and then to both a subregister
-    // index and an adjusted lane number.)  If the source operand is a
-    // SCALAR_TO_VECTOR, leave it so it will be matched later as a VDUP.
-    ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
-    if (VT.is128BitVector() && SVOp->isSplat() &&
-        Op.getOperand(0).getOpcode() != ISD::SCALAR_TO_VECTOR &&
-        Op.getOperand(1).getOpcode() == ISD::UNDEF) {
-      unsigned LaneVal = SVOp->getSplatIndex();
-
-      EVT HalfVT;
-      unsigned Opc = 0;
-      switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
-      default: llvm_unreachable("unhandled VDUP splat type");
-      case MVT::i8:  Opc = ARM::VDUPLN8q;  HalfVT = MVT::v8i8; break;
-      case MVT::i16: Opc = ARM::VDUPLN16q; HalfVT = MVT::v4i16; break;
-      case MVT::i32: Opc = ARM::VDUPLN32q; HalfVT = MVT::v2i32; break;
-      case MVT::f32: Opc = ARM::VDUPLNfq;  HalfVT = MVT::v2f32; break;
-      }
-
-      // The source operand needs to be changed to a subreg of the original
-      // 128-bit operand, and the lane number needs to be adjusted accordingly.
-      unsigned NumElts = VT.getVectorNumElements() / 2;
-      unsigned SRVal = (LaneVal < NumElts ? arm_dsubreg_0 : arm_dsubreg_1);
-      SDValue SR = CurDAG->getTargetConstant(SRVal, MVT::i32);
-      SDValue NewLane = CurDAG->getTargetConstant(LaneVal % NumElts, MVT::i32);
-      SDNode *SubReg = CurDAG->getTargetNode(TargetInstrInfo::EXTRACT_SUBREG,
-                                             dl, HalfVT, N->getOperand(0), SR);
-      return CurDAG->SelectNodeTo(N, Opc, VT, SDValue(SubReg, 0), NewLane);
-    }
-
-    break;
-  }
-
   case ARMISD::VLD2D: {
     SDValue MemAddr, MemUpdate, MemOpc;
     if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
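
For reference, the comment in the block removed above describes the transformation that this selection code performed: a splat lane of a 128-bit vector is rewritten as a D-subregister index plus an adjusted lane within that half. A minimal standalone sketch of that index arithmetic, using a hypothetical helper name rather than any LLVM API:

#include <cassert>
#include <utility>

// Hypothetical helper, not LLVM code: split a splat lane of a 128-bit vector
// with NumQElts elements into (D-register half, lane within that half),
// mirroring the removed VECTOR_SHUFFLE case: NumElts = NumQElts / 2,
// half = Lane < NumElts ? 0 : 1, lane %= NumElts.
static std::pair<unsigned, unsigned> splitQuadLane(unsigned Lane,
                                                   unsigned NumQElts) {
  unsigned NumDElts = NumQElts / 2;           // elements per 64-bit half
  unsigned Half = Lane < NumDElts ? 0u : 1u;  // 0 = low D subreg, 1 = high
  return std::make_pair(Half, Lane % NumDElts);
}

int main() {
  // v8i16 splat of lane 5: the element lives in the high D half, at lane 1.
  std::pair<unsigned, unsigned> R = splitQuadLane(5, 8);
  assert(R.first == 1 && R.second == 1);
  return 0;
}

The removed code emitted an EXTRACT_SUBREG with arm_dsubreg_0/1 for the half and a target constant for the adjusted lane; the new quad-register NEONvduplane patterns later in this patch are expected to express the same mapping through the DSubReg_*_reg and SubReg_*_lane transforms.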

Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.cpp?rev=78998&r1=78997&r2=78998&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.cpp Fri Aug 14 02:45:45 2009
@@ -477,7 +477,8 @@
   case ARMISD::VQRSHRNsu:     return "ARMISD::VQRSHRNsu";
   case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
   case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
-  case ARMISD::VDUPLANEQ:     return "ARMISD::VDUPLANEQ";
+  case ARMISD::VDUP:          return "ARMISD::VDUP";
+  case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
   case ARMISD::VLD2D:         return "ARMISD::VLD2D";
   case ARMISD::VLD3D:         return "ARMISD::VLD3D";
   case ARMISD::VLD4D:         return "ARMISD::VLD4D";
@@ -2444,6 +2445,15 @@
   // of inconsistencies between legalization and selection.
   // FIXME: floating-point vectors should be canonicalized to integer vectors
   // of the same time so that they get CSEd properly.
+  if (SVN->isSplat()) {
+    int Lane = SVN->getSplatIndex();
+    SDValue Op0 = SVN->getOperand(0);
+    if (Lane == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) {
+      return DAG.getNode(ARMISD::VDUP, dl, VT, Op0.getOperand(0));
+    }
+    return DAG.getNode(ARMISD::VDUPLANE, dl, VT, SVN->getOperand(0),
+		       DAG.getConstant(Lane, MVT::i32));
+  }
   if (isVREVMask(SVN, 64))
     return DAG.getNode(ARMISD::VREV64, dl, VT, SVN->getOperand(0));
   if (isVREVMask(SVN, 32))
@@ -2451,7 +2461,7 @@
   if (isVREVMask(SVN, 16))
     return DAG.getNode(ARMISD::VREV16, dl, VT, SVN->getOperand(0));
 
-  return Op;
+  return SDValue();
 }
 
 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {

Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.h?rev=78998&r1=78997&r2=78998&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.h (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMISelLowering.h Fri Aug 14 02:45:45 2009
@@ -115,8 +115,9 @@
       VGETLANEu,    // zero-extend vector extract element
       VGETLANEs,    // sign-extend vector extract element
 
-      // Vector duplicate lane (128-bit result only; 64-bit is a shuffle)
-      VDUPLANEQ,    // splat a lane from a 64-bit vector to a 128-bit vector
+      // Vector duplicate:
+      VDUP,
+      VDUPLANE,
 
       // Vector load/store with (de)interleaving
       VLD2D,

Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrNEON.td?rev=78998&r1=78997&r2=78998&view=diff

==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrNEON.td (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMInstrNEON.td Fri Aug 14 02:45:45 2009
@@ -65,8 +65,13 @@
 def NEONvgetlaneu : SDNode<"ARMISD::VGETLANEu", SDTARMVGETLN>;
 def NEONvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;
 
-def NEONvduplaneq : SDNode<"ARMISD::VDUPLANEQ",
-                           SDTypeProfile<1, 2, [SDTCisVT<2, i32>]>>;
+def NEONvdup      : SDNode<"ARMISD::VDUP", SDTypeProfile<1, 1, [SDTCisVec<0>]>>;
+
+// VDUPLANE can produce a quad-register result from a double-register source,
+// so the result is not constrained to match the source.
+def NEONvduplane  : SDNode<"ARMISD::VDUPLANE",
+                           SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
+                                                SDTCisVT<2, i32>]>>;
 
 def SDTARMVLD2    : SDTypeProfile<2, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
 def SDTARMVLD3    : SDTypeProfile<3, 1, [SDTCisSameAs<0, 1>,
@@ -1744,20 +1749,14 @@
 
 //   VDUP     : Vector Duplicate (from ARM core register to all elements)
 
-def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
-                       (vector_shuffle node:$lhs, node:$rhs), [{
-  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
-  return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
-}]>;
-
 class VDUPD<bits<8> opcod1, bits<2> opcod3, string asmSize, ValueType Ty>
   : NVDup<opcod1, 0b1011, opcod3, (outs DPR:$dst), (ins GPR:$src),
           NoItinerary, "vdup", !strconcat(asmSize, "\t$dst, $src"),
-          [(set DPR:$dst, (Ty (splat_lo (scalar_to_vector GPR:$src), undef)))]>;
+          [(set DPR:$dst, (Ty (NEONvdup (i32 GPR:$src))))]>;
 class VDUPQ<bits<8> opcod1, bits<2> opcod3, string asmSize, ValueType Ty>
   : NVDup<opcod1, 0b1011, opcod3, (outs QPR:$dst), (ins GPR:$src),
           NoItinerary, "vdup", !strconcat(asmSize, "\t$dst, $src"),
-          [(set QPR:$dst, (Ty (splat_lo (scalar_to_vector GPR:$src), undef)))]>;
+          [(set QPR:$dst, (Ty (NEONvdup (i32 GPR:$src))))]>;
 
 def  VDUP8d   : VDUPD<0b11101100, 0b00, ".8", v8i8>;
 def  VDUP16d  : VDUPD<0b11101000, 0b01, ".16", v4i16>;
@@ -1768,44 +1767,27 @@
 
 def  VDUPfd   : NVDup<0b11101000, 0b1011, 0b00, (outs DPR:$dst), (ins GPR:$src),
                       NoItinerary, "vdup", ".32\t$dst, $src",
-                      [(set DPR:$dst, (v2f32 (splat_lo
-                                              (scalar_to_vector
-                                               (f32 (bitconvert GPR:$src))),
-                                              undef)))]>;
+                      [(set DPR:$dst, (v2f32 (NEONvdup
+                                              (f32 (bitconvert GPR:$src)))))]>;
 def  VDUPfq   : NVDup<0b11101010, 0b1011, 0b00, (outs QPR:$dst), (ins GPR:$src),
                       NoItinerary, "vdup", ".32\t$dst, $src",
-                      [(set QPR:$dst, (v4f32 (splat_lo
-                                              (scalar_to_vector
-                                               (f32 (bitconvert GPR:$src))),
-                                              undef)))]>;
+                      [(set QPR:$dst, (v4f32 (NEONvdup
+                                              (f32 (bitconvert GPR:$src)))))]>;
 
 //   VDUP     : Vector Duplicate Lane (from scalar to all elements)
 
-def SHUFFLE_get_splat_lane : SDNodeXForm<vector_shuffle, [{
-  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
-  return CurDAG->getTargetConstant(SVOp->getSplatIndex(), MVT::i32);
-}]>;
-
-def splat_lane : PatFrag<(ops node:$lhs, node:$rhs),
-                         (vector_shuffle node:$lhs, node:$rhs), [{
-  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
-  return SVOp->isSplat();
-}], SHUFFLE_get_splat_lane>;
-
 class VDUPLND<bits<2> op19_18, bits<2> op17_16, string OpcodeStr, ValueType Ty>
   : N2V<0b11, 0b11, op19_18, op17_16, 0b11000, 0, 0,
         (outs DPR:$dst), (ins DPR:$src, lane_cst:$lane), NoItinerary,
         !strconcat(OpcodeStr, "\t$dst, $src[$lane]"), "",
-        [(set DPR:$dst, (Ty (splat_lane:$lane DPR:$src, undef)))]>;
+        [(set DPR:$dst, (Ty (NEONvduplane (Ty DPR:$src), imm:$lane)))]>;
 
-// vector_shuffle requires that the source and destination types match, so
-// VDUP to a 128-bit result uses a target-specific VDUPLANEQ node.
 class VDUPLNQ<bits<2> op19_18, bits<2> op17_16, string OpcodeStr,
               ValueType ResTy, ValueType OpTy>
   : N2V<0b11, 0b11, op19_18, op17_16, 0b11000, 1, 0,
         (outs QPR:$dst), (ins DPR:$src, lane_cst:$lane), NoItinerary,
         !strconcat(OpcodeStr, "\t$dst, $src[$lane]"), "",
-        [(set QPR:$dst, (ResTy (NEONvduplaneq (OpTy DPR:$src), imm:$lane)))]>;
+        [(set QPR:$dst, (ResTy (NEONvduplane (OpTy DPR:$src), imm:$lane)))]>;
 
 def VDUPLN8d  : VDUPLND<0b00, 0b01, "vdup.8", v8i8>;
 def VDUPLN16d : VDUPLND<0b00, 0b10, "vdup.16", v4i16>;
@@ -1816,19 +1798,32 @@
 def VDUPLN32q : VDUPLNQ<0b01, 0b00, "vdup.32", v4i32, v2i32>;
 def VDUPLNfq  : VDUPLNQ<0b01, 0b00, "vdup.32", v4f32, v2f32>;
 
+def : Pat<(v16i8 (NEONvduplane (v16i8 QPR:$src), imm:$lane)),
+          (v16i8 (VDUPLN8q (v8i8 (EXTRACT_SUBREG QPR:$src,
+                                  (DSubReg_i8_reg imm:$lane))),
+                           (SubReg_i8_lane imm:$lane)))>;
+def : Pat<(v8i16 (NEONvduplane (v8i16 QPR:$src), imm:$lane)),
+          (v8i16 (VDUPLN16q (v4i16 (EXTRACT_SUBREG QPR:$src,
+                                    (DSubReg_i16_reg imm:$lane))),
+                            (SubReg_i16_lane imm:$lane)))>;
+def : Pat<(v4i32 (NEONvduplane (v4i32 QPR:$src), imm:$lane)),
+          (v4i32 (VDUPLN32q (v2i32 (EXTRACT_SUBREG QPR:$src,
+                                    (DSubReg_i32_reg imm:$lane))),
+                            (SubReg_i32_lane imm:$lane)))>;
+def : Pat<(v4f32 (NEONvduplane (v4f32 QPR:$src), imm:$lane)),
+          (v4f32 (VDUPLNfq (v2f32 (EXTRACT_SUBREG QPR:$src,
+                                   (DSubReg_i32_reg imm:$lane))),
+                           (SubReg_i32_lane imm:$lane)))>;
+
 def VDUPfdf   : N2V<0b11, 0b11, 0b01, 0b00, 0b11000, 0, 0,
                     (outs DPR:$dst), (ins SPR:$src),
                     NoItinerary, "vdup.32\t$dst, ${src:lane}", "",
-                    [(set DPR:$dst, (v2f32 (splat_lo
-                                            (scalar_to_vector SPR:$src),
-                                            undef)))]>;
+                    [(set DPR:$dst, (v2f32 (NEONvdup (f32 SPR:$src))))]>;
 
 def VDUPfqf   : N2V<0b11, 0b11, 0b01, 0b00, 0b11000, 1, 0,
                     (outs QPR:$dst), (ins SPR:$src),
                     NoItinerary, "vdup.32\t$dst, ${src:lane}", "",
-                    [(set QPR:$dst, (v4f32 (splat_lo
-                                            (scalar_to_vector SPR:$src),
-                                            undef)))]>;
+                    [(set QPR:$dst, (v4f32 (NEONvdup (f32 SPR:$src))))]>;
 
 //   VMOVN    : Vector Narrowing Move
 defm VMOVN    : N2VNInt_HSD<0b11,0b11,0b10,0b00100,0,0, "vmovn.i",
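
The comment added above notes that NEONvduplane's result is deliberately not constrained to match its source, since the quad-register forms widen a 64-bit source to a 128-bit result. A plain C++ sketch (not LLVM code) tabulating the result/source pairings the patterns in this file now cover, which is why the node's type profile only requires a vector result, a vector operand, and an i32 lane:

#include <cstdio>

// Result/source element types used by the NEONvduplane patterns above.  The
// double-register forms keep the source type; the quad-register forms widen
// a 64-bit D-register source to a 128-bit Q-register result.
struct DupLaneEntry {
  const char *Inst;
  const char *Result;
  const char *Source;
};

int main() {
  static const DupLaneEntry Table[] = {
      {"VDUPLN8d",  "v8i8",  "v8i8"},   // D -> D, same type
      {"VDUPLN16d", "v4i16", "v4i16"},
      {"VDUPLN8q",  "v16i8", "v8i8"},   // D -> Q, widening
      {"VDUPLN16q", "v8i16", "v4i16"},
      {"VDUPLN32q", "v4i32", "v2i32"},
      {"VDUPLNfq",  "v4f32", "v2f32"},
  };
  for (const DupLaneEntry &E : Table)
    std::printf("%-10s %-6s <- %s\n", E.Inst, E.Result, E.Source);
  return 0;
}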




