[llvm] r277168 - [Hexagon] Custom lower VECTOR_SHUFFLE and EXTRACT_SUBVECTOR for HVX

Krzysztof Parzyszek via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 29 09:44:27 PDT 2016


Author: kparzysz
Date: Fri Jul 29 11:44:27 2016
New Revision: 277168

URL: http://llvm.org/viewvc/llvm-project?rev=277168&view=rev
Log:
[Hexagon] Custom lower VECTOR_SHUFFLE and EXTRACT_SUBVECTOR for HVX

If the mask of a vector shuffle is a sequence of consecutive odd indices
starting at 1, or consecutive even indices starting at 0, up to the largest
possible index for the given type in the given HVX mode (single or double),
we can generate a vpacko or vpacke instruction, respectively.

E.g.
  %42 = shufflevector <32 x i16> %37, <32 x i16> %41,
                      <32 x i32> <i32 1, i32 3, ..., i32 63>
  becomes %42.h = vpacko(%41.w, %37.w)
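
Similarly, for the even case (operand names %x, %a, %b are hypothetical,
shown only for illustration):
  %x = shufflevector <32 x i16> %a, <32 x i16> %b,
                     <32 x i32> <i32 0, i32 2, ..., i32 62>
  would become %x.h = vpacke(%b.w, %a.w)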

Patch by Pranav Bhandarkar.

Added:
    llvm/trunk/test/CodeGen/Hexagon/lower-extract-subvector.ll
    llvm/trunk/test/CodeGen/Hexagon/vdmpy-halide-test.ll
    llvm/trunk/test/CodeGen/Hexagon/vmpa-halide-test.ll
    llvm/trunk/test/CodeGen/Hexagon/vpack_eo.ll
Modified:
    llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.h
    llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV60.td

Modified: llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp?rev=277168&r1=277167&r2=277168&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp Fri Jul 29 11:44:27 2016
@@ -83,18 +83,24 @@ static cl::opt<int> MaxStoresPerMemsetOp
 
 
 namespace {
-class HexagonCCState : public CCState {
-  unsigned NumNamedVarArgParams;
+  class HexagonCCState : public CCState {
+    unsigned NumNamedVarArgParams;
 
-public:
-  HexagonCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
-                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
-                 int NumNamedVarArgParams)
-      : CCState(CC, isVarArg, MF, locs, C),
-        NumNamedVarArgParams(NumNamedVarArgParams) {}
+  public:
+    HexagonCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
+                   SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
+                   int NumNamedVarArgParams)
+        : CCState(CC, isVarArg, MF, locs, C),
+          NumNamedVarArgParams(NumNamedVarArgParams) {}
 
-  unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
-};
+    unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
+  };
+
+  enum StridedLoadKind {
+    Even = 0,
+    Odd,
+    NoPattern
+  };
 }
 
 // Implement calling convention for Hexagon.
@@ -1992,11 +1998,26 @@ HexagonTargetLowering::HexagonTargetLowe
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i16,  Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i32,  Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i64,  Custom);
+      // We try to generate the vpack{e/o} instructions. If we fail
+      // we fall back upon ExpandOp.
+      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8,  Custom);
+      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
+      setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v64i8, Custom);
+      setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i16, Custom);
+      setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);
     } else if (UseHVXDbl) {
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v256i8,  Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v128i16, Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i32,  Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i64,  Custom);
+      // We try to generate the vpack{e/o} instructions. If we fail
+      // we fall back upon ExpandOp.
+      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v128i8,  Custom);
+      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i16,  Custom);
+      setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
+      setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v128i8, Custom);
+      setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v64i16, Custom);
+      setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i32, Custom);
     } else {
       llvm_unreachable("Unrecognized HVX mode");
     }
@@ -2228,6 +2249,7 @@ const char* HexagonTargetLowering::getTa
   case HexagonISD::VCMPWGT:       return "HexagonISD::VCMPWGT";
   case HexagonISD::VCMPWGTU:      return "HexagonISD::VCMPWGTU";
   case HexagonISD::VCOMBINE:      return "HexagonISD::VCOMBINE";
+  case HexagonISD::VPACK:         return "HexagonISD::VPACK";
   case HexagonISD::VSHLH:         return "HexagonISD::VSHLH";
   case HexagonISD::VSHLW:         return "HexagonISD::VSHLW";
   case HexagonISD::VSPLATB:       return "HexagonISD::VSPLTB";
@@ -2257,7 +2279,6 @@ bool HexagonTargetLowering::isTruncateFr
   return (VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32);
 }
 
-// shouldExpandBuildVectorWithShuffles
 // Should we expand the build vector with shuffles?
 bool
 HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT,
@@ -2272,14 +2293,41 @@ HexagonTargetLowering::shouldExpandBuild
   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
 }
 
-// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3).  V1 and
-// V2 are the two vectors to select data from, V3 is the permutation.
-static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
+static StridedLoadKind isStridedLoad(ArrayRef<int> &Mask) {
+  int even_start = -2;
+  int odd_start = -1;
+  size_t mask_len = Mask.size();
+  for (auto idx : Mask) {
+    if ((idx - even_start) == 2)
+      even_start = idx;
+    else
+      break;
+  }
+  if (even_start == (int)(mask_len * 2) - 2)
+    return StridedLoadKind::Even;
+  for (auto idx : Mask) {
+    if ((idx - odd_start) == 2)
+      odd_start = idx;
+    else
+      break;
+  }
+  if (odd_start == (int)(mask_len * 2) - 1)
+    return StridedLoadKind::Odd;
+
+  return StridedLoadKind::NoPattern;
+}
+
+// Lower a vector shuffle (V1, V2, V3).  V1 and V2 are the two vectors
+// to select data from, V3 is the permutation.
+SDValue
+HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
+      const {
   const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
   SDValue V1 = Op.getOperand(0);
   SDValue V2 = Op.getOperand(1);
   SDLoc dl(Op);
   EVT VT = Op.getValueType();
+  bool UseHVX = Subtarget.useHVXOps();
 
   if (V2.isUndef())
     V2 = V1;
@@ -2309,6 +2357,30 @@ static SDValue LowerVECTOR_SHUFFLE(SDVal
     return createSplat(DAG, dl, VT, DAG.getConstant(Lane, dl, MVT::i32));
   }
 
+  if (UseHVX) {
+    ArrayRef<int> Mask = SVN->getMask();
+    size_t MaskLen = Mask.size();
+    int ElemSizeInBits = VT.getVectorElementType().getSizeInBits();
+    if ((Subtarget.useHVXSglOps() && (ElemSizeInBits * MaskLen) == 64 * 8) ||
+        (Subtarget.useHVXDblOps() && (ElemSizeInBits * MaskLen) == 128 * 8)) {
+      // isStridedLoad returns Even (0), Odd (1), or NoPattern (2).
+      StridedLoadKind Pattern = isStridedLoad(Mask);
+
+      if (Pattern == StridedLoadKind::NoPattern)
+        return SDValue();
+
+      SDValue Vec0 = Op.getOperand(0);
+      SDValue Vec1 = Op.getOperand(1);
+      SDValue StridePattern = DAG.getConstant(Pattern, dl, MVT::i32);
+      SDValue Ops[] = { Vec1, Vec0, StridePattern };
+      return DAG.getNode(HexagonISD::VPACK, dl, VT, Ops);
+    }
+    // We used to assert in the "else" part here, but that is bad for Halide.
+    // Halide creates intermediate double registers by interleaving two
+    // concatenated vector registers. The interleaving requires vector_shuffle
+    // nodes and we shouldn't barf on a double register result of a
+    // vector_shuffle because it is most likely an intermediate result.
+  }
   // FIXME: We need to support more general vector shuffles.  See
   // below the comment from the ARM backend that deals in the general
   // case with the vector shuffles.  For now, let expand handle these.
@@ -2331,10 +2403,11 @@ static bool isCommonSplatElement(BuildVe
   return true;
 }
 
-// LowerVECTOR_SHIFT - Lower a vector shift. Try to convert
+// Lower a vector shift. Try to convert
 // <VT> = SHL/SRA/SRL <VT> by <VT> to Hexagon specific
 // <VT> = SHL/SRA/SRL <VT> by <IT/i32>.
-static SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) {
+SDValue
+HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
   BuildVectorSDNode *BVN = 0;
   SDValue V1 = Op.getOperand(0);
   SDValue V2 = Op.getOperand(1);
@@ -2484,6 +2557,9 @@ HexagonTargetLowering::LowerBUILD_VECTOR
     Res = (Res << EltSize) | Val;
   }
 
+  if (Size > 64)
+    return SDValue();
+
   if (Size == 64)
     ConstVal = DAG.getConstant(Res, dl, MVT::i64);
   else
@@ -2581,16 +2657,68 @@ HexagonTargetLowering::LowerCONCAT_VECTO
     SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, S, Offset);
     if (VT.getSizeInBits() == 32)
       V = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i32, {V, OpN, Or});
-    else
+    else if (VT.getSizeInBits() == 64)
       V = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i64, {V, OpN, Or});
+    else
+      return SDValue();
   }
 
   return DAG.getNode(ISD::BITCAST, dl, VT, V);
 }
 
 SDValue
+HexagonTargetLowering::LowerEXTRACT_SUBVECTOR_HVX(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  EVT VT = Op.getOperand(0).getValueType();
+  SDLoc dl(Op);
+  bool UseHVX = Subtarget.useHVXOps();
+  bool UseHVXSgl = Subtarget.useHVXSglOps();
+  // Just in case...
+
+  if (!VT.isVector() || !UseHVX)
+    return SDValue();
+
+  EVT ResVT = Op.getValueType();
+  unsigned ResSize = ResVT.getSizeInBits();
+  unsigned VectorSizeInBits = UseHVXSgl ? (64 * 8) : (128 * 8);
+  unsigned OpSize = VT.getSizeInBits();
+
+  // We deal only with cases where the result is the vector size
+  // and the vector operand is a double register.
+  if (!(ResVT.isByteSized() && ResSize == VectorSizeInBits) ||
+      !(VT.isByteSized() && OpSize == 2 * VectorSizeInBits))
+    return SDValue();
+
+  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+  if (!Cst)
+    return SDValue();
+  unsigned Val = Cst->getZExtValue();
+
+  // These two will get lowered to an appropriate EXTRACT_SUBREG in ISel.
+  if (Val == 0) {
+    SDValue Vec = Op.getOperand(0);
+    unsigned Subreg = Hexagon::subreg_loreg;
+    return DAG.getTargetExtractSubreg(Subreg, dl, ResVT, Vec);
+  }
+
+  if (ResVT.getVectorNumElements() == Val) {
+    SDValue Vec = Op.getOperand(0);
+    unsigned Subreg = Hexagon::subreg_hireg;
+    return DAG.getTargetExtractSubreg(Subreg, dl, ResVT, Vec);
+  }
+
+  return SDValue();
+}
+
+SDValue
 HexagonTargetLowering::LowerEXTRACT_VECTOR(SDValue Op,
                                            SelectionDAG &DAG) const {
+  // If we are dealing with EXTRACT_SUBVECTOR on a HVX type, we may
+  // be able to simplify it to an EXTRACT_SUBREG.
+  if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR && Subtarget.useHVXOps() &&
+      IsHvxVectorType(Op.getValueType().getSimpleVT()))
+    return LowerEXTRACT_SUBVECTOR_HVX(Op, DAG);
+
   EVT VT = Op.getValueType();
   int VTN = VT.isVector() ? VT.getVectorNumElements() : 1;
   SDLoc dl(Op);
@@ -2631,13 +2759,14 @@ HexagonTargetLowering::LowerEXTRACT_VECT
         llvm_unreachable("Bad offset");
       N = DAG.getTargetExtractSubreg(Subreg, dl, MVT::i32, Vec);
 
-    } else if (VecVT.getSizeInBits() == 32) {
+    } else if (SVT.getSizeInBits() == 32) {
       N = DAG.getNode(HexagonISD::EXTRACTU, dl, MVT::i32, Ops);
-    } else {
+    } else if (SVT.getSizeInBits() == 64) {
       N = DAG.getNode(HexagonISD::EXTRACTU, dl, MVT::i64, Ops);
       if (VT.getSizeInBits() == 32)
         N = DAG.getTargetExtractSubreg(Hexagon::subreg_loreg, dl, MVT::i32, N);
-    }
+    } else
+      return SDValue();
 
     return DAG.getNode(ISD::BITCAST, dl, VT, N);
   }
@@ -2684,8 +2813,10 @@ HexagonTargetLowering::LowerINSERT_VECTO
     SDValue N;
     if (VT.getSizeInBits() == 32)
       N = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32, Ops);
-    else
+    else if (VT.getSizeInBits() == 64)
       N = DAG.getNode(HexagonISD::INSERT, dl, MVT::i64, Ops);
+    else
+      return SDValue();
 
     return DAG.getNode(ISD::BITCAST, dl, VT, N);
   }
@@ -2708,8 +2839,10 @@ HexagonTargetLowering::LowerINSERT_VECTO
   SDValue N;
   if (VT.getSizeInBits() == 32)
     N = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i32, Ops);
-  else
+  else if (VT.getSizeInBits() == 64)
     N = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i64, Ops);
+  else
+    return SDValue();
 
   return DAG.getNode(ISD::BITCAST, dl, VT, N);
 }

Modified: llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.h?rev=277168&r1=277167&r2=277168&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.h (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.h Fri Jul 29 11:44:27 2016
@@ -79,6 +79,7 @@ bool isPositiveHalfWord(SDNode *N);
       EXTRACTU,
       EXTRACTURP,
       VCOMBINE,
+      VPACK,
       TC_RETURN,
       EH_RETURN,
       DCFETCH,
@@ -124,7 +125,10 @@ bool isPositiveHalfWord(SDNode *N);
     const char *getTargetNodeName(unsigned Opcode) const override;
     SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerEXTRACT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerEXTRACT_SUBVECTOR_HVX(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerINSERT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;

Modified: llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV60.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV60.td?rev=277168&r1=277167&r2=277168&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV60.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonInstrInfoV60.td Fri Jul 29 11:44:27 2016
@@ -1659,6 +1659,46 @@ defm V6_vpackoh :
      T_HVX_alu_VV <"$dst.h = vpacko($src1.w,$src2.w)">, V6_vpackoh_enc;
 }
 
+def SDTHexagonVPACK: SDTypeProfile<1, 3, [SDTCisSameAs<1, 2>,
+                                          SDTCisInt<3>]>;
+
+def HexagonVPACK: SDNode<"HexagonISD::VPACK", SDTHexagonVPACK>;
+
+// 0 as the last argument denotes vpacke. 1 denotes vpacko
+def: Pat<(v64i8 (HexagonVPACK (v64i8 VectorRegs:$Vs),
+                              (v64i8 VectorRegs:$Vt), (i32 0))),
+         (V6_vpackeb VectorRegs:$Vs, VectorRegs:$Vt)>,
+         Requires<[UseHVXSgl]>;
+def: Pat<(v64i8 (HexagonVPACK (v64i8 VectorRegs:$Vs),
+                              (v64i8 VectorRegs:$Vt), (i32 1))),
+         (V6_vpackob VectorRegs:$Vs, VectorRegs:$Vt)>,
+         Requires<[UseHVXSgl]>;
+def: Pat<(v32i16 (HexagonVPACK (v32i16 VectorRegs:$Vs),
+                               (v32i16 VectorRegs:$Vt), (i32 0))),
+         (V6_vpackeh VectorRegs:$Vs, VectorRegs:$Vt)>,
+         Requires<[UseHVXSgl]>;
+def: Pat<(v32i16 (HexagonVPACK (v32i16 VectorRegs:$Vs),
+                             (v32i16 VectorRegs:$Vt), (i32 1))),
+         (V6_vpackoh VectorRegs:$Vs, VectorRegs:$Vt)>,
+         Requires<[UseHVXSgl]>;
+
+def: Pat<(v128i8 (HexagonVPACK (v128i8 VecDblRegs:$Vs),
+                             (v128i8 VecDblRegs:$Vt), (i32 0))),
+         (V6_vpackeb_128B VecDblRegs:$Vs, VecDblRegs:$Vt)>,
+         Requires<[UseHVXDbl]>;
+def: Pat<(v128i8 (HexagonVPACK (v128i8 VecDblRegs:$Vs),
+                             (v128i8 VecDblRegs:$Vt), (i32 1))),
+         (V6_vpackob_128B VecDblRegs:$Vs, VecDblRegs:$Vt)>,
+         Requires<[UseHVXDbl]>;
+def: Pat<(v64i16 (HexagonVPACK (v64i16 VecDblRegs:$Vs),
+                             (v64i16 VecDblRegs:$Vt), (i32 0))),
+         (V6_vpackeh_128B VecDblRegs:$Vs, VecDblRegs:$Vt)>,
+         Requires<[UseHVXDbl]>;
+def: Pat<(v64i16 (HexagonVPACK (v64i16 VecDblRegs:$Vs),
+                            (v64i16 VecDblRegs:$Vt), (i32 1))),
+        (V6_vpackoh_128B VecDblRegs:$Vs, VecDblRegs:$Vt)>,
+        Requires<[UseHVXDbl]>;
+
 let hasNewValue = 1, hasSideEffects = 0 in
 class T_HVX_condALU <string asmString, RegisterClass RC1, RegisterClass RC2>
   : CVI_VA_Resource1 <(outs RC2:$dst),

Added: llvm/trunk/test/CodeGen/Hexagon/lower-extract-subvector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/lower-extract-subvector.ll?rev=277168&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/lower-extract-subvector.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/lower-extract-subvector.ll Fri Jul 29 11:44:27 2016
@@ -0,0 +1,47 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+
+; This test checks if we custom lower extract_subvector. If we cannot
+; custom lower extract_subvector, this test makes the compiler crash.
+
+; CHECK: vmem
+target triple = "hexagon-unknown--elf"
+
+; Function Attrs: nounwind
+define void @__processed() #0 {
+entry:
+  br label %"for matrix.s0.y"
+
+"for matrix.s0.y":                                ; preds = %"for matrix.s0.y", %entry
+  br i1 undef, label %"produce processed", label %"for matrix.s0.y"
+
+"produce processed":                              ; preds = %"for matrix.s0.y"
+  br i1 undef, label %"for processed.s0.ty.ty.preheader", label %"consume processed"
+
+"for processed.s0.ty.ty.preheader":               ; preds = %"produce processed"
+  br i1 undef, label %"for denoised.s0.y.preheader", label %"consume denoised"
+
+"for denoised.s0.y.preheader":                    ; preds = %"for processed.s0.ty.ty.preheader"
+  unreachable
+
+"consume denoised":                               ; preds = %"for processed.s0.ty.ty.preheader"
+  br i1 undef, label %"consume deinterleaved", label %if.then.i164
+
+if.then.i164:                                     ; preds = %"consume denoised"
+  unreachable
+
+"consume deinterleaved":                          ; preds = %"consume denoised"
+  %0 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> undef, <32 x i32> undef, i32 -2)
+  %1 = bitcast <64 x i32> %0 to <128 x i16>
+  %2 = shufflevector <128 x i16> %1, <128 x i16> undef, <64 x i32> <i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
+  store <64 x i16> %2, <64 x i16>* undef, align 128
+  unreachable
+
+"consume processed":                              ; preds = %"produce processed"
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32>, <32 x i32>, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-double" }
+attributes #1 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-double" }

Added: llvm/trunk/test/CodeGen/Hexagon/vdmpy-halide-test.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vdmpy-halide-test.ll?rev=277168&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vdmpy-halide-test.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vdmpy-halide-test.ll Fri Jul 29 11:44:27 2016
@@ -0,0 +1,167 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; This test checks for a compiler assert, so the test just needs to compile for it to pass.
+target triple = "hexagon-unknown--elf"
+
+%struct.buffer_t = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+
+; Function Attrs: norecurse nounwind
+define i32 @__testOne(%struct.buffer_t* noalias nocapture readonly %inputOne.buffer, %struct.buffer_t* noalias nocapture readonly %inputTwo.buffer, %struct.buffer_t* noalias nocapture readonly %testOne.buffer) #0 {
+entry:
+  %buf_host = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputOne.buffer, i32 0, i32 1
+  %inputOne.host = load i8*, i8** %buf_host, align 4
+  %buf_min = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputOne.buffer, i32 0, i32 4, i32 0
+  %inputOne.min.0 = load i32, i32* %buf_min, align 4
+  %buf_host10 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputTwo.buffer, i32 0, i32 1
+  %inputTwo.host = load i8*, i8** %buf_host10, align 4
+  %buf_min22 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputTwo.buffer, i32 0, i32 4, i32 0
+  %inputTwo.min.0 = load i32, i32* %buf_min22, align 4
+  %buf_host27 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 1
+  %testOne.host = load i8*, i8** %buf_host27, align 4
+  %buf_extent31 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 2, i32 0
+  %testOne.extent.0 = load i32, i32* %buf_extent31, align 4
+  %buf_min39 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 4, i32 0
+  %testOne.min.0 = load i32, i32* %buf_min39, align 4
+  %0 = ashr i32 %testOne.extent.0, 4
+  %1 = icmp sgt i32 %0, 0
+  br i1 %1, label %"for testOne.s0.x.x.preheader", label %"end for testOne.s0.x.x"
+
+"for testOne.s0.x.x.preheader":                   ; preds = %entry
+  %2 = bitcast i8* %inputOne.host to i16*
+  %3 = bitcast i8* %inputTwo.host to i16*
+  %4 = bitcast i8* %testOne.host to i32*
+  br label %"for testOne.s0.x.x"
+
+"for testOne.s0.x.x":                             ; preds = %"for testOne.s0.x.x", %"for testOne.s0.x.x.preheader"
+  %.phi = phi i32* [ %4, %"for testOne.s0.x.x.preheader" ], [ %.inc, %"for testOne.s0.x.x" ]
+  %testOne.s0.x.x = phi i32 [ 0, %"for testOne.s0.x.x.preheader" ], [ %50, %"for testOne.s0.x.x" ]
+  %5 = shl nsw i32 %testOne.s0.x.x, 4
+  %6 = add nsw i32 %5, %testOne.min.0
+  %7 = shl nsw i32 %6, 1
+  %8 = sub nsw i32 %7, %inputOne.min.0
+  %9 = getelementptr inbounds i16, i16* %2, i32 %8
+  %10 = bitcast i16* %9 to <16 x i16>*
+  %11 = load <16 x i16>, <16 x i16>* %10, align 2, !tbaa !5
+  %12 = add nsw i32 %8, 15
+  %13 = getelementptr inbounds i16, i16* %2, i32 %12
+  %14 = bitcast i16* %13 to <16 x i16>*
+  %15 = load <16 x i16>, <16 x i16>* %14, align 2, !tbaa !5
+  %16 = shufflevector <16 x i16> %11, <16 x i16> %15, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %17 = add nsw i32 %8, 1
+  %18 = getelementptr inbounds i16, i16* %2, i32 %17
+  %19 = bitcast i16* %18 to <16 x i16>*
+  %20 = load <16 x i16>, <16 x i16>* %19, align 2, !tbaa !5
+  %21 = add nsw i32 %8, 16
+  %22 = getelementptr inbounds i16, i16* %2, i32 %21
+  %23 = bitcast i16* %22 to <16 x i16>*
+  %24 = load <16 x i16>, <16 x i16>* %23, align 2, !tbaa !5
+  %25 = shufflevector <16 x i16> %20, <16 x i16> %24, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %26 = shufflevector <16 x i16> %16, <16 x i16> %25, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  %27 = sub nsw i32 %7, %inputTwo.min.0
+  %28 = getelementptr inbounds i16, i16* %3, i32 %27
+  %29 = bitcast i16* %28 to <16 x i16>*
+  %30 = load <16 x i16>, <16 x i16>* %29, align 2, !tbaa !8
+  %31 = add nsw i32 %27, 15
+  %32 = getelementptr inbounds i16, i16* %3, i32 %31
+  %33 = bitcast i16* %32 to <16 x i16>*
+  %34 = load <16 x i16>, <16 x i16>* %33, align 2, !tbaa !8
+  %35 = shufflevector <16 x i16> %30, <16 x i16> %34, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %36 = add nsw i32 %27, 1
+  %37 = getelementptr inbounds i16, i16* %3, i32 %36
+  %38 = bitcast i16* %37 to <16 x i16>*
+  %39 = load <16 x i16>, <16 x i16>* %38, align 2, !tbaa !8
+  %40 = add nsw i32 %27, 16
+  %41 = getelementptr inbounds i16, i16* %3, i32 %40
+  %42 = bitcast i16* %41 to <16 x i16>*
+  %43 = load <16 x i16>, <16 x i16>* %42, align 2, !tbaa !8
+  %44 = shufflevector <16 x i16> %39, <16 x i16> %43, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %45 = shufflevector <16 x i16> %35, <16 x i16> %44, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  %46 = bitcast <32 x i16> %26 to <16 x i32>
+  %47 = bitcast <32 x i16> %45 to <16 x i32>
+  %48 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32> %46, <16 x i32> %47)
+  %49 = bitcast i32* %.phi to <16 x i32>*
+  store <16 x i32> %48, <16 x i32>* %49, align 4, !tbaa !10
+  %50 = add nuw nsw i32 %testOne.s0.x.x, 1
+  %51 = icmp eq i32 %50, %0
+  %.inc = getelementptr i32, i32* %.phi, i32 16
+  br i1 %51, label %"end for testOne.s0.x.x", label %"for testOne.s0.x.x"
+
+"end for testOne.s0.x.x":                         ; preds = %"for testOne.s0.x.x", %entry
+  %52 = add nsw i32 %testOne.extent.0, 15
+  %53 = ashr i32 %52, 4
+  %54 = icmp sgt i32 %53, %0
+  br i1 %54, label %"for testOne.s0.x.x44.preheader", label %destructor_block
+
+"for testOne.s0.x.x44.preheader":                 ; preds = %"end for testOne.s0.x.x"
+  %55 = add nsw i32 %testOne.min.0, %testOne.extent.0
+  %56 = shl nsw i32 %55, 1
+  %57 = sub nsw i32 %56, %inputOne.min.0
+  %58 = add nsw i32 %57, -32
+  %59 = bitcast i8* %inputOne.host to i16*
+  %60 = getelementptr inbounds i16, i16* %59, i32 %58
+  %61 = bitcast i16* %60 to <16 x i16>*
+  %62 = load <16 x i16>, <16 x i16>* %61, align 2
+  %63 = add nsw i32 %57, -17
+  %64 = getelementptr inbounds i16, i16* %59, i32 %63
+  %65 = bitcast i16* %64 to <16 x i16>*
+  %66 = load <16 x i16>, <16 x i16>* %65, align 2
+  %67 = add nsw i32 %57, -31
+  %68 = getelementptr inbounds i16, i16* %59, i32 %67
+  %69 = bitcast i16* %68 to <16 x i16>*
+  %70 = load <16 x i16>, <16 x i16>* %69, align 2
+  %71 = add nsw i32 %57, -16
+  %72 = getelementptr inbounds i16, i16* %59, i32 %71
+  %73 = bitcast i16* %72 to <16 x i16>*
+  %74 = load <16 x i16>, <16 x i16>* %73, align 2
+  %75 = shufflevector <16 x i16> %70, <16 x i16> %74, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %76 = sub nsw i32 %56, %inputTwo.min.0
+  %77 = add nsw i32 %76, -32
+  %78 = bitcast i8* %inputTwo.host to i16*
+  %79 = getelementptr inbounds i16, i16* %78, i32 %77
+  %80 = bitcast i16* %79 to <16 x i16>*
+  %81 = load <16 x i16>, <16 x i16>* %80, align 2
+  %82 = add nsw i32 %76, -17
+  %83 = getelementptr inbounds i16, i16* %78, i32 %82
+  %84 = bitcast i16* %83 to <16 x i16>*
+  %85 = load <16 x i16>, <16 x i16>* %84, align 2
+  %86 = shufflevector <16 x i16> %81, <16 x i16> %85, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %87 = add nsw i32 %76, -31
+  %88 = getelementptr inbounds i16, i16* %78, i32 %87
+  %89 = bitcast i16* %88 to <16 x i16>*
+  %90 = load <16 x i16>, <16 x i16>* %89, align 2
+  %91 = add nsw i32 %76, -16
+  %92 = getelementptr inbounds i16, i16* %78, i32 %91
+  %93 = bitcast i16* %92 to <16 x i16>*
+  %94 = load <16 x i16>, <16 x i16>* %93, align 2
+  %95 = shufflevector <16 x i16> %90, <16 x i16> %94, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %96 = shufflevector <16 x i16> %86, <16 x i16> %95, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  %97 = bitcast <32 x i16> %96 to <16 x i32>
+  %98 = add nsw i32 %testOne.extent.0, -16
+  %99 = bitcast i8* %testOne.host to i32*
+  %100 = getelementptr inbounds i32, i32* %99, i32 %98
+  %101 = shufflevector <16 x i16> %62, <16 x i16> %66, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %102 = shufflevector <16 x i16> %101, <16 x i16> %75, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  %103 = bitcast <32 x i16> %102 to <16 x i32>
+  %104 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32> %103, <16 x i32> %97)
+  %105 = bitcast i32* %100 to <16 x i32>*
+  store <16 x i32> %104, <16 x i32>* %105, align 4, !tbaa !10
+  br label %destructor_block
+
+destructor_block:                                 ; preds = %"for testOne.s0.x.x44.preheader", %"end for testOne.s0.x.x"
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double" }
+attributes #1 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double" }
+
+!5 = !{!6, !6, i64 0}
+!6 = !{!"inputOne", !7}
+!7 = !{!"Halide buffer"}
+!8 = !{!9, !9, i64 0}
+!9 = !{!"inputTwo", !7}
+!10 = !{!11, !11, i64 0}
+!11 = !{!"testOne", !7}

Added: llvm/trunk/test/CodeGen/Hexagon/vmpa-halide-test.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vmpa-halide-test.ll?rev=277168&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vmpa-halide-test.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vmpa-halide-test.ll Fri Jul 29 11:44:27 2016
@@ -0,0 +1,145 @@
+; RUN: llc -march=hexagon < %s
+; This test checks for a compiler assert, so the test just needs to compile
+; for it to pass.
+
+target triple = "hexagon-unknown--elf"
+
+%struct.buffer_t = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+
+; Function Attrs: norecurse nounwind
+define i32 @__testOne(%struct.buffer_t* noalias nocapture readonly %inputOne.buffer, %struct.buffer_t* noalias nocapture readonly %inputTwo.buffer, %struct.buffer_t* noalias nocapture readonly %testOne.buffer) #0 {
+entry:
+  %buf_host = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputOne.buffer, i32 0, i32 1
+  %inputOne.host = load i8*, i8** %buf_host, align 4
+  %buf_min = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputOne.buffer, i32 0, i32 4, i32 0
+  %inputOne.min.0 = load i32, i32* %buf_min, align 4
+  %buf_host10 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputTwo.buffer, i32 0, i32 1
+  %inputTwo.host = load i8*, i8** %buf_host10, align 4
+  %buf_min22 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %inputTwo.buffer, i32 0, i32 4, i32 0
+  %inputTwo.min.0 = load i32, i32* %buf_min22, align 4
+  %buf_host27 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 1
+  %testOne.host = load i8*, i8** %buf_host27, align 4
+  %buf_extent31 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 2, i32 0
+  %testOne.extent.0 = load i32, i32* %buf_extent31, align 4
+  %buf_min39 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %testOne.buffer, i32 0, i32 4, i32 0
+  %testOne.min.0 = load i32, i32* %buf_min39, align 4
+  %0 = ashr i32 %testOne.extent.0, 6
+  %1 = icmp sgt i32 %0, 0
+  br i1 %1, label %"for testOne.s0.x.x.preheader", label %"end for testOne.s0.x.x"
+
+"for testOne.s0.x.x.preheader":                   ; preds = %entry
+  %2 = bitcast i8* %testOne.host to i16*
+  br label %"for testOne.s0.x.x"
+
+"for testOne.s0.x.x":                             ; preds = %"for testOne.s0.x.x", %"for testOne.s0.x.x.preheader"
+  %.phi = phi i16* [ %2, %"for testOne.s0.x.x.preheader" ], [ %.inc, %"for testOne.s0.x.x" ]
+  %testOne.s0.x.x = phi i32 [ 0, %"for testOne.s0.x.x.preheader" ], [ %38, %"for testOne.s0.x.x" ]
+  %3 = shl nsw i32 %testOne.s0.x.x, 6
+  %4 = add nsw i32 %3, %testOne.min.0
+  %5 = shl nsw i32 %4, 1
+  %6 = sub nsw i32 %5, %inputOne.min.0
+  %7 = getelementptr inbounds i8, i8* %inputOne.host, i32 %6
+  %8 = bitcast i8* %7 to <64 x i8>*
+  %9 = load <64 x i8>, <64 x i8>* %8, align 1, !tbaa !5
+  %10 = add nsw i32 %6, 64
+  %11 = getelementptr inbounds i8, i8* %inputOne.host, i32 %10
+  %12 = bitcast i8* %11 to <64 x i8>*
+  %13 = load <64 x i8>, <64 x i8>* %12, align 1, !tbaa !5
+  %14 = shufflevector <64 x i8> %9, <64 x i8> %13, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126>
+  %15 = shufflevector <64 x i8> %9, <64 x i8> %13, <64 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127>
+  %16 = shufflevector <64 x i8> %14, <64 x i8> %15, <128 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
+  %17 = sub nsw i32 %5, %inputTwo.min.0
+  %18 = getelementptr inbounds i8, i8* %inputTwo.host, i32 %17
+  %19 = bitcast i8* %18 to <64 x i8>*
+  %20 = load <64 x i8>, <64 x i8>* %19, align 1, !tbaa !8
+  %21 = add nsw i32 %17, 64
+  %22 = getelementptr inbounds i8, i8* %inputTwo.host, i32 %21
+  %23 = bitcast i8* %22 to <64 x i8>*
+  %24 = load <64 x i8>, <64 x i8>* %23, align 1, !tbaa !8
+  %25 = shufflevector <64 x i8> %20, <64 x i8> %24, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126>
+  %26 = shufflevector <64 x i8> %20, <64 x i8> %24, <64 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127>
+  %27 = shufflevector <64 x i8> %25, <64 x i8> %26, <128 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
+  %28 = bitcast <128 x i8> %16 to <32 x i32>
+  %29 = bitcast <128 x i8> %27 to <32 x i32>
+  %30 = tail call <32 x i32> @llvm.hexagon.V6.vmpabuuv(<32 x i32> %28, <32 x i32> %29)
+  %31 = bitcast <32 x i32> %30 to <64 x i16>
+  %32 = shufflevector <64 x i16> %31, <64 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %33 = bitcast i16* %.phi to <32 x i16>*
+  store <32 x i16> %32, <32 x i16>* %33, align 2, !tbaa !10
+  %34 = shufflevector <64 x i16> %31, <64 x i16> undef, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  %35 = or i32 %3, 32
+  %36 = getelementptr inbounds i16, i16* %2, i32 %35
+  %37 = bitcast i16* %36 to <32 x i16>*
+  store <32 x i16> %34, <32 x i16>* %37, align 2, !tbaa !10
+  %38 = add nuw nsw i32 %testOne.s0.x.x, 1
+  %39 = icmp eq i32 %38, %0
+  %.inc = getelementptr i16, i16* %.phi, i32 64
+  br i1 %39, label %"end for testOne.s0.x.x", label %"for testOne.s0.x.x"
+
+"end for testOne.s0.x.x":                         ; preds = %"for testOne.s0.x.x", %entry
+  %40 = add nsw i32 %testOne.extent.0, 63
+  %41 = ashr i32 %40, 6
+  %42 = icmp sgt i32 %41, %0
+  br i1 %42, label %"for testOne.s0.x.x44.preheader", label %destructor_block
+
+"for testOne.s0.x.x44.preheader":                 ; preds = %"end for testOne.s0.x.x"
+  %43 = add nsw i32 %testOne.min.0, %testOne.extent.0
+  %44 = shl nsw i32 %43, 1
+  %45 = sub nsw i32 %44, %inputOne.min.0
+  %46 = add nsw i32 %45, -128
+  %47 = getelementptr inbounds i8, i8* %inputOne.host, i32 %46
+  %48 = bitcast i8* %47 to <64 x i8>*
+  %49 = load <64 x i8>, <64 x i8>* %48, align 1
+  %50 = add nsw i32 %45, -64
+  %51 = getelementptr inbounds i8, i8* %inputOne.host, i32 %50
+  %52 = bitcast i8* %51 to <64 x i8>*
+  %53 = load <64 x i8>, <64 x i8>* %52, align 1
+  %54 = shufflevector <64 x i8> %49, <64 x i8> %53, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126>
+  %55 = shufflevector <64 x i8> %49, <64 x i8> %53, <64 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127>
+  %56 = shufflevector <64 x i8> %54, <64 x i8> %55, <128 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
+  %57 = sub nsw i32 %44, %inputTwo.min.0
+  %58 = add nsw i32 %57, -128
+  %59 = getelementptr inbounds i8, i8* %inputTwo.host, i32 %58
+  %60 = bitcast i8* %59 to <64 x i8>*
+  %61 = load <64 x i8>, <64 x i8>* %60, align 1
+  %62 = add nsw i32 %57, -64
+  %63 = getelementptr inbounds i8, i8* %inputTwo.host, i32 %62
+  %64 = bitcast i8* %63 to <64 x i8>*
+  %65 = load <64 x i8>, <64 x i8>* %64, align 1
+  %66 = shufflevector <64 x i8> %61, <64 x i8> %65, <64 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126>
+  %67 = shufflevector <64 x i8> %61, <64 x i8> %65, <64 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127>
+  %68 = shufflevector <64 x i8> %66, <64 x i8> %67, <128 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127>
+  %69 = bitcast <128 x i8> %56 to <32 x i32>
+  %70 = bitcast <128 x i8> %68 to <32 x i32>
+  %71 = tail call <32 x i32> @llvm.hexagon.V6.vmpabuuv(<32 x i32> %69, <32 x i32> %70)
+  %72 = bitcast <32 x i32> %71 to <64 x i16>
+  %73 = add nsw i32 %testOne.extent.0, -64
+  %74 = bitcast i8* %testOne.host to i16*
+  %75 = getelementptr inbounds i16, i16* %74, i32 %73
+  %76 = bitcast i16* %75 to <32 x i16>*
+  %77 = add nsw i32 %testOne.extent.0, -32
+  %78 = getelementptr inbounds i16, i16* %74, i32 %77
+  %79 = shufflevector <64 x i16> %72, <64 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %80 = shufflevector <64 x i16> %72, <64 x i16> undef, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  %81 = bitcast i16* %78 to <32 x i16>*
+  store <32 x i16> %79, <32 x i16>* %76, align 2, !tbaa !10
+  store <32 x i16> %80, <32 x i16>* %81, align 2, !tbaa !10
+  br label %destructor_block
+
+destructor_block:                                 ; preds = %"for testOne.s0.x.x44.preheader", %"end for testOne.s0.x.x"
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpabuuv(<32 x i32>, <32 x i32>) #1
+
+attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double" }
+attributes #1 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double" }
+
+!5 = !{!6, !6, i64 0}
+!6 = !{!"inputOne", !7}
+!7 = !{!"Halide buffer"}
+!8 = !{!9, !9, i64 0}
+!9 = !{!"inputTwo", !7}
+!10 = !{!11, !11, i64 0}
+!11 = !{!"testOne", !7}

Added: llvm/trunk/test/CodeGen/Hexagon/vpack_eo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/vpack_eo.ll?rev=277168&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/vpack_eo.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/vpack_eo.ll Fri Jul 29 11:44:27 2016
@@ -0,0 +1,73 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+target triple = "hexagon-unknown--elf"
+
+; CHECK-DAG: vpacke
+; CHECK-DAG: vpacko
+
+%struct.buffer_t = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+
+; Function Attrs: norecurse nounwind
+define i32 @__Strided_LoadTest(%struct.buffer_t* noalias nocapture readonly %InputOne.buffer, %struct.buffer_t* noalias nocapture readonly %InputTwo.buffer, %struct.buffer_t* noalias nocapture readonly %Strided_LoadTest.buffer) #0 {
+entry:
+  %buf_host = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %InputOne.buffer, i32 0, i32 1
+  %0 = bitcast i8** %buf_host to i16**
+  %InputOne.host45 = load i16*, i16** %0, align 4
+  %buf_host10 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %InputTwo.buffer, i32 0, i32 1
+  %1 = bitcast i8** %buf_host10 to i16**
+  %InputTwo.host46 = load i16*, i16** %1, align 4
+  %buf_host27 = getelementptr inbounds %struct.buffer_t, %struct.buffer_t* %Strided_LoadTest.buffer, i32 0, i32 1
+  %2 = bitcast i8** %buf_host27 to i16**
+  %Strided_LoadTest.host44 = load i16*, i16** %2, align 4
+  %3 = bitcast i16* %InputOne.host45 to <32 x i16>*
+  %4 = load <32 x i16>, <32 x i16>* %3, align 2, !tbaa !4
+  %5 = getelementptr inbounds i16, i16* %InputOne.host45, i32 32
+  %6 = bitcast i16* %5 to <32 x i16>*
+  %7 = load <32 x i16>, <32 x i16>* %6, align 2, !tbaa !4
+  %8 = shufflevector <32 x i16> %4, <32 x i16> %7, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
+  %9 = bitcast i16* %InputTwo.host46 to <32 x i16>*
+  %10 = load <32 x i16>, <32 x i16>* %9, align 2, !tbaa !7
+  %11 = getelementptr inbounds i16, i16* %InputTwo.host46, i32 32
+  %12 = bitcast i16* %11 to <32 x i16>*
+  %13 = load <32 x i16>, <32 x i16>* %12, align 2, !tbaa !7
+  %14 = shufflevector <32 x i16> %10, <32 x i16> %13, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
+  %15 = bitcast <32 x i16> %8 to <16 x i32>
+  %16 = bitcast <32 x i16> %14 to <16 x i32>
+  %17 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %15, <16 x i32> %16)
+  %18 = bitcast i16* %Strided_LoadTest.host44 to <16 x i32>*
+  store <16 x i32> %17, <16 x i32>* %18, align 2, !tbaa !9
+  %.inc = getelementptr i16, i16* %InputOne.host45, i32 64
+  %.inc49 = getelementptr i16, i16* %InputTwo.host46, i32 64
+  %.inc52 = getelementptr i16, i16* %Strided_LoadTest.host44, i32 32
+  %19 = bitcast i16* %.inc to <32 x i16>*
+  %20 = load <32 x i16>, <32 x i16>* %19, align 2, !tbaa !4
+  %21 = getelementptr inbounds i16, i16* %InputOne.host45, i32 96
+  %22 = bitcast i16* %21 to <32 x i16>*
+  %23 = load <32 x i16>, <32 x i16>* %22, align 2, !tbaa !4
+  %24 = shufflevector <32 x i16> %20, <32 x i16> %23, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
+  %25 = bitcast i16* %.inc49 to <32 x i16>*
+  %26 = load <32 x i16>, <32 x i16>* %25, align 2, !tbaa !7
+  %27 = getelementptr inbounds i16, i16* %InputTwo.host46, i32 96
+  %28 = bitcast i16* %27 to <32 x i16>*
+  %29 = load <32 x i16>, <32 x i16>* %28, align 2, !tbaa !7
+  %30 = shufflevector <32 x i16> %26, <32 x i16> %29, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
+  %31 = bitcast <32 x i16> %24 to <16 x i32>
+  %32 = bitcast <32 x i16> %30 to <16 x i32>
+  %33 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %31, <16 x i32> %32)
+  %34 = bitcast i16* %.inc52 to <16 x i32>*
+  store <16 x i32> %33, <16 x i32>* %34, align 2, !tbaa !9
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double" }
+attributes #1 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvx,-hvx-double" }
+
+!4 = !{!5, !5, i64 0}
+!5 = !{!"InputOne", !6}
+!6 = !{!"Halide buffer"}
+!7 = !{!8, !8, i64 0}
+!8 = !{!"InputTwo", !6}
+!9 = !{!10, !10, i64 0}
+!10 = !{!"Strided_LoadTest", !6}
