[llvm] r291693 - [ARM] More aggressive matching for vpadd and vpaddl.

Eli Friedman via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 11 11:33:38 PST 2017


Author: efriedma
Date: Wed Jan 11 13:33:38 2017
New Revision: 291693

URL: http://llvm.org/viewvc/llvm-project?rev=291693&view=rev
Log:
[ARM] More aggressive matching for vpadd and vpaddl.

The new matchers run after legalization; this keeps them simpler and avoids
blocking other optimizations.

Differential Revision: https://reviews.llvm.org/D27779
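
For illustration only (this isn't part of the patch): with clang's vector
extensions, the even/odd lane split plus add below used to lower to
vuzp.8 + vadd.i8, and after this change becomes a single vpadd.i8, as the
test updates further down show. The function name and typedefs here are
hypothetical:

  typedef signed char v16i8 __attribute__((vector_size(16)));
  typedef signed char v8i8 __attribute__((vector_size(8)));

  // add(vuzp.0, vuzp.1): now matched to vpadd.i8.
  v8i8 pairwise_add(v16i8 v) {
    v8i8 evens = __builtin_shufflevector(v, v, 0, 2, 4, 6, 8, 10, 12, 14);
    v8i8 odds  = __builtin_shufflevector(v, v, 1, 3, 5, 7, 9, 11, 13, 15);
    return evens + odds;
  }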


Modified:
    llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
    llvm/trunk/test/CodeGen/ARM/vpadd.ll
    llvm/trunk/test/CodeGen/ARM/vtrn.ll

Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp?rev=291693&r1=291692&r2=291693&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp Wed Jan 11 13:33:38 2017
@@ -9227,12 +9227,102 @@ SDValue combineSelectAndUseCommutative(S
   return SDValue();
 }
 
-// AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction
-// (only after legalization).
-static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
+static bool IsVUZPShuffleNode(SDNode *N) {
+  // VUZP shuffle node.
+  if (N->getOpcode() == ARMISD::VUZP)
+    return true;
+
+  // "VUZP" on i32 is an alias for VTRN.
+  if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
+    return true;
+
+  return false;
+}
+
+static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
+  // Look for ADD(VUZP.0, VUZP.1).
+  if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
+      N0 == N1)
+    return SDValue();
+
+  // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
+  if (!N->getValueType(0).is64BitVector())
+    return SDValue();
+
+  // Generate vpadd.
+  SelectionDAG &DAG = DCI.DAG;
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  SDLoc dl(N);
+  SDNode *Unzip = N0.getNode();
+  EVT VT = N->getValueType(0);
+
+  SmallVector<SDValue, 8> Ops;
+  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
+                                TLI.getPointerTy(DAG.getDataLayout())));
+  Ops.push_back(Unzip->getOperand(0));
+  Ops.push_back(Unzip->getOperand(1));
+
+  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
+}
+
+static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
+                                      TargetLowering::DAGCombinerInfo &DCI,
+                                      const ARMSubtarget *Subtarget) {
+  // Check for two extended operands.
+  if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
+        N1.getOpcode() == ISD::SIGN_EXTEND) &&
+      !(N0.getOpcode() == ISD::ZERO_EXTEND &&
+        N1.getOpcode() == ISD::ZERO_EXTEND))
+    return SDValue();
+
+  SDValue N00 = N0.getOperand(0);
+  SDValue N10 = N1.getOperand(0);
 
+  // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1)) or the zext equivalent.
+  if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
+      N00 == N10)
+    return SDValue();
+
+  // We only recognize Q register paddl here; this can't be reached until
+  // after type legalization.
+  if (!N00.getValueType().is64BitVector() ||
+      !N0.getValueType().is128BitVector())
+    return SDValue();
+
+  // Generate vpaddl.
+  SelectionDAG &DAG = DCI.DAG;
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  SDLoc dl(N);
+  EVT VT = N->getValueType(0);
+
+  SmallVector<SDValue, 8> Ops;
+  // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
+  unsigned Opcode;
+  if (N0.getOpcode() == ISD::SIGN_EXTEND)
+    Opcode = Intrinsic::arm_neon_vpaddls;
+  else
+    Opcode = Intrinsic::arm_neon_vpaddlu;
+  Ops.push_back(DAG.getConstant(Opcode, dl,
+                                TLI.getPointerTy(DAG.getDataLayout())));
+  EVT ElemTy = N00.getValueType().getVectorElementType();
+  unsigned NumElts = VT.getVectorNumElements();
+  EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
+  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
+                               N00.getOperand(0), N00.getOperand(1));
+  Ops.push_back(Concat);
+
+  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
+}
+
+// FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
+// an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
+// much easier to match.
+static SDValue
+AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
+                               TargetLowering::DAGCombinerInfo &DCI,
+                               const ARMSubtarget *Subtarget) {
   // Only perform this optimization after legalization, and only if NEON is
   // available. We also expect both operands to be BUILD_VECTORs.
   if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
@@ -9288,6 +9378,10 @@ static SDValue AddCombineToVPADDL(SDNode
       return SDValue();
   }
 
+  // Don't generate vpaddl+vmovn; we'll match it to vpadd later.
+  if (Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
+    return SDValue();
+
   // Create VPADDL node.
   SelectionDAG &DAG = DCI.DAG;
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -9559,9 +9653,15 @@ static SDValue PerformADDCCombine(SDNode
 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const ARMSubtarget *Subtarget){
+  // Attempt to create vpadd for this add.
+  if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
+    return Result;
 
   // Attempt to create vpaddl for this add.
-  if (SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget))
+  if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
+    return Result;
+  if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
+                                                      Subtarget))
     return Result;
 
   // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))

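A note on the vpaddl combine above, for readers of the patch: VUZP.0 and
VUZP.1 of (a, b) are the even and odd lanes of concat(a, b), so adding
their extensions is exactly vpaddl applied to that concatenation; that is
why the combine builds a CONCAT_VECTORS of the unzip's operands. A
hypothetical scalar reference for the s8 case (names are mine, not from
the patch):

  #include <array>
  #include <cstdint>

  // vpaddl.s8 on concat(a, b): each result lane is the sign-extended sum
  // of one adjacent pair of input lanes.
  std::array<int16_t, 8> vpaddls8_ref(const std::array<int8_t, 8> &a,
                                      const std::array<int8_t, 8> &b) {
    std::array<int8_t, 16> concat;
    for (int i = 0; i < 8; ++i) {
      concat[i] = a[i];      // low half:  first VUZP operand
      concat[8 + i] = b[i];  // high half: second VUZP operand
    }
    std::array<int16_t, 8> result;
    for (int i = 0; i < 8; ++i)
      result[i] = int16_t(concat[2 * i]) + int16_t(concat[2 * i + 1]);
    return result;
  }
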
Modified: llvm/trunk/test/CodeGen/ARM/vpadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vpadd.ll?rev=291693&r1=291692&r2=291693&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vpadd.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vpadd.ll Wed Jan 11 13:33:38 2017
@@ -214,14 +214,11 @@ define <2 x i64> @vpaddlQu32(<4 x i32>*
 }
 
 ; Combine vuzp+vadd->vpadd.
-; FIXME: Implement this optimization
-define void @addCombineToVPADD(<16 x i8> *%cbcr, <8 x i8> *%X) nounwind ssp {
-; CHECK-LABEL: addCombineToVPADD:
+define void @addCombineToVPADD_i8(<16 x i8> *%cbcr, <8 x i8> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADD_i8:
 ; CHECK:       @ BB#0:
 ; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT:    vorr d18, d17, d17
-; CHECK-NEXT:    vuzp.8 d16, d18
-; CHECK-NEXT:    vadd.i8 d16, d18, d16
+; CHECK-NEXT:    vpadd.i8 d16, d16, d17
 ; CHECK-NEXT:    vstr d16, [r1]
 ; CHECK-NEXT:    mov pc, lr
   %tmp = load <16 x i8>, <16 x i8>* %cbcr
@@ -233,15 +230,44 @@ define void @addCombineToVPADD(<16 x i8>
   ret void
 }
 
+; Combine vuzp+vadd->vpadd.
+define void @addCombineToVPADD_i16(<8 x i16> *%cbcr, <4 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADD_i16:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpadd.i16 d16, d16, d17
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    mov pc, lr
+  %tmp = load <8 x i16>, <8 x i16>* %cbcr
+  %tmp1 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %add = add <4 x i16> %tmp3, %tmp1
+  store <4 x i16> %add, <4 x i16>* %X, align 8
+  ret void
+}
+
+; Combine vtrn+vadd->vpadd.
+define void @addCombineToVPADD_i32(<4 x i32> *%cbcr, <2 x i32> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADD_i32:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpadd.i32 d16, d16, d17
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    mov pc, lr
+  %tmp = load <4 x i32>, <4 x i32>* %cbcr
+  %tmp1 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
+  %tmp3 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
+  %add = add <2 x i32> %tmp3, %tmp1
+  store <2 x i32> %add, <2 x i32>* %X, align 8
+  ret void
+}
+
 ; Combine vuzp+vaddl->vpaddl
-; FIXME: Implement this optimization.
-define void @addCombineToVPADDL_sext(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp {
-; CHECK-LABEL: addCombineToVPADDL_sext:
+define void @addCombineToVPADDLq_s8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_s8:
 ; CHECK:       @ BB#0:
 ; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT:    vorr d18, d17, d17
-; CHECK-NEXT:    vuzp.8 d16, d18
-; CHECK-NEXT:    vaddl.s8 q8, d18, d16
+; CHECK-NEXT:    vpaddl.s8 q8, q8
 ; CHECK-NEXT:    vst1.64 {d16, d17}, [r1]
 ; CHECK-NEXT:    mov pc, lr
   %tmp = load <16 x i8>, <16 x i8>* %cbcr
@@ -254,10 +280,200 @@ define void @addCombineToVPADDL_sext(<16
   ret void
 }
 
-; Legalization produces a EXTRACT_VECTOR_ELT DAG node which performs an extend from
-; i16 to i32. In this case the input for the formed VPADDL needs to be a vector of i16s.
-define <2 x i16> @fromExtendingExtractVectorElt(<4 x i16> %in) {
-; CHECK-LABEL: fromExtendingExtractVectorElt:
+; Combine vuzp+vaddl->vpaddl
+; FIXME: Legalization butchers the shuffles.
+define void @addCombineToVPADDL_s8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDL_s8:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vmov.i16 d18, #0x8
+; CHECK-NEXT:    vneg.s16 d18, d18
+; CHECK-NEXT:    vext.8 d19, d16, d16, #1
+; CHECK-NEXT:    vshl.i16 d16, d16, #8
+; CHECK-NEXT:    vshl.i16 d17, d19, #8
+; CHECK-NEXT:    vshl.s16 d16, d16, d18
+; CHECK-NEXT:    vshl.s16 d17, d17, d18
+; CHECK-NEXT:    vadd.i16 d16, d17, d16
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    mov pc, lr
+  %tmp = load <16 x i8>, <16 x i8>* %cbcr
+  %tmp1 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %tmp4 = sext <4 x i8> %tmp3 to <4 x i16>
+  %tmp5 = sext <4 x i8> %tmp1 to <4 x i16>
+  %add = add <4 x i16> %tmp4, %tmp5
+  store <4 x i16> %add, <4 x i16>* %X, align 8
+  ret void
+}
+
+; Combine vuzp+vaddl->vpaddl
+define void @addCombineToVPADDLq_u8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_u8:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpaddl.u8 q8, q8
+; CHECK-NEXT:    vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    mov pc, lr
+  %tmp = load <16 x i8>, <16 x i8>* %cbcr
+  %tmp1 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+  %tmp5 = zext <8 x i8> %tmp1 to <8 x i16>
+  %add = add <8 x i16> %tmp4, %tmp5
+  store <8 x i16> %add, <8 x i16>* %X, align 8
+  ret void
+}
+
+; In theory, it's possible to match this to vpaddl, but rearranging the
+; shuffle is awkward, so this doesn't match at the moment.
+define void @addCombineToVPADDLq_u8_early_zext(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_u8_early_zext:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vmovl.u8 q9, d17
+; CHECK-NEXT:    vmovl.u8 q8, d16
+; CHECK-NEXT:    vuzp.16 q8, q9
+; CHECK-NEXT:    vadd.i16 q8, q8, q9
+; CHECK-NEXT:    vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    mov pc, lr
+  %tmp = load <16 x i8>, <16 x i8>* %cbcr
+  %tmp1 = zext <16 x i8> %tmp to <16 x i16>
+  %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %add = add <8 x i16> %tmp2, %tmp3
+  store <8 x i16> %add, <8 x i16>* %X, align 8
+  ret void
+}
+
+; Combine vuzp+vaddl->vpaddl
+; FIXME: Legalization butchers the shuffle.
+define void @addCombineToVPADDL_u8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDL_u8:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vext.8 d18, d16, d16, #1
+; CHECK-NEXT:    vbic.i16 d16, #0xff00
+; CHECK-NEXT:    vbic.i16 d18, #0xff00
+; CHECK-NEXT:    vadd.i16 d16, d18, d16
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    mov pc, lr
+  %tmp = load <16 x i8>, <16 x i8>* %cbcr
+  %tmp1 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %tmp4 = zext <4 x i8> %tmp3 to <4 x i16>
+  %tmp5 = zext <4 x i8> %tmp1 to <4 x i16>
+  %add = add <4 x i16> %tmp4, %tmp5
+  store <4 x i16> %add, <4 x i16>* %X, align 8
+  ret void
+}
+
+; Matching to vpaddl.8 requires matching shuffle(zext()).
+define void @addCombineToVPADDL_u8_early_zext(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDL_u8_early_zext:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vmovl.u8 q8, d16
+; CHECK-NEXT:    vpadd.i16 d16, d16, d17
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    mov pc, lr
+  %tmp = load <16 x i8>, <16 x i8>* %cbcr
+  %tmp1 = zext <16 x i8> %tmp to <16 x i16>
+  %tmp2 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <16 x i16> %tmp1, <16 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %add = add <4 x i16> %tmp2, %tmp3
+  store <4 x i16> %add, <4 x i16>* %X, align 8
+  ret void
+}
+
+; Combine vuzp+vaddl->vpaddl
+define void @addCombineToVPADDLq_s16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_s16:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpaddl.s16 q8, q8
+; CHECK-NEXT:    vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    mov pc, lr
+  %tmp = load <8 x i16>, <8 x i16>* %cbcr
+  %tmp1 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %tmp4 = sext <4 x i16> %tmp3 to <4 x i32>
+  %tmp5 = sext <4 x i16> %tmp1 to <4 x i32>
+  %add = add <4 x i32> %tmp4, %tmp5
+  store <4 x i32> %add, <4 x i32>* %X, align 8
+  ret void
+}
+
+; Combine vuzp+vaddl->vpaddl
+define void @addCombineToVPADDLq_u16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_u16:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpaddl.u16 q8, q8
+; CHECK-NEXT:    vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    mov pc, lr
+  %tmp = load <8 x i16>, <8 x i16>* %cbcr
+  %tmp1 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp3 = shufflevector <8 x i16> %tmp, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
+  %tmp5 = zext <4 x i16> %tmp1 to <4 x i32>
+  %add = add <4 x i32> %tmp4, %tmp5
+  store <4 x i32> %add, <4 x i32>* %X, align 8
+  ret void
+}
+
+; Combine vtrn+vaddl->vpaddl
+define void @addCombineToVPADDLq_s32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_s32:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpaddl.s32 q8, q8
+; CHECK-NEXT:    vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    mov pc, lr
+  %tmp = load <4 x i32>, <4 x i32>* %cbcr
+  %tmp1 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
+  %tmp3 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
+  %tmp4 = sext <2 x i32> %tmp3 to <2 x i64>
+  %tmp5 = sext <2 x i32> %tmp1 to <2 x i64>
+  %add = add <2 x i64> %tmp4, %tmp5
+  store <2 x i64> %add, <2 x i64>* %X, align 8
+  ret void
+}
+
+; Combine vtrn+vaddl->vpaddl
+define void @addCombineToVPADDLq_u32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind ssp {
+; CHECK-LABEL: addCombineToVPADDLq_u32:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpaddl.u32 q8, q8
+; CHECK-NEXT:    vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    mov pc, lr
+  %tmp = load <4 x i32>, <4 x i32>* %cbcr
+  %tmp1 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
+  %tmp3 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
+  %tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
+  %tmp5 = zext <2 x i32> %tmp1 to <2 x i64>
+  %add = add <2 x i64> %tmp4, %tmp5
+  store <2 x i64> %add, <2 x i64>* %X, align 8
+  ret void
+}
+
+; Legalization promotes the <4 x i8> to <4 x i16>.
+define <4 x i8> @fromExtendingExtractVectorElt_i8(<8 x i8> %in) {
+; CHECK-LABEL: fromExtendingExtractVectorElt_i8:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vpaddl.s8 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %tmp1 = shufflevector <8 x i8> %in, <8 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %tmp2 = shufflevector <8 x i8> %in, <8 x i8> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %x = add <4 x i8> %tmp2, %tmp1
+  ret <4 x i8> %x
+}
+
+; Legalization promotes the <2 x i16> to <2 x i32>.
+define <2 x i16> @fromExtendingExtractVectorElt_i16(<4 x i16> %in) {
+; CHECK-LABEL: fromExtendingExtractVectorElt_i16:
 ; CHECK:       @ BB#0:
 ; CHECK-NEXT:    vmov d16, r0, r1
 ; CHECK-NEXT:    vpaddl.s16 d16, d16

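The two early_zext tests above come down to where the extend sits
relative to the shuffle: the combine matches extend-of-unzip, not
unzip-of-extend. A hypothetical sketch with clang's vector extensions
(__builtin_convertvector performs the element-wise zext):

  typedef unsigned char  v16u8  __attribute__((vector_size(16)));
  typedef unsigned char  v8u8   __attribute__((vector_size(8)));
  typedef unsigned short v8u16  __attribute__((vector_size(16)));
  typedef unsigned short v16u16 __attribute__((vector_size(32)));

  // zext(shuffle(v)): matched, becomes vpaddl.u8 q8, q8.
  v8u16 late_zext(v16u8 v) {
    v8u8 e = __builtin_shufflevector(v, v, 0, 2, 4, 6, 8, 10, 12, 14);
    v8u8 o = __builtin_shufflevector(v, v, 1, 3, 5, 7, 9, 11, 13, 15);
    return __builtin_convertvector(e, v8u16) +
           __builtin_convertvector(o, v8u16);
  }

  // shuffle(zext(v)): the unzip operates on i16 lanes, so the vpaddl
  // pattern doesn't fire.
  v8u16 early_zext(v16u8 v) {
    v16u16 w = __builtin_convertvector(v, v16u16);
    v8u16 e = __builtin_shufflevector(w, w, 0, 2, 4, 6, 8, 10, 12, 14);
    v8u16 o = __builtin_shufflevector(w, w, 1, 3, 5, 7, 9, 11, 13, 15);
    return e + o;
  }
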
Modified: llvm/trunk/test/CodeGen/ARM/vtrn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vtrn.ll?rev=291693&r1=291692&r2=291693&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vtrn.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vtrn.ll Wed Jan 11 13:33:38 2017
@@ -70,14 +70,14 @@ define <2 x i32> @vtrni32(<2 x i32>* %A,
 ; CHECK-NEXT:    vldr d16, [r1]
 ; CHECK-NEXT:    vldr d17, [r0]
 ; CHECK-NEXT:    vtrn.32 d17, d16
-; CHECK-NEXT:    vadd.i32 d16, d17, d16
+; CHECK-NEXT:    vmul.i32 d16, d17, d16
 ; CHECK-NEXT:    vmov r0, r1, d16
 ; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
 	%tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
-        %tmp5 = add <2 x i32> %tmp3, %tmp4
+        %tmp5 = mul <2 x i32> %tmp3, %tmp4
 	ret <2 x i32> %tmp5
 }
 
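For completeness on the vtrn.ll change: with the new combine, the old
vtrn.32 + vadd.i32 pair in vtrni32 would itself be folded into vpadd.i32
(on two 32-bit lanes, transpose and unzip are the same permutation), so
the test switches to mul to keep the vtrn visible. A quick illustration
of that equivalence, written as C++-style comments (mine, not from the
patch):

  // For d-registers with 32-bit lanes:
  //   vuzp.0(a, b) = { a[0], b[0] } == vtrn.0(a, b)
  //   vuzp.1(a, b) = { a[1], b[1] } == vtrn.1(a, b)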
