[llvm] eb1e95d - [ARM] Extend more reductions during lowering

David Green via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 19 00:58:20 PDT 2021


Author: David Green
Date: 2021-07-19T08:58:03+01:00
New Revision: eb1e95dbdf2b4ffe94259199d0970d072fe24959

URL: https://github.com/llvm/llvm-project/commit/eb1e95dbdf2b4ffe94259199d0970d072fe24959
DIFF: https://github.com/llvm/llvm-project/commit/eb1e95dbdf2b4ffe94259199d0970d072fe24959.diff

LOG: [ARM] Extend more reductions during lowering

This relaxes the VMLAV and VADDV reduction recognition code to handle
smaller-than-legal types, extending them as needed. Such extension was
already handled for some reductions; this patch extends it to more
types in a more generic way. If a smaller-than-legal value is found,
it is extended to the legal type as needed.

Differential Revision: https://reviews.llvm.org/D106051
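
As a rough sketch of the new matching rule, here is a standalone C++
model with toy types (VecTy, extTypeMatches and the field names are
invented for illustration and are not the real MVT/EVT API): the
relaxed check accepts any input with the same lane count that is no
wider than a legal type, so v4i8 and v4i16 now match against v4i32.

  #include <cassert>
  #include <initializer_list>

  struct VecTy {
    unsigned ElemBits;
    unsigned NumElems;
    unsigned totalBits() const { return ElemBits * NumElems; }
  };

  // Mirrors the ExtTypeMatches lambda added below: same lane count and
  // no wider overall (EVT::bitsLE compares total size).
  static bool extTypeMatches(VecTy A, std::initializer_list<VecTy> ExtTypes) {
    for (VecTy Ty : ExtTypes)
      if (A.NumElems == Ty.NumElems && A.totalBits() <= Ty.totalBits())
        return true;
    return false;
  }

  int main() {
    VecTy V4i8{8, 4}, V4i16{16, 4}, V4i32{32, 4}, V8i16{16, 8};
    assert(extTypeMatches(V4i8, {V4i32}));   // smaller than legal: accepted
    assert(extTypeMatches(V4i16, {V4i32}));  // smaller than legal: accepted
    assert(!extTypeMatches(V8i16, {V4i32})); // lane counts must still agree
    return 0;
  }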

Added: 
    

Modified: 
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
    llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
    llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
    llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 261be2ce74025..501175bf631bd 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -16011,6 +16011,13 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
 
   // If the input vector is smaller than legal (v4i8/v4i16 for example) we can
   // extend it and use v4i32 instead.
+  auto ExtTypeMatches = [](SDValue A, ArrayRef<MVT> ExtTypes) {
+    EVT AVT = A.getValueType();
+    return any_of(ExtTypes, [&](MVT Ty) {
+      return AVT.getVectorNumElements() == Ty.getVectorNumElements() &&
+             AVT.bitsLE(Ty);
+    });
+  };
   auto ExtendIfNeeded = [&](SDValue A, unsigned ExtendCode) {
     EVT AVT = A.getValueType();
     if (!AVT.is128BitVector())
@@ -16024,7 +16031,7 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
     if (ResVT != RetTy || N0->getOpcode() != ExtendCode)
       return SDValue();
     SDValue A = N0->getOperand(0);
-    if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
+    if (ExtTypeMatches(A, ExtTypes))
       return ExtendIfNeeded(A, ExtendCode);
     return SDValue();
   };
@@ -16038,7 +16045,7 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
     if (Ext->getOpcode() != ExtendCode)
       return SDValue();
     SDValue A = Ext->getOperand(0);
-    if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
+    if (ExtTypeMatches(A, ExtTypes))
       return ExtendIfNeeded(A, ExtendCode);
     return SDValue();
   };
@@ -16067,9 +16074,7 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
       return false;
     A = ExtA->getOperand(0);
     B = ExtB->getOperand(0);
-    if (A.getValueType() == B.getValueType() &&
-        llvm::any_of(ExtTypes,
-                     [&A](MVT Ty) { return A.getValueType() == Ty; })) {
+    if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) {
       A = ExtendIfNeeded(A, ExtendCode);
       B = ExtendIfNeeded(B, ExtendCode);
       return true;
@@ -16101,9 +16106,7 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
       return false;
     A = ExtA->getOperand(0);
     B = ExtB->getOperand(0);
-    if (A.getValueType() == B.getValueType() &&
-        llvm::any_of(ExtTypes,
-                     [&A](MVT Ty) { return A.getValueType() == Ty; })) {
+    if (ExtTypeMatches(A, ExtTypes) && ExtTypeMatches(B, ExtTypes)) {
       A = ExtendIfNeeded(A, ExtendCode);
       B = ExtendIfNeeded(B, ExtendCode);
       return true;
@@ -16146,11 +16149,9 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
     return DAG.getNode(ARMISD::VADDVs, dl, ResVT, A);
   if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}))
     return DAG.getNode(ARMISD::VADDVu, dl, ResVT, A);
-  if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND,
-                          {MVT::v4i8, MVT::v4i16, MVT::v4i32}))
+  if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}))
     return Create64bitNode(ARMISD::VADDLVs, {A});
-  if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND,
-                          {MVT::v4i8, MVT::v4i16, MVT::v4i32}))
+  if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}))
     return Create64bitNode(ARMISD::VADDLVu, {A});
   if (SDValue A = IsVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}))
     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
@@ -16164,11 +16165,9 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
     return DAG.getNode(ARMISD::VADDVps, dl, ResVT, A, Mask);
   if (SDValue A = IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask))
     return DAG.getNode(ARMISD::VADDVpu, dl, ResVT, A, Mask);
-  if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND,
-                              {MVT::v4i8, MVT::v4i16, MVT::v4i32}, Mask))
+  if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}, Mask))
     return Create64bitNode(ARMISD::VADDLVps, {A, Mask});
-  if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND,
-                              {MVT::v4i8, MVT::v4i16, MVT::v4i32}, Mask))
+  if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}, Mask))
     return Create64bitNode(ARMISD::VADDLVpu, {A, Mask});
   if (SDValue A = IsPredVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, Mask))
     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
@@ -16182,11 +16181,11 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
     return DAG.getNode(ARMISD::VMLAVs, dl, ResVT, A, B);
   if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B))
     return DAG.getNode(ARMISD::VMLAVu, dl, ResVT, A, B);
-  if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND,
-              {MVT::v16i8, MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A, B))
+  if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32},
+              A, B))
     return Create64bitNode(ARMISD::VMLALVs, {A, B});
-  if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND,
-              {MVT::v16i8, MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A, B))
+  if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v16i8, MVT::v8i16, MVT::v4i32},
+              A, B))
     return Create64bitNode(ARMISD::VMLALVu, {A, B});
   if (IsVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B))
     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
@@ -16195,17 +16194,17 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,
                        DAG.getNode(ARMISD::VMLAVu, dl, MVT::i32, A, B));
 
-  if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, Mask))
+  if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B,
+                  Mask))
     return DAG.getNode(ARMISD::VMLAVps, dl, ResVT, A, B, Mask);
-  if (IsPredVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, Mask))
+  if (IsPredVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B,
+                  Mask))
     return DAG.getNode(ARMISD::VMLAVpu, dl, ResVT, A, B, Mask);
-  if (IsPredVMLAV(MVT::i64, ISD::SIGN_EXTEND,
-                  {MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A,
-                  B, Mask))
+  if (IsPredVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B,
+                  Mask))
     return Create64bitNode(ARMISD::VMLALVps, {A, B, Mask});
-  if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND,
-                  {MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A,
-                  B, Mask))
+  if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B,
+                  Mask))
     return Create64bitNode(ARMISD::VMLALVpu, {A, B, Mask});
   if (IsPredVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B, Mask))
     return DAG.getNode(ISD::TRUNCATE, dl, ResVT,

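The ExtendIfNeeded half of the combine can be sketched in the same toy
model (again illustrative only, reusing VecTy from the sketch above):
inputs narrower than 128 bits are widened lane-wise to the legal
128-bit type with the same lane count before the reduction node is
formed.

  // Toy counterpart of the ExtendIfNeeded lambda in the hunk above:
  // leave 128-bit vectors alone and widen anything smaller to 128 bits
  // while keeping the lane count, e.g. v4i8/v4i16 -> v4i32, v8i8 -> v8i16.
  static VecTy extendIfNeeded(VecTy A) {
    if (A.totalBits() == 128)
      return A;                                 // already legal, used as-is
    return VecTy{128 / A.NumElems, A.NumElems}; // per-lane sign/zero extend
  }
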
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
index d5900e12a1adc..83d7275358ce3 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
@@ -292,24 +292,8 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8_v8i32_sext(<8 x i8> %x) {
 ; CHECK-LABEL: add_v8i8_v8i32_sext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.u16 r0, q0[6]
-; CHECK-NEXT:    vmov.u16 r1, q0[4]
-; CHECK-NEXT:    vmov q1[2], q1[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[7]
-; CHECK-NEXT:    vmov.u16 r1, q0[5]
-; CHECK-NEXT:    vmov q1[3], q1[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[2]
-; CHECK-NEXT:    vmov.u16 r1, q0[0]
-; CHECK-NEXT:    vmovlb.s8 q1, q1
-; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[3]
-; CHECK-NEXT:    vmov.u16 r1, q0[1]
-; CHECK-NEXT:    vmovlb.s16 q1, q1
-; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
-; CHECK-NEXT:    vmovlb.s8 q0, q2
-; CHECK-NEXT:    vmovlb.s16 q0, q0
-; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vaddv.s16 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %xx = sext <8 x i8> %x to <8 x i32>
@@ -1032,24 +1016,8 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8_v8i32_acc_sext(<8 x i8> %x, i32 %a) {
 ; CHECK-LABEL: add_v8i8_v8i32_acc_sext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.u16 r1, q0[6]
-; CHECK-NEXT:    vmov.u16 r2, q0[4]
-; CHECK-NEXT:    vmov q1[2], q1[0], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[7]
-; CHECK-NEXT:    vmov.u16 r2, q0[5]
-; CHECK-NEXT:    vmov q1[3], q1[1], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[2]
-; CHECK-NEXT:    vmov.u16 r2, q0[0]
-; CHECK-NEXT:    vmovlb.s8 q1, q1
-; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[3]
-; CHECK-NEXT:    vmov.u16 r2, q0[1]
-; CHECK-NEXT:    vmovlb.s16 q1, q1
-; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
-; CHECK-NEXT:    vmovlb.s8 q0, q2
-; CHECK-NEXT:    vmovlb.s16 q0, q0
-; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vaddva.s16 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %xx = sext <8 x i8> %x to <8 x i32>

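For reference, a plausible C-level shape of the add_v8i8_v8i32_sext
test above (hypothetical source code; the tests themselves are written
directly in IR):

  // Sign-extend eight i8 lanes to i32 and sum them. After this patch the
  // <8 x i32> reduction lowers to the two-instruction vmovlb.s8 +
  // vaddv.s16 sequence shown above.
  int add_v8i8_v8i32_sext(const signed char x[8]) {
    int sum = 0;
    for (int i = 0; i < 8; i++)
      sum += (int)x[i];
    return sum;
  }
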
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
index d49b41ec31529..0946b0103e345 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
@@ -524,37 +524,10 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8_v8i32_zext(<8 x i8> %x, <8 x i8> %b) {
 ; CHECK-LABEL: add_v8i8_v8i32_zext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #16
-; CHECK-NEXT:    sub sp, #16
 ; CHECK-NEXT:    vmovlb.u8 q0, q0
-; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    vstrw.32 q0, [r0]
-; CHECK-NEXT:    vmovlb.u8 q0, q1
-; CHECK-NEXT:    vcmp.i16 eq, q0, zr
-; CHECK-NEXT:    vmov.i8 q0, #0x0
-; CHECK-NEXT:    vmov.i8 q1, #0xff
-; CHECK-NEXT:    vldrh.u32 q2, [r0]
-; CHECK-NEXT:    vpsel q0, q1, q0
-; CHECK-NEXT:    vmov.u16 r1, q0[2]
-; CHECK-NEXT:    vmov.u16 r2, q0[0]
-; CHECK-NEXT:    vmov q1[2], q1[0], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[3]
-; CHECK-NEXT:    vmov.u16 r2, q0[1]
-; CHECK-NEXT:    vmov q1[3], q1[1], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[6]
-; CHECK-NEXT:    vcmp.i32 ne, q1, zr
-; CHECK-NEXT:    vmov.i32 q1, #0x0
-; CHECK-NEXT:    vmov.u16 r2, q0[4]
-; CHECK-NEXT:    vpsel q1, q2, q1
-; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[7]
-; CHECK-NEXT:    vmov.u16 r2, q0[5]
-; CHECK-NEXT:    vldrh.u32 q0, [r0, #8]
-; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
-; CHECK-NEXT:    vpt.i32 ne, q2, zr
-; CHECK-NEXT:    vaddt.i32 q1, q1, q0
-; CHECK-NEXT:    vaddv.u32 r0, q1
-; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vpt.i16 eq, q1, zr
+; CHECK-NEXT:    vaddvt.u16 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c = icmp eq <8 x i8> %b, zeroinitializer
@@ -567,45 +540,10 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8_v8i32_sext(<8 x i8> %x, <8 x i8> %b) {
 ; CHECK-LABEL: add_v8i8_v8i32_sext:
 ; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q0, q0
 ; CHECK-NEXT:    vmovlb.u8 q1, q1
-; CHECK-NEXT:    vmov.u16 r0, q0[2]
-; CHECK-NEXT:    vmov.u16 r1, q0[0]
-; CHECK-NEXT:    vcmp.i16 eq, q1, zr
-; CHECK-NEXT:    vmov.i8 q1, #0x0
-; CHECK-NEXT:    vmov.i8 q3, #0xff
-; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[3]
-; CHECK-NEXT:    vmov.u16 r1, q0[1]
-; CHECK-NEXT:    vpsel q1, q3, q1
-; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q1[2]
-; CHECK-NEXT:    vmov.u16 r1, q1[0]
-; CHECK-NEXT:    vmovlb.s8 q2, q2
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q1[3]
-; CHECK-NEXT:    vmov.u16 r1, q1[1]
-; CHECK-NEXT:    vmovlb.s16 q2, q2
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[6]
-; CHECK-NEXT:    vcmp.i32 ne, q3, zr
-; CHECK-NEXT:    vmov.i32 q3, #0x0
-; CHECK-NEXT:    vmov.u16 r1, q0[4]
-; CHECK-NEXT:    vpsel q2, q2, q3
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[7]
-; CHECK-NEXT:    vmov.u16 r1, q0[5]
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q1[6]
-; CHECK-NEXT:    vmov.u16 r1, q1[4]
-; CHECK-NEXT:    vmovlb.s8 q0, q3
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q1[7]
-; CHECK-NEXT:    vmov.u16 r1, q1[5]
-; CHECK-NEXT:    vmovlb.s16 q0, q0
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vpt.i32 ne, q3, zr
-; CHECK-NEXT:    vaddt.i32 q2, q2, q0
-; CHECK-NEXT:    vaddv.u32 r0, q2
+; CHECK-NEXT:    vpt.i16 eq, q1, zr
+; CHECK-NEXT:    vaddvt.s16 r0, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c = icmp eq <8 x i8> %b, zeroinitializer

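A plausible C-level shape of the predicated add_v8i8_v8i32_sext test
above (hypothetical source; the mask corresponds to the select-on-icmp
in the IR):

  // Only lanes where b[i] == 0 contribute, matching the icmp/select in
  // the IR and the vpt.i16 eq + vaddvt.s16 pair it now compiles to.
  int add_v8i8_v8i32_sext_pred(const signed char x[8],
                               const signed char b[8]) {
    int sum = 0;
    for (int i = 0; i < 8; i++)
      if (b[i] == 0)
        sum += (int)x[i];
    return sum;
  }
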
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
index ba5cf0881ce34..9c0c33bfa40fa 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
@@ -169,73 +169,8 @@ entry:
 define arm_aapcs_vfpcc i64 @add_v8i8i16_v8i64_zext(<8 x i16> %x, <8 x i8> %y) {
 ; CHECK-LABEL: add_v8i8i16_v8i64_zext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
 ; CHECK-NEXT:    vmovlb.u8 q1, q1
-; CHECK-NEXT:    vmov.u16 r0, q0[1]
-; CHECK-NEXT:    vmov.u16 r1, q0[0]
-; CHECK-NEXT:    vmov.u16 r2, q1[0]
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r1, q1[1]
-; CHECK-NEXT:    vmov.i64 q2, #0xffff
-; CHECK-NEXT:    vmov q4[2], q4[0], r2, r1
-; CHECK-NEXT:    vand q3, q3, q2
-; CHECK-NEXT:    vand q4, q4, q2
-; CHECK-NEXT:    vmov r0, s14
-; CHECK-NEXT:    vmov r1, s18
-; CHECK-NEXT:    vmov r2, s12
-; CHECK-NEXT:    vmov r3, s16
-; CHECK-NEXT:    umull r0, r1, r0, r1
-; CHECK-NEXT:    umlal r0, r1, r2, r3
-; CHECK-NEXT:    vmov.u16 r2, q0[3]
-; CHECK-NEXT:    vmov.u16 r3, q0[2]
-; CHECK-NEXT:    vmov q3[2], q3[0], r3, r2
-; CHECK-NEXT:    vmov.u16 r3, q1[3]
-; CHECK-NEXT:    vmov.u16 r2, q1[2]
-; CHECK-NEXT:    vand q3, q3, q2
-; CHECK-NEXT:    vmov q4[2], q4[0], r2, r3
-; CHECK-NEXT:    vmov r12, s12
-; CHECK-NEXT:    vand q4, q4, q2
-; CHECK-NEXT:    vmov r2, s16
-; CHECK-NEXT:    vmov r3, s18
-; CHECK-NEXT:    umlal r0, r1, r12, r2
-; CHECK-NEXT:    vmov r2, s14
-; CHECK-NEXT:    umull r2, r3, r2, r3
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    vmov.u16 r2, q0[5]
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vmov.u16 r3, q0[4]
-; CHECK-NEXT:    vmov q3[2], q3[0], r3, r2
-; CHECK-NEXT:    vmov.u16 r3, q1[5]
-; CHECK-NEXT:    vmov.u16 r2, q1[4]
-; CHECK-NEXT:    vand q3, q3, q2
-; CHECK-NEXT:    vmov q4[2], q4[0], r2, r3
-; CHECK-NEXT:    vmov r12, s12
-; CHECK-NEXT:    vand q4, q4, q2
-; CHECK-NEXT:    vmov r2, s16
-; CHECK-NEXT:    vmov r3, s18
-; CHECK-NEXT:    umlal r0, r1, r12, r2
-; CHECK-NEXT:    vmov r2, s14
-; CHECK-NEXT:    umull r2, r3, r2, r3
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    vmov.u16 r2, q0[7]
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vmov.u16 r3, q0[6]
-; CHECK-NEXT:    vmov q0[2], q0[0], r3, r2
-; CHECK-NEXT:    vmov.u16 r3, q1[7]
-; CHECK-NEXT:    vmov.u16 r2, q1[6]
-; CHECK-NEXT:    vand q0, q0, q2
-; CHECK-NEXT:    vmov q1[2], q1[0], r2, r3
-; CHECK-NEXT:    vmov r12, s0
-; CHECK-NEXT:    vand q1, q1, q2
-; CHECK-NEXT:    vmov r2, s4
-; CHECK-NEXT:    vmov r3, s6
-; CHECK-NEXT:    umlal r0, r1, r12, r2
-; CHECK-NEXT:    vmov r2, s2
-; CHECK-NEXT:    umull r2, r3, r2, r3
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vmlalv.u16 r0, r1, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %xx = zext <8 x i16> %x to <8 x i64>
@@ -248,38 +183,8 @@ entry:
 define arm_aapcs_vfpcc i64 @add_v8i8i16_v8i64_sext(<8 x i16> %x, <8 x i8> %y) {
 ; CHECK-LABEL: add_v8i8i16_v8i64_sext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.u16 r1, q1[1]
-; CHECK-NEXT:    vmov.s16 r0, q0[1]
-; CHECK-NEXT:    sxtb r1, r1
-; CHECK-NEXT:    vmov.u16 r3, q1[0]
-; CHECK-NEXT:    smull r0, r1, r0, r1
-; CHECK-NEXT:    vmov.s16 r2, q0[0]
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    smlal r0, r1, r2, r3
-; CHECK-NEXT:    vmov.u16 r3, q1[2]
-; CHECK-NEXT:    vmov.s16 r2, q0[2]
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    smlal r0, r1, r2, r3
-; CHECK-NEXT:    vmov.u16 r3, q1[3]
-; CHECK-NEXT:    vmov.s16 r2, q0[3]
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    smlal r0, r1, r2, r3
-; CHECK-NEXT:    vmov.u16 r3, q1[4]
-; CHECK-NEXT:    vmov.s16 r2, q0[4]
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    smlal r0, r1, r2, r3
-; CHECK-NEXT:    vmov.u16 r3, q1[5]
-; CHECK-NEXT:    vmov.s16 r2, q0[5]
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    smlal r0, r1, r2, r3
-; CHECK-NEXT:    vmov.u16 r3, q1[6]
-; CHECK-NEXT:    vmov.s16 r2, q0[6]
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    smlal r0, r1, r2, r3
-; CHECK-NEXT:    vmov.u16 r3, q1[7]
-; CHECK-NEXT:    vmov.s16 r2, q0[7]
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    smlal r0, r1, r2, r3
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmlalv.s16 r0, r1, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %xx = sext <8 x i16> %x to <8 x i64>
@@ -432,23 +337,9 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8_v8i32_zext(<8 x i8> %x, <8 x i8> %y) {
 ; CHECK-LABEL: add_v8i8_v8i32_zext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #32
-; CHECK-NEXT:    sub sp, #32
 ; CHECK-NEXT:    vmovlb.u8 q1, q1
-; CHECK-NEXT:    add r0, sp, #16
 ; CHECK-NEXT:    vmovlb.u8 q0, q0
-; CHECK-NEXT:    mov r1, sp
-; CHECK-NEXT:    vstrw.32 q1, [r0]
-; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vldrh.u32 q0, [r0, #8]
-; CHECK-NEXT:    vldrh.u32 q1, [r1, #8]
-; CHECK-NEXT:    vldrh.u32 q2, [r1]
-; CHECK-NEXT:    vmul.i32 q0, q1, q0
-; CHECK-NEXT:    vldrh.u32 q1, [r0]
-; CHECK-NEXT:    vmul.i32 q1, q2, q1
-; CHECK-NEXT:    vadd.i32 q0, q1, q0
-; CHECK-NEXT:    vaddv.u32 r0, q0
-; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vmlav.u16 r0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %xx = zext <8 x i8> %x to <8 x i32>
@@ -461,42 +352,9 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8_v8i32_sext(<8 x i8> %x, <8 x i8> %y) {
 ; CHECK-LABEL: add_v8i8_v8i32_sext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.u16 r0, q1[6]
-; CHECK-NEXT:    vmov.u16 r1, q1[4]
-; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q1[7]
-; CHECK-NEXT:    vmov.u16 r1, q1[5]
-; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[6]
-; CHECK-NEXT:    vmov.u16 r1, q0[4]
-; CHECK-NEXT:    vmovlb.s8 q2, q2
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[7]
-; CHECK-NEXT:    vmov.u16 r1, q0[5]
-; CHECK-NEXT:    vmovlb.s16 q2, q2
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q1[2]
-; CHECK-NEXT:    vmovlb.s8 q3, q3
-; CHECK-NEXT:    vmov.u16 r1, q1[0]
-; CHECK-NEXT:    vmovlb.s16 q3, q3
-; CHECK-NEXT:    vmul.i32 q2, q3, q2
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q1[3]
-; CHECK-NEXT:    vmov.u16 r1, q1[1]
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[2]
-; CHECK-NEXT:    vmov.u16 r1, q0[0]
-; CHECK-NEXT:    vmovlb.s8 q1, q3
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[3]
-; CHECK-NEXT:    vmov.u16 r1, q0[1]
-; CHECK-NEXT:    vmovlb.s16 q1, q1
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vmovlb.s8 q0, q3
-; CHECK-NEXT:    vmovlb.s16 q0, q0
-; CHECK-NEXT:    vmul.i32 q0, q0, q1
-; CHECK-NEXT:    vadd.i32 q0, q0, q2
-; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmlav.s16 r0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %xx = sext <8 x i8> %x to <8 x i32>
@@ -509,22 +367,8 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8i16_v8i32_zext(<8 x i8> %x, <8 x i16> %y) {
 ; CHECK-LABEL: add_v8i8i16_v8i32_zext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #32
-; CHECK-NEXT:    sub sp, #32
-; CHECK-NEXT:    mov r0, sp
 ; CHECK-NEXT:    vmovlb.u8 q0, q0
-; CHECK-NEXT:    add r1, sp, #16
-; CHECK-NEXT:    vstrw.32 q1, [r0]
-; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vldrh.u32 q0, [r0, #8]
-; CHECK-NEXT:    vldrh.u32 q1, [r1, #8]
-; CHECK-NEXT:    vldrh.u32 q2, [r1]
-; CHECK-NEXT:    vmul.i32 q0, q1, q0
-; CHECK-NEXT:    vldrh.u32 q1, [r0]
-; CHECK-NEXT:    vmul.i32 q1, q2, q1
-; CHECK-NEXT:    vadd.i32 q0, q1, q0
-; CHECK-NEXT:    vaddv.u32 r0, q0
-; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vmlav.u16 r0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %xx = zext <8 x i8> %x to <8 x i32>
@@ -537,33 +381,8 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8i16_v8i32_sext(<8 x i8> %x, <8 x i16> %y) {
 ; CHECK-LABEL: add_v8i8i16_v8i32_sext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #16
-; CHECK-NEXT:    sub sp, #16
-; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    vmov.u16 r1, q0[6]
-; CHECK-NEXT:    vmov.u16 r2, q0[4]
-; CHECK-NEXT:    vstrw.32 q1, [r0]
-; CHECK-NEXT:    vmov q1[2], q1[0], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[7]
-; CHECK-NEXT:    vmov.u16 r2, q0[5]
-; CHECK-NEXT:    vldrh.s32 q2, [r0, #8]
-; CHECK-NEXT:    vmov q1[3], q1[1], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[2]
-; CHECK-NEXT:    vmovlb.s8 q1, q1
-; CHECK-NEXT:    vmov.u16 r2, q0[0]
-; CHECK-NEXT:    vmovlb.s16 q1, q1
-; CHECK-NEXT:    vmul.i32 q1, q1, q2
-; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[3]
-; CHECK-NEXT:    vmov.u16 r2, q0[1]
-; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
-; CHECK-NEXT:    vmovlb.s8 q0, q2
-; CHECK-NEXT:    vldrh.s32 q2, [r0]
-; CHECK-NEXT:    vmovlb.s16 q0, q0
-; CHECK-NEXT:    vmul.i32 q0, q0, q2
-; CHECK-NEXT:    vadd.i32 q0, q0, q1
-; CHECK-NEXT:    vaddv.u32 r0, q0
-; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmlav.s16 r0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %xx = sext <8 x i8> %x to <8 x i32>
@@ -924,32 +743,10 @@ entry:
 define arm_aapcs_vfpcc i64 @add_v4i8i16_v4i64_zext(<4 x i8> %x, <4 x i16> %y) {
 ; CHECK-LABEL: add_v4i8i16_v4i64_zext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
 ; CHECK-NEXT:    vmov.i32 q2, #0xff
 ; CHECK-NEXT:    vmovlb.u16 q1, q1
 ; CHECK-NEXT:    vand q0, q0, q2
-; CHECK-NEXT:    vmov.f32 s12, s4
-; CHECK-NEXT:    vmov.f32 s8, s0
-; CHECK-NEXT:    vmov.f32 s10, s1
-; CHECK-NEXT:    vmov.f32 s14, s5
-; CHECK-NEXT:    vmullb.u32 q4, q2, q3
-; CHECK-NEXT:    vmov.f32 s8, s2
-; CHECK-NEXT:    vmov r0, r1, d9
-; CHECK-NEXT:    vmov r2, r3, d8
-; CHECK-NEXT:    vmov.f32 s10, s3
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s7
-; CHECK-NEXT:    vmullb.u32 q1, q2, q0
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vmov r2, r3, d2
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vmov r2, r3, d3
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vmlalv.u32 r0, r1, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %xx = zext <4 x i8> %x to <4 x i64>
@@ -962,34 +759,10 @@ entry:
 define arm_aapcs_vfpcc i64 @add_v4i8i16_v4i64_sext(<4 x i8> %x, <4 x i16> %y) {
 ; CHECK-LABEL: add_v4i8i16_v4i64_sext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s10, s5
-; CHECK-NEXT:    vmov r2, s4
-; CHECK-NEXT:    vmov r3, s0
-; CHECK-NEXT:    vmov r0, s10
-; CHECK-NEXT:    vmov.f32 s8, s0
-; CHECK-NEXT:    vmov.f32 s10, s1
-; CHECK-NEXT:    vmov r1, s10
-; CHECK-NEXT:    vmov.f32 s8, s6
-; CHECK-NEXT:    vmov.f32 s10, s7
-; CHECK-NEXT:    vmov.f32 s4, s2
-; CHECK-NEXT:    vmov.f32 s6, s3
-; CHECK-NEXT:    sxth r2, r2
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    sxth r0, r0
-; CHECK-NEXT:    sxtb r1, r1
-; CHECK-NEXT:    smull r0, r1, r1, r0
-; CHECK-NEXT:    smlal r0, r1, r3, r2
-; CHECK-NEXT:    vmov r2, s8
-; CHECK-NEXT:    vmov r3, s4
-; CHECK-NEXT:    sxth r2, r2
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    smlal r0, r1, r3, r2
-; CHECK-NEXT:    vmov r2, s10
-; CHECK-NEXT:    vmov r3, s6
-; CHECK-NEXT:    sxth r2, r2
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    smlal r0, r1, r3, r2
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmlalv.s32 r0, r1, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %xx = sext <4 x i8> %x to <4 x i64>

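A plausible C-level shape of the add_v8i8i16_v8i64_sext test above
(hypothetical source): a mixed-width multiply-accumulate into i64 that
now becomes vmovlb.s8 + vmlalv.s16.

  // Widen i16 and i8 lanes to i64, multiply and sum. The <8 x i64>
  // reduction is now recognised as a VMLALV after extending the narrower
  // operand, instead of being expanded to scalar smull/smlal chains.
  long long add_v8i8i16_v8i64_sext(const short x[8],
                                   const signed char y[8]) {
    long long sum = 0;
    for (int i = 0; i < 8; i++)
      sum += (long long)x[i] * y[i];
    return sum;
  }
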
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
index dc42ecdf9bb81..463ed0e60bf2e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
@@ -231,148 +231,10 @@ entry:
 define arm_aapcs_vfpcc i64 @add_v8i8i16_v8i64_zext(<8 x i16> %x, <8 x i8> %y, <8 x i16> %b) {
 ; CHECK-LABEL: add_v8i8i16_v8i64_zext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r4, lr}
-; CHECK-NEXT:    push {r4, lr}
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    vmovlb.u8 q1, q1
-; CHECK-NEXT:    vmov.u16 r0, q0[1]
-; CHECK-NEXT:    vmov.u16 r1, q0[0]
-; CHECK-NEXT:    vmov.u16 r2, q1[0]
-; CHECK-NEXT:    vmov q4[2], q4[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r1, q1[1]
-; CHECK-NEXT:    vmov.i64 q3, #0xffff
-; CHECK-NEXT:    vmov q5[2], q5[0], r2, r1
-; CHECK-NEXT:    vand q4, q4, q3
-; CHECK-NEXT:    vand q5, q5, q3
-; CHECK-NEXT:    vmov r0, s18
-; CHECK-NEXT:    vmov.i8 q6, #0xff
-; CHECK-NEXT:    vmov r1, s22
-; CHECK-NEXT:    vcmp.i16 eq, q2, zr
-; CHECK-NEXT:    vmov r3, s20
-; CHECK-NEXT:    vmov.i8 q5, #0x0
-; CHECK-NEXT:    vmov r2, s16
-; CHECK-NEXT:    vpsel q2, q6, q5
-; CHECK-NEXT:    umull r0, r1, r0, r1
-; CHECK-NEXT:    umull r2, r3, r2, r3
-; CHECK-NEXT:    vmov q4[2], q4[0], r2, r0
-; CHECK-NEXT:    vmov.u16 r0, q2[2]
-; CHECK-NEXT:    vmov q4[3], q4[1], r3, r1
-; CHECK-NEXT:    vmov.u16 r1, q2[0]
-; CHECK-NEXT:    vmov q5[2], q5[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q2[3]
-; CHECK-NEXT:    vmov.u16 r1, q2[1]
-; CHECK-NEXT:    vmov q5[3], q5[1], r1, r0
-; CHECK-NEXT:    vcmp.i32 ne, q5, zr
-; CHECK-NEXT:    vmrs r0, p0
-; CHECK-NEXT:    and r2, r0, #1
-; CHECK-NEXT:    ubfx r1, r0, #4, #1
-; CHECK-NEXT:    rsbs r2, r2, #0
-; CHECK-NEXT:    rsbs r1, r1, #0
-; CHECK-NEXT:    vmov q5[2], q5[0], r2, r1
-; CHECK-NEXT:    vmov q5[3], q5[1], r2, r1
-; CHECK-NEXT:    vand q4, q4, q5
-; CHECK-NEXT:    vmov r1, r12, d9
-; CHECK-NEXT:    vmov r3, r2, d8
-; CHECK-NEXT:    adds.w lr, r3, r1
-; CHECK-NEXT:    vmov.u16 r3, q0[3]
-; CHECK-NEXT:    vmov.u16 r1, q0[2]
-; CHECK-NEXT:    adc.w r12, r12, r2
-; CHECK-NEXT:    vmov q4[2], q4[0], r1, r3
-; CHECK-NEXT:    vmov.u16 r3, q1[3]
-; CHECK-NEXT:    vmov.u16 r2, q1[2]
-; CHECK-NEXT:    vand q4, q4, q3
-; CHECK-NEXT:    vmov q5[2], q5[0], r2, r3
-; CHECK-NEXT:    vmov r1, s18
-; CHECK-NEXT:    vand q5, q5, q3
-; CHECK-NEXT:    vmov r3, s16
-; CHECK-NEXT:    vmov r2, s22
-; CHECK-NEXT:    vmov r4, s20
-; CHECK-NEXT:    umull r1, r2, r1, r2
-; CHECK-NEXT:    umull r3, r4, r3, r4
-; CHECK-NEXT:    vmov q4[2], q4[0], r3, r1
-; CHECK-NEXT:    ubfx r1, r0, #12, #1
-; CHECK-NEXT:    ubfx r0, r0, #8, #1
-; CHECK-NEXT:    rsbs r1, r1, #0
-; CHECK-NEXT:    rsbs r0, r0, #0
-; CHECK-NEXT:    vmov q4[3], q4[1], r4, r2
-; CHECK-NEXT:    vmov q5[2], q5[0], r0, r1
-; CHECK-NEXT:    vmov.u16 r4, q1[4]
-; CHECK-NEXT:    vmov q5[3], q5[1], r0, r1
-; CHECK-NEXT:    vand q4, q4, q5
-; CHECK-NEXT:    vmov r0, r1, d8
-; CHECK-NEXT:    vmov r2, r3, d9
-; CHECK-NEXT:    adds.w r0, r0, lr
-; CHECK-NEXT:    adc.w r1, r1, r12
-; CHECK-NEXT:    adds.w r12, r0, r2
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vmov.u16 r2, q0[5]
-; CHECK-NEXT:    vmov.u16 r3, q0[4]
-; CHECK-NEXT:    vmov q4[2], q4[0], r3, r2
-; CHECK-NEXT:    vmov.u16 r3, q1[5]
-; CHECK-NEXT:    vmov q5[2], q5[0], r4, r3
-; CHECK-NEXT:    vand q4, q4, q3
-; CHECK-NEXT:    vand q5, q5, q3
-; CHECK-NEXT:    vmov r2, s18
-; CHECK-NEXT:    vmov r3, s22
-; CHECK-NEXT:    vmov r4, s16
-; CHECK-NEXT:    vmov r0, s20
-; CHECK-NEXT:    umull r2, r3, r2, r3
-; CHECK-NEXT:    umull r0, r4, r4, r0
-; CHECK-NEXT:    vmov q4[2], q4[0], r0, r2
-; CHECK-NEXT:    vmov.u16 r0, q2[6]
-; CHECK-NEXT:    vmov.u16 r2, q2[4]
-; CHECK-NEXT:    vmov q4[3], q4[1], r4, r3
-; CHECK-NEXT:    vmov q5[2], q5[0], r2, r0
-; CHECK-NEXT:    vmov.u16 r0, q2[7]
-; CHECK-NEXT:    vmov.u16 r2, q2[5]
-; CHECK-NEXT:    vmov q5[3], q5[1], r2, r0
-; CHECK-NEXT:    vcmp.i32 ne, q5, zr
-; CHECK-NEXT:    vmrs r2, p0
-; CHECK-NEXT:    and r3, r2, #1
-; CHECK-NEXT:    ubfx r0, r2, #4, #1
-; CHECK-NEXT:    rsbs r3, r3, #0
-; CHECK-NEXT:    rsbs r0, r0, #0
-; CHECK-NEXT:    vmov q2[2], q2[0], r3, r0
-; CHECK-NEXT:    vmov q2[3], q2[1], r3, r0
-; CHECK-NEXT:    vand q2, q4, q2
-; CHECK-NEXT:    vmov r0, r3, d4
-; CHECK-NEXT:    adds.w r0, r0, r12
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vmov r3, r4, d5
-; CHECK-NEXT:    adds.w r12, r0, r3
-; CHECK-NEXT:    vmov.u16 r3, q0[7]
-; CHECK-NEXT:    adc.w lr, r1, r4
-; CHECK-NEXT:    vmov.u16 r4, q0[6]
-; CHECK-NEXT:    vmov q0[2], q0[0], r4, r3
-; CHECK-NEXT:    vmov.u16 r4, q1[7]
-; CHECK-NEXT:    vmov.u16 r0, q1[6]
-; CHECK-NEXT:    vand q0, q0, q3
-; CHECK-NEXT:    vmov q1[2], q1[0], r0, r4
-; CHECK-NEXT:    vmov r3, s2
-; CHECK-NEXT:    vand q1, q1, q3
-; CHECK-NEXT:    vmov r4, s0
-; CHECK-NEXT:    vmov r0, s6
-; CHECK-NEXT:    vmov r1, s4
-; CHECK-NEXT:    umull r0, r3, r3, r0
-; CHECK-NEXT:    umull r1, r4, r4, r1
-; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
-; CHECK-NEXT:    ubfx r0, r2, #12, #1
-; CHECK-NEXT:    ubfx r1, r2, #8, #1
-; CHECK-NEXT:    rsbs r0, r0, #0
-; CHECK-NEXT:    rsbs r1, r1, #0
-; CHECK-NEXT:    vmov q0[3], q0[1], r4, r3
-; CHECK-NEXT:    vmov q1[2], q1[0], r1, r0
-; CHECK-NEXT:    vmov q1[3], q1[1], r1, r0
-; CHECK-NEXT:    vand q0, q0, q1
-; CHECK-NEXT:    vmov r0, r1, d0
-; CHECK-NEXT:    vmov r2, r3, d1
-; CHECK-NEXT:    adds.w r0, r0, r12
-; CHECK-NEXT:    adc.w r1, r1, lr
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:    vpt.i16 eq, q2, zr
+; CHECK-NEXT:    vmlalvt.u16 r0, r1, q0, q1
+; CHECK-NEXT:    bx lr
 entry:
   %c = icmp eq <8 x i16> %b, zeroinitializer
   %xx = zext <8 x i16> %x to <8 x i64>
@@ -386,122 +248,10 @@ entry:
 define arm_aapcs_vfpcc i64 @add_v8i8i16_v8i64_sext(<8 x i16> %x, <8 x i8> %y, <8 x i16> %b) {
 ; CHECK-LABEL: add_v8i8i16_v8i64_sext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r4, lr}
-; CHECK-NEXT:    push {r4, lr}
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    vmov.i8 q3, #0x0
-; CHECK-NEXT:    vmov.i8 q4, #0xff
-; CHECK-NEXT:    vcmp.i16 eq, q2, zr
-; CHECK-NEXT:    vmov.s16 r3, q0[0]
-; CHECK-NEXT:    vpsel q2, q4, q3
-; CHECK-NEXT:    vmov.u16 r4, q1[4]
-; CHECK-NEXT:    vmov.u16 r0, q2[2]
-; CHECK-NEXT:    vmov.u16 r1, q2[0]
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q2[3]
-; CHECK-NEXT:    vmov.u16 r1, q2[1]
-; CHECK-NEXT:    sxtb r4, r4
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vcmp.i32 ne, q3, zr
-; CHECK-NEXT:    vmrs r0, p0
-; CHECK-NEXT:    and r2, r0, #1
-; CHECK-NEXT:    ubfx r1, r0, #4, #1
-; CHECK-NEXT:    rsbs r2, r2, #0
-; CHECK-NEXT:    rsbs r1, r1, #0
-; CHECK-NEXT:    vmov q3[2], q3[0], r2, r1
-; CHECK-NEXT:    vmov q3[3], q3[1], r2, r1
-; CHECK-NEXT:    vmov.u16 r2, q1[1]
-; CHECK-NEXT:    vmov.s16 r1, q0[1]
-; CHECK-NEXT:    sxtb r2, r2
-; CHECK-NEXT:    smull r1, r12, r1, r2
-; CHECK-NEXT:    vmov.u16 r2, q1[0]
-; CHECK-NEXT:    sxtb r2, r2
-; CHECK-NEXT:    smull r2, r3, r3, r2
-; CHECK-NEXT:    vmov q4[2], q4[0], r2, r1
-; CHECK-NEXT:    vmov q4[3], q4[1], r3, r12
-; CHECK-NEXT:    vand q3, q4, q3
-; CHECK-NEXT:    vmov r1, r12, d7
-; CHECK-NEXT:    vmov r3, r2, d6
-; CHECK-NEXT:    adds.w lr, r3, r1
-; CHECK-NEXT:    ubfx r3, r0, #12, #1
-; CHECK-NEXT:    ubfx r0, r0, #8, #1
-; CHECK-NEXT:    rsb.w r3, r3, #0
-; CHECK-NEXT:    rsb.w r0, r0, #0
-; CHECK-NEXT:    adc.w r12, r12, r2
-; CHECK-NEXT:    vmov q3[2], q3[0], r0, r3
-; CHECK-NEXT:    vmov.u16 r2, q1[2]
-; CHECK-NEXT:    vmov q3[3], q3[1], r0, r3
-; CHECK-NEXT:    vmov.u16 r3, q1[3]
-; CHECK-NEXT:    vmov.s16 r0, q0[3]
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    vmov.s16 r1, q0[2]
-; CHECK-NEXT:    sxtb r2, r2
-; CHECK-NEXT:    smull r0, r3, r0, r3
-; CHECK-NEXT:    smull r1, r2, r1, r2
-; CHECK-NEXT:    vmov q4[2], q4[0], r1, r0
-; CHECK-NEXT:    vmov q4[3], q4[1], r2, r3
-; CHECK-NEXT:    vand q3, q4, q3
-; CHECK-NEXT:    vmov r0, r1, d6
-; CHECK-NEXT:    vmov r2, r3, d7
-; CHECK-NEXT:    adds.w r0, r0, lr
-; CHECK-NEXT:    adc.w r1, r1, r12
-; CHECK-NEXT:    adds.w r12, r0, r2
-; CHECK-NEXT:    adc.w lr, r1, r3
-; CHECK-NEXT:    vmov.u16 r2, q2[6]
-; CHECK-NEXT:    vmov.u16 r3, q2[4]
-; CHECK-NEXT:    vmov.s16 r1, q0[4]
-; CHECK-NEXT:    vmov q3[2], q3[0], r3, r2
-; CHECK-NEXT:    vmov.u16 r2, q2[7]
-; CHECK-NEXT:    vmov.u16 r3, q2[5]
-; CHECK-NEXT:    smull r1, r4, r1, r4
-; CHECK-NEXT:    vmov q3[3], q3[1], r3, r2
-; CHECK-NEXT:    vcmp.i32 ne, q3, zr
-; CHECK-NEXT:    vmrs r2, p0
-; CHECK-NEXT:    and r0, r2, #1
-; CHECK-NEXT:    ubfx r3, r2, #4, #1
-; CHECK-NEXT:    rsbs r0, r0, #0
-; CHECK-NEXT:    rsbs r3, r3, #0
-; CHECK-NEXT:    vmov q2[2], q2[0], r0, r3
-; CHECK-NEXT:    vmov q2[3], q2[1], r0, r3
-; CHECK-NEXT:    vmov.u16 r3, q1[5]
-; CHECK-NEXT:    vmov.s16 r0, q0[5]
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    smull r0, r3, r0, r3
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov q3[3], q3[1], r4, r3
-; CHECK-NEXT:    vand q2, q3, q2
-; CHECK-NEXT:    vmov r0, r1, d4
-; CHECK-NEXT:    vmov r3, r4, d5
-; CHECK-NEXT:    adds.w r0, r0, r12
-; CHECK-NEXT:    adc.w r1, r1, lr
-; CHECK-NEXT:    adds.w r12, r0, r3
-; CHECK-NEXT:    ubfx r3, r2, #12, #1
-; CHECK-NEXT:    ubfx r2, r2, #8, #1
-; CHECK-NEXT:    rsb.w r3, r3, #0
-; CHECK-NEXT:    rsb.w r2, r2, #0
-; CHECK-NEXT:    vmov q2[2], q2[0], r2, r3
-; CHECK-NEXT:    vmov.u16 r0, q1[6]
-; CHECK-NEXT:    vmov q2[3], q2[1], r2, r3
-; CHECK-NEXT:    vmov.u16 r3, q1[7]
-; CHECK-NEXT:    adcs r1, r4
-; CHECK-NEXT:    vmov.s16 r2, q0[7]
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    vmov.s16 r4, q0[6]
-; CHECK-NEXT:    sxtb r0, r0
-; CHECK-NEXT:    smull r2, r3, r2, r3
-; CHECK-NEXT:    smull r0, r4, r4, r0
-; CHECK-NEXT:    vmov q0[2], q0[0], r0, r2
-; CHECK-NEXT:    vmov q0[3], q0[1], r4, r3
-; CHECK-NEXT:    vand q0, q0, q2
-; CHECK-NEXT:    vmov r0, r2, d0
-; CHECK-NEXT:    adds.w r0, r0, r12
-; CHECK-NEXT:    adcs r1, r2
-; CHECK-NEXT:    vmov r2, r3, d1
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vpop {d8, d9}
-; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vpt.i16 eq, q2, zr
+; CHECK-NEXT:    vmlalvt.s16 r0, r1, q0, q1
+; CHECK-NEXT:    bx lr
 entry:
   %c = icmp eq <8 x i16> %b, zeroinitializer
   %xx = sext <8 x i16> %x to <8 x i64>
@@ -773,44 +523,11 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8_v8i32_zext(<8 x i8> %x, <8 x i8> %y, <8 x i8> %b) {
 ; CHECK-LABEL: add_v8i8_v8i32_zext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #32
-; CHECK-NEXT:    sub sp, #32
 ; CHECK-NEXT:    vmovlb.u8 q1, q1
-; CHECK-NEXT:    add r0, sp, #16
 ; CHECK-NEXT:    vmovlb.u8 q0, q0
-; CHECK-NEXT:    mov r1, sp
-; CHECK-NEXT:    vstrw.32 q1, [r0]
-; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vmovlb.u8 q0, q2
-; CHECK-NEXT:    vmov.i8 q1, #0xff
-; CHECK-NEXT:    vcmp.i16 eq, q0, zr
-; CHECK-NEXT:    vmov.i8 q0, #0x0
-; CHECK-NEXT:    vpsel q0, q1, q0
-; CHECK-NEXT:    vldrh.u32 q2, [r0]
-; CHECK-NEXT:    vmov.u16 r2, q0[2]
-; CHECK-NEXT:    vmov.u16 r3, q0[0]
-; CHECK-NEXT:    vmov q1[2], q1[0], r3, r2
-; CHECK-NEXT:    vmov.u16 r2, q0[3]
-; CHECK-NEXT:    vmov.u16 r3, q0[1]
-; CHECK-NEXT:    vldrh.u32 q3, [r1]
-; CHECK-NEXT:    vmov q1[3], q1[1], r3, r2
-; CHECK-NEXT:    vcmp.i32 ne, q1, zr
-; CHECK-NEXT:    vmov.i32 q1, #0x0
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vmult.i32 q1, q3, q2
-; CHECK-NEXT:    vldrh.u32 q2, [r0, #8]
-; CHECK-NEXT:    vldrh.u32 q3, [r1, #8]
-; CHECK-NEXT:    vmov.u16 r0, q0[6]
-; CHECK-NEXT:    vmov.u16 r1, q0[4]
-; CHECK-NEXT:    vmul.i32 q2, q3, q2
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[7]
-; CHECK-NEXT:    vmov.u16 r1, q0[5]
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vpt.i32 ne, q3, zr
-; CHECK-NEXT:    vaddt.i32 q1, q1, q2
-; CHECK-NEXT:    vaddv.u32 r0, q1
-; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vmovlb.u8 q2, q2
+; CHECK-NEXT:    vpt.i16 eq, q2, zr
+; CHECK-NEXT:    vmlavt.u16 r0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %c = icmp eq <8 x i8> %b, zeroinitializer
@@ -825,66 +542,11 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8_v8i32_sext(<8 x i8> %x, <8 x i8> %y, <8 x i8> %b) {
 ; CHECK-LABEL: add_v8i8_v8i32_sext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vmov.u16 r0, q1[2]
-; CHECK-NEXT:    vmov.u16 r1, q1[0]
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q1[3]
-; CHECK-NEXT:    vmov.u16 r1, q1[1]
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
 ; CHECK-NEXT:    vmovlb.u8 q2, q2
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[2]
-; CHECK-NEXT:    vmovlb.s8 q3, q3
-; CHECK-NEXT:    vmov.u16 r1, q0[0]
-; CHECK-NEXT:    vmovlb.s16 q4, q3
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[3]
-; CHECK-NEXT:    vmov.u16 r1, q0[1]
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vcmp.i16 eq, q2, zr
-; CHECK-NEXT:    vmovlb.s8 q3, q3
-; CHECK-NEXT:    vmov.i8 q2, #0x0
-; CHECK-NEXT:    vmovlb.s16 q5, q3
-; CHECK-NEXT:    vmov.i8 q3, #0xff
-; CHECK-NEXT:    vpsel q2, q3, q2
-; CHECK-NEXT:    vmov.u16 r0, q2[2]
-; CHECK-NEXT:    vmov.u16 r1, q2[0]
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q2[3]
-; CHECK-NEXT:    vmov.u16 r1, q2[1]
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q1[6]
-; CHECK-NEXT:    vcmp.i32 ne, q3, zr
-; CHECK-NEXT:    vmov.i32 q3, #0x0
-; CHECK-NEXT:    vmov.u16 r1, q1[4]
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vmult.i32 q3, q5, q4
-; CHECK-NEXT:    vmov q4[2], q4[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q1[7]
-; CHECK-NEXT:    vmov.u16 r1, q1[5]
-; CHECK-NEXT:    vmov q4[3], q4[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[6]
-; CHECK-NEXT:    vmov.u16 r1, q0[4]
-; CHECK-NEXT:    vmovlb.s8 q1, q4
-; CHECK-NEXT:    vmov q4[2], q4[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[7]
-; CHECK-NEXT:    vmov.u16 r1, q0[5]
-; CHECK-NEXT:    vmovlb.s16 q1, q1
-; CHECK-NEXT:    vmov q4[3], q4[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q2[6]
-; CHECK-NEXT:    vmovlb.s8 q0, q4
-; CHECK-NEXT:    vmov.u16 r1, q2[4]
-; CHECK-NEXT:    vmovlb.s16 q0, q0
-; CHECK-NEXT:    vmul.i32 q0, q0, q1
-; CHECK-NEXT:    vmov q1[2], q1[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q2[7]
-; CHECK-NEXT:    vmov.u16 r1, q2[5]
-; CHECK-NEXT:    vmov q1[3], q1[1], r1, r0
-; CHECK-NEXT:    vpt.i32 ne, q1, zr
-; CHECK-NEXT:    vaddt.i32 q3, q3, q0
-; CHECK-NEXT:    vaddv.u32 r0, q3
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    vpt.i16 eq, q2, zr
+; CHECK-NEXT:    vmlavt.s16 r0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %c = icmp eq <8 x i8> %b, zeroinitializer
@@ -899,43 +561,10 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8i16_v8i32_zext(<8 x i8> %x, <8 x i16> %y, <8 x i8> %b) {
 ; CHECK-LABEL: add_v8i8i16_v8i32_zext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .pad #32
-; CHECK-NEXT:    sub sp, #32
-; CHECK-NEXT:    mov r0, sp
 ; CHECK-NEXT:    vmovlb.u8 q0, q0
-; CHECK-NEXT:    add r1, sp, #16
-; CHECK-NEXT:    vstrw.32 q1, [r0]
-; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vmovlb.u8 q0, q2
-; CHECK-NEXT:    vcmp.i16 eq, q0, zr
-; CHECK-NEXT:    vmov.i8 q0, #0x0
-; CHECK-NEXT:    vmov.i8 q1, #0xff
-; CHECK-NEXT:    vldrh.u32 q2, [r0]
-; CHECK-NEXT:    vpsel q0, q1, q0
-; CHECK-NEXT:    vldrh.u32 q3, [r1]
-; CHECK-NEXT:    vmov.u16 r2, q0[2]
-; CHECK-NEXT:    vmov.u16 r3, q0[0]
-; CHECK-NEXT:    vmov q1[2], q1[0], r3, r2
-; CHECK-NEXT:    vmov.u16 r2, q0[3]
-; CHECK-NEXT:    vmov.u16 r3, q0[1]
-; CHECK-NEXT:    vmov q1[3], q1[1], r3, r2
-; CHECK-NEXT:    vcmp.i32 ne, q1, zr
-; CHECK-NEXT:    vmov.i32 q1, #0x0
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vmult.i32 q1, q3, q2
-; CHECK-NEXT:    vldrh.u32 q2, [r0, #8]
-; CHECK-NEXT:    vldrh.u32 q3, [r1, #8]
-; CHECK-NEXT:    vmov.u16 r0, q0[6]
-; CHECK-NEXT:    vmov.u16 r1, q0[4]
-; CHECK-NEXT:    vmul.i32 q2, q3, q2
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q0[7]
-; CHECK-NEXT:    vmov.u16 r1, q0[5]
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vpt.i32 ne, q3, zr
-; CHECK-NEXT:    vaddt.i32 q1, q1, q2
-; CHECK-NEXT:    vaddv.u32 r0, q1
-; CHECK-NEXT:    add sp, #32
+; CHECK-NEXT:    vmovlb.u8 q2, q2
+; CHECK-NEXT:    vpt.i16 eq, q2, zr
+; CHECK-NEXT:    vmlavt.u16 r0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %c = icmp eq <8 x i8> %b, zeroinitializer
@@ -950,57 +579,10 @@ entry:
 define arm_aapcs_vfpcc i32 @add_v8i8i16_v8i32_sext(<8 x i8> %x, <8 x i16> %y, <8 x i8> %b) {
 ; CHECK-LABEL: add_v8i8i16_v8i32_sext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    .pad #16
-; CHECK-NEXT:    sub sp, #16
-; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    vmov.u16 r1, q0[2]
-; CHECK-NEXT:    vmov.u16 r2, q0[0]
-; CHECK-NEXT:    vstrw.32 q1, [r0]
-; CHECK-NEXT:    vmov q1[2], q1[0], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[3]
-; CHECK-NEXT:    vmov.u16 r2, q0[1]
-; CHECK-NEXT:    vldrh.s32 q4, [r0]
-; CHECK-NEXT:    vmov q1[3], q1[1], r2, r1
-; CHECK-NEXT:    vmovlb.s8 q1, q1
-; CHECK-NEXT:    vmovlb.s16 q3, q1
-; CHECK-NEXT:    vmovlb.u8 q1, q2
-; CHECK-NEXT:    vcmp.i16 eq, q1, zr
-; CHECK-NEXT:    vmov.i8 q1, #0x0
-; CHECK-NEXT:    vmov.i8 q2, #0xff
-; CHECK-NEXT:    vpsel q1, q2, q1
-; CHECK-NEXT:    vmov.u16 r1, q1[2]
-; CHECK-NEXT:    vmov.u16 r2, q1[0]
-; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q1[3]
-; CHECK-NEXT:    vmov.u16 r2, q1[1]
-; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[6]
-; CHECK-NEXT:    vcmp.i32 ne, q2, zr
-; CHECK-NEXT:    vmov.i32 q2, #0x0
-; CHECK-NEXT:    vmov.u16 r2, q0[4]
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vmult.i32 q2, q3, q4
-; CHECK-NEXT:    vmov q3[2], q3[0], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q0[7]
-; CHECK-NEXT:    vmov.u16 r2, q0[5]
-; CHECK-NEXT:    vmov q3[3], q3[1], r2, r1
-; CHECK-NEXT:    vmov.u16 r1, q1[4]
-; CHECK-NEXT:    vmovlb.s8 q0, q3
-; CHECK-NEXT:    vldrh.s32 q3, [r0, #8]
-; CHECK-NEXT:    vmovlb.s16 q0, q0
-; CHECK-NEXT:    vmov.u16 r0, q1[6]
-; CHECK-NEXT:    vmul.i32 q0, q0, q3
-; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q1[7]
-; CHECK-NEXT:    vmov.u16 r1, q1[5]
-; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
-; CHECK-NEXT:    vpt.i32 ne, q3, zr
-; CHECK-NEXT:    vaddt.i32 q2, q2, q0
-; CHECK-NEXT:    vaddv.u32 r0, q2
-; CHECK-NEXT:    add sp, #16
-; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.u8 q2, q2
+; CHECK-NEXT:    vpt.i16 eq, q2, zr
+; CHECK-NEXT:    vmlavt.s16 r0, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %c = icmp eq <8 x i8> %b, zeroinitializer
@@ -1907,49 +1489,12 @@ entry:
 define arm_aapcs_vfpcc i64 @add_v4i8i16_v4i64_zext(<4 x i8> %x, <4 x i16> %y, <4 x i8> %b) {
 ; CHECK-LABEL: add_v4i8i16_v4i64_zext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    vmov.i32 q3, #0xff
 ; CHECK-NEXT:    vmovlb.u16 q1, q1
-; CHECK-NEXT:    vand q2, q2, q3
 ; CHECK-NEXT:    vand q0, q0, q3
-; CHECK-NEXT:    vcmp.i32 eq, q2, zr
-; CHECK-NEXT:    vmov.f32 s16, s0
-; CHECK-NEXT:    vmrs r0, p0
-; CHECK-NEXT:    vmov.f32 s20, s4
-; CHECK-NEXT:    vmov.f32 s18, s1
-; CHECK-NEXT:    vmov.f32 s22, s5
-; CHECK-NEXT:    vmullb.u32 q6, q4, q5
-; CHECK-NEXT:    and r2, r0, #1
-; CHECK-NEXT:    ubfx r1, r0, #4, #1
-; CHECK-NEXT:    rsbs r2, r2, #0
-; CHECK-NEXT:    rsbs r1, r1, #0
-; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
-; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
-; CHECK-NEXT:    vand q2, q6, q2
-; CHECK-NEXT:    vmov r1, r12, d5
-; CHECK-NEXT:    vmov r3, r2, d4
-; CHECK-NEXT:    vmov.f32 s8, s2
-; CHECK-NEXT:    vmov.f32 s10, s3
-; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s7
-; CHECK-NEXT:    vmullb.u32 q1, q2, q0
-; CHECK-NEXT:    adds r1, r1, r3
-; CHECK-NEXT:    ubfx r3, r0, #12, #1
-; CHECK-NEXT:    ubfx r0, r0, #8, #1
-; CHECK-NEXT:    rsb.w r3, r3, #0
-; CHECK-NEXT:    rsb.w r0, r0, #0
-; CHECK-NEXT:    adc.w r2, r2, r12
-; CHECK-NEXT:    vmov q0[2], q0[0], r0, r3
-; CHECK-NEXT:    vmov q0[3], q0[1], r0, r3
-; CHECK-NEXT:    vand q0, q1, q0
-; CHECK-NEXT:    vmov r0, r3, d0
-; CHECK-NEXT:    adds r0, r0, r1
-; CHECK-NEXT:    adc.w r1, r2, r3
-; CHECK-NEXT:    vmov r2, r3, d1
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vand q2, q2, q3
+; CHECK-NEXT:    vpt.i32 eq, q2, zr
+; CHECK-NEXT:    vmlalvt.u32 r0, r1, q0, q1
 ; CHECK-NEXT:    bx lr
 entry:
   %c = icmp eq <4 x i8> %b, zeroinitializer
@@ -1964,72 +1509,14 @@ entry:
 define arm_aapcs_vfpcc i64 @add_v4i8i16_v4i64_sext(<4 x i8> %x, <4 x i16> %y, <4 x i8> %b) {
 ; CHECK-LABEL: add_v4i8i16_v4i64_sext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .save {r4, lr}
-; CHECK-NEXT:    push {r4, lr}
-; CHECK-NEXT:    .vsave {d8, d9}
-; CHECK-NEXT:    vpush {d8, d9}
-; CHECK-NEXT:    vmov.f32 s12, s4
-; CHECK-NEXT:    vmov.i32 q4, #0xff
-; CHECK-NEXT:    vmov.f32 s14, s5
-; CHECK-NEXT:    vand q2, q2, q4
-; CHECK-NEXT:    vmov r2, s4
-; CHECK-NEXT:    vcmp.i32 eq, q2, zr
-; CHECK-NEXT:    vmov r3, s0
-; CHECK-NEXT:    vmov r0, s14
-; CHECK-NEXT:    vmov.f32 s12, s0
-; CHECK-NEXT:    vmov.f32 s14, s1
-; CHECK-NEXT:    vmov r1, s14
-; CHECK-NEXT:    sxth r2, r2
-; CHECK-NEXT:    sxtb r3, r3
-; CHECK-NEXT:    smull r2, r3, r3, r2
-; CHECK-NEXT:    sxth r0, r0
-; CHECK-NEXT:    sxtb r1, r1
-; CHECK-NEXT:    smull r0, r1, r1, r0
-; CHECK-NEXT:    vmov q3[2], q3[0], r2, r0
-; CHECK-NEXT:    vmrs r0, p0
-; CHECK-NEXT:    vmov q3[3], q3[1], r3, r1
-; CHECK-NEXT:    and r2, r0, #1
-; CHECK-NEXT:    ubfx r1, r0, #4, #1
-; CHECK-NEXT:    rsbs r2, r2, #0
-; CHECK-NEXT:    rsbs r1, r1, #0
-; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
-; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
-; CHECK-NEXT:    vand q2, q3, q2
-; CHECK-NEXT:    vmov r1, r12, d5
-; CHECK-NEXT:    vmov r3, r2, d4
-; CHECK-NEXT:    vmov.f32 s8, s6
-; CHECK-NEXT:    vmov.f32 s10, s7
-; CHECK-NEXT:    vmov.f32 s4, s2
-; CHECK-NEXT:    vmov.f32 s6, s3
-; CHECK-NEXT:    vmov r4, s4
-; CHECK-NEXT:    adds.w lr, r3, r1
-; CHECK-NEXT:    vmov r3, s10
-; CHECK-NEXT:    vmov r1, s6
-; CHECK-NEXT:    adc.w r12, r12, r2
-; CHECK-NEXT:    vmov r2, s8
-; CHECK-NEXT:    sxtb r4, r4
-; CHECK-NEXT:    sxth r3, r3
-; CHECK-NEXT:    sxtb r1, r1
-; CHECK-NEXT:    sxth r2, r2
-; CHECK-NEXT:    smull r1, r3, r1, r3
-; CHECK-NEXT:    smull r2, r4, r4, r2
-; CHECK-NEXT:    vmov q0[2], q0[0], r2, r1
-; CHECK-NEXT:    ubfx r1, r0, #12, #1
-; CHECK-NEXT:    ubfx r0, r0, #8, #1
-; CHECK-NEXT:    rsbs r1, r1, #0
-; CHECK-NEXT:    rsbs r0, r0, #0
-; CHECK-NEXT:    vmov q0[3], q0[1], r4, r3
-; CHECK-NEXT:    vmov q1[2], q1[0], r0, r1
-; CHECK-NEXT:    vmov q1[3], q1[1], r0, r1
-; CHECK-NEXT:    vand q0, q0, q1
-; CHECK-NEXT:    vmov r0, r1, d0
-; CHECK-NEXT:    vmov r2, r3, d1
-; CHECK-NEXT:    adds.w r0, r0, lr
-; CHECK-NEXT:    adc.w r1, r1, r12
-; CHECK-NEXT:    adds r0, r0, r2
-; CHECK-NEXT:    adcs r1, r3
-; CHECK-NEXT:    vpop {d8, d9}
-; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmov.i32 q3, #0xff
+; CHECK-NEXT:    vand q2, q2, q3
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vpt.i32 eq, q2, zr
+; CHECK-NEXT:    vmlalvt.s32 r0, r1, q0, q1
+; CHECK-NEXT:    bx lr
 entry:
   %c = icmp eq <4 x i8> %b, zeroinitializer
   %xx = sext <4 x i8> %x to <4 x i64>

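A plausible C-level shape of the predicated add_v8i8i16_v8i64_sext test
earlier in this file (hypothetical source):

  // Masked widening multiply-accumulate: lanes where b[i] == 0
  // contribute, which now folds into a vpt.i16 eq + vmlalvt.s16 pair.
  long long add_v8i8i16_v8i64_sext_pred(const short x[8],
                                        const signed char y[8],
                                        const short b[8]) {
    long long sum = 0;
    for (int i = 0; i < 8; i++)
      if (b[i] == 0)
        sum += (long long)x[i] * y[i];
    return sum;
  }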
