[llvm] a3dc5b5 - [ARM][CodeGen] Add integer support for complex deinterleaving

Nicholas Guy via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 12 03:43:09 PST 2022


Author: Nicholas Guy
Date: 2022-12-12T11:38:19Z
New Revision: a3dc5b534a80c1b6d2a22c4bb8710f89d034d63d

URL: https://github.com/llvm/llvm-project/commit/a3dc5b534a80c1b6d2a22c4bb8710f89d034d63d
DIFF: https://github.com/llvm/llvm-project/commit/a3dc5b534a80c1b6d2a22c4bb8710f89d034d63d.diff

LOG: [ARM][CodeGen] Add integer support for complex deinterleaving

Differential Revision: https://reviews.llvm.org/D139628
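
For reference, a minimal sketch of the pattern this enables, mirroring the
autogenerated tests added below (function and value names are illustrative):
a deinterleaved integer add with a 90-degree rotation, i.e.
real = b.real - a.imag and imag = b.imag + a.real, is now recognised and
lowered to a single MVE VCADD on integer elements:

    ; deinterleaved <4 x i16> halves of <8 x i16> %a and %b
    %0 = sub <4 x i16> %b.real, %a.imag        ; real part, Rotation_90
    %1 = add <4 x i16> %b.imag, %a.real        ; imaginary part
    %interleaved.vec = shufflevector <4 x i16> %0, <4 x i16> %1,
          <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>

which, with -mattr=+mve, now compiles to:

    vcadd.i16 q0, q1, q0, #90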

Added: 
    llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i16-add.ll
    llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i32-add.ll
    llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i64-add.ll
    llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i8-add.ll

Modified: 
    llvm/lib/CodeGen/ComplexDeinterleavingPass.cpp
    llvm/lib/Target/ARM/ARMISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/ComplexDeinterleavingPass.cpp b/llvm/lib/CodeGen/ComplexDeinterleavingPass.cpp
index c2a05cb81f9a..2414c0d7617c 100644
--- a/llvm/lib/CodeGen/ComplexDeinterleavingPass.cpp
+++ b/llvm/lib/CodeGen/ComplexDeinterleavingPass.cpp
@@ -635,21 +635,30 @@ ComplexDeinterleavingGraph::identifyAdd(Instruction *Real, Instruction *Imag) {
 
   // Determine rotation
   ComplexDeinterleavingRotation Rotation;
-  if (Real->getOpcode() == Instruction::FSub &&
-      Imag->getOpcode() == Instruction::FAdd)
+  if ((Real->getOpcode() == Instruction::FSub &&
+       Imag->getOpcode() == Instruction::FAdd) ||
+      (Real->getOpcode() == Instruction::Sub &&
+       Imag->getOpcode() == Instruction::Add))
     Rotation = ComplexDeinterleavingRotation::Rotation_90;
-  else if (Real->getOpcode() == Instruction::FAdd &&
-           Imag->getOpcode() == Instruction::FSub)
+  else if ((Real->getOpcode() == Instruction::FAdd &&
+            Imag->getOpcode() == Instruction::FSub) ||
+           (Real->getOpcode() == Instruction::Add &&
+            Imag->getOpcode() == Instruction::Sub))
     Rotation = ComplexDeinterleavingRotation::Rotation_270;
   else {
     LLVM_DEBUG(dbgs() << " - Unhandled case, rotation is not assigned.\n");
     return nullptr;
   }
 
-  auto *AR = cast<Instruction>(Real->getOperand(0));
-  auto *BI = cast<Instruction>(Real->getOperand(1));
-  auto *AI = cast<Instruction>(Imag->getOperand(0));
-  auto *BR = cast<Instruction>(Imag->getOperand(1));
+  auto *AR = dyn_cast<Instruction>(Real->getOperand(0));
+  auto *BI = dyn_cast<Instruction>(Real->getOperand(1));
+  auto *AI = dyn_cast<Instruction>(Imag->getOperand(0));
+  auto *BR = dyn_cast<Instruction>(Imag->getOperand(1));
+
+  if (!AR || !AI || !BR || !BI) {
+    LLVM_DEBUG(dbgs() << " - Not all operands are instructions.\n");
+    return nullptr;
+  }
 
   NodePtr ResA = identifyNode(AR, AI);
   if (!ResA) {
@@ -673,8 +682,11 @@ ComplexDeinterleavingGraph::identifyAdd(Instruction *Real, Instruction *Imag) {
 static bool isInstructionPairAdd(Instruction *A, Instruction *B) {
   unsigned OpcA = A->getOpcode();
   unsigned OpcB = B->getOpcode();
+
   return (OpcA == Instruction::FSub && OpcB == Instruction::FAdd) ||
-         (OpcA == Instruction::FAdd && OpcB == Instruction::FSub);
+         (OpcA == Instruction::FAdd && OpcB == Instruction::FSub) ||
+         (OpcA == Instruction::Sub && OpcB == Instruction::Add) ||
+         (OpcA == Instruction::Add && OpcB == Instruction::Sub);
 }
 
 static bool isInstructionPairMul(Instruction *A, Instruction *B) {

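The switch from cast<> to dyn_cast<> with an explicit null check matters here:
once integer add/sub pairs are matched, an operand of Real or Imag can be a
plain function argument or constant rather than an Instruction, and
cast<Instruction> would assert on such IR. A hedged sketch of an input that
now bails out cleanly (names illustrative):

    define <8 x i16> @operands_are_arguments(<4 x i16> %ar, <4 x i16> %ai,
                                             <4 x i16> %br, <4 x i16> %bi) {
      %re = sub <4 x i16> %br, %ai   ; operands are arguments, not Instructions
      %im = add <4 x i16> %bi, %ar
      %vec = shufflevector <4 x i16> %re, <4 x i16> %im,
             <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
      ret <8 x i16> %vec
    }

identifyAdd() now returns nullptr for this function instead of hitting the
cast assertion.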
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index a27afc5723da..76310611bdcf 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -21839,7 +21839,7 @@ void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
 }
 
 bool ARMTargetLowering::isComplexDeinterleavingSupported() const {
-  return Subtarget->hasMVEFloatOps();
+  return Subtarget->hasMVEIntegerOps();
 }
 
 bool ARMTargetLowering::isComplexDeinterleavingOperationSupported(
@@ -21856,7 +21856,15 @@ bool ARMTargetLowering::isComplexDeinterleavingOperationSupported(
     return false;
 
   // Both VCADD and VCMUL/VCMLA support the same types, F16 and F32
-  return ScalarTy->isHalfTy() || ScalarTy->isFloatTy();
+  if (ScalarTy->isHalfTy() || ScalarTy->isFloatTy())
+    return Subtarget->hasMVEFloatOps();
+
+  if (Operation != ComplexDeinterleavingOperation::CAdd)
+    return false;
+
+  return Subtarget->hasMVEIntegerOps() &&
+         (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
+          ScalarTy->isIntegerTy(32));
 }
 
 Value *ARMTargetLowering::createComplexDeinterleavingIR(

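The two target hooks now split the capability check: the pass as a whole is
enabled whenever the core MVE integer ops are present, while the
per-operation hook keeps F16/F32 gated behind hasMVEFloatOps and admits
integer types only for complex additions. That restriction matches the
instruction set: MVE provides VCADD encodings for 8-, 16- and 32-bit integer
elements, but VCMUL/VCMLA exist only for floating point. An illustrative
summary of the resulting decisions (the float rows are assumptions based on
the pre-existing FP checks above, not on tests added in this patch):

    <8 x half> CAdd/CMul -> vcadd.f16 / vcmla.f16  (requires +mve.fp)
    <8 x i16>  CAdd      -> vcadd.i16              (requires +mve)
    <8 x i16>  CMul      -> rejected: no integer VCMLA
    <2 x i64>  CAdd      -> rejected: no 64-bit VCADD element size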
diff --git a/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i16-add.ll b/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i16-add.ll
new file mode 100644
index 000000000000..37f6bbeffd02
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i16-add.ll
@@ -0,0 +1,127 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s --mattr=+mve -o - | FileCheck %s
+
+target triple = "thumbv8.1m.main-none-none-eabi"
+
+
+; Expected to not transform
+define arm_aapcs_vfpcc <2 x i16> @complex_add_v2i16(<2 x i16> %a, <2 x i16> %b) {
+; CHECK-LABEL: complex_add_v2i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    add r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    subs r1, r2, r1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <2 x i16> %a, <2 x i16> zeroinitializer, <1 x i32> <i32 0>
+  %a.imag = shufflevector <2 x i16> %a, <2 x i16> zeroinitializer, <1 x i32> <i32 1>
+  %b.real = shufflevector <2 x i16> %b, <2 x i16> zeroinitializer, <1 x i32> <i32 0>
+  %b.imag = shufflevector <2 x i16> %b, <2 x i16> zeroinitializer, <1 x i32> <i32 1>
+  %0 = sub <1 x i16> %b.real, %a.imag
+  %1 = add <1 x i16> %b.imag, %a.real
+  %interleaved.vec = shufflevector <1 x i16> %0, <1 x i16> %1, <2 x i32> <i32 0, i32 1>
+  ret <2 x i16> %interleaved.vec
+}
+
+; Expected to not transform
+define arm_aapcs_vfpcc <4 x i16> @complex_add_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: complex_add_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vrev64.32 q2, q0
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    vrev64.32 q3, q1
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    subs r0, r1, r0
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    subs r1, r2, r1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    add r0, r1
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    add r1, r2
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <4 x i16> %a, <4 x i16> zeroinitializer, <2 x i32> <i32 0, i32 2>
+  %a.imag = shufflevector <4 x i16> %a, <4 x i16> zeroinitializer, <2 x i32> <i32 1, i32 3>
+  %b.real = shufflevector <4 x i16> %b, <4 x i16> zeroinitializer, <2 x i32> <i32 0, i32 2>
+  %b.imag = shufflevector <4 x i16> %b, <4 x i16> zeroinitializer, <2 x i32> <i32 1, i32 3>
+  %0 = sub <2 x i16> %b.real, %a.imag
+  %1 = add <2 x i16> %b.imag, %a.real
+  %interleaved.vec = shufflevector <2 x i16> %0, <2 x i16> %1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  ret <4 x i16> %interleaved.vec
+}
+
+; Expected to transform
+define arm_aapcs_vfpcc <8 x i16> @complex_add_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: complex_add_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcadd.i16 q0, q1, q0, #90
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %a.imag = shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %b.real = shufflevector <8 x i16> %b, <8 x i16> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %b.imag = shufflevector <8 x i16> %b, <8 x i16> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %0 = sub <4 x i16> %b.real, %a.imag
+  %1 = add <4 x i16> %b.imag, %a.real
+  %interleaved.vec = shufflevector <4 x i16> %0, <4 x i16> %1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  ret <8 x i16> %interleaved.vec
+}
+
+; Expected to transform
+define arm_aapcs_vfpcc <16 x i16> @complex_add_v16i16(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: complex_add_v16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcadd.i16 q0, q2, q0, #90
+; CHECK-NEXT:    vcadd.i16 q1, q3, q1, #90
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %a.imag = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %b.real = shufflevector <16 x i16> %b, <16 x i16> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %b.imag = shufflevector <16 x i16> %b, <16 x i16> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %0 = sub <8 x i16> %b.real, %a.imag
+  %1 = add <8 x i16> %b.imag, %a.real
+  %interleaved.vec = shufflevector <8 x i16> %0, <8 x i16> %1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  ret <16 x i16> %interleaved.vec
+}
+
+; Expected to transform
+define arm_aapcs_vfpcc <32 x i16> @complex_add_v32i16(<32 x i16> %a, <32 x i16> %b) {
+; CHECK-LABEL: complex_add_v32i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    add r0, sp, #16
+; CHECK-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-NEXT:    add r0, sp, #32
+; CHECK-NEXT:    vcadd.i16 q0, q4, q0, #90
+; CHECK-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-NEXT:    add r0, sp, #48
+; CHECK-NEXT:    vcadd.i16 q1, q4, q1, #90
+; CHECK-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-NEXT:    add r0, sp, #64
+; CHECK-NEXT:    vcadd.i16 q2, q4, q2, #90
+; CHECK-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-NEXT:    vcadd.i16 q3, q4, q3, #90
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %a.imag = shufflevector <32 x i16> %a, <32 x i16> zeroinitializer, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %b.real = shufflevector <32 x i16> %b, <32 x i16> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %b.imag = shufflevector <32 x i16> %b, <32 x i16> zeroinitializer, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %0 = sub <16 x i16> %b.real, %a.imag
+  %1 = add <16 x i16> %b.imag, %a.real
+  %interleaved.vec = shufflevector <16 x i16> %0, <16 x i16> %1, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  ret <32 x i16> %interleaved.vec
+}

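A quick sanity check on why only some of the widths above are transformed:
MVE VCADD operates on full 128-bit q registers, so only vectors that fill at
least one q register are converted, and wider ones are split:

     2 x i16 =  32 bits -> not transformed
     4 x i16 =  64 bits -> not transformed
     8 x i16 = 128 bits -> one vcadd.i16
    16 x i16 = 256 bits -> two vcadd.i16
    32 x i16 = 512 bits -> four vcadd.i16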
diff --git a/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i32-add.ll b/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i32-add.ll
new file mode 100644
index 000000000000..852865cec4e2
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i32-add.ll
@@ -0,0 +1,163 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s --mattr=+mve -o - | FileCheck %s
+
+target triple = "thumbv8.1m.main-none-none-eabi"
+
+
+; Expected to not transform
+define arm_aapcs_vfpcc <2 x i32> @complex_add_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: complex_add_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    add r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    subs r1, r2, r1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <2 x i32> %a, <2 x i32> zeroinitializer, <1 x i32> <i32 0>
+  %a.imag = shufflevector <2 x i32> %a, <2 x i32> zeroinitializer, <1 x i32> <i32 1>
+  %b.real = shufflevector <2 x i32> %b, <2 x i32> zeroinitializer, <1 x i32> <i32 0>
+  %b.imag = shufflevector <2 x i32> %b, <2 x i32> zeroinitializer, <1 x i32> <i32 1>
+  %0 = sub <1 x i32> %b.real, %a.imag
+  %1 = add <1 x i32> %b.imag, %a.real
+  %interleaved.vec = shufflevector <1 x i32> %0, <1 x i32> %1, <2 x i32> <i32 0, i32 1>
+  ret <2 x i32> %interleaved.vec
+}
+
+; Expected to transform
+define arm_aapcs_vfpcc <4 x i32> @complex_add_v4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: complex_add_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcadd.i32 q2, q1, q0, #90
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <2 x i32> <i32 0, i32 2>
+  %a.imag = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
+  %b.real = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <2 x i32> <i32 0, i32 2>
+  %b.imag = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
+  %0 = sub <2 x i32> %b.real, %a.imag
+  %1 = add <2 x i32> %b.imag, %a.real
+  %interleaved.vec = shufflevector <2 x i32> %0, <2 x i32> %1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  ret <4 x i32> %interleaved.vec
+}
+
+; Expected to transform
+define arm_aapcs_vfpcc <8 x i32> @complex_add_v8i32(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: complex_add_v8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vcadd.i32 q4, q2, q0, #90
+; CHECK-NEXT:    vcadd.i32 q2, q3, q1, #90
+; CHECK-NEXT:    vmov q0, q4
+; CHECK-NEXT:    vmov q1, q2
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <8 x i32> %a, <8 x i32> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %a.imag = shufflevector <8 x i32> %a, <8 x i32> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %b.real = shufflevector <8 x i32> %b, <8 x i32> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %b.imag = shufflevector <8 x i32> %b, <8 x i32> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %0 = sub <4 x i32> %b.real, %a.imag
+  %1 = add <4 x i32> %b.imag, %a.real
+  %interleaved.vec = shufflevector <4 x i32> %0, <4 x i32> %1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  ret <8 x i32> %interleaved.vec
+}
+
+; Expected to transform
+define arm_aapcs_vfpcc <16 x i32> @complex_add_v16i32(<16 x i32> %a, <16 x i32> %b) {
+; CHECK-LABEL: complex_add_v16i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    add r0, sp, #64
+; CHECK-NEXT:    vldrw.u32 q5, [r0]
+; CHECK-NEXT:    add r0, sp, #80
+; CHECK-NEXT:    vcadd.i32 q4, q5, q0, #90
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    add r0, sp, #96
+; CHECK-NEXT:    vcadd.i32 q5, q0, q1, #90
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    add r0, sp, #112
+; CHECK-NEXT:    vmov q1, q5
+; CHECK-NEXT:    vcadd.i32 q6, q0, q2, #90
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vmov q2, q6
+; CHECK-NEXT:    vcadd.i32 q7, q0, q3, #90
+; CHECK-NEXT:    vmov q0, q4
+; CHECK-NEXT:    vmov q3, q7
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <16 x i32> %a, <16 x i32> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %a.imag = shufflevector <16 x i32> %a, <16 x i32> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %b.real = shufflevector <16 x i32> %b, <16 x i32> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %b.imag = shufflevector <16 x i32> %b, <16 x i32> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %0 = sub <8 x i32> %b.real, %a.imag
+  %1 = add <8 x i32> %b.imag, %a.real
+  %interleaved.vec = shufflevector <8 x i32> %0, <8 x i32> %1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  ret <16 x i32> %interleaved.vec
+}
+
+; Expected to transform
+define arm_aapcs_vfpcc <32 x i32> @complex_add_v32i32(<32 x i32> %a, <32 x i32> %b) {
+; CHECK-LABEL: complex_add_v32i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    add r1, sp, #96
+; CHECK-NEXT:    vldrw.u32 q4, [r1]
+; CHECK-NEXT:    add r1, sp, #224
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    add r1, sp, #80
+; CHECK-NEXT:    vcadd.i32 q6, q5, q4, #90
+; CHECK-NEXT:    vldrw.u32 q4, [r1]
+; CHECK-NEXT:    add r1, sp, #208
+; CHECK-NEXT:    vstrw.32 q6, [r0, #112]
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    add r1, sp, #64
+; CHECK-NEXT:    vcadd.i32 q6, q5, q4, #90
+; CHECK-NEXT:    vldrw.u32 q4, [r1]
+; CHECK-NEXT:    add r1, sp, #192
+; CHECK-NEXT:    vstrw.32 q6, [r0, #96]
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    add r1, sp, #48
+; CHECK-NEXT:    vcadd.i32 q6, q5, q4, #90
+; CHECK-NEXT:    vldrw.u32 q4, [r1]
+; CHECK-NEXT:    add r1, sp, #176
+; CHECK-NEXT:    vstrw.32 q6, [r0, #80]
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    add r1, sp, #160
+; CHECK-NEXT:    vcadd.i32 q6, q5, q4, #90
+; CHECK-NEXT:    vldrw.u32 q4, [r1]
+; CHECK-NEXT:    add r1, sp, #144
+; CHECK-NEXT:    vstrw.32 q6, [r0, #64]
+; CHECK-NEXT:    vcadd.i32 q5, q4, q3, #90
+; CHECK-NEXT:    vldrw.u32 q3, [r1]
+; CHECK-NEXT:    add r1, sp, #128
+; CHECK-NEXT:    vstrw.32 q5, [r0, #48]
+; CHECK-NEXT:    vcadd.i32 q4, q3, q2, #90
+; CHECK-NEXT:    vldrw.u32 q2, [r1]
+; CHECK-NEXT:    add r1, sp, #112
+; CHECK-NEXT:    vstrw.32 q4, [r0, #32]
+; CHECK-NEXT:    vcadd.i32 q3, q2, q1, #90
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    vstrw.32 q3, [r0, #16]
+; CHECK-NEXT:    vcadd.i32 q2, q1, q0, #90
+; CHECK-NEXT:    vstrw.32 q2, [r0]
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <32 x i32> %a, <32 x i32> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %a.imag = shufflevector <32 x i32> %a, <32 x i32> zeroinitializer, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %b.real = shufflevector <32 x i32> %b, <32 x i32> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %b.imag = shufflevector <32 x i32> %b, <32 x i32> zeroinitializer, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %0 = sub <16 x i32> %b.real, %a.imag
+  %1 = add <16 x i32> %b.imag, %a.real
+  %interleaved.vec = shufflevector <16 x i32> %0, <16 x i32> %1, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  ret <32 x i32> %interleaved.vec
+}

diff --git a/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i64-add.ll b/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i64-add.ll
new file mode 100644
index 000000000000..b9e5590fe850
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i64-add.ll
@@ -0,0 +1,632 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s --mattr=+mve -o - | FileCheck %s
+
+target triple = "thumbv8.1m.main-none-none-eabi"
+
+
+; Expected to not transform
+define arm_aapcs_vfpcc <2 x i64> @complex_add_v2i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: complex_add_v2i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov r2, r3, d3
+; CHECK-NEXT:    adds.w lr, r2, r0
+; CHECK-NEXT:    adc.w r12, r3, r1
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    vmov r1, r0, d2
+; CHECK-NEXT:    subs r1, r1, r2
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, lr
+; CHECK-NEXT:    sbcs r0, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r12
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %a.real = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <1 x i32> <i32 0>
+  %a.imag = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <1 x i32> <i32 1>
+  %b.real = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <1 x i32> <i32 0>
+  %b.imag = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <1 x i32> <i32 1>
+  %0 = sub <1 x i64> %b.real, %a.imag
+  %1 = add <1 x i64> %b.imag, %a.real
+  %interleaved.vec = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+  ret <2 x i64> %interleaved.vec
+}
+
+; Expected to not transform
+define arm_aapcs_vfpcc <4 x i64> @complex_add_v4i64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: complex_add_v4i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q1
+; CHECK-NEXT:    vmov r2, r3, d7
+; CHECK-NEXT:    vmov r0, r1, d8
+; CHECK-NEXT:    adds.w lr, r2, r0
+; CHECK-NEXT:    adc.w r12, r3, r1
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    vmov r1, r0, d5
+; CHECK-NEXT:    adds r1, r1, r2
+; CHECK-NEXT:    adcs r0, r3
+; CHECK-NEXT:    vmov q1[2], q1[0], r1, lr
+; CHECK-NEXT:    vmov q1[3], q1[1], r0, r12
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    vmov r2, r3, d6
+; CHECK-NEXT:    subs.w lr, r2, r0
+; CHECK-NEXT:    sbc.w r12, r3, r1
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    vmov r1, r0, d4
+; CHECK-NEXT:    vmov.f32 s2, s4
+; CHECK-NEXT:    vmov.f32 s3, s5
+; CHECK-NEXT:    subs r1, r1, r2
+; CHECK-NEXT:    vmov q2[2], q2[0], r1, lr
+; CHECK-NEXT:    sbcs r0, r3
+; CHECK-NEXT:    vmov q2[3], q2[1], r0, r12
+; CHECK-NEXT:    vmov.f32 s0, s8
+; CHECK-NEXT:    vmov.f32 s4, s10
+; CHECK-NEXT:    vmov.f32 s1, s9
+; CHECK-NEXT:    vmov.f32 s5, s11
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %a.real = shufflevector <4 x i64> %a, <4 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
+  %a.imag = shufflevector <4 x i64> %a, <4 x i64> zeroinitializer, <2 x i32> <i32 1, i32 3>
+  %b.real = shufflevector <4 x i64> %b, <4 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
+  %b.imag = shufflevector <4 x i64> %b, <4 x i64> zeroinitializer, <2 x i32> <i32 1, i32 3>
+  %0 = sub <2 x i64> %b.real, %a.imag
+  %1 = add <2 x i64> %b.imag, %a.real
+  %interleaved.vec = shufflevector <2 x i64> %0, <2 x i64> %1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  ret <4 x i64> %interleaved.vec
+}
+
+; Expected to not transform
+define arm_aapcs_vfpcc <8 x i64> @complex_add_v8i64(<8 x i64> %a, <8 x i64> %b) {
+; CHECK-LABEL: complex_add_v8i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    add r2, sp, #72
+; CHECK-NEXT:    vmov q4, q1
+; CHECK-NEXT:    vldrw.u32 q5, [r2]
+; CHECK-NEXT:    vmov r0, r1, d8
+; CHECK-NEXT:    vmov r2, r3, d11
+; CHECK-NEXT:    adds.w lr, r2, r0
+; CHECK-NEXT:    adc.w r12, r3, r1
+; CHECK-NEXT:    add r1, sp, #56
+; CHECK-NEXT:    vldrw.u32 q6, [r1]
+; CHECK-NEXT:    vmov r2, r3, d0
+; CHECK-NEXT:    vmov r1, r0, d13
+; CHECK-NEXT:    adds r1, r1, r2
+; CHECK-NEXT:    adcs r0, r3
+; CHECK-NEXT:    vmov q1[2], q1[0], r1, lr
+; CHECK-NEXT:    vmov q1[3], q1[1], r0, r12
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    vmov r2, r3, d10
+; CHECK-NEXT:    subs.w lr, r2, r0
+; CHECK-NEXT:    sbc.w r12, r3, r1
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    vmov r1, r0, d12
+; CHECK-NEXT:    vmov.f32 s2, s4
+; CHECK-NEXT:    vmov.f32 s3, s5
+; CHECK-NEXT:    subs r1, r1, r2
+; CHECK-NEXT:    add r2, sp, #104
+; CHECK-NEXT:    vldrw.u32 q5, [r2]
+; CHECK-NEXT:    sbcs r0, r3
+; CHECK-NEXT:    vmov q4[2], q4[0], r1, lr
+; CHECK-NEXT:    vmov q4[3], q4[1], r0, r12
+; CHECK-NEXT:    vmov r0, r1, d6
+; CHECK-NEXT:    vmov r2, r3, d11
+; CHECK-NEXT:    vmov.f32 s0, s16
+; CHECK-NEXT:    vmov.f32 s4, s18
+; CHECK-NEXT:    vmov.f32 s1, s17
+; CHECK-NEXT:    vmov.f32 s5, s19
+; CHECK-NEXT:    adds.w lr, r2, r0
+; CHECK-NEXT:    adc.w r12, r3, r1
+; CHECK-NEXT:    add r1, sp, #88
+; CHECK-NEXT:    vldrw.u32 q6, [r1]
+; CHECK-NEXT:    vmov r2, r3, d4
+; CHECK-NEXT:    vmov r1, r0, d13
+; CHECK-NEXT:    adds r1, r1, r2
+; CHECK-NEXT:    adcs r0, r3
+; CHECK-NEXT:    vmov q4[2], q4[0], r1, lr
+; CHECK-NEXT:    vmov q4[3], q4[1], r0, r12
+; CHECK-NEXT:    vmov r0, r1, d7
+; CHECK-NEXT:    vmov r2, r3, d10
+; CHECK-NEXT:    subs.w lr, r2, r0
+; CHECK-NEXT:    sbc.w r12, r3, r1
+; CHECK-NEXT:    vmov r2, r3, d5
+; CHECK-NEXT:    vmov r1, r0, d12
+; CHECK-NEXT:    vmov.f32 s10, s16
+; CHECK-NEXT:    vmov.f32 s11, s17
+; CHECK-NEXT:    subs r1, r1, r2
+; CHECK-NEXT:    vmov q3[2], q3[0], r1, lr
+; CHECK-NEXT:    sbcs r0, r3
+; CHECK-NEXT:    vmov q3[3], q3[1], r0, r12
+; CHECK-NEXT:    vmov.f32 s16, s14
+; CHECK-NEXT:    vmov.f32 s8, s12
+; CHECK-NEXT:    vmov.f32 s17, s15
+; CHECK-NEXT:    vmov.f32 s9, s13
+; CHECK-NEXT:    vmov q3, q4
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %a.real = shufflevector <8 x i64> %a, <8 x i64> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %a.imag = shufflevector <8 x i64> %a, <8 x i64> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %b.real = shufflevector <8 x i64> %b, <8 x i64> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %b.imag = shufflevector <8 x i64> %b, <8 x i64> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %0 = sub <4 x i64> %b.real, %a.imag
+  %1 = add <4 x i64> %b.imag, %a.real
+  %interleaved.vec = shufflevector <4 x i64> %0, <4 x i64> %1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  ret <8 x i64> %interleaved.vec
+}
+
+; Expected to not transform
+define arm_aapcs_vfpcc <16 x i64> @complex_add_v16i64(<16 x i64> %a, <16 x i64> %b) {
+; CHECK-LABEL: complex_add_v16i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    add r1, sp, #136
+; CHECK-NEXT:    add r3, sp, #264
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    vldrw.u32 q6, [r3]
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vmov r1, r12, d10
+; CHECK-NEXT:    vmov r3, r2, d13
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    add r3, sp, #120
+; CHECK-NEXT:    add r1, sp, #248
+; CHECK-NEXT:    vldrw.u32 q7, [r3]
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    adc.w r12, r12, r2
+; CHECK-NEXT:    vmov r3, r2, d14
+; CHECK-NEXT:    vmov r1, r4, d1
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov q4[2], q4[0], r1, lr
+; CHECK-NEXT:    adc.w r1, r4, r2
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d11
+; CHECK-NEXT:    vmov r3, r4, d12
+; CHECK-NEXT:    vmov.f32 s22, s18
+; CHECK-NEXT:    vmov.f32 s23, s19
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d15
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    add r3, sp, #232
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    add r1, sp, #104
+; CHECK-NEXT:    vmov.f32 s20, s2
+; CHECK-NEXT:    vldrw.u32 q6, [r3]
+; CHECK-NEXT:    vmov.f32 s21, s3
+; CHECK-NEXT:    vstrw.32 q5, [r0, #112]
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    vmov r3, r4, d13
+; CHECK-NEXT:    vmov r1, r2, d10
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vstrw.32 q0, [r0, #96]
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    add r3, sp, #88
+; CHECK-NEXT:    adc.w r12, r4, r2
+; CHECK-NEXT:    add r2, sp, #216
+; CHECK-NEXT:    vldrw.u32 q7, [r3]
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vmov r3, r4, d14
+; CHECK-NEXT:    vmov r2, r1, d1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adcs r1, r4
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, lr
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d11
+; CHECK-NEXT:    vmov r3, r4, d12
+; CHECK-NEXT:    vmov.f32 s22, s18
+; CHECK-NEXT:    vmov.f32 s23, s19
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d15
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    add r3, sp, #200
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d6
+; CHECK-NEXT:    vmov.f32 s20, s2
+; CHECK-NEXT:    vmov.f32 s21, s3
+; CHECK-NEXT:    vstrw.32 q5, [r0, #80]
+; CHECK-NEXT:    vldrw.u32 q5, [r3]
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov r3, r4, d11
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vstrw.32 q0, [r0, #64]
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    adc.w r12, r4, r2
+; CHECK-NEXT:    add r2, sp, #184
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vmov r3, r4, d4
+; CHECK-NEXT:    vmov r2, r1, d1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adcs r1, r4
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, lr
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d7
+; CHECK-NEXT:    vmov r3, r4, d10
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d5
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    vmov.f32 s10, s18
+; CHECK-NEXT:    vmov.f32 s11, s19
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    add r3, sp, #168
+; CHECK-NEXT:    vldrw.u32 q3, [r3]
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d2
+; CHECK-NEXT:    vmov r3, r4, d7
+; CHECK-NEXT:    vmov.f32 s8, s2
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s9, s3
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vstrw.32 q2, [r0, #48]
+; CHECK-NEXT:    vstrw.32 q0, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    adc.w r12, r4, r2
+; CHECK-NEXT:    add r2, sp, #152
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vmov r3, r4, d8
+; CHECK-NEXT:    vmov r2, r1, d1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adcs r1, r4
+; CHECK-NEXT:    vmov q2[2], q2[0], r2, lr
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d3
+; CHECK-NEXT:    vmov r3, r4, d6
+; CHECK-NEXT:    vmov.f32 s6, s10
+; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d9
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    vmov.f32 s2, s8
+; CHECK-NEXT:    vmov.f32 s5, s3
+; CHECK-NEXT:    vmov.f32 s3, s9
+; CHECK-NEXT:    vstrw.32 q1, [r0, #16]
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r4, pc}
+entry:
+  %a.real = shufflevector <16 x i64> %a, <16 x i64> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %a.imag = shufflevector <16 x i64> %a, <16 x i64> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %b.real = shufflevector <16 x i64> %b, <16 x i64> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %b.imag = shufflevector <16 x i64> %b, <16 x i64> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %0 = sub <8 x i64> %b.real, %a.imag
+  %1 = add <8 x i64> %b.imag, %a.real
+  %interleaved.vec = shufflevector <8 x i64> %0, <8 x i64> %1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  ret <16 x i64> %interleaved.vec
+}
+
+; Expected to not transform
+define arm_aapcs_vfpcc <32 x i64> @complex_add_v32i64(<32 x i64> %a, <32 x i64> %b) {
+; CHECK-LABEL: complex_add_v32i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    add r1, sp, #272
+; CHECK-NEXT:    add r3, sp, #528
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    vldrw.u32 q6, [r3]
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vmov r1, r12, d10
+; CHECK-NEXT:    vmov r3, r2, d13
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    add r3, sp, #256
+; CHECK-NEXT:    add r1, sp, #512
+; CHECK-NEXT:    vldrw.u32 q7, [r3]
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    adc.w r12, r12, r2
+; CHECK-NEXT:    vmov r3, r2, d14
+; CHECK-NEXT:    vmov r1, r4, d1
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov q4[2], q4[0], r1, lr
+; CHECK-NEXT:    adc.w r1, r4, r2
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d11
+; CHECK-NEXT:    vmov r3, r4, d12
+; CHECK-NEXT:    vmov.f32 s22, s18
+; CHECK-NEXT:    vmov.f32 s23, s19
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d15
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    add r3, sp, #496
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    add r1, sp, #240
+; CHECK-NEXT:    vmov.f32 s20, s2
+; CHECK-NEXT:    vldrw.u32 q6, [r3]
+; CHECK-NEXT:    vmov.f32 s21, s3
+; CHECK-NEXT:    vstrw.32 q5, [r0, #240]
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    vmov r3, r4, d13
+; CHECK-NEXT:    vmov r1, r2, d10
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vstrw.32 q0, [r0, #224]
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    add r3, sp, #224
+; CHECK-NEXT:    adc.w r12, r4, r2
+; CHECK-NEXT:    add r2, sp, #480
+; CHECK-NEXT:    vldrw.u32 q7, [r3]
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vmov r3, r4, d14
+; CHECK-NEXT:    vmov r2, r1, d1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adcs r1, r4
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, lr
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d11
+; CHECK-NEXT:    vmov r3, r4, d12
+; CHECK-NEXT:    vmov.f32 s22, s18
+; CHECK-NEXT:    vmov.f32 s23, s19
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d15
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    add r3, sp, #464
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    add r1, sp, #208
+; CHECK-NEXT:    vmov.f32 s20, s2
+; CHECK-NEXT:    vldrw.u32 q6, [r3]
+; CHECK-NEXT:    vmov.f32 s21, s3
+; CHECK-NEXT:    vstrw.32 q5, [r0, #208]
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    vmov r3, r4, d13
+; CHECK-NEXT:    vmov r1, r2, d10
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vstrw.32 q0, [r0, #192]
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    add r3, sp, #192
+; CHECK-NEXT:    adc.w r12, r4, r2
+; CHECK-NEXT:    add r2, sp, #448
+; CHECK-NEXT:    vldrw.u32 q7, [r3]
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vmov r3, r4, d14
+; CHECK-NEXT:    vmov r2, r1, d1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adcs r1, r4
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, lr
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d11
+; CHECK-NEXT:    vmov r3, r4, d12
+; CHECK-NEXT:    vmov.f32 s22, s18
+; CHECK-NEXT:    vmov.f32 s23, s19
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d15
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    add r3, sp, #432
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    add r1, sp, #176
+; CHECK-NEXT:    vmov.f32 s20, s2
+; CHECK-NEXT:    vldrw.u32 q6, [r3]
+; CHECK-NEXT:    vmov.f32 s21, s3
+; CHECK-NEXT:    vstrw.32 q5, [r0, #176]
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    vmov r3, r4, d13
+; CHECK-NEXT:    vmov r1, r2, d10
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vstrw.32 q0, [r0, #160]
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    add r3, sp, #160
+; CHECK-NEXT:    adc.w r12, r4, r2
+; CHECK-NEXT:    add r2, sp, #416
+; CHECK-NEXT:    vldrw.u32 q7, [r3]
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vmov r3, r4, d14
+; CHECK-NEXT:    vmov r2, r1, d1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adcs r1, r4
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, lr
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d11
+; CHECK-NEXT:    vmov r3, r4, d12
+; CHECK-NEXT:    vmov.f32 s22, s18
+; CHECK-NEXT:    vmov.f32 s23, s19
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d15
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    add r3, sp, #400
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    add r1, sp, #144
+; CHECK-NEXT:    vmov.f32 s20, s2
+; CHECK-NEXT:    vldrw.u32 q6, [r3]
+; CHECK-NEXT:    vmov.f32 s21, s3
+; CHECK-NEXT:    vstrw.32 q5, [r0, #144]
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    vmov r3, r4, d13
+; CHECK-NEXT:    vmov r1, r2, d10
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vstrw.32 q0, [r0, #128]
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    add r3, sp, #128
+; CHECK-NEXT:    adc.w r12, r4, r2
+; CHECK-NEXT:    add r2, sp, #384
+; CHECK-NEXT:    vldrw.u32 q7, [r3]
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vmov r3, r4, d14
+; CHECK-NEXT:    vmov r2, r1, d1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adcs r1, r4
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, lr
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d11
+; CHECK-NEXT:    vmov r3, r4, d12
+; CHECK-NEXT:    vmov.f32 s22, s18
+; CHECK-NEXT:    vmov.f32 s23, s19
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d15
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    add r3, sp, #368
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    add r1, sp, #112
+; CHECK-NEXT:    vmov.f32 s20, s2
+; CHECK-NEXT:    vldrw.u32 q6, [r3]
+; CHECK-NEXT:    vmov.f32 s21, s3
+; CHECK-NEXT:    vstrw.32 q5, [r0, #112]
+; CHECK-NEXT:    vldrw.u32 q5, [r1]
+; CHECK-NEXT:    vmov r3, r4, d13
+; CHECK-NEXT:    vmov r1, r2, d10
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vstrw.32 q0, [r0, #96]
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    add r3, sp, #96
+; CHECK-NEXT:    adc.w r12, r4, r2
+; CHECK-NEXT:    add r2, sp, #352
+; CHECK-NEXT:    vldrw.u32 q7, [r3]
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vmov r3, r4, d14
+; CHECK-NEXT:    vmov r2, r1, d1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adcs r1, r4
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, lr
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d11
+; CHECK-NEXT:    vmov r3, r4, d12
+; CHECK-NEXT:    vmov.f32 s22, s18
+; CHECK-NEXT:    vmov.f32 s23, s19
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d15
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    add r3, sp, #336
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d6
+; CHECK-NEXT:    vmov.f32 s20, s2
+; CHECK-NEXT:    vmov.f32 s21, s3
+; CHECK-NEXT:    vstrw.32 q5, [r0, #80]
+; CHECK-NEXT:    vldrw.u32 q5, [r3]
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov r3, r4, d11
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vstrw.32 q0, [r0, #64]
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    adc.w r12, r4, r2
+; CHECK-NEXT:    add r2, sp, #320
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vmov r3, r4, d4
+; CHECK-NEXT:    vmov r2, r1, d1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adcs r1, r4
+; CHECK-NEXT:    vmov q4[2], q4[0], r2, lr
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d7
+; CHECK-NEXT:    vmov r3, r4, d10
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d5
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    vmov.f32 s10, s18
+; CHECK-NEXT:    vmov.f32 s11, s19
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    add r3, sp, #304
+; CHECK-NEXT:    vldrw.u32 q3, [r3]
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d2
+; CHECK-NEXT:    vmov r3, r4, d7
+; CHECK-NEXT:    vmov.f32 s8, s2
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s9, s3
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vstrw.32 q2, [r0, #48]
+; CHECK-NEXT:    vstrw.32 q0, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT:    adds.w lr, r3, r1
+; CHECK-NEXT:    adc.w r12, r4, r2
+; CHECK-NEXT:    add r2, sp, #288
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    vmov r3, r4, d8
+; CHECK-NEXT:    vmov r2, r1, d1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adcs r1, r4
+; CHECK-NEXT:    vmov q2[2], q2[0], r2, lr
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r12
+; CHECK-NEXT:    vmov r1, r2, d3
+; CHECK-NEXT:    vmov r3, r4, d6
+; CHECK-NEXT:    vmov.f32 s6, s10
+; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    subs.w lr, r3, r1
+; CHECK-NEXT:    sbc.w r12, r4, r2
+; CHECK-NEXT:    vmov r3, r4, d9
+; CHECK-NEXT:    vmov r2, r1, d0
+; CHECK-NEXT:    subs r2, r2, r3
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
+; CHECK-NEXT:    sbcs r1, r4
+; CHECK-NEXT:    vmov q0[3], q0[1], r1, r12
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    vmov.f32 s2, s8
+; CHECK-NEXT:    vmov.f32 s5, s3
+; CHECK-NEXT:    vmov.f32 s3, s9
+; CHECK-NEXT:    vstrw.32 q1, [r0, #16]
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+entry:
+  %a.real = shufflevector <32 x i64> %a, <32 x i64> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %a.imag = shufflevector <32 x i64> %a, <32 x i64> zeroinitializer, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %b.real = shufflevector <32 x i64> %b, <32 x i64> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %b.imag = shufflevector <32 x i64> %b, <32 x i64> zeroinitializer, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %0 = sub <16 x i64> %b.real, %a.imag
+  %1 = add <16 x i64> %b.imag, %a.real
+  %interleaved.vec = shufflevector <16 x i64> %0, <16 x i64> %1, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  ret <32 x i64> %interleaved.vec
+}

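None of the i64 variants are transformed, which matches the target hook
above: MVE has no 64-bit element size for VCADD, so
isComplexDeinterleavingOperationSupported only accepts

    ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) || ScalarTy->isIntegerTy(32)

and every <N x i64> case falls back to scalar adds and subtracts with
carries, as the long CHECK sequences show.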
diff --git a/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i8-add.ll b/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i8-add.ll
new file mode 100644
index 000000000000..794894def926
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-complex-deinterleaving-i8-add.ll
@@ -0,0 +1,117 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s --mattr=+mve -o - | FileCheck %s
+
+target triple = "thumbv8.1m.main-none-none-eabi"
+
+
+; Expected to not transform
+define arm_aapcs_vfpcc <2 x i8> @complex_add_v2i8(<2 x i8> %a, <2 x i8> %b) {
+; CHECK-LABEL: complex_add_v2i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    add r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    subs r1, r2, r1
+; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <2 x i8> %a, <2 x i8> zeroinitializer, <1 x i32> <i32 0>
+  %a.imag = shufflevector <2 x i8> %a, <2 x i8> zeroinitializer, <1 x i32> <i32 1>
+  %b.real = shufflevector <2 x i8> %b, <2 x i8> zeroinitializer, <1 x i32> <i32 0>
+  %b.imag = shufflevector <2 x i8> %b, <2 x i8> zeroinitializer, <1 x i32> <i32 1>
+  %0 = sub <1 x i8> %b.real, %a.imag
+  %1 = add <1 x i8> %b.imag, %a.real
+  %interleaved.vec = shufflevector <1 x i8> %0, <1 x i8> %1, <2 x i32> <i32 0, i32 1>
+  ret <2 x i8> %interleaved.vec
+}
+
+; Expected to not transform
+define arm_aapcs_vfpcc <4 x i8> @complex_add_v4i8(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: complex_add_v4i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vrev64.32 q2, q0
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    vrev64.32 q3, q1
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    subs r0, r1, r0
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    subs r1, r2, r1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    add r0, r1
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    add r1, r2
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <4 x i8> %a, <4 x i8> zeroinitializer, <2 x i32> <i32 0, i32 2>
+  %a.imag = shufflevector <4 x i8> %a, <4 x i8> zeroinitializer, <2 x i32> <i32 1, i32 3>
+  %b.real = shufflevector <4 x i8> %b, <4 x i8> zeroinitializer, <2 x i32> <i32 0, i32 2>
+  %b.imag = shufflevector <4 x i8> %b, <4 x i8> zeroinitializer, <2 x i32> <i32 1, i32 3>
+  %0 = sub <2 x i8> %b.real, %a.imag
+  %1 = add <2 x i8> %b.imag, %a.real
+  %interleaved.vec = shufflevector <2 x i8> %0, <2 x i8> %1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  ret <4 x i8> %interleaved.vec
+}
+
+; Expected to transform
+define arm_aapcs_vfpcc <8 x i8> @complex_add_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: complex_add_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vrev32.16 q2, q1
+; CHECK-NEXT:    vadd.i32 q2, q2, q0
+; CHECK-NEXT:    vrev32.16 q0, q0
+; CHECK-NEXT:    vsub.i32 q0, q1, q0
+; CHECK-NEXT:    vmovnt.i32 q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %a.imag = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %b.real = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %b.imag = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %0 = sub <4 x i8> %b.real, %a.imag
+  %1 = add <4 x i8> %b.imag, %a.real
+  %interleaved.vec = shufflevector <4 x i8> %0, <4 x i8> %1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  ret <8 x i8> %interleaved.vec
+}
+
+; Expected to transform
+define arm_aapcs_vfpcc <16 x i8> @complex_add_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: complex_add_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcadd.i8 q0, q1, q0, #90
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %a.imag = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %b.real = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %b.imag = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %0 = sub <8 x i8> %b.real, %a.imag
+  %1 = add <8 x i8> %b.imag, %a.real
+  %interleaved.vec = shufflevector <8 x i8> %0, <8 x i8> %1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  ret <16 x i8> %interleaved.vec
+}
+
+; Expected to transform
+define arm_aapcs_vfpcc <32 x i8> @complex_add_v32i8(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: complex_add_v32i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcadd.i8 q0, q2, q0, #90
+; CHECK-NEXT:    vcadd.i8 q1, q3, q1, #90
+; CHECK-NEXT:    bx lr
+entry:
+  %a.real = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %a.imag = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %b.real = shufflevector <32 x i8> %b, <32 x i8> zeroinitializer, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %b.imag = shufflevector <32 x i8> %b, <32 x i8> zeroinitializer, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %0 = sub <16 x i8> %b.real, %a.imag
+  %1 = add <16 x i8> %b.imag, %a.real
+  %interleaved.vec = shufflevector <16 x i8> %0, <16 x i8> %1, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+  ret <32 x i8> %interleaved.vec
+}
