[llvm] [AArch64] Use `ZIP1/2` over `INS` for vector concat (PR #142427)

Csanád Hajdú via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 2 09:40:26 PDT 2025


https://github.com/Il-Capitano created https://github.com/llvm/llvm-project/pull/142427

Some 64-bit `INS` instructions are equivalent to `ZIP1` or `ZIP2` instructions, e.g. `mov v0.d[1], v1.d[0]` is the same as `zip1 v0.2d, v0.2d, v1.2d`. Unlike `INS`, whose destination register is also its first source, `ZIP` writes to a destination that is separate from its inputs, which can help eliminate SIMD register moves and improve both code size and performance.

The motivation for this change is eliminating the SIMD register moves in this 4x4 f32 matrix transpose function: https://godbolt.org/z/f31Yz6q61
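
For illustration, here is a minimal C sketch (the function name is made up for this example) of the kind of 64-bit-to-128-bit concatenation the changed patterns cover; with this patch the `concat_vectors` of two low halves lowers to `zip1 v0.2d, v0.2d, v1.2d` instead of `mov v0.d[1], v1.d[0]`:

```c
#include <arm_neon.h>

// Concatenate two 64-bit vectors into one 128-bit vector.
// Previously this lowered to an INS (mov v0.d[1], v1.d[0]), which ties
// the destination register to the first source; it can now lower to
// zip1 v0.2d, v0.2d, v1.2d, whose destination is independent of both
// inputs, giving the register allocator more freedom to avoid moves.
float32x4_t concat_low_halves(float32x2_t a, float32x2_t b) {
  return vcombine_f32(a, b);
}
```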

From 53e6e47427efc7486257231d819295f447f38607 Mon Sep 17 00:00:00 2001
From: Csanád Hajdú <csanad.hajdu at arm.com>
Date: Mon, 26 May 2025 16:39:43 +0200
Subject: [PATCH] [AArch64] Use `ZIP1/2` over `INS` for vector concat

Some 64-bit `INS` instructions are equivalent to `ZIP1` or `ZIP2`
instructions, e.g. `mov v0.d[1], v1.d[0]` is the same as
`zip1 v0.2d, v0.2d, v1.2d`. Unlike `INS`, whose destination
register is also its first source, `ZIP` writes to a destination
that is separate from its inputs, which can help eliminate SIMD
register moves and improve both code size and performance.

The motivation for this change is eliminating the SIMD register moves
in this 4x4 f32 matrix transpose function:
https://godbolt.org/z/f31Yz6q61
---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |  13 +-
 .../GlobalISel/select-concat-vectors.mir      |  10 +-
 llvm/test/CodeGen/AArch64/aarch64-addv.ll     |   3 +-
 llvm/test/CodeGen/AArch64/aarch64-be-bv.ll    |   4 +-
 .../AArch64/aarch64-combine-add-zext.ll       |   6 +-
 llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll  |   8 +-
 llvm/test/CodeGen/AArch64/arm64-dup.ll        |   4 +-
 llvm/test/CodeGen/AArch64/arm64-fp128.ll      |  30 ++---
 llvm/test/CodeGen/AArch64/arm64-neon-copy.ll  |  45 ++++---
 llvm/test/CodeGen/AArch64/arm64-nvcast.ll     |   3 +-
 llvm/test/CodeGen/AArch64/arm64-tbl.ll        |   3 +-
 llvm/test/CodeGen/AArch64/arm64-vcombine.ll   |   7 +-
 llvm/test/CodeGen/AArch64/arm64-vshift.ll     |   4 +-
 llvm/test/CodeGen/AArch64/arm64-zip.ll        |  12 +-
 llvm/test/CodeGen/AArch64/bf16-shuffle.ll     |   2 +-
 .../CodeGen/AArch64/bf16-vector-shuffle.ll    |   2 +-
 llvm/test/CodeGen/AArch64/bitcast-extend.ll   |   4 +-
 .../CodeGen/AArch64/build-vector-two-dup.ll   |  42 +++---
 .../complex-deinterleaving-mixed-cases.ll     |   2 +-
 .../complex-deinterleaving-uniform-cases.ll   |   4 +-
 .../AArch64/concat-vector-add-combine.ll      |   4 +-
 llvm/test/CodeGen/AArch64/concat-vector.ll    |  60 +++------
 llvm/test/CodeGen/AArch64/concatbinop.ll      |  30 ++---
 llvm/test/CodeGen/AArch64/ctlz.ll             |   2 +-
 llvm/test/CodeGen/AArch64/ctpop.ll            |   2 +-
 llvm/test/CodeGen/AArch64/cttz.ll             |   2 +-
 .../AArch64/dag-combine-trunc-build-vec.ll    |   4 +-
 llvm/test/CodeGen/AArch64/extbinopload.ll     |   2 +-
 .../extend_inreg_of_concat_subvectors.ll      |  10 +-
 .../CodeGen/AArch64/extract-vector-elt.ll     |   4 +-
 llvm/test/CodeGen/AArch64/fabs.ll             |   2 +-
 llvm/test/CodeGen/AArch64/faddsub.ll          |   8 +-
 llvm/test/CodeGen/AArch64/fcmp.ll             |  84 ++++++------
 llvm/test/CodeGen/AArch64/fcopysign.ll        |   4 +-
 llvm/test/CodeGen/AArch64/fcvt.ll             |  14 +-
 llvm/test/CodeGen/AArch64/fdiv.ll             |   4 +-
 .../AArch64/fixed-vector-interleave.ll        |   4 +-
 llvm/test/CodeGen/AArch64/fminimummaximum.ll  |   8 +-
 llvm/test/CodeGen/AArch64/fminmax.ll          |   8 +-
 llvm/test/CodeGen/AArch64/fmla.ll             |  48 +++----
 llvm/test/CodeGen/AArch64/fmul.ll             |   4 +-
 llvm/test/CodeGen/AArch64/fneg.ll             |   2 +-
 .../CodeGen/AArch64/fp-intrinsics-vector.ll   |   6 +-
 .../AArch64/fp-maximumnum-minimumnum.ll       |  28 ++--
 .../CodeGen/AArch64/fp16-vector-shuffle.ll    |   2 +-
 llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll |  72 +++++-----
 llvm/test/CodeGen/AArch64/fptoi.ll            | 126 +++++++++++-------
 .../test/CodeGen/AArch64/fptosi-sat-vector.ll |   2 +-
 .../test/CodeGen/AArch64/fptoui-sat-vector.ll |   2 +-
 llvm/test/CodeGen/AArch64/fptrunc.ll          |  12 +-
 llvm/test/CodeGen/AArch64/fsqrt.ll            |   2 +-
 llvm/test/CodeGen/AArch64/icmp.ll             |  12 +-
 llvm/test/CodeGen/AArch64/insert-subvector.ll |  45 +++----
 llvm/test/CodeGen/AArch64/insertextract.ll    |  12 +-
 .../test/CodeGen/AArch64/insertshuffleload.ll |   2 +-
 llvm/test/CodeGen/AArch64/itofp.ll            |  20 +--
 .../AArch64/neon-bitwise-instructions.ll      |   4 +-
 .../AArch64/neon-compare-instructions.ll      |   4 +-
 llvm/test/CodeGen/AArch64/neon-dotreduce.ll   |  45 +++----
 .../CodeGen/AArch64/neon-extracttruncate.ll   |   8 +-
 llvm/test/CodeGen/AArch64/neon-wide-splat.ll  |   2 +-
 llvm/test/CodeGen/AArch64/pr135821.ll         |   8 +-
 llvm/test/CodeGen/AArch64/ptradd.ll           |   4 +-
 ...ull-negations-after-concat-of-truncates.ll |   6 +-
 .../CodeGen/AArch64/shuffle-mask-legal.ll     |   2 +-
 llvm/test/CodeGen/AArch64/shuffle-tbl34.ll    |  92 ++++++-------
 llvm/test/CodeGen/AArch64/shuffles.ll         |   4 +-
 llvm/test/CodeGen/AArch64/shufflevector.ll    |   4 +-
 .../AArch64/speculation-hardening-loads.ll    |   2 +-
 .../AArch64/sve-fixed-length-concat.ll        |  14 +-
 .../sve-fixed-length-fp-extend-trunc.ll       |   4 +-
 .../AArch64/sve-fixed-length-fp-to-int.ll     |  14 +-
 .../CodeGen/AArch64/sve-fixed-length-fp128.ll |  16 +--
 .../AArch64/sve-fixed-length-int-to-fp.ll     |  14 +-
 .../AArch64/sve-fixed-length-mask-opt.ll      |   4 +-
 .../AArch64/sve-fixed-length-masked-gather.ll |   8 +-
 .../AArch64/sve-fixed-length-masked-loads.ll  |  16 +--
 .../AArch64/sve-fixed-length-masked-stores.ll |  16 +--
 .../CodeGen/AArch64/sve-fixed-length-ptest.ll |   4 +-
 .../AArch64/sve-fixed-length-trunc-stores.ll  |   8 +-
 .../CodeGen/AArch64/sve-fixed-length-trunc.ll |  14 +-
 llvm/test/CodeGen/AArch64/trunc-to-tbl.ll     |   4 +-
 llvm/test/CodeGen/AArch64/trunc.ll            |   8 +-
 llvm/test/CodeGen/AArch64/vecreduce-add.ll    |  30 ++---
 llvm/test/CodeGen/AArch64/xtn.ll              |   6 +-
 85 files changed, 615 insertions(+), 611 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 96d0146c1e752..a68d831d8fe5a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -7590,13 +7590,13 @@ def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
           (bf16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
 
 // All concat_vectors operations are canonicalised to act on i64 vectors for
-// AArch64. In the general case we need an instruction, which had just as well be
-// INS.
+// AArch64. In the general case we can use ZIP1 to concatenate the low halves of
+// two SIMD registers.
 multiclass ConcatPat<ValueType DstTy, ValueType SrcTy,
                      ComplexPattern ExtractHigh> {
   def : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
-            (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
-                         (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;
+            (ZIP1v2i64 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
+                       (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub))>;
 
   // If the high lanes are zero we can instead emit a d->d register mov, which
   // will implicitly clear the upper bits.
@@ -7607,11 +7607,10 @@ multiclass ConcatPat<ValueType DstTy, ValueType SrcTy,
   def : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
             (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;
 
-  // Concatting the high half of two vectors is the insert of the first
-  // into the low half of the second.
+  // Concatting the high half of two vectors is equivalent to ZIP2v2i64.
   def : Pat<(DstTy (concat_vectors (ExtractHigh (DstTy V128:$Rn)),
                                    (ExtractHigh (DstTy V128:$Rm)))),
-            (INSvi64lane V128:$Rm, (i64 0), V128:$Rn, (i64 1))>;
+            (ZIP2v2i64 V128:$Rn, V128:$Rm)>;
 }
 
 defm : ConcatPat<v2i64, v1i64, extract_high_v2i64>;
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir
index 53d8b82509481..d2ed95ddbeac9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir
@@ -25,8 +25,8 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
     ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
     ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY]], %subreg.dsub
-    ; CHECK-NEXT: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
-    ; CHECK-NEXT: $q0 = COPY [[INSvi64lane]]
+    ; CHECK-NEXT: [[ZIP1v2i64_:%[0-9]+]]:fpr128 = ZIP1v2i64 [[INSERT_SUBREG1]], [[INSERT_SUBREG]]
+    ; CHECK-NEXT: $q0 = COPY [[ZIP1v2i64_]]
     ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = COPY $d1
@@ -59,8 +59,8 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
     ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
     ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY]], %subreg.dsub
-    ; CHECK-NEXT: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
-    ; CHECK-NEXT: $q0 = COPY [[INSvi64lane]]
+    ; CHECK-NEXT: [[ZIP1v2i64_:%[0-9]+]]:fpr128 = ZIP1v2i64 [[INSERT_SUBREG1]], [[INSERT_SUBREG]]
+    ; CHECK-NEXT: $q0 = COPY [[ZIP1v2i64_]]
     ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s16>) = COPY $d1
@@ -110,7 +110,7 @@ body:             |
     ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], %b, %subreg.dsub
     ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
     ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], %a, %subreg.dsub
-    ; CHECK-NEXT: %concat:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
+    ; CHECK-NEXT: %concat:fpr128 = ZIP1v2i64 [[INSERT_SUBREG1]], [[INSERT_SUBREG]]
     ; CHECK-NEXT: $q0 = COPY %concat
     ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %a:fpr(<8 x s8>) = COPY $d0
diff --git a/llvm/test/CodeGen/AArch64/aarch64-addv.ll b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
index bc675343adc08..4c056b0bdf608 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-addv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
@@ -447,7 +447,7 @@ define i64 @addv_v3i64(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    mov v2.d[1], xzr
 ; CHECK-SD-NEXT:    add v0.2d, v0.2d, v2.2d
 ; CHECK-SD-NEXT:    addp d0, v0.2d
@@ -492,4 +492,3 @@ entry:
   %arg1 = call i128 @llvm.vector.reduce.add.v2i128(<2 x i128> %a)
   ret i128 %arg1
 }
-
diff --git a/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll b/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll
index 4afe362686440..2f645bb15b862 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll
@@ -1036,8 +1036,8 @@ define <2 x double> @test_v1f64(<1 x double> %0, ptr %1) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvni v1.2s, #31, msl #16
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-NEXT:    ext v0.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT:    ret
   %vec = shufflevector <1 x double> <double 0xFFE00000FFE00000>, <1 x double> %0, <2 x i32> <i32 0, i32 1>
   ret <2 x double> %vec
diff --git a/llvm/test/CodeGen/AArch64/aarch64-combine-add-zext.ll b/llvm/test/CodeGen/AArch64/aarch64-combine-add-zext.ll
index b1b995931ac02..291bdc52be91a 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-combine-add-zext.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-combine-add-zext.ll
@@ -6,7 +6,7 @@ define i16 @test_add_zext_v8i16(<8 x i8> %a, <8 x i8> %b) local_unnamed_addr #0
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    uaddlv h0, v0.16b
 ; CHECK-NEXT:    umov w0, v0.h[0]
 ; CHECK-NEXT:    ret
@@ -22,7 +22,7 @@ define i32 @test_add_zext_v4i32(<4 x i16> %a, <4 x i16> %b) local_unnamed_addr #
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    uaddlv s0, v0.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
@@ -38,7 +38,7 @@ define i64 @test_add_zext_v2i64(<2 x i32> %a, <2 x i32> %b) local_unnamed_addr #
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    uaddlv d0, v0.4s
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
index 8a503bb65c079..ddfbc23237441 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
@@ -714,7 +714,7 @@ define i64 @sminv_v3i64(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    mov x8, #9223372036854775807 // =0x7fffffffffffffff
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    mov v2.d[1], x8
 ; CHECK-SD-NEXT:    cmgt v1.2d, v2.2d, v0.2d
 ; CHECK-SD-NEXT:    bif v0.16b, v2.16b, v1.16b
@@ -1074,7 +1074,7 @@ define i64 @smaxv_v3i64(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    mov v2.d[1], x8
 ; CHECK-SD-NEXT:    cmgt v1.2d, v0.2d, v2.2d
 ; CHECK-SD-NEXT:    bif v0.16b, v2.16b, v1.16b
@@ -1432,7 +1432,7 @@ define i64 @uminv_v3i64(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    mov x8, #-1 // =0xffffffffffffffff
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    mov v2.d[1], x8
 ; CHECK-SD-NEXT:    cmhi v1.2d, v2.2d, v0.2d
 ; CHECK-SD-NEXT:    bif v0.16b, v2.16b, v1.16b
@@ -1788,7 +1788,7 @@ define i64 @umaxv_v3i64(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    mov v3.16b, v2.16b
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    mov v3.d[1], xzr
 ; CHECK-SD-NEXT:    cmhi v3.2d, v0.2d, v3.2d
 ; CHECK-SD-NEXT:    ext v4.16b, v3.16b, v3.16b, #8
diff --git a/llvm/test/CodeGen/AArch64/arm64-dup.ll b/llvm/test/CodeGen/AArch64/arm64-dup.ll
index 4c28ea7592202..0ea11c33b9c46 100644
--- a/llvm/test/CodeGen/AArch64/arm64-dup.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-dup.ll
@@ -602,7 +602,7 @@ define <4 x i32> @dup_const24(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C) nounwind
 ; CHECK-SD-NEXT:    movk w8, #128, lsl #16
 ; CHECK-SD-NEXT:    dup.4s v3, w8
 ; CHECK-SD-NEXT:    add.2s v0, v0, v3
-; CHECK-SD-NEXT:    mov.d v0[1], v1[0]
+; CHECK-SD-NEXT:    zip1.2d v0, v0, v1
 ; CHECK-SD-NEXT:    add.4s v1, v2, v3
 ; CHECK-SD-NEXT:    eor.16b v0, v1, v0
 ; CHECK-SD-NEXT:    ret
@@ -615,7 +615,7 @@ define <4 x i32> @dup_const24(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C) nounwind
 ; CHECK-GI-NEXT:    adrp x8, .LCPI41_0
 ; CHECK-GI-NEXT:    add.2s v0, v0, v3
 ; CHECK-GI-NEXT:    ldr q3, [x8, :lo12:.LCPI41_0]
-; CHECK-GI-NEXT:    mov.d v0[1], v1[0]
+; CHECK-GI-NEXT:    zip1.2d v0, v0, v1
 ; CHECK-GI-NEXT:    add.4s v1, v2, v3
 ; CHECK-GI-NEXT:    eor.16b v0, v1, v0
 ; CHECK-GI-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
index 7eb26096ed156..8193e49bb71d9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
@@ -635,17 +635,17 @@ define <2 x i64> @vec_fptosi_64(<2 x fp128> %val) {
 ; CHECK-SD-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-SD-NEXT:    .cfi_offset w30, -16
-; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    mov v0.16b, v1.16b
 ; CHECK-SD-NEXT:    bl __fixtfdi
 ; CHECK-SD-NEXT:    fmov d0, x0
-; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    bl __fixtfdi
 ; CHECK-SD-NEXT:    fmov d0, x0
-; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    add sp, sp, #48
 ; CHECK-SD-NEXT:    ret
 ;
@@ -719,17 +719,17 @@ define <2 x i64> @vec_fptoui_64(<2 x fp128> %val) {
 ; CHECK-SD-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-SD-NEXT:    .cfi_offset w30, -16
-; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    mov v0.16b, v1.16b
 ; CHECK-SD-NEXT:    bl __fixunstfdi
 ; CHECK-SD-NEXT:    fmov d0, x0
-; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    bl __fixunstfdi
 ; CHECK-SD-NEXT:    fmov d0, x0
-; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    add sp, sp, #48
 ; CHECK-SD-NEXT:    ret
 ;
@@ -1278,17 +1278,17 @@ define <2 x double> @vec_round_f64(<2 x fp128> %val) {
 ; CHECK-SD-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-SD-NEXT:    .cfi_offset w30, -16
-; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    mov v0.16b, v1.16b
 ; CHECK-SD-NEXT:    bl __trunctfdf2
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    bl __trunctfdf2
-; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    add sp, sp, #48
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
index 51f1351a5edf4..2ac4df6e73679 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -1716,7 +1716,7 @@ entry:
 define <16 x i8> @test_concat_v16i8_v16i8_v16i8(<16 x i8> %x, <16 x i8> %y) #0 {
 ; CHECK-SD-LABEL: test_concat_v16i8_v16i8_v16i8:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v16i8_v16i8_v16i8:
@@ -1736,7 +1736,7 @@ define <16 x i8> @test_concat_v16i8_v8i8_v16i8(<8 x i8> %x, <16 x i8> %y) #0 {
 ; CHECK-SD-LABEL: test_concat_v16i8_v8i8_v16i8:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v16i8_v8i8_v16i8:
@@ -1780,7 +1780,7 @@ define <16 x i8> @test_concat_v16i8_v16i8_v8i8(<16 x i8> %x, <8 x i8> %y) #0 {
 ; CHECK-SD-LABEL: test_concat_v16i8_v16i8_v8i8:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v16i8_v16i8_v8i8:
@@ -1845,7 +1845,7 @@ define <16 x i8> @test_concat_v16i8_v8i8_v8i8(<8 x i8> %x, <8 x i8> %y) #0 {
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v16i8_v8i8_v8i8:
@@ -1909,7 +1909,7 @@ entry:
 define <8 x i16> @test_concat_v8i16_v8i16_v8i16(<8 x i16> %x, <8 x i16> %y) #0 {
 ; CHECK-SD-LABEL: test_concat_v8i16_v8i16_v8i16:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v8i16_v8i16_v8i16:
@@ -1929,7 +1929,7 @@ define <8 x i16> @test_concat_v8i16_v4i16_v8i16(<4 x i16> %x, <8 x i16> %y) #0 {
 ; CHECK-SD-LABEL: test_concat_v8i16_v4i16_v8i16:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v8i16_v4i16_v8i16:
@@ -1961,7 +1961,7 @@ define <8 x i16> @test_concat_v8i16_v8i16_v4i16(<8 x i16> %x, <4 x i16> %y) #0 {
 ; CHECK-SD-LABEL: test_concat_v8i16_v8i16_v4i16:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v8i16_v8i16_v4i16:
@@ -2002,7 +2002,7 @@ define <8 x i16> @test_concat_v8i16_v4i16_v4i16(<4 x i16> %x, <4 x i16> %y) #0 {
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v8i16_v4i16_v4i16:
@@ -2042,7 +2042,7 @@ entry:
 define <4 x i32> @test_concat_v4i32_v4i32_v4i32(<4 x i32> %x, <4 x i32> %y) #0 {
 ; CHECK-SD-LABEL: test_concat_v4i32_v4i32_v4i32:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v4i32_v4i32_v4i32:
@@ -2062,7 +2062,7 @@ define <4 x i32> @test_concat_v4i32_v2i32_v4i32(<2 x i32> %x, <4 x i32> %y) #0 {
 ; CHECK-SD-LABEL: test_concat_v4i32_v2i32_v4i32:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v4i32_v2i32_v4i32:
@@ -2088,7 +2088,7 @@ define <4 x i32> @test_concat_v4i32_v4i32_v2i32(<4 x i32> %x, <2 x i32> %y) #0 {
 ; CHECK-SD-LABEL: test_concat_v4i32_v4i32_v2i32:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v4i32_v4i32_v2i32:
@@ -2117,7 +2117,7 @@ define <4 x i32> @test_concat_v4i32_v2i32_v2i32(<2 x i32> %x, <2 x i32> %y) #0 {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
 entry:
   %vecinit6 = shufflevector <2 x i32> %x, <2 x i32> %y, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2151,7 +2151,7 @@ define <2 x i64> @test_concat_v2i64_v2i64_v1i64(<2 x i64> %x, <1 x i64> %y) #0 {
 ; CHECK-SD-LABEL: test_concat_v2i64_v2i64_v1i64:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test_concat_v2i64_v2i64_v1i64:
@@ -2169,12 +2169,19 @@ entry:
 }
 
 define <2 x i64> @test_concat_v2i64_v1i64_v1i64(<1 x i64> %x, <1 x i64> %y) #0 {
-; CHECK-LABEL: test_concat_v2i64_v1i64_v1i64:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_concat_v2i64_v1i64_v1i64:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_concat_v2i64_v1i64_v1i64:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT:    ret
 entry:
   %vecext = extractelement <1 x i64> %x, i32 0
   %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
diff --git a/llvm/test/CodeGen/AArch64/arm64-nvcast.ll b/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
index 80314765abdd1..f5acd7124211e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
@@ -79,8 +79,7 @@ define <4 x float> @testv4i16(<2 x float> %l1) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    movi.4h v1, #16
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov.d v1[1], v0[0]
-; CHECK-NEXT:    mov.16b v0, v1
+; CHECK-NEXT:    zip1.2d v0, v1, v0
 ; CHECK-NEXT:    ret
   %l2 = shufflevector <2 x float> <float 0x37E0001000000000, float 0x37E0001000000000>, <2 x float> %l1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x float> %l2
diff --git a/llvm/test/CodeGen/AArch64/arm64-tbl.ll b/llvm/test/CodeGen/AArch64/arm64-tbl.ll
index fe5a6f12a49c3..dfdf0a5924f86 100644
--- a/llvm/test/CodeGen/AArch64/arm64-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-tbl.ll
@@ -610,8 +610,7 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask(<16 x i8> %a, <16 x
 ; CHECK-SD-NEXT:    mov.b v4[6], w0
 ; CHECK-SD-NEXT:    mov.b v4[7], w0
 ; CHECK-SD-NEXT:    tbl.16b v0, { v0, v1 }, v4
-; CHECK-SD-NEXT:    mov.d v2[1], v0[0]
-; CHECK-SD-NEXT:    mov.16b v0, v2
+; CHECK-SD-NEXT:    zip1.2d v0, v2, v0
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: shuffled_tbl2_to_tbl4_nonconst_second_mask:
diff --git a/llvm/test/CodeGen/AArch64/arm64-vcombine.ll b/llvm/test/CodeGen/AArch64/arm64-vcombine.ll
index ca9d16d86298f..398a8512b5c34 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vcombine.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vcombine.ll
@@ -1,12 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
 ; LowerCONCAT_VECTORS() was reversing the order of two parts.
 ; rdar://11558157
 ; rdar://11559553
 define <16 x i8> @test(<16 x i8> %q0, <16 x i8> %q1, ptr nocapture %dest) nounwind {
-entry:
 ; CHECK-LABEL: test:
-; CHECK: mov.d v0[1], v1[0]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    zip1.2d v0, v0, v1
+; CHECK-NEXT:    ret
+entry:
   %0 = bitcast <16 x i8> %q0 to <2 x i64>
   %shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> zeroinitializer
   %1 = bitcast <16 x i8> %q1 to <2 x i64>
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index a7f9ca8d73c1f..2621cdcc92bcc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2348,7 +2348,7 @@ define <4 x i32> @neon_ushl8h_constant_shift_extend_not_2x(ptr %A) nounwind {
 ; CHECK-GI-NEXT:    mov v2.h[1], w11
 ; CHECK-GI-NEXT:    ushll v1.4s, v1.4h, #0
 ; CHECK-GI-NEXT:    ushll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT:    mov v1.d[1], v2.d[0]
+; CHECK-GI-NEXT:    zip1 v1.2d, v1.2d, v2.2d
 ; CHECK-GI-NEXT:    ushl v0.4s, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    ret
   %tmp1 = load <4 x i8>, ptr %A
@@ -2623,7 +2623,7 @@ define <4 x i32> @neon_sshl4s_wrong_ext_constant_shift(ptr %A) nounwind {
 ; CHECK-GI-NEXT:    mov v2.h[1], w11
 ; CHECK-GI-NEXT:    sshll v1.4s, v1.4h, #0
 ; CHECK-GI-NEXT:    sshll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT:    mov v1.d[1], v2.d[0]
+; CHECK-GI-NEXT:    zip1 v1.2d, v1.2d, v2.2d
 ; CHECK-GI-NEXT:    sshl v0.4s, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    ret
   %tmp1 = load <4 x i8>, ptr %A
diff --git a/llvm/test/CodeGen/AArch64/arm64-zip.ll b/llvm/test/CodeGen/AArch64/arm64-zip.ll
index b24e54a68fb42..205e662c38222 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zip.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zip.ll
@@ -239,8 +239,7 @@ define <16 x i8> @combine2_v16i8(<8 x i8> %0, <8 x i8> %1) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    zip1.8b v2, v0, v1
 ; CHECK-GI-NEXT:    zip2.8b v0, v0, v1
-; CHECK-GI-NEXT:    mov.d v2[1], v0[0]
-; CHECK-GI-NEXT:    mov.16b v0, v2
+; CHECK-GI-NEXT:    zip1.2d v0, v2, v0
 ; CHECK-GI-NEXT:    ret
   %3 = shufflevector <8 x i8> %0, <8 x i8> %1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
   %4 = shufflevector <8 x i8> %0, <8 x i8> %1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -271,8 +270,7 @@ define <8 x i16> @combine2_v8i16(<4 x i16> %0, <4 x i16> %1) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    zip1.4h v2, v0, v1
 ; CHECK-GI-NEXT:    zip2.4h v0, v0, v1
-; CHECK-GI-NEXT:    mov.d v2[1], v0[0]
-; CHECK-GI-NEXT:    mov.16b v0, v2
+; CHECK-GI-NEXT:    zip1.2d v0, v2, v0
 ; CHECK-GI-NEXT:    ret
   %3 = shufflevector <4 x i16> %0, <4 x i16> %1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   %4 = shufflevector <4 x i16> %0, <4 x i16> %1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -303,8 +301,7 @@ define <4 x i32> @combine2_v4i32(<2 x i32> %0, <2 x i32> %1) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    zip1.2s v2, v0, v1
 ; CHECK-GI-NEXT:    zip2.2s v0, v0, v1
-; CHECK-GI-NEXT:    mov.d v2[1], v0[0]
-; CHECK-GI-NEXT:    mov.16b v0, v2
+; CHECK-GI-NEXT:    zip1.2d v0, v2, v0
 ; CHECK-GI-NEXT:    ret
   %3 = shufflevector <2 x i32> %0, <2 x i32> %1, <2 x i32> <i32 0, i32 2>
   %4 = shufflevector <2 x i32> %0, <2 x i32> %1, <2 x i32> <i32 1, i32 3>
@@ -335,8 +332,7 @@ define <16 x i8> @combine2_v16i8_undef(<8 x i8> %0, <8 x i8> %1) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    zip1.8b v2, v0, v1
 ; CHECK-GI-NEXT:    zip2.8b v0, v0, v1
-; CHECK-GI-NEXT:    mov.d v2[1], v0[0]
-; CHECK-GI-NEXT:    mov.16b v0, v2
+; CHECK-GI-NEXT:    zip1.2d v0, v2, v0
 ; CHECK-GI-NEXT:    ret
   %3 = shufflevector <8 x i8> %0, <8 x i8> %1, <8 x i32> <i32 0, i32 undef, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
   %4 = shufflevector <8 x i8> %0, <8 x i8> %1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
diff --git a/llvm/test/CodeGen/AArch64/bf16-shuffle.ll b/llvm/test/CodeGen/AArch64/bf16-shuffle.ll
index d59de3c56f4ee..568eb1488746c 100644
--- a/llvm/test/CodeGen/AArch64/bf16-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/bf16-shuffle.ll
@@ -345,7 +345,7 @@ define <8 x bfloat> @test_shufflevector8xbfloat(<4 x bfloat> %a) {
 ; CHECK-LABEL: test_shufflevector8xbfloat:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v0.2d
 ; CHECK-NEXT:    ret
 entry:
   %r = shufflevector <4 x bfloat> %a, <4 x bfloat> %a, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/llvm/test/CodeGen/AArch64/bf16-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/bf16-vector-shuffle.ll
index 222d7435ff742..944fc6c99b44c 100644
--- a/llvm/test/CodeGen/AArch64/bf16-vector-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/bf16-vector-shuffle.ll
@@ -90,7 +90,7 @@ define <8 x bfloat> @test_vcombine_bf16(<4 x bfloat> %low, <4 x bfloat> %high) n
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i = shufflevector <4 x bfloat> %low, <4 x bfloat> %high, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/llvm/test/CodeGen/AArch64/bitcast-extend.ll b/llvm/test/CodeGen/AArch64/bitcast-extend.ll
index 3133d0efb4b9b..be87db48c7784 100644
--- a/llvm/test/CodeGen/AArch64/bitcast-extend.ll
+++ b/llvm/test/CodeGen/AArch64/bitcast-extend.ll
@@ -58,7 +58,7 @@ define <4 x i32> @z_i32_v4i32(i32 %x) {
 ; CHECK-GI-NEXT:    mov v1.h[1], w11
 ; CHECK-GI-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-GI-NEXT:    ushll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    ret
   %b = bitcast i32 %x to <4 x i8>
   %e = zext <4 x i8> %b to <4 x i32>
@@ -161,7 +161,7 @@ define <4 x i32> @s_i32_v4i32(i32 %x) {
 ; CHECK-GI-NEXT:    mov v1.h[1], w11
 ; CHECK-GI-NEXT:    sshll v0.4s, v0.4h, #0
 ; CHECK-GI-NEXT:    sshll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    ret
   %b = bitcast i32 %x to <4 x i8>
   %e = sext <4 x i8> %b to <4 x i32>
diff --git a/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll b/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
index dbbfbea9176f6..c9934ba558d8c 100644
--- a/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
+++ b/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
@@ -4,9 +4,9 @@
 define <16 x i8> @test1(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
 ; CHECK-LABEL: test1:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ld1r { v1.8b }, [x1]
-; CHECK-NEXT:    ld1r { v0.8b }, [x0]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ld1r { v0.8b }, [x1]
+; CHECK-NEXT:    ld1r { v1.8b }, [x0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i8, ptr %a, align 1
@@ -22,11 +22,11 @@ entry:
 define <16 x i8> @test2(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
 ; CHECK-LABEL: test2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ld1r { v1.8b }, [x1]
+; CHECK-NEXT:    ld1r { v0.8b }, [x1]
 ; CHECK-NEXT:    ldrb w8, [x0]
-; CHECK-NEXT:    dup v0.8b, w8
-; CHECK-NEXT:    mov v1.b[7], w8
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    dup v1.8b, w8
+; CHECK-NEXT:    mov v0.b[7], w8
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i8, ptr %a, align 1
@@ -60,9 +60,9 @@ entry:
 define <16 x i8> @test4(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
 ; CHECK-LABEL: test4:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ld1r { v1.8b }, [x0]
-; CHECK-NEXT:    ld1r { v0.8b }, [x1]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ld1r { v0.8b }, [x0]
+; CHECK-NEXT:    ld1r { v1.8b }, [x1]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i8, ptr %a, align 1
@@ -136,9 +136,9 @@ entry:
 define <8 x i16> @test8(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
 ; CHECK-LABEL: test8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ld1r { v1.4h }, [x1]
-; CHECK-NEXT:    ld1r { v0.4h }, [x0]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ld1r { v0.4h }, [x1]
+; CHECK-NEXT:    ld1r { v1.4h }, [x0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i16, ptr %a, align 1
@@ -154,9 +154,9 @@ entry:
 define <4 x i32> @test9(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
 ; CHECK-LABEL: test9:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ld1r { v1.2s }, [x1]
-; CHECK-NEXT:    ld1r { v0.2s }, [x0]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ld1r { v0.2s }, [x1]
+; CHECK-NEXT:    ld1r { v1.2s }, [x0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i32, ptr %a, align 1
@@ -174,7 +174,7 @@ define <2 x i64> @test10(ptr nocapture noundef readonly %a, ptr nocapture nounde
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i64, ptr %a, align 1
@@ -214,7 +214,7 @@ define <4 x i32> @test12(ptr nocapture noundef readonly %a, ptr nocapture nounde
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    mov v0.s[1], w8
 ; CHECK-NEXT:    mov v1.s[0], w8
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i32, ptr %a, align 1
@@ -230,9 +230,9 @@ entry:
 define <2 x i64> @test13(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
 ; CHECK-LABEL: test13:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ldr d1, [x0]
-; CHECK-NEXT:    ldr d0, [x1]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i64, ptr %a, align 1
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-mixed-cases.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-mixed-cases.ll
index 1ed9cf2db24f7..79bd429f0d1d7 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-mixed-cases.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-mixed-cases.ll
@@ -327,7 +327,7 @@ define <4 x float> @mul_triangle_multiuses(<4 x float> %a, <4 x float> %b, ptr %
 ; CHECK-NEXT:    fneg v3.2s, v3.2s
 ; CHECK-NEXT:    fmla v5.2s, v4.2s, v1.2s
 ; CHECK-NEXT:    fmla v3.2s, v0.2s, v1.2s
-; CHECK-NEXT:    mov v1.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v1.2d, v1.2d, v2.2d
 ; CHECK-NEXT:    zip1 v0.4s, v3.4s, v5.4s
 ; CHECK-NEXT:    str q1, [x0]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-uniform-cases.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-uniform-cases.ll
index 7686740aec302..a0c7d83e4e330 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-uniform-cases.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-uniform-cases.ll
@@ -285,9 +285,9 @@ define <12 x float> @abp90c12(<12 x float> %a, <12 x float> %b, <12 x float> %c)
 ; CHECK-NEXT:    zip2 v4.4s, v0.4s, v3.4s
 ; CHECK-NEXT:    zip1 v0.4s, v0.4s, v3.4s
 ; CHECK-NEXT:    ext v1.16b, v2.16b, v1.16b, #8
-; CHECK-NEXT:    mov v4.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v2.2d, v4.2d, v2.2d
 ; CHECK-NEXT:    str q0, [x8]
-; CHECK-NEXT:    stp q4, q1, [x8, #16]
+; CHECK-NEXT:    stp q2, q1, [x8, #16]
 ; CHECK-NEXT:    ret
 entry:
   %ar = shufflevector <12 x float> %a, <12 x float> poison, <6 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10>
diff --git a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
index 34899cb47dba3..6a56ffb2cbf08 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll
@@ -102,8 +102,8 @@ define i32 @combine_undef_add_8xi32(i32 %a, i32 %b, i32 %c, i32 %d) local_unname
 ; CHECK-NEXT:    xtn v2.4h, v1.4s
 ; CHECK-NEXT:    shrn v1.4h, v1.4s, #16
 ; CHECK-NEXT:    uhadd v1.4h, v2.4h, v1.4h
-; CHECK-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-NEXT:    uaddlv s0, v1.8h
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    uaddlv s0, v0.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
   %a1 = insertelement <8 x i32> poison, i32 %a, i32 0
diff --git a/llvm/test/CodeGen/AArch64/concat-vector.ll b/llvm/test/CodeGen/AArch64/concat-vector.ll
index 05a40453833ee..308257e7fbd0f 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector.ll
@@ -49,7 +49,7 @@ define <16 x i8> @concat3(<8 x i8> %A, <8 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
    %v16i8 = shufflevector <8 x i8> %A, <8 x i8> %B, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
    ret <16 x i8> %v16i8
@@ -80,7 +80,7 @@ define <8 x i16> @concat5(<4 x i16> %A, <4 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
    %v8i16 = shufflevector <4 x i16> %A, <4 x i16> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
    ret <8 x i16> %v8i16
@@ -103,7 +103,7 @@ define <4 x i32> @concat7(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
    %v4i32 = shufflevector <2 x i32> %A, <2 x i32> %B, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
    ret <4 x i32> %v4i32
@@ -144,7 +144,7 @@ define <8 x half> @concat10(<4 x half> %A, <4 x half> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
    %v8half= shufflevector <4 x half> %A, <4 x half> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
    ret <8 x half> %v8half
@@ -213,8 +213,8 @@ define <16 x i8> @concat_v16s8_v4s8_reg(<4 x i8> %A, <4 x i8> %B, <4 x i8> %C, <
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v2.d[1], v3.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v2.2d, v2.2d, v3.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    uzp1 v0.16b, v0.16b, v2.16b
 ; CHECK-SD-NEXT:    ret
 ;
@@ -338,7 +338,7 @@ define <4 x i16> @concat_undef_first_use_undef(ptr %p1, ptr %p2) {
 define <8 x i16> @concat_low_low_v8i16(<8 x i16> %a_vec, <8 x i16> %b_vec) {
 ; CHECK-LABEL: concat_low_low_v8i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i3 = shufflevector <8 x i16> %a_vec, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -356,7 +356,7 @@ define <8 x i16> @concat_high_low_v8i16(<8 x i16> %a_vec, <8 x i16> %b_vec) {
 ; CHECK-GI-LABEL: concat_high_low_v8i16:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    mov d0, v0.d[1]
-; CHECK-GI-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    ret
 entry:
   %shuffle.i3 = shufflevector <8 x i16> %a_vec, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -369,13 +369,13 @@ define <8 x i16> @concat_low_high_v8i16(<8 x i16> %a_vec, <8 x i16> %b_vec) {
 ; CHECK-SD-LABEL: concat_low_high_v8i16:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: concat_low_high_v8i16:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    mov d1, v1.d[1]
-; CHECK-GI-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    ret
 entry:
   %shuffle.i3 = shufflevector <8 x i16> %a_vec, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -387,8 +387,7 @@ entry:
 define <8 x i16> @concat_high_high_v8i16(<8 x i16> %a_vec, <8 x i16> %b_vec) {
 ; CHECK-LABEL: concat_high_high_v8i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov v1.d[0], v0.d[1]
-; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    zip2 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i3 = shufflevector <8 x i16> %a_vec, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -400,8 +399,7 @@ entry:
 define <8 x half> @concat_high_high_v8f16(<8 x half> %a_vec, <8 x half> %b_vec) {
 ; CHECK-LABEL: concat_high_high_v8f16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov v1.d[0], v0.d[1]
-; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    zip2 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i3 = shufflevector <8 x half> %a_vec, <8 x half> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -413,8 +411,7 @@ entry:
 define <8 x bfloat> @concat_high_high_v8bf16(<8 x bfloat> %a_vec, <8 x bfloat> %b_vec) {
 ; CHECK-LABEL: concat_high_high_v8bf16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov v1.d[0], v0.d[1]
-; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    zip2 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i3 = shufflevector <8 x bfloat> %a_vec, <8 x bfloat> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -424,16 +421,10 @@ entry:
 }
 
 define <4 x i32> @concat_high_high_v4i32(<4 x i32> %a_vec, <4 x i32> %b_vec) {
-; CHECK-SD-LABEL: concat_high_high_v4i32:
-; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    zip2 v0.2d, v0.2d, v1.2d
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: concat_high_high_v4i32:
-; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    mov v1.d[0], v0.d[1]
-; CHECK-GI-NEXT:    mov v0.16b, v1.16b
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: concat_high_high_v4i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    zip2 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    ret
 entry:
   %shuffle.i3 = shufflevector <4 x i32> %a_vec, <4 x i32> poison, <2 x i32> <i32 2, i32 3>
   %shuffle.i = shufflevector <4 x i32> %b_vec, <4 x i32> poison, <2 x i32> <i32 2, i32 3>
@@ -442,16 +433,10 @@ entry:
 }
 
 define <4 x float> @concat_high_high_v4f32(<4 x float> %a_vec, <4 x float> %b_vec) {
-; CHECK-SD-LABEL: concat_high_high_v4f32:
-; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    zip2 v0.2d, v0.2d, v1.2d
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: concat_high_high_v4f32:
-; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    mov v1.d[0], v0.d[1]
-; CHECK-GI-NEXT:    mov v0.16b, v1.16b
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: concat_high_high_v4f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    zip2 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    ret
 entry:
   %shuffle.i3 = shufflevector <4 x float> %a_vec, <4 x float> poison, <2 x i32> <i32 2, i32 3>
   %shuffle.i = shufflevector <4 x float> %b_vec, <4 x float> poison, <2 x i32> <i32 2, i32 3>
@@ -462,8 +447,7 @@ entry:
 define <16 x i8> @concat_high_high_v16i8(<16 x i8> %a_vec, <16 x i8> %b_vec) {
 ; CHECK-LABEL: concat_high_high_v16i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov v1.d[0], v0.d[1]
-; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    zip2 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i3 = shufflevector <16 x i8> %a_vec, <16 x i8> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
diff --git a/llvm/test/CodeGen/AArch64/concatbinop.ll b/llvm/test/CodeGen/AArch64/concatbinop.ll
index 828182d18b38c..a5fcf87ce4121 100644
--- a/llvm/test/CodeGen/AArch64/concatbinop.ll
+++ b/llvm/test/CodeGen/AArch64/concatbinop.ll
@@ -9,8 +9,8 @@ define <8 x i16> @concat_add(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16>
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT:    mov v1.d[1], v3.d[0]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
   %x = add <4 x i16> %a, %b
@@ -58,8 +58,8 @@ define <8 x i16> @concat_sub(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16>
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT:    mov v1.d[1], v3.d[0]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    sub v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
   %x = sub <4 x i16> %a, %b
@@ -75,8 +75,8 @@ define <8 x i16> @concat_mul(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16>
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT:    mov v1.d[1], v3.d[0]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    mul v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
   %x = mul <4 x i16> %a, %b
@@ -92,8 +92,8 @@ define <8 x i16> @concat_xor(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16>
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT:    mov v1.d[1], v3.d[0]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %x = xor <4 x i16> %a, %b
@@ -109,8 +109,8 @@ define <8 x half> @concat_fadd(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT:    mov v1.d[1], v3.d[0]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    fadd v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
   %x = fadd <4 x half> %a, %b
@@ -126,8 +126,8 @@ define <8 x half> @concat_fmul(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT:    mov v1.d[1], v3.d[0]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    fmul v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
   %x = fmul <4 x half> %a, %b
@@ -143,8 +143,8 @@ define <8 x half> @concat_min(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT:    mov v1.d[1], v3.d[0]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    fminnm v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
   %x = call <4 x half> @llvm.minnum.v4f16(<4 x half> %a, <4 x half> %b)
@@ -158,7 +158,7 @@ define <8 x half> @concat_minmax(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmaxnm v2.4h, v2.4h, v3.4h
 ; CHECK-NEXT:    fminnm v0.4h, v0.4h, v1.4h
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    ret
   %x = call <4 x half> @llvm.minnum.v4f16(<4 x half> %a, <4 x half> %b)
   %y = call <4 x half> @llvm.maxnum.v4f16(<4 x half> %c, <4 x half> %d)
diff --git a/llvm/test/CodeGen/AArch64/ctlz.ll b/llvm/test/CodeGen/AArch64/ctlz.ll
index a4863d1f74200..15d6936e3306e 100644
--- a/llvm/test/CodeGen/AArch64/ctlz.ll
+++ b/llvm/test/CodeGen/AArch64/ctlz.ll
@@ -312,7 +312,7 @@ define <3 x i64> @v3i64(<3 x i64> %d) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ushr v1.2d, v0.2d, #1
 ; CHECK-SD-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-SD-NEXT:    ushr v1.2d, v2.2d, #1
diff --git a/llvm/test/CodeGen/AArch64/ctpop.ll b/llvm/test/CodeGen/AArch64/ctpop.ll
index 55f75b6bc3f27..79df31241f6e0 100644
--- a/llvm/test/CodeGen/AArch64/ctpop.ll
+++ b/llvm/test/CodeGen/AArch64/ctpop.ll
@@ -304,7 +304,7 @@ define <3 x i64> @v3i64(<3 x i64> %d) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    cnt v1.16b, v2.16b
 ; CHECK-SD-NEXT:    cnt v0.16b, v0.16b
 ; CHECK-SD-NEXT:    uaddlp v1.8h, v1.16b
diff --git a/llvm/test/CodeGen/AArch64/cttz.ll b/llvm/test/CodeGen/AArch64/cttz.ll
index 60125f8a19811..711cb7b3ee4db 100644
--- a/llvm/test/CodeGen/AArch64/cttz.ll
+++ b/llvm/test/CodeGen/AArch64/cttz.ll
@@ -527,7 +527,7 @@ define <3 x i64> @v3i64(<3 x i64> %d) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    mov w8, #1 // =0x1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    dup v1.2d, x8
 ; CHECK-SD-NEXT:    sub v3.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    sub v1.2d, v2.2d, v1.2d
diff --git a/llvm/test/CodeGen/AArch64/dag-combine-trunc-build-vec.ll b/llvm/test/CodeGen/AArch64/dag-combine-trunc-build-vec.ll
index 5763e4f43c1d2..9c9a199f44c1a 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-trunc-build-vec.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-trunc-build-vec.ll
@@ -12,8 +12,8 @@ define void @no_combine(i32 %p) local_unnamed_addr {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.4h, #4
 ; CHECK-NEXT:    dup v1.4h, w0
-; CHECK-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-NEXT:    uzp1 v0.16b, v1.16b, v1.16b
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v0.16b
 ; CHECK-NEXT:    str q0, [x8]
 ; CHECK-NEXT:    ret
 
diff --git a/llvm/test/CodeGen/AArch64/extbinopload.ll b/llvm/test/CodeGen/AArch64/extbinopload.ll
index 75f3ffc9515e5..44340215355e1 100644
--- a/llvm/test/CodeGen/AArch64/extbinopload.ll
+++ b/llvm/test/CodeGen/AArch64/extbinopload.ll
@@ -662,7 +662,7 @@ define <16 x i32> @extrause_load(ptr %p, ptr %q, ptr %r, ptr %s, ptr %z) {
 ; CHECK-NEXT:    zip1 v2.8b, v2.8b, v2.8b
 ; CHECK-NEXT:    mov v0.b[10], w9
 ; CHECK-NEXT:    add x9, x1, #4
-; CHECK-NEXT:    mov v1.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v1.2d, v1.2d, v2.2d
 ; CHECK-NEXT:    mov v0.b[11], w10
 ; CHECK-NEXT:    add x10, x3, #12
 ; CHECK-NEXT:    bic v1.8h, #255, lsl #8
diff --git a/llvm/test/CodeGen/AArch64/extend_inreg_of_concat_subvectors.ll b/llvm/test/CodeGen/AArch64/extend_inreg_of_concat_subvectors.ll
index 1f1bfe6906482..2293f7129f968 100644
--- a/llvm/test/CodeGen/AArch64/extend_inreg_of_concat_subvectors.ll
+++ b/llvm/test/CodeGen/AArch64/extend_inreg_of_concat_subvectors.ll
@@ -46,7 +46,7 @@ define void @zext_of_concat_extrause(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) nou
 ; CHECK-NEXT:    ldr d1, [x0]
 ; CHECK-NEXT:    add.2s v0, v1, v0
 ; CHECK-NEXT:    movi.2d v1, #0000000000000000
-; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    zip1.2d v0, v0, v0
 ; CHECK-NEXT:    zip1.4s v1, v0, v1
 ; CHECK-NEXT:    str q0, [x4]
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -60,7 +60,7 @@ define void @zext_of_concat_extrause(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) nou
 ; CHECK-BE-NEXT:    ld1 { v1.2s }, [x0]
 ; CHECK-BE-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-BE-NEXT:    add v0.2s, v1.2s, v0.2s
-; CHECK-BE-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-BE-NEXT:    zip1 v0.2d, v0.2d, v0.2d
 ; CHECK-BE-NEXT:    zip1 v1.4s, v0.4s, v0.4s
 ; CHECK-BE-NEXT:    st1 { v0.4s }, [x4]
 ; CHECK-BE-NEXT:    trn2 v0.4s, v1.4s, v2.4s
@@ -119,8 +119,7 @@ define void @aext_of_concat_extrause(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) nou
 ; CHECK-NEXT:    ldr d0, [x1]
 ; CHECK-NEXT:    ldr d1, [x0]
 ; CHECK-NEXT:    add.2s v0, v1, v0
-; CHECK-NEXT:    mov.16b v1, v0
-; CHECK-NEXT:    mov.d v1[1], v0[0]
+; CHECK-NEXT:    zip1.2d v1, v0, v0
 ; CHECK-NEXT:    zip1.4s v0, v0, v0
 ; CHECK-NEXT:    str q1, [x4]
 ; CHECK-NEXT:    ldr q1, [x2]
@@ -133,8 +132,7 @@ define void @aext_of_concat_extrause(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) nou
 ; CHECK-BE-NEXT:    ld1 { v0.2s }, [x1]
 ; CHECK-BE-NEXT:    ld1 { v1.2s }, [x0]
 ; CHECK-BE-NEXT:    add v0.2s, v1.2s, v0.2s
-; CHECK-BE-NEXT:    mov v1.16b, v0.16b
-; CHECK-BE-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-BE-NEXT:    zip1 v1.2d, v0.2d, v0.2d
 ; CHECK-BE-NEXT:    zip1 v0.4s, v0.4s, v0.4s
 ; CHECK-BE-NEXT:    st1 { v1.4s }, [x4]
 ; CHECK-BE-NEXT:    ld1 { v1.4s }, [x2]
diff --git a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
index 5e5fdd6d31705..3670d1cf03d6a 100644
--- a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
@@ -776,8 +776,8 @@ define i32 @extract_v4i32_vector_insert(<4 x i32> %a, <2 x i32> %b, i32 %c) {
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    bfi x8, x0, #2, #2
-; CHECK-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-NEXT:    str q1, [sp]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    str q0, [sp]
 ; CHECK-NEXT:    ldr w0, [x8]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/fabs.ll b/llvm/test/CodeGen/AArch64/fabs.ll
index 43e9007073634..9f4ade98f6a23 100644
--- a/llvm/test/CodeGen/AArch64/fabs.ll
+++ b/llvm/test/CodeGen/AArch64/fabs.ll
@@ -73,7 +73,7 @@ define <3 x double> @fabs_v3f64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fabs v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fabs v0.2d, v0.2d
diff --git a/llvm/test/CodeGen/AArch64/faddsub.ll b/llvm/test/CodeGen/AArch64/faddsub.ll
index b15579199a059..ba9fb69ce3d1f 100644
--- a/llvm/test/CodeGen/AArch64/faddsub.ll
+++ b/llvm/test/CodeGen/AArch64/faddsub.ll
@@ -74,8 +74,8 @@ define <3 x double> @fadd_v3f64(<3 x double> %a, <3 x double> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fadd v2.2d, v2.2d, v5.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fadd v0.2d, v0.2d, v3.2d
@@ -415,8 +415,8 @@ define <3 x double> @fsub_v3f64(<3 x double> %a, <3 x double> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fsub v2.2d, v2.2d, v5.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fsub v0.2d, v0.2d, v3.2d
diff --git a/llvm/test/CodeGen/AArch64/fcmp.ll b/llvm/test/CodeGen/AArch64/fcmp.ll
index e16ea9883a0c4..af72be9646c25 100644
--- a/llvm/test/CodeGen/AArch64/fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/fcmp.ll
@@ -613,27 +613,25 @@ define <2 x double> @v2f128_double(<2 x fp128> %a, <2 x fp128> %b, <2 x double>
 ; CHECK-SD-NEXT:    str x30, [sp, #80] // 8-byte Folded Spill
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-SD-NEXT:    .cfi_offset w30, -16
-; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    stp q0, q2, [sp, #16] // 32-byte Folded Spill
 ; CHECK-SD-NEXT:    mov v0.16b, v1.16b
 ; CHECK-SD-NEXT:    mov v1.16b, v3.16b
 ; CHECK-SD-NEXT:    stp q4, q5, [sp, #48] // 32-byte Folded Spill
-; CHECK-SD-NEXT:    str q2, [sp, #32] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    ldr q1, [sp, #32] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cset w8, lt
 ; CHECK-SD-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    ldp q0, q1, [sp, #16] // 32-byte Folded Reload
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr x30, [sp, #80] // 8-byte Folded Reload
 ; CHECK-SD-NEXT:    cset w8, lt
 ; CHECK-SD-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ldp q2, q1, [sp, #48] // 32-byte Folded Reload
 ; CHECK-SD-NEXT:    bsl v0.16b, v2.16b, v1.16b
 ; CHECK-SD-NEXT:    add sp, sp, #96
@@ -678,24 +676,24 @@ define <3 x double> @v3f128_double(<3 x fp128> %a, <3 x fp128> %b, <3 x double>
 ; CHECK-SD-NEXT:    str x30, [sp, #144] // 8-byte Folded Spill
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 160
 ; CHECK-SD-NEXT:    .cfi_offset w30, -16
-; CHECK-SD-NEXT:    stp q2, q5, [sp, #112] // 32-byte Folded Spill
-; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
+; CHECK-SD-NEXT:    str q3, [sp, #48] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    // kill: def $d7 killed $d7 def $q7
+; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
+; CHECK-SD-NEXT:    str q2, [sp, #96] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    ldp d2, d3, [sp, #168]
+; CHECK-SD-NEXT:    str q5, [sp, #128] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    ldr d5, [sp, #184]
-; CHECK-SD-NEXT:    str q3, [sp, #64] // 16-byte Folded Spill
-; CHECK-SD-NEXT:    ldp d3, d2, [sp, #168]
-; CHECK-SD-NEXT:    mov v6.d[1], v7.d[0]
-; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    zip1 v2.2d, v2.2d, v3.2d
+; CHECK-SD-NEXT:    str q5, [sp, #112] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    ldr d5, [sp, #160]
+; CHECK-SD-NEXT:    stp q2, q5, [sp, #64] // 32-byte Folded Spill
+; CHECK-SD-NEXT:    zip1 v2.2d, v6.2d, v7.2d
+; CHECK-SD-NEXT:    stp q0, q2, [sp, #16] // 32-byte Folded Spill
 ; CHECK-SD-NEXT:    mov v0.16b, v1.16b
 ; CHECK-SD-NEXT:    mov v1.16b, v4.16b
-; CHECK-SD-NEXT:    str q5, [sp, #96] // 16-byte Folded Spill
-; CHECK-SD-NEXT:    ldr d5, [sp, #160]
-; CHECK-SD-NEXT:    mov v3.d[1], v2.d[0]
-; CHECK-SD-NEXT:    str q5, [sp, #80] // 16-byte Folded Spill
-; CHECK-SD-NEXT:    stp q6, q3, [sp, #32] // 32-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    ldr q1, [sp, #64] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    ldr q1, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cset w8, lt
 ; CHECK-SD-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-SD-NEXT:    fmov d0, x8
@@ -703,28 +701,30 @@ define <3 x double> @v3f128_double(<3 x fp128> %a, <3 x fp128> %b, <3 x double>
 ; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cset w8, lt
 ; CHECK-SD-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-SD-NEXT:    fmov d1, x8
-; CHECK-SD-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-SD-NEXT:    str q1, [sp, #64] // 16-byte Folded Spill
-; CHECK-SD-NEXT:    ldp q0, q1, [sp, #112] // 32-byte Folded Reload
+; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    ldr q1, [sp, #128] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    str q0, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    ldr q0, [sp, #96] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    bl __lttf2
-; CHECK-SD-NEXT:    ldp q1, q0, [sp, #32] // 32-byte Folded Reload
+; CHECK-SD-NEXT:    ldp q2, q0, [sp, #48] // 32-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    ldp q2, q4, [sp, #64] // 32-byte Folded Reload
+; CHECK-SD-NEXT:    ldr q1, [sp, #32] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cset w8, lt
+; CHECK-SD-NEXT:    ldr q3, [sp, #112] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    sbfx x8, x8, #0, #1
-; CHECK-SD-NEXT:    ldr q3, [sp, #96] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    ldr q4, [sp, #80] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr x30, [sp, #144] // 8-byte Folded Reload
 ; CHECK-SD-NEXT:    bit v0.16b, v1.16b, v2.16b
 ; CHECK-SD-NEXT:    fmov d2, x8
-; CHECK-SD-NEXT:    bsl v2.16b, v4.16b, v3.16b
 ; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    bsl v2.16b, v4.16b, v3.16b
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
-; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    add sp, sp, #160
 ; CHECK-SD-NEXT:    ret
 ;
@@ -813,22 +813,22 @@ define <3 x double> @v3f64_double(<3 x double> %a, <3 x double> %b, <3 x double>
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d4 killed $d4 def $q4
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
 ; CHECK-SD-NEXT:    // kill: def $d7 killed $d7 def $q7
+; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
 ; CHECK-SD-NEXT:    ldr d16, [sp, #24]
 ; CHECK-SD-NEXT:    ldr d17, [sp]
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-SD-NEXT:    mov v6.d[1], v7.d[0]
-; CHECK-SD-NEXT:    ldp d1, d4, [sp, #8]
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fcmgt v2.2d, v5.2d, v2.2d
-; CHECK-SD-NEXT:    mov v1.d[1], v4.d[0]
+; CHECK-SD-NEXT:    ldp d1, d4, [sp, #8]
+; CHECK-SD-NEXT:    zip1 v1.2d, v1.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v4.2d, v6.2d, v7.2d
 ; CHECK-SD-NEXT:    fcmgt v0.2d, v3.2d, v0.2d
 ; CHECK-SD-NEXT:    bsl v2.16b, v17.16b, v16.16b
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
-; CHECK-SD-NEXT:    bsl v0.16b, v6.16b, v1.16b
+; CHECK-SD-NEXT:    bsl v0.16b, v4.16b, v1.16b
 ; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 killed $q1
@@ -905,16 +905,16 @@ entry:
 define <3 x i32> @v3f64_i32(<3 x double> %a, <3 x double> %b, <3 x i32> %d, <3 x i32> %e) {
 ; CHECK-SD-LABEL: v3f64_i32:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    // kill: def $d3 killed $d3 def $q3
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d4 killed $d4 def $q4
-; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    zip1 v1.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    fcmgt v0.2d, v1.2d, v0.2d
 ; CHECK-SD-NEXT:    fcmgt v1.2d, v5.2d, v2.2d
-; CHECK-SD-NEXT:    fcmgt v0.2d, v3.2d, v0.2d
 ; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
 ; CHECK-SD-NEXT:    bsl v0.16b, v6.16b, v7.16b
 ; CHECK-SD-NEXT:    ret
@@ -939,7 +939,7 @@ define <3 x i32> @v3f64_i32(<3 x double> %a, <3 x double> %b, <3 x i32> %d, <3 x
 ; CHECK-GI-NEXT:    xtn v0.2s, v0.2d
 ; CHECK-GI-NEXT:    mov v1.s[2], w8
 ; CHECK-GI-NEXT:    mov v3.s[1], w9
-; CHECK-GI-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-GI-NEXT:    zip1 v0.2d, v0.2d, v2.2d
 ; CHECK-GI-NEXT:    mov v3.s[2], w9
 ; CHECK-GI-NEXT:    ushl v0.4s, v0.4s, v1.4s
 ; CHECK-GI-NEXT:    neg v1.4s, v1.4s
diff --git a/llvm/test/CodeGen/AArch64/fcopysign.ll b/llvm/test/CodeGen/AArch64/fcopysign.ll
index a42ec8e253be2..3dcc135b2668d 100644
--- a/llvm/test/CodeGen/AArch64/fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/fcopysign.ll
@@ -85,8 +85,8 @@ define <3 x double> @copysign_v3f64(<3 x double> %a, <3 x double> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d4 killed $d4 def $q4
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fneg v1.2d, v6.2d
 ; CHECK-SD-NEXT:    bif v0.16b, v3.16b, v1.16b
 ; CHECK-SD-NEXT:    bif v2.16b, v5.16b, v1.16b
diff --git a/llvm/test/CodeGen/AArch64/fcvt.ll b/llvm/test/CodeGen/AArch64/fcvt.ll
index b408e9c1bd4e6..138da9bd228e7 100644
--- a/llvm/test/CodeGen/AArch64/fcvt.ll
+++ b/llvm/test/CodeGen/AArch64/fcvt.ll
@@ -69,7 +69,7 @@ define <3 x double> @ceil_v3f64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    frintp v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    frintp v0.2d, v0.2d
@@ -368,7 +368,7 @@ define <3 x double> @floor_v3f64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    frintm v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    frintm v0.2d, v0.2d
@@ -667,7 +667,7 @@ define <3 x double> @nearbyint_v3f64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    frinti v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    frinti v0.2d, v0.2d
@@ -966,7 +966,7 @@ define <3 x double> @roundeven_v3f64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    frintn v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    frintn v0.2d, v0.2d
@@ -1265,7 +1265,7 @@ define <3 x double> @rint_v3f64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    frintx v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    frintx v0.2d, v0.2d
@@ -1564,7 +1564,7 @@ define <3 x double> @round_v3f64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    frinta v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    frinta v0.2d, v0.2d
@@ -1863,7 +1863,7 @@ define <3 x double> @trunc_v3f64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    frintz v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    frintz v0.2d, v0.2d
diff --git a/llvm/test/CodeGen/AArch64/fdiv.ll b/llvm/test/CodeGen/AArch64/fdiv.ll
index 5bdccccc62b99..9890612e9ddfa 100644
--- a/llvm/test/CodeGen/AArch64/fdiv.ll
+++ b/llvm/test/CodeGen/AArch64/fdiv.ll
@@ -74,8 +74,8 @@ define <3 x double> @fdiv_v3f64(<3 x double> %a, <3 x double> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fdiv v2.2d, v2.2d, v5.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fdiv v0.2d, v0.2d, v3.2d
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
index a9618fdc2dec3..b2538602c4f00 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
@@ -17,7 +17,7 @@ define <8 x half> @interleave2_v8f16(<4 x half> %vec0, <4 x half> %vec1) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    adrp x8, .LCPI1_0
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI1_0]
 ; CHECK-SD-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SD-NEXT:    ret
@@ -48,7 +48,7 @@ define <4 x float> @interleave2_v4f32(<2 x float> %vec0, <2 x float> %vec1) {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    rev64 v1.4s, v0.4s
 ; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
 ; CHECK-SD-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/fminimummaximum.ll b/llvm/test/CodeGen/AArch64/fminimummaximum.ll
index fb12f8acf1745..fafc7a7a7c4dd 100644
--- a/llvm/test/CodeGen/AArch64/fminimummaximum.ll
+++ b/llvm/test/CodeGen/AArch64/fminimummaximum.ll
@@ -135,8 +135,8 @@ define <3 x double> @min_v3f64(<3 x double> %a, <3 x double> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fmin v2.2d, v2.2d, v5.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fmin v0.2d, v0.2d, v3.2d
@@ -172,8 +172,8 @@ define <3 x double> @max_v3f64(<3 x double> %a, <3 x double> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fmax v2.2d, v2.2d, v5.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fmax v0.2d, v0.2d, v3.2d
diff --git a/llvm/test/CodeGen/AArch64/fminmax.ll b/llvm/test/CodeGen/AArch64/fminmax.ll
index 64f0da8b4cd0f..d4987a0389cf9 100644
--- a/llvm/test/CodeGen/AArch64/fminmax.ll
+++ b/llvm/test/CodeGen/AArch64/fminmax.ll
@@ -135,8 +135,8 @@ define <3 x double> @min_v3f64(<3 x double> %a, <3 x double> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fminnm v2.2d, v2.2d, v5.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fminnm v0.2d, v0.2d, v3.2d
@@ -172,8 +172,8 @@ define <3 x double> @max_v3f64(<3 x double> %a, <3 x double> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fmaxnm v2.2d, v2.2d, v5.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fmaxnm v0.2d, v0.2d, v3.2d
diff --git a/llvm/test/CodeGen/AArch64/fmla.ll b/llvm/test/CodeGen/AArch64/fmla.ll
index a37aabb0b5384..d02ab59f77474 100644
--- a/llvm/test/CodeGen/AArch64/fmla.ll
+++ b/llvm/test/CodeGen/AArch64/fmla.ll
@@ -71,22 +71,22 @@ entry:
 define <3 x double> @fma_v3f64(<3 x double> %a, <3 x double> %b, <3 x double> %c) {
 ; CHECK-SD-LABEL: fma_v3f64:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
 ; CHECK-SD-NEXT:    // kill: def $d3 killed $d3 def $q3
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d7 killed $d7 def $q7
+; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
 ; CHECK-SD-NEXT:    // kill: def $d4 killed $d4 def $q4
-; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-SD-NEXT:    mov v6.d[1], v7.d[0]
-; CHECK-SD-NEXT:    fmla v6.2d, v3.2d, v0.2d
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v1.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v6.2d, v7.2d
+; CHECK-SD-NEXT:    fmla v0.2d, v3.2d, v1.2d
 ; CHECK-SD-NEXT:    ldr d3, [sp]
 ; CHECK-SD-NEXT:    fmla v3.2d, v5.2d, v2.2d
-; CHECK-SD-NEXT:    fmov d0, d6
-; CHECK-SD-NEXT:    ext v1.16b, v6.16b, v6.16b, #8
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 killed $q1
 ; CHECK-SD-NEXT:    fmov d2, d3
 ; CHECK-SD-NEXT:    ret
@@ -722,22 +722,22 @@ entry:
 define <3 x double> @fmuladd_v3f64(<3 x double> %a, <3 x double> %b, <3 x double> %c) {
 ; CHECK-SD-LABEL: fmuladd_v3f64:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
 ; CHECK-SD-NEXT:    // kill: def $d3 killed $d3 def $q3
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d7 killed $d7 def $q7
+; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
 ; CHECK-SD-NEXT:    // kill: def $d4 killed $d4 def $q4
-; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-SD-NEXT:    mov v6.d[1], v7.d[0]
-; CHECK-SD-NEXT:    fmla v6.2d, v3.2d, v0.2d
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v1.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v6.2d, v7.2d
+; CHECK-SD-NEXT:    fmla v0.2d, v3.2d, v1.2d
 ; CHECK-SD-NEXT:    ldr d3, [sp]
 ; CHECK-SD-NEXT:    fmla v3.2d, v5.2d, v2.2d
-; CHECK-SD-NEXT:    fmov d0, d6
-; CHECK-SD-NEXT:    ext v1.16b, v6.16b, v6.16b, #8
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 killed $q1
 ; CHECK-SD-NEXT:    fmov d2, d3
 ; CHECK-SD-NEXT:    ret
@@ -1167,22 +1167,22 @@ entry:
 define <3 x double> @fmul_v3f64(<3 x double> %a, <3 x double> %b, <3 x double> %c) {
 ; CHECK-SD-LABEL: fmul_v3f64:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
 ; CHECK-SD-NEXT:    // kill: def $d3 killed $d3 def $q3
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d7 killed $d7 def $q7
+; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
 ; CHECK-SD-NEXT:    // kill: def $d4 killed $d4 def $q4
-; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-SD-NEXT:    mov v6.d[1], v7.d[0]
-; CHECK-SD-NEXT:    fmla v6.2d, v3.2d, v0.2d
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v1.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v6.2d, v7.2d
+; CHECK-SD-NEXT:    fmla v0.2d, v3.2d, v1.2d
 ; CHECK-SD-NEXT:    ldr d3, [sp]
 ; CHECK-SD-NEXT:    fmla v3.2d, v5.2d, v2.2d
-; CHECK-SD-NEXT:    fmov d0, d6
-; CHECK-SD-NEXT:    ext v1.16b, v6.16b, v6.16b, #8
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 killed $q1
 ; CHECK-SD-NEXT:    fmov d2, d3
 ; CHECK-SD-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/fmul.ll b/llvm/test/CodeGen/AArch64/fmul.ll
index bd3d1353e643e..0440bb6acb708 100644
--- a/llvm/test/CodeGen/AArch64/fmul.ll
+++ b/llvm/test/CodeGen/AArch64/fmul.ll
@@ -74,8 +74,8 @@ define <3 x double> @fmul_v3f64(<3 x double> %a, <3 x double> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fmul v2.2d, v2.2d, v5.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fmul v0.2d, v0.2d, v3.2d
diff --git a/llvm/test/CodeGen/AArch64/fneg.ll b/llvm/test/CodeGen/AArch64/fneg.ll
index de2671afe60ab..00e8c86d329c3 100644
--- a/llvm/test/CodeGen/AArch64/fneg.ll
+++ b/llvm/test/CodeGen/AArch64/fneg.ll
@@ -73,7 +73,7 @@ define <3 x double> @fabs_v3f64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fneg v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fneg v0.2d, v0.2d
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
index 1364c47adff2d..c6342528e8c1d 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll
@@ -241,9 +241,9 @@ define <4 x float> @uitofp_v4f32_v4i64(<4 x i64> %x) #0 {
 ; CHECK-NEXT:    fmul v0.2s, v2.2s, v1.2s
 ; CHECK-NEXT:    fmul v1.2s, v4.2s, v1.2s
 ; CHECK-NEXT:    mov v5.s[1], v3.s[0]
-; CHECK-NEXT:    fadd v2.2s, v0.2s, v6.2s
-; CHECK-NEXT:    fadd v0.2s, v1.2s, v5.2s
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    fadd v0.2s, v0.2s, v6.2s
+; CHECK-NEXT:    fadd v1.2s, v1.2s, v5.2s
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
   %val = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
   ret <4 x float> %val
diff --git a/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll b/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
index c6b8e41f9bdfd..8eb6dcd849c5e 100644
--- a/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
+++ b/llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll
@@ -32,8 +32,8 @@ define <3 x double> @max_nnan_v3f64(<3 x double> %a, <3 x double> %b) {
 ; AARCH64-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; AARCH64-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; AARCH64-NEXT:    // kill: def $d5 killed $d5 def $q5
-; AARCH64-NEXT:    mov v3.d[1], v4.d[0]
-; AARCH64-NEXT:    mov v0.d[1], v1.d[0]
+; AARCH64-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; AARCH64-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; AARCH64-NEXT:    fmaxnm v2.2d, v2.2d, v5.2d
 ; AARCH64-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; AARCH64-NEXT:    fmaxnm v0.2d, v0.2d, v3.2d
@@ -569,8 +569,8 @@ define <3 x double> @min_nnan_v3f64(<3 x double> %a, <3 x double> %b) {
 ; AARCH64-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; AARCH64-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; AARCH64-NEXT:    // kill: def $d5 killed $d5 def $q5
-; AARCH64-NEXT:    mov v3.d[1], v4.d[0]
-; AARCH64-NEXT:    mov v0.d[1], v1.d[0]
+; AARCH64-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; AARCH64-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; AARCH64-NEXT:    fminnm v2.2d, v2.2d, v5.2d
 ; AARCH64-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v3.2d
@@ -1104,16 +1104,16 @@ entry:
 define <3 x double> @max_v3f64(<3 x double> %a, <3 x double> %b) {
 ; AARCH64-LABEL: max_v3f64:
 ; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    // kill: def $d3 killed $d3 def $q3
+; AARCH64-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; AARCH64-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; AARCH64-NEXT:    // kill: def $d4 killed $d4 def $q4
-; AARCH64-NEXT:    // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; AARCH64-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; AARCH64-NEXT:    // kill: def $d5 killed $d5 def $q5
-; AARCH64-NEXT:    mov v0.d[1], v1.d[0]
-; AARCH64-NEXT:    mov v3.d[1], v4.d[0]
+; AARCH64-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT:    zip1 v1.2d, v3.2d, v4.2d
 ; AARCH64-NEXT:    fminnm v2.2d, v2.2d, v2.2d
-; AARCH64-NEXT:    fminnm v1.2d, v3.2d, v3.2d
+; AARCH64-NEXT:    fminnm v1.2d, v1.2d, v1.2d
 ; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v0.2d
 ; AARCH64-NEXT:    fmaxnm v0.2d, v0.2d, v1.2d
 ; AARCH64-NEXT:    fminnm v1.2d, v5.2d, v5.2d
@@ -1685,16 +1685,16 @@ entry:
 define <3 x double> @min_v3f64(<3 x double> %a, <3 x double> %b) {
 ; AARCH64-LABEL: min_v3f64:
 ; AARCH64:       // %bb.0: // %entry
-; AARCH64-NEXT:    // kill: def $d3 killed $d3 def $q3
+; AARCH64-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; AARCH64-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; AARCH64-NEXT:    // kill: def $d4 killed $d4 def $q4
-; AARCH64-NEXT:    // kill: def $d1 killed $d1 def $q1
+; AARCH64-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; AARCH64-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; AARCH64-NEXT:    // kill: def $d5 killed $d5 def $q5
-; AARCH64-NEXT:    mov v0.d[1], v1.d[0]
-; AARCH64-NEXT:    mov v3.d[1], v4.d[0]
+; AARCH64-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; AARCH64-NEXT:    zip1 v1.2d, v3.2d, v4.2d
 ; AARCH64-NEXT:    fminnm v2.2d, v2.2d, v2.2d
-; AARCH64-NEXT:    fminnm v1.2d, v3.2d, v3.2d
+; AARCH64-NEXT:    fminnm v1.2d, v1.2d, v1.2d
 ; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v0.2d
 ; AARCH64-NEXT:    fminnm v0.2d, v0.2d, v1.2d
 ; AARCH64-NEXT:    fminnm v1.2d, v5.2d, v5.2d
diff --git a/llvm/test/CodeGen/AArch64/fp16-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/fp16-vector-shuffle.ll
index 05422d3cc6051..6e6ec203cf302 100644
--- a/llvm/test/CodeGen/AArch64/fp16-vector-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/fp16-vector-shuffle.ll
@@ -276,7 +276,7 @@ define <8 x half> @vcombine(<4 x half> %a, <4 x half> %b) #0 {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i = shufflevector <4 x half> %a, <4 x half> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
index 9157bcba59e9b..c1e775fbd48a1 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
@@ -360,10 +360,10 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
 ; CHECK-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-NEXT:    cmp x20, #0
 ; CHECK-NEXT:    csel x9, x19, xzr, eq
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
@@ -405,10 +405,10 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
 ; CHECK-NEXT:    csel x10, x10, xzr, lt
 ; CHECK-NEXT:    cmp xzr, x8
 ; CHECK-NEXT:    ngcs xzr, x9
-; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d1, x10
 ; CHECK-NEXT:    csel x8, x8, xzr, lt
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
@@ -461,10 +461,10 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
 ; CHECK-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-NEXT:    cmp x20, #0
 ; CHECK-NEXT:    csel x9, x19, xzr, eq
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
@@ -507,10 +507,10 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
 ; CHECK-NEXT:    csel x10, x10, xzr, lt
 ; CHECK-NEXT:    cmp xzr, x9
 ; CHECK-NEXT:    ngcs xzr, x8
-; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d1, x10
 ; CHECK-NEXT:    csel x8, x9, xzr, lt
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
@@ -579,10 +579,10 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
 ; CHECK-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-NEXT:    cmp x20, #0
 ; CHECK-NEXT:    csel x9, x19, xzr, eq
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
@@ -625,10 +625,10 @@ define <2 x i64> @ustest_f16i64(<2 x half> %x) {
 ; CHECK-NEXT:    csel x10, x10, xzr, lt
 ; CHECK-NEXT:    cmp xzr, x9
 ; CHECK-NEXT:    ngcs xzr, x8
-; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov d1, x10
 ; CHECK-NEXT:    csel x8, x9, xzr, lt
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
@@ -969,10 +969,10 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-NEXT:    cmp x20, #0
 ; CHECK-NEXT:    csel x9, x19, xzr, eq
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
@@ -1012,9 +1012,9 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NEXT:    csel x10, xzr, x11, lt
 ; CHECK-NEXT:    cmp x9, #0
 ; CHECK-NEXT:    csel x8, xzr, x8, lt
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
@@ -1063,10 +1063,10 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-NEXT:    cmp x20, #0
 ; CHECK-NEXT:    csel x9, x19, xzr, eq
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
@@ -1107,9 +1107,9 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NEXT:    csel x10, xzr, x11, lt
 ; CHECK-NEXT:    cmp x9, #0
 ; CHECK-NEXT:    csel x8, xzr, x8, lt
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
@@ -1174,10 +1174,10 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
 ; CHECK-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-NEXT:    cmp x20, #0
 ; CHECK-NEXT:    csel x9, x19, xzr, eq
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    fmov d1, x9
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
@@ -1218,9 +1218,9 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NEXT:    csel x10, xzr, x11, lt
 ; CHECK-NEXT:    cmp x9, #0
 ; CHECK-NEXT:    csel x8, xzr, x8, lt
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/fptoi.ll b/llvm/test/CodeGen/AArch64/fptoi.ll
index ae3b6a54a1f7f..758a57646fbf7 100644
--- a/llvm/test/CodeGen/AArch64/fptoi.ll
+++ b/llvm/test/CodeGen/AArch64/fptoi.ll
@@ -710,7 +710,7 @@ define <3 x i64> @fptos_v3f64_v3i64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fcvtzs v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fcvtzs v0.2d, v0.2d
@@ -742,7 +742,7 @@ define <3 x i64> @fptou_v3f64_v3i64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fcvtzu v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fcvtzu v0.2d, v0.2d
@@ -1015,32 +1015,54 @@ entry:
 }
 
 define <3 x i32> @fptos_v3f64_v3i32(<3 x double> %a) {
-; CHECK-LABEL: fptos_v3f64_v3i32:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-NEXT:    fcvtzs v1.2d, v2.2d
-; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fptos_v3f64_v3i32:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    fcvtzs v1.2d, v2.2d
+; CHECK-SD-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fptos_v3f64_v3i32:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT:    fcvtzs v1.2d, v2.2d
+; CHECK-GI-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-GI-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    ret
 entry:
   %c = fptosi <3 x double> %a to <3 x i32>
   ret <3 x i32> %c
 }
 
 define <3 x i32> @fptou_v3f64_v3i32(<3 x double> %a) {
-; CHECK-LABEL: fptou_v3f64_v3i32:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-NEXT:    fcvtzu v1.2d, v2.2d
-; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fptou_v3f64_v3i32:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    fcvtzu v1.2d, v2.2d
+; CHECK-SD-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fptou_v3f64_v3i32:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT:    fcvtzu v1.2d, v2.2d
+; CHECK-GI-NEXT:    fcvtzu v0.2d, v0.2d
+; CHECK-GI-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    ret
 entry:
   %c = fptoui <3 x double> %a to <3 x i32>
   ret <3 x i32> %c
@@ -1375,17 +1397,29 @@ entry:
 }
 
 define <3 x i16> @fptos_v3f64_v3i16(<3 x double> %a) {
-; CHECK-LABEL: fptos_v3f64_v3i16:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-NEXT:    fcvtzs v1.2d, v2.2d
-; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
-; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    xtn v0.4h, v0.4s
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: fptos_v3f64_v3i16:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    fcvtzs v1.2d, v2.2d
+; CHECK-SD-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT:    xtn v0.4h, v0.4s
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: fptos_v3f64_v3i16:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-GI-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT:    fcvtzs v1.2d, v2.2d
+; CHECK-GI-NEXT:    fcvtzs v0.2d, v0.2d
+; CHECK-GI-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    xtn v0.4h, v0.4s
+; CHECK-GI-NEXT:    ret
 entry:
   %c = fptosi <3 x double> %a to <3 x i16>
   ret <3 x i16> %c
@@ -1397,7 +1431,7 @@ define <3 x i16> @fptou_v3f64_v3i16(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fcvtzs v1.2d, v2.2d
 ; CHECK-SD-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
@@ -1860,7 +1894,7 @@ define <3 x i8> @fptos_v3f64_v3i8(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fcvtzs v1.2d, v2.2d
 ; CHECK-SD-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
@@ -1897,7 +1931,7 @@ define <3 x i8> @fptou_v3f64_v3i8(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fcvtzs v1.2d, v2.2d
 ; CHECK-SD-NEXT:    fcvtzs v0.2d, v0.2d
 ; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
@@ -7199,17 +7233,17 @@ define <2 x i64> @fptos_v2f128_v2i64(<2 x fp128> %a) {
 ; CHECK-SD-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-SD-NEXT:    .cfi_offset w30, -16
-; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    mov v0.16b, v1.16b
 ; CHECK-SD-NEXT:    bl __fixtfdi
 ; CHECK-SD-NEXT:    fmov d0, x0
-; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    bl __fixtfdi
 ; CHECK-SD-NEXT:    fmov d0, x0
-; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    add sp, sp, #48
 ; CHECK-SD-NEXT:    ret
 ;
@@ -7242,17 +7276,17 @@ define <2 x i64> @fptou_v2f128_v2i64(<2 x fp128> %a) {
 ; CHECK-SD-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-SD-NEXT:    .cfi_offset w30, -16
-; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    mov v0.16b, v1.16b
 ; CHECK-SD-NEXT:    bl __fixunstfdi
 ; CHECK-SD-NEXT:    fmov d0, x0
-; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    bl __fixunstfdi
 ; CHECK-SD-NEXT:    fmov d0, x0
-; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    add sp, sp, #48
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
index a33b1ef569fc3..f1121b89a1d30 100644
--- a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
@@ -5505,7 +5505,7 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldp x20, x19, [sp, #80] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    fmov d0, x8
 ; CHECK-SD-NEXT:    ldp x30, x21, [sp, #64] // 16-byte Folded Reload
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    add sp, sp, #96
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
index b1b5154a57b4d..368094fa92563 100644
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
@@ -4487,7 +4487,7 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-SD-NEXT:    csinv x8, x19, xzr, le
 ; CHECK-SD-NEXT:    ldp x30, x19, [sp, #64] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    add sp, sp, #80
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/fptrunc.ll b/llvm/test/CodeGen/AArch64/fptrunc.ll
index b4c38e9f2df3b..117f5720b2ccf 100644
--- a/llvm/test/CodeGen/AArch64/fptrunc.ll
+++ b/llvm/test/CodeGen/AArch64/fptrunc.ll
@@ -197,17 +197,17 @@ define <2 x double> @fptrunc_v2f128_v2f64(<2 x fp128> %a) {
 ; CHECK-SD-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-SD-NEXT:    .cfi_offset w30, -16
-; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    mov v0.16b, v1.16b
 ; CHECK-SD-NEXT:    bl __trunctfdf2
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    bl __trunctfdf2
-; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    add sp, sp, #48
 ; CHECK-SD-NEXT:    ret
 ;
@@ -251,7 +251,7 @@ define <3 x float> @fptrunc_v3f64_v3f32(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fcvtn v0.2s, v0.2d
 ; CHECK-SD-NEXT:    fcvtn2 v0.4s, v2.2d
 ; CHECK-SD-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/fsqrt.ll b/llvm/test/CodeGen/AArch64/fsqrt.ll
index 6c5fd8e52b017..001a7c5e08142 100644
--- a/llvm/test/CodeGen/AArch64/fsqrt.ll
+++ b/llvm/test/CodeGen/AArch64/fsqrt.ll
@@ -69,7 +69,7 @@ define <3 x double> @sqrt_v3f64(<3 x double> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    fsqrt v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    fsqrt v0.2d, v0.2d
diff --git a/llvm/test/CodeGen/AArch64/icmp.ll b/llvm/test/CodeGen/AArch64/icmp.ll
index e284795760c5c..c1b3b6462bea9 100644
--- a/llvm/test/CodeGen/AArch64/icmp.ll
+++ b/llvm/test/CodeGen/AArch64/icmp.ll
@@ -1127,19 +1127,19 @@ define <3 x i64> @v3i64_i64(<3 x i64> %a, <3 x i64> %b, <3 x i64> %d, <3 x i64>
 ; CHECK-SD-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
 ; CHECK-SD-NEXT:    // kill: def $d7 killed $d7 def $q7
+; CHECK-SD-NEXT:    // kill: def $d6 killed $d6 def $q6
 ; CHECK-SD-NEXT:    // kill: def $d5 killed $d5 def $q5
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    ldr d16, [sp, #24]
 ; CHECK-SD-NEXT:    ldr d17, [sp]
-; CHECK-SD-NEXT:    mov v3.d[1], v4.d[0]
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-SD-NEXT:    mov v6.d[1], v7.d[0]
+; CHECK-SD-NEXT:    zip1 v3.2d, v3.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ldp d1, d4, [sp, #8]
-; CHECK-SD-NEXT:    mov v1.d[1], v4.d[0]
+; CHECK-SD-NEXT:    zip1 v1.2d, v1.2d, v4.2d
+; CHECK-SD-NEXT:    zip1 v4.2d, v6.2d, v7.2d
 ; CHECK-SD-NEXT:    cmgt v0.2d, v3.2d, v0.2d
-; CHECK-SD-NEXT:    bsl v0.16b, v6.16b, v1.16b
+; CHECK-SD-NEXT:    bsl v0.16b, v4.16b, v1.16b
 ; CHECK-SD-NEXT:    cmgt v1.2d, v5.2d, v2.2d
 ; CHECK-SD-NEXT:    mov v2.16b, v1.16b
 ; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
diff --git a/llvm/test/CodeGen/AArch64/insert-subvector.ll b/llvm/test/CodeGen/AArch64/insert-subvector.ll
index 6828fa9f1508c..11f2c75dc7a1b 100644
--- a/llvm/test/CodeGen/AArch64/insert-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector.ll
@@ -351,9 +351,9 @@ define <8 x i8> @load_v8i8_4_2(float %tmp, <8 x i8> %b, ptr %a) {
 define <16 x i8> @load_v16i8_8_1(float %tmp, <16 x i8> %b, ptr %a) {
 ; CHECK-LABEL: load_v16i8_8_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ext v0.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
   %l = load <8 x i8>, ptr %a
   %s1 = shufflevector <8 x i8> %l, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -364,9 +364,8 @@ define <16 x i8> @load_v16i8_8_1(float %tmp, <16 x i8> %b, ptr %a) {
 define <16 x i8> @load_v16i8_8_2(float %tmp, <16 x i8> %b, ptr %a) {
 ; CHECK-LABEL: load_v16i8_8_2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    ldr d2, [x0]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
   %l = load <8 x i8>, ptr %a
   %s1 = shufflevector <8 x i8> %l, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -394,7 +393,7 @@ define <8 x i8> @load_v8i8_2_15(float %tmp, <8 x i8> %b, ptr %a) {
 ; CHECK-NEXT:    ldr h0, [x0]
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-NEXT:    adrp x8, .LCPI33_0
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI33_0]
 ; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-NEXT:    ret
@@ -572,9 +571,9 @@ define <4 x i16> @load_v4i16_2_2(float %tmp, <4 x i16> %b, ptr %a) {
 define <8 x i16> @load_v8i16_4_1(float %tmp, <8 x i16> %b, ptr %a) {
 ; CHECK-LABEL: load_v8i16_4_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ext v0.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
   %l = load <4 x i16>, ptr %a
   %s1 = shufflevector <4 x i16> %l, <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -585,9 +584,8 @@ define <8 x i16> @load_v8i16_4_1(float %tmp, <8 x i16> %b, ptr %a) {
 define <8 x i16> @load_v8i16_4_2(float %tmp, <8 x i16> %b, ptr %a) {
 ; CHECK-LABEL: load_v8i16_4_2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    ldr d2, [x0]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
   %l = load <4 x i16>, ptr %a
   %s1 = shufflevector <4 x i16> %l, <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -600,9 +598,9 @@ define <8 x i16> @load_v8i16_4_2(float %tmp, <8 x i16> %b, ptr %a) {
 define <4 x i32> @load_v4i32_2_1(float %tmp, <4 x i32> %b, ptr %a) {
 ; CHECK-LABEL: load_v4i32_2_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ext v0.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
   %l = load <2 x i32>, ptr %a
   %s1 = shufflevector <2 x i32> %l, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -613,9 +611,8 @@ define <4 x i32> @load_v4i32_2_1(float %tmp, <4 x i32> %b, ptr %a) {
 define <4 x i32> @load_v4i32_2_2(float %tmp, <4 x i32> %b, ptr %a) {
 ; CHECK-LABEL: load_v4i32_2_2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    ldr d2, [x0]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
   %l = load <2 x i32>, ptr %a
   %s1 = shufflevector <2 x i32> %l, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -643,7 +640,7 @@ define <16 x i8> @load3_v4i8(float %tmp, ptr %a, ptr %b) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp s0, s1, [x0]
 ; CHECK-NEXT:    ld1 { v0.s }[1], [x1]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
   %la = load <4 x i8>, ptr %a
   %lb = load <4 x i8>, ptr %b
@@ -662,7 +659,7 @@ define <16 x i8> @load4_v4i8(float %tmp, ptr %a, ptr %b) {
 ; CHECK-NEXT:    ldp s0, s1, [x0]
 ; CHECK-NEXT:    ld1 { v0.s }[1], [x1], #4
 ; CHECK-NEXT:    ld1 { v1.s }[1], [x1]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
   %la = load <4 x i8>, ptr %a
   %lb = load <4 x i8>, ptr %b
@@ -681,7 +678,7 @@ define <16 x i8> @load2multi1_v4i8(float %tmp, ptr %a, ptr %b) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    ld1 { v0.s }[1], [x1]
-; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %la = load <4 x i8>, ptr %a
   %lb = load <4 x i8>, ptr %b
@@ -697,8 +694,8 @@ define <16 x i8> @load2multi2_v4i8(float %tmp, ptr %a, ptr %b) {
 ; CHECK-NEXT:    ldr s1, [x1]
 ; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
 ; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
-; CHECK-NEXT:    mov v1.d[1], v1.d[0]
-; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    zip1 v1.2d, v1.2d, v1.2d
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v0.2d
 ; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %la = load <4 x i8>, ptr %a
diff --git a/llvm/test/CodeGen/AArch64/insertextract.ll b/llvm/test/CodeGen/AArch64/insertextract.ll
index aa4f31fb5f53e..2cd31b6d313c4 100644
--- a/llvm/test/CodeGen/AArch64/insertextract.ll
+++ b/llvm/test/CodeGen/AArch64/insertextract.ll
@@ -57,7 +57,7 @@ define <3 x double> @insert_v3f64_0(<3 x double> %a, double %b, i32 %c) {
 ; CHECK-SD-LABEL: insert_v3f64_0:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-SD-NEXT:    mov v0.d[0], v3.d[0]
 ; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
@@ -91,7 +91,7 @@ define <3 x double> @insert_v3f64_c(<3 x double> %a, double %b, i32 %c) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    stp q0, q2, [sp, #-32]!
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-SD-NEXT:    mov x8, sp
@@ -1188,7 +1188,7 @@ define <3 x i64> @insert_v3i64_0(<3 x i64> %a, i64 %b, i32 %c) {
 ; CHECK-SD-LABEL: insert_v3i64_0:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    mov v0.d[0], x0
 ; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
@@ -1221,7 +1221,7 @@ define <3 x i64> @insert_v3i64_c(<3 x i64> %a, i64 %b, i32 %c) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    stp q0, q2, [sp, #-32]!
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-SD-NEXT:    mov x8, sp
@@ -1396,7 +1396,7 @@ define double @extract_v3f64_c(<3 x double> %a, i32 %c) {
 ; CHECK-SD-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    and x8, x0, #0x3
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    stp q0, q2, [sp, #-32]!
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-SD-NEXT:    mov x9, sp
@@ -2494,7 +2494,7 @@ define i64 @extract_v3i64_c(<3 x i64> %a, i32 %c) {
 ; CHECK-SD-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-SD-NEXT:    and x8, x0, #0x3
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    stp q0, q2, [sp, #-32]!
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-SD-NEXT:    mov x9, sp
diff --git a/llvm/test/CodeGen/AArch64/insertshuffleload.ll b/llvm/test/CodeGen/AArch64/insertshuffleload.ll
index b97b9b1dcdcf8..489383c1923ac 100644
--- a/llvm/test/CodeGen/AArch64/insertshuffleload.ll
+++ b/llvm/test/CodeGen/AArch64/insertshuffleload.ll
@@ -292,7 +292,7 @@ define <8 x i8> @wrong_shuffle(ptr %p) {
 ; CHECK-NEXT:    ldur d0, [x0, #1]
 ; CHECK-NEXT:    adrp x8, .LCPI19_0
 ; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI19_0]
-; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v0.2d
 ; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-NEXT:    ld1 { v0.b }[0], [x0]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
diff --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index 71b53c662bb22..336559594188c 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -1214,7 +1214,7 @@ define <2 x double> @stofp_v2i128_v2f64(<2 x i128> %a) {
 ; CHECK-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    add sp, sp, #48
 ; CHECK-SD-NEXT:    ret
 ;
@@ -1272,7 +1272,7 @@ define <2 x double> @utofp_v2i128_v2f64(<2 x i128> %a) {
 ; CHECK-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    add sp, sp, #48
 ; CHECK-SD-NEXT:    ret
 ;
@@ -1486,7 +1486,7 @@ define <3 x double> @stofp_v3i64_v3f64(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    scvtf v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    scvtf v0.2d, v0.2d
@@ -1518,7 +1518,7 @@ define <3 x double> @utofp_v3i64_v3f64(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ucvtf v2.2d, v2.2d
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-SD-NEXT:    ucvtf v0.2d, v0.2d
@@ -5447,7 +5447,7 @@ define <3 x float> @stofp_v3i8_v3f32(<3 x i8> %a) {
 ; CHECK-GI-NEXT:    mov v1.h[1], v0.h[3]
 ; CHECK-GI-NEXT:    sshll v0.4s, v0.4h, #0
 ; CHECK-GI-NEXT:    sshll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    scvtf v0.4s, v0.4s
 ; CHECK-GI-NEXT:    ret
 entry:
@@ -5477,7 +5477,7 @@ define <3 x float> @utofp_v3i8_v3f32(<3 x i8> %a) {
 ; CHECK-GI-NEXT:    mov v1.h[1], v0.h[3]
 ; CHECK-GI-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-GI-NEXT:    ushll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    ucvtf v0.4s, v0.4s
 ; CHECK-GI-NEXT:    ret
 entry:
@@ -6460,7 +6460,7 @@ define <3 x half> @stofp_v3i64_v3f16(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    scvtf v1.2d, v2.2d
 ; CHECK-SD-NEXT:    scvtf v0.2d, v0.2d
 ; CHECK-SD-NEXT:    fcvtn v0.2s, v0.2d
@@ -6508,7 +6508,7 @@ define <3 x half> @utofp_v3i64_v3f16(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ucvtf v1.2d, v2.2d
 ; CHECK-SD-NEXT:    ucvtf v0.2d, v0.2d
 ; CHECK-SD-NEXT:    fcvtn v0.2s, v0.2d
@@ -8222,7 +8222,7 @@ define <3 x half> @stofp_v3i8_v3f16(<3 x i8> %a) {
 ; CHECK-GI-NOFP16-NEXT:    mov v1.h[1], v0.h[3]
 ; CHECK-GI-NOFP16-NEXT:    sshll v0.4s, v0.4h, #0
 ; CHECK-GI-NOFP16-NEXT:    sshll v1.4s, v1.4h, #0
-; CHECK-GI-NOFP16-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NOFP16-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NOFP16-NEXT:    scvtf v0.4s, v0.4s
 ; CHECK-GI-NOFP16-NEXT:    fcvtn v0.4h, v0.4s
 ; CHECK-GI-NOFP16-NEXT:    ret
@@ -8272,7 +8272,7 @@ define <3 x half> @utofp_v3i8_v3f16(<3 x i8> %a) {
 ; CHECK-GI-NOFP16-NEXT:    mov v1.h[1], v0.h[3]
 ; CHECK-GI-NOFP16-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-GI-NOFP16-NEXT:    ushll v1.4s, v1.4h, #0
-; CHECK-GI-NOFP16-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-GI-NOFP16-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NOFP16-NEXT:    ucvtf v0.4s, v0.4s
 ; CHECK-GI-NOFP16-NEXT:    fcvtn v0.4h, v0.4s
 ; CHECK-GI-NOFP16-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
index fb65a748c865f..040f89f3c4b32 100644
--- a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
@@ -1241,7 +1241,7 @@ define <8 x i8> @vselect_equivalent_shuffle_v8i8(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    adrp x8, .LCPI89_0
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI89_0]
 ; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    ret
@@ -1295,7 +1295,7 @@ define <8 x i8> @vselect_equivalent_shuffle_v8i8_zeroswap(<8 x i8> %a) {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    adrp x8, .LCPI91_0
-; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v0.2d
 ; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI91_0]
 ; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
index 2c2cb72112879..f7ac5bb15bca9 100644
--- a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -2540,7 +2540,7 @@ define <4 x i32> @fcmal4xfloat(<4 x float> %A, <4 x float> %B) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov w8, #1 // =0x1
 ; CHECK-GI-NEXT:    dup v0.2s, w8
-; CHECK-GI-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-GI-NEXT:    zip1 v0.2d, v0.2d, v0.2d
 ; CHECK-GI-NEXT:    shl v0.4s, v0.4s, #31
 ; CHECK-GI-NEXT:    sshr v0.4s, v0.4s, #31
 ; CHECK-GI-NEXT:    ret
@@ -2587,7 +2587,7 @@ define <4 x i32> @fcmnv4xfloat(<4 x float> %A, <4 x float> %B) {
 ; CHECK-GI-NEXT:    mov w8, #0 // =0x0
 ; CHECK-GI-NEXT:    mov v0.s[0], w8
 ; CHECK-GI-NEXT:    mov v0.s[1], w8
-; CHECK-GI-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-GI-NEXT:    zip1 v0.2d, v0.2d, v0.2d
 ; CHECK-GI-NEXT:    shl v0.4s, v0.4s, #31
 ; CHECK-GI-NEXT:    sshr v0.4s, v0.4s, #31
 ; CHECK-GI-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
index 2d3fda704908e..8dd6a6cd5ef25 100644
--- a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
@@ -64,9 +64,9 @@ define i32 @test_udot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
 ; CHECK-GI-NEXT:    ushll v2.4s, v2.4h, #0
 ; CHECK-GI-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-GI-NEXT:    ushll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-GI-NEXT:    mov v2.d[1], v3.d[0]
-; CHECK-GI-NEXT:    mul v0.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    zip1 v1.2d, v2.2d, v3.2d
+; CHECK-GI-NEXT:    mul v0.4s, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    addv s0, v0.4s
 ; CHECK-GI-NEXT:    fmov w8, s0
 ; CHECK-GI-NEXT:    add w0, w8, w2
@@ -164,9 +164,9 @@ define i32 @test_sdot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
 ; CHECK-GI-NEXT:    sshll v2.4s, v2.4h, #0
 ; CHECK-GI-NEXT:    sshll v0.4s, v0.4h, #0
 ; CHECK-GI-NEXT:    sshll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-GI-NEXT:    mov v2.d[1], v3.d[0]
-; CHECK-GI-NEXT:    mul v0.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    zip1 v1.2d, v2.2d, v3.2d
+; CHECK-GI-NEXT:    mul v0.4s, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    addv s0, v0.4s
 ; CHECK-GI-NEXT:    fmov w8, s0
 ; CHECK-GI-NEXT:    add w0, w8, w2
@@ -324,9 +324,9 @@ define i32 @test_usdot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b
 ; CHECK-GI-NEXT:    sshll v2.4s, v2.4h, #0
 ; CHECK-GI-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-GI-NEXT:    sshll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-GI-NEXT:    mov v2.d[1], v3.d[0]
-; CHECK-GI-NEXT:    mul v0.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    zip1 v1.2d, v2.2d, v3.2d
+; CHECK-GI-NEXT:    mul v0.4s, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    addv s0, v0.4s
 ; CHECK-GI-NEXT:    fmov w8, s0
 ; CHECK-GI-NEXT:    add w0, w8, w2
@@ -1541,12 +1541,11 @@ define i32 @test_udot_v24i8_nomla(ptr nocapture readonly %a1) {
 ; CHECK-GI-LABEL: test_udot_v24i8_nomla:
 ; CHECK-GI:       // %bb.0: // %entry
 ; CHECK-GI-NEXT:    movi v0.8b, #1
-; CHECK-GI-NEXT:    movi v1.8b, #1
-; CHECK-GI-NEXT:    ldr q4, [x0]
 ; CHECK-GI-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-GI-NEXT:    ldr q4, [x0]
 ; CHECK-GI-NEXT:    movi v3.2d, #0000000000000000
 ; CHECK-GI-NEXT:    ldr d5, [x0, #16]
-; CHECK-GI-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-GI-NEXT:    zip1 v1.2d, v0.2d, v0.2d
 ; CHECK-GI-NEXT:    udot v2.4s, v5.16b, v0.16b
 ; CHECK-GI-NEXT:    udot v3.4s, v4.16b, v1.16b
 ; CHECK-GI-NEXT:    add v0.4s, v3.4s, v2.4s
@@ -2162,19 +2161,17 @@ define i32 @test_sdot_v24i8_double_nomla(<24 x i8> %a, <24 x i8> %b, <24 x i8> %
 ; CHECK-GI-NEXT:    ldr w9, [sp, #464]
 ; CHECK-GI-NEXT:    ldr w12, [sp, #400]
 ; CHECK-GI-NEXT:    mov v0.b[1], w1
-; CHECK-GI-NEXT:    movi v5.8b, #1
-; CHECK-GI-NEXT:    movi v6.8b, #1
+; CHECK-GI-NEXT:    movi v5.2d, #0000000000000000
+; CHECK-GI-NEXT:    movi v6.2d, #0000000000000000
 ; CHECK-GI-NEXT:    fmov s2, w9
 ; CHECK-GI-NEXT:    ldr w9, [sp, #96]
 ; CHECK-GI-NEXT:    movi v7.2d, #0000000000000000
 ; CHECK-GI-NEXT:    mov v1.b[1], w8
 ; CHECK-GI-NEXT:    ldr w8, [sp, #352]
-; CHECK-GI-NEXT:    movi v16.2d, #0000000000000000
-; CHECK-GI-NEXT:    movi v17.2d, #0000000000000000
+; CHECK-GI-NEXT:    zip1 v16.2d, v4.2d, v4.2d
+; CHECK-GI-NEXT:    zip1 v17.2d, v4.2d, v4.2d
 ; CHECK-GI-NEXT:    movi v18.2d, #0000000000000000
 ; CHECK-GI-NEXT:    mov v0.b[2], w2
-; CHECK-GI-NEXT:    mov v5.d[1], v4.d[0]
-; CHECK-GI-NEXT:    mov v6.d[1], v4.d[0]
 ; CHECK-GI-NEXT:    mov v1.b[2], w8
 ; CHECK-GI-NEXT:    ldr w8, [sp, #360]
 ; CHECK-GI-NEXT:    mov v0.b[3], w3
@@ -2251,12 +2248,12 @@ define i32 @test_sdot_v24i8_double_nomla(<24 x i8> %a, <24 x i8> %b, <24 x i8> %
 ; CHECK-GI-NEXT:    mov v1.b[15], w9
 ; CHECK-GI-NEXT:    fmov d2, d2
 ; CHECK-GI-NEXT:    fmov d3, d3
-; CHECK-GI-NEXT:    sdot v16.4s, v0.16b, v5.16b
-; CHECK-GI-NEXT:    sdot v18.4s, v1.16b, v6.16b
-; CHECK-GI-NEXT:    sdot v7.4s, v2.16b, v4.16b
-; CHECK-GI-NEXT:    sdot v17.4s, v3.16b, v4.16b
-; CHECK-GI-NEXT:    add v1.4s, v18.4s, v7.4s
-; CHECK-GI-NEXT:    add v0.4s, v16.4s, v17.4s
+; CHECK-GI-NEXT:    sdot v6.4s, v0.16b, v16.16b
+; CHECK-GI-NEXT:    sdot v18.4s, v1.16b, v17.16b
+; CHECK-GI-NEXT:    sdot v5.4s, v2.16b, v4.16b
+; CHECK-GI-NEXT:    sdot v7.4s, v3.16b, v4.16b
+; CHECK-GI-NEXT:    add v1.4s, v18.4s, v5.4s
+; CHECK-GI-NEXT:    add v0.4s, v6.4s, v7.4s
 ; CHECK-GI-NEXT:    addv s1, v1.4s
 ; CHECK-GI-NEXT:    addv s0, v0.4s
 ; CHECK-GI-NEXT:    fmov w9, s1
diff --git a/llvm/test/CodeGen/AArch64/neon-extracttruncate.ll b/llvm/test/CodeGen/AArch64/neon-extracttruncate.ll
index 3f590226c4715..8dea5a7d43fd5 100644
--- a/llvm/test/CodeGen/AArch64/neon-extracttruncate.ll
+++ b/llvm/test/CodeGen/AArch64/neon-extracttruncate.ll
@@ -87,8 +87,8 @@ define <16 x i8> @extract_4_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v2.d[1], v3.d[0]
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v2.2d, v2.2d, v3.2d
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ret
 entry:
@@ -209,8 +209,8 @@ define <16 x i8> @extract_4_mixed(<4 x i16> %a, <4 x i32> %b, <4 x i32> %c, <4 x
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
 ; CHECK-NEXT:    xtn2 v0.8h, v1.4s
-; CHECK-NEXT:    mov v2.d[1], v3.d[0]
-; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v1.2d, v2.2d, v3.2d
+; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %a0 = extractelement <4 x i16> %a, i32 0
diff --git a/llvm/test/CodeGen/AArch64/neon-wide-splat.ll b/llvm/test/CodeGen/AArch64/neon-wide-splat.ll
index 8f05fd6cb76bd..f5cb7e32e5229 100644
--- a/llvm/test/CodeGen/AArch64/neon-wide-splat.ll
+++ b/llvm/test/CodeGen/AArch64/neon-wide-splat.ll
@@ -133,7 +133,7 @@ define <8 x i8> @shuffle_not4(<8 x i8> %v) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    adrp x8, .LCPI11_0
-; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v0.2d
 ; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI11_0]
 ; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/pr135821.ll b/llvm/test/CodeGen/AArch64/pr135821.ll
index cfd6cd086e130..aa8c2f1aed0bf 100644
--- a/llvm/test/CodeGen/AArch64/pr135821.ll
+++ b/llvm/test/CodeGen/AArch64/pr135821.ll
@@ -8,10 +8,10 @@ define <4 x float> @f(ptr  %0) {
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    ldr q1, [x0, #56]!
-; CHECK-NEXT:    ldr d0, [x0, #16]
-; CHECK-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [x0, #56]!
+; CHECK-NEXT:    ldr d1, [x0, #16]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT:    bl use
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/ptradd.ll b/llvm/test/CodeGen/AArch64/ptradd.ll
index 427542377bfda..63d52ddbd90c4 100644
--- a/llvm/test/CodeGen/AArch64/ptradd.ll
+++ b/llvm/test/CodeGen/AArch64/ptradd.ll
@@ -78,7 +78,7 @@ define <3 x ptr> @vector_gep_v3i32(<3 x ptr> %b, <3 x i32> %off) {
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    ext v4.16b, v3.16b, v3.16b, #8
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    saddw v2.2d, v2.2d, v4.2s
 ; CHECK-SD-NEXT:    saddw v0.2d, v0.2d, v3.2s
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 killed $q2
@@ -255,7 +255,7 @@ define <3 x ptr> @vector_gep_v3i64_base(ptr %b, <3 x i64> %off) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    fmov d3, x0
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    dup v1.2d, x0
 ; CHECK-SD-NEXT:    add d2, d3, d2
 ; CHECK-SD-NEXT:    add v0.2d, v1.2d, v0.2d
diff --git a/llvm/test/CodeGen/AArch64/pull-negations-after-concat-of-truncates.ll b/llvm/test/CodeGen/AArch64/pull-negations-after-concat-of-truncates.ll
index 70c3e8a9aa0bc..d62df5f8f6c00 100644
--- a/llvm/test/CodeGen/AArch64/pull-negations-after-concat-of-truncates.ll
+++ b/llvm/test/CodeGen/AArch64/pull-negations-after-concat-of-truncates.ll
@@ -49,9 +49,9 @@ define <8 x i16> @not_not_trunc_concat_multiple_uses(<4 x i32> %x, <4 x i32> %y)
 ; CHECK-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-NEXT:    mvn v1.8b, v1.8b
 ; CHECK-NEXT:    add v2.4h, v0.4h, v1.4h
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-NEXT:    mov v2.d[1], v2.d[0]
-; CHECK-NEXT:    add v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    zip1 v1.2d, v2.2d, v2.2d
+; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
   %notx = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
   %trnx = trunc <4 x i32> %notx to <4 x i16>
diff --git a/llvm/test/CodeGen/AArch64/shuffle-mask-legal.ll b/llvm/test/CodeGen/AArch64/shuffle-mask-legal.ll
index 7aa7013da4699..15cf84fe93e00 100644
--- a/llvm/test/CodeGen/AArch64/shuffle-mask-legal.ll
+++ b/llvm/test/CodeGen/AArch64/shuffle-mask-legal.ll
@@ -7,7 +7,7 @@ define <4 x i32> @PR41535(<2 x i32> %p1, <2 x i32> %p2) {
 ; CHECK-LABEL: PR41535:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v0.8b, v0.8b, v1.8b, #4
-; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v0.2d
 ; CHECK-NEXT:    ret
   %cat1 = shufflevector <2 x i32> %p1, <2 x i32> undef, <4 x i32> <i32 undef, i32 1, i32 undef, i32 undef>
   %cat2 = shufflevector <2 x i32> %p2, <2 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
diff --git a/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll b/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
index fb571eff39fe5..552b79cd40b15 100644
--- a/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
+++ b/llvm/test/CodeGen/AArch64/shuffle-tbl34.ll
@@ -102,20 +102,20 @@ define <16 x i8> @shuffle4_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8>
 ; CHECK-LABEL: shuffle4_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-NEXT:    adrp x8, .LCPI2_0
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-NEXT:    mov v2.d[1], v3.d[0]
-; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI2_0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    zip1 v1.2d, v2.2d, v3.2d
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI2_0]
 ; CHECK-NEXT:    adrp x8, .LCPI2_1
 ; CHECK-NEXT:    ldr d3, [x8, :lo12:.LCPI2_1]
 ; CHECK-NEXT:    adrp x8, .LCPI2_2
-; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
-; CHECK-NEXT:    tbl v1.8b, { v2.16b }, v3.8b
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI2_2]
-; CHECK-NEXT:    tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-NEXT:    tbl v4.8b, { v0.16b }, v2.8b
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI2_2]
+; CHECK-NEXT:    tbl v5.8b, { v1.16b }, v3.8b
+; CHECK-NEXT:    tbl v0.16b, { v4.16b, v5.16b }, v0.16b
 ; CHECK-NEXT:    ret
   %x = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 3, i32 2, i32 1, i32 12, i32 15, i32 14, i32 12>
   %y = shufflevector <8 x i8> %c, <8 x i8> %d, <8 x i32> <i32 4, i32 7, i32 6, i32 7, i32 8, i32 10, i32 9, i32 11>
@@ -213,15 +213,15 @@ define <16 x i8> @shuffle4_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x
 define <8 x i16> @shuffle4_v8i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
 ; CHECK-LABEL: shuffle4_v8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov d5, d2
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-NEXT:    adrp x8, .LCPI4_0
-; CHECK-NEXT:    fmov d4, d0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI4_0]
-; CHECK-NEXT:    mov v4.d[1], v1.d[0]
-; CHECK-NEXT:    mov v5.d[1], v3.d[0]
-; CHECK-NEXT:    tbl v0.16b, { v4.16b, v5.16b }, v0.16b
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    zip1 v1.2d, v2.2d, v3.2d
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI4_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b, v1.16b }, v2.16b
 ; CHECK-NEXT:    ret
   %x = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %y = shufflevector <4 x i16> %c, <4 x i16> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -277,15 +277,15 @@ define <16 x i8> @shuffle4_v8i8_v16i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    zip1 v1.2d, v2.2d, v2.2d
 ; CHECK-NEXT:    adrp x8, .LCPI6_0
-; CHECK-NEXT:    mov v2.d[1], v2.d[0]
-; CHECK-NEXT:    mov v0.d[1], v0.d[0]
-; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI6_0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v0.2d
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI6_0]
 ; CHECK-NEXT:    adrp x8, .LCPI6_1
-; CHECK-NEXT:    tbl v3.8b, { v2.16b }, v1.8b
-; CHECK-NEXT:    tbl v2.8b, { v0.16b }, v1.8b
+; CHECK-NEXT:    tbl v4.8b, { v1.16b }, v2.8b
+; CHECK-NEXT:    tbl v3.8b, { v0.16b }, v2.8b
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI6_1]
-; CHECK-NEXT:    tbl v0.16b, { v2.16b, v3.16b }, v0.16b
+; CHECK-NEXT:    tbl v0.16b, { v3.16b, v4.16b }, v0.16b
 ; CHECK-NEXT:    ret
   %x = shufflevector <8 x i8> %a, <8 x i8> %b, <4 x i32> <i32 0, i32 7, i32 5, i32 1>
   %y = shufflevector <8 x i8> %c, <8 x i8> %d, <4 x i32> <i32 0, i32 7, i32 5, i32 1>
@@ -314,17 +314,17 @@ define <16 x i8> @shuffle4_v8i8_v16i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8
 define <8 x i8> @shuffle4_v8i8_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
 ; CHECK-LABEL: shuffle4_v8i8_v8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    zip1 v1.2d, v2.2d, v2.2d
 ; CHECK-NEXT:    adrp x8, .LCPI7_0
-; CHECK-NEXT:    mov v2.d[1], v2.d[0]
-; CHECK-NEXT:    mov v0.d[1], v0.d[0]
-; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI7_0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v0.2d
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI7_0]
 ; CHECK-NEXT:    adrp x8, .LCPI7_1
-; CHECK-NEXT:    tbl v2.8b, { v2.16b }, v1.8b
-; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-NEXT:    tbl v1.8b, { v1.16b }, v2.8b
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v2.8b
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI7_1]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
 ; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-NEXT:    ret
   %x = shufflevector <8 x i8> %a, <8 x i8> %b, <4 x i32> <i32 0, i32 7, i32 5, i32 1>
@@ -353,17 +353,17 @@ define <8 x i8> @shuffle4_v8i8_v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x
 define <8 x i16> @shuffle4_v4i8_zext(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
 ; CHECK-LABEL: shuffle4_v4i8_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov d5, d2
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d3 killed $d3 def $q3
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-NEXT:    adrp x8, .LCPI8_0
-; CHECK-NEXT:    fmov d4, d0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI8_0]
-; CHECK-NEXT:    mov v4.d[1], v1.d[0]
-; CHECK-NEXT:    mov v5.d[1], v3.d[0]
-; CHECK-NEXT:    bic v4.8h, #255, lsl #8
-; CHECK-NEXT:    bic v5.8h, #255, lsl #8
-; CHECK-NEXT:    tbl v0.16b, { v4.16b, v5.16b }, v0.16b
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    zip1 v1.2d, v2.2d, v3.2d
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI8_0]
+; CHECK-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEXT:    bic v1.8h, #255, lsl #8
+; CHECK-NEXT:    tbl v0.16b, { v0.16b, v1.16b }, v2.16b
 ; CHECK-NEXT:    ret
   %x = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %y = shufflevector <4 x i8> %c, <4 x i8> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -503,13 +503,13 @@ define <12 x i8> @shuffle3_v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c) {
 define <8 x i16> @shuffle3_v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) {
 ; CHECK-LABEL: shuffle3_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov d3, d2
-; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d2 killed $d2 killed $q1_q2 def $q1_q2
+; CHECK-NEXT:    fmov d3, d1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    adrp x8, .LCPI12_0
-; CHECK-NEXT:    fmov d2, d0
+; CHECK-NEXT:    zip1 v1.2d, v0.2d, v3.2d
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI12_0]
-; CHECK-NEXT:    mov v2.d[1], v1.d[0]
-; CHECK-NEXT:    tbl v0.16b, { v2.16b, v3.16b }, v0.16b
+; CHECK-NEXT:    tbl v0.16b, { v1.16b, v2.16b }, v0.16b
 ; CHECK-NEXT:    ret
   %x = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %y = shufflevector <4 x i16> %c, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -567,7 +567,7 @@ define <8 x i8> @insert4_v8i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c, <16 x i8>
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
 ; CHECK-NEXT:    adrp x8, .LCPI14_0
 ; CHECK-NEXT:    adrp x9, .LCPI14_1
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    mov v3.16b, v1.16b
 ; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI14_0]
 ; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI14_1]
@@ -634,15 +634,15 @@ define <16 x i8> @insert4_v16i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c, <16 x i8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov v4.16b, v3.16b
 ; CHECK-NEXT:    adrp x8, .LCPI15_0
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q31_q0
 ; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    zip1 v7.2d, v0.2d, v2.2d
 ; CHECK-NEXT:    mov v3.16b, v1.16b
 ; CHECK-NEXT:    ldr q5, [x8, :lo12:.LCPI15_0]
-; CHECK-NEXT:    mov v0.d[1], v2.d[0]
 ; CHECK-NEXT:    adrp x8, .LCPI15_1
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI15_1]
-; CHECK-NEXT:    tbl v31.16b, { v3.16b, v4.16b }, v5.16b
-; CHECK-NEXT:    tbl v0.16b, { v31.16b, v0.16b }, v1.16b
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI15_1]
+; CHECK-NEXT:    tbl v6.16b, { v3.16b, v4.16b }, v5.16b
+; CHECK-NEXT:    tbl v0.16b, { v6.16b, v7.16b }, v0.16b
 ; CHECK-NEXT:    ret
   %e1 = extractelement <8 x i8> %a, i32 4
   %e2 = extractelement <8 x i8> %c, i32 0
diff --git a/llvm/test/CodeGen/AArch64/shuffles.ll b/llvm/test/CodeGen/AArch64/shuffles.ll
index 41dd7f06712d2..1d780d077dd4e 100644
--- a/llvm/test/CodeGen/AArch64/shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/shuffles.ll
@@ -341,7 +341,7 @@ define <8 x i8> @test_shuf8(<8 x i8> %a, <8 x i8> %b)
 ; CHECKLE-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECKLE-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECKLE-NEXT:    adrp x8, .LCPI12_0
-; CHECKLE-NEXT:    mov v0.d[1], v1.d[0]
+; CHECKLE-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECKLE-NEXT:    ldr d1, [x8, :lo12:.LCPI12_0]
 ; CHECKLE-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECKLE-NEXT:    ret
@@ -352,7 +352,7 @@ define <8 x i8> @test_shuf8(<8 x i8> %a, <8 x i8> %b)
 ; CHECKBE-NEXT:    rev64 v1.8b, v1.8b
 ; CHECKBE-NEXT:    adrp x8, .LCPI12_0
 ; CHECKBE-NEXT:    add x8, x8, :lo12:.LCPI12_0
-; CHECKBE-NEXT:    mov v0.d[1], v1.d[0]
+; CHECKBE-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECKBE-NEXT:    ld1 { v1.8b }, [x8]
 ; CHECKBE-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECKBE-NEXT:    rev64 v0.8b, v0.8b
diff --git a/llvm/test/CodeGen/AArch64/shufflevector.ll b/llvm/test/CodeGen/AArch64/shufflevector.ll
index e5c07e0c81e35..4359af0030656 100644
--- a/llvm/test/CodeGen/AArch64/shufflevector.ll
+++ b/llvm/test/CodeGen/AArch64/shufflevector.ll
@@ -10,7 +10,7 @@ define <8 x i8> @shufflevector_v8i8(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    adrp x8, .LCPI0_0
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI0_0]
 ; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    ret
@@ -599,7 +599,7 @@ define <7 x i8> @shufflevector_v7i8(<7 x i8> %a, <7 x i8> %b) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    adrp x8, .LCPI36_0
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI36_0]
 ; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll b/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll
index 6a95859c7692d..d8fe6f498d93d 100644
--- a/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll
+++ b/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll
@@ -112,7 +112,7 @@ entry:
 ; CHECK-NEXT:  and     x1, x1, x16
 ; CHECK-NEXT:  csdb
 ; CHECK-NEXT:  ldr     d0, [x1]
-; CHECK-NEXT:  mov     v0.d[1], v0.d[0]
+; CHECK-NEXT:  zip1    v0.2d, v0.2d, v0.2d
 ; CHECK-NEXT:  str     q0, [x0]
 ; CHECK-NEXT:  mov     [[TMPREG:x[0-9]+]], sp
 ; CHECK-NEXT:  and     [[TMPREG]], [[TMPREG]], x16
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll
index ee82ad8467322..72ace99e99f43 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll
@@ -25,7 +25,7 @@ define <16 x i8> @concat_v16i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
   %res = shufflevector <8 x i8> %op1, <8 x i8> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                  i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -186,7 +186,7 @@ define <8 x i16> @concat_v8i16(<4 x i16> %op1, <4 x i16> %op2) vscale_range(2,0)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
   %res = shufflevector <4 x i16> %op1, <4 x i16> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x i16> %res
@@ -316,7 +316,7 @@ define <4 x i32> @concat_v4i32(<2 x i32> %op1, <2 x i32> %op2) vscale_range(2,0)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
   %res = shufflevector <2 x i32> %op1, <2 x i32> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i32> %res
@@ -421,7 +421,7 @@ define <2 x i64> @concat_v2i64(<1 x i64> %op1, <1 x i64> %op2) vscale_range(2,0)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
   %res = shufflevector <1 x i64> %op1, <1 x i64> %op2, <2 x i32> <i32 0, i32 1>
   ret <2 x i64> %res
@@ -529,7 +529,7 @@ define <8 x half> @concat_v8f16(<4 x half> %op1, <4 x half> %op2) vscale_range(2
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
   %res = shufflevector <4 x half> %op1, <4 x half> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x half> %res
@@ -659,7 +659,7 @@ define <4 x float> @concat_v4f32(<2 x float> %op1, <2 x float> %op2) vscale_rang
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
   %res = shufflevector <2 x float> %op1, <2 x float> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x float> %res
@@ -764,7 +764,7 @@ define <2 x double> @concat_v2f64(<1 x double> %op1, <1 x double> %op2) vscale_r
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
   %res = shufflevector <1 x double> %op1, <1 x double> %op2, <2 x i32> <i32 0, i32 1>
   ret <2 x double> %res
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
index b60988be1e76c..b95a167b676b3 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
@@ -471,8 +471,8 @@ define void @fcvt_v8f64_v8f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    uzp1 z1.s, z1.s, z1.s
 ; VBITS_GE_256-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
-; VBITS_GE_256-NEXT:    str q1, [x1]
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; VBITS_GE_256-NEXT:    str q0, [x1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fcvt_v8f64_v8f16:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll
index af54b146c5b66..32f84753eac67 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll
@@ -658,10 +658,9 @@ define <8 x i16> @fcvtzu_v8f64_v8i16(ptr %a) #0 {
 ; VBITS_GE_256-NEXT:    fcvtzu z1.d, p0/m, z1.d
 ; VBITS_GE_256-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; VBITS_GE_256-NEXT:    uzp1 z1.s, z1.s, z1.s
-; VBITS_GE_256-NEXT:    uzp1 z2.h, z0.h, z0.h
-; VBITS_GE_256-NEXT:    uzp1 z0.h, z1.h, z1.h
-; VBITS_GE_256-NEXT:    mov v0.d[1], v2.d[0]
-; VBITS_GE_256-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_GE_256-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fcvtzu_v8f64_v8i16:
@@ -1551,10 +1550,9 @@ define <8 x i16> @fcvtzs_v8f64_v8i16(ptr %a) #0 {
 ; VBITS_GE_256-NEXT:    fcvtzs z1.d, p0/m, z1.d
 ; VBITS_GE_256-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; VBITS_GE_256-NEXT:    uzp1 z1.s, z1.s, z1.s
-; VBITS_GE_256-NEXT:    uzp1 z2.h, z0.h, z0.h
-; VBITS_GE_256-NEXT:    uzp1 z0.h, z1.h, z1.h
-; VBITS_GE_256-NEXT:    mov v0.d[1], v2.d[0]
-; VBITS_GE_256-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_GE_256-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fcvtzs_v8f64_v8i16:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp128.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp128.ll
index 61e04682fa0bf..e3aba68b95d8f 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp128.ll
@@ -75,9 +75,9 @@ define void @fcvt_v4f128_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    bl __trunctfdf2
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    add x8, sp, #128
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    str z0, [x8, #1, mul vl] // 16-byte Folded Spill
 ; CHECK-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
 ; CHECK-NEXT:    bl __trunctfdf2
@@ -86,10 +86,10 @@ define void @fcvt_v4f128_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    bl __trunctfdf2
 ; CHECK-NEXT:    ldr q1, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    add x8, sp, #128
 ; CHECK-NEXT:    ptrue p0.d, vl2
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ldr z1, [x8, #1, mul vl] // 16-byte Folded Reload
 ; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    str z0, [x8, #1, mul vl] // 16-byte Folded Spill
@@ -100,9 +100,9 @@ define void @fcvt_v4f128_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ldr q0, [sp, #80] // 16-byte Folded Reload
 ; CHECK-NEXT:    bl __trunctfdf2
 ; CHECK-NEXT:    ldr q1, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    add x8, sp, #128
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    str z0, [x8] // 16-byte Folded Spill
 ; CHECK-NEXT:    ldr q0, [sp, #96] // 16-byte Folded Reload
 ; CHECK-NEXT:    bl __trunctfdf2
@@ -111,10 +111,10 @@ define void @fcvt_v4f128_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ldr q0, [sp, #112] // 16-byte Folded Reload
 ; CHECK-NEXT:    bl __trunctfdf2
 ; CHECK-NEXT:    ldr q1, [sp, #96] // 16-byte Folded Reload
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    add x8, sp, #128
 ; CHECK-NEXT:    ptrue p0.d, vl2
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ldr z1, [x8] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov x8, #4 // =0x4
 ; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
index 1d9e01f4ecfdf..6cc0874dfef4d 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
@@ -661,10 +661,9 @@ define <8 x half> @ucvtf_v8i64_v8f16(ptr %a) #0 {
 ; VBITS_GE_256-NEXT:    ucvtf z1.h, p0/m, z1.d
 ; VBITS_GE_256-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; VBITS_GE_256-NEXT:    uzp1 z1.s, z1.s, z1.s
-; VBITS_GE_256-NEXT:    uzp1 z2.h, z0.h, z0.h
-; VBITS_GE_256-NEXT:    uzp1 z0.h, z1.h, z1.h
-; VBITS_GE_256-NEXT:    mov v0.d[1], v2.d[0]
-; VBITS_GE_256-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_GE_256-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: ucvtf_v8i64_v8f16:
@@ -1592,10 +1591,9 @@ define <8 x half> @scvtf_v8i64_v8f16(ptr %a) #0 {
 ; VBITS_GE_256-NEXT:    scvtf z1.h, p0/m, z1.d
 ; VBITS_GE_256-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; VBITS_GE_256-NEXT:    uzp1 z1.s, z1.s, z1.s
-; VBITS_GE_256-NEXT:    uzp1 z2.h, z0.h, z0.h
-; VBITS_GE_256-NEXT:    uzp1 z0.h, z1.h, z1.h
-; VBITS_GE_256-NEXT:    mov v0.d[1], v2.d[0]
-; VBITS_GE_256-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_GE_256-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: scvtf_v8i64_v8f16:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
index d2206a74533d8..78ac07388499e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
@@ -152,8 +152,8 @@ define void @masked_gather_v8i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; VBITS_GE_256-NEXT:    uzp1 z1.s, z1.s, z1.s
 ; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
-; VBITS_GE_256-NEXT:    str q1, [x0]
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; VBITS_GE_256-NEXT:    str q0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_gather_v8i16:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
index 093e6cd9328c8..199710cf4510a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -232,8 +232,8 @@ define void @masked_gather_v8i16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
 ; VBITS_GE_256-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; VBITS_GE_256-NEXT:    uzp1 z0.h, z0.h, z0.h
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
-; VBITS_GE_256-NEXT:    str q1, [x0]
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; VBITS_GE_256-NEXT:    str q0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_gather_v8i16:
@@ -640,8 +640,8 @@ define void @masked_gather_v8f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
 ; VBITS_GE_256-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; VBITS_GE_256-NEXT:    uzp1 z0.h, z0.h, z0.h
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
-; VBITS_GE_256-NEXT:    str q1, [x0]
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; VBITS_GE_256-NEXT:    str q0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_gather_v8f16:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
index 34dc0bb5ef2d2..bc57180b949f5 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
@@ -821,8 +821,8 @@ define void @masked_load_sext_v16i8i32_m32(ptr %ap, ptr %bp, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
 ; VBITS_GE_256-NEXT:    uzp1 z0.b, z0.b, z0.b
 ; VBITS_GE_256-NEXT:    uzp1 z1.b, z1.b, z1.b
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
-; VBITS_GE_256-NEXT:    cmpne p1.b, p1/z, z1.b, #0
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; VBITS_GE_256-NEXT:    cmpne p1.b, p1/z, z0.b, #0
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p1/z, [x0]
 ; VBITS_GE_256-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
 ; VBITS_GE_256-NEXT:    sunpklo z0.h, z0.b
@@ -911,8 +911,8 @@ define void @masked_load_sext_v16i16i32_m32(ptr %ap, ptr %bp, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
 ; VBITS_GE_256-NEXT:    uzp1 z0.b, z0.b, z0.b
 ; VBITS_GE_256-NEXT:    uzp1 z1.b, z1.b, z1.b
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
-; VBITS_GE_256-NEXT:    sunpklo z0.h, z1.b
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; VBITS_GE_256-NEXT:    sunpklo z0.h, z0.b
 ; VBITS_GE_256-NEXT:    cmpne p1.h, p1/z, z0.h, #0
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p1/z, [x0]
 ; VBITS_GE_256-NEXT:    sunpklo z1.s, z0.h
@@ -1080,8 +1080,8 @@ define void @masked_load_zext_v16i8i32_m32(ptr %ap, ptr %bp, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
 ; VBITS_GE_256-NEXT:    uzp1 z0.b, z0.b, z0.b
 ; VBITS_GE_256-NEXT:    uzp1 z1.b, z1.b, z1.b
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
-; VBITS_GE_256-NEXT:    cmpne p1.b, p1/z, z1.b, #0
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; VBITS_GE_256-NEXT:    cmpne p1.b, p1/z, z0.b, #0
 ; VBITS_GE_256-NEXT:    ld1b { z0.b }, p1/z, [x0]
 ; VBITS_GE_256-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
 ; VBITS_GE_256-NEXT:    uunpklo z0.h, z0.b
@@ -1170,8 +1170,8 @@ define void @masked_load_zext_v16i16i32_m32(ptr %ap, ptr %bp, ptr %c) #0 {
 ; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
 ; VBITS_GE_256-NEXT:    uzp1 z0.b, z0.b, z0.b
 ; VBITS_GE_256-NEXT:    uzp1 z1.b, z1.b, z1.b
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
-; VBITS_GE_256-NEXT:    sunpklo z0.h, z1.b
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; VBITS_GE_256-NEXT:    sunpklo z0.h, z0.b
 ; VBITS_GE_256-NEXT:    cmpne p1.h, p1/z, z0.h, #0
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p1/z, [x0]
 ; VBITS_GE_256-NEXT:    uunpklo z1.s, z0.h
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll
index b0d4f79aea110..7f931f6f2553c 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll
@@ -203,13 +203,13 @@ define void @masked_store_trunc_v8i64i16(ptr %ap, ptr %bp, ptr %dest) #0 {
 ; VBITS_GE_256-NEXT:    mov z3.d, p0/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    ptrue p0.s, vl4
 ; VBITS_GE_256-NEXT:    uzp1 z2.s, z2.s, z2.s
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; VBITS_GE_256-NEXT:    uzp1 z3.s, z3.s, z3.s
 ; VBITS_GE_256-NEXT:    splice z3.s, p0, z3.s, z2.s
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl8
 ; VBITS_GE_256-NEXT:    uzp1 z2.h, z3.h, z3.h
 ; VBITS_GE_256-NEXT:    cmpne p0.h, p0/z, z2.h, #0
-; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x2]
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_store_trunc_v8i64i16:
@@ -288,13 +288,13 @@ define void @masked_store_trunc_v16i32i8(ptr %ap, ptr %bp, ptr %dest) #0 {
 ; VBITS_GE_256-NEXT:    mov z3.s, p0/z, #-1 // =0xffffffffffffffff
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl16
 ; VBITS_GE_256-NEXT:    uzp1 z2.h, z2.h, z2.h
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; VBITS_GE_256-NEXT:    uzp1 z3.h, z3.h, z3.h
 ; VBITS_GE_256-NEXT:    uzp1 z2.b, z2.b, z2.b
 ; VBITS_GE_256-NEXT:    uzp1 z3.b, z3.b, z3.b
-; VBITS_GE_256-NEXT:    mov v3.d[1], v2.d[0]
-; VBITS_GE_256-NEXT:    cmpne p0.b, p0/z, z3.b, #0
-; VBITS_GE_256-NEXT:    st1b { z1.b }, p0, [x2]
+; VBITS_GE_256-NEXT:    zip1 v2.2d, v3.2d, v2.2d
+; VBITS_GE_256-NEXT:    cmpne p0.b, p0/z, z2.b, #0
+; VBITS_GE_256-NEXT:    st1b { z0.b }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_store_trunc_v16i32i8:
@@ -335,8 +335,8 @@ define void @masked_store_trunc_v16i32i16(ptr %ap, ptr %bp, ptr %dest) #0 {
 ; VBITS_GE_256-NEXT:    uzp1 z3.h, z3.h, z3.h
 ; VBITS_GE_256-NEXT:    uzp1 z2.b, z2.b, z2.b
 ; VBITS_GE_256-NEXT:    uzp1 z3.b, z3.b, z3.b
-; VBITS_GE_256-NEXT:    mov v3.d[1], v2.d[0]
-; VBITS_GE_256-NEXT:    sunpklo z2.h, z3.b
+; VBITS_GE_256-NEXT:    zip1 v2.2d, v3.2d, v2.2d
+; VBITS_GE_256-NEXT:    sunpklo z2.h, z2.b
 ; VBITS_GE_256-NEXT:    cmpne p0.h, p0/z, z2.h, #0
 ; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x2]
 ; VBITS_GE_256-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll
index 23ae5f00b5a45..09f7a8d72f803 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll
@@ -16,8 +16,8 @@ define i1 @ptest_v16i1_256bit_min_sve(ptr %a, ptr %b) vscale_range(2, 0) {
 ; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
 ; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
-; CHECK-NEXT:    mov v1.d[1], v0.d[0]
-; CHECK-NEXT:    umaxv b0, v1.16b
+; CHECK-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    umaxv b0, v0.16b
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc-stores.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc-stores.ll
index 020d5cb53bf21..23829b8b57c87 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc-stores.ll
@@ -96,8 +96,8 @@ define void @store_trunc_v8i64i16(ptr %ap, ptr %dest) #0 {
 ; VBITS_GE_256-NEXT:    uzp1 z1.s, z1.s, z1.s
 ; VBITS_GE_256-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
-; VBITS_GE_256-NEXT:    str q1, [x1]
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; VBITS_GE_256-NEXT:    str q0, [x1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: store_trunc_v8i64i16:
@@ -151,8 +151,8 @@ define void @store_trunc_v16i32i8(ptr %ap, ptr %dest) #0 {
 ; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
 ; VBITS_GE_256-NEXT:    uzp1 z0.b, z0.b, z0.b
 ; VBITS_GE_256-NEXT:    uzp1 z1.b, z1.b, z1.b
-; VBITS_GE_256-NEXT:    mov v1.d[1], v0.d[0]
-; VBITS_GE_256-NEXT:    str q1, [x1]
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
+; VBITS_GE_256-NEXT:    str q0, [x1]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: store_trunc_v16i32i8:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc.ll
index 8dc45eadce6f3..2231cf5f84c47 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-trunc.ll
@@ -118,10 +118,9 @@ define <16 x i8> @trunc_v16i32_v16i8(ptr %in) #0 {
 ; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
-; VBITS_GE_256-NEXT:    uzp1 z2.b, z0.b, z0.b
-; VBITS_GE_256-NEXT:    uzp1 z0.b, z1.b, z1.b
-; VBITS_GE_256-NEXT:    mov v0.d[1], v2.d[0]
-; VBITS_GE_256-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_GE_256-NEXT:    uzp1 z0.b, z0.b, z0.b
+; VBITS_GE_256-NEXT:    uzp1 z1.b, z1.b, z1.b
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: trunc_v16i32_v16i8:
@@ -372,10 +371,9 @@ define <8 x i16> @trunc_v8i64_v8i16(ptr %in) #0 {
 ; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; VBITS_GE_256-NEXT:    uzp1 z1.s, z1.s, z1.s
-; VBITS_GE_256-NEXT:    uzp1 z2.h, z0.h, z0.h
-; VBITS_GE_256-NEXT:    uzp1 z0.h, z1.h, z1.h
-; VBITS_GE_256-NEXT:    mov v0.d[1], v2.d[0]
-; VBITS_GE_256-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_GE_256-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_256-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_GE_256-NEXT:    zip1 v0.2d, v1.2d, v0.2d
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: trunc_v8i64_v8i16:
diff --git a/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll b/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
index fd23f3da18cd7..65f517782d8b6 100644
--- a/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/trunc-to-tbl.ll
@@ -344,7 +344,7 @@ define void @trunc_v16i64_to_v16i8_in_loop(ptr %A, ptr %dst) {
 ; CHECK-NEXT:    ldp q18, q19, [x9, #96]
 ; CHECK-NEXT:    tbl.16b v1, { v1, v2, v3, v4 }, v0
 ; CHECK-NEXT:    tbl.16b v2, { v16, v17, v18, v19 }, v0
-; CHECK-NEXT:    mov.d v1[1], v2[0]
+; CHECK-NEXT:    zip1.2d v1, v1, v2
 ; CHECK-NEXT:    str q1, [x1, x8, lsl #4]
 ; CHECK-NEXT:    add x8, x8, #1
 ; CHECK-NEXT:    cmp x8, #1000
@@ -382,7 +382,7 @@ define void @trunc_v16i64_to_v16i8_in_loop(ptr %A, ptr %dst) {
 ; CHECK-BE-NEXT:    cmp x8, #1000
 ; CHECK-BE-NEXT:    tbl v1.16b, { v1.16b, v2.16b, v3.16b, v4.16b }, v0.16b
 ; CHECK-BE-NEXT:    tbl v2.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v0.16b
-; CHECK-BE-NEXT:    mov v1.d[1], v2.d[0]
+; CHECK-BE-NEXT:    zip1 v1.2d, v1.2d, v2.2d
 ; CHECK-BE-NEXT:    st1 { v1.16b }, [x9]
 ; CHECK-BE-NEXT:    b.eq .LBB3_1
 ; CHECK-BE-NEXT:  // %bb.2: // %exit
diff --git a/llvm/test/CodeGen/AArch64/trunc.ll b/llvm/test/CodeGen/AArch64/trunc.ll
index d510dc83cfd86..892865d689991 100644
--- a/llvm/test/CodeGen/AArch64/trunc.ll
+++ b/llvm/test/CodeGen/AArch64/trunc.ll
@@ -226,7 +226,7 @@ define <3 x i8> @trunc_v3i8_v3i64(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    xtn v1.2s, v2.2d
 ; CHECK-SD-NEXT:    xtn v0.2s, v0.2d
 ; CHECK-SD-NEXT:    fmov w2, s1
@@ -264,7 +264,7 @@ define <3 x i16> @trunc_v3i16_v3i64(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v2.4s
 ; CHECK-SD-NEXT:    xtn v0.4h, v0.4s
 ; CHECK-SD-NEXT:    ret
@@ -290,7 +290,7 @@ define <3 x i32> @trunc_v3i32_v3i64(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v2.4s
 ; CHECK-SD-NEXT:    ret
 ;
@@ -349,7 +349,7 @@ define <3 x i10> @trunc_v3i10_v3i64(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    xtn v1.2s, v2.2d
 ; CHECK-SD-NEXT:    xtn v0.2s, v0.2d
 ; CHECK-SD-NEXT:    fmov w2, s1
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add.ll b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
index fd24282366282..25563fa61da1a 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-add.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
@@ -1734,7 +1734,7 @@ define i64 @add_pair_v2i32_v2i64_zext(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    uaddlv d0, v0.4s
 ; CHECK-SD-NEXT:    fmov x0, d0
 ; CHECK-SD-NEXT:    ret
@@ -1842,7 +1842,7 @@ define i32 @add_pair_v4i16_v4i32_zext(<4 x i16> %x, <4 x i16> %y) {
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    uaddlv s0, v0.8h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -2693,7 +2693,7 @@ define i64 @add_pair_v2i16_v2i64_zext(<2 x i16> %x, <2 x i16> %y) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    movi v2.2d, #0x00ffff0000ffff
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    and v0.16b, v0.16b, v2.16b
 ; CHECK-SD-NEXT:    uaddlv d0, v0.4s
 ; CHECK-SD-NEXT:    fmov x0, d0
@@ -2990,7 +2990,7 @@ define i32 @add_pair_v4i8_v4i32_zext(<4 x i8> %x, <4 x i8> %y) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
 ; CHECK-SD-NEXT:    bic v1.4h, #255, lsl #8
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    uaddlv s0, v0.8h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -3110,7 +3110,7 @@ define zeroext i16 @add_pair_v8i8_v8i16_zext(<8 x i8> %x, <8 x i8> %y) {
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    uaddlv h0, v0.16b
 ; CHECK-SD-NEXT:    umov w0, v0.h[0]
 ; CHECK-SD-NEXT:    ret
@@ -3447,7 +3447,7 @@ define i64 @add_pair_v2i8_v2i64_zext(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    movi v2.2d, #0x0000ff000000ff
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    and v0.16b, v0.16b, v2.16b
 ; CHECK-SD-NEXT:    uaddlv d0, v0.4s
 ; CHECK-SD-NEXT:    fmov x0, d0
@@ -4091,14 +4091,13 @@ define i32 @add_v24i8_v24i32_zext(<24 x i8> %x) {
 ; CHECK-GI-DOT-NEXT:    ldr w8, [sp]
 ; CHECK-GI-DOT-NEXT:    ldr w10, [sp, #72]
 ; CHECK-GI-DOT-NEXT:    movi v2.8b, #1
-; CHECK-GI-DOT-NEXT:    movi v3.8b, #1
+; CHECK-GI-DOT-NEXT:    movi v4.2d, #0000000000000000
 ; CHECK-GI-DOT-NEXT:    fmov s1, w9
 ; CHECK-GI-DOT-NEXT:    ldr w9, [sp, #80]
-; CHECK-GI-DOT-NEXT:    movi v4.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT:    mov v0.b[1], w1
 ; CHECK-GI-DOT-NEXT:    movi v5.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT:    mov v0.b[1], w1
 ; CHECK-GI-DOT-NEXT:    mov v1.b[1], w10
-; CHECK-GI-DOT-NEXT:    mov v3.d[1], v2.d[0]
+; CHECK-GI-DOT-NEXT:    zip1 v3.2d, v2.2d, v2.2d
 ; CHECK-GI-DOT-NEXT:    mov v0.b[2], w2
 ; CHECK-GI-DOT-NEXT:    mov v1.b[2], w9
 ; CHECK-GI-DOT-NEXT:    ldr w9, [sp, #88]
@@ -4363,14 +4362,13 @@ define i32 @add_v24i8_v24i32_sext(<24 x i8> %x) {
 ; CHECK-GI-DOT-NEXT:    ldr w8, [sp]
 ; CHECK-GI-DOT-NEXT:    ldr w10, [sp, #72]
 ; CHECK-GI-DOT-NEXT:    movi v2.8b, #1
-; CHECK-GI-DOT-NEXT:    movi v3.8b, #1
+; CHECK-GI-DOT-NEXT:    movi v4.2d, #0000000000000000
 ; CHECK-GI-DOT-NEXT:    fmov s1, w9
 ; CHECK-GI-DOT-NEXT:    ldr w9, [sp, #80]
-; CHECK-GI-DOT-NEXT:    movi v4.2d, #0000000000000000
-; CHECK-GI-DOT-NEXT:    mov v0.b[1], w1
 ; CHECK-GI-DOT-NEXT:    movi v5.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT:    mov v0.b[1], w1
 ; CHECK-GI-DOT-NEXT:    mov v1.b[1], w10
-; CHECK-GI-DOT-NEXT:    mov v3.d[1], v2.d[0]
+; CHECK-GI-DOT-NEXT:    zip1 v3.2d, v2.2d, v2.2d
 ; CHECK-GI-DOT-NEXT:    mov v0.b[2], w2
 ; CHECK-GI-DOT-NEXT:    mov v1.b[2], w9
 ; CHECK-GI-DOT-NEXT:    ldr w9, [sp, #88]
@@ -4798,7 +4796,7 @@ entry:
 define i32 @extract_hi_hi(<8 x i16> %a) {
 ; CHECK-SD-LABEL: extract_hi_hi:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    mov v0.d[0], v0.d[1]
+; CHECK-SD-NEXT:    zip2 v0.2d, v0.2d, v0.2d
 ; CHECK-SD-NEXT:    uaddlv s0, v0.8h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -4820,7 +4818,7 @@ entry:
 define i32 @extract_lo_lo(<8 x i16> %a) {
 ; CHECK-SD-LABEL: extract_lo_lo:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v0.2d
 ; CHECK-SD-NEXT:    uaddlv s0, v0.8h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/xtn.ll b/llvm/test/CodeGen/AArch64/xtn.ll
index 8a4d6b8c7b789..96fc6e729cd5f 100644
--- a/llvm/test/CodeGen/AArch64/xtn.ll
+++ b/llvm/test/CodeGen/AArch64/xtn.ll
@@ -270,7 +270,7 @@ define <3 x i8> @xtn_v3i64_v3i8(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    xtn v1.2s, v2.2d
 ; CHECK-SD-NEXT:    xtn v0.2s, v0.2d
 ; CHECK-SD-NEXT:    fmov w2, s1
@@ -308,7 +308,7 @@ define <3 x i16> @xtn_v3i64_v3i16(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v2.4s
 ; CHECK-SD-NEXT:    xtn v0.4h, v0.4s
 ; CHECK-SD-NEXT:    ret
@@ -334,7 +334,7 @@ define <3 x i32> @xtn_v3i64_v3i32(<3 x i64> %a) {
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-SD-NEXT:    // kill: def $d1 killed $d1 def $q1
 ; CHECK-SD-NEXT:    // kill: def $d2 killed $d2 def $q2
-; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT:    zip1 v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    uzp1 v0.4s, v0.4s, v2.4s
 ; CHECK-SD-NEXT:    ret
 ;


