[llvm] 6e1ebb9 - Fix up tests committed in 57a6bb34729df30df18a133ee2083b3323a936dc

David Sherwood via llvm-commits <llvm-commits at lists.llvm.org>
Fri Feb 3 03:18:42 PST 2023


Author: David Sherwood
Date: 2023-02-03T11:01:00Z
New Revision: 6e1ebb916e467f26d3c0cb0819770cd67f956cc3

URL: https://github.com/llvm/llvm-project/commit/6e1ebb916e467f26d3c0cb0819770cd67f956cc3
DIFF: https://github.com/llvm/llvm-project/commit/6e1ebb916e467f26d3c0cb0819770cd67f956cc3.diff

LOG: Fix up tests committed in 57a6bb34729df30df18a133ee2083b3323a936dc

I committed an older version of the patch without the test updates.
This patch applies the latest versions of the tests from
https://reviews.llvm.org/D142904
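
For reference, the updated tests all follow the same pattern: rather than
returning the intrinsic's tuple result directly (which lets the result
registers simply alias the inputs), they extract element 0 of the tuple and
add it to a still-live input, forcing the tuple into registers distinct from
z0/z1. A minimal sketch of the pattern, lifted from the uzp_x2_i8 case in
the diff below:

    ; Keeping %zn live across the call means the uzp destination tuple
    ; cannot reuse z0/z1, so the CHECK lines can verify that the tuple is
    ; allocated starting at a correct register multiple (z2/z3 here).
    define <vscale x 16 x i8> @uzp_x2_i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) nounwind {
      %uzp = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzp.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
      %uzp0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %uzp, 0
      %add = add <vscale x 16 x i8> %uzp0, %zn
      ret <vscale x 16 x i8> %add
    }

    declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzp.x2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)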

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-uzpx2.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-uzpx4.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-zipx2.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-uzpx2.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-uzpx2.ll
index 89ad057d6fb7..86027c7fecd8 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-uzpx2.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-uzpx2.ll
@@ -3,31 +3,40 @@
 
 ; == 8 to 64-bit elements ==
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @uzp_x2_i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) nounwind {
+define <vscale x 16 x i8> @uzp_x2_i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) nounwind {
 ; CHECK-LABEL: uzp_x2_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp { z0.b, z1.b }, z0.b, z1.b
+; CHECK-NEXT:    uzp { z2.b, z3.b }, z0.b, z1.b
+; CHECK-NEXT:    add z0.b, z2.b, z0.b
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzp.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
-  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+  %uzp = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzp.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
+  %uzp0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %uzp, 0
+  %add = add <vscale x 16 x i8> %uzp0, %zn
+  ret <vscale x 16 x i8> %add
 }
 
-define { <vscale x 8 x i16>, <vscale x 8 x i16> } @uzp_x2_i16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) nounwind {
+define <vscale x 8 x i16> @uzp_x2_i16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) nounwind {
 ; CHECK-LABEL: uzp_x2_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp { z0.h, z1.h }, z0.h, z1.h
+; CHECK-NEXT:    uzp { z2.h, z3.h }, z0.h, z1.h
+; CHECK-NEXT:    add z0.h, z2.h, z0.h
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.uzp.x2.nxv8i16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
-  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+  %uzp = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.uzp.x2.nxv8i16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
+  %uzp0 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %uzp, 0
+  %add = add <vscale x 8 x i16> %uzp0, %zn
+  ret <vscale x 8 x i16> %add
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half> } @uzp_x2_f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm) nounwind {
+define <vscale x 8 x half> @uzp_x2_f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm) nounwind {
 ; CHECK-LABEL: uzp_x2_f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp { z0.h, z1.h }, z0.h, z1.h
+; CHECK-NEXT:    uzp { z2.h, z3.h }, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z2.h, z0.h
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.uzp.x2.nxv8f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
-  ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+  %uzp = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.uzp.x2.nxv8f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
+  %uzp0 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } %uzp, 0
+  %add = fadd <vscale x 8 x half> %uzp0, %zn
+  ret <vscale x 8 x half> %add
 }
 
 define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @uzp_x2_bf16(<vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) nounwind {
@@ -35,56 +44,74 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @uzp_x2_bf16(<vscale x 8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp { z0.h, z1.h }, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.uzp.x2.nxv8bf16(<vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
-  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+  %uzp = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.uzp.x2.nxv8bf16(<vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %uzp
 }
 
-define { <vscale x 4 x i32>, <vscale x 4 x i32> } @uzp_x2_i32(<vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm) nounwind {
+define <vscale x 4 x i32> @uzp_x2_i32(<vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm) nounwind {
 ; CHECK-LABEL: uzp_x2_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp { z0.s, z1.s }, z0.s, z1.s
+; CHECK-NEXT:    uzp { z2.s, z3.s }, z0.s, z1.s
+; CHECK-NEXT:    add z0.s, z2.s, z0.s
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.uzp.x2.nxv4i32(<vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm)
-  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+  %uzp = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.uzp.x2.nxv4i32(<vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm)
+  %uzp0 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %uzp, 0
+  %add = add <vscale x 4 x i32> %uzp0, %zn
+  ret <vscale x 4 x i32> %add
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float> } @uzp_x2_f32(<vscale x 4 x float> %zn, <vscale x 4 x float> %zm) nounwind {
+define <vscale x 4 x float> @uzp_x2_f32(<vscale x 4 x float> %zn, <vscale x 4 x float> %zm) nounwind {
 ; CHECK-LABEL: uzp_x2_f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp { z0.s, z1.s }, z0.s, z1.s
+; CHECK-NEXT:    uzp { z2.s, z3.s }, z0.s, z1.s
+; CHECK-NEXT:    fadd z0.s, z2.s, z0.s
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.uzp.x2.nxv4f32(<vscale x 4 x float> %zn, <vscale x 4 x float> %zm)
-  ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+  %uzp = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.uzp.x2.nxv4f32(<vscale x 4 x float> %zn, <vscale x 4 x float> %zm)
+  %uzp0 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %uzp, 0
+  %add = fadd <vscale x 4 x float> %uzp0, %zn
+  ret <vscale x 4 x float> %add
 }
 
-define { <vscale x 2 x i64>, <vscale x 2 x i64> } @uzp_x2_i64(<vscale x 2 x i64> %zn, <vscale x 2 x i64> %zm) nounwind {
+define <vscale x 2 x i64> @uzp_x2_i64(<vscale x 2 x i64> %zn, <vscale x 2 x i64> %zm) nounwind {
 ; CHECK-LABEL: uzp_x2_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp { z0.d, z1.d }, z0.d, z1.d
+; CHECK-NEXT:    uzp { z2.d, z3.d }, z0.d, z1.d
+; CHECK-NEXT:    add z0.d, z2.d, z0.d
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.uzp.x2.nxv2i64(<vscale x 2 x i64> %zn, <vscale x 2 x i64> %zm)
-  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+  %uzp = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.uzp.x2.nxv2i64(<vscale x 2 x i64> %zn, <vscale x 2 x i64> %zm)
+  %uzp0 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %uzp, 0
+  %add = add <vscale x 2 x i64> %uzp0, %zn
+  ret <vscale x 2 x i64> %add
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double> } @uzp_x2_f64(<vscale x 2 x double> %zn, <vscale x 2 x double> %zm) nounwind {
+define <vscale x 2 x double> @uzp_x2_f64(<vscale x 2 x double> %zn, <vscale x 2 x double> %zm) nounwind {
 ; CHECK-LABEL: uzp_x2_f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp { z0.d, z1.d }, z0.d, z1.d
+; CHECK-NEXT:    uzp { z2.d, z3.d }, z0.d, z1.d
+; CHECK-NEXT:    fadd z0.d, z2.d, z0.d
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.uzp.x2.nxv2f64(<vscale x 2 x double> %zn, <vscale x 2 x double> %zm)
-  ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+  %uzp = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.uzp.x2.nxv2f64(<vscale x 2 x double> %zn, <vscale x 2 x double> %zm)
+  %uzp0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %uzp, 0
+  %add = fadd <vscale x 2 x double> %uzp0, %zn
+  ret <vscale x 2 x double> %add
 }
 
 
 ; == 128-bit elements ==
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @uzpq_x2_i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) nounwind {
+; NOTE: For the 128-bit case we only need to check the <vscale x 16 x i8>
+; variant to ensure the tuple result starts at the correct register multiple.
+; The other variants all test the same code path.
+define <vscale x 16 x i8> @uzpq_x2_i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) nounwind {
 ; CHECK-LABEL: uzpq_x2_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp { z0.q, z1.q }, z0.q, z1.q
+; CHECK-NEXT:    uzp { z2.q, z3.q }, z0.q, z1.q
+; CHECK-NEXT:    add z0.b, z2.b, z0.b
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzpq.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
-  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+  %uzp = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzpq.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
+  %uzp0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %uzp, 0
+  %add = add <vscale x 16 x i8> %uzp0, %zn
+  ret <vscale x 16 x i8> %add
 }
 
 define { <vscale x 8 x i16>, <vscale x 8 x i16> } @uzpq_x2_i16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) nounwind {
@@ -150,9 +177,18 @@ define { <vscale x 2 x double>, <vscale x 2 x double> } @uzpq_x2_f64(<vscale x 2
   ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @uzpq_x2_i8_not_tied(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) nounwind {
+; CHECK-LABEL: uzpq_x2_i8_not_tied:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp { z0.q, z1.q }, z1.q, z2.q
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzpq.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
 
 ; == 8 to 64-bit elements ==
-declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzp.x2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzp.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
 declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.uzp.x2.nxv8i16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
 declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.uzp.x2.nxv4i32(<vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm)
 declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.uzp.x2.nxv2i64(<vscale x 2 x i64> %zn, <vscale x 2 x i64> %zm)

diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-uzpx4.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-uzpx4.ll
index 1b1f7d3d0162..fe3ddbf747ac 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-uzpx4.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-uzpx4.ll
@@ -3,105 +3,105 @@
 
 ; == 8 to 64-bit elements ==
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @uzp_x4_i8(<vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) nounwind {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @uzp_x4_i8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) nounwind {
 ; CHECK-LABEL: uzp_x4_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.b - z3.b }, { z0.b - z3.b }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.b - z3.b }, { z4.b - z7.b }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzp.x4.nxv16i8(<vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @uzp_x4_i16(<vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) nounwind {
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @uzp_x4_i16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) nounwind {
 ; CHECK-LABEL: uzp_x4_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.h - z3.h }, { z0.h - z3.h }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.h - z3.h }, { z4.h - z7.h }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.uzp.x4.nxv8i16(<vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @uzp_x4_f16(<vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) nounwind {
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @uzp_x4_f16(<vscale x 8 x half> %unused, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) nounwind {
 ; CHECK-LABEL: uzp_x4_f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.h - z3.h }, { z0.h - z3.h }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.h - z3.h }, { z4.h - z7.h }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.uzp.x4.nxv8f16(<vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
-define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @uzp_x4_bf16(<vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) nounwind {
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @uzp_x4_bf16(<vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) nounwind {
 ; CHECK-LABEL: uzp_x4_bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.h - z3.h }, { z0.h - z3.h }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.h - z3.h }, { z4.h - z7.h }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.uzp.x4.nxv8bf16(<vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
-define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @uzp_x4_i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) nounwind {
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @uzp_x4_i32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) nounwind {
 ; CHECK-LABEL: uzp_x4_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.s - z3.s }, { z0.s - z3.s }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.s - z3.s }, { z4.s - z7.s }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.uzp.x4.nxv4i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4)
   ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @uzp_x4_f32(<vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4) nounwind {
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @uzp_x4_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4) nounwind {
 ; CHECK-LABEL: uzp_x4_f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.s - z3.s }, { z0.s - z3.s }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.s - z3.s }, { z4.s - z7.s }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.uzp.x4.nxv4f32(<vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4)
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
-define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @uzp_x4_i64(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) nounwind {
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @uzp_x4_i64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) nounwind {
 ; CHECK-LABEL: uzp_x4_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.d - z3.d }, { z0.d - z3.d }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.d - z3.d }, { z4.d - z7.d }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.uzp.x4.nxv2i64(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4)
   ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @uzp_x4_f64(<vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4) nounwind {
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @uzp_x4_f64(<vscale x 4 x double> %unused, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4) nounwind {
 ; CHECK-LABEL: uzp_x4_f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.d - z3.d }, { z0.d - z3.d }
+; CHECK-NEXT:    mov z27.d, z5.d
+; CHECK-NEXT:    mov z26.d, z4.d
+; CHECK-NEXT:    mov z25.d, z3.d
+; CHECK-NEXT:    mov z24.d, z2.d
+; CHECK-NEXT:    uzp { z0.d - z3.d }, { z24.d - z27.d }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.uzp.x4.nxv2f64(<vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4)
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
@@ -110,105 +110,105 @@ define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <v
 
 ; == 128-bit elements ==
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @zipq_x4_i8(<vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) nounwind {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @zipq_x4_i8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) nounwind {
 ; CHECK-LABEL: zipq_x4_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.q - z3.q }, { z0.q - z3.q }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzpq.x4.nxv16i8(<vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @zipq_x4_i16(<vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) nounwind {
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @zipq_x4_i16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) nounwind {
 ; CHECK-LABEL: zipq_x4_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.q - z3.q }, { z0.q - z3.q }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.uzpq.x4.nxv8i16(<vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @zipq_x4_f16(<vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) nounwind {
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @zipq_x4_f16(<vscale x 8 x half> %unused, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) nounwind {
 ; CHECK-LABEL: zipq_x4_f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.q - z3.q }, { z0.q - z3.q }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.uzpq.x4.nxv8f16(<vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
-define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @zipq_x4_bf16(<vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) nounwind {
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @zipq_x4_bf16(<vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) nounwind {
 ; CHECK-LABEL: zipq_x4_bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.q - z3.q }, { z0.q - z3.q }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.uzpq.x4.nxv8bf16(<vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
-define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @zipq_x4_i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) nounwind {
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @zipq_x4_i32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) nounwind {
 ; CHECK-LABEL: zipq_x4_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.q - z3.q }, { z0.q - z3.q }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.uzpq.x4.nxv4i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4)
   ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @zipq_x4_f32(<vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4) nounwind {
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @zipq_x4_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4) nounwind {
 ; CHECK-LABEL: zipq_x4_f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.q - z3.q }, { z0.q - z3.q }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.uzpq.x4.nxv4f32(<vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4)
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
-define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @zipq_x4_i64(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) nounwind {
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @zipq_x4_i64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) nounwind {
 ; CHECK-LABEL: zipq_x4_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.q - z3.q }, { z0.q - z3.q }
+; CHECK-NEXT:    mov z7.d, z4.d
+; CHECK-NEXT:    mov z6.d, z3.d
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.uzpq.x4.nxv2i64(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4)
   ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @zipq_x4_f64(<vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4) nounwind {
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @zipq_x4_f64(<vscale x 4 x double> %unused, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4) nounwind {
 ; CHECK-LABEL: zipq_x4_f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; CHECK-NEXT:    uzp { z0.q - z3.q }, { z0.q - z3.q }
+; CHECK-NEXT:    mov z27.d, z5.d
+; CHECK-NEXT:    mov z26.d, z4.d
+; CHECK-NEXT:    mov z25.d, z3.d
+; CHECK-NEXT:    mov z24.d, z2.d
+; CHECK-NEXT:    uzp { z0.q - z3.q }, { z24.q - z27.q }
 ; CHECK-NEXT:    ret
   %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.uzpq.x4.nxv2f64(<vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4)
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res

diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-zipx2.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-zipx2.ll
index 2de3c7ed2cc3..c7a3c8cd0159 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-zipx2.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-zipx2.ll
@@ -3,31 +3,40 @@
 
 ; == 8 to 64-bit elements ==
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @zip_x2_i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) nounwind {
+define <vscale x 16 x i8> @zip_x2_i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) nounwind {
 ; CHECK-LABEL: zip_x2_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip { z0.b, z1.b }, z0.b, z1.b
+; CHECK-NEXT:    zip { z2.b, z3.b }, z0.b, z1.b
+; CHECK-NEXT:    add z0.b, z2.b, z0.b
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.zip.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
-  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+  %zip = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.zip.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
+  %zip0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %zip, 0
+  %add = add <vscale x 16 x i8> %zip0, %zn
+  ret <vscale x 16 x i8> %add
 }
 
-define { <vscale x 8 x i16>, <vscale x 8 x i16> } @zip_x2_i16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) nounwind {
+define <vscale x 8 x i16> @zip_x2_i16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) nounwind {
 ; CHECK-LABEL: zip_x2_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip { z0.h, z1.h }, z0.h, z1.h
+; CHECK-NEXT:    zip { z2.h, z3.h }, z0.h, z1.h
+; CHECK-NEXT:    add z0.h, z2.h, z0.h
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.zip.x2.nxv8i16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
-  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+  %zip = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.zip.x2.nxv8i16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm)
+  %zip0 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %zip, 0
+  %add = add <vscale x 8 x i16> %zip0, %zn
+  ret <vscale x 8 x i16> %add
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half> } @zip_x2_f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm) nounwind {
+define <vscale x 8 x half> @zip_x2_f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm) nounwind {
 ; CHECK-LABEL: zip_x2_f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip { z0.h, z1.h }, z0.h, z1.h
+; CHECK-NEXT:    zip { z2.h, z3.h }, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z2.h, z0.h
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.zip.x2.nxv8f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
-  ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+  %zip = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.zip.x2.nxv8f16(<vscale x 8 x half> %zn, <vscale x 8 x half> %zm)
+  %zip0 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } %zip, 0
+  %add = fadd <vscale x 8 x half> %zip0, %zn
+  ret <vscale x 8 x half> %add
 }
 
 define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @zip_x2_bf16(<vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm) nounwind {
@@ -35,65 +44,74 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @zip_x2_bf16(<vscale x 8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    zip { z0.h, z1.h }, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.zip.x2.nxv8bf16(<vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
-  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+  %zip = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.zip.x2.nxv8bf16(<vscale x 8 x bfloat> %zn, <vscale x 8 x bfloat> %zm)
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %zip
 }
 
-define { <vscale x 4 x i32>, <vscale x 4 x i32> } @zip_x2_i32(<vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm) nounwind {
+define <vscale x 4 x i32> @zip_x2_i32(<vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm) nounwind {
 ; CHECK-LABEL: zip_x2_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip { z0.s, z1.s }, z0.s, z1.s
+; CHECK-NEXT:    zip { z2.s, z3.s }, z0.s, z1.s
+; CHECK-NEXT:    add z0.s, z2.s, z0.s
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.zip.x2.nxv4i32(<vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm)
-  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+  %zip = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.zip.x2.nxv4i32(<vscale x 4 x i32> %zn, <vscale x 4 x i32> %zm)
+  %zip0 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %zip, 0
+  %add = add <vscale x 4 x i32> %zip0, %zn
+  ret <vscale x 4 x i32> %add
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float> } @zip_x2_f32(<vscale x 4 x float> %zn, <vscale x 4 x float> %zm) nounwind {
+define <vscale x 4 x float> @zip_x2_f32(<vscale x 4 x float> %zn, <vscale x 4 x float> %zm) nounwind {
 ; CHECK-LABEL: zip_x2_f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip { z0.s, z1.s }, z0.s, z1.s
+; CHECK-NEXT:    zip { z2.s, z3.s }, z0.s, z1.s
+; CHECK-NEXT:    fadd z0.s, z2.s, z0.s
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.zip.x2.nxv4f32(<vscale x 4 x float> %zn, <vscale x 4 x float> %zm)
-  ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+  %zip = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.zip.x2.nxv4f32(<vscale x 4 x float> %zn, <vscale x 4 x float> %zm)
+  %zip0 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %zip, 0
+  %add = fadd <vscale x 4 x float> %zip0, %zn
+  ret <vscale x 4 x float> %add
 }
 
-define { <vscale x 2 x i64>, <vscale x 2 x i64> } @zip_x2_i64(<vscale x 2 x i64> %zn, <vscale x 2 x i64> %zm) nounwind {
+define <vscale x 2 x i64> @zip_x2_i64(<vscale x 2 x i64> %zn, <vscale x 2 x i64> %zm) nounwind {
 ; CHECK-LABEL: zip_x2_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip { z0.d, z1.d }, z0.d, z1.d
+; CHECK-NEXT:    zip { z2.d, z3.d }, z0.d, z1.d
+; CHECK-NEXT:    add z0.d, z2.d, z0.d
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.zip.x2.nxv2i64(<vscale x 2 x i64> %zn, <vscale x 2 x i64> %zm)
-  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+  %zip = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.zip.x2.nxv2i64(<vscale x 2 x i64> %zn, <vscale x 2 x i64> %zm)
+  %zip0 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %zip, 0
+  %add = add <vscale x 2 x i64> %zip0, %zn
+  ret <vscale x 2 x i64> %add
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double> } @zip_x2_f64(<vscale x 2 x double> %zn, <vscale x 2 x double> %zm) nounwind {
+define <vscale x 2 x double> @zip_x2_f64(<vscale x 2 x double> %zn, <vscale x 2 x double> %zm) nounwind {
 ; CHECK-LABEL: zip_x2_f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip { z0.d, z1.d }, z0.d, z1.d
+; CHECK-NEXT:    zip { z2.d, z3.d }, z0.d, z1.d
+; CHECK-NEXT:    fadd z0.d, z2.d, z0.d
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.zip.x2.nxv2f64(<vscale x 2 x double> %zn, <vscale x 2 x double> %zm)
-  ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
-}
-
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @zip_x2_i8_not_tied(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) nounwind {
-; CHECK-LABEL: zip_x2_i8_not_tied:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip { z0.b, z1.b }, z1.b, z2.b
-; CHECK-NEXT:    ret
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.zip.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
-  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+  %zip = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.zip.x2.nxv2f64(<vscale x 2 x double> %zn, <vscale x 2 x double> %zm)
+  %zip0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %zip, 0
+  %add = fadd <vscale x 2 x double> %zip0, %zn
+  ret <vscale x 2 x double> %add
 }
 
 
 ; == 128-bit elements ==
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @zipq_x2_i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) nounwind {
+; NOTE: For the 128-bit case we only need to check the <vscale x 16 x i8>
+; variant to ensure the tuple result starts at the correct register multiple.
+; The other variants all test the same code path.
+define <vscale x 16 x i8> @zipq_x2_i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) nounwind {
 ; CHECK-LABEL: zipq_x2_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip { z0.q, z1.q }, z0.q, z1.q
+; CHECK-NEXT:    zip { z2.q, z3.q }, z0.q, z1.q
+; CHECK-NEXT:    add z0.b, z2.b, z0.b
 ; CHECK-NEXT:    ret
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.zipq.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
-  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+  %zip = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.zipq.x2.nxv16i8(<vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm)
+  %zip0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %zip, 0
+  %add = add <vscale x 16 x i8> %zip0, %zn
+  ret <vscale x 16 x i8> %add
 }
 
 define { <vscale x 8 x i16>, <vscale x 8 x i16> } @zipq_x2_i16(<vscale x 8 x i16> %zn, <vscale x 8 x i16> %zm) nounwind {


        

