[llvm] [AArch64] Use `ZIP1/2` over `INS` for vector concat (PR #142427)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 2 09:41:03 PDT 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-globalisel
Author: Csanád Hajdú (Il-Capitano)
<details>
<summary>Changes</summary>
Some 64-bit `INS` instructions are equivalent to `ZIP1` or `ZIP2` instructions, e.g. `mov v0.d[1], v1.d[0]` is the same as `zip1 v0.2d, v0.2d, v1.2d`. Unlike `INS`, whose destination is also an input (it modifies the register in place), `ZIP` writes to a destination register that is separate from both of its source operands, which can help eliminate SIMD register moves, leading to better code size and performance.
The motivation for this change is eliminating the SIMD register moves in this 4x4 f32 matrix transpose function: https://godbolt.org/z/f31Yz6q61
---
Patch is 170.91 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/142427.diff
85 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64InstrInfo.td (+6-7)
- (modified) llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir (+5-5)
- (modified) llvm/test/CodeGen/AArch64/aarch64-addv.ll (+1-2)
- (modified) llvm/test/CodeGen/AArch64/aarch64-be-bv.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/aarch64-combine-add-zext.ll (+3-3)
- (modified) llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll (+4-4)
- (modified) llvm/test/CodeGen/AArch64/arm64-dup.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/arm64-fp128.ll (+15-15)
- (modified) llvm/test/CodeGen/AArch64/arm64-neon-copy.ll (+26-19)
- (modified) llvm/test/CodeGen/AArch64/arm64-nvcast.ll (+1-2)
- (modified) llvm/test/CodeGen/AArch64/arm64-tbl.ll (+1-2)
- (modified) llvm/test/CodeGen/AArch64/arm64-vcombine.ll (+5-2)
- (modified) llvm/test/CodeGen/AArch64/arm64-vshift.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/arm64-zip.ll (+4-8)
- (modified) llvm/test/CodeGen/AArch64/bf16-shuffle.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/bf16-vector-shuffle.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/bitcast-extend.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/build-vector-two-dup.ll (+21-21)
- (modified) llvm/test/CodeGen/AArch64/complex-deinterleaving-mixed-cases.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/complex-deinterleaving-uniform-cases.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/concat-vector-add-combine.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/concat-vector.ll (+22-38)
- (modified) llvm/test/CodeGen/AArch64/concatbinop.ll (+15-15)
- (modified) llvm/test/CodeGen/AArch64/ctlz.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/ctpop.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/cttz.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/dag-combine-trunc-build-vec.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/extbinopload.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/extend_inreg_of_concat_subvectors.ll (+4-6)
- (modified) llvm/test/CodeGen/AArch64/extract-vector-elt.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/fabs.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/faddsub.ll (+4-4)
- (modified) llvm/test/CodeGen/AArch64/fcmp.ll (+42-42)
- (modified) llvm/test/CodeGen/AArch64/fcopysign.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/fcvt.ll (+7-7)
- (modified) llvm/test/CodeGen/AArch64/fdiv.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/fminimummaximum.ll (+4-4)
- (modified) llvm/test/CodeGen/AArch64/fminmax.ll (+4-4)
- (modified) llvm/test/CodeGen/AArch64/fmla.ll (+24-24)
- (modified) llvm/test/CodeGen/AArch64/fmul.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/fneg.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/fp-intrinsics-vector.ll (+3-3)
- (modified) llvm/test/CodeGen/AArch64/fp-maximumnum-minimumnum.ll (+14-14)
- (modified) llvm/test/CodeGen/AArch64/fp16-vector-shuffle.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll (+36-36)
- (modified) llvm/test/CodeGen/AArch64/fptoi.ll (+80-46)
- (modified) llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/fptrunc.ll (+6-6)
- (modified) llvm/test/CodeGen/AArch64/fsqrt.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/icmp.ll (+6-6)
- (modified) llvm/test/CodeGen/AArch64/insert-subvector.ll (+21-24)
- (modified) llvm/test/CodeGen/AArch64/insertextract.ll (+6-6)
- (modified) llvm/test/CodeGen/AArch64/insertshuffleload.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/itofp.ll (+10-10)
- (modified) llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/neon-compare-instructions.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/neon-dotreduce.ll (+21-24)
- (modified) llvm/test/CodeGen/AArch64/neon-extracttruncate.ll (+4-4)
- (modified) llvm/test/CodeGen/AArch64/neon-wide-splat.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/pr135821.ll (+4-4)
- (modified) llvm/test/CodeGen/AArch64/ptradd.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/pull-negations-after-concat-of-truncates.ll (+3-3)
- (modified) llvm/test/CodeGen/AArch64/shuffle-mask-legal.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/shuffle-tbl34.ll (+46-46)
- (modified) llvm/test/CodeGen/AArch64/shuffles.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/shufflevector.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll (+1-1)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-concat.ll (+7-7)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll (+6-8)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-fp128.ll (+8-8)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll (+6-8)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll (+4-4)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll (+8-8)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll (+8-8)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-ptest.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-trunc-stores.ll (+4-4)
- (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-trunc.ll (+6-8)
- (modified) llvm/test/CodeGen/AArch64/trunc-to-tbl.ll (+2-2)
- (modified) llvm/test/CodeGen/AArch64/trunc.ll (+4-4)
- (modified) llvm/test/CodeGen/AArch64/vecreduce-add.ll (+14-16)
- (modified) llvm/test/CodeGen/AArch64/xtn.ll (+3-3)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 96d0146c1e752..a68d831d8fe5a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -7590,13 +7590,13 @@ def : Pat<(vector_extract (v8bf16 V128:$Rn), VectorIndexH:$idx),
(bf16 (DUPi16 V128:$Rn, VectorIndexH:$idx))>;
// All concat_vectors operations are canonicalised to act on i64 vectors for
-// AArch64. In the general case we need an instruction, which had just as well be
-// INS.
+// AArch64. In the general case we can use ZIP1 to concatenate the low halves of
+// two SIMD registers.
multiclass ConcatPat<ValueType DstTy, ValueType SrcTy,
ComplexPattern ExtractHigh> {
def : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
- (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
- (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;
+ (ZIP1v2i64 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
+ (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub))>;
// If the high lanes are zero we can instead emit a d->d register mov, which
// will implicitly clear the upper bits.
@@ -7607,11 +7607,10 @@ multiclass ConcatPat<ValueType DstTy, ValueType SrcTy,
def : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
(INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;
- // Concatting the high half of two vectors is the insert of the first
- // into the low half of the second.
+ // Concatting the high half of two vectors is equivalent to ZIP2v2i64.
def : Pat<(DstTy (concat_vectors (ExtractHigh (DstTy V128:$Rn)),
(ExtractHigh (DstTy V128:$Rm)))),
- (INSvi64lane V128:$Rm, (i64 0), V128:$Rn, (i64 1))>;
+ (ZIP2v2i64 V128:$Rn, V128:$Rm)>;
}
defm : ConcatPat<v2i64, v1i64, extract_high_v2i64>;
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir
index 53d8b82509481..d2ed95ddbeac9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir
@@ -25,8 +25,8 @@ body: |
; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY]], %subreg.dsub
- ; CHECK-NEXT: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
- ; CHECK-NEXT: $q0 = COPY [[INSvi64lane]]
+ ; CHECK-NEXT: [[ZIP1v2i64_:%[0-9]+]]:fpr128 = ZIP1v2i64 [[INSERT_SUBREG1]], [[INSERT_SUBREG]]
+ ; CHECK-NEXT: $q0 = COPY [[ZIP1v2i64_]]
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:fpr(<2 x s32>) = COPY $d0
%1:fpr(<2 x s32>) = COPY $d1
@@ -59,8 +59,8 @@ body: |
; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY]], %subreg.dsub
- ; CHECK-NEXT: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
- ; CHECK-NEXT: $q0 = COPY [[INSvi64lane]]
+ ; CHECK-NEXT: [[ZIP1v2i64_:%[0-9]+]]:fpr128 = ZIP1v2i64 [[INSERT_SUBREG1]], [[INSERT_SUBREG]]
+ ; CHECK-NEXT: $q0 = COPY [[ZIP1v2i64_]]
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:fpr(<4 x s16>) = COPY $d0
%1:fpr(<4 x s16>) = COPY $d1
@@ -110,7 +110,7 @@ body: |
; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], %b, %subreg.dsub
; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], %a, %subreg.dsub
- ; CHECK-NEXT: %concat:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
+ ; CHECK-NEXT: %concat:fpr128 = ZIP1v2i64 [[INSERT_SUBREG1]], [[INSERT_SUBREG]]
; CHECK-NEXT: $q0 = COPY %concat
; CHECK-NEXT: RET_ReallyLR implicit $q0
%a:fpr(<8 x s8>) = COPY $d0
diff --git a/llvm/test/CodeGen/AArch64/aarch64-addv.ll b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
index bc675343adc08..4c056b0bdf608 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-addv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
@@ -447,7 +447,7 @@ define i64 @addv_v3i64(<3 x i64> %a) {
; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: mov v2.d[1], xzr
; CHECK-SD-NEXT: add v0.2d, v0.2d, v2.2d
; CHECK-SD-NEXT: addp d0, v0.2d
@@ -492,4 +492,3 @@ entry:
%arg1 = call i128 @llvm.vector.reduce.add.v2i128(<2 x i128> %a)
ret i128 %arg1
}
-
diff --git a/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll b/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll
index 4afe362686440..2f645bb15b862 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll
@@ -1036,8 +1036,8 @@ define <2 x double> @test_v1f64(<1 x double> %0, ptr %1) {
; CHECK: // %bb.0:
; CHECK-NEXT: mvni v1.2s, #31, msl #16
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: mov v1.d[1], v0.d[0]
-; CHECK-NEXT: ext v0.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT: zip1 v0.2d, v1.2d, v0.2d
+; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: ret
%vec = shufflevector <1 x double> <double 0xFFE00000FFE00000>, <1 x double> %0, <2 x i32> <i32 0, i32 1>
ret <2 x double> %vec
diff --git a/llvm/test/CodeGen/AArch64/aarch64-combine-add-zext.ll b/llvm/test/CodeGen/AArch64/aarch64-combine-add-zext.ll
index b1b995931ac02..291bdc52be91a 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-combine-add-zext.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-combine-add-zext.ll
@@ -6,7 +6,7 @@ define i16 @test_add_zext_v8i16(<8 x i8> %a, <8 x i8> %b) local_unnamed_addr #0
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-NEXT: uaddlv h0, v0.16b
; CHECK-NEXT: umov w0, v0.h[0]
; CHECK-NEXT: ret
@@ -22,7 +22,7 @@ define i32 @test_add_zext_v4i32(<4 x i16> %a, <4 x i16> %b) local_unnamed_addr #
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-NEXT: uaddlv s0, v0.8h
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
@@ -38,7 +38,7 @@ define i64 @test_add_zext_v2i64(<2 x i32> %a, <2 x i32> %b) local_unnamed_addr #
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-NEXT: uaddlv d0, v0.4s
; CHECK-NEXT: fmov x0, d0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
index 8a503bb65c079..ddfbc23237441 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
@@ -714,7 +714,7 @@ define i64 @sminv_v3i64(<3 x i64> %a) {
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: mov x8, #9223372036854775807 // =0x7fffffffffffffff
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: mov v2.d[1], x8
; CHECK-SD-NEXT: cmgt v1.2d, v2.2d, v0.2d
; CHECK-SD-NEXT: bif v0.16b, v2.16b, v1.16b
@@ -1074,7 +1074,7 @@ define i64 @smaxv_v3i64(<3 x i64> %a) {
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: mov v2.d[1], x8
; CHECK-SD-NEXT: cmgt v1.2d, v0.2d, v2.2d
; CHECK-SD-NEXT: bif v0.16b, v2.16b, v1.16b
@@ -1432,7 +1432,7 @@ define i64 @uminv_v3i64(<3 x i64> %a) {
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: mov x8, #-1 // =0xffffffffffffffff
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: mov v2.d[1], x8
; CHECK-SD-NEXT: cmhi v1.2d, v2.2d, v0.2d
; CHECK-SD-NEXT: bif v0.16b, v2.16b, v1.16b
@@ -1788,7 +1788,7 @@ define i64 @umaxv_v3i64(<3 x i64> %a) {
; CHECK-SD-NEXT: mov v3.16b, v2.16b
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: mov v3.d[1], xzr
; CHECK-SD-NEXT: cmhi v3.2d, v0.2d, v3.2d
; CHECK-SD-NEXT: ext v4.16b, v3.16b, v3.16b, #8
diff --git a/llvm/test/CodeGen/AArch64/arm64-dup.ll b/llvm/test/CodeGen/AArch64/arm64-dup.ll
index 4c28ea7592202..0ea11c33b9c46 100644
--- a/llvm/test/CodeGen/AArch64/arm64-dup.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-dup.ll
@@ -602,7 +602,7 @@ define <4 x i32> @dup_const24(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C) nounwind
; CHECK-SD-NEXT: movk w8, #128, lsl #16
; CHECK-SD-NEXT: dup.4s v3, w8
; CHECK-SD-NEXT: add.2s v0, v0, v3
-; CHECK-SD-NEXT: mov.d v0[1], v1[0]
+; CHECK-SD-NEXT: zip1.2d v0, v0, v1
; CHECK-SD-NEXT: add.4s v1, v2, v3
; CHECK-SD-NEXT: eor.16b v0, v1, v0
; CHECK-SD-NEXT: ret
@@ -615,7 +615,7 @@ define <4 x i32> @dup_const24(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C) nounwind
; CHECK-GI-NEXT: adrp x8, .LCPI41_0
; CHECK-GI-NEXT: add.2s v0, v0, v3
; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI41_0]
-; CHECK-GI-NEXT: mov.d v0[1], v1[0]
+; CHECK-GI-NEXT: zip1.2d v0, v0, v1
; CHECK-GI-NEXT: add.4s v1, v2, v3
; CHECK-GI-NEXT: eor.16b v0, v1, v0
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
index 7eb26096ed156..8193e49bb71d9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
@@ -635,17 +635,17 @@ define <2 x i64> @vec_fptosi_64(<2 x fp128> %val) {
; CHECK-SD-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-SD-NEXT: .cfi_def_cfa_offset 48
; CHECK-SD-NEXT: .cfi_offset w30, -16
-; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
; CHECK-SD-NEXT: mov v0.16b, v1.16b
; CHECK-SD-NEXT: bl __fixtfdi
; CHECK-SD-NEXT: fmov d0, x0
-; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-SD-NEXT: bl __fixtfdi
; CHECK-SD-NEXT: fmov d0, x0
-; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-SD-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: add sp, sp, #48
; CHECK-SD-NEXT: ret
;
@@ -719,17 +719,17 @@ define <2 x i64> @vec_fptoui_64(<2 x fp128> %val) {
; CHECK-SD-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-SD-NEXT: .cfi_def_cfa_offset 48
; CHECK-SD-NEXT: .cfi_offset w30, -16
-; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
; CHECK-SD-NEXT: mov v0.16b, v1.16b
; CHECK-SD-NEXT: bl __fixunstfdi
; CHECK-SD-NEXT: fmov d0, x0
-; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-SD-NEXT: bl __fixunstfdi
; CHECK-SD-NEXT: fmov d0, x0
-; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-SD-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: add sp, sp, #48
; CHECK-SD-NEXT: ret
;
@@ -1278,17 +1278,17 @@ define <2 x double> @vec_round_f64(<2 x fp128> %val) {
; CHECK-SD-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-SD-NEXT: .cfi_def_cfa_offset 48
; CHECK-SD-NEXT: .cfi_offset w30, -16
-; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
; CHECK-SD-NEXT: mov v0.16b, v1.16b
; CHECK-SD-NEXT: bl __trunctfdf2
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-SD-NEXT: bl __trunctfdf2
-; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: add sp, sp, #48
; CHECK-SD-NEXT: ret
;
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
index 51f1351a5edf4..2ac4df6e73679 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -1716,7 +1716,7 @@ entry:
define <16 x i8> @test_concat_v16i8_v16i8_v16i8(<16 x i8> %x, <16 x i8> %y) #0 {
; CHECK-SD-LABEL: test_concat_v16i8_v16i8_v16i8:
; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v16i8_v16i8_v16i8:
@@ -1736,7 +1736,7 @@ define <16 x i8> @test_concat_v16i8_v8i8_v16i8(<8 x i8> %x, <16 x i8> %y) #0 {
; CHECK-SD-LABEL: test_concat_v16i8_v8i8_v16i8:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v16i8_v8i8_v16i8:
@@ -1780,7 +1780,7 @@ define <16 x i8> @test_concat_v16i8_v16i8_v8i8(<16 x i8> %x, <8 x i8> %y) #0 {
; CHECK-SD-LABEL: test_concat_v16i8_v16i8_v8i8:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v16i8_v16i8_v8i8:
@@ -1845,7 +1845,7 @@ define <16 x i8> @test_concat_v16i8_v8i8_v8i8(<8 x i8> %x, <8 x i8> %y) #0 {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v16i8_v8i8_v8i8:
@@ -1909,7 +1909,7 @@ entry:
define <8 x i16> @test_concat_v8i16_v8i16_v8i16(<8 x i16> %x, <8 x i16> %y) #0 {
; CHECK-SD-LABEL: test_concat_v8i16_v8i16_v8i16:
; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v8i16_v8i16_v8i16:
@@ -1929,7 +1929,7 @@ define <8 x i16> @test_concat_v8i16_v4i16_v8i16(<4 x i16> %x, <8 x i16> %y) #0 {
; CHECK-SD-LABEL: test_concat_v8i16_v4i16_v8i16:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v8i16_v4i16_v8i16:
@@ -1961,7 +1961,7 @@ define <8 x i16> @test_concat_v8i16_v8i16_v4i16(<8 x i16> %x, <4 x i16> %y) #0 {
; CHECK-SD-LABEL: test_concat_v8i16_v8i16_v4i16:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v8i16_v8i16_v4i16:
@@ -2002,7 +2002,7 @@ define <8 x i16> @test_concat_v8i16_v4i16_v4i16(<4 x i16> %x, <4 x i16> %y) #0 {
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v8i16_v4i16_v4i16:
@@ -2042,7 +2042,7 @@ entry:
define <4 x i32> @test_concat_v4i32_v4i32_v4i32(<4 x i32> %x, <4 x i32> %y) #0 {
; CHECK-SD-LABEL: test_concat_v4i32_v4i32_v4i32:
; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v4i32_v4i32_v4i32:
@@ -2062,7 +2062,7 @@ define <4 x i32> @test_concat_v4i32_v2i32_v4i32(<2 x i32> %x, <4 x i32> %y) #0 {
; CHECK-SD-LABEL: test_concat_v4i32_v2i32_v4i32:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v4i32_v2i32_v4i32:
@@ -2088,7 +2088,7 @@ define <4 x i32> @test_concat_v4i32_v4i32_v2i32(<4 x i32> %x, <2 x i32> %y) #0 {
; CHECK-SD-LABEL: test_concat_v4i32_v4i32_v2i32:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v4i32_v4i32_v2i32:
@@ -2117,7 +2117,7 @@ define <4 x i32> @test_concat_v4i32_v2i32_v2i32(<2 x i32> %x, <2 x i32> %y) #0 {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-NEXT: ret
entry:
%vecinit6 = shufflevector <2 x i32> %x, <2 x i32> %y, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -2151,7 +2151,7 @@ define <2 x i64> @test_concat_v2i64_v2i64_v1i64(<2 x i64> %x, <1 x i64> %y) #0 {
; CHECK-SD-LABEL: test_concat_v2i64_v2i64_v1i64:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: test_concat_v2i64_v2i64_v1i64:
@@ -2169,12 +2169,19 @@ entry:
}
define <2 x i64> @test_concat_v2i64_v1i64_v1i64(<1 x i64> %x, <1 x i64> %y) #0 {
-; CHECK-LABEL: test_concat_v2i64_v1i64_v1i64:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: mov v0.d[1], v1.d[0]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_concat_v2i64_v1i64_v1i64:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-SD-NEXT: zip1 v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_concat_v2i64_v1i64_v1i64:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: m...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/142427
More information about the llvm-commits
mailing list