[llvm] f7018ba - [AArch64] Add patterns for add(uzp1(x, y), uzp2(x, y)) -> addp.

David Green via llvm-commits <llvm-commits at lists.llvm.org>
Fri Jun 7 08:10:02 PDT 2024


Author: David Green
Date: 2024-06-07T16:09:57+01:00
New Revision: f7018ba0eeaad8dc3e1917cfb986fc9689d72e85

URL: https://github.com/llvm/llvm-project/commit/f7018ba0eeaad8dc3e1917cfb986fc9689d72e85
DIFF: https://github.com/llvm/llvm-project/commit/f7018ba0eeaad8dc3e1917cfb986fc9689d72e85.diff

LOG: [AArch64] Add patterns for add(uzp1(x, y), uzp2(x, y)) -> addp.

If we extract the even lanes and the odd lanes of the same pair of vectors
and add them together, we can use a single addp instruction instead.
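
For example, deinterleaving a vector into its even and odd halves and
adding them, roughly as in the addp-shuffle.ll test updated below:

  %r0 = shufflevector <8 x i32> %a, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %r1 = shufflevector <8 x i32> %a, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %o = add <4 x i32> %r0, %r1

previously selected to uzp1 + uzp2 + add, and with these patterns becomes a
single instruction (with %a split across v0 and v1):

  addp v0.4s, v0.4s, v1.4s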

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.td
    llvm/test/CodeGen/AArch64/addp-shuffle.ll
    llvm/test/CodeGen/AArch64/arm64-uzp.ll
    llvm/test/CodeGen/AArch64/insert-extend.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 081d64921ee1c..2ed7850404ce0 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -9405,6 +9405,20 @@ def : Pat<(AArch64faddp (v4f16 (extract_subvector (v8f16 FPR128:$Rn), (i64 0))),
                         (v4f16 (extract_subvector (v8f16 FPR128:$Rn), (i64 4)))),
           (v4f16 (EXTRACT_SUBREG (FADDPv8f16 $Rn, $Rn), dsub))>;
 
+// add(uzp1(X, Y), uzp2(X, Y)) -> addp(X, Y)
+def : Pat<(v2i64 (add (AArch64zip1 (v2i64 FPR128:$Rn), (v2i64 FPR128:$Rm)),
+                      (AArch64zip2 (v2i64 FPR128:$Rn), (v2i64 FPR128:$Rm)))),
+          (v2i64 (ADDPv2i64 $Rn, $Rm))>;
+def : Pat<(v4i32 (add (AArch64uzp1 (v4i32 FPR128:$Rn), (v4i32 FPR128:$Rm)),
+                      (AArch64uzp2 (v4i32 FPR128:$Rn), (v4i32 FPR128:$Rm)))),
+          (v4i32 (ADDPv4i32 $Rn, $Rm))>;
+def : Pat<(v8i16 (add (AArch64uzp1 (v8i16 FPR128:$Rn), (v8i16 FPR128:$Rm)),
+                      (AArch64uzp2 (v8i16 FPR128:$Rn), (v8i16 FPR128:$Rm)))),
+          (v8i16 (ADDPv8i16 $Rn, $Rm))>;
+def : Pat<(v16i8 (add (AArch64uzp1 (v16i8 FPR128:$Rn), (v16i8 FPR128:$Rm)),
+                      (AArch64uzp2 (v16i8 FPR128:$Rn), (v16i8 FPR128:$Rm)))),
+          (v16i8 (ADDPv16i8 $Rn, $Rm))>;
+
 // Scalar 64-bit shifts in FPR64 registers.
 def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
           (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
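
Note that the v2i64 pattern matches zip1/zip2 rather than uzp1/uzp2: with
only two lanes per vector, uzp1(x, y) = [x[0], y[0]] = zip1(x, y) and
uzp2(x, y) = [x[1], y[1]] = zip2(x, y), and the two-lane unzips are
canonicalized to zips before selection (see the zip1/zip2 instructions in
the old deinterleave_shuffle_v8i64 checks below), so that is the form the
pattern needs to match.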

diff --git a/llvm/test/CodeGen/AArch64/addp-shuffle.ll b/llvm/test/CodeGen/AArch64/addp-shuffle.ll
index c15a84c7b3a2a..a187e7e94c20f 100644
--- a/llvm/test/CodeGen/AArch64/addp-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/addp-shuffle.ll
@@ -5,9 +5,7 @@
 define <4 x i32> @deinterleave_shuffle_v8i32(<8 x i32> %a) {
 ; CHECK-LABEL: deinterleave_shuffle_v8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 v2.4s, v0.4s, v1.4s
-; CHECK-NEXT:    uzp2 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    add v0.4s, v2.4s, v0.4s
+; CHECK-NEXT:    addp v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
   %r0 = shufflevector <8 x i32> %a, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %r1 = shufflevector <8 x i32> %a, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -18,9 +16,7 @@ define <4 x i32> @deinterleave_shuffle_v8i32(<8 x i32> %a) {
 define <4 x i32> @deinterleave_shuffle_v8i32_c(<8 x i32> %a) {
 ; CHECK-LABEL: deinterleave_shuffle_v8i32_c:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 v2.4s, v0.4s, v1.4s
-; CHECK-NEXT:    uzp2 v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    add v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    addp v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
   %r0 = shufflevector <8 x i32> %a, <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %r1 = shufflevector <8 x i32> %a, <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -45,9 +41,7 @@ define <2 x i32> @deinterleave_shuffle_v4i32(<4 x i32> %a) {
 define <8 x i16> @deinterleave_shuffle_v16i16(<16 x i16> %a) {
 ; CHECK-LABEL: deinterleave_shuffle_v16i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 v2.8h, v0.8h, v1.8h
-; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v1.8h
-; CHECK-NEXT:    add v0.8h, v2.8h, v0.8h
+; CHECK-NEXT:    addp v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
   %r0 = shufflevector <16 x i16> %a, <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %r1 = shufflevector <16 x i16> %a, <16 x i16> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -58,9 +52,7 @@ define <8 x i16> @deinterleave_shuffle_v16i16(<16 x i16> %a) {
 define <16 x i8> @deinterleave_shuffle_v32i8(<32 x i8> %a) {
 ; CHECK-LABEL: deinterleave_shuffle_v32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 v2.16b, v0.16b, v1.16b
-; CHECK-NEXT:    uzp2 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    add v0.16b, v2.16b, v0.16b
+; CHECK-NEXT:    addp v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %r0 = shufflevector <32 x i8> %a, <32 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   %r1 = shufflevector <32 x i8> %a, <32 x i8> poison, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
@@ -71,12 +63,9 @@ define <16 x i8> @deinterleave_shuffle_v32i8(<32 x i8> %a) {
 define <4 x i64> @deinterleave_shuffle_v8i64(<8 x i64> %a) {
 ; CHECK-LABEL: deinterleave_shuffle_v8i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 v4.2d, v2.2d, v3.2d
-; CHECK-NEXT:    zip1 v5.2d, v0.2d, v1.2d
-; CHECK-NEXT:    zip2 v2.2d, v2.2d, v3.2d
-; CHECK-NEXT:    zip2 v0.2d, v0.2d, v1.2d
-; CHECK-NEXT:    add v1.2d, v4.2d, v2.2d
-; CHECK-NEXT:    add v0.2d, v5.2d, v0.2d
+; CHECK-NEXT:    addp v2.2d, v2.2d, v3.2d
+; CHECK-NEXT:    addp v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    mov v1.16b, v2.16b
 ; CHECK-NEXT:    ret
   %r0 = shufflevector <8 x i64> %a, <8 x i64> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %r1 = shufflevector <8 x i64> %a, <8 x i64> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -164,15 +153,9 @@ define <4 x i32> @udot(<4 x i32> %z, <16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    umull v3.4s, v3.4h, v4.4h
 ; CHECK-NEXT:    umull2 v4.4s, v1.8h, v2.8h
 ; CHECK-NEXT:    umull v1.4s, v1.4h, v2.4h
-; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v5.4s
-; CHECK-NEXT:    uzp2 v3.4s, v3.4s, v5.4s
-; CHECK-NEXT:    uzp1 v6.4s, v1.4s, v4.4s
-; CHECK-NEXT:    uzp2 v1.4s, v1.4s, v4.4s
-; CHECK-NEXT:    add v2.4s, v2.4s, v3.4s
-; CHECK-NEXT:    add v1.4s, v6.4s, v1.4s
-; CHECK-NEXT:    uzp1 v3.4s, v2.4s, v1.4s
-; CHECK-NEXT:    uzp2 v1.4s, v2.4s, v1.4s
-; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    addp v2.4s, v3.4s, v5.4s
+; CHECK-NEXT:    addp v1.4s, v1.4s, v4.4s
+; CHECK-NEXT:    addp v1.4s, v2.4s, v1.4s
 ; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
   %za = zext <16 x i8> %a to <16 x i32>
@@ -199,15 +182,9 @@ define <4 x i32> @sdot(<4 x i32> %z, <16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    smull v3.4s, v3.4h, v4.4h
 ; CHECK-NEXT:    smull2 v4.4s, v1.8h, v2.8h
 ; CHECK-NEXT:    smull v1.4s, v1.4h, v2.4h
-; CHECK-NEXT:    uzp1 v2.4s, v3.4s, v5.4s
-; CHECK-NEXT:    uzp2 v3.4s, v3.4s, v5.4s
-; CHECK-NEXT:    uzp1 v6.4s, v1.4s, v4.4s
-; CHECK-NEXT:    uzp2 v1.4s, v1.4s, v4.4s
-; CHECK-NEXT:    add v2.4s, v2.4s, v3.4s
-; CHECK-NEXT:    add v1.4s, v6.4s, v1.4s
-; CHECK-NEXT:    uzp1 v3.4s, v2.4s, v1.4s
-; CHECK-NEXT:    uzp2 v1.4s, v2.4s, v1.4s
-; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    addp v2.4s, v3.4s, v5.4s
+; CHECK-NEXT:    addp v1.4s, v1.4s, v4.4s
+; CHECK-NEXT:    addp v1.4s, v2.4s, v1.4s
 ; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
   %za = sext <16 x i8> %a to <16 x i32>

diff --git a/llvm/test/CodeGen/AArch64/arm64-uzp.ll b/llvm/test/CodeGen/AArch64/arm64-uzp.ll
index bd6bf1bf15784..02068428c3a4f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-uzp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-uzp.ll
@@ -33,11 +33,11 @@ define <16 x i8> @vuzpQi8(<16 x i8> %A, <16 x i8> %B) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1.16b v2, v0, v1
 ; CHECK-NEXT:    uzp2.16b v0, v0, v1
-; CHECK-NEXT:    add.16b v0, v2, v0
+; CHECK-NEXT:    eor.16b v0, v2, v0
 ; CHECK-NEXT:    ret
   %tmp3 = shufflevector <16 x i8> %A, <16 x i8> %B, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   %tmp4 = shufflevector <16 x i8> %A, <16 x i8> %B, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
-  %tmp5 = add <16 x i8> %tmp3, %tmp4
+  %tmp5 = xor <16 x i8> %tmp3, %tmp4
   ret <16 x i8> %tmp5
 }
 
@@ -46,11 +46,11 @@ define <8 x i16> @vuzpQi16(<8 x i16> %A, <8 x i16> %B) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1.8h v2, v0, v1
 ; CHECK-NEXT:    uzp2.8h v0, v0, v1
-; CHECK-NEXT:    add.8h v0, v2, v0
+; CHECK-NEXT:    eor.16b v0, v2, v0
 ; CHECK-NEXT:    ret
   %tmp3 = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %tmp4 = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %tmp5 = add <8 x i16> %tmp3, %tmp4
+  %tmp5 = xor <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }
 
@@ -59,11 +59,11 @@ define <4 x i32> @vuzpQi32(<4 x i32> %A, <4 x i32> %B) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1.4s v2, v0, v1
 ; CHECK-NEXT:    uzp2.4s v0, v0, v1
-; CHECK-NEXT:    add.4s v0, v2, v0
+; CHECK-NEXT:    eor.16b v0, v2, v0
 ; CHECK-NEXT:    ret
   %tmp3 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %tmp4 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %tmp5 = add <4 x i32> %tmp3, %tmp4
+  %tmp5 = xor <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
 
@@ -72,11 +72,11 @@ define <4 x float> @vuzpQf(<4 x float> %A, <4 x float> %B) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1.4s v2, v0, v1
 ; CHECK-NEXT:    uzp2.4s v0, v0, v1
-; CHECK-NEXT:    fadd.4s v0, v2, v0
+; CHECK-NEXT:    fsub.4s v0, v2, v0
 ; CHECK-NEXT:    ret
   %tmp3 = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %tmp4 = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %tmp5 = fadd <4 x float> %tmp3, %tmp4
+  %tmp5 = fsub <4 x float> %tmp3, %tmp4
   ret <4 x float> %tmp5
 }
 
@@ -100,11 +100,11 @@ define <8 x i16> @vuzpQi16_undef1(<8 x i16> %A, <8 x i16> %B) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1.8h v2, v0, v1
 ; CHECK-NEXT:    uzp2.8h v0, v0, v1
-; CHECK-NEXT:    add.8h v0, v2, v0
+; CHECK-NEXT:    eor.16b v0, v2, v0
 ; CHECK-NEXT:    ret
   %tmp3 = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> <i32 0, i32 undef, i32 4, i32 undef, i32 8, i32 10, i32 12, i32 14>
   %tmp4 = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 undef, i32 11, i32 13, i32 15>
-  %tmp5 = add <8 x i16> %tmp3, %tmp4
+  %tmp5 = xor <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }
 
@@ -113,11 +113,11 @@ define <8 x i16> @vuzpQi16_undef0(<8 x i16> %A, <8 x i16> %B) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1.8h v2, v0, v1
 ; CHECK-NEXT:    uzp2.8h v0, v0, v1
-; CHECK-NEXT:    add.8h v0, v2, v0
+; CHECK-NEXT:    eor.16b v0, v2, v0
 ; CHECK-NEXT:    ret
   %tmp3 = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> <i32 undef, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %tmp4 = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> <i32 undef, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %tmp5 = add <8 x i16> %tmp3, %tmp4
+  %tmp5 = xor <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }
 
@@ -126,11 +126,11 @@ define <8 x i16> @vuzpQi16_undef01(<8 x i16> %A, <8 x i16> %B) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1.8h v2, v0, v1
 ; CHECK-NEXT:    uzp2.8h v0, v0, v1
-; CHECK-NEXT:    add.8h v0, v2, v0
+; CHECK-NEXT:    eor.16b v0, v2, v0
 ; CHECK-NEXT:    ret
   %tmp3 = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> <i32 undef, i32 undef, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %tmp4 = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> <i32 undef, i32 undef, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %tmp5 = add <8 x i16> %tmp3, %tmp4
+  %tmp5 = xor <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }
 
@@ -139,10 +139,10 @@ define <8 x i16> @vuzpQi16_undef012(<8 x i16> %A, <8 x i16> %B) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1.8h v2, v0, v1
 ; CHECK-NEXT:    uzp2.8h v0, v0, v1
-; CHECK-NEXT:    add.8h v0, v2, v0
+; CHECK-NEXT:    eor.16b v0, v2, v0
 ; CHECK-NEXT:    ret
   %tmp3 = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 6, i32 8, i32 10, i32 12, i32 14>
   %tmp4 = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %tmp5 = add <8 x i16> %tmp3, %tmp4
+  %tmp5 = xor <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }

diff --git a/llvm/test/CodeGen/AArch64/insert-extend.ll b/llvm/test/CodeGen/AArch64/insert-extend.ll
index 0b730f6e77156..851fb0d03e8aa 100644
--- a/llvm/test/CodeGen/AArch64/insert-extend.ll
+++ b/llvm/test/CodeGen/AArch64/insert-extend.ll
@@ -94,75 +94,73 @@ define i32 @large(ptr nocapture noundef readonly %p1, i32 noundef %st1, ptr noca
 ; CHECK-NEXT:    mov v3.d[1], v5.d[1]
 ; CHECK-NEXT:    uzp1 v1.4s, v4.4s, v0.4s
 ; CHECK-NEXT:    uzp2 v4.4s, v4.4s, v0.4s
-; CHECK-NEXT:    uzp2 v5.4s, v2.4s, v0.4s
-; CHECK-NEXT:    uzp1 v0.4s, v2.4s, v0.4s
-; CHECK-NEXT:    add v2.4s, v3.4s, v6.4s
+; CHECK-NEXT:    addp v0.4s, v2.4s, v0.4s
+; CHECK-NEXT:    add v5.4s, v3.4s, v6.4s
 ; CHECK-NEXT:    sub v3.4s, v6.4s, v3.4s
-; CHECK-NEXT:    sub v1.4s, v1.4s, v4.4s
-; CHECK-NEXT:    add v0.4s, v5.4s, v0.4s
-; CHECK-NEXT:    rev64 v4.4s, v2.4s
-; CHECK-NEXT:    rev64 v5.4s, v3.4s
-; CHECK-NEXT:    rev64 v6.4s, v1.4s
 ; CHECK-NEXT:    rev64 v7.4s, v0.4s
-; CHECK-NEXT:    addp v16.4s, v1.4s, v3.4s
-; CHECK-NEXT:    addp v17.4s, v0.4s, v2.4s
-; CHECK-NEXT:    sub v3.4s, v3.4s, v5.4s
-; CHECK-NEXT:    sub v2.4s, v2.4s, v4.4s
-; CHECK-NEXT:    sub v1.4s, v1.4s, v6.4s
+; CHECK-NEXT:    sub v1.4s, v1.4s, v4.4s
+; CHECK-NEXT:    rev64 v4.4s, v5.4s
+; CHECK-NEXT:    rev64 v6.4s, v3.4s
+; CHECK-NEXT:    addp v16.4s, v0.4s, v5.4s
+; CHECK-NEXT:    rev64 v2.4s, v1.4s
 ; CHECK-NEXT:    sub v0.4s, v0.4s, v7.4s
-; CHECK-NEXT:    zip1 v18.4s, v17.4s, v17.4s
-; CHECK-NEXT:    ext v4.16b, v17.16b, v2.16b, #4
-; CHECK-NEXT:    ext v5.16b, v16.16b, v3.16b, #4
+; CHECK-NEXT:    zip1 v21.4s, v16.4s, v16.4s
+; CHECK-NEXT:    sub v4.4s, v5.4s, v4.4s
+; CHECK-NEXT:    addp v5.4s, v1.4s, v3.4s
+; CHECK-NEXT:    sub v3.4s, v3.4s, v6.4s
+; CHECK-NEXT:    sub v1.4s, v1.4s, v2.4s
+; CHECK-NEXT:    ext v7.16b, v0.16b, v16.16b, #4
+; CHECK-NEXT:    ext v2.16b, v16.16b, v4.16b, #4
+; CHECK-NEXT:    ext v6.16b, v5.16b, v3.16b, #4
+; CHECK-NEXT:    mov v19.16b, v4.16b
+; CHECK-NEXT:    ext v17.16b, v1.16b, v5.16b, #8
 ; CHECK-NEXT:    mov v20.16b, v3.16b
-; CHECK-NEXT:    ext v6.16b, v1.16b, v16.16b, #8
-; CHECK-NEXT:    ext v7.16b, v0.16b, v17.16b, #4
-; CHECK-NEXT:    mov v21.16b, v2.16b
-; CHECK-NEXT:    trn2 v0.4s, v18.4s, v0.4s
-; CHECK-NEXT:    mov v20.s[2], v16.s[3]
-; CHECK-NEXT:    zip2 v4.4s, v4.4s, v17.4s
-; CHECK-NEXT:    zip2 v5.4s, v5.4s, v16.4s
-; CHECK-NEXT:    mov v21.s[2], v17.s[3]
-; CHECK-NEXT:    ext v19.16b, v6.16b, v1.16b, #4
+; CHECK-NEXT:    trn2 v0.4s, v21.4s, v0.4s
 ; CHECK-NEXT:    ext v7.16b, v7.16b, v7.16b, #4
-; CHECK-NEXT:    mov v1.s[2], v16.s[1]
-; CHECK-NEXT:    ext v2.16b, v2.16b, v4.16b, #12
-; CHECK-NEXT:    ext v3.16b, v3.16b, v5.16b, #12
-; CHECK-NEXT:    uzp2 v4.4s, v6.4s, v19.4s
-; CHECK-NEXT:    mov v5.16b, v7.16b
-; CHECK-NEXT:    mov v6.16b, v20.16b
-; CHECK-NEXT:    mov v18.16b, v1.16b
-; CHECK-NEXT:    mov v19.16b, v21.16b
+; CHECK-NEXT:    mov v19.s[2], v16.s[3]
+; CHECK-NEXT:    zip2 v2.4s, v2.4s, v16.4s
+; CHECK-NEXT:    zip2 v6.4s, v6.4s, v5.4s
+; CHECK-NEXT:    mov v20.s[2], v5.s[3]
+; CHECK-NEXT:    ext v18.16b, v17.16b, v1.16b, #4
+; CHECK-NEXT:    mov v1.s[2], v5.s[1]
+; CHECK-NEXT:    mov v21.16b, v7.16b
 ; CHECK-NEXT:    sub v7.4s, v0.4s, v7.4s
-; CHECK-NEXT:    mov v6.s[1], v16.s[2]
-; CHECK-NEXT:    mov v5.s[0], v17.s[1]
-; CHECK-NEXT:    mov v18.s[1], v16.s[0]
-; CHECK-NEXT:    mov v19.s[1], v17.s[2]
+; CHECK-NEXT:    ext v2.16b, v4.16b, v2.16b, #12
+; CHECK-NEXT:    ext v3.16b, v3.16b, v6.16b, #12
+; CHECK-NEXT:    uzp2 v4.4s, v17.4s, v18.4s
+; CHECK-NEXT:    mov v6.16b, v1.16b
+; CHECK-NEXT:    mov v17.16b, v19.16b
+; CHECK-NEXT:    mov v18.16b, v20.16b
+; CHECK-NEXT:    mov v21.s[0], v16.s[1]
+; CHECK-NEXT:    mov v6.s[1], v5.s[0]
+; CHECK-NEXT:    mov v17.s[1], v16.s[2]
+; CHECK-NEXT:    sub v16.4s, v19.4s, v2.4s
+; CHECK-NEXT:    mov v18.s[1], v5.s[2]
 ; CHECK-NEXT:    sub v1.4s, v1.4s, v4.4s
-; CHECK-NEXT:    sub v16.4s, v20.4s, v3.4s
-; CHECK-NEXT:    sub v17.4s, v21.4s, v2.4s
-; CHECK-NEXT:    add v3.4s, v6.4s, v3.4s
-; CHECK-NEXT:    add v0.4s, v0.4s, v5.4s
-; CHECK-NEXT:    add v4.4s, v18.4s, v4.4s
-; CHECK-NEXT:    add v2.4s, v19.4s, v2.4s
-; CHECK-NEXT:    mov v3.d[1], v16.d[1]
+; CHECK-NEXT:    sub v5.4s, v20.4s, v3.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v21.4s
+; CHECK-NEXT:    add v4.4s, v6.4s, v4.4s
+; CHECK-NEXT:    add v2.4s, v17.4s, v2.4s
+; CHECK-NEXT:    add v3.4s, v18.4s, v3.4s
 ; CHECK-NEXT:    mov v0.d[1], v7.d[1]
 ; CHECK-NEXT:    mov v4.d[1], v1.d[1]
-; CHECK-NEXT:    mov v2.d[1], v17.d[1]
-; CHECK-NEXT:    cmlt v1.8h, v3.8h, #0
-; CHECK-NEXT:    cmlt v5.8h, v0.8h, #0
-; CHECK-NEXT:    cmlt v6.8h, v4.8h, #0
-; CHECK-NEXT:    cmlt v7.8h, v2.8h, #0
-; CHECK-NEXT:    add v3.4s, v1.4s, v3.4s
-; CHECK-NEXT:    add v0.4s, v5.4s, v0.4s
-; CHECK-NEXT:    add v4.4s, v6.4s, v4.4s
-; CHECK-NEXT:    add v2.4s, v7.4s, v2.4s
-; CHECK-NEXT:    eor v1.16b, v3.16b, v1.16b
-; CHECK-NEXT:    eor v0.16b, v0.16b, v5.16b
-; CHECK-NEXT:    eor v2.16b, v2.16b, v7.16b
-; CHECK-NEXT:    eor v3.16b, v4.16b, v6.16b
-; CHECK-NEXT:    add v0.4s, v0.4s, v3.4s
-; CHECK-NEXT:    add v1.4s, v2.4s, v1.4s
+; CHECK-NEXT:    mov v2.d[1], v16.d[1]
+; CHECK-NEXT:    mov v3.d[1], v5.d[1]
+; CHECK-NEXT:    cmlt v7.8h, v0.8h, #0
+; CHECK-NEXT:    cmlt v1.8h, v4.8h, #0
+; CHECK-NEXT:    cmlt v6.8h, v2.8h, #0
+; CHECK-NEXT:    cmlt v5.8h, v3.8h, #0
+; CHECK-NEXT:    add v0.4s, v7.4s, v0.4s
+; CHECK-NEXT:    add v4.4s, v1.4s, v4.4s
+; CHECK-NEXT:    add v2.4s, v6.4s, v2.4s
+; CHECK-NEXT:    add v3.4s, v5.4s, v3.4s
+; CHECK-NEXT:    eor v0.16b, v0.16b, v7.16b
+; CHECK-NEXT:    eor v1.16b, v4.16b, v1.16b
+; CHECK-NEXT:    eor v2.16b, v2.16b, v6.16b
+; CHECK-NEXT:    eor v3.16b, v3.16b, v5.16b
 ; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    add v2.4s, v2.4s, v3.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    addv s0, v0.4s
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    lsr w9, w8, #16

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
index e07645c27df72..fba324cfd74ac 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
@@ -611,14 +611,14 @@ define void @uzp_v8i16(ptr %a, ptr %b) #1 {
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uzp1 v2.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v1.8h
-; CHECK-NEXT:    add v0.8h, v2.8h, v0.8h
+; CHECK-NEXT:    eor v0.16b, v2.16b, v0.16b
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <8 x i16>, ptr %a
   %tmp2 = load <8 x i16>, ptr %b
   %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %tmp5 = add <8 x i16> %tmp3, %tmp4
+  %tmp5 = xor <8 x i16> %tmp3, %tmp4
   store <8 x i16> %tmp5, ptr %a
   ret void
 }


        

