[llvm] 48a1652 - [AArch64] optimize manual addp (#181549)

via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 17 01:16:08 PST 2026


Author: Folkert de Vries
Date: 2026-02-17T10:16:03+01:00
New Revision: 48a1652eb63cb77ae563bb11f4c48bf90e7914d5

URL: https://github.com/llvm/llvm-project/commit/48a1652eb63cb77ae563bb11f4c48bf90e7914d5
DIFF: https://github.com/llvm/llvm-project/commit/48a1652eb63cb77ae563bb11f4c48bf90e7914d5.diff

LOG: [AArch64] optimize manual addp (#181549)

Add patterns for the 64-bit (double-word) forms of `addp` and `faddp`.

```
vpadd_s16_intrin:
        addp    v0.4h, v0.4h, v1.4h
        ret

vpadd_s16_manual:
        uzp1    v2.4h, v0.4h, v1.4h
        uzp2    v0.4h, v0.4h, v1.4h
        add     v0.4h, v2.4h, v0.4h
        ret
```

So, add some logic to catch the deinterleaving shuffle and convert to
`addp`.

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.td
    llvm/test/CodeGen/AArch64/addp-shuffle.ll
    llvm/test/CodeGen/AArch64/arm64-trn.ll
    llvm/test/CodeGen/AArch64/arm64-uzp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 0979122fdb9c7..a70f0e2f5da0c 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -10861,12 +10861,21 @@ def : Pat<(AArch64faddp (v4f16 (extract_subvector (v8f16 FPR128:$Rn), (i64 0))),
 def : Pat<(v2i64 (add (AArch64zip1 (v2i64 FPR128:$Rn), (v2i64 FPR128:$Rm)),
                       (AArch64zip2 (v2i64 FPR128:$Rn), (v2i64 FPR128:$Rm)))),
           (v2i64 (ADDPv2i64 $Rn, $Rm))>;
+def : Pat<(v2i32 (add (AArch64zip1 (v2i32 FPR64:$Rn), (v2i32 FPR64:$Rm)),
+                      (AArch64zip2 (v2i32 FPR64:$Rn), (v2i32 FPR64:$Rm)))),
+          (v2i32 (ADDPv2i32 $Rn, $Rm))>;
 def : Pat<(v4i32 (add (AArch64uzp1 (v4i32 FPR128:$Rn), (v4i32 FPR128:$Rm)),
                       (AArch64uzp2 (v4i32 FPR128:$Rn), (v4i32 FPR128:$Rm)))),
           (v4i32 (ADDPv4i32 $Rn, $Rm))>;
+def : Pat<(v4i16 (add (AArch64uzp1 (v4i16 FPR64:$Rn), (v4i16 FPR64:$Rm)),
+                      (AArch64uzp2 (v4i16 FPR64:$Rn), (v4i16 FPR64:$Rm)))),
+          (v4i16 (ADDPv4i16 $Rn, $Rm))>;
 def : Pat<(v8i16 (add (AArch64uzp1 (v8i16 FPR128:$Rn), (v8i16 FPR128:$Rm)),
                       (AArch64uzp2 (v8i16 FPR128:$Rn), (v8i16 FPR128:$Rm)))),
           (v8i16 (ADDPv8i16 $Rn, $Rm))>;
+def : Pat<(v8i8 (add (AArch64uzp1 (v8i8 FPR64:$Rn), (v8i8 FPR64:$Rm)),
+                     (AArch64uzp2 (v8i8 FPR64:$Rn), (v8i8 FPR64:$Rm)))),
+          (v8i8 (ADDPv8i8 $Rn, $Rm))>;
 def : Pat<(v16i8 (add (AArch64uzp1 (v16i8 FPR128:$Rn), (v16i8 FPR128:$Rm)),
                       (AArch64uzp2 (v16i8 FPR128:$Rn), (v16i8 FPR128:$Rm)))),
           (v16i8 (ADDPv16i8 $Rn, $Rm))>;
@@ -10886,13 +10895,20 @@ def : Pat<(v8i8  (add (trunc (v8i16 (bitconvert FPR128:$Rn))),
 def : Pat<(v2f64 (fadd (AArch64zip1 (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm)),
                        (AArch64zip2 (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm)))),
           (v2f64 (FADDPv2f64 $Rn, $Rm))>;
+def : Pat<(v2f32 (fadd (AArch64zip1 (v2f32 FPR64:$Rn), (v2f32 FPR64:$Rm)),
+                       (AArch64zip2 (v2f32 FPR64:$Rn), (v2f32 FPR64:$Rm)))),
+          (v2f32 (FADDPv2f32 $Rn, $Rm))>;
 def : Pat<(v4f32 (fadd (AArch64uzp1 (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm)),
                        (AArch64uzp2 (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm)))),
           (v4f32 (FADDPv4f32 $Rn, $Rm))>;
-let Predicates = [HasFullFP16] in
+let Predicates = [HasFullFP16] in {
+def : Pat<(v4f16 (fadd (AArch64uzp1 (v4f16 FPR64:$Rn), (v4f16 FPR64:$Rm)),
+                       (AArch64uzp2 (v4f16 FPR64:$Rn), (v4f16 FPR64:$Rm)))),
+          (v4f16 (FADDPv4f16 $Rn, $Rm))>;
 def : Pat<(v8f16 (fadd (AArch64uzp1 (v8f16 FPR128:$Rn), (v8f16 FPR128:$Rm)),
                        (AArch64uzp2 (v8f16 FPR128:$Rn), (v8f16 FPR128:$Rm)))),
           (v8f16 (FADDPv8f16 $Rn, $Rm))>;
+}
 
 // Scalar 64-bit shifts in FPR64 registers.
 def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),

diff --git a/llvm/test/CodeGen/AArch64/addp-shuffle.ll b/llvm/test/CodeGen/AArch64/addp-shuffle.ll
index 54c96820285d3..7ba01adad011c 100644
--- a/llvm/test/CodeGen/AArch64/addp-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/addp-shuffle.ll
@@ -208,3 +208,125 @@ define <4 x i32> @sdot(<4 x i32> %z, <16 x i8> %a, <16 x i8> %b) {
   %n = add <4 x i32> %z, %o
   ret <4 x i32> %n
 }
+
+define <8 x i8> @manual_addp_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: manual_addp_v8i8:
+; CHECK:       // %bb.0: // %start
+; CHECK-NEXT:    addp v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    ret
+start:
+  %0 = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %1 = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %2 = add <8 x i8> %0, %1
+  ret <8 x i8> %2
+}
+
+define <4 x i16> @manual_addp_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: manual_addp_v4i16:
+; CHECK:       // %bb.0: // %start
+; CHECK-NEXT:    addp v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    ret
+start:
+  %0 = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %1 = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %2 = add <4 x i16> %0, %1
+  ret <4 x i16> %2
+}
+
+define <2 x i32> @manual_addp_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: manual_addp_v2i32:
+; CHECK:       // %bb.0: // %start
+; CHECK-NEXT:    addp v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    ret
+start:
+  %0 = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+  %1 = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+  %2 = add <2 x i32> %0, %1
+  ret <2 x i32> %2
+}
+
+define <16 x i8> @manual_addp_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: manual_addp_v16i8:
+; CHECK:       // %bb.0: // %start
+; CHECK-NEXT:    addp v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ret
+start:
+  %0 = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %1 = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %2 = add <16 x i8> %0, %1
+  ret <16 x i8> %2
+}
+
+define <8 x i16> @manual_addp_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: manual_addp_v8i16:
+; CHECK:       // %bb.0: // %start
+; CHECK-NEXT:    addp v0.8h, v0.8h, v1.8h
+; CHECK-NEXT:    ret
+start:
+  %0 = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %1 = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %2 = add <8 x i16> %0, %1
+  ret <8 x i16> %2
+}
+
+define <4 x i32> @manual_addp_v4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: manual_addp_v4i32:
+; CHECK:       // %bb.0: // %start
+; CHECK-NEXT:    addp v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+start:
+  %0 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %2 = add <4 x i32> %0, %1
+  ret <4 x i32> %2
+}
+
+define <2 x i64> @manual_addp_v2i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: manual_addp_v2i64:
+; CHECK:       // %bb.0: // %start
+; CHECK-NEXT:    addp v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    ret
+start:
+  %0 = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+  %1 = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+  %2 = add <2 x i64> %0, %1
+  ret <2 x i64> %2
+}
+
+define <8 x half> @manual_faddp_v8f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-NOFP16-LABEL: manual_faddp_v8f16:
+; CHECK-NOFP16:       // %bb.0: // %start
+; CHECK-NOFP16-NEXT:    uzp1 v2.8h, v0.8h, v1.8h
+; CHECK-NOFP16-NEXT:    uzp2 v0.8h, v0.8h, v1.8h
+; CHECK-NOFP16-NEXT:    fcvtl v1.4s, v0.4h
+; CHECK-NOFP16-NEXT:    fcvtl v3.4s, v2.4h
+; CHECK-NOFP16-NEXT:    fcvtl2 v0.4s, v0.8h
+; CHECK-NOFP16-NEXT:    fcvtl2 v2.4s, v2.8h
+; CHECK-NOFP16-NEXT:    fadd v1.4s, v3.4s, v1.4s
+; CHECK-NOFP16-NEXT:    fadd v2.4s, v2.4s, v0.4s
+; CHECK-NOFP16-NEXT:    fcvtn v0.4h, v1.4s
+; CHECK-NOFP16-NEXT:    fcvtn2 v0.8h, v2.4s
+; CHECK-NOFP16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: manual_faddp_v8f16:
+; CHECK-FP16:       // %bb.0: // %start
+; CHECK-FP16-NEXT:    faddp v0.8h, v0.8h, v1.8h
+; CHECK-FP16-NEXT:    ret
+start:
+  %0 = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %1 = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %2 = fadd <8 x half> %0, %1
+  ret <8 x half> %2
+}
+
+define <2 x float> @manual_faddp_v2f32(<2 x float> %a, <2 x float> %b) {
+; CHECK-LABEL: manual_faddp_v2f32:
+; CHECK:       // %bb.0: // %start
+; CHECK-NEXT:    faddp v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    ret
+start:
+  %0 = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
+  %1 = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
+  %2 = fadd <2 x float> %0, %1
+  ret <2 x float> %2
+}

diff --git a/llvm/test/CodeGen/AArch64/arm64-trn.ll b/llvm/test/CodeGen/AArch64/arm64-trn.ll
index 120c2d13a7ab7..85a56c042136c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-trn.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-trn.ll
@@ -85,18 +85,14 @@ define <2 x i32> @vtrni32(ptr %A, ptr %B) nounwind {
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr d0, [x0]
 ; CHECKLE-NEXT:    ldr d1, [x1]
-; CHECKLE-NEXT:    zip1 v2.2s, v0.2s, v1.2s
-; CHECKLE-NEXT:    zip2 v0.2s, v0.2s, v1.2s
-; CHECKLE-NEXT:    add v0.2s, v2.2s, v0.2s
+; CHECKLE-NEXT:    addp v0.2s, v0.2s, v1.2s
 ; CHECKLE-NEXT:    ret
 ;
 ; CHECKBE-LABEL: vtrni32:
 ; CHECKBE:       // %bb.0:
 ; CHECKBE-NEXT:    ld1 { v0.2s }, [x0]
 ; CHECKBE-NEXT:    ld1 { v1.2s }, [x1]
-; CHECKBE-NEXT:    zip1 v2.2s, v0.2s, v1.2s
-; CHECKBE-NEXT:    zip2 v0.2s, v0.2s, v1.2s
-; CHECKBE-NEXT:    add v0.2s, v2.2s, v0.2s
+; CHECKBE-NEXT:    addp v0.2s, v0.2s, v1.2s
 ; CHECKBE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECKBE-NEXT:    ret
 	%tmp1 = load <2 x i32>, ptr %A
@@ -112,18 +108,14 @@ define <2 x float> @vtrnf(ptr %A, ptr %B) nounwind {
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr d0, [x0]
 ; CHECKLE-NEXT:    ldr d1, [x1]
-; CHECKLE-NEXT:    zip1 v2.2s, v0.2s, v1.2s
-; CHECKLE-NEXT:    zip2 v0.2s, v0.2s, v1.2s
-; CHECKLE-NEXT:    fadd v0.2s, v2.2s, v0.2s
+; CHECKLE-NEXT:    faddp v0.2s, v0.2s, v1.2s
 ; CHECKLE-NEXT:    ret
 ;
 ; CHECKBE-LABEL: vtrnf:
 ; CHECKBE:       // %bb.0:
 ; CHECKBE-NEXT:    ld1 { v0.2s }, [x0]
 ; CHECKBE-NEXT:    ld1 { v1.2s }, [x1]
-; CHECKBE-NEXT:    zip1 v2.2s, v0.2s, v1.2s
-; CHECKBE-NEXT:    zip2 v0.2s, v0.2s, v1.2s
-; CHECKBE-NEXT:    fadd v0.2s, v2.2s, v0.2s
+; CHECKBE-NEXT:    faddp v0.2s, v0.2s, v1.2s
 ; CHECKBE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECKBE-NEXT:    ret
 	%tmp1 = load <2 x float>, ptr %A

diff --git a/llvm/test/CodeGen/AArch64/arm64-uzp.ll b/llvm/test/CodeGen/AArch64/arm64-uzp.ll
index 02068428c3a4f..79c346c7aa6d6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-uzp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-uzp.ll
@@ -5,9 +5,7 @@
 define <8 x i8> @vuzpi8(<8 x i8> %A, <8 x i8> %B) nounwind {
 ; CHECK-LABEL: vuzpi8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1.8b v2, v0, v1
-; CHECK-NEXT:    uzp2.8b v0, v0, v1
-; CHECK-NEXT:    add.8b v0, v2, v0
+; CHECK-NEXT:    addp.8b v0, v0, v1
 ; CHECK-NEXT:    ret
   %tmp3 = shufflevector <8 x i8> %A, <8 x i8> %B, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   %tmp4 = shufflevector <8 x i8> %A, <8 x i8> %B, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -18,9 +16,7 @@ define <8 x i8> @vuzpi8(<8 x i8> %A, <8 x i8> %B) nounwind {
 define <4 x i16> @vuzpi16(<4 x i16> %A, <4 x i16> %B) nounwind {
 ; CHECK-LABEL: vuzpi16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1.4h v2, v0, v1
-; CHECK-NEXT:    uzp2.4h v0, v0, v1
-; CHECK-NEXT:    add.4h v0, v2, v0
+; CHECK-NEXT:    addp.4h v0, v0, v1
 ; CHECK-NEXT:    ret
   %tmp3 = shufflevector <4 x i16> %A, <4 x i16> %B, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %tmp4 = shufflevector <4 x i16> %A, <4 x i16> %B, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -85,9 +81,7 @@ define <4 x float> @vuzpQf(<4 x float> %A, <4 x float> %B) nounwind {
 define <8 x i8> @vuzpi8_undef(<8 x i8> %A, <8 x i8> %B) nounwind {
 ; CHECK-LABEL: vuzpi8_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1.8b v2, v0, v1
-; CHECK-NEXT:    uzp2.8b v0, v0, v1
-; CHECK-NEXT:    add.8b v0, v2, v0
+; CHECK-NEXT:    addp.8b v0, v0, v1
 ; CHECK-NEXT:    ret
   %tmp3 = shufflevector <8 x i8> %A, <8 x i8> %B, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 8, i32 10, i32 12, i32 14>
   %tmp4 = shufflevector <8 x i8> %A, <8 x i8> %B, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15>


        


More information about the llvm-commits mailing list