[llvm] [AArch64] Match indexed forms of fmul/fmla/fmls (PR #144892)

Graham Hunter via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 20 03:42:56 PDT 2025


https://github.com/huntergr-arm updated https://github.com/llvm/llvm-project/pull/144892
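
The patterns added here target code where one multiplicand is a splat of the same lane within each 128-bit segment of a vector. A minimal sketch of the shape of IR involved, based on the precommitted tests below (names and the vscale_range(2,2)/SVE2p1 setup are taken from those tests; this is illustrative, not part of the patch):

  ; Splat lane 3 of each 128-bit segment of %ld.b, then multiply.
  %splat.lanes = shufflevector <8 x float> %ld.b, <8 x float> poison,
                               <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 7, i32 7, i32 7, i32 7>
  %res = fmul <8 x float> %ld.a, %splat.lanes

The segmented splat is lowered via dup_laneq (or trn1/trn2 for f64), which the new ISel patterns fold into the indexed form of the arithmetic instruction, e.g.:

  fmul z0.s, z0.s, z1.s[3]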

From 50480de8bf260743c0bf0b093c636ade19253636 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Thu, 19 Jun 2025 12:37:10 +0000
Subject: [PATCH 1/3] Test precommit

---
 .../CodeGen/AArch64/sve-indexed-arithmetic.ll | 383 ++++++++++++++++++
 1 file changed, 383 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll

diff --git a/llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll b/llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll
new file mode 100644
index 0000000000000..4d598cf5ee455
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll
@@ -0,0 +1,383 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck %s
+
+define void @fmul_indexed_f16_256b(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmul_indexed_f16_256b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    dupq z1.h, z0.h[2]
+; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <16 x half>, ptr %a
+  %ld.b = load <16 x half>, ptr %b
+  %splat.lanes = shufflevector <16 x half> %ld.a, <16 x half> poison, <16 x i32> <i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2,
+                                                                                  i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
+  %res = fmul <16 x half> %ld.a, %splat.lanes
+  store <16 x half> %res, ptr %c
+  ret void
+}
+
+define void @fmul_indexed_bf16_256b(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmul_indexed_bf16_256b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    dup v0.8h, v0.h[2]
+; CHECK-NEXT:    dup v1.8h, v1.h[2]
+; CHECK-NEXT:    shll v4.4s, v2.4h, #16
+; CHECK-NEXT:    shll v6.4s, v3.4h, #16
+; CHECK-NEXT:    shll2 v2.4s, v2.8h, #16
+; CHECK-NEXT:    shll2 v3.4s, v3.8h, #16
+; CHECK-NEXT:    shll v5.4s, v0.4h, #16
+; CHECK-NEXT:    shll v7.4s, v1.4h, #16
+; CHECK-NEXT:    shll2 v0.4s, v0.8h, #16
+; CHECK-NEXT:    shll2 v1.4s, v1.8h, #16
+; CHECK-NEXT:    fmul v4.4s, v4.4s, v5.4s
+; CHECK-NEXT:    fmul v5.4s, v6.4s, v7.4s
+; CHECK-NEXT:    fmul v0.4s, v2.4s, v0.4s
+; CHECK-NEXT:    fmul v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    bfcvtn v2.4h, v4.4s
+; CHECK-NEXT:    bfcvtn v3.4h, v5.4s
+; CHECK-NEXT:    bfcvtn2 v2.8h, v0.4s
+; CHECK-NEXT:    bfcvtn2 v3.8h, v1.4s
+; CHECK-NEXT:    stp q2, q3, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <16 x bfloat>, ptr %a
+  %ld.b = load <16 x bfloat>, ptr %b
+  %splat.lanes = shufflevector <16 x bfloat> %ld.b, <16 x bfloat> poison, <16 x i32> <i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2,
+                                                                                      i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
+  %res = fmul <16 x bfloat> %ld.a, %splat.lanes
+  store <16 x bfloat> %res, ptr %c
+  ret void
+}
+
+define void @fmul_indexed_f32_256b(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmul_indexed_f32_256b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x1]
+; CHECK-NEXT:    ldr z1, [x0]
+; CHECK-NEXT:    dupq z0.s, z0.s[3]
+; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <8 x float>, ptr %a
+  %ld.b = load <8 x float>, ptr %b
+  %splat.lanes = shufflevector <8 x float> %ld.b, <8 x float> poison, <8 x i32> <i32 3, i32 3, i32 3, i32 3,
+                                                                                 i32 7, i32 7, i32 7, i32 7>
+  %res = fmul <8 x float> %splat.lanes, %ld.a
+  store <8 x float> %res, ptr %c
+  ret void
+}
+
+define void @fmul_indexed_f64_256b_trn1(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmul_indexed_f64_256b_trn1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x1]
+; CHECK-NEXT:    ldr z1, [x0]
+; CHECK-NEXT:    trn1 z0.d, z0.d, z0.d
+; CHECK-NEXT:    fmul z0.d, z0.d, z1.d
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <4 x double>, ptr %a
+  %ld.b = load <4 x double>, ptr %b
+  %splat.lanes = shufflevector <4 x double> %ld.b, <4 x double> poison, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+  %res = fmul <4 x double> %splat.lanes, %ld.a
+  store <4 x double> %res, ptr %c
+  ret void
+}
+
+define void @fmul_indexed_f64_256b_trn2(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmul_indexed_f64_256b_trn2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x1]
+; CHECK-NEXT:    ldr z1, [x0]
+; CHECK-NEXT:    trn2 z0.d, z0.d, z0.d
+; CHECK-NEXT:    fmul z0.d, z1.d, z0.d
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <4 x double>, ptr %a
+  %ld.b = load <4 x double>, ptr %b
+  %splat.lanes = shufflevector <4 x double> %ld.b, <4 x double> poison, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
+  %res = fmul <4 x double> %ld.a, %splat.lanes
+  store <4 x double> %res, ptr %c
+  ret void
+}
+
+define void @fmla_indexed_f16_256b(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmla_indexed_f16_256b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x2]
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    dupq z2.h, z0.h[2]
+; CHECK-NEXT:    fmad z0.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <16 x half>, ptr %a
+  %ld.b = load <16 x half>, ptr %b
+  %ld.c = load <16 x half>, ptr %c
+  %splat.lanes = shufflevector <16 x half> %ld.a, <16 x half> poison, <16 x i32> <i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2,
+                                                                                  i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
+  %res = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> %ld.a, <16 x half> %splat.lanes, <16 x half> %ld.c)
+  store <16 x half> %res, ptr %c
+  ret void
+}
+
+define void @fmla_indexed_bf16_256b(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmla_indexed_bf16_256b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    dup v0.8h, v0.h[2]
+; CHECK-NEXT:    dup v1.8h, v1.h[2]
+; CHECK-NEXT:    shll v4.4s, v2.4h, #16
+; CHECK-NEXT:    shll v6.4s, v3.4h, #16
+; CHECK-NEXT:    shll2 v2.4s, v2.8h, #16
+; CHECK-NEXT:    shll2 v3.4s, v3.8h, #16
+; CHECK-NEXT:    shll v5.4s, v0.4h, #16
+; CHECK-NEXT:    shll v7.4s, v1.4h, #16
+; CHECK-NEXT:    shll2 v0.4s, v0.8h, #16
+; CHECK-NEXT:    shll2 v1.4s, v1.8h, #16
+; CHECK-NEXT:    fmul v4.4s, v4.4s, v5.4s
+; CHECK-NEXT:    fmul v5.4s, v6.4s, v7.4s
+; CHECK-NEXT:    fmul v0.4s, v2.4s, v0.4s
+; CHECK-NEXT:    fmul v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    bfcvtn v2.4h, v4.4s
+; CHECK-NEXT:    bfcvtn v3.4h, v5.4s
+; CHECK-NEXT:    bfcvtn2 v2.8h, v0.4s
+; CHECK-NEXT:    bfcvtn2 v3.8h, v1.4s
+; CHECK-NEXT:    ldp q0, q1, [x2]
+; CHECK-NEXT:    shll v4.4s, v0.4h, #16
+; CHECK-NEXT:    shll v5.4s, v2.4h, #16
+; CHECK-NEXT:    shll v6.4s, v1.4h, #16
+; CHECK-NEXT:    shll v7.4s, v3.4h, #16
+; CHECK-NEXT:    shll2 v0.4s, v0.8h, #16
+; CHECK-NEXT:    shll2 v2.4s, v2.8h, #16
+; CHECK-NEXT:    shll2 v1.4s, v1.8h, #16
+; CHECK-NEXT:    shll2 v3.4s, v3.8h, #16
+; CHECK-NEXT:    fadd v4.4s, v5.4s, v4.4s
+; CHECK-NEXT:    fadd v5.4s, v7.4s, v6.4s
+; CHECK-NEXT:    fadd v0.4s, v2.4s, v0.4s
+; CHECK-NEXT:    fadd v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    bfcvtn v2.4h, v4.4s
+; CHECK-NEXT:    bfcvtn v3.4h, v5.4s
+; CHECK-NEXT:    bfcvtn2 v2.8h, v0.4s
+; CHECK-NEXT:    bfcvtn2 v3.8h, v1.4s
+; CHECK-NEXT:    stp q2, q3, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <16 x bfloat>, ptr %a
+  %ld.b = load <16 x bfloat>, ptr %b
+  %ld.c = load <16 x bfloat>, ptr %c
+  %splat.lanes = shufflevector <16 x bfloat> %ld.b, <16 x bfloat> poison, <16 x i32> <i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2,
+                                                                                      i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
+  %res = call <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> %ld.a, <16 x bfloat> %splat.lanes, <16 x bfloat> %ld.c)
+  store <16 x bfloat> %res, ptr %c
+  ret void
+}
+
+define void @fmla_indexed_f32_256b(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmla_indexed_f32_256b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x1]
+; CHECK-NEXT:    ldr z1, [x0]
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ldr z2, [x2]
+; CHECK-NEXT:    dupq z0.s, z0.s[3]
+; CHECK-NEXT:    fmad z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <8 x float>, ptr %a
+  %ld.b = load <8 x float>, ptr %b
+  %ld.c = load <8 x float>, ptr %c
+  %splat.lanes = shufflevector <8 x float> %ld.b, <8 x float> poison, <8 x i32> <i32 3, i32 3, i32 3, i32 3,
+                                                                                 i32 7, i32 7, i32 7, i32 7>
+  %res = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> %splat.lanes, <8 x float> %ld.a, <8 x float> %ld.c)
+  store <8 x float> %res, ptr %c
+  ret void
+}
+
+define void @fmla_indexed_f64_256b_trn1(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmla_indexed_f64_256b_trn1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x1]
+; CHECK-NEXT:    ldr z1, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldr z2, [x2]
+; CHECK-NEXT:    trn1 z0.d, z0.d, z0.d
+; CHECK-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <4 x double>, ptr %a
+  %ld.b = load <4 x double>, ptr %b
+  %ld.c = load <4 x double>, ptr %c
+  %splat.lanes = shufflevector <4 x double> %ld.b, <4 x double> poison, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+  %res = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> %splat.lanes, <4 x double> %ld.a, <4 x double> %ld.c)
+  store <4 x double> %res, ptr %c
+  ret void
+}
+
+define void @fmla_indexed_f64_256b_trn2(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmla_indexed_f64_256b_trn2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x1]
+; CHECK-NEXT:    ldr z1, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldr z2, [x2]
+; CHECK-NEXT:    trn2 z0.d, z0.d, z0.d
+; CHECK-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <4 x double>, ptr %a
+  %ld.b = load <4 x double>, ptr %b
+  %ld.c = load <4 x double>, ptr %c
+  %splat.lanes = shufflevector <4 x double> %ld.b, <4 x double> poison, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
+  %res = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> %ld.a, <4 x double> %splat.lanes, <4 x double> %ld.c)
+  store <4 x double> %res, ptr %c
+  ret void
+}
+
+define void @fmls_indexed_f16_256b(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmls_indexed_f16_256b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x2]
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    dupq z2.h, z0.h[2]
+; CHECK-NEXT:    fmsb z0.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <16 x half>, ptr %a
+  %ld.b = load <16 x half>, ptr %b
+  %ld.c = load <16 x half>, ptr %c
+  %splat.lanes = shufflevector <16 x half> %ld.a, <16 x half> poison, <16 x i32> <i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2,
+                                                                                  i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
+  %neg.a = fneg <16 x half> %ld.a
+  %res = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> %neg.a, <16 x half> %splat.lanes, <16 x half> %ld.c)
+  store <16 x half> %res, ptr %c
+  ret void
+}
+
+define void @fmls_indexed_bf16_256b(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmls_indexed_bf16_256b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    dup v0.8h, v0.h[2]
+; CHECK-NEXT:    dup v1.8h, v1.h[2]
+; CHECK-NEXT:    shll v4.4s, v2.4h, #16
+; CHECK-NEXT:    shll v6.4s, v3.4h, #16
+; CHECK-NEXT:    shll2 v2.4s, v2.8h, #16
+; CHECK-NEXT:    shll2 v3.4s, v3.8h, #16
+; CHECK-NEXT:    shll v5.4s, v0.4h, #16
+; CHECK-NEXT:    shll v7.4s, v1.4h, #16
+; CHECK-NEXT:    shll2 v0.4s, v0.8h, #16
+; CHECK-NEXT:    shll2 v1.4s, v1.8h, #16
+; CHECK-NEXT:    fmul v4.4s, v4.4s, v5.4s
+; CHECK-NEXT:    fmul v5.4s, v6.4s, v7.4s
+; CHECK-NEXT:    fmul v0.4s, v2.4s, v0.4s
+; CHECK-NEXT:    fmul v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    bfcvtn v2.4h, v4.4s
+; CHECK-NEXT:    bfcvtn v3.4h, v5.4s
+; CHECK-NEXT:    bfcvtn2 v2.8h, v0.4s
+; CHECK-NEXT:    bfcvtn2 v3.8h, v1.4s
+; CHECK-NEXT:    ldp q0, q1, [x2]
+; CHECK-NEXT:    shll v4.4s, v0.4h, #16
+; CHECK-NEXT:    shll v5.4s, v2.4h, #16
+; CHECK-NEXT:    shll v6.4s, v1.4h, #16
+; CHECK-NEXT:    shll v7.4s, v3.4h, #16
+; CHECK-NEXT:    shll2 v0.4s, v0.8h, #16
+; CHECK-NEXT:    shll2 v2.4s, v2.8h, #16
+; CHECK-NEXT:    shll2 v1.4s, v1.8h, #16
+; CHECK-NEXT:    shll2 v3.4s, v3.8h, #16
+; CHECK-NEXT:    fsub v4.4s, v4.4s, v5.4s
+; CHECK-NEXT:    fsub v5.4s, v6.4s, v7.4s
+; CHECK-NEXT:    fsub v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    fsub v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    bfcvtn v2.4h, v4.4s
+; CHECK-NEXT:    bfcvtn v3.4h, v5.4s
+; CHECK-NEXT:    bfcvtn2 v2.8h, v0.4s
+; CHECK-NEXT:    bfcvtn2 v3.8h, v1.4s
+; CHECK-NEXT:    stp q2, q3, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <16 x bfloat>, ptr %a
+  %ld.b = load <16 x bfloat>, ptr %b
+  %ld.c = load <16 x bfloat>, ptr %c
+  %splat.lanes = shufflevector <16 x bfloat> %ld.b, <16 x bfloat> poison, <16 x i32> <i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2, i32  2,
+                                                                                      i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
+  %neg.a = fneg <16 x bfloat> %ld.a
+  %res = call <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> %neg.a, <16 x bfloat> %splat.lanes, <16 x bfloat> %ld.c)
+  store <16 x bfloat> %res, ptr %c
+  ret void
+}
+
+define void @fmls_indexed_f32_256b(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmls_indexed_f32_256b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x1]
+; CHECK-NEXT:    ldr z1, [x0]
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ldr z2, [x2]
+; CHECK-NEXT:    dupq z0.s, z0.s[3]
+; CHECK-NEXT:    fmsb z0.s, p0/m, z1.s, z2.s
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <8 x float>, ptr %a
+  %ld.b = load <8 x float>, ptr %b
+  %ld.c = load <8 x float>, ptr %c
+  %splat.lanes = shufflevector <8 x float> %ld.b, <8 x float> poison, <8 x i32> <i32 3, i32 3, i32 3, i32 3,
+                                                                                 i32 7, i32 7, i32 7, i32 7>
+  %neg.a = fneg <8 x float> %ld.a
+  %res = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> %splat.lanes, <8 x float> %neg.a, <8 x float> %ld.c)
+  store <8 x float> %res, ptr %c
+  ret void
+}
+
+define void @fmls_indexed_f64_256b_trn1(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmls_indexed_f64_256b_trn1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x1]
+; CHECK-NEXT:    ldr z1, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldr z2, [x2]
+; CHECK-NEXT:    trn1 z0.d, z0.d, z0.d
+; CHECK-NEXT:    fmsb z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <4 x double>, ptr %a
+  %ld.b = load <4 x double>, ptr %b
+  %ld.c = load <4 x double>, ptr %c
+  %splat.lanes = shufflevector <4 x double> %ld.b, <4 x double> poison, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+  %neg.a = fneg <4 x double> %ld.a
+  %res = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> %splat.lanes, <4 x double> %neg.a, <4 x double> %ld.c)
+  store <4 x double> %res, ptr %c
+  ret void
+}
+
+define void @fmls_indexed_f64_256b_trn2(ptr %a, ptr %b, ptr %c) #0 {
+; CHECK-LABEL: fmls_indexed_f64_256b_trn2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr z0, [x1]
+; CHECK-NEXT:    ldr z1, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldr z2, [x2]
+; CHECK-NEXT:    trn2 z0.d, z0.d, z0.d
+; CHECK-NEXT:    fmsb z0.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    ret
+  %ld.a = load <4 x double>, ptr %a
+  %ld.b = load <4 x double>, ptr %b
+  %ld.c = load <4 x double>, ptr %c
+  %splat.lanes = shufflevector <4 x double> %ld.b, <4 x double> poison, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
+  %neg.a = fneg <4 x double> %ld.a
+  %res = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> %neg.a, <4 x double> %splat.lanes, <4 x double> %ld.c)
+  store <4 x double> %res, ptr %c
+  ret void
+}
+
+declare <16 x half> @llvm.fmuladd.v16f16(<16 x half>, <16 x half>, <16 x half>);
+declare <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat>, <16 x bfloat>, <16 x bfloat>);
+declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>);
+declare <4 x double> @llvm.fmuladd.v4f64(<4 x double>, <4 x double>, <4 x double>);
+
+attributes #0 = { noinline vscale_range(2,2) "target-features"="+sve2p1,+bf16" }

From e489e1a0644799397067e7f7765c7adcd9d222b1 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Thu, 19 Jun 2025 12:39:15 +0000
Subject: [PATCH 2/3] Add ISel patterns for indexed fmul/fmla/fmls

---
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td | 97 +++++++++++++++++++
 .../CodeGen/AArch64/sve-indexed-arithmetic.ll | 96 ++++++++----------
 2 files changed, 135 insertions(+), 58 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 2360e30de63b0..8bed8e7751c62 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -438,11 +438,28 @@ def AArch64fabd_p : PatFrags<(ops node:$pg, node:$op1, node:$op2),
 def AArch64fmla_p : PatFrags<(ops node:$pg, node:$za, node:$zn, node:$zm),
                              [(AArch64fma_p node:$pg, node:$zn, node:$zm, node:$za)]>;
 
+def AArch64fmlaidx
+    : PatFrags<(ops node:$acc, node:$op1, node:$op2, node:$idx),
+               [(AArch64fmla_p(SVEAllActive), node:$acc, node:$op1,
+                    (int_aarch64_sve_dup_laneq node:$op2, node:$idx)),
+                (AArch64fmla_p(SVEAllActive), node:$acc,
+                    (int_aarch64_sve_dup_laneq node:$op2, node:$idx),
+                    node:$op1)]>;
+
 def AArch64fmls_p : PatFrags<(ops node:$pg, node:$za, node:$zn, node:$zm),
                              [(int_aarch64_sve_fmls_u node:$pg, node:$za, node:$zn, node:$zm),
                               (AArch64fma_p node:$pg, (AArch64fneg_mt node:$pg, node:$zn, (undef)), node:$zm, node:$za),
                               (AArch64fma_p node:$pg, node:$zm, (AArch64fneg_mt node:$pg, node:$zn, (undef)), node:$za)]>;
 
+def AArch64fmlsidx
+    : PatFrags<(ops node:$acc, node:$op1, node:$op2, node:$idx),
+               [(AArch64fmla_p(SVEAllActive), node:$acc,
+                    (AArch64fneg_mt(SVEAllActive), node:$op1, (undef)),
+                    (int_aarch64_sve_dup_laneq node:$op2, node:$idx)),
+                (AArch64fmla_p(SVEAllActive), node:$acc,
+                    (int_aarch64_sve_dup_laneq node:$op2, node:$idx),
+                    (AArch64fneg_mt(SVEAllActive), node:$op1, (undef)))]>;
+
 def AArch64fnmla_p : PatFrags<(ops node:$pg, node:$za, node:$zn, node:$zm),
                               [(int_aarch64_sve_fnmla_u node:$pg, node:$za, node:$zn, node:$zm),
                                (AArch64fma_p node:$pg, (AArch64fneg_mt node:$pg, node:$zn, (undef)), node:$zm, (AArch64fneg_mt node:$pg, node:$za, (undef))),
@@ -562,6 +579,13 @@ def AArch64fmul : PatFrags<(ops node:$op1, node:$op2),
                             [(fmul node:$op1, node:$op2),
                              (AArch64fmul_p (SVEAllActive), node:$op1, node:$op2)]>;
 
+def AArch64fmulidx
+    : PatFrags<(ops node:$op1, node:$op2, node:$idx),
+               [(AArch64fmul node:$op1, (int_aarch64_sve_dup_laneq node:$op2,
+                                            node:$idx)),
+                (AArch64fmul(int_aarch64_sve_dup_laneq node:$op2, node:$idx),
+                    node:$op1)]>;
+
 def AArch64fsub : PatFrags<(ops node:$op1, node:$op2),
                             [(fsub node:$op1, node:$op2),
                              (AArch64fsub_p (SVEAllActive), node:$op1, node:$op2)]>;
@@ -877,6 +901,68 @@ let Predicates = [HasSVE_or_SME] in {
 
   defm FCMLA_ZZZI : sve_fp_fcmla_by_indexed_elem<"fcmla", int_aarch64_sve_fcmla_lane>;
   defm FMUL_ZZZI   : sve_fp_fmul_by_indexed_elem<"fmul", int_aarch64_sve_fmul_lane>;
+
+  // Fold segmented lane splats in where possible.
+  def : Pat<(nxv8f16(AArch64fmulidx nxv8f16:$L, nxv8f16:$R,
+                VectorIndexH32b_timm:$Idx)),
+            (FMUL_ZZZI_H $L, $R, $Idx)>;
+  def : Pat<(nxv8f16(AArch64fmlaidx nxv8f16:$Acc, nxv8f16:$L, nxv8f16:$R,
+                VectorIndexH32b_timm:$Idx)),
+            (FMLA_ZZZI_H $Acc, $L, $R, $Idx)>;
+  def : Pat<(nxv8f16(AArch64fmlsidx nxv8f16:$Acc, nxv8f16:$L, nxv8f16:$R,
+                VectorIndexH32b_timm:$Idx)),
+            (FMLS_ZZZI_H $Acc, $L, $R, $Idx)>;
+  def : Pat<(nxv4f32(AArch64fmulidx nxv4f32:$L, nxv4f32:$R,
+                VectorIndexS32b_timm:$Idx)),
+            (FMUL_ZZZI_S $L, $R, $Idx)>;
+  def : Pat<(nxv4f32(AArch64fmlaidx nxv4f32:$Acc, nxv4f32:$L, nxv4f32:$R,
+                VectorIndexS32b_timm:$Idx)),
+            (FMLA_ZZZI_S $Acc, $L, $R, $Idx)>;
+  def : Pat<(nxv4f32(AArch64fmlsidx nxv4f32:$Acc, nxv4f32:$L, nxv4f32:$R,
+                VectorIndexS32b_timm:$Idx)),
+            (FMLS_ZZZI_S $Acc, $L, $R, $Idx)>;
+
+  // 64B segmented lane splats currently end up as trn instructions instead.
+  def : Pat<(nxv2f64(AArch64fmul nxv2f64:$L, (AArch64trn1 nxv2f64:$R,
+                                                 nxv2f64:$R))),
+            (FMUL_ZZZI_D $L, $R, 0)>;
+  def : Pat<(nxv2f64(AArch64fmul(AArch64trn1 nxv2f64:$R, nxv2f64:$R),
+                nxv2f64:$L)),
+            (FMUL_ZZZI_D $L, $R, 0)>;
+  def : Pat<(nxv2f64(AArch64fmul nxv2f64:$L, (AArch64trn2 nxv2f64:$R,
+                                                 nxv2f64:$R))),
+            (FMUL_ZZZI_D $L, $R, 1)>;
+  def : Pat<(nxv2f64(AArch64fmul(AArch64trn2 nxv2f64:$R, nxv2f64:$R),
+                nxv2f64:$L)),
+            (FMUL_ZZZI_D $L, $R, 1)>;
+  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc, nxv2f64:$L,
+                (AArch64trn1 nxv2f64:$R, nxv2f64:$R))),
+            (FMLA_ZZZI_D $Acc, $L, $R, 0)>;
+  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
+                (AArch64trn1 nxv2f64:$R, nxv2f64:$R), nxv2f64:$L)),
+            (FMLA_ZZZI_D $Acc, $L, $R, 0)>;
+  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc, nxv2f64:$L,
+                (AArch64trn2 nxv2f64:$R, nxv2f64:$R))),
+            (FMLA_ZZZI_D $Acc, $L, $R, 1)>;
+  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
+                (AArch64trn2 nxv2f64:$R, nxv2f64:$R), nxv2f64:$L)),
+            (FMLA_ZZZI_D $Acc, $L, $R, 1)>;
+  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
+                (AArch64fneg_mt(SVEAllActive), nxv2f64:$L, (undef)),
+                (AArch64trn1 nxv2f64:$R, nxv2f64:$R))),
+            (FMLS_ZZZI_D $Acc, $L, $R, 0)>;
+  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
+                (AArch64trn1 nxv2f64:$R, nxv2f64:$R),
+                (AArch64fneg_mt(SVEAllActive), nxv2f64:$L, (undef)))),
+            (FMLS_ZZZI_D $Acc, $L, $R, 0)>;
+  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
+                (AArch64fneg_mt(SVEAllActive), nxv2f64:$L, (undef)),
+                (AArch64trn2 nxv2f64:$R, nxv2f64:$R))),
+            (FMLS_ZZZI_D $Acc, $L, $R, 1)>;
+  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
+                (AArch64trn2 nxv2f64:$R, nxv2f64:$R),
+                (AArch64fneg_mt(SVEAllActive), nxv2f64:$L, (undef)))),
+            (FMLS_ZZZI_D $Acc, $L, $R, 1)>;
 } // End HasSVE_or_SME
 
 let Predicates = [HasSVE] in {
@@ -4355,6 +4441,17 @@ defm BFMLS_ZZZI : sve_fp_fma_by_indexed_elem_bfloat<"bfmls", 0b11, int_aarch64_s
 defm BFMUL_ZZZI : sve_fp_fmul_by_indexed_elem_bfloat<"bfmul", int_aarch64_sve_fmul_lane>;
 
 defm BFCLAMP_ZZZ : sve_fp_clamp_bfloat<"bfclamp", AArch64fclamp>;
+
+// Fold segmented lane splats in where possible.
+def : Pat<(nxv8bf16(AArch64fmulidx nxv8bf16:$L, nxv8bf16:$R,
+              VectorIndexH32b_timm:$Idx)),
+          (BFMUL_ZZZI $L, $R, $Idx)>;
+def : Pat<(nxv8bf16(AArch64fmlaidx nxv8bf16:$Acc, nxv8bf16:$L, nxv8bf16:$R,
+              VectorIndexH32b_timm:$Idx)),
+          (BFMLA_ZZZI $Acc, $L, $R, $Idx)>;
+def : Pat<(nxv8bf16(AArch64fmlsidx nxv8bf16:$Acc, nxv8bf16:$L, nxv8bf16:$R,
+              VectorIndexH32b_timm:$Idx)),
+          (BFMLS_ZZZI $Acc, $L, $R, $Idx)>;
 } // End HasSVEB16B16
 
 let Predicates = [HasSVEB16B16, UseExperimentalZeroingPseudos] in {
diff --git a/llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll b/llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll
index 4d598cf5ee455..b43817e53a6c6 100644
--- a/llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll
+++ b/llvm/test/CodeGen/AArch64/sve-indexed-arithmetic.ll
@@ -5,8 +5,7 @@ define void @fmul_indexed_f16_256b(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fmul_indexed_f16_256b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    dupq z1.h, z0.h[2]
-; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
+; CHECK-NEXT:    fmul z0.h, z0.h, z0.h[2]
 ; CHECK-NEXT:    str z0, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <16 x half>, ptr %a
@@ -55,10 +54,9 @@ define void @fmul_indexed_bf16_256b(ptr %a, ptr %b, ptr %c) #0 {
 define void @fmul_indexed_f32_256b(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fmul_indexed_f32_256b:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x1]
-; CHECK-NEXT:    ldr z1, [x0]
-; CHECK-NEXT:    dupq z0.s, z0.s[3]
-; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
+; CHECK-NEXT:    fmul z0.s, z0.s, z1.s[3]
 ; CHECK-NEXT:    str z0, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <8 x float>, ptr %a
@@ -73,10 +71,9 @@ define void @fmul_indexed_f32_256b(ptr %a, ptr %b, ptr %c) #0 {
 define void @fmul_indexed_f64_256b_trn1(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fmul_indexed_f64_256b_trn1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x1]
-; CHECK-NEXT:    ldr z1, [x0]
-; CHECK-NEXT:    trn1 z0.d, z0.d, z0.d
-; CHECK-NEXT:    fmul z0.d, z0.d, z1.d
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
+; CHECK-NEXT:    fmul z0.d, z0.d, z1.d[0]
 ; CHECK-NEXT:    str z0, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <4 x double>, ptr %a
@@ -90,10 +87,9 @@ define void @fmul_indexed_f64_256b_trn1(ptr %a, ptr %b, ptr %c) #0 {
 define void @fmul_indexed_f64_256b_trn2(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fmul_indexed_f64_256b_trn2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x1]
-; CHECK-NEXT:    ldr z1, [x0]
-; CHECK-NEXT:    trn2 z0.d, z0.d, z0.d
-; CHECK-NEXT:    fmul z0.d, z1.d, z0.d
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
+; CHECK-NEXT:    fmul z0.d, z0.d, z1.d[1]
 ; CHECK-NEXT:    str z0, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <4 x double>, ptr %a
@@ -109,10 +105,8 @@ define void @fmla_indexed_f16_256b(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
 ; CHECK-NEXT:    ldr z1, [x2]
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    dupq z2.h, z0.h[2]
-; CHECK-NEXT:    fmad z0.h, p0/m, z2.h, z1.h
-; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    fmla z1.h, z0.h, z0.h[2]
+; CHECK-NEXT:    str z1, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <16 x half>, ptr %a
   %ld.b = load <16 x half>, ptr %b
@@ -179,13 +173,11 @@ define void @fmla_indexed_bf16_256b(ptr %a, ptr %b, ptr %c) #0 {
 define void @fmla_indexed_f32_256b(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fmla_indexed_f32_256b:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x1]
-; CHECK-NEXT:    ldr z1, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
 ; CHECK-NEXT:    ldr z2, [x2]
-; CHECK-NEXT:    dupq z0.s, z0.s[3]
-; CHECK-NEXT:    fmad z0.s, p0/m, z1.s, z2.s
-; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    fmla z2.s, z0.s, z1.s[3]
+; CHECK-NEXT:    str z2, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <8 x float>, ptr %a
   %ld.b = load <8 x float>, ptr %b
@@ -200,13 +192,11 @@ define void @fmla_indexed_f32_256b(ptr %a, ptr %b, ptr %c) #0 {
 define void @fmla_indexed_f64_256b_trn1(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fmla_indexed_f64_256b_trn1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x1]
-; CHECK-NEXT:    ldr z1, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
 ; CHECK-NEXT:    ldr z2, [x2]
-; CHECK-NEXT:    trn1 z0.d, z0.d, z0.d
-; CHECK-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    fmla z2.d, z0.d, z1.d[0]
+; CHECK-NEXT:    str z2, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <4 x double>, ptr %a
   %ld.b = load <4 x double>, ptr %b
@@ -220,13 +210,11 @@ define void @fmla_indexed_f64_256b_trn1(ptr %a, ptr %b, ptr %c) #0 {
 define void @fmla_indexed_f64_256b_trn2(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fmla_indexed_f64_256b_trn2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x1]
-; CHECK-NEXT:    ldr z1, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
 ; CHECK-NEXT:    ldr z2, [x2]
-; CHECK-NEXT:    trn2 z0.d, z0.d, z0.d
-; CHECK-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    fmla z2.d, z0.d, z1.d[1]
+; CHECK-NEXT:    str z2, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <4 x double>, ptr %a
   %ld.b = load <4 x double>, ptr %b
@@ -242,10 +230,8 @@ define void @fmls_indexed_f16_256b(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr z0, [x0]
 ; CHECK-NEXT:    ldr z1, [x2]
-; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    dupq z2.h, z0.h[2]
-; CHECK-NEXT:    fmsb z0.h, p0/m, z2.h, z1.h
-; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    fmls z1.h, z0.h, z0.h[2]
+; CHECK-NEXT:    str z1, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <16 x half>, ptr %a
   %ld.b = load <16 x half>, ptr %b
@@ -314,13 +300,11 @@ define void @fmls_indexed_bf16_256b(ptr %a, ptr %b, ptr %c) #0 {
 define void @fmls_indexed_f32_256b(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fmls_indexed_f32_256b:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x1]
-; CHECK-NEXT:    ldr z1, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
 ; CHECK-NEXT:    ldr z2, [x2]
-; CHECK-NEXT:    dupq z0.s, z0.s[3]
-; CHECK-NEXT:    fmsb z0.s, p0/m, z1.s, z2.s
-; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    fmls z2.s, z0.s, z1.s[3]
+; CHECK-NEXT:    str z2, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <8 x float>, ptr %a
   %ld.b = load <8 x float>, ptr %b
@@ -336,13 +320,11 @@ define void @fmls_indexed_f32_256b(ptr %a, ptr %b, ptr %c) #0 {
 define void @fmls_indexed_f64_256b_trn1(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fmls_indexed_f64_256b_trn1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x1]
-; CHECK-NEXT:    ldr z1, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
 ; CHECK-NEXT:    ldr z2, [x2]
-; CHECK-NEXT:    trn1 z0.d, z0.d, z0.d
-; CHECK-NEXT:    fmsb z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    fmls z2.d, z0.d, z1.d[0]
+; CHECK-NEXT:    str z2, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <4 x double>, ptr %a
   %ld.b = load <4 x double>, ptr %b
@@ -357,13 +339,11 @@ define void @fmls_indexed_f64_256b_trn1(ptr %a, ptr %b, ptr %c) #0 {
 define void @fmls_indexed_f64_256b_trn2(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fmls_indexed_f64_256b_trn2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x1]
-; CHECK-NEXT:    ldr z1, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldr z0, [x0]
+; CHECK-NEXT:    ldr z1, [x1]
 ; CHECK-NEXT:    ldr z2, [x2]
-; CHECK-NEXT:    trn2 z0.d, z0.d, z0.d
-; CHECK-NEXT:    fmsb z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT:    str z0, [x2]
+; CHECK-NEXT:    fmls z2.d, z0.d, z1.d[1]
+; CHECK-NEXT:    str z2, [x2]
 ; CHECK-NEXT:    ret
   %ld.a = load <4 x double>, ptr %a
   %ld.b = load <4 x double>, ptr %b

From 33c26b6bf061ecdf91d5cc1b4ef1b3371a3f8c4f Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Fri, 20 Jun 2025 10:36:54 +0000
Subject: [PATCH 3/3] Reformat ISel patterns

---
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td | 97 ++++++-------------
 1 file changed, 30 insertions(+), 67 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 8bed8e7751c62..bc2262d112b3d 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -438,27 +438,18 @@ def AArch64fabd_p : PatFrags<(ops node:$pg, node:$op1, node:$op2),
 def AArch64fmla_p : PatFrags<(ops node:$pg, node:$za, node:$zn, node:$zm),
                              [(AArch64fma_p node:$pg, node:$zn, node:$zm, node:$za)]>;
 
-def AArch64fmlaidx
-    : PatFrags<(ops node:$acc, node:$op1, node:$op2, node:$idx),
-               [(AArch64fmla_p(SVEAllActive), node:$acc, node:$op1,
-                    (int_aarch64_sve_dup_laneq node:$op2, node:$idx)),
-                (AArch64fmla_p(SVEAllActive), node:$acc,
-                    (int_aarch64_sve_dup_laneq node:$op2, node:$idx),
-                    node:$op1)]>;
+def AArch64fmlaidx : PatFrags<(ops node:$acc, node:$op1, node:$op2, node:$idx),
+                              [(AArch64fmla_p (SVEAllActive), node:$acc, node:$op1, (int_aarch64_sve_dup_laneq node:$op2, node:$idx)),
+                               (AArch64fmla_p (SVEAllActive), node:$acc, (int_aarch64_sve_dup_laneq node:$op2, node:$idx), node:$op1)]>;
 
 def AArch64fmls_p : PatFrags<(ops node:$pg, node:$za, node:$zn, node:$zm),
                              [(int_aarch64_sve_fmls_u node:$pg, node:$za, node:$zn, node:$zm),
                               (AArch64fma_p node:$pg, (AArch64fneg_mt node:$pg, node:$zn, (undef)), node:$zm, node:$za),
                               (AArch64fma_p node:$pg, node:$zm, (AArch64fneg_mt node:$pg, node:$zn, (undef)), node:$za)]>;
 
-def AArch64fmlsidx
-    : PatFrags<(ops node:$acc, node:$op1, node:$op2, node:$idx),
-               [(AArch64fmla_p(SVEAllActive), node:$acc,
-                    (AArch64fneg_mt(SVEAllActive), node:$op1, (undef)),
-                    (int_aarch64_sve_dup_laneq node:$op2, node:$idx)),
-                (AArch64fmla_p(SVEAllActive), node:$acc,
-                    (int_aarch64_sve_dup_laneq node:$op2, node:$idx),
-                    (AArch64fneg_mt(SVEAllActive), node:$op1, (undef)))]>;
+def AArch64fmlsidx : PatFrags<(ops node:$acc, node:$op1, node:$op2, node:$idx),
+                              [(AArch64fmla_p (SVEAllActive), node:$acc, (AArch64fneg_mt(SVEAllActive), node:$op1, (undef)), (int_aarch64_sve_dup_laneq node:$op2, node:$idx)),
+                               (AArch64fmla_p (SVEAllActive), node:$acc, (int_aarch64_sve_dup_laneq node:$op2, node:$idx), (AArch64fneg_mt(SVEAllActive), node:$op1, (undef)))]>;
 
 def AArch64fnmla_p : PatFrags<(ops node:$pg, node:$za, node:$zn, node:$zm),
                               [(int_aarch64_sve_fnmla_u node:$pg, node:$za, node:$zn, node:$zm),
@@ -579,12 +570,9 @@ def AArch64fmul : PatFrags<(ops node:$op1, node:$op2),
                             [(fmul node:$op1, node:$op2),
                              (AArch64fmul_p (SVEAllActive), node:$op1, node:$op2)]>;
 
-def AArch64fmulidx
-    : PatFrags<(ops node:$op1, node:$op2, node:$idx),
-               [(AArch64fmul node:$op1, (int_aarch64_sve_dup_laneq node:$op2,
-                                            node:$idx)),
-                (AArch64fmul(int_aarch64_sve_dup_laneq node:$op2, node:$idx),
-                    node:$op1)]>;
+def AArch64fmulidx : PatFrags<(ops node:$op1, node:$op2, node:$idx),
+                              [(AArch64fmul node:$op1, (int_aarch64_sve_dup_laneq node:$op2, node:$idx)),
+                               (AArch64fmul (int_aarch64_sve_dup_laneq node:$op2, node:$idx), node:$op1)]>;
 
 def AArch64fsub : PatFrags<(ops node:$op1, node:$op2),
                             [(fsub node:$op1, node:$op2),
@@ -903,65 +891,43 @@ let Predicates = [HasSVE_or_SME] in {
   defm FMUL_ZZZI   : sve_fp_fmul_by_indexed_elem<"fmul", int_aarch64_sve_fmul_lane>;
 
   // Fold segmented lane splats in where possible.
-  def : Pat<(nxv8f16(AArch64fmulidx nxv8f16:$L, nxv8f16:$R,
-                VectorIndexH32b_timm:$Idx)),
+  def : Pat<(nxv8f16 (AArch64fmulidx nxv8f16:$L, nxv8f16:$R, VectorIndexH32b_timm:$Idx)),
             (FMUL_ZZZI_H $L, $R, $Idx)>;
-  def : Pat<(nxv8f16(AArch64fmlaidx nxv8f16:$Acc, nxv8f16:$L, nxv8f16:$R,
-                VectorIndexH32b_timm:$Idx)),
+  def : Pat<(nxv8f16 (AArch64fmlaidx nxv8f16:$Acc, nxv8f16:$L, nxv8f16:$R, VectorIndexH32b_timm:$Idx)),
             (FMLA_ZZZI_H $Acc, $L, $R, $Idx)>;
-  def : Pat<(nxv8f16(AArch64fmlsidx nxv8f16:$Acc, nxv8f16:$L, nxv8f16:$R,
-                VectorIndexH32b_timm:$Idx)),
+  def : Pat<(nxv8f16 (AArch64fmlsidx nxv8f16:$Acc, nxv8f16:$L, nxv8f16:$R, VectorIndexH32b_timm:$Idx)),
             (FMLS_ZZZI_H $Acc, $L, $R, $Idx)>;
-  def : Pat<(nxv4f32(AArch64fmulidx nxv4f32:$L, nxv4f32:$R,
-                VectorIndexS32b_timm:$Idx)),
+  def : Pat<(nxv4f32 (AArch64fmulidx nxv4f32:$L, nxv4f32:$R, VectorIndexS32b_timm:$Idx)),
             (FMUL_ZZZI_S $L, $R, $Idx)>;
-  def : Pat<(nxv4f32(AArch64fmlaidx nxv4f32:$Acc, nxv4f32:$L, nxv4f32:$R,
-                VectorIndexS32b_timm:$Idx)),
+  def : Pat<(nxv4f32 (AArch64fmlaidx nxv4f32:$Acc, nxv4f32:$L, nxv4f32:$R, VectorIndexS32b_timm:$Idx)),
             (FMLA_ZZZI_S $Acc, $L, $R, $Idx)>;
-  def : Pat<(nxv4f32(AArch64fmlsidx nxv4f32:$Acc, nxv4f32:$L, nxv4f32:$R,
-                VectorIndexS32b_timm:$Idx)),
+  def : Pat<(nxv4f32 (AArch64fmlsidx nxv4f32:$Acc, nxv4f32:$L, nxv4f32:$R, VectorIndexS32b_timm:$Idx)),
             (FMLS_ZZZI_S $Acc, $L, $R, $Idx)>;
 
   // 64B segmented lane splats currently end up as trn instructions instead.
-  def : Pat<(nxv2f64(AArch64fmul nxv2f64:$L, (AArch64trn1 nxv2f64:$R,
-                                                 nxv2f64:$R))),
+  def : Pat<(nxv2f64 (AArch64fmul nxv2f64:$L, (AArch64trn1 nxv2f64:$R, nxv2f64:$R))),
             (FMUL_ZZZI_D $L, $R, 0)>;
-  def : Pat<(nxv2f64(AArch64fmul(AArch64trn1 nxv2f64:$R, nxv2f64:$R),
-                nxv2f64:$L)),
+  def : Pat<(nxv2f64 (AArch64fmul (AArch64trn1 nxv2f64:$R, nxv2f64:$R), nxv2f64:$L)),
             (FMUL_ZZZI_D $L, $R, 0)>;
-  def : Pat<(nxv2f64(AArch64fmul nxv2f64:$L, (AArch64trn2 nxv2f64:$R,
-                                                 nxv2f64:$R))),
+  def : Pat<(nxv2f64 (AArch64fmul nxv2f64:$L, (AArch64trn2 nxv2f64:$R, nxv2f64:$R))),
             (FMUL_ZZZI_D $L, $R, 1)>;
-  def : Pat<(nxv2f64(AArch64fmul(AArch64trn2 nxv2f64:$R, nxv2f64:$R),
-                nxv2f64:$L)),
+  def : Pat<(nxv2f64 (AArch64fmul (AArch64trn2 nxv2f64:$R, nxv2f64:$R), nxv2f64:$L)),
             (FMUL_ZZZI_D $L, $R, 1)>;
-  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc, nxv2f64:$L,
-                (AArch64trn1 nxv2f64:$R, nxv2f64:$R))),
+  def : Pat<(nxv2f64 (AArch64fmla_p (SVEAllActive), nxv2f64:$Acc, nxv2f64:$L, (AArch64trn1 nxv2f64:$R, nxv2f64:$R))),
             (FMLA_ZZZI_D $Acc, $L, $R, 0)>;
-  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
-                (AArch64trn1 nxv2f64:$R, nxv2f64:$R), nxv2f64:$L)),
+  def : Pat<(nxv2f64 (AArch64fmla_p (SVEAllActive), nxv2f64:$Acc, (AArch64trn1 nxv2f64:$R, nxv2f64:$R), nxv2f64:$L)),
             (FMLA_ZZZI_D $Acc, $L, $R, 0)>;
-  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc, nxv2f64:$L,
-                (AArch64trn2 nxv2f64:$R, nxv2f64:$R))),
+  def : Pat<(nxv2f64 (AArch64fmla_p (SVEAllActive), nxv2f64:$Acc, nxv2f64:$L, (AArch64trn2 nxv2f64:$R, nxv2f64:$R))),
             (FMLA_ZZZI_D $Acc, $L, $R, 1)>;
-  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
-                (AArch64trn2 nxv2f64:$R, nxv2f64:$R), nxv2f64:$L)),
+  def : Pat<(nxv2f64 (AArch64fmla_p (SVEAllActive), nxv2f64:$Acc, (AArch64trn2 nxv2f64:$R, nxv2f64:$R), nxv2f64:$L)),
             (FMLA_ZZZI_D $Acc, $L, $R, 1)>;
-  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
-                (AArch64fneg_mt(SVEAllActive), nxv2f64:$L, (undef)),
-                (AArch64trn1 nxv2f64:$R, nxv2f64:$R))),
+  def : Pat<(nxv2f64 (AArch64fmla_p (SVEAllActive), nxv2f64:$Acc, (AArch64fneg_mt (SVEAllActive), nxv2f64:$L, (undef)), (AArch64trn1 nxv2f64:$R, nxv2f64:$R))),
             (FMLS_ZZZI_D $Acc, $L, $R, 0)>;
-  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
-                (AArch64trn1 nxv2f64:$R, nxv2f64:$R),
-                (AArch64fneg_mt(SVEAllActive), nxv2f64:$L, (undef)))),
+  def : Pat<(nxv2f64 (AArch64fmla_p (SVEAllActive), nxv2f64:$Acc, (AArch64trn1 nxv2f64:$R, nxv2f64:$R),(AArch64fneg_mt (SVEAllActive), nxv2f64:$L, (undef)))),
             (FMLS_ZZZI_D $Acc, $L, $R, 0)>;
-  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
-                (AArch64fneg_mt(SVEAllActive), nxv2f64:$L, (undef)),
-                (AArch64trn2 nxv2f64:$R, nxv2f64:$R))),
+  def : Pat<(nxv2f64 (AArch64fmla_p (SVEAllActive), nxv2f64:$Acc, (AArch64fneg_mt (SVEAllActive), nxv2f64:$L, (undef)), (AArch64trn2 nxv2f64:$R, nxv2f64:$R))),
             (FMLS_ZZZI_D $Acc, $L, $R, 1)>;
-  def : Pat<(nxv2f64(AArch64fmla_p(SVEAllActive), nxv2f64:$Acc,
-                (AArch64trn2 nxv2f64:$R, nxv2f64:$R),
-                (AArch64fneg_mt(SVEAllActive), nxv2f64:$L, (undef)))),
+  def : Pat<(nxv2f64 (AArch64fmla_p (SVEAllActive), nxv2f64:$Acc, (AArch64trn2 nxv2f64:$R, nxv2f64:$R), (AArch64fneg_mt (SVEAllActive), nxv2f64:$L, (undef)))),
             (FMLS_ZZZI_D $Acc, $L, $R, 1)>;
 } // End HasSVE_or_SME
 
@@ -4443,14 +4409,11 @@ defm BFMUL_ZZZI : sve_fp_fmul_by_indexed_elem_bfloat<"bfmul", int_aarch64_sve_fm
 defm BFCLAMP_ZZZ : sve_fp_clamp_bfloat<"bfclamp", AArch64fclamp>;
 
 // Fold segmented lane splats in where possible.
-def : Pat<(nxv8bf16(AArch64fmulidx nxv8bf16:$L, nxv8bf16:$R,
-              VectorIndexH32b_timm:$Idx)),
+def : Pat<(nxv8bf16 (AArch64fmulidx nxv8bf16:$L, nxv8bf16:$R, VectorIndexH32b_timm:$Idx)),
           (BFMUL_ZZZI $L, $R, $Idx)>;
-def : Pat<(nxv8bf16(AArch64fmlaidx nxv8bf16:$Acc, nxv8bf16:$L, nxv8bf16:$R,
-              VectorIndexH32b_timm:$Idx)),
+def : Pat<(nxv8bf16 (AArch64fmlaidx nxv8bf16:$Acc, nxv8bf16:$L, nxv8bf16:$R, VectorIndexH32b_timm:$Idx)),
           (BFMLA_ZZZI $Acc, $L, $R, $Idx)>;
-def : Pat<(nxv8bf16(AArch64fmlsidx nxv8bf16:$Acc, nxv8bf16:$L, nxv8bf16:$R,
-              VectorIndexH32b_timm:$Idx)),
+def : Pat<(nxv8bf16 (AArch64fmlsidx nxv8bf16:$Acc, nxv8bf16:$L, nxv8bf16:$R, VectorIndexH32b_timm:$Idx)),
           (BFMLS_ZZZI $Acc, $L, $R, $Idx)>;
 } // End HasSVEB16B16
 
