[clang] [llvm] [AArch64] Implement intrinsics for SME FP8 FMLAL/FMLALL (Indexed) (PR #118549)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 16 11:48:04 PST 2024
https://github.com/SpencerAbson updated https://github.com/llvm/llvm-project/pull/118549
>From 0e858a1a5f270020f20201e9ddf4736d45301b66 Mon Sep 17 00:00:00 2001
From: Spencer Abson <Spencer.Abson at arm.com>
Date: Sun, 15 Dec 2024 20:28:08 +0000
Subject: [PATCH 1/3] [AArch64] Implement intrinsics for SME FP8 FMLAL/FMLALL
(indexed)
---
clang/include/clang/Basic/arm_sme.td | 14 ++
.../fp8-intrinsics/acle_sme2_fp8_mla.c | 129 ++++++++++++++++++
.../acle_sme2_fp8_imm.c | 34 +++++
.../acle_sme2_fp8_mla.c | 26 ++++
llvm/include/llvm/IR/IntrinsicsAArch64.td | 34 +++++
.../lib/Target/AArch64/AArch64SMEInstrInfo.td | 12 +-
llvm/lib/Target/AArch64/SMEInstrFormats.td | 58 +++++---
.../AArch64/sme2-fp8-intrinsics-mla.ll | 116 ++++++++++++++++
8 files changed, 399 insertions(+), 24 deletions(-)
create mode 100644 clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_mla.c
create mode 100644 clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_mla.c
create mode 100644 llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-mla.ll
diff --git a/clang/include/clang/Basic/arm_sme.td b/clang/include/clang/Basic/arm_sme.td
index 7d506642ce2e8d..372cc1103d179b 100644
--- a/clang/include/clang/Basic/arm_sme.td
+++ b/clang/include/clang/Basic/arm_sme.td
@@ -859,11 +859,25 @@ let SMETargetGuard = "sme-lutv2" in {
let SMETargetGuard = "sme-f8f32" in {
def SVMOPA_FP8_ZA32 : Inst<"svmopa_za32[_mf8]_m_fpm", "viPPdd>", "m", MergeNone, "aarch64_sme_fp8_fmopa_za32",
[IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<0, ImmCheck0_3>]>;
+ // FMLALL (indexed)
+ def SVMLA_FP8_ZA32_VG4x1 : Inst<"svmla_lane_za32[_mf8]_vg4x1_fpm", "vmddi>", "m", MergeNone, "aarch64_sme_fp8_fmlall_lane_za32_vg4x1",
+ [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLA_FP8_ZA32_VG4x2 : Inst<"svmla_lane_za32[_mf8]_vg4x2_fpm", "vm2di>", "m", MergeNone, "aarch64_sme_fp8_fmlall_lane_za32_vg4x2",
+ [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLA_FP8_ZA32_VG4x4 : Inst<"svmla_lane_za32[_mf8]_vg4x4_fpm", "vm4di>", "m", MergeNone, "aarch64_sme_fp8_fmlall_lane_za32_vg4x4",
+ [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>;
}
let SMETargetGuard = "sme-f8f16" in {
def SVMOPA_FP8_ZA16 : Inst<"svmopa_za16[_mf8]_m_fpm", "viPPdd>", "m", MergeNone, "aarch64_sme_fp8_fmopa_za16",
[IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<0, ImmCheck0_1>]>;
+ // FMLAL (indexed)
+ def SVMLA_FP8_ZA16_VG2x1 : Inst<"svmla_lane_za16[_mf8]_vg2x1_fpm", "vmddi>", "m", MergeNone, "aarch64_sme_fp8_fmlal_lane_za16_vg2x1",
+ [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLA_FP8_ZA16_VG2x2 : Inst<"svmla_lane_za16[_mf8]_vg2x2_fpm", "vm2di>", "m", MergeNone, "aarch64_sme_fp8_fmlal_lane_za16_vg2x2",
+ [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLA_FP8_ZA16_VG2x4 : Inst<"svmla_lane_za16[_mf8]_vg2x4_fpm", "vm4di>", "m", MergeNone, "aarch64_sme_fp8_fmlal_lane_za16_vg2x4",
+ [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>;
}
} // let SVETargetGuard = InvalidMode
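As a rough illustration of the new ACLE surface (a minimal sketch; the function
name is illustrative, the lane immediate must be a constant in [0, 15], and the
trailing fpm_t value seeds FPMR):

  #include <arm_sme.h>

  void mla_example(uint32_t slice, svmfloat8x2_t zn, svmfloat8_t zm, fpm_t fpm)
      __arm_streaming __arm_inout("za") {
    // FMLAL: FP8 widening multiply-add into pairs of ZA.H slices (vgx2 group).
    svmla_lane_za16_mf8_vg2x2_fpm(slice, zn, zm, 7, fpm);
    // FMLALL: FP8 widening multiply-add into groups of four ZA.S slices (vgx2 group).
    svmla_lane_za32_mf8_vg4x2_fpm(slice, zn, zm, 7, fpm);
  }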
diff --git a/clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_mla.c b/clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_mla.c
new file mode 100644
index 00000000000000..722ccc9f5d5b0d
--- /dev/null
+++ b/clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_mla.c
@@ -0,0 +1,129 @@
+
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: aarch64-registered-target
+
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSME_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSME_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+
+#include <arm_sme.h>
+
+#ifdef SME_OVERLOADED_FORMS
+#define SME_ACLE_FUNC(A1,A2_UNUSED,A3) A1##A3
+#else
+#define SME_ACLE_FUNC(A1,A2,A3) A1##A2##A3
+#endif
+
+// FMLAL (indexed)
+
+// CHECK-LABEL: define dso_local void @test_svmla_lane_za16_vg2x1(
+// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x1(i32 [[SLICE]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0)
+// CHECK-NEXT: ret void
+//
+// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za16_vg2x1ju13__SVMfloat8_tS_m(
+// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0:[0-9]+]] {
+// CPP-CHECK-NEXT: [[ENTRY:.*:]]
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x1(i32 [[SLICE]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0)
+// CPP-CHECK-NEXT: ret void
+//
+void test_svmla_lane_za16_vg2x1(uint32_t slice, svmfloat8_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") {
+ SME_ACLE_FUNC(svmla_lane_za16,_mf8,_vg2x1_fpm)(slice, zn, zm, 0, fpm);
+}
+
+// CHECK-LABEL: define dso_local void @test_svmla_lane_za16_vg2x2(
+// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 15)
+// CHECK-NEXT: ret void
+//
+// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za16_vg2x2j13svmfloat8x2_tu13__SVMfloat8_tm(
+// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT: [[ENTRY:.*:]]
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 15)
+// CPP-CHECK-NEXT: ret void
+//
+void test_svmla_lane_za16_vg2x2(uint32_t slice, svmfloat8x2_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") {
+ SME_ACLE_FUNC(svmla_lane_za16,_mf8,_vg2x2_fpm)(slice, zn, zm, 15, fpm);
+}
+
+// CHECK-LABEL: define dso_local void @test_svmla_lane_za16_vg2x4(
+// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 7)
+// CHECK-NEXT: ret void
+//
+// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za16_vg2x4j13svmfloat8x4_tu13__SVMfloat8_tm(
+// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT: [[ENTRY:.*:]]
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 7)
+// CPP-CHECK-NEXT: ret void
+//
+void test_svmla_lane_za16_vg2x4(uint32_t slice, svmfloat8x4_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") {
+ SME_ACLE_FUNC(svmla_lane_za16,_mf8,_vg2x4_fpm)(slice, zn, zm, 7, fpm);
+}
+
+// FMLALL (indexed)
+
+// CHECK-LABEL: define dso_local void @test_svmla_lane_za32_vg4x1(
+// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x1(i32 [[SLICE]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0)
+// CHECK-NEXT: ret void
+//
+// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za32_vg4x1ju13__SVMfloat8_tS_m(
+// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT: [[ENTRY:.*:]]
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x1(i32 [[SLICE]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0)
+// CPP-CHECK-NEXT: ret void
+//
+void test_svmla_lane_za32_vg4x1(uint32_t slice, svmfloat8_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") {
+ SME_ACLE_FUNC(svmla_lane_za32,_mf8,_vg4x1_fpm)(slice, zn, zm, 0, fpm);
+}
+
+// CHECK-LABEL: define dso_local void @test_svmla_lane_za32_vg4x2(
+// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 15)
+// CHECK-NEXT: ret void
+//
+// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za32_vg4x2j13svmfloat8x2_tu13__SVMfloat8_tm(
+// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT: [[ENTRY:.*:]]
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 15)
+// CPP-CHECK-NEXT: ret void
+//
+void test_svmla_lane_za32_vg4x2(uint32_t slice, svmfloat8x2_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") {
+ SME_ACLE_FUNC(svmla_lane_za32,_mf8,_vg4x2_fpm)(slice, zn, zm, 15, fpm);
+}
+
+// CHECK-LABEL: define dso_local void @test_svmla_lane_za32_vg4x4(
+// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 7)
+// CHECK-NEXT: ret void
+//
+// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za32_vg4x4j13svmfloat8x4_tu13__SVMfloat8_tm(
+// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT: [[ENTRY:.*:]]
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]])
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 7)
+// CPP-CHECK-NEXT: ret void
+//
+void test_svmla_lane_za32_vg4x4(uint32_t slice, svmfloat8x4_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") {
+ SME_ACLE_FUNC(svmla_lane_za32,_mf8,_vg4x4_fpm)(slice, zn, zm, 7, fpm);
+}
\ No newline at end of file
diff --git a/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c
index 62cad9cfa4c8fd..bea0b29bcc70a8 100644
--- a/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c
+++ b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c
@@ -16,3 +16,37 @@ void test_svmopa(svbool_t pn, svbool_t pm, svmfloat8_t zn, svmfloat8_t zm,
// expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}}
svmopa_za32_mf8_m_fpm(4, pn, pm, zn, zm, fpmr);
}
+
+void test_svmla(uint32_t slice, svmfloat8_t zn, svmfloat8x2_t znx2, svmfloat8x4_t znx4,
+ fpm_t fpmr) __arm_streaming __arm_inout("za") {
+ // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}}
+ svmla_lane_za16_mf8_vg2x1_fpm(slice, zn, zn, -1, fpmr);
+ // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}}
+ svmla_lane_za16_mf8_vg2x1_fpm(slice, zn, zn, 16, fpmr);
+
+ // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}}
+ svmla_lane_za16_mf8_vg2x2_fpm(slice, znx2, zn, -1, fpmr);
+ // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}}
+ svmla_lane_za16_mf8_vg2x2_fpm(slice, znx2, zn, 16, fpmr);
+
+ // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}}
+ svmla_lane_za16_mf8_vg2x4_fpm(slice, znx4, zn, -1, fpmr);
+ // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}}
+ svmla_lane_za16_mf8_vg2x4_fpm(slice, znx4, zn, 16, fpmr);
+
+ // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}}
+ svmla_lane_za32_mf8_vg4x1_fpm(slice, zn, zn, -1, fpmr);
+ // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}}
+ svmla_lane_za32_mf8_vg4x1_fpm(slice, zn, zn, 16, fpmr);
+
+ // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}}
+ svmla_lane_za32_mf8_vg4x2_fpm(slice, znx2, zn, -1, fpmr);
+ // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}}
+ svmla_lane_za32_mf8_vg4x2_fpm(slice, znx2, zn, 16, fpmr);
+
+ // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}}
+ svmla_lane_za32_mf8_vg4x4_fpm(slice, znx4, zn, -1, fpmr);
+ // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}}
+ svmla_lane_za32_mf8_vg4x4_fpm(slice, znx4, zn, 16, fpmr);
+
+}
\ No newline at end of file
diff --git a/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_mla.c b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_mla.c
new file mode 100644
index 00000000000000..25255f1f469b2b
--- /dev/null
+++ b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_mla.c
@@ -0,0 +1,26 @@
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -verify -emit-llvm-only %s
+
+// REQUIRES: aarch64-registered-target
+
+#include <arm_sme.h>
+
+void test_svmla(uint32_t slice, svmfloat8_t zn, svmfloat8x2_t znx2, svmfloat8x4_t znx4,
+ fpm_t fpmr) __arm_streaming __arm_inout("za") {
+ // expected-error@+1 {{'svmla_lane_za16_mf8_vg2x1_fpm' needs target feature sme,sme-f8f16}}
+ svmla_lane_za16_mf8_vg2x1_fpm(slice, zn, zn, 0, fpmr);
+
+ // expected-error@+1 {{'svmla_lane_za16_mf8_vg2x2_fpm' needs target feature sme,sme-f8f16}}
+ svmla_lane_za16_mf8_vg2x2_fpm(slice, znx2, zn, 0, fpmr);
+
+ // expected-error@+1 {{'svmla_lane_za16_mf8_vg2x4_fpm' needs target feature sme,sme-f8f16}}
+ svmla_lane_za16_mf8_vg2x4_fpm(slice, znx4, zn, 0, fpmr);
+
+ // expected-error@+1 {{'svmla_lane_za32_mf8_vg4x1_fpm' needs target feature sme,sme-f8f32}}
+ svmla_lane_za32_mf8_vg4x1_fpm(slice, zn, zn, 0, fpmr);
+
+ // expected-error@+1 {{'svmla_lane_za32_mf8_vg4x2_fpm' needs target feature sme,sme-f8f32}}
+ svmla_lane_za32_mf8_vg4x2_fpm(slice, znx2, zn, 0, fpmr);
+
+ // expected-error@+1 {{'svmla_lane_za32_mf8_vg4x4_fpm' needs target feature sme,sme-f8f32}}
+ svmla_lane_za32_mf8_vg4x4_fpm(slice, znx4, zn, 0, fpmr);
+}
\ No newline at end of file
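For reference, the same calls are accepted once the required features are
enabled, mirroring the RUN lines of the CodeGen test above (a sketch only;
the function name is illustrative):

  // %clang_cc1 ... -target-feature +sme -target-feature +sme-f8f16
  #include <arm_sme.h>

  void test_ok(uint32_t slice, svmfloat8_t zn, fpm_t fpm)
      __arm_streaming __arm_inout("za") {
    svmla_lane_za16_mf8_vg2x1_fpm(slice, zn, zn, 0, fpm); // no diagnostic
  }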
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index a26dff8d81951b..679d602695f095 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -4003,6 +4003,27 @@ let TargetPrefix = "aarch64" in {
llvm_nxv16i1_ty, llvm_nxv16i1_ty,
llvm_nxv16i8_ty, llvm_nxv16i8_ty],
[ImmArg<ArgIndex<0>>, IntrInaccessibleMemOnly, IntrHasSideEffects]>;
+
+ class SME_FP8_ZA_LANE_VGx1_Intrinsic
+ : DefaultAttrsIntrinsic<[], [llvm_i32_ty,
+ llvm_nxv16i8_ty,
+ llvm_nxv16i8_ty,
+ llvm_i32_ty],
+ [IntrInaccessibleMemOnly, IntrHasSideEffects, ImmArg<ArgIndex<3>>]>;
+
+ class SME_FP8_ZA_LANE_VGx2_Intrinsic
+ : DefaultAttrsIntrinsic<[], [llvm_i32_ty,
+ llvm_nxv16i8_ty, llvm_nxv16i8_ty,
+ llvm_nxv16i8_ty,
+ llvm_i32_ty],
+ [IntrInaccessibleMemOnly, IntrHasSideEffects, ImmArg<ArgIndex<4>>]>;
+
+ class SME_FP8_ZA_LANE_VGx4_Intrinsic
+ : DefaultAttrsIntrinsic<[], [llvm_i32_ty,
+ llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty,
+ llvm_nxv16i8_ty,
+ llvm_i32_ty],
+ [IntrInaccessibleMemOnly, IntrHasSideEffects, ImmArg<ArgIndex<6>>]>;
//
// CVT from FP8 to half-precision/BFloat16 multi-vector
//
@@ -4029,4 +4050,17 @@ let TargetPrefix = "aarch64" in {
// FP8 outer product
def int_aarch64_sme_fp8_fmopa_za16 : SME_FP8_OuterProduct_Intrinsic;
def int_aarch64_sme_fp8_fmopa_za32 : SME_FP8_OuterProduct_Intrinsic;
+
+ //
+ // ZA multiply-add
+ //
+ // Double-vector groups (F8F16)
+ def int_aarch64_sme_fp8_fmlal_lane_za16_vg2x1 : SME_FP8_ZA_LANE_VGx1_Intrinsic;
+ def int_aarch64_sme_fp8_fmlal_lane_za16_vg2x2 : SME_FP8_ZA_LANE_VGx2_Intrinsic;
+ def int_aarch64_sme_fp8_fmlal_lane_za16_vg2x4 : SME_FP8_ZA_LANE_VGx4_Intrinsic;
+
+ // Quad-vector groups (F8F32)
+ def int_aarch64_sme_fp8_fmlall_lane_za32_vg4x1 : SME_FP8_ZA_LANE_VGx1_Intrinsic;
+ def int_aarch64_sme_fp8_fmlall_lane_za32_vg4x2 : SME_FP8_ZA_LANE_VGx2_Intrinsic;
+ def int_aarch64_sme_fp8_fmlall_lane_za32_vg4x4 : SME_FP8_ZA_LANE_VGx4_Intrinsic;
}
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index bc995269ba1bb0..4e52084d345e2f 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -994,9 +994,9 @@ defm FDOT_VG4_M4ZZ_BtoH : sme2_fp8_fdot_single_vg1x4<"fdot", 0b0110001, Matrix
defm FDOT_VG2_M2Z2Z_BtoH : sme2_fp8_fdot_multi_vg1x2 <"fdot", 0b0100100, MatrixOp16, int_aarch64_sme_fp8_fdot_multi_za16_vg1x2>;
defm FDOT_VG4_M4Z4Z_BtoH : sme2_fp8_fdot_multi_vg1x4 <"fdot", 0b0100100, MatrixOp16, int_aarch64_sme_fp8_fdot_multi_za16_vg1x4>;
-def FMLAL_MZZI_BtoH : sme2_mla_ll_array_index_16b<"fmlal", 0b11, 0b00>;
-defm FMLAL_VG2_M2ZZI_BtoH : sme2_multi_vec_array_vg2_index_16b<"fmlal", 0b10, 0b111>;
-defm FMLAL_VG4_M4ZZI_BtoH : sme2_multi_vec_array_vg4_index_16b<"fmlal", 0b10, 0b110>;
+defm FMLAL_MZZI_BtoH : sme2_fp8_fmlal_index_za16<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x1>;
+defm FMLAL_VG2_M2ZZI_BtoH : sme2_fp8_fmlal_index_za16_vgx2<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x2>;
+defm FMLAL_VG4_M4ZZI_BtoH : sme2_fp8_fmlal_index_za16_vgx4<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x4>;
def FMLAL_VG2_MZZ_BtoH : sme2_mla_long_array_single_16b<"fmlal">;
defm FMLAL_VG2_M2ZZ_BtoH : sme2_fp_mla_long_array_vg2_single<"fmlal", 0b001, MatrixOp16, ZZ_b, ZPR4b8, nxv16i8, null_frag>;
@@ -1019,9 +1019,9 @@ defm FDOT_VG4_M4Z4Z_BtoS : sme2_fp8_fdot_multi_vg1x4 <"fdot", 0b0100110, MatrixO
defm FVDOTB_VG4_M2ZZI_BtoS : sme2_fp8_fdotv_index_za32_vg1x4<"fvdotb", 0b0, int_aarch64_sme_fp8_fvdotb_lane_za32_vg1x4>;
defm FVDOTT_VG4_M2ZZI_BtoS : sme2_fp8_fdotv_index_za32_vg1x4<"fvdott", 0b1, int_aarch64_sme_fp8_fvdott_lane_za32_vg1x4>;
-defm FMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"fmlall", 0b01, 0b000, null_frag>;
-defm FMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"fmlall", 0b10, 0b100, null_frag>;
-defm FMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"fmlall", 0b00, 0b1000, null_frag>;
+defm FMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"fmlall", 0b01, 0b000, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x1, [FPMR, FPCR]>;
+defm FMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"fmlall", 0b10, 0b100, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x2, [FPMR, FPCR]>;
+defm FMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"fmlall", 0b00, 0b1000, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x4, [FPMR, FPCR]>;
defm FMLALL_MZZ_BtoS : sme2_mla_ll_array_single<"fmlall", 0b01000, MatrixOp32, ZPR8, ZPR4b8, nxv16i8, null_frag>;
defm FMLALL_VG2_M2ZZ_BtoS : sme2_mla_ll_array_vg24_single<"fmlall", 0b000001, MatrixOp32, ZZ_b, ZPR4b8>;
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 87e4a5805f0baf..9f0cbc427333e5 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -2920,7 +2920,7 @@ class sme2_multi_vec_array_vg24_index_16b<bits<2> sz, bit vg4, bits<3> op,
RegisterOperand multi_vector_ty, string mnemonic>
: I<(outs MatrixOp16:$ZAda),
(ins MatrixOp16:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm2,
- multi_vector_ty:$Zn, ZPR4b8:$Zm, VectorIndexB:$i),
+ multi_vector_ty:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i),
mnemonic, "\t$ZAda[$Rv, $imm2, " # !if(vg4, "vgx4", "vgx2") # "], $Zn, $Zm$i",
"", []>, Sched<[]> {
bits<4> Zm;
@@ -2942,30 +2942,39 @@ class sme2_multi_vec_array_vg24_index_16b<bits<2> sz, bit vg4, bits<3> op,
let Constraints = "$ZAda = $_ZAda";
}
-multiclass sme2_multi_vec_array_vg2_index_16b<string mnemonic, bits<2> sz, bits<3>op> {
- def NAME : sme2_multi_vec_array_vg24_index_16b<sz, 0b0, op, ZZ_b_mul_r, mnemonic> {
+multiclass sme2_fp8_fmlal_index_za16_vgx2<string mnemonic, SDPatternOperator intrinsic> {
+ def NAME : sme2_multi_vec_array_vg24_index_16b<0b10, 0b0, 0b111, ZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
bits<4> Zn;
let Inst{9-6} = Zn;
- }
- def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm2], $Zn, $Zm$i",
- (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv,
- uimm2s2range:$imm2, ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB:$i), 0>;
+ }
+ def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm2s2range, ZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
+
+ def : SME2_ZA_TwoOp_VG2_Multi_Index_Pat<NAME, intrinsic, uimm2s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange2s2>;
+
+ def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm2], $Zn, $Zm$i",
+ (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm2,
+ ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i), 0>;
}
-multiclass sme2_multi_vec_array_vg4_index_16b<string mnemonic, bits<2>sz, bits<3>op> {
- def NAME: sme2_multi_vec_array_vg24_index_16b<sz, 0b1, op, ZZZZ_b_mul_r, mnemonic> {
+multiclass sme2_fp8_fmlal_index_za16_vgx4<string mnemonic, SDPatternOperator intrinsic> {
+ def NAME: sme2_multi_vec_array_vg24_index_16b<0b10, 0b1, 0b110, ZZZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
bits<3> Zn;
let Inst{9-7} = Zn;
let Inst{6} = 0b0;
}
- def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm2], $Zn, $Zm$i",
- (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv,
- uimm2s2range:$imm2, ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB:$i), 0>;
+
+ def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm2s2range, ZZZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
+
+ def : SME2_ZA_TwoOp_VG4_Multi_Index_Pat<NAME, intrinsic, uimm2s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange2s2>;
+
+ def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm], $Zn, $Zm$i",
+ (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm,
+ ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i), 0>;
}
//===----------------------------------------------------------------------===//
-// SME2 multi-vec indexed long long MLA one source 16-bit
-class sme2_mla_ll_array_index_16b<string mnemonic, bits<2> sz,bits<2> op>
+// FMLAL (single and indexed vector, FP8 to FP16)
+class sme2_fp8_fmlal_index_za16<string mnemonic, bits<2> sz,bits<2> op>
: I<(outs MatrixOp16:$ZAda),
(ins MatrixOp16:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm3s2range:$imm3, ZPR8:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i),
mnemonic, "\t$ZAda[$Rv, $imm3], $Zn, $Zm$i",
@@ -2988,9 +2997,18 @@ class sme2_mla_ll_array_index_16b<string mnemonic, bits<2> sz,bits<2> op>
let Inst{3} = i{0};
let Inst{2-0} = imm3;
+ let Uses = [FPMR, FPCR];
let Constraints = "$ZAda = $_ZAda";
}
+multiclass sme2_fp8_fmlal_index_za16<string mnemonic, SDPatternOperator intrinsic> {
+ def NAME : sme2_fp8_fmlal_index_za16<mnemonic, 0b11, 0b00>, SMEPseudo2Instr<NAME, 1>;
+
+ def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm3s2range, ZPR8, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
+
+ def : SME2_ZA_TwoOp_Multi_Index_Pat<NAME, intrinsic, uimm3s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange3s2>;
+}
+
// SME2 multi-vec indexed long long MLA one source 32-bit
class sme2_mla_ll_array_index_32b<string mnemonic, bits<2> sz, bits<3> op>
: I<(outs MatrixOp32:$ZAda),
@@ -3016,8 +3034,10 @@ class sme2_mla_ll_array_index_32b<string mnemonic, bits<2> sz, bits<3> op>
let Constraints = "$ZAda = $_ZAda";
}
-multiclass sme2_mla_ll_array_index_32b<string mnemonic, bits<2> sz, bits<3> op, SDPatternOperator intrinsic> {
- def NAME : sme2_mla_ll_array_index_32b<mnemonic, sz, op>, SMEPseudo2Instr<NAME, 1>;
+multiclass sme2_mla_ll_array_index_32b<string mnemonic, bits<2> sz, bits<3> op, SDPatternOperator intrinsic, list<Register> uses=[]> {
+ def NAME : sme2_mla_ll_array_index_32b<mnemonic, sz, op>, SMEPseudo2Instr<NAME, 1> {
+ let Uses = uses;
+ }
def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm2s4range, ZPR8, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
@@ -3087,10 +3107,11 @@ class sme2_mla_ll_array_vg24_index_32b<bits<2> sz, bit vg4, bits<3> op,
//SME2 multi-vec indexed long long MLA two sources 32-bit
-multiclass sme2_mla_ll_array_vg2_index_32b<string mnemonic, bits<2> sz, bits<3> op, SDPatternOperator intrinsic> {
+multiclass sme2_mla_ll_array_vg2_index_32b<string mnemonic, bits<2> sz, bits<3> op, SDPatternOperator intrinsic, list<Register> uses=[]> {
def NAME: sme2_mla_ll_array_vg24_index_32b<sz, 0b0, op, ZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
bits<4> Zn;
let Inst{9-6} = Zn;
+ let Uses = uses;
}
def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm1s4range, ZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
@@ -3103,11 +3124,12 @@ multiclass sme2_mla_ll_array_vg2_index_32b<string mnemonic, bits<2> sz, bits<3>
// SME2 multi-vec indexed long long MLA four sources 32-bit
-multiclass sme2_mla_ll_array_vg4_index_32b<string mnemonic, bits<2> sz, bits<4> op, SDPatternOperator intrinsic> {
+multiclass sme2_mla_ll_array_vg4_index_32b<string mnemonic, bits<2> sz, bits<4> op, SDPatternOperator intrinsic, list<Register> uses=[]> {
def NAME: sme2_mla_ll_array_vg24_index_32b<sz, 0b1, op{2-0}, ZZZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
bits<3> Zn;
let Inst{9-7} = Zn;
let Inst{6} = op{3};
+ let Uses = uses;
}
def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm1s4range, ZZZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
diff --git a/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-mla.ll b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-mla.ll
new file mode 100644
index 00000000000000..03fe7ccf0b9dfc
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-mla.ll
@@ -0,0 +1,116 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter-out "// kill:" --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme-f8f16,+sme-f8f32 -force-streaming < %s | FileCheck %s
+
+; FMLAL (indexed)
+
+define void @test_fmlal_vg2x1(i32 %slice, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlal_vg2x1:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlal za.h[w8, 0:1], z0.b, z1.b[0]
+; CHECK: fmlal za.h[w8, 14:15], z0.b, z1.b[15]
+; CHECK: ret
+ call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x1(i32 %slice,
+ <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm,
+ i32 0)
+ %add = add i32 %slice, 14
+ call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x1(i32 %add,
+ <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm,
+ i32 15)
+ ret void
+}
+
+define void @test_fmlal_vg2x2(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlal_vg2x2:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlal za.h[w8, 0:1, vgx2], { z0.b, z1.b }, z2.b[0]
+; CHECK: fmlal za.h[w8, 6:7, vgx2], { z0.b, z1.b }, z2.b[15]
+; CHECK: ret
+ call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x2(i32 %slice,
+ <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1,
+ <vscale x 16 x i8> %zm,
+ i32 0)
+ %add = add i32 %slice, 6
+ call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x2(i32 %add,
+ <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1,
+ <vscale x 16 x i8> %zm,
+ i32 15)
+ ret void
+}
+
+define void @test_fmlal_vg2x4(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlal_vg2x4:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlal za.h[w8, 0:1, vgx4], { z0.b - z3.b }, z4.b[0]
+; CHECK: fmlal za.h[w8, 6:7, vgx4], { z0.b - z3.b }, z4.b[15]
+; CHECK: ret
+ call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x4(i32 %slice,
+ <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
+ <vscale x 16 x i8> %zm,
+ i32 0)
+ %add = add i32 %slice, 6
+ call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x4(i32 %add,
+ <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
+ <vscale x 16 x i8> %zm,
+ i32 15)
+ ret void
+}
+
+; FMLALL (indexed)
+
+define void @test_fmlall_vg4x1(i32 %slice, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlall_vg4x1:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlall za.s[w8, 0:3], z0.b, z1.b[0]
+; CHECK: fmlall za.s[w8, 12:15], z0.b, z1.b[15]
+; CHECK: ret
+ call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x1(i32 %slice,
+ <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm,
+ i32 0)
+ %add = add i32 %slice, 12
+ call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x1(i32 %add,
+ <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm,
+ i32 15)
+ ret void
+}
+
+define void @test_fmlall_vg4x2(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlall_vg4x2:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlall za.s[w8, 0:3, vgx2], { z0.b, z1.b }, z2.b[0]
+; CHECK: fmlall za.s[w8, 4:7, vgx2], { z0.b, z1.b }, z2.b[15]
+; CHECK: ret
+ call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x2(i32 %slice,
+ <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1,
+ <vscale x 16 x i8> %zm,
+ i32 0)
+ %add = add i32 %slice, 4
+ call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x2(i32 %add,
+ <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1,
+ <vscale x 16 x i8> %zm,
+ i32 15)
+ ret void
+}
+
+define void @test_fmlall_vg4x4(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlall_vg4x4:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlall za.s[w8, 0:3, vgx4], { z0.b - z3.b }, z4.b[8]
+; CHECK: fmlall za.s[w8, 4:7, vgx4], { z0.b - z3.b }, z4.b[15]
+; CHECK: ret
+ call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x4(i32 %slice,
+ <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
+ <vscale x 16 x i8> %zm,
+ i32 8)
+ %add = add i32 %slice, 4
+ call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x4(i32 %add,
+ <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
+ <vscale x 16 x i8> %zm,
+ i32 15)
+ ret void
+}
\ No newline at end of file
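Each vg2x1 call above updates two consecutive ZA.H slices starting at the given
slice offset (0:1 and 14:15 in the first test), and the FMLALL forms update four
consecutive ZA.S slices. The same offset pattern from C, as a sketch (names
illustrative):

  #include <arm_sme.h>

  void offsets(uint32_t base, svmfloat8_t zn, svmfloat8_t zm, fpm_t fpm)
      __arm_streaming __arm_inout("za") {
    svmla_lane_za16_mf8_vg2x1_fpm(base, zn, zm, 0, fpm);       // ZA.H slices base+0:1
    svmla_lane_za16_mf8_vg2x1_fpm(base + 14, zn, zm, 15, fpm); // ZA.H slices base+14:15
  }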
>From 149258eb3a93db3749ea666af19845ad4218b9d0 Mon Sep 17 00:00:00 2001
From: Spencer Abson <Spencer.Abson at arm.com>
Date: Sun, 15 Dec 2024 20:57:37 +0000
Subject: [PATCH 2/3] Fixup tablegen
---
llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td | 4 ++--
llvm/lib/Target/AArch64/SMEInstrFormats.td | 7 ++++---
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index 4e52084d345e2f..1ed16553908430 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -1019,8 +1019,8 @@ defm FDOT_VG4_M4Z4Z_BtoS : sme2_fp8_fdot_multi_vg1x4 <"fdot", 0b0100110, MatrixO
defm FVDOTB_VG4_M2ZZI_BtoS : sme2_fp8_fdotv_index_za32_vg1x4<"fvdotb", 0b0, int_aarch64_sme_fp8_fvdotb_lane_za32_vg1x4>;
defm FVDOTT_VG4_M2ZZI_BtoS : sme2_fp8_fdotv_index_za32_vg1x4<"fvdott", 0b1, int_aarch64_sme_fp8_fvdott_lane_za32_vg1x4>;
-defm FMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"fmlall", 0b01, 0b000, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x1, [FPMR, FPCR]>;
-defm FMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"fmlall", 0b10, 0b100, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x2, [FPMR, FPCR]>;
+defm FMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"fmlall", 0b01, 0b000, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x1, [FPMR, FPCR]>;
+defm FMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"fmlall", 0b10, 0b100, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x2, [FPMR, FPCR]>;
defm FMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"fmlall", 0b00, 0b1000, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x4, [FPMR, FPCR]>;
defm FMLALL_MZZ_BtoS : sme2_mla_ll_array_single<"fmlall", 0b01000, MatrixOp32, ZPR8, ZPR4b8, nxv16i8, null_frag>;
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 9f0cbc427333e5..9708560438c7fe 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -2916,7 +2916,7 @@ multiclass sme2_multi_vec_array_vg4_index_64b<string mnemonic, bits<3> op,
}
// FMLAL (multiple and indexed vector, FP8 to FP16)
-class sme2_multi_vec_array_vg24_index_16b<bits<2> sz, bit vg4, bits<3> op,
+class sme2_fp8_fmlal_vg24_index_za16<bits<2> sz, bit vg4, bits<3> op,
RegisterOperand multi_vector_ty, string mnemonic>
: I<(outs MatrixOp16:$ZAda),
(ins MatrixOp16:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm2,
@@ -2939,11 +2939,12 @@ class sme2_multi_vec_array_vg24_index_16b<bits<2> sz, bit vg4, bits<3> op,
let Inst{3-2} = i{1-0};
let Inst{1-0} = imm2;
+ let Uses = [FPMR, FPCR];
let Constraints = "$ZAda = $_ZAda";
}
multiclass sme2_fp8_fmlal_index_za16_vgx2<string mnemonic, SDPatternOperator intrinsic> {
- def NAME : sme2_multi_vec_array_vg24_index_16b<0b10, 0b0, 0b111, ZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
+ def NAME : sme2_fp8_fmlal_vg24_index_za16<0b10, 0b0, 0b111, ZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
bits<4> Zn;
let Inst{9-6} = Zn;
}
@@ -2957,7 +2958,7 @@ multiclass sme2_fp8_fmlal_index_za16_vgx2<string mnemonic, SDPatternOperator int
}
multiclass sme2_fp8_fmlal_index_za16_vgx4<string mnemonic, SDPatternOperator intrinsic> {
- def NAME: sme2_multi_vec_array_vg24_index_16b<0b10, 0b1, 0b110, ZZZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
+ def NAME: sme2_fp8_fmlal_vg24_index_za16<0b10, 0b1, 0b110, ZZZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
bits<3> Zn;
let Inst{9-7} = Zn;
let Inst{6} = 0b0;
>From b7e4be94a6247fe21d338e354fc7a94e8836586c Mon Sep 17 00:00:00 2001
From: Spencer Abson <Spencer.Abson at arm.com>
Date: Mon, 16 Dec 2024 19:41:23 +0000
Subject: [PATCH 3/3] [NFC] Refactor FDOT intrinsic definitions
---
llvm/include/llvm/IR/IntrinsicsAArch64.td | 40 +++++++++--------------
1 file changed, 15 insertions(+), 25 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 679d602695f095..3dc0a6218fc362 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -3860,20 +3860,6 @@ def int_aarch64_neon_famin : AdvSIMD_2VectorArg_Intrinsic;
// SME FP8 FDOT intrinsics
let TargetPrefix = "aarch64" in {
-class SME2_FP8_FDOT_LANE_VG1x2 :
- DefaultAttrsIntrinsic<[], [llvm_i32_ty,
- llvm_nxv16i8_ty, llvm_nxv16i8_ty,
- llvm_nxv16i8_ty,
- llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrHasSideEffects, ImmArg<ArgIndex<4>>]>;
-
-class SME2_FP8_FDOT_LANE_VG1x4 :
- DefaultAttrsIntrinsic<[], [llvm_i32_ty,
- llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty,
- llvm_nxv16i8_ty,
- llvm_i32_ty],
- [IntrInaccessibleMemOnly, IntrHasSideEffects, ImmArg<ArgIndex<6>>]>;
-
class SME2_FP8_FDOT_SINGLE_VG1x2 :
DefaultAttrsIntrinsic<[], [llvm_i32_ty,
llvm_nxv16i8_ty, llvm_nxv16i8_ty,
@@ -3898,17 +3884,6 @@ class SME2_FP8_FDOT_MULTI_VG1x4 :
llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty],
[IntrInaccessibleMemOnly, IntrHasSideEffects]>;
- def int_aarch64_sme_fp8_fdot_lane_za16_vg1x2 : SME2_FP8_FDOT_LANE_VG1x2;
- def int_aarch64_sme_fp8_fdot_lane_za16_vg1x4 : SME2_FP8_FDOT_LANE_VG1x4;
-
- def int_aarch64_sme_fp8_fdot_lane_za32_vg1x2 : SME2_FP8_FDOT_LANE_VG1x2;
- def int_aarch64_sme_fp8_fdot_lane_za32_vg1x4 : SME2_FP8_FDOT_LANE_VG1x4;
-
- def int_aarch64_sme_fp8_fvdot_lane_za16_vg1x2 : SME2_FP8_FDOT_LANE_VG1x2;
-
- def int_aarch64_sme_fp8_fvdotb_lane_za32_vg1x4 : SME2_FP8_FDOT_LANE_VG1x2;
- def int_aarch64_sme_fp8_fvdott_lane_za32_vg1x4 : SME2_FP8_FDOT_LANE_VG1x2;
-
def int_aarch64_sme_fp8_fdot_single_za16_vg1x2 : SME2_FP8_FDOT_SINGLE_VG1x2;
def int_aarch64_sme_fp8_fdot_single_za16_vg1x4 : SME2_FP8_FDOT_SINGLE_VG1x4;
@@ -4063,4 +4038,19 @@ let TargetPrefix = "aarch64" in {
def int_aarch64_sme_fp8_fmlall_lane_za32_vg4x1 : SME_FP8_ZA_LANE_VGx1_Intrinsic;
def int_aarch64_sme_fp8_fmlall_lane_za32_vg4x2 : SME_FP8_ZA_LANE_VGx2_Intrinsic;
def int_aarch64_sme_fp8_fmlall_lane_za32_vg4x4 : SME_FP8_ZA_LANE_VGx4_Intrinsic;
+
+ //
+ // FP8 FDOT intrinsics
+ //
+ // (indexed)
+ def int_aarch64_sme_fp8_fdot_lane_za16_vg1x2 : SME_FP8_ZA_LANE_VGx2_Intrinsic;
+ def int_aarch64_sme_fp8_fdot_lane_za32_vg1x2 : SME_FP8_ZA_LANE_VGx2_Intrinsic;
+
+ def int_aarch64_sme_fp8_fdot_lane_za16_vg1x4 : SME_FP8_ZA_LANE_VGx4_Intrinsic;
+ def int_aarch64_sme_fp8_fdot_lane_za32_vg1x4 : SME_FP8_ZA_LANE_VGx4_Intrinsic;
+
+ // FVDOT
+ def int_aarch64_sme_fp8_fvdot_lane_za16_vg1x2 : SME_FP8_ZA_LANE_VGx2_Intrinsic;
+ def int_aarch64_sme_fp8_fvdotb_lane_za32_vg1x4 : SME_FP8_ZA_LANE_VGx2_Intrinsic;
+ def int_aarch64_sme_fp8_fvdott_lane_za32_vg1x4 : SME_FP8_ZA_LANE_VGx2_Intrinsic;
}