[llvm] [clang] [SME2] Add LUTI2 and LUTI4 double Builtins and Intrinsics (PR #73305)
Matthew Devereau via cfe-commits
cfe-commits at lists.llvm.org
Wed Nov 29 10:23:42 PST 2023
https://github.com/MDevereau updated https://github.com/llvm/llvm-project/pull/73305
From 5aba2f1d2fe34f721a8e85eef6eecc25cb60851f Mon Sep 17 00:00:00 2001
From: Matt Devereau <matthew.devereau at arm.com>
Date: Mon, 20 Nov 2023 15:50:28 +0000
Subject: [PATCH 1/5] [SME2] Add LUTI2 and LUTI4 double Builtins and Intrinsics
See https://github.com/ARM-software/acle/pull/217
Patch by: Hassnaa Hamdi <hassnaa.hamdi at arm.com>
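
For reference, the new builtins are used as follows. This is an illustrative
sketch only: the calls, types, and attribute spellings are taken from the
CodeGen and Sema tests below, the wrapper function names are invented for the
example, and the lane immediate ranges follow the ImmCheck0_7/ImmCheck0_3
checks (the first argument, the ZT0 selector, must currently be 0).

  #include <arm_sme_draft_spec_subject_to_change.h>

  // LUTI2: 2-bit table indices; lane immediate must be in [0, 7].
  svuint8x2_t expand_luti2(svuint8_t zn)
      __arm_streaming __arm_shared_za __arm_preserves_za {
    return svluti2_lane_zt_u8_x2(0, zn, 0);
  }

  // LUTI4: 4-bit table indices; lane immediate must be in [0, 3].
  svuint16x2_t expand_luti4(svuint16_t zn)
      __arm_streaming __arm_shared_za __arm_preserves_za {
    return svluti4_lane_zt_u16_x2(0, zn, 3);
  }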
---
clang/include/clang/Basic/arm_sme.td | 8 +
.../acle_sme2_luti2_lane_zt_x2.c | 153 ++++++++++++++++++
.../acle_sme2_luti4_lane_zt_x2.c | 153 ++++++++++++++++++
.../aarch64-sme2-intrinsics/acle_sme2_imm.cpp | 34 ++++
llvm/include/llvm/IR/IntrinsicsAArch64.td | 10 ++
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 63 ++++++++
.../Target/AArch64/AArch64RegisterInfo.cpp | 6 +
llvm/lib/Target/AArch64/SMEInstrFormats.td | 1 +
.../AArch64/sme2-intrinsics-luti2-lane-x2.ll | 35 ++++
.../AArch64/sme2-intrinsics-luti4-lane-x2.ll | 35 ++++
10 files changed, 498 insertions(+)
create mode 100644 clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti2_lane_zt_x2.c
create mode 100644 clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti4_lane_zt_x2.c
create mode 100644 clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_imm.cpp
create mode 100644 llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll
create mode 100644 llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll
diff --git a/clang/include/clang/Basic/arm_sme.td b/clang/include/clang/Basic/arm_sme.td
index b5655afdf419ecf..c7b9dbacf071c43 100644
--- a/clang/include/clang/Basic/arm_sme.td
+++ b/clang/include/clang/Basic/arm_sme.td
@@ -298,3 +298,11 @@ multiclass ZAAddSub<string n_suffix> {
defm SVADD : ZAAddSub<"add">;
defm SVSUB : ZAAddSub<"sub">;
+
+//
+// lookup table expand two contiguous registers
+//
+let TargetGuard = "sme2" in {
+ def SVLUTI2_LANE_ZT_X2 : Inst<"svluti2_lane_zt[_{d}]_x2", "2.dmdm", "cUcsUsiUi", MergeNone, "aarch64_sme_luti2_lane_zt_x2", [IsStreaming, IsSharedZA, IsPreservesZA], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_7>]>;
+ def SVLUTI4_LANE_ZT_X2 : Inst<"svluti4_lane_zt[_{d}]_x2", "2.dmdm", "cUcsUsiUi", MergeNone, "aarch64_sme_luti4_lane_zt_x2", [IsStreaming, IsSharedZA, IsPreservesZA], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_3>]>;
+}
diff --git a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti2_lane_zt_x2.c b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti2_lane_zt_x2.c
new file mode 100644
index 000000000000000..51adc31c6968e7e
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti2_lane_zt_x2.c
@@ -0,0 +1,153 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+
+// REQUIRES: aarch64-registered-target
+
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+
+#include <arm_sme_draft_spec_subject_to_change.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+
+// CHECK-LABEL: @test_svluti2_lane_zt_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> [[ZN:%.*]], i32 0)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], i64 16)
+// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z23test_svluti2_lane_zt_u8u11__SVUint8_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> [[ZN:%.*]], i32 0)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], i64 16)
+// CPP-CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
+//
+svuint8x2_t test_svluti2_lane_zt_u8(svuint8_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_u8,_x2,)(0, zn, 0);
+}
+
+
+// CHECK-LABEL: @test_svluti2_lane_zt_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> [[ZN:%.*]], i32 0)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], i64 16)
+// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z23test_svluti2_lane_zt_s8u10__SVInt8_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> [[ZN:%.*]], i32 0)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], i64 16)
+// CPP-CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
+//
+svint8x2_t test_svluti2_lane_zt_s8(svint8_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_s8,_x2,)(0, zn, 0);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32 0, <vscale x 8 x i16> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
+// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_u16u12__SVUint16_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32 0, <vscale x 8 x i16> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
+// CPP-CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
+//
+svuint16x2_t test_svluti2_lane_zt_u16(svuint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_u16,_x2,)(0, zn, 7);
+}
+
+
+// CHECK-LABEL: @test_svluti2_lane_zt_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32 0, <vscale x 8 x i16> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
+// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_s16u11__SVInt16_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32 0, <vscale x 8 x i16> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
+// CPP-CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
+//
+svint16x2_t test_svluti2_lane_zt_s16(svint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_s16,_x2,)(0, zn, 7);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32 0, <vscale x 4 x i32> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
+// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_u32u12__SVUint32_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32 0, <vscale x 4 x i32> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
+// CPP-CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
+//
+svuint32x2_t test_svluti2_lane_zt_u32(svuint32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_u32,_x2,)(0, zn, 7);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32 0, <vscale x 4 x i32> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
+// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_s32u11__SVInt32_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32 0, <vscale x 4 x i32> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
+// CPP-CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
+//
+svint32x2_t test_svluti2_lane_zt_s32(svint32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_s32,_x2,)(0, zn, 7);
+}
diff --git a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti4_lane_zt_x2.c b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti4_lane_zt_x2.c
new file mode 100644
index 000000000000000..51adc31c6968e7e
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti4_lane_zt_x2.c
@@ -0,0 +1,153 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+
+// REQUIRES: aarch64-registered-target
+
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+
+#include <arm_sme_draft_spec_subject_to_change.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
+#endif
+
+
+// CHECK-LABEL: @test_svluti2_lane_zt_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> [[ZN:%.*]], i32 0)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], i64 16)
+// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z23test_svluti2_lane_zt_u8u11__SVUint8_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> [[ZN:%.*]], i32 0)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], i64 16)
+// CPP-CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
+//
+svuint8x2_t test_svluti2_lane_zt_u8(svuint8_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_u8,_x2,)(0, zn, 0);
+}
+
+
+// CHECK-LABEL: @test_svluti2_lane_zt_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> [[ZN:%.*]], i32 0)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], i64 16)
+// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z23test_svluti2_lane_zt_s8u10__SVInt8_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> [[ZN:%.*]], i32 0)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], i64 16)
+// CPP-CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
+//
+svint8x2_t test_svluti2_lane_zt_s8(svint8_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_s8,_x2,)(0, zn, 0);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32 0, <vscale x 8 x i16> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
+// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_u16u12__SVUint16_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32 0, <vscale x 8 x i16> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
+// CPP-CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
+//
+svuint16x2_t test_svluti2_lane_zt_u16(svuint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_u16,_x2,)(0, zn, 7);
+}
+
+
+// CHECK-LABEL: @test_svluti2_lane_zt_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32 0, <vscale x 8 x i16> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
+// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_s16u11__SVInt16_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32 0, <vscale x 8 x i16> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
+// CPP-CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
+//
+svint16x2_t test_svluti2_lane_zt_s16(svint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_s16,_x2,)(0, zn, 7);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32 0, <vscale x 4 x i32> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
+// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_u32u12__SVUint32_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32 0, <vscale x 4 x i32> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
+// CPP-CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
+//
+svuint32x2_t test_svluti2_lane_zt_u32(svuint32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_u32,_x2,)(0, zn, 7);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32 0, <vscale x 4 x i32> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
+// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_s32u11__SVInt32_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32 0, <vscale x 4 x i32> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
+// CPP-CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
+//
+svint32x2_t test_svluti2_lane_zt_s32(svint32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return SVE_ACLE_FUNC(svluti2_lane_zt,_s32,_x2,)(0, zn, 7);
+}
diff --git a/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_imm.cpp b/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_imm.cpp
new file mode 100644
index 000000000000000..b3d2cffb375237b
--- /dev/null
+++ b/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_imm.cpp
@@ -0,0 +1,34 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu \
+// RUN: -target-feature +sve2 -target-feature +sme2 -target-feature +sme-i16i64 -target-feature +sme-f64f64 -fsyntax-only -verify %s
+// REQUIRES: aarch64-registered-target
+#include <arm_sme_draft_spec_subject_to_change.h>
+
+void test_svluti2_lane_zt_x2(svuint8_t zn_u8, svuint16_t zn_u16, svuint32_t zn_u32) __arm_streaming __arm_shared_za __arm_preserves_za {
+ // Test Reg Offset
+ svluti2_lane_zt_u8_x2(1, zn_u8, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti2_lane_zt_u8_x2(0, zn_u8, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ // Test Reg Offset
+ svluti2_lane_zt_u16_x2(1, zn_u16, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti2_lane_zt_u16_x2(0, zn_u16, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ // Test Reg Offset
+ svluti2_lane_zt_u32_x2(1, zn_u32, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti2_lane_zt_u32_x2(0, zn_u32, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+}
+
+void test_svluti4_lane_zt_x2(svuint8_t zn_u8, svuint16_t zn_u16, svuint32_t zn_u32) __arm_streaming __arm_shared_za __arm_preserves_za {
+ // Test Reg Offset
+ svluti4_lane_zt_u8_x2(1, zn_u8, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti4_lane_zt_u8_x2(0, zn_u8, 4); // expected-error {{argument value 4 is outside the valid range [0, 3]}}
+ // Test Reg Offset
+ svluti4_lane_zt_u16_x2(1, zn_u16, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti4_lane_zt_u16_x2(0, zn_u16, 4); // expected-error {{argument value 4 is outside the valid range [0, 3]}}
+ // Test Reg Offset
+ svluti4_lane_zt_u32_x2(1, zn_u32, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti4_lane_zt_u32_x2(0, zn_u32, 4); // expected-error {{argument value 4 is outside the valid range [0, 3]}}
+}
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 1b701a91455c946..2b80b56a055d657 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -3459,4 +3459,14 @@ let TargetPrefix = "aarch64" in {
def int_aarch64_sme_ldr_zt : SME_LDR_STR_ZT_Intrinsic;
def int_aarch64_sme_str_zt : SME_LDR_STR_ZT_Intrinsic;
+ //
+ // Lookup table expand two registers
+ //
+ def int_aarch64_sme_luti2_lane_zt_x2
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], [llvm_i32_ty, LLVMMatchType<0>, llvm_i32_ty],
+ [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, IntrReadMem]>;
+ def int_aarch64_sme_luti4_lane_zt_x2
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], [llvm_i32_ty, LLVMMatchType<0>, llvm_i32_ty],
+ [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, IntrReadMem]>;
+
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 7617dccdeee397f..455a966ef08833f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -335,6 +335,20 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
return false;
}
+ template <unsigned BaseReg, unsigned Max>
+ bool ImmToTile(SDValue N, SDValue &Imm) {
+ if (auto *CI = dyn_cast<ConstantSDNode>(N)) {
+ uint64_t C = CI->getZExtValue();
+
+ if (C > Max)
+ return false;
+
+ Imm = CurDAG->getRegister(BaseReg + C, MVT::Other);
+ return true;
+ }
+ return false;
+ }
+
/// Form sequences of consecutive 64/128-bit registers for use in NEON
/// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
/// between 1 and 4 elements. If it contains a single element that is returned
@@ -399,6 +413,9 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
return SelectSVERegRegAddrMode(N, Scale, Base, Offset);
}
+ template <int64_t Max>
+ void SelectMultiVectorLuti(SDNode *Node, unsigned NumOutVecs, unsigned Opc);
+
template <unsigned MaxIdx, unsigned Scale>
bool SelectSMETileSlice(SDValue N, SDValue &Vector, SDValue &Offset) {
return SelectSMETileSlice(N, MaxIdx, Vector, Offset, Scale);
@@ -1859,6 +1876,34 @@ void AArch64DAGToDAGISel::SelectFrintFromVT(SDNode *N, unsigned NumVecs,
SelectUnaryMultiIntrinsic(N, NumVecs, true, Opcode);
}
+template <int64_t Max>
+void AArch64DAGToDAGISel::SelectMultiVectorLuti(SDNode *Node,
+ unsigned NumOutVecs,
+ unsigned Opc) {
+ if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Node->getOperand(4)))
+ if (Imm->getZExtValue() > Max)
+ return;
+
+ SDValue ZtValue;
+ ImmToTile<AArch64::ZT0, 0>(Node->getOperand(2), ZtValue);
+ SDValue Ops[] = {ZtValue, Node->getOperand(3), Node->getOperand(4)};
+ SDLoc DL(Node);
+ EVT VT = Node->getValueType(0);
+
+ SDNode *Instruction =
+ CurDAG->getMachineNode(Opc, DL, {MVT::Untyped, MVT::Other}, Ops);
+ SDValue SuperReg = SDValue(Instruction, 0);
+
+ for (unsigned i = 0; i < NumOutVecs; ++i)
+ ReplaceUses(SDValue(Node, i), CurDAG->getTargetExtractSubreg(
+ AArch64::zsub0 + i, DL, VT, SuperReg));
+
+ // Copy chain
+ unsigned ChainIdx = NumOutVecs;
+ ReplaceUses(SDValue(Node, ChainIdx), SDValue(Instruction, 1));
+ CurDAG->RemoveDeadNode(Node);
+}
+
void AArch64DAGToDAGISel::SelectClamp(SDNode *N, unsigned NumVecs,
unsigned Op) {
SDLoc DL(N);
@@ -5055,6 +5100,24 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
return;
}
+ case Intrinsic::aarch64_sme_luti2_lane_zt_x2: {
+ if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
+ Node->getValueType(0),
+ {AArch64::LUTI2_2ZTZI_B, AArch64::LUTI2_2ZTZI_H,
+ AArch64::LUTI2_2ZTZI_S}))
+ // Second Immediate must be <= 7:
+ SelectMultiVectorLuti<7>(Node, 2, Opc);
+ return;
+ }
+ case Intrinsic::aarch64_sme_luti4_lane_zt_x2: {
+ if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::AnyType>(
+ Node->getValueType(0),
+ {AArch64::LUTI4_2ZTZI_B, AArch64::LUTI4_2ZTZI_H,
+ AArch64::LUTI4_2ZTZI_S}))
+ // Second Immediate must be <= 3:
+ SelectMultiVectorLuti<3>(Node, 2, Opc);
+ return;
+ }
}
} break;
case ISD::INTRINSIC_WO_CHAIN: {
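
In short, selection maps the ZT0-selector immediate to the ZT0 physical
register and refuses to select when the lane immediate exceeds the
instruction's range (7 for LUTI2, 3 for LUTI4). Below is a standalone sketch
of the mapping ImmToTile performs, written in plain C++ rather than the real
SelectionDAG types, so it is a simplified model and not the actual
implementation:

  #include <cstdint>
  #include <optional>

  // Simplified model of ImmToTile<BaseReg, Max>: a constant C in [0, Max]
  // selects physical register number BaseReg + C; anything larger fails to
  // match. For ZT0 the only valid selector is 0, i.e. Max == 0.
  template <unsigned BaseReg, unsigned Max>
  std::optional<unsigned> immToTileReg(uint64_t C) {
    if (C > Max)
      return std::nullopt;
    return BaseReg + C;
  }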
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index ed64a7b4984c17c..24ba9dd95004c6f 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -440,6 +440,12 @@ AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
Reserved.set(SubReg);
}
+ if (MF.getSubtarget<AArch64Subtarget>().hasSME2()) {
+ for (MCSubRegIterator SubReg(AArch64::ZT0, this, /*self=*/true);
+ SubReg.isValid(); ++SubReg)
+ Reserved.set(*SubReg);
+ }
+
markSuperRegs(Reserved, AArch64::FPCR);
if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 6c9b1f11a4decde..78df126cbd32438 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -15,6 +15,7 @@ def imm_to_tile16 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAH0>", []>;
def imm_to_tile32 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAS0>", []>;
def imm_to_tile64 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAD0>", []>;
def imm_to_tile128 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAQ0>", []>;
+def imm_to_zt : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZT0, 0>", []>;
def tileslice8 : ComplexPattern<i32 , 2, "SelectSMETileSlice<15, 1>", []>;
def tileslice16 : ComplexPattern<i32 , 2, "SelectSMETileSlice<7, 1>", []>;
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll
new file mode 100644
index 000000000000000..df658539ba8bf8d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 < %s | FileCheck %s
+
+; lookup table expand two contiguous registers
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>} @luti2_i8(<vscale x 16 x i8> %x) {
+; CHECK-LABEL: luti2_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti2 { z0.b, z1.b }, zt0, z0[0]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> %x, i32 0)
+ ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %res
+}
+
+define {<vscale x 8 x i16>, <vscale x 8 x i16>} @luti2_i16(<vscale x 8 x i16> %x) {
+; CHECK-LABEL: luti2_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti2 { z0.h, z1.h }, zt0, z0[7]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32 0, <vscale x 8 x i16> %x, i32 7)
+ ret {<vscale x 8 x i16>, <vscale x 8 x i16>} %res
+}
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>} @luti2_i32(<vscale x 4 x i32> %x) {
+; CHECK-LABEL: luti2_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti2 { z0.s, z1.s }, zt0, z0[7]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32 0, <vscale x 4 x i32> %x, i32 7)
+ ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %res
+}
+
+declare {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32, <vscale x 16 x i8>, i32)
+declare {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32, <vscale x 8 x i16>, i32)
+declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32, <vscale x 4 x i32>, i32)
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll
new file mode 100644
index 000000000000000..4aba45025a44515
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 < %s | FileCheck %s
+
+; lookup table expand two contiguous registers
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>} @luti4_i8(<vscale x 16 x i8> %x) {
+; CHECK-LABEL: luti4_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti4 { z0.b, z1.b }, zt0, z0[0]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> %x, i32 0)
+ ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %res
+}
+
+define {<vscale x 8 x i16>, <vscale x 8 x i16>} @luti4_i16(<vscale x 8 x i16> %x) {
+; CHECK-LABEL: luti4_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti4 { z0.h, z1.h }, zt0, z0[3]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv8i16(i32 0, <vscale x 8 x i16> %x, i32 3)
+ ret {<vscale x 8 x i16>, <vscale x 8 x i16>} %res
+}
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>} @luti4_i32(<vscale x 4 x i32> %x) {
+; CHECK-LABEL: luti4_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti4 { z0.s, z1.s }, zt0, z0[3]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv4i32(i32 0, <vscale x 4 x i32> %x, i32 3)
+ ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %res
+}
+
+declare {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv16i8(i32, <vscale x 16 x i8>, i32)
+declare {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv8i16(i32, <vscale x 8 x i16>, i32)
+declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv4i32(i32, <vscale x 4 x i32>, i32)
From 319c8e2f406f97a07b7b78b2c4cf581a447bb4e8 Mon Sep 17 00:00:00 2001
From: Matt Devereau <matthew.devereau at arm.com>
Date: Fri, 24 Nov 2023 11:05:47 +0000
Subject: [PATCH 2/5] Removed unused old ImmToTile function & clang format
---
llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp | 11 +----------
llvm/lib/Target/AArch64/SMEInstrFormats.td | 10 +++++-----
2 files changed, 6 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 455a966ef08833f..c921dcab580286f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -326,15 +326,6 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
return false;
}
- template <unsigned BaseReg> bool ImmToTile(SDValue N, SDValue &Imm) {
- if (auto *CI = dyn_cast<ConstantSDNode>(N)) {
- uint64_t C = CI->getZExtValue();
- Imm = CurDAG->getRegister(BaseReg + C, MVT::Other);
- return true;
- }
- return false;
- }
-
template <unsigned BaseReg, unsigned Max>
bool ImmToTile(SDValue N, SDValue &Imm) {
if (auto *CI = dyn_cast<ConstantSDNode>(N)) {
@@ -5114,7 +5105,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
Node->getValueType(0),
{AArch64::LUTI4_2ZTZI_B, AArch64::LUTI4_2ZTZI_H,
AArch64::LUTI4_2ZTZI_S}))
- // Second Immediate must be <= 3:
+ // Second Immediate must be <= 3:
SelectMultiVectorLuti<3>(Node, 2, Opc);
return;
}
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 78df126cbd32438..61762bf01978415 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -10,11 +10,11 @@
//
//===----------------------------------------------------------------------===//
-def imm_to_tile8 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAB0>", []>;
-def imm_to_tile16 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAH0>", []>;
-def imm_to_tile32 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAS0>", []>;
-def imm_to_tile64 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAD0>", []>;
-def imm_to_tile128 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAQ0>", []>;
+def imm_to_tile8 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAB0, 0>", []>;
+def imm_to_tile16 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAH0, 1>", []>;
+def imm_to_tile32 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAS0, 3>", []>;
+def imm_to_tile64 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAD0, 7>", []>;
+def imm_to_tile128 : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZAQ0, 15>", []>;
def imm_to_zt : ComplexPattern<i32, 1, "ImmToTile<AArch64::ZT0, 0>", []>;
def tileslice8 : ComplexPattern<i32 , 2, "SelectSMETileSlice<15, 1>", []>;
From bb829f528e66264cdd6fb36b69ce3c5d74654a21 Mon Sep 17 00:00:00 2001
From: Matt Devereau <matthew.devereau at arm.com>
Date: Mon, 27 Nov 2023 16:12:00 +0000
Subject: [PATCH 3/5] Update patch to include changes introduced in
https://github.com/ARM-software/acle/pull/278
---
clang/include/clang/Basic/arm_sme.td | 4 +-
.../acle_sme2_luti2_lane_zt_x2.c | 88 +++++++++++++++----
.../acle_sme2_luti4_lane_zt_x2.c | 88 +++++++++++++++----
.../aarch64-sme2-intrinsics/acle_sme2_imm.cpp | 28 +++++-
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 3 +-
.../AArch64/sme2-intrinsics-luti2-lane-x2.ll | 32 ++++++-
.../AArch64/sme2-intrinsics-luti4-lane-x2.ll | 30 +++++++
7 files changed, 235 insertions(+), 38 deletions(-)
diff --git a/clang/include/clang/Basic/arm_sme.td b/clang/include/clang/Basic/arm_sme.td
index c7b9dbacf071c43..2bc7454e3cedf21 100644
--- a/clang/include/clang/Basic/arm_sme.td
+++ b/clang/include/clang/Basic/arm_sme.td
@@ -303,6 +303,6 @@ defm SVSUB : ZAAddSub<"sub">;
// lookup table expand two contiguous registers
//
let TargetGuard = "sme2" in {
- def SVLUTI2_LANE_ZT_X2 : Inst<"svluti2_lane_zt[_{d}]_x2", "2.dmdm", "cUcsUsiUi", MergeNone, "aarch64_sme_luti2_lane_zt_x2", [IsStreaming, IsSharedZA, IsPreservesZA], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_7>]>;
- def SVLUTI4_LANE_ZT_X2 : Inst<"svluti4_lane_zt[_{d}]_x2", "2.dmdm", "cUcsUsiUi", MergeNone, "aarch64_sme_luti4_lane_zt_x2", [IsStreaming, IsSharedZA, IsPreservesZA], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_3>]>;
+ def SVLUTI2_LANE_ZT_X2 : Inst<"svluti2_lane_zt_{d}_x2", "2.dmdm", "cUcsUsiUibhf", MergeNone, "aarch64_sme_luti2_lane_zt_x2", [IsStreaming, IsSharedZA, IsPreservesZA], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_7>]>;
+ def SVLUTI4_LANE_ZT_X2 : Inst<"svluti4_lane_zt_{d}_x2", "2.dmdm", "cUcsUsiUibhf", MergeNone, "aarch64_sme_luti4_lane_zt_x2", [IsStreaming, IsSharedZA, IsPreservesZA], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_3>]>;
}
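
With this update the builtins are no longer overloaded: each call names its
element type explicitly, and half, bfloat, and float element types are now
accepted. A sketch of the explicitly-typed form (the call itself is taken
from the updated f16 test below; the wrapper name is invented):

  #include <arm_sme_draft_spec_subject_to_change.h>

  // Non-overloaded, explicitly-typed form after the ACLE#278 change.
  svfloat16x2_t expand_f16(svfloat16_t zn)
      __arm_streaming __arm_shared_za __arm_preserves_za {
    return svluti2_lane_zt_f16_x2(0, zn, 7);
  }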
diff --git a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti2_lane_zt_x2.c b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti2_lane_zt_x2.c
index 51adc31c6968e7e..dcfe7b24f291a21 100644
--- a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti2_lane_zt_x2.c
+++ b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti2_lane_zt_x2.c
@@ -4,20 +4,10 @@
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sme_draft_spec_subject_to_change.h>
-#ifdef SVE_OVERLOADED_FORMS
-// A simple used,unused... macro, long enough to represent any SVE builtin.
-#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
-#else
-#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
-#endif
-
-
// CHECK-LABEL: @test_svluti2_lane_zt_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> [[ZN:%.*]], i32 0)
@@ -37,7 +27,7 @@
// CPP-CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
//
svuint8x2_t test_svluti2_lane_zt_u8(svuint8_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_u8,_x2,)(0, zn, 0);
+ return svluti2_lane_zt_u8_x2(0, zn, 0);
}
@@ -60,7 +50,7 @@ svuint8x2_t test_svluti2_lane_zt_u8(svuint8_t zn) __arm_streaming __arm_shared_z
// CPP-CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
//
svint8x2_t test_svluti2_lane_zt_s8(svint8_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_s8,_x2,)(0, zn, 0);
+ return svluti2_lane_zt_s8_x2(0, zn, 0);
}
// CHECK-LABEL: @test_svluti2_lane_zt_u16(
@@ -82,7 +72,7 @@ svint8x2_t test_svluti2_lane_zt_s8(svint8_t zn) __arm_streaming __arm_shared_za
// CPP-CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
//
svuint16x2_t test_svluti2_lane_zt_u16(svuint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_u16,_x2,)(0, zn, 7);
+ return svluti2_lane_zt_u16_x2(0, zn, 7);
}
@@ -105,7 +95,51 @@ svuint16x2_t test_svluti2_lane_zt_u16(svuint16_t zn) __arm_streaming __arm_share
// CPP-CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
//
svint16x2_t test_svluti2_lane_zt_s16(svint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_s16,_x2,)(0, zn, 7);
+ return svluti2_lane_zt_s16_x2(0, zn, 7);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8f16(i32 0, <vscale x 8 x half> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x half> @llvm.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> poison, <vscale x 8 x half> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x half> @llvm.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], i64 8)
+// CHECK-NEXT: ret <vscale x 16 x half> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_f16u13__SVFloat16_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8f16(i32 0, <vscale x 8 x half> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x half> @llvm.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> poison, <vscale x 8 x half> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x half> @llvm.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], i64 8)
+// CPP-CHECK-NEXT: ret <vscale x 16 x half> [[TMP4]]
+//
+svfloat16x2_t test_svluti2_lane_zt_f16(svfloat16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return svluti2_lane_zt_f16_x2(0, zn, 7);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8bf16(i32 0, <vscale x 8 x bfloat> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x bfloat> @llvm.vector.insert.nxv16bf16.nxv8bf16(<vscale x 16 x bfloat> poison, <vscale x 8 x bfloat> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x bfloat> @llvm.vector.insert.nxv16bf16.nxv8bf16(<vscale x 16 x bfloat> [[TMP2]], <vscale x 8 x bfloat> [[TMP3]], i64 8)
+// CHECK-NEXT: ret <vscale x 16 x bfloat> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z25test_svluti2_lane_zt_bf16u14__SVBfloat16_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8bf16(i32 0, <vscale x 8 x bfloat> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x bfloat> @llvm.vector.insert.nxv16bf16.nxv8bf16(<vscale x 16 x bfloat> poison, <vscale x 8 x bfloat> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x bfloat> @llvm.vector.insert.nxv16bf16.nxv8bf16(<vscale x 16 x bfloat> [[TMP2]], <vscale x 8 x bfloat> [[TMP3]], i64 8)
+// CPP-CHECK-NEXT: ret <vscale x 16 x bfloat> [[TMP4]]
+//
+svbfloat16x2_t test_svluti2_lane_zt_bf16(svbfloat16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return svluti2_lane_zt_bf16_x2(0, zn, 7);
}
// CHECK-LABEL: @test_svluti2_lane_zt_u32(
@@ -127,7 +161,7 @@ svint16x2_t test_svluti2_lane_zt_s16(svint16_t zn) __arm_streaming __arm_shared_
// CPP-CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
//
svuint32x2_t test_svluti2_lane_zt_u32(svuint32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_u32,_x2,)(0, zn, 7);
+ return svluti2_lane_zt_u32_x2(0, zn, 7);
}
// CHECK-LABEL: @test_svluti2_lane_zt_s32(
@@ -149,5 +183,27 @@ svuint32x2_t test_svluti2_lane_zt_u32(svuint32_t zn) __arm_streaming __arm_share
// CPP-CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
//
svint32x2_t test_svluti2_lane_zt_s32(svint32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_s32,_x2,)(0, zn, 7);
+ return svluti2_lane_zt_s32_x2(0, zn, 7);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4f32(i32 0, <vscale x 4 x float> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> poison, <vscale x 4 x float> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], i64 4)
+// CHECK-NEXT: ret <vscale x 8 x float> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_f32u13__SVFloat32_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4f32(i32 0, <vscale x 4 x float> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> poison, <vscale x 4 x float> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], i64 4)
+// CPP-CHECK-NEXT: ret <vscale x 8 x float> [[TMP4]]
+//
+svfloat32x2_t test_svluti2_lane_zt_f32(svfloat32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return svluti2_lane_zt_f32_x2(0, zn, 7);
}
diff --git a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti4_lane_zt_x2.c b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti4_lane_zt_x2.c
index 51adc31c6968e7e..dcfe7b24f291a21 100644
--- a/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti4_lane_zt_x2.c
+++ b/clang/test/CodeGen/aarch64-sme2-intrinsics/acle_sme2_luti4_lane_zt_x2.c
@@ -4,20 +4,10 @@
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sme_draft_spec_subject_to_change.h>
-#ifdef SVE_OVERLOADED_FORMS
-// A simple used,unused... macro, long enough to represent any SVE builtin.
-#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
-#else
-#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
-#endif
-
-
// CHECK-LABEL: @test_svluti2_lane_zt_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32 0, <vscale x 16 x i8> [[ZN:%.*]], i32 0)
@@ -37,7 +27,7 @@
// CPP-CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
//
svuint8x2_t test_svluti2_lane_zt_u8(svuint8_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_u8,_x2,)(0, zn, 0);
+ return svluti2_lane_zt_u8_x2(0, zn, 0);
}
@@ -60,7 +50,7 @@ svuint8x2_t test_svluti2_lane_zt_u8(svuint8_t zn) __arm_streaming __arm_shared_z
// CPP-CHECK-NEXT: ret <vscale x 32 x i8> [[TMP4]]
//
svint8x2_t test_svluti2_lane_zt_s8(svint8_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_s8,_x2,)(0, zn, 0);
+ return svluti2_lane_zt_s8_x2(0, zn, 0);
}
// CHECK-LABEL: @test_svluti2_lane_zt_u16(
@@ -82,7 +72,7 @@ svint8x2_t test_svluti2_lane_zt_s8(svint8_t zn) __arm_streaming __arm_shared_za
// CPP-CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
//
svuint16x2_t test_svluti2_lane_zt_u16(svuint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_u16,_x2,)(0, zn, 7);
+ return svluti2_lane_zt_u16_x2(0, zn, 7);
}
@@ -105,7 +95,51 @@ svuint16x2_t test_svluti2_lane_zt_u16(svuint16_t zn) __arm_streaming __arm_share
// CPP-CHECK-NEXT: ret <vscale x 16 x i16> [[TMP4]]
//
svint16x2_t test_svluti2_lane_zt_s16(svint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_s16,_x2,)(0, zn, 7);
+ return svluti2_lane_zt_s16_x2(0, zn, 7);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8f16(i32 0, <vscale x 8 x half> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x half> @llvm.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> poison, <vscale x 8 x half> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x half> @llvm.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], i64 8)
+// CHECK-NEXT: ret <vscale x 16 x half> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_f16u13__SVFloat16_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8f16(i32 0, <vscale x 8 x half> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x half> @llvm.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> poison, <vscale x 8 x half> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x half> @llvm.vector.insert.nxv16f16.nxv8f16(<vscale x 16 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], i64 8)
+// CPP-CHECK-NEXT: ret <vscale x 16 x half> [[TMP4]]
+//
+svfloat16x2_t test_svluti2_lane_zt_f16(svfloat16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return svluti2_lane_zt_f16_x2(0, zn, 7);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8bf16(i32 0, <vscale x 8 x bfloat> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x bfloat> @llvm.vector.insert.nxv16bf16.nxv8bf16(<vscale x 16 x bfloat> poison, <vscale x 8 x bfloat> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x bfloat> @llvm.vector.insert.nxv16bf16.nxv8bf16(<vscale x 16 x bfloat> [[TMP2]], <vscale x 8 x bfloat> [[TMP3]], i64 8)
+// CHECK-NEXT: ret <vscale x 16 x bfloat> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z25test_svluti2_lane_zt_bf16u14__SVBfloat16_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8bf16(i32 0, <vscale x 8 x bfloat> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x bfloat> @llvm.vector.insert.nxv16bf16.nxv8bf16(<vscale x 16 x bfloat> poison, <vscale x 8 x bfloat> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x bfloat> @llvm.vector.insert.nxv16bf16.nxv8bf16(<vscale x 16 x bfloat> [[TMP2]], <vscale x 8 x bfloat> [[TMP3]], i64 8)
+// CPP-CHECK-NEXT: ret <vscale x 16 x bfloat> [[TMP4]]
+//
+svbfloat16x2_t test_svluti2_lane_zt_bf16(svbfloat16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return svluti2_lane_zt_bf16_x2(0, zn, 7);
}
// CHECK-LABEL: @test_svluti2_lane_zt_u32(
@@ -127,7 +161,7 @@ svint16x2_t test_svluti2_lane_zt_s16(svint16_t zn) __arm_streaming __arm_shared_
// CPP-CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
//
svuint32x2_t test_svluti2_lane_zt_u32(svuint32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_u32,_x2,)(0, zn, 7);
+ return svluti2_lane_zt_u32_x2(0, zn, 7);
}
// CHECK-LABEL: @test_svluti2_lane_zt_s32(
@@ -149,5 +183,27 @@ svuint32x2_t test_svluti2_lane_zt_u32(svuint32_t zn) __arm_streaming __arm_share
// CPP-CHECK-NEXT: ret <vscale x 8 x i32> [[TMP4]]
//
svint32x2_t test_svluti2_lane_zt_s32(svint32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
- return SVE_ACLE_FUNC(svluti2_lane_zt,_s32,_x2,)(0, zn, 7);
+ return svluti2_lane_zt_s32_x2(0, zn, 7);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4f32(i32 0, <vscale x 4 x float> [[ZN:%.*]], i32 7)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> poison, <vscale x 4 x float> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], i64 4)
+// CHECK-NEXT: ret <vscale x 8 x float> [[TMP4]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_f32u13__SVFloat32_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4f32(i32 0, <vscale x 4 x float> [[ZN:%.*]], i32 7)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> poison, <vscale x 4 x float> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x float> @llvm.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], i64 4)
+// CPP-CHECK-NEXT: ret <vscale x 8 x float> [[TMP4]]
+//
+svfloat32x2_t test_svluti2_lane_zt_f32(svfloat32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+ return svluti2_lane_zt_f32_x2(0, zn, 7);
}
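
For context on the mechanical part of the test change above: the deleted
SVE_ACLE_FUNC macro simply token-pastes its arguments into either the
overloaded or the fully suffixed builtin name, so both spellings resolve
to the same intrinsic. A minimal standalone sketch of the two expansions
(illustration only, not part of the patch; the renamed macros here are
hypothetical):

/* With SVE_OVERLOADED_FORMS defined, the type suffix is dropped,
   naming the overloaded form; otherwise all four tokens are pasted. */
#define SVE_ACLE_FUNC_OVERLOADED(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
#define SVE_ACLE_FUNC_SUFFIXED(A1, A2, A3, A4) A1##A2##A3##A4

/* SVE_ACLE_FUNC_OVERLOADED(svluti2_lane_zt, _u8, _x2, )
     expands to svluti2_lane_zt_x2 (overloaded form)
   SVE_ACLE_FUNC_SUFFIXED(svluti2_lane_zt, _u8, _x2, )
     expands to svluti2_lane_zt_u8_x2 (the form now called directly) */

Since the -DSVE_OVERLOADED_FORMS RUN lines are also removed, only the
suffixed expansion would ever be exercised, which is why the calls are
now spelled out directly.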
diff --git a/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_imm.cpp b/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_imm.cpp
index b3d2cffb375237b..e9d8821579120d6 100644
--- a/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_imm.cpp
+++ b/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_imm.cpp
@@ -3,7 +3,7 @@
// REQUIRES: aarch64-registered-target
#include <arm_sme_draft_spec_subject_to_change.h>
-void test_svluti2_lane_zt_x2(svuint8_t zn_u8, svuint16_t zn_u16, svuint32_t zn_u32) __arm_streaming __arm_shared_za __arm_preserves_za {
+void test_svluti2_lane_zt_x2(svuint8_t zn_u8, svuint16_t zn_u16, svuint32_t zn_u32, svfloat16_t zn_f16, svbfloat16_t zn_bf16, svfloat32_t zn_f32) __arm_streaming __arm_shared_za __arm_preserves_za {
// Test Reg Offset
svluti2_lane_zt_u8_x2(1, zn_u8, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
// Test index value range
@@ -16,9 +16,21 @@ void test_svluti2_lane_zt_x2(svuint8_t zn_u8, svuint16_t zn_u16, svuint32_t zn_u
svluti2_lane_zt_u32_x2(1, zn_u32, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
// Test index value range
svluti2_lane_zt_u32_x2(0, zn_u32, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ // Test Reg Offset
+ svluti2_lane_zt_f16_x2(1, zn_f16, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti2_lane_zt_f16_x2(0, zn_f16, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ // Test Reg Offset
+ svluti2_lane_zt_bf16_x2(1, zn_bf16, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti2_lane_zt_bf16_x2(0, zn_bf16, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ // Test Reg Offset
+ svluti2_lane_zt_f32_x2(1, zn_f32, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti2_lane_zt_f32_x2(0, zn_f32, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
}
-void test_svluti4_lane_zt_x2(svuint8_t zn_u8, svuint16_t zn_u16, svuint32_t zn_u32) __arm_streaming __arm_shared_za __arm_preserves_za {
+void test_svluti4_lane_zt_x2(svuint8_t zn_u8, svuint16_t zn_u16, svuint32_t zn_u32, svfloat16_t zn_f16, svbfloat16_t zn_bf16, svfloat32_t zn_f32) __arm_streaming __arm_shared_za __arm_preserves_za {
// Test Reg Offset
svluti4_lane_zt_u8_x2(1, zn_u8, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
// Test index value range
@@ -31,4 +43,16 @@ void test_svluti4_lane_zt_x2(svuint8_t zn_u8, svuint16_t zn_u16, svuint32_t zn_u
svluti4_lane_zt_u32_x2(1, zn_u32, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
// Test index value range
svluti4_lane_zt_u32_x2(0, zn_u32, 4); // expected-error {{argument value 4 is outside the valid range [0, 3]}}
+ // Test Reg Offset
+ svluti4_lane_zt_f16_x2(1, zn_f16, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti4_lane_zt_f16_x2(0, zn_f16, 4); // expected-error {{argument value 4 is outside the valid range [0, 3]}}
+ // Test Reg Offset
+ svluti4_lane_zt_bf16_x2(1, zn_bf16, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti4_lane_zt_bf16_x2(0, zn_bf16, 4); // expected-error {{argument value 4 is outside the valid range [0, 3]}}
+ // Test Reg Offset
+ svluti4_lane_zt_f32_x2(1, zn_f32, 2); // expected-error {{argument value 1 is outside the valid range [0, 0]}}
+ // Test index value range
+ svluti4_lane_zt_f32_x2(0, zn_f32, 4); // expected-error {{argument value 4 is outside the valid range [0, 3]}}
}
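
To complement the out-of-range cases above, a minimal sketch (not part
of the patch) of calls that satisfy the immediate checks: the first
operand, selecting the ZT0 register, must be 0, and the lane index is
limited to [0, 7] for the luti2 _x2 forms and [0, 3] for the luti4 _x2
forms:

#include <arm_sme_draft_spec_subject_to_change.h>

svuint16x2_t valid_luti2(svuint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
  return svluti2_lane_zt_u16_x2(0, zn, 7); // ZT0 operand 0, max luti2 index
}

svuint16x2_t valid_luti4(svuint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
  return svluti4_lane_zt_u16_x2(0, zn, 3); // ZT0 operand 0, max luti4 index
}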
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index c921dcab580286f..29f7f64b9bc302b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1674,7 +1674,8 @@ static unsigned SelectOpcodeFromVT(EVT VT, ArrayRef<unsigned> Opcodes) {
return 0;
break;
case SelectTypeKind::FP:
- if (EltVT != MVT::f16 && EltVT != MVT::f32 && EltVT != MVT::f64)
+ if (EltVT != MVT::bf16 && EltVT != MVT::f16 && EltVT != MVT::f32 &&
+ EltVT != MVT::f64)
return 0;
break;
}
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll
index df658539ba8bf8d..163349f1a67eb88 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll
@@ -27,9 +27,39 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>} @luti2_i32(<vscale x 4 x i32> %x
; CHECK-NEXT: luti2 { z0.s, z1.s }, zt0, z0[7]
; CHECK-NEXT: ret
%res = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32 0, <vscale x 4 x i32> %x, i32 7)
- ret {<vscale x 4 x i32>, <vscale x 4 x i32>}%res
+ ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %res
+}
+
+define {<vscale x 8 x half>, <vscale x 8 x half>} @luti2_f16(<vscale x 8 x half> %x) {
+; CHECK-LABEL: luti2_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti2 { z0.s, z1.s }, zt0, z0[7]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8f16(i32 0, <vscale x 8 x half> %x, i32 7)
+ ret {<vscale x 8 x half>, <vscale x 8 x half>} %res
+}
+
+define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @luti2_bf16(<vscale x 8 x bfloat> %x) {
+; CHECK-LABEL: luti2_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti2 { z0.s, z1.s }, zt0, z0[7]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8bf16(i32 0, <vscale x 8 x bfloat> %x, i32 7)
+ ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
+}
+
+define {<vscale x 4 x float>, <vscale x 4 x float>} @luti2_f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: luti2_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti2 { z0.s, z1.s }, zt0, z0[7]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4f32(i32 0, <vscale x 4 x float> %x, i32 7)
+ ret {<vscale x 4 x float>, <vscale x 4 x float>} %res
}
declare {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv16i8(i32, <vscale x 16 x i8>, i32)
declare {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32, <vscale x 8 x i16>, i32)
declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32, <vscale x 4 x i32>, i32)
+declare {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8f16(i32, <vscale x 8 x half>, i32)
+declare {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8f16(i32, <vscale x 8 x bfloat>, i32)
+declare {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4f32(i32, <vscale x 4 x float>, i32)
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll
index 4aba45025a44515..c3209f5025b765c 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll
@@ -30,6 +30,36 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>} @luti4_i32(<vscale x 4 x i32> %x
ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %res
}
+define {<vscale x 8 x half>, <vscale x 8 x half>} @luti4_f16(<vscale x 8 x half> %x) {
+; CHECK-LABEL: luti4_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti4 { z0.h, z1.h }, zt0, z0[3]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv8f16(i32 0, <vscale x 8 x half> %x, i32 3)
+ ret {<vscale x 8 x half>, <vscale x 8 x half>} %res
+}
+
+define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @luti4_bf16(<vscale x 8 x bfloat> %x) {
+; CHECK-LABEL: luti4_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti4 { z0.h, z1.h }, zt0, z0[3]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv8bf16(i32 0, <vscale x 8 x bfloat> %x, i32 3)
+ ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
+}
+
+define {<vscale x 4 x float>, <vscale x 4 x float>} @luti4_i32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: luti4_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: luti4 { z0.s, z1.s }, zt0, z0[3]
+; CHECK-NEXT: ret
+ %res = call {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv4f32(i32 0, <vscale x 4 x float> %x, i32 3)
+ ret {<vscale x 4 x float>, <vscale x 4 x float>} %res
+}
+
declare {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv16i8(i32, <vscale x 16 x i8>, i32)
declare {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv8i16(i32, <vscale x 8 x i16>, i32)
declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv4i32(i32, <vscale x 4 x i32>, i32)
+declare {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv8f16(i32, <vscale x 8 x half>, i32)
+declare {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv8bf16(i32, <vscale x 8 x bfloat>, i32)
+declare {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x2.nxv4f32(i32, <vscale x 4 x float>, i32)
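
Back at the C level, the two-element struct these intrinsics return is
what the CHECK lines show as the { z0, z1 } destination pair; a caller
splits the tuple with the standard SVE svget2 accessors. A small sketch
(not part of the patch, and assuming the draft SME header exposes the
SVE tuple accessors as arm_sme.h does):

#include <arm_sme_draft_spec_subject_to_change.h>

// Extract the first half of an _x2 lookup result; the index passed to
// svget2_u8 must be a constant 0 or 1.
svuint8_t luti2_low_half(svuint8_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
  svuint8x2_t pair = svluti2_lane_zt_u8_x2(0, zn, 0);
  return svget2_u8(pair, 0);
}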
>From b4dd9baaf486881d7b344dc62f38762b329f1b8a Mon Sep 17 00:00:00 2001
From: Matt Devereau <matthew.devereau at arm.com>
Date: Mon, 27 Nov 2023 16:16:27 +0000
Subject: [PATCH 4/5] Revert bf16 addition to SelectOpcodeFromVT
---
llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 29f7f64b9bc302b..c921dcab580286f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1674,8 +1674,7 @@ static unsigned SelectOpcodeFromVT(EVT VT, ArrayRef<unsigned> Opcodes) {
return 0;
break;
case SelectTypeKind::FP:
- if (EltVT != MVT::bf16 && EltVT != MVT::f16 && EltVT != MVT::f32 &&
- EltVT != MVT::f64)
+ if (EltVT != MVT::f16 && EltVT != MVT::f32 && EltVT != MVT::f64)
return 0;
break;
}
>From 3cf42e34586e8f9abb1d124418724430e6d4e90e Mon Sep 17 00:00:00 2001
From: Matt Devereau <matthew.devereau at arm.com>
Date: Wed, 29 Nov 2023 18:13:42 +0000
Subject: [PATCH 5/5] Fix tests
---
.../CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll | 12 ++++++------
.../CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll | 8 ++++----
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll
index 163349f1a67eb88..02521e2b04442ee 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti2-lane-x2.ll
@@ -31,25 +31,25 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>} @luti2_i32(<vscale x 4 x i32> %x
}
define {<vscale x 8 x half>, <vscale x 8 x half>} @luti2_f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: luti2_i32:
+; CHECK-LABEL: luti2_f16:
; CHECK: // %bb.0:
-; CHECK-NEXT: luti2 { z0.s, z1.s }, zt0, z0[7]
+; CHECK-NEXT: luti2 { z0.h, z1.h }, zt0, z0[7]
; CHECK-NEXT: ret
%res = call {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8f16(i32 0, <vscale x 8 x half> %x, i32 7)
ret {<vscale x 8 x half>, <vscale x 8 x half>} %res
}
define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @luti2_bf16(<vscale x 8 x bfloat> %x) {
-; CHECK-LABEL: luti2_i32:
+; CHECK-LABEL: luti2_bf16:
; CHECK: // %bb.0:
-; CHECK-NEXT: luti2 { z0.s, z1.s }, zt0, z0[7]
+; CHECK-NEXT: luti2 { z0.h, z1.h }, zt0, z0[7]
; CHECK-NEXT: ret
%res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8bf16(i32 0, <vscale x 8 x bfloat> %x, i32 7)
ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
}
define {<vscale x 4 x float>, <vscale x 4 x float>} @luti2_f32(<vscale x 4 x float> %x) {
-; CHECK-LABEL: luti2_i32:
+; CHECK-LABEL: luti2_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: luti2 { z0.s, z1.s }, zt0, z0[7]
; CHECK-NEXT: ret
@@ -61,5 +61,5 @@ declare {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti2.lane.zt
declare {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8i16(i32, <vscale x 8 x i16>, i32)
declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4i32(i32, <vscale x 4 x i32>, i32)
declare {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8f16(i32, <vscale x 8 x half>, i32)
-declare {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8f16(i32, <vscale x 8 x bfloat>, i32)
+declare {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv8bf16(i32, <vscale x 8 x bfloat>, i32)
declare {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti2.lane.zt.x2.nxv4f32(i32, <vscale x 4 x float>, i32)
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll
index c3209f5025b765c..871092501441e4e 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x2.ll
@@ -31,7 +31,7 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>} @luti4_i32(<vscale x 4 x i32> %x
}
define {<vscale x 8 x half>, <vscale x 8 x half>} @luti4_f16(<vscale x 8 x half> %x) {
-; CHECK-LABEL: luti4_i16:
+; CHECK-LABEL: luti4_f16:
; CHECK: // %bb.0:
; CHECK-NEXT: luti4 { z0.h, z1.h }, zt0, z0[3]
; CHECK-NEXT: ret
@@ -40,7 +40,7 @@ define {<vscale x 8 x half>, <vscale x 8 x half>} @luti4_f16(<vscale x 8 x half>
}
define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @luti4_bf16(<vscale x 8 x bfloat> %x) {
-; CHECK-LABEL: luti4_i16:
+; CHECK-LABEL: luti4_bf16:
; CHECK: // %bb.0:
; CHECK-NEXT: luti4 { z0.h, z1.h }, zt0, z0[3]
; CHECK-NEXT: ret
@@ -48,8 +48,8 @@ define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @luti4_bf16(<vscale x 8 x
ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
}
-define {<vscale x 4 x float>, <vscale x 4 x float>} @luti4_i32(<vscale x 4 x float> %x) {
-; CHECK-LABEL: luti4_i32:
+define {<vscale x 4 x float>, <vscale x 4 x float>} @luti4_f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: luti4_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: luti4 { z0.s, z1.s }, zt0, z0[3]
; CHECK-NEXT: ret