[llvm-branch-commits] [clang] [llvm] release/18.x: [AArch64][SME] Implement inline-asm clobbers for za/zt0 (#79276) (PR #81593)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Thu Feb 15 21:59:00 PST 2024
https://github.com/llvmbot updated https://github.com/llvm/llvm-project/pull/81593
From 8b7b3fbe29051f5456334a9c6990e053fd3e59dc Mon Sep 17 00:00:00 2001
From: Matthew Devereau <matthew.devereau at arm.com>
Date: Fri, 2 Feb 2024 08:12:05 +0000
Subject: [PATCH] [AArch64][SME] Implement inline-asm clobbers for za/zt0
(#79276)
This enables specifying "za" or "zt0" in the clobber list for inline asm.
This complies with the ACLE SME addition to the asm extension:
https://github.com/ARM-software/acle/pull/276
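
For example (a minimal usage sketch mirroring the new clang test below;
not part of the patch itself), C code can now mark an asm statement as
clobbering SME state:

    void test_sme_constraints(void) {
      // The asm statement may modify the ZA array storage.
      asm("movt zt0[3, mul vl], z0" : : : "za");
      // The asm statement may modify the ZT0 lookup-table register.
      asm("movt zt0[3, mul vl], z0" : : : "zt0");
    }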
(cherry picked from commit d9c20e437fe110fb79b5ca73a52762e5b930b361)
---
clang/lib/Basic/Targets/AArch64.cpp | 9 ++++++++-
clang/test/CodeGen/aarch64-inline-asm.c | 8 ++++++++
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 8 ++++++++
llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp | 4 ++++
llvm/test/CodeGen/AArch64/aarch64-za-clobber.ll | 16 ++++++++++++++++
5 files changed, 44 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AArch64/aarch64-za-clobber.ll
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 336b7a5e3d727d..3036f461c1ded1 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -1187,6 +1187,8 @@ TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
}
const char *const AArch64TargetInfo::GCCRegNames[] = {
+ // clang-format off
+
// 32-bit Integer registers
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
"w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
@@ -1223,7 +1225,12 @@ const char *const AArch64TargetInfo::GCCRegNames[] = {
// SVE predicate-as-counter registers
"pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
- "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15"
+ "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",
+
+ // SME registers
+ "za", "zt0",
+
+ // clang-format on
};
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
diff --git a/clang/test/CodeGen/aarch64-inline-asm.c b/clang/test/CodeGen/aarch64-inline-asm.c
index 75e9a8c46b8769..8ddee560b11da4 100644
--- a/clang/test/CodeGen/aarch64-inline-asm.c
+++ b/clang/test/CodeGen/aarch64-inline-asm.c
@@ -95,3 +95,11 @@ void test_reduced_gpr_constraints(int var32, long var64) {
// CHECK: [[ARG2:%.+]] = load i64, ptr
// CHECK: call void asm sideeffect "add x0, x0, $0", "@3Ucj,~{x0}"(i64 [[ARG2]])
}
+
+void test_sme_constraints(){
+ asm("movt zt0[3, mul vl], z0" : : : "za");
+// CHECK: call void asm sideeffect "movt zt0[3, mul vl], z0", "~{za}"()
+
+ asm("movt zt0[3, mul vl], z0" : : : "zt0");
+// CHECK: call void asm sideeffect "movt zt0[3, mul vl], z0", "~{zt0}"()
+}
\ No newline at end of file
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e97f5e32201488..bfce5bc92a9ad1 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10718,6 +10718,14 @@ AArch64TargetLowering::getRegForInlineAsmConstraint(
parseConstraintCode(Constraint) != AArch64CC::Invalid)
return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass);
+ if (Constraint == "{za}") {
+ return std::make_pair(unsigned(AArch64::ZA), &AArch64::MPRRegClass);
+ }
+
+ if (Constraint == "{zt0}") {
+ return std::make_pair(unsigned(AArch64::ZT0), &AArch64::ZTRRegClass);
+ }
+
// Use the default implementation in TargetLowering to convert the register
// constraint into a member of a register class.
std::pair<unsigned, const TargetRegisterClass *> Res;
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index f86e6947c9cdb0..48e1c1bc73022c 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -507,6 +507,10 @@ bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
return true;
+ // ZA/ZT0 registers are reserved but may be permitted in the clobber list.
+ if (PhysReg == AArch64::ZA || PhysReg == AArch64::ZT0)
+ return true;
+
return !isReservedReg(MF, PhysReg);
}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-za-clobber.ll b/llvm/test/CodeGen/AArch64/aarch64-za-clobber.ll
new file mode 100644
index 00000000000000..a8cba7dc9a91e9
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-za-clobber.ll
@@ -0,0 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-none-linux-gnu -stop-after=aarch64-isel < %s -o - | FileCheck %s
+
+define void @alpha(<vscale x 4 x i32> %x) local_unnamed_addr {
+entry:
+; CHECK: INLINEASM &"movt zt0[3, mul vl], z0", 1 /* sideeffect attdialect */, 12 /* clobber */, implicit-def early-clobber $za
+ tail call void asm sideeffect "movt zt0[3, mul vl], z0", "~{za}"()
+ ret void
+}
+
+define void @beta(<vscale x 4 x i32> %x) local_unnamed_addr {
+entry:
+; CHECK: INLINEASM &"movt zt0[3, mul vl], z0", 1 /* sideeffect attdialect */, 12 /* clobber */, implicit-def early-clobber $zt0
+ tail call void asm sideeffect "movt zt0[3, mul vl], z0", "~{zt0}"()
+ ret void
+}