[clang] d1ef4b0 - Revert "[AArch64] Improve TargetParser API"
Tomas Matheson via cfe-commits
cfe-commits at lists.llvm.org
Thu Dec 1 05:07:27 PST 2022
Author: Tomas Matheson
Date: 2022-12-01T13:06:54Z
New Revision: d1ef4b0a8da152fe4282f97c7c49f4930a3c66a2
URL: https://github.com/llvm/llvm-project/commit/d1ef4b0a8da152fe4282f97c7c49f4930a3c66a2
DIFF: https://github.com/llvm/llvm-project/commit/d1ef4b0a8da152fe4282f97c7c49f4930a3c66a2.diff
LOG: Revert "[AArch64] Improve TargetParser API"
Buildbots unhappy about constexpr function.
This reverts commit 450de8008bb0ccb5dfc9dd69b6f5b434158772bd.
Added:
Modified:
clang/lib/Basic/Targets/AArch64.cpp
clang/lib/Basic/Targets/AArch64.h
clang/lib/Driver/ToolChains/Arch/AArch64.cpp
llvm/include/llvm/Support/AArch64TargetParser.def
llvm/include/llvm/Support/AArch64TargetParser.h
llvm/include/llvm/Support/VersionTuple.h
llvm/lib/Support/AArch64TargetParser.cpp
llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
llvm/unittests/Support/TargetParserTest.cpp
Removed:
################################################################################
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index edc4fdca26378..c36e942cf46ac 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -45,6 +45,28 @@ const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#include "clang/Basic/BuiltinsAArch64.def"
};
+static StringRef getArchVersionString(llvm::AArch64::ArchKind Kind) {
+ switch (Kind) {
+ case llvm::AArch64::ArchKind::ARMV9A:
+ case llvm::AArch64::ArchKind::ARMV9_1A:
+ case llvm::AArch64::ArchKind::ARMV9_2A:
+ case llvm::AArch64::ArchKind::ARMV9_3A:
+ case llvm::AArch64::ArchKind::ARMV9_4A:
+ return "9";
+ default:
+ return "8";
+ }
+}
+
+StringRef AArch64TargetInfo::getArchProfile() const {
+ switch (ArchKind) {
+ case llvm::AArch64::ArchKind::ARMV8R:
+ return "R";
+ default:
+ return "A";
+ }
+}
+
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: TargetInfo(Triple), ABI("aapcs") {
@@ -148,7 +170,7 @@ bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
return Name == "generic" ||
- llvm::AArch64::parseCpu(Name).Arch != llvm::AArch64::INVALID;
+ llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
}
bool AArch64TargetInfo::setCPU(const std::string &Name) {
@@ -276,10 +298,8 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
// ACLE predefines. Many can only have one possible value on v8 AArch64.
Builder.defineMacro("__ARM_ACLE", "200");
- Builder.defineMacro("__ARM_ARCH",
- std::to_string(ArchInfo->Version.getMajor()));
- Builder.defineMacro("__ARM_ARCH_PROFILE",
- std::string("'") + (char)ArchInfo->Profile + "'");
+ Builder.defineMacro("__ARM_ARCH", getArchVersionString(ArchKind));
+ Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + getArchProfile() + "'");
Builder.defineMacro("__ARM_64BIT_STATE", "1");
Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
@@ -444,34 +464,52 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasD128)
Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
- if (*ArchInfo == llvm::AArch64::ARMV8_1A)
+ switch (ArchKind) {
+ default:
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_1A:
getTargetDefinesARMV81A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV8_2A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_2A:
getTargetDefinesARMV82A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV8_3A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_3A:
getTargetDefinesARMV83A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV8_4A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_4A:
getTargetDefinesARMV84A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV8_5A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_5A:
getTargetDefinesARMV85A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV8_6A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_6A:
getTargetDefinesARMV86A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV8_7A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_7A:
getTargetDefinesARMV87A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV8_8A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_8A:
getTargetDefinesARMV88A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV8_9A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_9A:
getTargetDefinesARMV89A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV9A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV9A:
getTargetDefinesARMV9A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV9_1A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV9_1A:
getTargetDefinesARMV91A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV9_2A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV9_2A:
getTargetDefinesARMV92A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV9_3A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV9_3A:
getTargetDefinesARMV93A(Opts, Builder);
- if (*ArchInfo == llvm::AArch64::ARMV9_4A)
+ break;
+ case llvm::AArch64::ArchKind::ARMV9_4A:
getTargetDefinesARMV94A(Opts, Builder);
+ break;
+ }
// All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
@@ -521,17 +559,17 @@ bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
StringRef Name, bool Enabled) const {
Features[Name] = Enabled;
- // If this "feature" is an architecture, also add features for all previous
- // architecture versions. In case of v9.x the v8.x counterparts are added too.
- const llvm::AArch64::ArchInfo &ArchInfo =
- llvm::AArch64::ArchInfo::findBySubArch(Name);
-
- if (ArchInfo == llvm::AArch64::INVALID)
- return; // Not an architecture, nothing more to do.
-
- for (const auto *OtherArch : llvm::AArch64::ArchInfos)
- if (ArchInfo.implies(*OtherArch))
- Features[OtherArch->getSubArch()] = Enabled;
+ llvm::AArch64::ArchKind AK = llvm::AArch64::getSubArchArchKind(Name);
+ // Add all previous architecture versions.
+ // In case of v9.x the v8.x counterparts are added too.
+ if ("9" == getArchVersionString(AK))
+ for (llvm::AArch64::ArchKind I = llvm::AArch64::convertV9toV8(AK);
+ I != llvm::AArch64::ArchKind::INVALID; --I)
+ Features[llvm::AArch64::getSubArch(I)] = Enabled;
+
+ for (llvm::AArch64::ArchKind I = --AK; I != llvm::AArch64::ArchKind::INVALID;
+ --I)
+ Features[llvm::AArch64::getSubArch(I)] = Enabled;
}
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
@@ -564,6 +602,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasD128 = false;
HasRCPC = false;
+ ArchKind = llvm::AArch64::ArchKind::INVALID;
+
for (const auto &Feature : Features) {
if (Feature == "+neon")
FPU |= NeonMode;
@@ -625,51 +665,38 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
if (Feature == "+strict-align")
HasUnaligned = false;
// All predecessor archs are added but select the latest one for ArchKind.
- if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
- ArchInfo = &llvm::AArch64::ARMV8A;
- if (Feature == "+v8.1a" &&
- ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
- ArchInfo = &llvm::AArch64::ARMV8_1A;
- if (Feature == "+v8.2a" &&
- ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
- ArchInfo = &llvm::AArch64::ARMV8_2A;
- if (Feature == "+v8.3a" &&
- ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
- ArchInfo = &llvm::AArch64::ARMV8_3A;
- if (Feature == "+v8.4a" &&
- ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
- ArchInfo = &llvm::AArch64::ARMV8_4A;
- if (Feature == "+v8.5a" &&
- ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
- ArchInfo = &llvm::AArch64::ARMV8_5A;
- if (Feature == "+v8.6a" &&
- ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
- ArchInfo = &llvm::AArch64::ARMV8_6A;
- if (Feature == "+v8.7a" &&
- ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
- ArchInfo = &llvm::AArch64::ARMV8_7A;
- if (Feature == "+v8.8a" &&
- ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
- ArchInfo = &llvm::AArch64::ARMV8_8A;
- if (Feature == "+v8.9a" &&
- ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
- ArchInfo = &llvm::AArch64::ARMV8_9A;
- if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
- ArchInfo = &llvm::AArch64::ARMV9A;
- if (Feature == "+v9.1a" &&
- ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
- ArchInfo = &llvm::AArch64::ARMV9_1A;
- if (Feature == "+v9.2a" &&
- ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
- ArchInfo = &llvm::AArch64::ARMV9_2A;
- if (Feature == "+v9.3a" &&
- ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
- ArchInfo = &llvm::AArch64::ARMV9_3A;
- if (Feature == "+v9.4a" &&
- ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
- ArchInfo = &llvm::AArch64::ARMV9_4A;
+ if (Feature == "+v8a" && ArchKind < llvm::AArch64::ArchKind::ARMV8A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV8A;
+ if (Feature == "+v8.1a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_1A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
+ if (Feature == "+v8.2a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_2A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
+ if (Feature == "+v8.3a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_3A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
+ if (Feature == "+v8.4a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_4A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
+ if (Feature == "+v8.5a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_5A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
+ if (Feature == "+v8.6a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_6A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
+ if (Feature == "+v8.7a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_7A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
+ if (Feature == "+v8.8a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_8A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_8A;
+ if (Feature == "+v8.9a" && ArchKind < llvm::AArch64::ArchKind::ARMV8_9A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_9A;
+ if (Feature == "+v9a" && ArchKind < llvm::AArch64::ArchKind::ARMV9A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV9A;
+ if (Feature == "+v9.1a" && ArchKind < llvm::AArch64::ArchKind::ARMV9_1A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV9_1A;
+ if (Feature == "+v9.2a" && ArchKind < llvm::AArch64::ArchKind::ARMV9_2A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV9_2A;
+ if (Feature == "+v9.3a" && ArchKind < llvm::AArch64::ArchKind::ARMV9_3A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV9_3A;
+ if (Feature == "+v9.4a" && ArchKind < llvm::AArch64::ArchKind::ARMV9_4A)
+ ArchKind = llvm::AArch64::ArchKind::ARMV9_4A;
if (Feature == "+v8r")
- ArchInfo = &llvm::AArch64::ARMV8R;
+ ArchKind = llvm::AArch64::ArchKind::ARMV8R;
if (Feature == "+fullfp16")
HasFullFP16 = true;
if (Feature == "+dotprod")
@@ -717,8 +744,8 @@ bool AArch64TargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeaturesVec) const {
// Parse the CPU and add any implied features.
- const llvm::AArch64::ArchInfo &Arch = llvm::AArch64::parseCpu(CPU).Arch;
- if (Arch != llvm::AArch64::INVALID) {
+ llvm::AArch64::ArchKind Arch = llvm::AArch64::parseCPUArch(CPU);
+ if (Arch != llvm::AArch64::ArchKind::INVALID) {
uint64_t Exts = llvm::AArch64::getDefaultExtensions(CPU, Arch);
std::vector<StringRef> CPUFeats;
llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
@@ -779,13 +806,13 @@ ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
FoundArch = true;
std::pair<StringRef, StringRef> Split =
Feature.split("=").second.trim().split("+");
- const llvm::AArch64::ArchInfo &AI = llvm::AArch64::parseArch(Split.first);
+ llvm::AArch64::ArchKind ArchKind = llvm::AArch64::parseArch(Split.first);
// Parse the architecture version, adding the required features to
// Ret.Features.
- if (AI == llvm::AArch64::INVALID)
+ if (ArchKind == llvm::AArch64::ArchKind::INVALID)
continue;
- Ret.Features.push_back(AI.ArchFeature.str());
+ Ret.Features.push_back(llvm::AArch64::getArchFeature(ArchKind).str());
// Add any extra features, after the +
SplitAndAddFeatures(Split.second, Ret.Features);
} else if (Feature.startswith("cpu=")) {
diff --git a/clang/lib/Basic/Targets/AArch64.h b/clang/lib/Basic/Targets/AArch64.h
index 2cbd02569df34..1791e462139f3 100644
--- a/clang/lib/Basic/Targets/AArch64.h
+++ b/clang/lib/Basic/Targets/AArch64.h
@@ -56,11 +56,12 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasD128;
bool HasRCPC;
- const llvm::AArch64::ArchInfo *ArchInfo = &llvm::AArch64::ARMV8A;
+ llvm::AArch64::ArchKind ArchKind;
static const Builtin::Info BuiltinInfo[];
std::string ABI;
+ StringRef getArchProfile() const;
public:
AArch64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts);
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index fcbf2e08d12df..2a1269316bc75 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -70,7 +70,7 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
// Decode AArch64 features from string like +[no]featureA+[no]featureB+...
static bool DecodeAArch64Features(const Driver &D, StringRef text,
std::vector<StringRef> &Features,
- const llvm::AArch64::ArchInfo &ArchInfo) {
+ llvm::AArch64::ArchKind ArchKind) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
@@ -104,14 +104,14 @@ static bool DecodeAArch64Features(const Driver &D, StringRef text,
// +sve implies +f32mm if the base architecture is >= v8.6A (except v9A)
// It isn't the case in general that sve implies both f64mm and f32mm
- if ((ArchInfo == llvm::AArch64::ARMV8_6A ||
- ArchInfo == llvm::AArch64::ARMV8_7A ||
- ArchInfo == llvm::AArch64::ARMV8_8A ||
- ArchInfo == llvm::AArch64::ARMV8_9A ||
- ArchInfo == llvm::AArch64::ARMV9_1A ||
- ArchInfo == llvm::AArch64::ARMV9_2A ||
- ArchInfo == llvm::AArch64::ARMV9_3A ||
- ArchInfo == llvm::AArch64::ARMV9_4A) &&
+ if ((ArchKind == llvm::AArch64::ArchKind::ARMV8_6A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV8_7A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV8_8A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV8_9A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV9_1A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV9_2A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV9_3A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV9_4A) &&
Feature == "sve")
Features.push_back("+f32mm");
}
@@ -123,8 +123,10 @@ static bool DecodeAArch64Features(const Driver &D, StringRef text,
static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
std::vector<StringRef> &Features) {
std::pair<StringRef, StringRef> Split = Mcpu.split("+");
- const llvm::AArch64::ArchInfo *ArchInfo = &llvm::AArch64::ARMV8A;
- CPU = llvm::AArch64::resolveCPUAlias(Split.first);
+ CPU = Split.first;
+ llvm::AArch64::ArchKind ArchKind = llvm::AArch64::ArchKind::ARMV8A;
+
+ CPU = llvm::AArch64::resolveCPUAlias(CPU);
if (CPU == "native")
CPU = llvm::sys::getHostCPUName();
@@ -132,21 +134,21 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
if (CPU == "generic") {
Features.push_back("+neon");
} else {
- ArchInfo = &llvm::AArch64::parseCpu(CPU).Arch;
- if (*ArchInfo == llvm::AArch64::INVALID)
+ ArchKind = llvm::AArch64::parseCPUArch(CPU);
+ if (ArchKind == llvm::AArch64::ArchKind::INVALID)
return false;
- Features.push_back(ArchInfo->ArchFeature);
+ Features.push_back(llvm::AArch64::getArchFeature(ArchKind));
- uint64_t Extension = llvm::AArch64::getDefaultExtensions(CPU, *ArchInfo);
+ uint64_t Extension = llvm::AArch64::getDefaultExtensions(CPU, ArchKind);
if (!llvm::AArch64::getExtensionFeatures(Extension, Features))
return false;
- }
+ }
- if (Split.second.size() &&
- !DecodeAArch64Features(D, Split.second, Features, *ArchInfo))
- return false;
+ if (Split.second.size() &&
+ !DecodeAArch64Features(D, Split.second, Features, ArchKind))
+ return false;
- return true;
+ return true;
}
static bool
@@ -156,26 +158,25 @@ getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March,
std::string MarchLowerCase = March.lower();
std::pair<StringRef, StringRef> Split = StringRef(MarchLowerCase).split("+");
- const llvm::AArch64::ArchInfo *ArchInfo =
- &llvm::AArch64::parseArch(Split.first);
+ llvm::AArch64::ArchKind ArchKind = llvm::AArch64::parseArch(Split.first);
if (Split.first == "native")
- ArchInfo = &llvm::AArch64::getArchForCpu(llvm::sys::getHostCPUName().str());
- if (*ArchInfo == llvm::AArch64::INVALID)
+ ArchKind = llvm::AArch64::getCPUArchKind(llvm::sys::getHostCPUName().str());
+ if (ArchKind == llvm::AArch64::ArchKind::INVALID)
return false;
- Features.push_back(ArchInfo->ArchFeature);
+ Features.push_back(llvm::AArch64::getArchFeature(ArchKind));
// Enable SVE2 by default on Armv9-A.
// It can still be disabled if +nosve2 is present.
// We must do this early so that DecodeAArch64Features has the correct state
- if ((*ArchInfo == llvm::AArch64::ARMV9A ||
- *ArchInfo == llvm::AArch64::ARMV9_1A ||
- *ArchInfo == llvm::AArch64::ARMV9_2A)) {
+ if ((ArchKind == llvm::AArch64::ArchKind::ARMV9A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV9_1A ||
+ ArchKind == llvm::AArch64::ArchKind::ARMV9_2A)) {
Features.push_back("+sve");
Features.push_back("+sve2");
}
if ((Split.second.size() &&
- !DecodeAArch64Features(D, Split.second, Features, *ArchInfo)))
+ !DecodeAArch64Features(D, Split.second, Features, ArchKind)))
return false;
return true;
diff --git a/llvm/include/llvm/Support/AArch64TargetParser.def b/llvm/include/llvm/Support/AArch64TargetParser.def
index 880139e8a0e7b..e09450d28737c 100644
--- a/llvm/include/llvm/Support/AArch64TargetParser.def
+++ b/llvm/include/llvm/Support/AArch64TargetParser.def
@@ -13,85 +13,85 @@
// NOTE: NO INCLUDE GUARD DESIRED!
#ifndef AARCH64_ARCH
-#define AARCH64_ARCH(MAJOR, MINOR, PROFILE, NAME, ID, ARCH_FEATURE, ARCH_BASE_EXT)
+#define AARCH64_ARCH(NAME, ID, ARCH_FEATURE, ARCH_BASE_EXT)
#endif
// NOTE: The order and the grouping of the elements matter to make ArchKind iterable.
// List is organised as armv8a -> armv8n-a, armv9a -> armv9m-a and armv8-r.
-AARCH64_ARCH(0, 0, InvalidProfile, "invalid", INVALID, "+",
+AARCH64_ARCH("invalid", INVALID, "+",
AArch64::AEK_NONE)
-AARCH64_ARCH(8, 0, AProfile, "armv8-a", ARMV8A, "+v8a",
+AARCH64_ARCH("armv8-a", ARMV8A, "+v8a",
(AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_SIMD))
-AARCH64_ARCH(8, 1, AProfile, "armv8.1-a", ARMV8_1A, "+v8.1a",
+AARCH64_ARCH("armv8.1-a", ARMV8_1A, "+v8.1a",
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_LSE | AArch64::AEK_RDM))
-AARCH64_ARCH(8, 2, AProfile, "armv8.2-a", ARMV8_2A, "+v8.2a",
+AARCH64_ARCH("armv8.2-a", ARMV8_2A, "+v8.2a",
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM))
-AARCH64_ARCH(8, 3, AProfile, "armv8.3-a", ARMV8_3A, "+v8.3a",
+AARCH64_ARCH("armv8.3-a", ARMV8_3A, "+v8.3a",
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC))
-AARCH64_ARCH(8, 4, AProfile, "armv8.4-a", ARMV8_4A, "+v8.4a",
+AARCH64_ARCH("armv8.4-a", ARMV8_4A, "+v8.4a",
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD))
-AARCH64_ARCH(8, 5, AProfile, "armv8.5-a", ARMV8_5A, "+v8.5a",
+AARCH64_ARCH("armv8.5-a", ARMV8_5A, "+v8.5a",
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD))
-AARCH64_ARCH(8, 6, AProfile, "armv8.6-a", ARMV8_6A, "+v8.6a",
+AARCH64_ARCH("armv8.6-a", ARMV8_6A, "+v8.6a",
(AArch64::AEK_CRC | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
AArch64::AEK_SM4 | AArch64::AEK_SHA3 | AArch64::AEK_BF16 |
AArch64::AEK_SHA2 | AArch64::AEK_AES | AArch64::AEK_I8MM))
-AARCH64_ARCH(8, 7, AProfile, "armv8.7-a", ARMV8_7A, "+v8.7a",
+AARCH64_ARCH("armv8.7-a", ARMV8_7A, "+v8.7a",
(AArch64::AEK_CRC | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
AArch64::AEK_SM4 | AArch64::AEK_SHA3 | AArch64::AEK_BF16 |
AArch64::AEK_SHA2 | AArch64::AEK_AES | AArch64::AEK_I8MM))
-AARCH64_ARCH(8, 8, AProfile, "armv8.8-a", ARMV8_8A, "+v8.8a",
+AARCH64_ARCH("armv8.8-a", ARMV8_8A, "+v8.8a",
(AArch64::AEK_CRC | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
AArch64::AEK_SM4 | AArch64::AEK_SHA3 | AArch64::AEK_BF16 |
AArch64::AEK_SHA2 | AArch64::AEK_AES | AArch64::AEK_I8MM))
-AARCH64_ARCH(8, 9, AProfile, "armv8.9-a", ARMV8_9A, "+v8.9a",
+AARCH64_ARCH("armv8.9-a", ARMV8_9A, "+v8.9a",
(AArch64::AEK_CRC | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
AArch64::AEK_SM4 | AArch64::AEK_SHA3 | AArch64::AEK_BF16 |
AArch64::AEK_SHA2 | AArch64::AEK_AES | AArch64::AEK_I8MM))
-AARCH64_ARCH(9, 0, AProfile, "armv9-a", ARMV9A, "+v9a",
+AARCH64_ARCH("armv9-a", ARMV9A, "+v9a",
(AArch64::AEK_CRC | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
AArch64::AEK_SVE2))
-AARCH64_ARCH(9, 1, AProfile, "armv9.1-a", ARMV9_1A, "+v9.1a",
+AARCH64_ARCH("armv9.1-a", ARMV9_1A, "+v9.1a",
(AArch64::AEK_CRC | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
AArch64::AEK_BF16 | AArch64::AEK_I8MM | AArch64::AEK_SVE2))
-AARCH64_ARCH(9, 2, AProfile, "armv9.2-a", ARMV9_2A, "+v9.2a",
+AARCH64_ARCH("armv9.2-a", ARMV9_2A, "+v9.2a",
(AArch64::AEK_CRC | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
AArch64::AEK_BF16 | AArch64::AEK_I8MM | AArch64::AEK_SVE2))
-AARCH64_ARCH(9, 3, AProfile, "armv9.3-a", ARMV9_3A, "+v9.3a",
+AARCH64_ARCH("armv9.3-a", ARMV9_3A, "+v9.3a",
(AArch64::AEK_CRC | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
AArch64::AEK_BF16 | AArch64::AEK_I8MM | AArch64::AEK_SVE2))
-AARCH64_ARCH(9, 4, AProfile, "armv9.4-a", ARMV9_4A, "+v9.4a",
+AARCH64_ARCH("armv9.4-a", ARMV9_4A, "+v9.4a",
(AArch64::AEK_CRC | AArch64::AEK_FP |
AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
AArch64::AEK_BF16 | AArch64::AEK_I8MM | AArch64::AEK_SVE2))
// For v8-R, we do not enable crypto and align with GCC that enables a more
// minimal set of optional architecture extensions.
-AARCH64_ARCH(8, 0, RProfile, "armv8-r", ARMV8R, "+v8r",
+AARCH64_ARCH("armv8-r", ARMV8R, "+v8r",
(AArch64::AEK_CRC | AArch64::AEK_RDM | AArch64::AEK_SSBS |
AArch64::AEK_DOTPROD | AArch64::AEK_FP | AArch64::AEK_SIMD |
AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_RAS |
@@ -101,6 +101,7 @@ AARCH64_ARCH(8, 0, RProfile, "armv8-r", ARMV8R, "+v8r",
#ifndef AARCH64_ARCH_EXT_NAME
#define AARCH64_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)
#endif
+// FIXME: This would be nicer were it tablegen
AARCH64_ARCH_EXT_NAME("invalid", AArch64::AEK_INVALID, {}, {})
AARCH64_ARCH_EXT_NAME("none", AArch64::AEK_NONE, {}, {})
AARCH64_ARCH_EXT_NAME("crc", AArch64::AEK_CRC, "+crc", "-crc")
diff --git a/llvm/include/llvm/Support/AArch64TargetParser.h b/llvm/include/llvm/Support/AArch64TargetParser.h
index 27c1849bed7e8..5347c4e1f5e34 100644
--- a/llvm/include/llvm/Support/AArch64TargetParser.h
+++ b/llvm/include/llvm/Support/AArch64TargetParser.h
@@ -15,9 +15,9 @@
#define LLVM_SUPPORT_AARCH64TARGETPARSER_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/VersionTuple.h"
#include <vector>
+// FIXME: This should be made into a class design, to avoid duplication.
namespace llvm {
class Triple;
@@ -83,129 +83,101 @@ enum ArchExtKind : uint64_t {
AEK_LSE128 = 1ULL << 52, // FEAT_LSE128
};
-// Represents an extension that can be enabled with -march=<arch>+<extension>.
-// Typically these correspond to Arm Architecture extensions, unlike
-// SubtargetFeature which may represent either an actual extension or some
-// internal LLVM property.
-struct ExtensionInfo {
- StringRef Name; // Human readable name, e.g. "profile".
- ArchExtKind ID; // Corresponding to the ArchExtKind, this extensions
- // representation in the bitfield.
- StringRef Feature; // -mattr enable string, e.g. "+spe"
- StringRef NegFeature; // -mattr disable string, e.g. "-spe"
-};
-
-inline constexpr ExtensionInfo Extensions[] = {
-#define AARCH64_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE) \
- {NAME, ID, FEATURE, NEGFEATURE},
+enum class ArchKind {
+#define AARCH64_ARCH(NAME, ID, ARCH_FEATURE, ARCH_BASE_EXT) ID,
#include "AArch64TargetParser.def"
};
-enum ArchProfile { AProfile = 'A', RProfile = 'R', InvalidProfile = '?' };
-
-// Information about a specific architecture, e.g. V8.1-A
-struct ArchInfo {
- VersionTuple Version; // Architecture version, major + minor.
- ArchProfile Profile; // Architecture profile
- StringRef Name; // Human readable name, e.g. "armv8.1-a"
- StringRef ArchFeature; // Command line feature flag, e.g. +v8a
- uint64_t DefaultExts; // bitfield of default extensions ArchExtKind
-
- // These are not intended to be copied or created outside of this file.
- ArchInfo(const ArchInfo &) = delete;
- ArchInfo(const ArchInfo &&) = delete;
- ArchInfo &operator=(const ArchInfo &rhs) = delete;
- ArchInfo &&operator=(const ArchInfo &&rhs) = delete;
-
- // Comparison is done by address. Copies should not exist.
- bool operator==(const ArchInfo &Other) const { return this == &Other; }
- bool operator!=(const ArchInfo &Other) const { return this != &Other; }
-
- // Defines the following partial order, indicating when an architecture is
- // a superset of another:
- //
- // v9.4a > v9.3a > v9.2a > v9.1a > v9a;
- // v v v v v
- // v8.9a > v8.8a > v8.7a > v8.6a > v8.5a > v8.4a > ... > v8a;
- //
- // v8r and INVALID have no relation to anything. This is used to
- // determine which features to enable for a given architecture. See
- // AArch64TargetInfo::setFeatureEnabled.
- bool implies(const ArchInfo &Other) const {
- if (this->Profile != Other.Profile)
- return false; // ARMV8R and INVALID
- if (this->Version.getMajor() == Other.Version.getMajor()) {
- return this->Version > Other.Version;
- }
- if (this->Version.getMajor() == 9 && Other.Version.getMajor() == 8) {
- return this->Version.getMinor().value() + 5 >=
- Other.Version.getMinor().value();
- }
- return false;
- }
+struct ArchNames {
+ StringRef Name;
+ StringRef ArchFeature;
+ uint64_t ArchBaseExtensions;
+ ArchKind ID;
// Return ArchFeature without the leading "+".
- constexpr StringRef getSubArch() const { return ArchFeature.substr(1); }
-
- // Search for ArchInfo by SubArch name
- static const ArchInfo &findBySubArch(StringRef SubArch);
+ StringRef getSubArch() const { return ArchFeature.substr(1); }
};
-// Create ArchInfo structs named <ID>
-#define AARCH64_ARCH(MAJOR, MINOR, PROFILE, NAME, ID, ARCH_FEATURE, \
- ARCH_BASE_EXT) \
- inline constexpr ArchInfo ID = {VersionTuple{MAJOR, MINOR}, PROFILE, NAME, \
- ARCH_FEATURE, ARCH_BASE_EXT};
+const ArchNames AArch64ARCHNames[] = {
+#define AARCH64_ARCH(NAME, ID, ARCH_FEATURE, ARCH_BASE_EXT) \
+ {NAME, ARCH_FEATURE, ARCH_BASE_EXT, AArch64::ArchKind::ID},
#include "AArch64TargetParser.def"
-#undef AARCH64_ARCH
+};
-// The set of all architectures
-inline constexpr std::array<const ArchInfo *, 17> ArchInfos = {
-#define AARCH64_ARCH(MAJOR, MINOR, PROFILE, NAME, ID, ARCH_FEATURE, \
- ARCH_BASE_EXT) \
- &ID,
+// List of Arch Extension names.
+struct ExtName {
+ StringRef Name;
+ uint64_t ID;
+ StringRef Feature;
+ StringRef NegFeature;
+};
+
+const ExtName AArch64ARCHExtNames[] = {
+#define AARCH64_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE) \
+ {NAME, ID, FEATURE, NEGFEATURE},
#include "AArch64TargetParser.def"
};
-// Details of a specific CPU.
-struct CpuInfo {
- StringRef Name; // Name, as written for -mcpu.
- const ArchInfo &Arch;
+// List of CPU names and their arches.
+// The same CPU can have multiple arches and can be default on multiple arches.
+// When finding the Arch for a CPU, first-found prevails. Sort them accordingly.
+// When this becomes table-generated, we'd probably need two tables.
+struct CpuNames {
+ StringRef Name;
+ ArchKind ArchID;
uint64_t DefaultExtensions;
};
-inline constexpr CpuInfo CpuInfos[] = {
-#define AARCH64_CPU_NAME(NAME, ARCH_ID, DEFAULT_EXT) \
- {NAME, ARCH_ID, DEFAULT_EXT},
+const CpuNames AArch64CPUNames[] = {
+#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_EXT) \
+ {NAME, AArch64::ArchKind::ID, DEFAULT_EXT},
#include "AArch64TargetParser.def"
};
-// An alias for a CPU.
-struct CpuAlias {
+const struct {
StringRef Alias;
StringRef Name;
+} AArch64CPUAliases[] = {
+#define AARCH64_CPU_ALIAS(ALIAS, NAME) {ALIAS, NAME},
+#include "AArch64TargetParser.def"
};
-inline constexpr CpuAlias CpuAliases[] = {
-#define AARCH64_CPU_ALIAS(ALIAS, NAME) {ALIAS, NAME},
+const ArchKind ArchKinds[] = {
+#define AARCH64_ARCH(NAME, ID, ARCH_FEATURE, ARCH_BASE_EXT) ArchKind::ID,
#include "AArch64TargetParser.def"
};
+inline ArchKind &operator--(ArchKind &Kind) {
+ if ((Kind == ArchKind::INVALID) || (Kind == ArchKind::ARMV8A) ||
+ (Kind == ArchKind::ARMV9A) || (Kind == ArchKind::ARMV8R))
+ Kind = ArchKind::INVALID;
+ else {
+ unsigned KindAsInteger = static_cast<unsigned>(Kind);
+ Kind = static_cast<ArchKind>(--KindAsInteger);
+ }
+ return Kind;
+}
+
bool getExtensionFeatures(uint64_t Extensions,
std::vector<StringRef> &Features);
+StringRef getArchFeature(ArchKind AK);
+StringRef getArchName(ArchKind AK);
+StringRef getSubArch(ArchKind AK);
+StringRef getArchExtName(unsigned ArchExtKind);
StringRef getArchExtFeature(StringRef ArchExt);
+ArchKind convertV9toV8(ArchKind AK);
StringRef resolveCPUAlias(StringRef CPU);
// Information by Name
-uint64_t getDefaultExtensions(StringRef CPU, const ArchInfo &AI);
-const ArchInfo &getArchForCpu(StringRef CPU);
+uint64_t getDefaultExtensions(StringRef CPU, ArchKind AK);
+ArchKind getCPUArchKind(StringRef CPU);
+ArchKind getSubArchArchKind(StringRef SubArch);
// Parser
-const ArchInfo &parseArch(StringRef Arch);
+ArchKind parseArch(StringRef Arch);
ArchExtKind parseArchExt(StringRef ArchExt);
-// Given the name of a CPU or alias, return the corresponding CpuInfo.
-const CpuInfo &parseCpu(StringRef Name);
+ArchKind parseCPUArch(StringRef CPU);
// Used by target parser tests
void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values);
diff --git a/llvm/include/llvm/Support/VersionTuple.h b/llvm/include/llvm/Support/VersionTuple.h
index 3adec07e2a23c..2020a5c06f561 100644
--- a/llvm/include/llvm/Support/VersionTuple.h
+++ b/llvm/include/llvm/Support/VersionTuple.h
@@ -41,25 +41,24 @@ class VersionTuple {
unsigned HasBuild : 1;
public:
- constexpr VersionTuple()
+ VersionTuple()
: Major(0), Minor(0), HasMinor(false), Subminor(0), HasSubminor(false),
Build(0), HasBuild(false) {}
- explicit constexpr VersionTuple(unsigned Major)
+ explicit VersionTuple(unsigned Major)
: Major(Major), Minor(0), HasMinor(false), Subminor(0),
HasSubminor(false), Build(0), HasBuild(false) {}
- explicit constexpr VersionTuple(unsigned Major, unsigned Minor)
+ explicit VersionTuple(unsigned Major, unsigned Minor)
: Major(Major), Minor(Minor), HasMinor(true), Subminor(0),
HasSubminor(false), Build(0), HasBuild(false) {}
- explicit constexpr VersionTuple(unsigned Major, unsigned Minor,
- unsigned Subminor)
+ explicit VersionTuple(unsigned Major, unsigned Minor, unsigned Subminor)
: Major(Major), Minor(Minor), HasMinor(true), Subminor(Subminor),
HasSubminor(true), Build(0), HasBuild(false) {}
- explicit constexpr VersionTuple(unsigned Major, unsigned Minor,
- unsigned Subminor, unsigned Build)
+ explicit VersionTuple(unsigned Major, unsigned Minor, unsigned Subminor,
+ unsigned Build)
: Major(Major), Minor(Minor), HasMinor(true), Subminor(Subminor),
HasSubminor(true), Build(Build), HasBuild(true) {}
diff --git a/llvm/lib/Support/AArch64TargetParser.cpp b/llvm/lib/Support/AArch64TargetParser.cpp
index a6a8777d3914c..aecb193e409a8 100644
--- a/llvm/lib/Support/AArch64TargetParser.cpp
+++ b/llvm/lib/Support/AArch64TargetParser.cpp
@@ -25,33 +25,34 @@ static unsigned checkArchVersion(llvm::StringRef Arch) {
return 0;
}
-uint64_t AArch64::getDefaultExtensions(StringRef CPU,
- const AArch64::ArchInfo &AI) {
+uint64_t AArch64::getDefaultExtensions(StringRef CPU, AArch64::ArchKind AK) {
if (CPU == "generic")
- return AI.DefaultExts;
+ return AArch64ARCHNames[static_cast<unsigned>(AK)].ArchBaseExtensions;
return StringSwitch<uint64_t>(CPU)
-#define AARCH64_CPU_NAME(NAME, ARCH_ID, DEFAULT_EXT) \
- .Case(NAME, ARCH_ID.DefaultExts | DEFAULT_EXT)
+#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_EXT) \
+ .Case(NAME, AArch64ARCHNames[static_cast<unsigned>(ArchKind::ID)] \
+ .ArchBaseExtensions | \
+ DEFAULT_EXT)
#include "../../include/llvm/Support/AArch64TargetParser.def"
.Default(AArch64::AEK_INVALID);
}
-const AArch64::ArchInfo &AArch64::getArchForCpu(StringRef CPU) {
+AArch64::ArchKind AArch64::getCPUArchKind(StringRef CPU) {
if (CPU == "generic")
- return ARMV8A;
+ return ArchKind::ARMV8A;
- return *StringSwitch<const AArch64::ArchInfo *>(CPU)
-#define AARCH64_CPU_NAME(NAME, ARCH_ID, DEFAULT_EXT) .Case(NAME, &ARCH_ID)
+ return StringSwitch<AArch64::ArchKind>(CPU)
+#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_EXT) .Case(NAME, ArchKind::ID)
#include "../../include/llvm/Support/AArch64TargetParser.def"
- .Default(&INVALID);
+ .Default(ArchKind::INVALID);
}
-const AArch64::ArchInfo &AArch64::ArchInfo::findBySubArch(StringRef SubArch) {
- for (const auto *A : AArch64::ArchInfos)
- if (A->getSubArch() == SubArch)
- return *A;
- return AArch64::INVALID;
+AArch64::ArchKind AArch64::getSubArchArchKind(StringRef SubArch) {
+ for (const auto &A : AArch64ARCHNames)
+ if (A.getSubArch() == SubArch)
+ return A.ID;
+ return ArchKind::INVALID;
}
bool AArch64::getExtensionFeatures(uint64_t Extensions,
@@ -79,27 +80,53 @@ StringRef AArch64::resolveCPUAlias(StringRef CPU) {
.Default(CPU);
}
+StringRef AArch64::getArchFeature(AArch64::ArchKind AK) {
+ return AArch64ARCHNames[static_cast<unsigned>(AK)].ArchFeature;
+}
+
+StringRef AArch64::getArchName(AArch64::ArchKind AK) {
+ return AArch64ARCHNames[static_cast<unsigned>(AK)].Name;
+}
+
+StringRef AArch64::getSubArch(AArch64::ArchKind AK) {
+ return AArch64ARCHNames[static_cast<unsigned>(AK)].getSubArch();
+}
+
StringRef AArch64::getArchExtFeature(StringRef ArchExt) {
if (ArchExt.startswith("no")) {
StringRef ArchExtBase(ArchExt.substr(2));
- for (const auto &AE : Extensions) {
+ for (const auto &AE : AArch64ARCHExtNames) {
if (!AE.NegFeature.empty() && ArchExtBase == AE.Name)
return AE.NegFeature;
}
}
- for (const auto &AE : Extensions)
+ for (const auto &AE : AArch64ARCHExtNames)
if (!AE.Feature.empty() && ArchExt == AE.Name)
return AE.Feature;
return StringRef();
}
+AArch64::ArchKind AArch64::convertV9toV8(AArch64::ArchKind AK) {
+ if (AK == AArch64::ArchKind::INVALID)
+ return AK;
+ if (AK < AArch64::ArchKind::ARMV9A)
+ return AK;
+ if (AK >= AArch64::ArchKind::ARMV8R)
+ return AArch64::ArchKind::INVALID;
+ unsigned AK_v8 = static_cast<unsigned>(AArch64::ArchKind::ARMV8_5A);
+ AK_v8 += static_cast<unsigned>(AK) -
+ static_cast<unsigned>(AArch64::ArchKind::ARMV9A);
+ return static_cast<AArch64::ArchKind>(AK_v8);
+}
+
void AArch64::fillValidCPUArchList(SmallVectorImpl<StringRef> &Values) {
- for (const auto &C : CpuInfos)
- if (C.Arch != INVALID)
- Values.push_back(C.Name);
+ for (const auto &Arch : AArch64CPUNames) {
+ if (Arch.ArchID != ArchKind::INVALID)
+ Values.push_back(Arch.Name);
+ }
- for (const auto &Alias : CpuAliases)
+ for (const auto &Alias: AArch64CPUAliases)
Values.push_back(Alias.Alias);
}
@@ -109,37 +136,39 @@ bool AArch64::isX18ReservedByDefault(const Triple &TT) {
}
// Allows partial match, ex. "v8a" matches "armv8a".
-const AArch64::ArchInfo &AArch64::parseArch(StringRef Arch) {
+AArch64::ArchKind AArch64::parseArch(StringRef Arch) {
Arch = llvm::ARM::getCanonicalArchName(Arch);
if (checkArchVersion(Arch) < 8)
- return AArch64::INVALID;
+ return ArchKind::INVALID;
StringRef Syn = llvm::ARM::getArchSynonym(Arch);
- for (const auto *A : ArchInfos) {
- if (A->Name.endswith(Syn))
- return *A;
+ for (const auto &A : AArch64ARCHNames) {
+ if (A.Name.endswith(Syn))
+ return A.ID;
}
- return AArch64::INVALID;
+ return ArchKind::INVALID;
}
AArch64::ArchExtKind AArch64::parseArchExt(StringRef ArchExt) {
- for (const auto &A : Extensions) {
+ for (const auto &A : AArch64ARCHExtNames) {
if (ArchExt == A.Name)
return static_cast<ArchExtKind>(A.ID);
}
return AArch64::AEK_INVALID;
}
-const AArch64::CpuInfo &AArch64::parseCpu(StringRef Name) {
+AArch64::ArchKind AArch64::parseCPUArch(StringRef CPU) {
// Resolve aliases first.
- Name = resolveCPUAlias(Name);
-
+ for (const auto &Alias : AArch64CPUAliases) {
+ if (CPU == Alias.Alias) {
+ CPU = Alias.Name;
+ break;
+ }
+ }
// Then find the CPU name.
- for (const auto &C : CpuInfos)
- if (Name == C.Name)
- return C;
+ for (const auto &C : AArch64CPUNames)
+ if (CPU == C.Name)
+ return C.ArchID;
- // "generic" returns invalid.
- assert(Name != "invalid" && "Unexpected recursion.");
- return parseCpu("invalid");
+ return ArchKind::INVALID;
}
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index baec22ad48d96..53290c6e8c196 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -6806,48 +6806,67 @@ bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
return false;
}
-static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
+static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
SmallVector<StringRef, 4> &RequestedExtensions) {
const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
if (!NoCrypto && Crypto) {
- // Map 'generic' (and others) to sha2 and aes, because
- // that was the traditional meaning of crypto.
- if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
- ArchInfo == AArch64::ARMV8_3A) {
+ switch (ArchKind) {
+ default:
+ // Map 'generic' (and others) to sha2 and aes, because
+ // that was the traditional meaning of crypto.
+ case AArch64::ArchKind::ARMV8_1A:
+ case AArch64::ArchKind::ARMV8_2A:
+ case AArch64::ArchKind::ARMV8_3A:
RequestedExtensions.push_back("sha2");
RequestedExtensions.push_back("aes");
- }
- if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
- ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
- ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
- ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
- ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
- ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
+ break;
+ case AArch64::ArchKind::ARMV8_4A:
+ case AArch64::ArchKind::ARMV8_5A:
+ case AArch64::ArchKind::ARMV8_6A:
+ case AArch64::ArchKind::ARMV8_7A:
+ case AArch64::ArchKind::ARMV8_8A:
+ case AArch64::ArchKind::ARMV8_9A:
+ case AArch64::ArchKind::ARMV9A:
+ case AArch64::ArchKind::ARMV9_1A:
+ case AArch64::ArchKind::ARMV9_2A:
+ case AArch64::ArchKind::ARMV9_3A:
+ case AArch64::ArchKind::ARMV9_4A:
+ case AArch64::ArchKind::ARMV8R:
RequestedExtensions.push_back("sm4");
RequestedExtensions.push_back("sha3");
RequestedExtensions.push_back("sha2");
RequestedExtensions.push_back("aes");
+ break;
}
} else if (NoCrypto) {
- // Map 'generic' (and others) to sha2 and aes, because
- // that was the traditional meaning of crypto.
- if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
- ArchInfo == AArch64::ARMV8_3A) {
+ switch (ArchKind) {
+ default:
+ // Map 'generic' (and others) to sha2 and aes, because
+ // that was the traditional meaning of crypto.
+ case AArch64::ArchKind::ARMV8_1A:
+ case AArch64::ArchKind::ARMV8_2A:
+ case AArch64::ArchKind::ARMV8_3A:
RequestedExtensions.push_back("nosha2");
RequestedExtensions.push_back("noaes");
- }
- if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
- ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
- ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
- ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
- ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
- ArchInfo == AArch64::ARMV9_4A) {
+ break;
+ case AArch64::ArchKind::ARMV8_4A:
+ case AArch64::ArchKind::ARMV8_5A:
+ case AArch64::ArchKind::ARMV8_6A:
+ case AArch64::ArchKind::ARMV8_7A:
+ case AArch64::ArchKind::ARMV8_8A:
+ case AArch64::ArchKind::ARMV8_9A:
+ case AArch64::ArchKind::ARMV9A:
+ case AArch64::ArchKind::ARMV9_1A:
+ case AArch64::ArchKind::ARMV9_2A:
+ case AArch64::ArchKind::ARMV9_3A:
+ case AArch64::ArchKind::ARMV9_4A:
RequestedExtensions.push_back("nosm4");
RequestedExtensions.push_back("nosha3");
RequestedExtensions.push_back("nosha2");
RequestedExtensions.push_back("noaes");
+ break;
}
}
}
@@ -6861,8 +6880,8 @@ bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
std::tie(Arch, ExtensionString) =
getParser().parseStringToEndOfStatement().trim().split('+');
- const AArch64::ArchInfo &ArchInfo = AArch64::parseArch(Arch);
- if (ArchInfo == AArch64::INVALID)
+ AArch64::ArchKind ID = AArch64::parseArch(Arch);
+ if (ID == AArch64::ArchKind::INVALID)
return Error(ArchLoc, "unknown arch name");
if (parseToken(AsmToken::EndOfStatement))
@@ -6870,9 +6889,9 @@ bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
// Get the architecture and extension features.
std::vector<StringRef> AArch64Features;
- AArch64Features.push_back(ArchInfo.ArchFeature);
- AArch64::getExtensionFeatures(
- AArch64::getDefaultExtensions("generic", ArchInfo), AArch64Features);
+ AArch64Features.push_back(AArch64::getArchFeature(ID));
+ AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
+ AArch64Features);
MCSubtargetInfo &STI = copySTI();
std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
@@ -6883,7 +6902,7 @@ bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
if (!ExtensionString.empty())
ExtensionString.split(RequestedExtensions, '+');
- ExpandCryptoAEK(ArchInfo, RequestedExtensions);
+ ExpandCryptoAEK(ID, RequestedExtensions);
FeatureBitset Features = STI.getFeatureBits();
for (auto Name : RequestedExtensions) {
@@ -6979,7 +6998,7 @@ bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
CurLoc = incrementLoc(CurLoc, CPU.size());
- ExpandCryptoAEK(llvm::AArch64::getArchForCpu(CPU), RequestedExtensions);
+ ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
for (auto Name : RequestedExtensions) {
// Advance source location past '+'.
diff --git a/llvm/unittests/Support/TargetParserTest.cpp b/llvm/unittests/Support/TargetParserTest.cpp
index a99df08f2789c..5bcb131c13958 100644
--- a/llvm/unittests/Support/TargetParserTest.cpp
+++ b/llvm/unittests/Support/TargetParserTest.cpp
@@ -952,11 +952,11 @@ class AArch64CPUTestFixture
TEST_P(AArch64CPUTestFixture, testAArch64CPU) {
ARMCPUTestParams params = GetParam();
- const AArch64::ArchInfo &AI = AArch64::parseCpu(params.CPUName).Arch;
- EXPECT_EQ(params.ExpectedArch, AI.Name);
+ AArch64::ArchKind AK = AArch64::parseCPUArch(params.CPUName);
+ EXPECT_EQ(params.ExpectedArch, AArch64::getArchName(AK));
uint64_t default_extensions =
- AArch64::getDefaultExtensions(params.CPUName, AI);
+ AArch64::getDefaultExtensions(params.CPUName, AK);
EXPECT_PRED_FORMAT2(AssertSameExtensionFlags<ARM::ISAKind::AARCH64>,
params.ExpectedFlags, default_extensions);
}
@@ -1402,14 +1402,14 @@ TEST(TargetParserTest, testAArch64CPUArchList) {
// valid, and match the expected 'magic' count.
EXPECT_EQ(List.size(), NumAArch64CPUArchs);
for(StringRef CPU : List) {
- EXPECT_NE(AArch64::parseCpu(CPU).Arch, AArch64::INVALID);
+ EXPECT_NE(AArch64::parseCPUArch(CPU), AArch64::ArchKind::INVALID);
}
}
bool testAArch64Arch(StringRef Arch, StringRef DefaultCPU, StringRef SubArch,
unsigned ArchAttr) {
- const AArch64::ArchInfo &AI = AArch64::parseArch(Arch);
- return AI != AArch64::INVALID;
+ AArch64::ArchKind AK = AArch64::parseArch(Arch);
+ return AK != AArch64::ArchKind::INVALID;
}
TEST(TargetParserTest, testAArch64Arch) {
@@ -1445,81 +1445,148 @@ TEST(TargetParserTest, testAArch64Arch) {
ARMBuildAttrs::CPUArch::v8_A));
}
-bool testAArch64Extension(StringRef CPUName, const AArch64::ArchInfo &AI,
+bool testAArch64Extension(StringRef CPUName, AArch64::ArchKind AK,
StringRef ArchExt) {
- return AArch64::getDefaultExtensions(CPUName, AI) &
+ return AArch64::getDefaultExtensions(CPUName, AK) &
AArch64::parseArchExt(ArchExt);
}
TEST(TargetParserTest, testAArch64Extension) {
- EXPECT_FALSE(testAArch64Extension("cortex-a34", AArch64::INVALID, "ras"));
- EXPECT_FALSE(testAArch64Extension("cortex-a35", AArch64::INVALID, "ras"));
- EXPECT_FALSE(testAArch64Extension("cortex-a53", AArch64::INVALID, "ras"));
- EXPECT_TRUE(testAArch64Extension("cortex-a55", AArch64::INVALID, "ras"));
- EXPECT_TRUE(testAArch64Extension("cortex-a55", AArch64::INVALID, "fp16"));
- EXPECT_FALSE(testAArch64Extension("cortex-a55", AArch64::INVALID, "fp16fml"));
- EXPECT_TRUE(testAArch64Extension("cortex-a55", AArch64::INVALID, "dotprod"));
- EXPECT_FALSE(testAArch64Extension("cortex-a57", AArch64::INVALID, "ras"));
- EXPECT_FALSE(testAArch64Extension("cortex-a72", AArch64::INVALID, "ras"));
- EXPECT_FALSE(testAArch64Extension("cortex-a73", AArch64::INVALID, "ras"));
- EXPECT_TRUE(testAArch64Extension("cortex-a75", AArch64::INVALID, "ras"));
- EXPECT_TRUE(testAArch64Extension("cortex-a75", AArch64::INVALID, "fp16"));
- EXPECT_FALSE(testAArch64Extension("cortex-a75", AArch64::INVALID, "fp16fml"));
- EXPECT_TRUE(testAArch64Extension("cortex-a75", AArch64::INVALID, "dotprod"));
- EXPECT_TRUE(testAArch64Extension("cortex-r82", AArch64::INVALID, "ras"));
- EXPECT_TRUE(testAArch64Extension("cortex-r82", AArch64::INVALID, "fp16"));
- EXPECT_TRUE(testAArch64Extension("cortex-r82", AArch64::INVALID, "fp16fml"));
- EXPECT_TRUE(testAArch64Extension("cortex-r82", AArch64::INVALID, "dotprod"));
- EXPECT_TRUE(testAArch64Extension("cortex-r82", AArch64::INVALID, "lse"));
- EXPECT_FALSE(testAArch64Extension("cyclone", AArch64::INVALID, "ras"));
- EXPECT_FALSE(testAArch64Extension("exynos-m3", AArch64::INVALID, "ras"));
- EXPECT_TRUE(testAArch64Extension("exynos-m4", AArch64::INVALID, "dotprod"));
- EXPECT_TRUE(testAArch64Extension("exynos-m4", AArch64::INVALID, "fp16"));
- EXPECT_TRUE(testAArch64Extension("exynos-m4", AArch64::INVALID, "lse"));
- EXPECT_TRUE(testAArch64Extension("exynos-m4", AArch64::INVALID, "ras"));
- EXPECT_TRUE(testAArch64Extension("exynos-m4", AArch64::INVALID, "rdm"));
- EXPECT_TRUE(testAArch64Extension("exynos-m5", AArch64::INVALID, "dotprod"));
- EXPECT_TRUE(testAArch64Extension("exynos-m5", AArch64::INVALID, "fp16"));
- EXPECT_TRUE(testAArch64Extension("exynos-m5", AArch64::INVALID, "lse"));
- EXPECT_TRUE(testAArch64Extension("exynos-m5", AArch64::INVALID, "ras"));
- EXPECT_TRUE(testAArch64Extension("exynos-m5", AArch64::INVALID, "rdm"));
- EXPECT_TRUE(testAArch64Extension("falkor", AArch64::INVALID, "rdm"));
- EXPECT_FALSE(testAArch64Extension("kryo", AArch64::INVALID, "ras"));
- EXPECT_TRUE(testAArch64Extension("saphira", AArch64::INVALID, "crc"));
- EXPECT_TRUE(testAArch64Extension("saphira", AArch64::INVALID, "lse"));
- EXPECT_TRUE(testAArch64Extension("saphira", AArch64::INVALID, "rdm"));
- EXPECT_TRUE(testAArch64Extension("saphira", AArch64::INVALID, "ras"));
- EXPECT_TRUE(testAArch64Extension("saphira", AArch64::INVALID, "rcpc"));
- EXPECT_TRUE(testAArch64Extension("saphira", AArch64::INVALID, "profile"));
- EXPECT_FALSE(testAArch64Extension("saphira", AArch64::INVALID, "fp16"));
- EXPECT_FALSE(testAArch64Extension("thunderx2t99", AArch64::INVALID, "ras"));
- EXPECT_FALSE(testAArch64Extension("thunderx", AArch64::INVALID, "lse"));
- EXPECT_FALSE(testAArch64Extension("thunderxt81", AArch64::INVALID, "lse"));
- EXPECT_FALSE(testAArch64Extension("thunderxt83", AArch64::INVALID, "lse"));
- EXPECT_FALSE(testAArch64Extension("thunderxt88", AArch64::INVALID, "lse"));
- EXPECT_TRUE(testAArch64Extension("tsv110", AArch64::INVALID, "crypto"));
- EXPECT_FALSE(testAArch64Extension("tsv110", AArch64::INVALID, "sha3"));
- EXPECT_FALSE(testAArch64Extension("tsv110", AArch64::INVALID, "sm4"));
- EXPECT_TRUE(testAArch64Extension("tsv110", AArch64::INVALID, "ras"));
- EXPECT_TRUE(testAArch64Extension("tsv110", AArch64::INVALID, "profile"));
- EXPECT_TRUE(testAArch64Extension("tsv110", AArch64::INVALID, "fp16"));
- EXPECT_TRUE(testAArch64Extension("tsv110", AArch64::INVALID, "fp16fml"));
- EXPECT_TRUE(testAArch64Extension("tsv110", AArch64::INVALID, "dotprod"));
- EXPECT_TRUE(testAArch64Extension("a64fx", AArch64::INVALID, "fp16"));
- EXPECT_TRUE(testAArch64Extension("a64fx", AArch64::INVALID, "sve"));
- EXPECT_FALSE(testAArch64Extension("a64fx", AArch64::INVALID, "sve2"));
- EXPECT_TRUE(testAArch64Extension("carmel", AArch64::INVALID, "crypto"));
- EXPECT_TRUE(testAArch64Extension("carmel", AArch64::INVALID, "fp16"));
-
- EXPECT_FALSE(testAArch64Extension("generic", AArch64::ARMV8A, "ras"));
- EXPECT_FALSE(testAArch64Extension("generic", AArch64::ARMV8_1A, "ras"));
- EXPECT_FALSE(testAArch64Extension("generic", AArch64::ARMV8_2A, "profile"));
- EXPECT_FALSE(testAArch64Extension("generic", AArch64::ARMV8_2A, "fp16"));
- EXPECT_FALSE(testAArch64Extension("generic", AArch64::ARMV8_2A, "fp16fml"));
- EXPECT_FALSE(testAArch64Extension("generic", AArch64::ARMV8_3A, "fp16"));
- EXPECT_FALSE(testAArch64Extension("generic", AArch64::ARMV8_3A, "fp16fml"));
- EXPECT_FALSE(testAArch64Extension("generic", AArch64::ARMV8_4A, "fp16"));
- EXPECT_FALSE(testAArch64Extension("generic", AArch64::ARMV8_4A, "fp16fml"));
+ EXPECT_FALSE(testAArch64Extension("cortex-a34",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_FALSE(testAArch64Extension("cortex-a35",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_FALSE(testAArch64Extension("cortex-a53",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_TRUE(testAArch64Extension("cortex-a55",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_TRUE(testAArch64Extension("cortex-a55",
+ AArch64::ArchKind::INVALID, "fp16"));
+ EXPECT_FALSE(testAArch64Extension("cortex-a55",
+ AArch64::ArchKind::INVALID, "fp16fml"));
+ EXPECT_TRUE(testAArch64Extension("cortex-a55",
+ AArch64::ArchKind::INVALID, "dotprod"));
+ EXPECT_FALSE(testAArch64Extension("cortex-a57",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_FALSE(testAArch64Extension("cortex-a72",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_FALSE(testAArch64Extension("cortex-a73",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_TRUE(testAArch64Extension("cortex-a75",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_TRUE(testAArch64Extension("cortex-a75",
+ AArch64::ArchKind::INVALID, "fp16"));
+ EXPECT_FALSE(testAArch64Extension("cortex-a75",
+ AArch64::ArchKind::INVALID, "fp16fml"));
+ EXPECT_TRUE(testAArch64Extension("cortex-a75",
+ AArch64::ArchKind::INVALID, "dotprod"));
+ EXPECT_TRUE(testAArch64Extension("cortex-r82",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_TRUE(testAArch64Extension("cortex-r82",
+ AArch64::ArchKind::INVALID, "fp16"));
+ EXPECT_TRUE(testAArch64Extension("cortex-r82",
+ AArch64::ArchKind::INVALID, "fp16fml"));
+ EXPECT_TRUE(testAArch64Extension("cortex-r82",
+ AArch64::ArchKind::INVALID, "dotprod"));
+ EXPECT_TRUE(testAArch64Extension("cortex-r82",
+ AArch64::ArchKind::INVALID, "lse"));
+ EXPECT_FALSE(testAArch64Extension("cyclone",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_FALSE(testAArch64Extension("exynos-m3",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_TRUE(testAArch64Extension("exynos-m4",
+ AArch64::ArchKind::INVALID, "dotprod"));
+ EXPECT_TRUE(testAArch64Extension("exynos-m4",
+ AArch64::ArchKind::INVALID, "fp16"));
+ EXPECT_TRUE(testAArch64Extension("exynos-m4",
+ AArch64::ArchKind::INVALID, "lse"));
+ EXPECT_TRUE(testAArch64Extension("exynos-m4",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_TRUE(testAArch64Extension("exynos-m4",
+ AArch64::ArchKind::INVALID, "rdm"));
+ EXPECT_TRUE(testAArch64Extension("exynos-m5",
+ AArch64::ArchKind::INVALID, "dotprod"));
+ EXPECT_TRUE(testAArch64Extension("exynos-m5",
+ AArch64::ArchKind::INVALID, "fp16"));
+ EXPECT_TRUE(testAArch64Extension("exynos-m5",
+ AArch64::ArchKind::INVALID, "lse"));
+ EXPECT_TRUE(testAArch64Extension("exynos-m5",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_TRUE(testAArch64Extension("exynos-m5",
+ AArch64::ArchKind::INVALID, "rdm"));
+ EXPECT_TRUE(testAArch64Extension("falkor",
+ AArch64::ArchKind::INVALID, "rdm"));
+ EXPECT_FALSE(testAArch64Extension("kryo",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_TRUE(testAArch64Extension("saphira",
+ AArch64::ArchKind::INVALID, "crc"));
+ EXPECT_TRUE(testAArch64Extension("saphira",
+ AArch64::ArchKind::INVALID, "lse"));
+ EXPECT_TRUE(testAArch64Extension("saphira",
+ AArch64::ArchKind::INVALID, "rdm"));
+ EXPECT_TRUE(testAArch64Extension("saphira",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_TRUE(testAArch64Extension("saphira",
+ AArch64::ArchKind::INVALID, "rcpc"));
+ EXPECT_TRUE(testAArch64Extension("saphira",
+ AArch64::ArchKind::INVALID, "profile"));
+ EXPECT_FALSE(testAArch64Extension("saphira",
+ AArch64::ArchKind::INVALID, "fp16"));
+ EXPECT_FALSE(testAArch64Extension("thunderx2t99",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_FALSE(testAArch64Extension("thunderx",
+ AArch64::ArchKind::INVALID, "lse"));
+ EXPECT_FALSE(testAArch64Extension("thunderxt81",
+ AArch64::ArchKind::INVALID, "lse"));
+ EXPECT_FALSE(testAArch64Extension("thunderxt83",
+ AArch64::ArchKind::INVALID, "lse"));
+ EXPECT_FALSE(testAArch64Extension("thunderxt88",
+ AArch64::ArchKind::INVALID, "lse"));
+ EXPECT_TRUE(testAArch64Extension("tsv110",
+ AArch64::ArchKind::INVALID, "crypto"));
+ EXPECT_FALSE(testAArch64Extension("tsv110",
+ AArch64::ArchKind::INVALID, "sha3"));
+ EXPECT_FALSE(testAArch64Extension("tsv110",
+ AArch64::ArchKind::INVALID, "sm4"));
+ EXPECT_TRUE(testAArch64Extension("tsv110",
+ AArch64::ArchKind::INVALID, "ras"));
+ EXPECT_TRUE(testAArch64Extension("tsv110",
+ AArch64::ArchKind::INVALID, "profile"));
+ EXPECT_TRUE(testAArch64Extension("tsv110",
+ AArch64::ArchKind::INVALID, "fp16"));
+ EXPECT_TRUE(testAArch64Extension("tsv110",
+ AArch64::ArchKind::INVALID, "fp16fml"));
+ EXPECT_TRUE(testAArch64Extension("tsv110",
+ AArch64::ArchKind::INVALID, "dotprod"));
+ EXPECT_TRUE(testAArch64Extension("a64fx",
+ AArch64::ArchKind::INVALID, "fp16"));
+ EXPECT_TRUE(testAArch64Extension("a64fx",
+ AArch64::ArchKind::INVALID, "sve"));
+ EXPECT_FALSE(testAArch64Extension("a64fx",
+ AArch64::ArchKind::INVALID, "sve2"));
+ EXPECT_TRUE(
+ testAArch64Extension("carmel", AArch64::ArchKind::INVALID, "crypto"));
+ EXPECT_TRUE(
+ testAArch64Extension("carmel", AArch64::ArchKind::INVALID, "fp16"));
+
+ EXPECT_FALSE(testAArch64Extension(
+ "generic", AArch64::ArchKind::ARMV8A, "ras"));
+ EXPECT_FALSE(testAArch64Extension(
+ "generic", AArch64::ArchKind::ARMV8_1A, "ras"));
+ EXPECT_FALSE(testAArch64Extension(
+ "generic", AArch64::ArchKind::ARMV8_2A, "profile"));
+ EXPECT_FALSE(testAArch64Extension(
+ "generic", AArch64::ArchKind::ARMV8_2A, "fp16"));
+ EXPECT_FALSE(testAArch64Extension(
+ "generic", AArch64::ArchKind::ARMV8_2A, "fp16fml"));
+ EXPECT_FALSE(testAArch64Extension(
+ "generic", AArch64::ArchKind::ARMV8_3A, "fp16"));
+ EXPECT_FALSE(testAArch64Extension(
+ "generic", AArch64::ArchKind::ARMV8_3A, "fp16fml"));
+ EXPECT_FALSE(testAArch64Extension(
+ "generic", AArch64::ArchKind::ARMV8_4A, "fp16"));
+ EXPECT_FALSE(testAArch64Extension(
+ "generic", AArch64::ArchKind::ARMV8_4A, "fp16fml"));
}
TEST(TargetParserTest, AArch64ExtensionFeatures) {
@@ -1641,62 +1708,25 @@ TEST(TargetParserTest, AArch64ArchFeatures) {
EXPECT_EQ(AArch64::getArchFeature(AArch64::ArchKind::ARMV8R), "+v8r");
}
-TEST(TargetParserTest, AArch64ArchPartialOrder) {
- EXPECT_FALSE(AArch64::INVALID.implies(AArch64::INVALID));
-
- for (const auto *A : AArch64::ArchInfos) {
- EXPECT_EQ(*A, *A);
- if (!(*A == *A)) {
- EXPECT_NE(*A, *A);
- }
- // Comparison with invalid is always false
- EXPECT_FALSE(A->implies(AArch64::INVALID));
- EXPECT_FALSE(AArch64::INVALID.implies(*A));
-
- // v8r has no relation to other valid architectures
- if (*A != AArch64::ARMV8R) {
- EXPECT_FALSE(A->implies(AArch64::ARMV8R));
- EXPECT_FALSE(AArch64::ARMV8R.implies(*A));
- }
+TEST(TargetParserTest, AArch64ArchV9toV8Conversion) {
+ for (auto AK : AArch64::ArchKinds) {
+ if (AK == AArch64::ArchKind::INVALID)
+ EXPECT_EQ(AK, AArch64::convertV9toV8(AK));
+ else if (AK < AArch64::ArchKind::ARMV9A)
+ EXPECT_EQ(AK, AArch64::convertV9toV8(AK));
+ else if (AK >= AArch64::ArchKind::ARMV8R)
+ EXPECT_EQ(AArch64::ArchKind::INVALID, AArch64::convertV9toV8(AK));
+ else
+ EXPECT_TRUE(AArch64::convertV9toV8(AK) < AArch64::ArchKind::ARMV9A);
}
-
- for (const auto *A : {
- &AArch64::ARMV8_1A,
- &AArch64::ARMV8_2A,
- &AArch64::ARMV8_3A,
- &AArch64::ARMV8_4A,
- &AArch64::ARMV8_5A,
- &AArch64::ARMV8_6A,
- &AArch64::ARMV8_7A,
- &AArch64::ARMV8_8A,
- &AArch64::ARMV8_9A,
- })
- EXPECT_TRUE(A->implies(AArch64::ARMV8A));
-
- for (const auto *A : {&AArch64::ARMV9_1A, &AArch64::ARMV9_2A,
- &AArch64::ARMV9_3A, &AArch64::ARMV9_4A})
- EXPECT_TRUE(A->implies(AArch64::ARMV9A));
-
- EXPECT_TRUE(AArch64::ARMV8_1A.implies(AArch64::ARMV8A));
- EXPECT_TRUE(AArch64::ARMV8_2A.implies(AArch64::ARMV8_1A));
- EXPECT_TRUE(AArch64::ARMV8_3A.implies(AArch64::ARMV8_2A));
- EXPECT_TRUE(AArch64::ARMV8_4A.implies(AArch64::ARMV8_3A));
- EXPECT_TRUE(AArch64::ARMV8_5A.implies(AArch64::ARMV8_4A));
- EXPECT_TRUE(AArch64::ARMV8_6A.implies(AArch64::ARMV8_5A));
- EXPECT_TRUE(AArch64::ARMV8_7A.implies(AArch64::ARMV8_6A));
- EXPECT_TRUE(AArch64::ARMV8_8A.implies(AArch64::ARMV8_7A));
- EXPECT_TRUE(AArch64::ARMV8_9A.implies(AArch64::ARMV8_8A));
-
- EXPECT_TRUE(AArch64::ARMV9_1A.implies(AArch64::ARMV9A));
- EXPECT_TRUE(AArch64::ARMV9_2A.implies(AArch64::ARMV9_1A));
- EXPECT_TRUE(AArch64::ARMV9_3A.implies(AArch64::ARMV9_2A));
- EXPECT_TRUE(AArch64::ARMV9_4A.implies(AArch64::ARMV9_3A));
-
- EXPECT_TRUE(AArch64::ARMV9A.implies(AArch64::ARMV8_5A));
- EXPECT_TRUE(AArch64::ARMV9_1A.implies(AArch64::ARMV8_6A));
- EXPECT_TRUE(AArch64::ARMV9_2A.implies(AArch64::ARMV8_7A));
- EXPECT_TRUE(AArch64::ARMV9_3A.implies(AArch64::ARMV8_8A));
- EXPECT_TRUE(AArch64::ARMV9_4A.implies(AArch64::ARMV8_9A));
+ EXPECT_EQ(AArch64::ArchKind::ARMV8_5A,
+ AArch64::convertV9toV8(AArch64::ArchKind::ARMV9A));
+ EXPECT_EQ(AArch64::ArchKind::ARMV8_6A,
+ AArch64::convertV9toV8(AArch64::ArchKind::ARMV9_1A));
+ EXPECT_EQ(AArch64::ArchKind::ARMV8_7A,
+ AArch64::convertV9toV8(AArch64::ArchKind::ARMV9_2A));
+ EXPECT_EQ(AArch64::ArchKind::ARMV8_8A,
+ AArch64::convertV9toV8(AArch64::ArchKind::ARMV9_3A));
}
TEST(TargetParserTest, AArch64ArchExtFeature) {
More information about the cfe-commits
mailing list