[clang] [llvm] [clang] Introduce target-specific `Sema` components (PR #93179)
Vlad Serebrennikov via llvm-commits
llvm-commits at lists.llvm.org
Fri May 24 07:03:37 PDT 2024
https://github.com/Endilll updated https://github.com/llvm/llvm-project/pull/93179
>From 83e778c77b471eb966869c7078d6f8f824417f04 Mon Sep 17 00:00:00 2001
From: Vlad Serebrennikov <serebrennikov.vladislav at gmail.com>
Date: Thu, 23 May 2024 14:56:11 +0300
Subject: [PATCH 1/4] [clang] Introduce target-specific `Sema` components
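
This patch moves target-specific semantic checking and attribute handling out
of the monolithic `Sema` class into per-target components (SemaAMDGPU, SemaARM,
SemaBPF, SemaHexagon, SemaLoongArch, SemaMIPS, SemaNVPTX, SemaPPC, SemaSystemZ,
SemaWasm), each owned by `Sema` and reached through an accessor. As a minimal
sketch of the resulting call pattern (taken from the ParseOpenMP.cpp hunk
below; `Actions`, `PA`, and `Attrs` are the names used at that call site):

  // Before: target-specific entry points lived directly on Sema.
  //   Actions.CreateAMDGPUFlatWorkGroupSizeAttr(PA, ...);
  // After: callers go through the per-target component accessor.
  if (auto *A = Actions.AMDGPU().CreateAMDGPUFlatWorkGroupSizeAttr(
          PA, PA.getArgAsExpr(0), PA.getArgAsExpr(1)))
    Attrs.push_back(A);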
---
clang/include/clang/Sema/Sema.h | 218 +-
clang/include/clang/Sema/SemaAMDGPU.h | 68 +
clang/include/clang/Sema/SemaARM.h | 62 +
clang/include/clang/Sema/SemaBPF.h | 28 +
clang/include/clang/Sema/SemaHexagon.h | 29 +
clang/include/clang/Sema/SemaLoongArch.h | 30 +
clang/include/clang/Sema/SemaMIPS.h | 33 +
clang/include/clang/Sema/SemaNVPTX.h | 31 +
clang/include/clang/Sema/SemaPPC.h | 58 +
clang/include/clang/Sema/SemaSystemZ.h | 28 +
clang/include/clang/Sema/SemaWasm.h | 52 +
clang/lib/Parse/ParseOpenMP.cpp | 5 +-
clang/lib/Sema/CMakeLists.txt | 10 +
clang/lib/Sema/Sema.cpp | 20 +
clang/lib/Sema/SemaAMDGPU.cpp | 284 ++
clang/lib/Sema/SemaARM.cpp | 1074 ++++++
clang/lib/Sema/SemaBPF.cpp | 174 +
clang/lib/Sema/SemaChecking.cpp | 3009 +----------------
clang/lib/Sema/SemaDecl.cpp | 14 +-
clang/lib/Sema/SemaDeclAttr.cpp | 427 +--
clang/lib/Sema/SemaExprCXX.cpp | 3 +-
clang/lib/Sema/SemaHexagon.cpp | 289 ++
clang/lib/Sema/SemaLoongArch.cpp | 515 +++
clang/lib/Sema/SemaMIPS.cpp | 239 ++
clang/lib/Sema/SemaNVPTX.cpp | 35 +
clang/lib/Sema/SemaPPC.cpp | 433 +++
clang/lib/Sema/SemaSystemZ.cpp | 94 +
.../lib/Sema/SemaTemplateInstantiateDecl.cpp | 7 +-
clang/lib/Sema/SemaWasm.cpp | 337 ++
clang/utils/TableGen/MveEmitter.cpp | 4 +-
30 files changed, 4142 insertions(+), 3468 deletions(-)
create mode 100644 clang/include/clang/Sema/SemaAMDGPU.h
create mode 100644 clang/include/clang/Sema/SemaARM.h
create mode 100644 clang/include/clang/Sema/SemaBPF.h
create mode 100644 clang/include/clang/Sema/SemaHexagon.h
create mode 100644 clang/include/clang/Sema/SemaLoongArch.h
create mode 100644 clang/include/clang/Sema/SemaMIPS.h
create mode 100644 clang/include/clang/Sema/SemaNVPTX.h
create mode 100644 clang/include/clang/Sema/SemaPPC.h
create mode 100644 clang/include/clang/Sema/SemaSystemZ.h
create mode 100644 clang/include/clang/Sema/SemaWasm.h
create mode 100644 clang/lib/Sema/SemaAMDGPU.cpp
create mode 100644 clang/lib/Sema/SemaARM.cpp
create mode 100644 clang/lib/Sema/SemaBPF.cpp
create mode 100644 clang/lib/Sema/SemaHexagon.cpp
create mode 100644 clang/lib/Sema/SemaLoongArch.cpp
create mode 100644 clang/lib/Sema/SemaMIPS.cpp
create mode 100644 clang/lib/Sema/SemaNVPTX.cpp
create mode 100644 clang/lib/Sema/SemaPPC.cpp
create mode 100644 clang/lib/Sema/SemaSystemZ.cpp
create mode 100644 clang/lib/Sema/SemaWasm.cpp
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 057ff61ccc644..3ba80ed144436 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -67,6 +67,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
@@ -168,15 +169,25 @@ class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
+class SemaAMDGPU;
+class SemaARM;
+class SemaBPF;
class SemaCodeCompletion;
class SemaCUDA;
class SemaHLSL;
+class SemaHexagon;
+class SemaLoongArch;
+class SemaMIPS;
+class SemaNVPTX;
class SemaObjC;
class SemaOpenACC;
class SemaOpenMP;
+class SemaPPC;
class SemaPseudoObject;
class SemaRISCV;
class SemaSYCL;
+class SemaSystemZ;
+class SemaWasm;
class SemaX86;
class StandardConversionSequence;
class Stmt;
@@ -993,6 +1004,21 @@ class Sema final : public SemaBase {
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
+ SemaAMDGPU &AMDGPU() {
+ assert(AMDGPUPtr);
+ return *AMDGPUPtr;
+ }
+
+ SemaARM &ARM() {
+ assert(ARMPtr);
+ return *ARMPtr;
+ }
+
+ SemaBPF &BPF() {
+ assert(BPFPtr);
+ return *BPFPtr;
+ }
+
SemaCodeCompletion &CodeCompletion() {
assert(CodeCompletionPtr);
return *CodeCompletionPtr;
@@ -1008,6 +1034,26 @@ class Sema final : public SemaBase {
return *HLSLPtr;
}
+ SemaHexagon &Hexagon() {
+ assert(HexagonPtr);
+ return *HexagonPtr;
+ }
+
+ SemaLoongArch &LoongArch() {
+ assert(LoongArchPtr);
+ return *LoongArchPtr;
+ }
+
+ SemaMIPS &MIPS() {
+ assert(MIPSPtr);
+ return *MIPSPtr;
+ }
+
+ SemaNVPTX &NVPTX() {
+ assert(NVPTXPtr);
+ return *NVPTXPtr;
+ }
+
SemaObjC &ObjC() {
assert(ObjCPtr);
return *ObjCPtr;
@@ -1023,6 +1069,11 @@ class Sema final : public SemaBase {
return *OpenMPPtr;
}
+ SemaPPC &PPC() {
+ assert(PPCPtr);
+ return *PPCPtr;
+ }
+
SemaPseudoObject &PseudoObject() {
assert(PseudoObjectPtr);
return *PseudoObjectPtr;
@@ -1038,6 +1089,16 @@ class Sema final : public SemaBase {
return *SYCLPtr;
}
+ SemaSystemZ &SystemZ() {
+ assert(SystemZPtr);
+ return *SystemZPtr;
+ }
+
+ SemaWasm &Wasm() {
+ assert(WasmPtr);
+ return *WasmPtr;
+ }
+
SemaX86 &X86() {
assert(X86Ptr);
return *X86Ptr;
@@ -1073,15 +1134,25 @@ class Sema final : public SemaBase {
mutable IdentifierInfo *Ident_super;
+ std::unique_ptr<SemaAMDGPU> AMDGPUPtr;
+ std::unique_ptr<SemaARM> ARMPtr;
+ std::unique_ptr<SemaBPF> BPFPtr;
std::unique_ptr<SemaCodeCompletion> CodeCompletionPtr;
std::unique_ptr<SemaCUDA> CUDAPtr;
std::unique_ptr<SemaHLSL> HLSLPtr;
+ std::unique_ptr<SemaHexagon> HexagonPtr;
+ std::unique_ptr<SemaLoongArch> LoongArchPtr;
+ std::unique_ptr<SemaMIPS> MIPSPtr;
+ std::unique_ptr<SemaNVPTX> NVPTXPtr;
std::unique_ptr<SemaObjC> ObjCPtr;
std::unique_ptr<SemaOpenACC> OpenACCPtr;
std::unique_ptr<SemaOpenMP> OpenMPPtr;
+ std::unique_ptr<SemaPPC> PPCPtr;
std::unique_ptr<SemaPseudoObject> PseudoObjectPtr;
std::unique_ptr<SemaRISCV> RISCVPtr;
std::unique_ptr<SemaSYCL> SYCLPtr;
+ std::unique_ptr<SemaSystemZ> SystemZPtr;
+ std::unique_ptr<SemaWasm> WasmPtr;
std::unique_ptr<SemaX86> X86Ptr;
///@}
@@ -2073,6 +2144,8 @@ class Sema final : public SemaBase {
bool checkArgCountRange(CallExpr *Call, unsigned MinArgCount,
unsigned MaxArgCount);
bool checkArgCount(CallExpr *Call, unsigned DesiredArgCount);
+
+ bool ValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum);
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
@@ -2087,8 +2160,6 @@ class Sema final : public SemaBase {
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto, SourceLocation Loc);
- void checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg);
-
void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy, QualType ParamTy);
@@ -2102,54 +2173,13 @@ class Sema final : public SemaBase {
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
- bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
- unsigned MaxWidth);
- bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
- bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool ParseSVEImmChecks(CallExpr *TheCall,
- SmallVector<std::tuple<int, int, int>, 3> &ImmChecks);
- bool CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
- bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
- bool WantCDE);
- bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
-
- bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
- bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
- bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
- bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
- bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
-
- bool CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID, CallExpr *TheCall);
- bool CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall);
- bool CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
-
bool BuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool BuiltinVAStartARMMicrosoft(CallExpr *Call);
bool BuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID);
bool BuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
unsigned BuiltinID);
bool BuiltinComplex(CallExpr *TheCall);
- bool BuiltinVSX(CallExpr *TheCall);
bool BuiltinOSLogFormat(CallExpr *TheCall);
- bool ValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum);
bool BuiltinPrefetch(CallExpr *TheCall);
bool BuiltinAllocaWithAlign(CallExpr *TheCall);
@@ -2162,13 +2192,6 @@ class Sema final : public SemaBase {
ExprResult BuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult AtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
- bool BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum,
- unsigned ExpectedFieldNum, bool AllowName);
- bool BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
- bool BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
- const char *TypeDesc);
-
- bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
bool BuiltinElementwiseMath(CallExpr *TheCall);
bool BuiltinElementwiseTernaryMath(CallExpr *TheCall,
@@ -2185,16 +2208,6 @@ class Sema final : public SemaBase {
ExprResult BuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
- // WebAssembly builtin handling.
- bool BuiltinWasmRefNullExtern(CallExpr *TheCall);
- bool BuiltinWasmRefNullFunc(CallExpr *TheCall);
- bool BuiltinWasmTableGet(CallExpr *TheCall);
- bool BuiltinWasmTableSet(CallExpr *TheCall);
- bool BuiltinWasmTableSize(CallExpr *TheCall);
- bool BuiltinWasmTableGrow(CallExpr *TheCall);
- bool BuiltinWasmTableFill(CallExpr *TheCall);
- bool BuiltinWasmTableCopy(CallExpr *TheCall);
-
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args, bool IsCXXMember,
VariadicCallType CallType, SourceLocation Loc,
@@ -3548,6 +3561,56 @@ class Sema final : public SemaBase {
BuiltinFunction
};
+ /// A helper function that provides the attribute location for both Attr
+ /// types and ParsedAttr.
+ template <typename AttrInfo>
+ static std::enable_if_t<std::is_base_of_v<Attr, AttrInfo>, SourceLocation>
+ getAttrLoc(const AttrInfo &AL) {
+ return AL.getLocation();
+ }
+ SourceLocation getAttrLoc(const ParsedAttr &AL);
+
+ /// If Expr is a valid integer constant, get the value of the integer
+ /// expression and return success or failure. May output an error.
+ ///
+ /// Negative argument is implicitly converted to unsigned, unless
+ /// \p StrictlyUnsigned is true.
+ template <typename AttrInfo>
+ bool checkUInt32Argument(const AttrInfo &AI, const Expr *Expr,
+ uint32_t &Val, unsigned Idx = UINT_MAX,
+ bool StrictlyUnsigned = false) {
+ std::optional<llvm::APSInt> I = llvm::APSInt(32);
+ if (Expr->isTypeDependent() ||
+ !(I = Expr->getIntegerConstantExpr(Context))) {
+ if (Idx != UINT_MAX)
+ Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
+ << &AI << Idx << AANT_ArgumentIntegerConstant
+ << Expr->getSourceRange();
+ else
+ Diag(getAttrLoc(AI), diag::err_attribute_argument_type)
+ << &AI << AANT_ArgumentIntegerConstant << Expr->getSourceRange();
+ return false;
+ }
+
+ if (!I->isIntN(32)) {
+ Diag(Expr->getExprLoc(), diag::err_ice_too_large)
+ << toString(*I, 10, false) << 32 << /* Unsigned */ 1;
+ return false;
+ }
+
+ if (StrictlyUnsigned && I->isSigned() && I->isNegative()) {
+ Diag(getAttrLoc(AI), diag::err_attribute_requires_positive_integer)
+ << &AI << /*non-negative*/ 1;
+ return false;
+ }
+
+ Val = (uint32_t)I->getZExtValue();
+ return true;
+ }
+
+ bool isFunctionOrMethod(const Decl *D);
+ bool isFunctionOrMethodOrBlock(const Decl *D);
+
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
@@ -3705,41 +3768,6 @@ class Sema final : public SemaBase {
BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL);
- WebAssemblyImportNameAttr *
- mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL);
- WebAssemblyImportModuleAttr *
- mergeImportModuleAttr(Decl *D, const WebAssemblyImportModuleAttr &AL);
-
- /// Create an AMDGPUWavesPerEUAttr attribute.
- AMDGPUFlatWorkGroupSizeAttr *
- CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI, Expr *Min,
- Expr *Max);
-
- /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
- /// attribute to a particular declaration.
- void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
- Expr *Min, Expr *Max);
-
- /// Create an AMDGPUWavesPerEUAttr attribute.
- AMDGPUWavesPerEUAttr *
- CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI, Expr *Min,
- Expr *Max);
-
- /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a
- /// particular declaration.
- void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
- Expr *Min, Expr *Max);
-
- /// Create an AMDGPUMaxNumWorkGroupsAttr attribute.
- AMDGPUMaxNumWorkGroupsAttr *
- CreateAMDGPUMaxNumWorkGroupsAttr(const AttributeCommonInfo &CI, Expr *XExpr,
- Expr *YExpr, Expr *ZExpr);
-
- /// addAMDGPUMaxNumWorkGroupsAttr - Adds an amdgpu_max_num_work_groups
- /// attribute to a particular declaration.
- void addAMDGPUMaxNumWorkGroupsAttr(Decl *D, const AttributeCommonInfo &CI,
- Expr *XExpr, Expr *YExpr, Expr *ZExpr);
-
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
diff --git a/clang/include/clang/Sema/SemaAMDGPU.h b/clang/include/clang/Sema/SemaAMDGPU.h
new file mode 100644
index 0000000000000..271475323dfeb
--- /dev/null
+++ b/clang/include/clang/Sema/SemaAMDGPU.h
@@ -0,0 +1,68 @@
+//===----- SemaAMDGPU.h --- AMDGPU target-specific routines ---*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to AMDGPU.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMAAMDGPU_H
+#define LLVM_CLANG_SEMA_SEMAAMDGPU_H
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/AttributeCommonInfo.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+class SemaAMDGPU : public SemaBase {
+public:
+ SemaAMDGPU(Sema &S);
+
+ bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+
+ /// Create an AMDGPUFlatWorkGroupSizeAttr attribute.
+ AMDGPUFlatWorkGroupSizeAttr *
+ CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI, Expr *Min,
+ Expr *Max);
+
+ /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
+ /// attribute to a particular declaration.
+ void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
+ Expr *Min, Expr *Max);
+
+ /// Create an AMDGPUWavesPerEUAttr attribute.
+ AMDGPUWavesPerEUAttr *
+ CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI, Expr *Min,
+ Expr *Max);
+
+ /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
+ /// particular declaration.
+ void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
+ Expr *Min, Expr *Max);
+
+ /// Create an AMDGPUMaxNumWorkGroupsAttr attribute.
+ AMDGPUMaxNumWorkGroupsAttr *
+ CreateAMDGPUMaxNumWorkGroupsAttr(const AttributeCommonInfo &CI, Expr *XExpr,
+ Expr *YExpr, Expr *ZExpr);
+
+ /// addAMDGPUMaxNumWorkGroupsAttr - Adds an amdgpu_max_num_work_groups
+ /// attribute to a particular declaration.
+ void addAMDGPUMaxNumWorkGroupsAttr(Decl *D, const AttributeCommonInfo &CI,
+ Expr *XExpr, Expr *YExpr, Expr *ZExpr);
+
+ void handleAMDGPUWavesPerEUAttr(Decl *D, const ParsedAttr &AL);
+ void handleAMDGPUNumSGPRAttr(Decl *D, const ParsedAttr &AL);
+ void handleAMDGPUNumVGPRAttr(Decl *D, const ParsedAttr &AL);
+ void handleAMDGPUMaxNumWorkGroupsAttr(Decl *D, const ParsedAttr &AL);
+ void handleAMDGPUFlatWorkGroupSizeAttr(Decl *D, const ParsedAttr &AL);
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMAAMDGPU_H
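The ParsedAttr entry points declared above take over the per-target attribute
handling previously reached through Sema itself (the SemaDeclAttr.cpp changes
appear further down in this patch). A hypothetical dispatch site, assuming the
conventional `S`, `D`, and `AL` names used in attribute processing:

  // Hypothetical caller, not part of this hunk: route an AMDGPU attribute
  // to the new target-specific component.
  case ParsedAttr::AT_AMDGPUNumSGPR:
    S.AMDGPU().handleAMDGPUNumSGPRAttr(D, AL);
    break;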
diff --git a/clang/include/clang/Sema/SemaARM.h b/clang/include/clang/Sema/SemaARM.h
new file mode 100644
index 0000000000000..f7f29067ce320
--- /dev/null
+++ b/clang/include/clang/Sema/SemaARM.h
@@ -0,0 +1,62 @@
+//===----- SemaARM.h ------- ARM target-specific routines -----*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to ARM.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMAARM_H
+#define LLVM_CLANG_SEMA_SEMAARM_H
+
+#include "clang/AST/Expr.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaBase.h"
+#include "llvm/ADT/SmallVector.h"
+#include <tuple>
+
+namespace clang {
+
+class SemaARM : public SemaBase {
+public:
+ SemaARM(Sema &S);
+
+ enum ArmStreamingType {
+ ArmNonStreaming,
+ ArmStreaming,
+ ArmStreamingCompatible,
+ ArmStreamingOrSVE2p1
+ };
+
+ bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
+ unsigned MaxWidth);
+ bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool ParseSVEImmChecks(CallExpr *TheCall,
+ llvm::SmallVector<std::tuple<int, int, int>, 3> &ImmChecks);
+ bool CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
+ bool WantCDE);
+ bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+
+ bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ bool BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum,
+ unsigned ExpectedFieldNum, bool AllowName);
+ bool BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
+};
+
+SemaARM::ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD);
+
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMAARM_H
diff --git a/clang/include/clang/Sema/SemaBPF.h b/clang/include/clang/Sema/SemaBPF.h
new file mode 100644
index 0000000000000..a3bf59128d254
--- /dev/null
+++ b/clang/include/clang/Sema/SemaBPF.h
@@ -0,0 +1,28 @@
+//===----- SemaBPF.h ------- BPF target-specific routines -----*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to BPF.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMABPF_H
+#define LLVM_CLANG_SEMA_SEMABPF_H
+
+#include "clang/AST/Expr.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+class SemaBPF : public SemaBase {
+public:
+ SemaBPF(Sema &S);
+
+ bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMABPF_H
diff --git a/clang/include/clang/Sema/SemaHexagon.h b/clang/include/clang/Sema/SemaHexagon.h
new file mode 100644
index 0000000000000..2d4a04f824bc2
--- /dev/null
+++ b/clang/include/clang/Sema/SemaHexagon.h
@@ -0,0 +1,29 @@
+//===----- SemaHexagon.h -- Hexagon target-specific routines --*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to Hexagon.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMAHEXAGON_H
+#define LLVM_CLANG_SEMA_SEMAHEXAGON_H
+
+#include "clang/AST/Expr.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+class SemaHexagon : public SemaBase {
+public:
+ SemaHexagon(Sema &S);
+
+ bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMAHEXAGON_H
diff --git a/clang/include/clang/Sema/SemaLoongArch.h b/clang/include/clang/Sema/SemaLoongArch.h
new file mode 100644
index 0000000000000..aef0df9e8710f
--- /dev/null
+++ b/clang/include/clang/Sema/SemaLoongArch.h
@@ -0,0 +1,30 @@
+//===-- SemaLoongArch.h -- LoongArch target-specific routines --*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to LoongArch.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMALOONGARCH_H
+#define LLVM_CLANG_SEMA_SEMALOONGARCH_H
+
+#include "clang/AST/Expr.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+class SemaLoongArch : public SemaBase {
+public:
+ SemaLoongArch(Sema &S);
+
+ bool CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID, CallExpr *TheCall);
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMALOONGARCH_H
diff --git a/clang/include/clang/Sema/SemaMIPS.h b/clang/include/clang/Sema/SemaMIPS.h
new file mode 100644
index 0000000000000..3f1781b36efd9
--- /dev/null
+++ b/clang/include/clang/Sema/SemaMIPS.h
@@ -0,0 +1,33 @@
+//===----- SemaMIPS.h ------ MIPS target-specific routines ----*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to MIPS.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMAMIPS_H
+#define LLVM_CLANG_SEMA_SEMAMIPS_H
+
+#include "clang/AST/Expr.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+class SemaMIPS : public SemaBase {
+public:
+ SemaMIPS(Sema &S);
+
+ bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMAMIPS_H
diff --git a/clang/include/clang/Sema/SemaNVPTX.h b/clang/include/clang/Sema/SemaNVPTX.h
new file mode 100644
index 0000000000000..ef1e701824b09
--- /dev/null
+++ b/clang/include/clang/Sema/SemaNVPTX.h
@@ -0,0 +1,31 @@
+//===----- SemaNVPTX.h ----- NVPTX target-specific routines ---*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to NVPTX.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMANVPTX_H
+#define LLVM_CLANG_SEMA_SEMANVPTX_H
+
+#include "clang/AST/Expr.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+class SemaNVPTX : public SemaBase {
+public:
+ SemaNVPTX(Sema &S);
+
+
+ bool CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMANVPTX_H
diff --git a/clang/include/clang/Sema/SemaPPC.h b/clang/include/clang/Sema/SemaPPC.h
new file mode 100644
index 0000000000000..394a50bedc3e4
--- /dev/null
+++ b/clang/include/clang/Sema/SemaPPC.h
@@ -0,0 +1,58 @@
+//===----- SemaPPC.h ------- PPC target-specific routines -----*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to PowerPC.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMAPPC_H
+#define LLVM_CLANG_SEMA_SEMAPPC_H
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+class SemaPPC : public SemaBase {
+public:
+ SemaPPC(Sema &S);
+
+ bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ // 16-byte ByVal alignment not due to a vector member is not honoured by XL
+ // on AIX. To be safe, emit a warning here that users are generating
+ // binary-incompatible code.
+ // Here we try to get information about the alignment of the struct member
+ // from the struct passed to the caller function. We only warn when the
+ // struct is passed byval, hence the series of checks and early returns if
+ // we are not passing a struct byval.
+ void checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg);
+
+ /// BuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
+ /// Emit an error and return true on failure; return false on success.
+ /// TypeDesc is a string containing the type descriptor of the value returned
+ /// by the builtin and the descriptors of the expected types of the arguments.
+ bool BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
+ const char *TypeDesc);
+
+ bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
+
+ // Customized Sema checking for VSX builtins that have the following signature:
+ // vector [...] builtinName(vector [...], vector [...], const int);
+ // These take the same vector type (any legal vector type) for the first two
+ // arguments and a compile-time constant for the third argument.
+ // Example builtins are :
+ // vector double vec_xxpermdi(vector double, vector double, int);
+ // vector short vec_xxsldwi(vector short, vector short, int);
+ bool BuiltinVSX(CallExpr *TheCall);
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMAPPC_H
diff --git a/clang/include/clang/Sema/SemaSystemZ.h b/clang/include/clang/Sema/SemaSystemZ.h
new file mode 100644
index 0000000000000..8945471d53d63
--- /dev/null
+++ b/clang/include/clang/Sema/SemaSystemZ.h
@@ -0,0 +1,28 @@
+//===----- SemaSystemZ.h -- SystemZ target-specific routines --*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to SystemZ.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMASYSTEMZ_H
+#define LLVM_CLANG_SEMA_SEMASYSTEMZ_H
+
+#include "clang/AST/Expr.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+class SemaSystemZ : public SemaBase {
+public:
+ SemaSystemZ(Sema &S);
+
+ bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMASYSTEMZ_H
diff --git a/clang/include/clang/Sema/SemaWasm.h b/clang/include/clang/Sema/SemaWasm.h
new file mode 100644
index 0000000000000..c3c781535024a
--- /dev/null
+++ b/clang/include/clang/Sema/SemaWasm.h
@@ -0,0 +1,52 @@
+//===----- SemaWasm.h ------ Wasm target-specific routines ----*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to Wasm.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMAWASM_H
+#define LLVM_CLANG_SEMA_SEMAWASM_H
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+class SemaWasm : public SemaBase {
+public:
+ SemaWasm(Sema &S);
+
+ bool CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall);
+
+ bool BuiltinWasmRefNullExtern(CallExpr *TheCall);
+ bool BuiltinWasmRefNullFunc(CallExpr *TheCall);
+ bool BuiltinWasmTableGet(CallExpr *TheCall);
+ bool BuiltinWasmTableSet(CallExpr *TheCall);
+ bool BuiltinWasmTableSize(CallExpr *TheCall);
+ bool BuiltinWasmTableGrow(CallExpr *TheCall);
+ bool BuiltinWasmTableFill(CallExpr *TheCall);
+ bool BuiltinWasmTableCopy(CallExpr *TheCall);
+
+ WebAssemblyImportNameAttr *
+ mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL);
+ WebAssemblyImportModuleAttr *
+ mergeImportModuleAttr(Decl *D, const WebAssemblyImportModuleAttr &AL);
+
+ void handleWebAssemblyExportNameAttr(Decl *D, const ParsedAttr &AL);
+ void handleWebAssemblyImportModuleAttr(Decl *D, const ParsedAttr &AL);
+ void handleWebAssemblyImportNameAttr(Decl *D, const ParsedAttr &AL);
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMAWASM_H
diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp
index e959dd6378f46..a0f3735abc764 100644
--- a/clang/lib/Parse/ParseOpenMP.cpp
+++ b/clang/lib/Parse/ParseOpenMP.cpp
@@ -21,6 +21,7 @@
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaAMDGPU.h"
#include "clang/Sema/SemaCodeCompletion.h"
#include "clang/Sema/SemaOpenMP.h"
#include "llvm/ADT/PointerIntPair.h"
@@ -3758,7 +3759,7 @@ OMPClause *Parser::ParseOpenMPOMPXAttributesClause(bool ParseOnly) {
case ParsedAttr::AT_AMDGPUFlatWorkGroupSize:
if (!PA.checkExactlyNumArgs(Actions, 2))
continue;
- if (auto *A = Actions.CreateAMDGPUFlatWorkGroupSizeAttr(
+ if (auto *A = Actions.AMDGPU().CreateAMDGPUFlatWorkGroupSizeAttr(
PA, PA.getArgAsExpr(0), PA.getArgAsExpr(1)))
Attrs.push_back(A);
continue;
@@ -3766,7 +3767,7 @@ OMPClause *Parser::ParseOpenMPOMPXAttributesClause(bool ParseOnly) {
if (!PA.checkAtLeastNumArgs(Actions, 1) ||
!PA.checkAtMostNumArgs(Actions, 2))
continue;
- if (auto *A = Actions.CreateAMDGPUWavesPerEUAttr(
+ if (auto *A = Actions.AMDGPU().CreateAMDGPUWavesPerEUAttr(
PA, PA.getArgAsExpr(0),
PA.getNumArgs() > 1 ? PA.getArgAsExpr(1) : nullptr))
Attrs.push_back(A);
diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt
index fe6471c81ff01..c9abf58fcbd29 100644
--- a/clang/lib/Sema/CMakeLists.txt
+++ b/clang/lib/Sema/CMakeLists.txt
@@ -26,10 +26,13 @@ add_clang_library(clangSema
Scope.cpp
ScopeInfo.cpp
Sema.cpp
+ SemaAMDGPU.cpp
+ SemaARM.cpp
SemaAccess.cpp
SemaAttr.cpp
SemaAPINotes.cpp
SemaAvailability.cpp
+ SemaBPF.cpp
SemaBase.cpp
SemaCXXScopeSpec.cpp
SemaCast.cpp
@@ -50,27 +53,34 @@ add_clang_library(clangSema
SemaExprObjC.cpp
SemaFixItUtils.cpp
SemaHLSL.cpp
+ SemaHexagon.cpp
SemaInit.cpp
SemaLambda.cpp
SemaLookup.cpp
+ SemaLoongArch.cpp
+ SemaMIPS.cpp
SemaModule.cpp
+ SemaNVPTX.cpp
SemaObjC.cpp
SemaObjCProperty.cpp
SemaOpenACC.cpp
SemaOpenMP.cpp
SemaOverload.cpp
+ SemaPPC.cpp
SemaPseudoObject.cpp
SemaRISCV.cpp
SemaStmt.cpp
SemaStmtAsm.cpp
SemaStmtAttr.cpp
SemaSYCL.cpp
+ SemaSystemZ.cpp
SemaTemplate.cpp
SemaTemplateDeduction.cpp
SemaTemplateInstantiate.cpp
SemaTemplateInstantiateDecl.cpp
SemaTemplateVariadic.cpp
SemaType.cpp
+ SemaWasm.cpp
SemaX86.cpp
TypeLocBuilder.cpp
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index d1fb21bb1ae1d..a96bf75f06ddf 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -41,17 +41,27 @@
#include "clang/Sema/RISCVIntrinsicManager.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaAMDGPU.h"
+#include "clang/Sema/SemaARM.h"
+#include "clang/Sema/SemaBPF.h"
#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaCodeCompletion.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Sema/SemaHLSL.h"
+#include "clang/Sema/SemaHexagon.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaLoongArch.h"
+#include "clang/Sema/SemaMIPS.h"
+#include "clang/Sema/SemaNVPTX.h"
#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/SemaOpenACC.h"
#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPPC.h"
#include "clang/Sema/SemaPseudoObject.h"
#include "clang/Sema/SemaRISCV.h"
#include "clang/Sema/SemaSYCL.h"
+#include "clang/Sema/SemaSystemZ.h"
+#include "clang/Sema/SemaWasm.h"
#include "clang/Sema/SemaX86.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
@@ -206,16 +216,26 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
LateTemplateParser(nullptr), LateTemplateParserCleanup(nullptr),
OpaqueParser(nullptr), CurContext(nullptr), ExternalSource(nullptr),
CurScope(nullptr), Ident_super(nullptr),
+ AMDGPUPtr(std::make_unique<SemaAMDGPU>(*this)),
+ ARMPtr(std::make_unique<SemaARM>(*this)),
+ BPFPtr(std::make_unique<SemaBPF>(*this)),
CodeCompletionPtr(
std::make_unique<SemaCodeCompletion>(*this, CodeCompleter)),
CUDAPtr(std::make_unique<SemaCUDA>(*this)),
HLSLPtr(std::make_unique<SemaHLSL>(*this)),
+ HexagonPtr(std::make_unique<SemaHexagon>(*this)),
+ LoongArchPtr(std::make_unique<SemaLoongArch>(*this)),
+ MIPSPtr(std::make_unique<SemaMIPS>(*this)),
+ NVPTXPtr(std::make_unique<SemaNVPTX>(*this)),
ObjCPtr(std::make_unique<SemaObjC>(*this)),
OpenACCPtr(std::make_unique<SemaOpenACC>(*this)),
OpenMPPtr(std::make_unique<SemaOpenMP>(*this)),
+ PPCPtr(std::make_unique<SemaPPC>(*this)),
PseudoObjectPtr(std::make_unique<SemaPseudoObject>(*this)),
RISCVPtr(std::make_unique<SemaRISCV>(*this)),
SYCLPtr(std::make_unique<SemaSYCL>(*this)),
+ SystemZPtr(std::make_unique<SemaSystemZ>(*this)),
+ WasmPtr(std::make_unique<SemaWasm>(*this)),
X86Ptr(std::make_unique<SemaX86>(*this)),
MSPointerToMemberRepresentationMethod(
LangOpts.getMSPointerToMemberRepresentationMethod()),
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
new file mode 100644
index 0000000000000..0c101e6a5c00c
--- /dev/null
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -0,0 +1,284 @@
+//===------ SemaAMDGPU.cpp ------- AMDGPU target-specific routines --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to AMDGPU.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaAMDGPU.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Ownership.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include <cstdint>
+
+namespace clang {
+
+SemaAMDGPU::SemaAMDGPU(Sema &S) : SemaBase(S) {}
+
+bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ // position of memory order and scope arguments in the builtin
+ unsigned OrderIndex, ScopeIndex;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_get_fpenv:
+ case AMDGPU::BI__builtin_amdgcn_set_fpenv:
+ return false;
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
+ case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
+ OrderIndex = 2;
+ ScopeIndex = 3;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_fence:
+ OrderIndex = 0;
+ ScopeIndex = 1;
+ break;
+ default:
+ return false;
+ }
+
+ ExprResult Arg = TheCall->getArg(OrderIndex);
+ auto ArgExpr = Arg.get();
+ Expr::EvalResult ArgResult;
+
+ if (!ArgExpr->EvaluateAsInt(ArgResult, getASTContext()))
+ return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
+ << ArgExpr->getType();
+ auto Ord = ArgResult.Val.getInt().getZExtValue();
+
+ // Check validity of memory ordering as per C11 / C++11's memory model.
+ // Only fence needs this check. Atomic dec/inc allow all memory orders.
+ if (!llvm::isValidAtomicOrderingCABI(Ord))
+ return Diag(ArgExpr->getBeginLoc(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << 0 << ArgExpr->getSourceRange();
+ switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
+ case llvm::AtomicOrderingCABI::relaxed:
+ case llvm::AtomicOrderingCABI::consume:
+ if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
+ return Diag(ArgExpr->getBeginLoc(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << 0 << ArgExpr->getSourceRange();
+ break;
+ case llvm::AtomicOrderingCABI::acquire:
+ case llvm::AtomicOrderingCABI::release:
+ case llvm::AtomicOrderingCABI::acq_rel:
+ case llvm::AtomicOrderingCABI::seq_cst:
+ break;
+ }
+
+ Arg = TheCall->getArg(ScopeIndex);
+ ArgExpr = Arg.get();
+ Expr::EvalResult ArgResult1;
+ // Check that sync scope is a constant literal
+ if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, getASTContext()))
+ return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
+ << ArgExpr->getType();
+
+ return false;
+}
+
+static bool
+checkAMDGPUFlatWorkGroupSizeArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr,
+ const AMDGPUFlatWorkGroupSizeAttr &Attr) {
+ // Accept template arguments for now as they depend on something else.
+ // We'll get to check them when they eventually get instantiated.
+ if (MinExpr->isValueDependent() || MaxExpr->isValueDependent())
+ return false;
+
+ uint32_t Min = 0;
+ if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
+ return true;
+
+ uint32_t Max = 0;
+ if (!S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
+ return true;
+
+ if (Min == 0 && Max != 0) {
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 0;
+ return true;
+ }
+ if (Min > Max) {
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 1;
+ return true;
+ }
+
+ return false;
+}
+
+AMDGPUFlatWorkGroupSizeAttr *
+SemaAMDGPU::CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
+ ASTContext &Context = getASTContext();
+ AMDGPUFlatWorkGroupSizeAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
+
+ if (checkAMDGPUFlatWorkGroupSizeArguments(SemaRef, MinExpr, MaxExpr, TmpAttr))
+ return nullptr;
+ return ::new (Context)
+ AMDGPUFlatWorkGroupSizeAttr(Context, CI, MinExpr, MaxExpr);
+}
+
+void SemaAMDGPU::addAMDGPUFlatWorkGroupSizeAttr(Decl *D,
+ const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
+ if (auto *Attr = CreateAMDGPUFlatWorkGroupSizeAttr(CI, MinExpr, MaxExpr))
+ D->addAttr(Attr);
+}
+
+void SemaAMDGPU::handleAMDGPUFlatWorkGroupSizeAttr(Decl *D, const ParsedAttr &AL) {
+ Expr *MinExpr = AL.getArgAsExpr(0);
+ Expr *MaxExpr = AL.getArgAsExpr(1);
+
+ addAMDGPUFlatWorkGroupSizeAttr(D, AL, MinExpr, MaxExpr);
+}
+
+static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr,
+ Expr *MaxExpr,
+ const AMDGPUWavesPerEUAttr &Attr) {
+ if (S.DiagnoseUnexpandedParameterPack(MinExpr) ||
+ (MaxExpr && S.DiagnoseUnexpandedParameterPack(MaxExpr)))
+ return true;
+
+ // Accept template arguments for now as they depend on something else.
+ // We'll get to check them when they eventually get instantiated.
+ if (MinExpr->isValueDependent() || (MaxExpr && MaxExpr->isValueDependent()))
+ return false;
+
+ uint32_t Min = 0;
+ if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
+ return true;
+
+ uint32_t Max = 0;
+ if (MaxExpr && !S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
+ return true;
+
+ if (Min == 0 && Max != 0) {
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 0;
+ return true;
+ }
+ if (Max != 0 && Min > Max) {
+ S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
+ << &Attr << 1;
+ return true;
+ }
+
+ return false;
+}
+
+AMDGPUWavesPerEUAttr *
+SemaAMDGPU::CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI, Expr *MinExpr,
+ Expr *MaxExpr) {
+ ASTContext &Context = getASTContext();
+ AMDGPUWavesPerEUAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
+
+ if (checkAMDGPUWavesPerEUArguments(SemaRef, MinExpr, MaxExpr, TmpAttr))
+ return nullptr;
+
+ return ::new (Context) AMDGPUWavesPerEUAttr(Context, CI, MinExpr, MaxExpr);
+}
+
+void SemaAMDGPU::addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
+ if (auto *Attr = CreateAMDGPUWavesPerEUAttr(CI, MinExpr, MaxExpr))
+ D->addAttr(Attr);
+}
+
+void SemaAMDGPU::handleAMDGPUWavesPerEUAttr(Decl *D, const ParsedAttr &AL) {
+ if (!AL.checkAtLeastNumArgs(SemaRef, 1) || !AL.checkAtMostNumArgs(SemaRef, 2))
+ return;
+
+ Expr *MinExpr = AL.getArgAsExpr(0);
+ Expr *MaxExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
+
+ addAMDGPUWavesPerEUAttr(D, AL, MinExpr, MaxExpr);
+}
+
+void SemaAMDGPU::handleAMDGPUNumSGPRAttr(Decl *D, const ParsedAttr &AL) {
+ uint32_t NumSGPR = 0;
+ Expr *NumSGPRExpr = AL.getArgAsExpr(0);
+ if (!SemaRef.checkUInt32Argument(AL, NumSGPRExpr, NumSGPR))
+ return;
+
+ D->addAttr(::new (getASTContext()) AMDGPUNumSGPRAttr(getASTContext(), AL, NumSGPR));
+}
+
+void SemaAMDGPU::handleAMDGPUNumVGPRAttr(Decl *D, const ParsedAttr &AL) {
+ uint32_t NumVGPR = 0;
+ Expr *NumVGPRExpr = AL.getArgAsExpr(0);
+ if (!SemaRef.checkUInt32Argument(AL, NumVGPRExpr, NumVGPR))
+ return;
+
+ D->addAttr(::new (getASTContext()) AMDGPUNumVGPRAttr(getASTContext(), AL, NumVGPR));
+}
+
+static bool
+checkAMDGPUMaxNumWorkGroupsArguments(Sema &S, Expr *XExpr, Expr *YExpr,
+ Expr *ZExpr,
+ const AMDGPUMaxNumWorkGroupsAttr &Attr) {
+ if (S.DiagnoseUnexpandedParameterPack(XExpr) ||
+ (YExpr && S.DiagnoseUnexpandedParameterPack(YExpr)) ||
+ (ZExpr && S.DiagnoseUnexpandedParameterPack(ZExpr)))
+ return true;
+
+ // Accept template arguments for now as they depend on something else.
+ // We'll get to check them when they eventually get instantiated.
+ if (XExpr->isValueDependent() || (YExpr && YExpr->isValueDependent()) ||
+ (ZExpr && ZExpr->isValueDependent()))
+ return false;
+
+ uint32_t NumWG = 0;
+ Expr *Exprs[3] = {XExpr, YExpr, ZExpr};
+ for (int i = 0; i < 3; i++) {
+ if (Exprs[i]) {
+ if (!S.checkUInt32Argument(Attr, Exprs[i], NumWG, i,
+ /*StrictlyUnsigned=*/true))
+ return true;
+ if (NumWG == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
+ << &Attr << Exprs[i]->getSourceRange();
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+AMDGPUMaxNumWorkGroupsAttr *
+SemaAMDGPU::CreateAMDGPUMaxNumWorkGroupsAttr(const AttributeCommonInfo &CI,
+ Expr *XExpr, Expr *YExpr, Expr *ZExpr) {
+ ASTContext &Context = getASTContext();
+ AMDGPUMaxNumWorkGroupsAttr TmpAttr(Context, CI, XExpr, YExpr, ZExpr);
+
+ if (checkAMDGPUMaxNumWorkGroupsArguments(SemaRef, XExpr, YExpr, ZExpr, TmpAttr))
+ return nullptr;
+
+ return ::new (Context)
+ AMDGPUMaxNumWorkGroupsAttr(Context, CI, XExpr, YExpr, ZExpr);
+}
+
+void SemaAMDGPU::addAMDGPUMaxNumWorkGroupsAttr(Decl *D, const AttributeCommonInfo &CI,
+ Expr *XExpr, Expr *YExpr,
+ Expr *ZExpr) {
+ if (auto *Attr = CreateAMDGPUMaxNumWorkGroupsAttr(CI, XExpr, YExpr, ZExpr))
+ D->addAttr(Attr);
+}
+
+void SemaAMDGPU::handleAMDGPUMaxNumWorkGroupsAttr(Decl *D, const ParsedAttr &AL) {
+ Expr *YExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
+ Expr *ZExpr = (AL.getNumArgs() > 2) ? AL.getArgAsExpr(2) : nullptr;
+ addAMDGPUMaxNumWorkGroupsAttr(D, AL, AL.getArgAsExpr(0), YExpr, ZExpr);
+}
+
+} // namespace clang
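For reference, a sketch of user code that would exercise the fence check above
(assuming an AMDGPU target; the memory-order constants are the standard
__ATOMIC_* macros):

  // __builtin_amdgcn_fence(order, scope)
  __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");  // accepted
  __builtin_amdgcn_fence(__ATOMIC_RELAXED, "workgroup");  // diagnosed:
                                                          // invalid memory
                                                          // order for a fence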
diff --git a/clang/lib/Sema/SemaARM.cpp b/clang/lib/Sema/SemaARM.cpp
new file mode 100644
index 0000000000000..ba40b391235f7
--- /dev/null
+++ b/clang/lib/Sema/SemaARM.cpp
@@ -0,0 +1,1074 @@
+//===------ SemaARM.cpp ---------- ARM target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to ARM.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaARM.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+
+SemaARM::SemaARM(Sema &S) : SemaBase(S) {}
+
+/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
+bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
+ ASTContext &Context = getASTContext();
+
+ if (BuiltinID == AArch64::BI__builtin_arm_irg) {
+ if (SemaRef.checkArgCount(TheCall, 2))
+ return true;
+ Expr *Arg0 = TheCall->getArg(0);
+ Expr *Arg1 = TheCall->getArg(1);
+
+ ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
+ if (FirstArg.isInvalid())
+ return true;
+ QualType FirstArgType = FirstArg.get()->getType();
+ if (!FirstArgType->isAnyPointerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+ << "first" << FirstArgType << Arg0->getSourceRange();
+ TheCall->setArg(0, FirstArg.get());
+
+ ExprResult SecArg = SemaRef.DefaultLvalueConversion(Arg1);
+ if (SecArg.isInvalid())
+ return true;
+ QualType SecArgType = SecArg.get()->getType();
+ if (!SecArgType->isIntegerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
+ << "second" << SecArgType << Arg1->getSourceRange();
+
+ // Derive the return type from the pointer argument.
+ TheCall->setType(FirstArgType);
+ return false;
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_addg) {
+ if (SemaRef.checkArgCount(TheCall, 2))
+ return true;
+
+ Expr *Arg0 = TheCall->getArg(0);
+ ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
+ if (FirstArg.isInvalid())
+ return true;
+ QualType FirstArgType = FirstArg.get()->getType();
+ if (!FirstArgType->isAnyPointerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+ << "first" << FirstArgType << Arg0->getSourceRange();
+ TheCall->setArg(0, FirstArg.get());
+
+ // Derive the return type from the pointer argument.
+ TheCall->setType(FirstArgType);
+
+ // Second arg must be a constant in the range [0,15].
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
+ if (SemaRef.checkArgCount(TheCall, 2))
+ return true;
+ Expr *Arg0 = TheCall->getArg(0);
+ Expr *Arg1 = TheCall->getArg(1);
+
+ ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
+ if (FirstArg.isInvalid())
+ return true;
+ QualType FirstArgType = FirstArg.get()->getType();
+ if (!FirstArgType->isAnyPointerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+ << "first" << FirstArgType << Arg0->getSourceRange();
+
+ QualType SecArgType = Arg1->getType();
+ if (!SecArgType->isIntegerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
+ << "second" << SecArgType << Arg1->getSourceRange();
+ TheCall->setType(Context.IntTy);
+ return false;
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
+ BuiltinID == AArch64::BI__builtin_arm_stg) {
+ if (SemaRef.checkArgCount(TheCall, 1))
+ return true;
+ Expr *Arg0 = TheCall->getArg(0);
+ ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
+ if (FirstArg.isInvalid())
+ return true;
+
+ QualType FirstArgType = FirstArg.get()->getType();
+ if (!FirstArgType->isAnyPointerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
+ << "first" << FirstArgType << Arg0->getSourceRange();
+ TheCall->setArg(0, FirstArg.get());
+
+ // Derive the return type from the pointer argument.
+ if (BuiltinID == AArch64::BI__builtin_arm_ldg)
+ TheCall->setType(FirstArgType);
+ return false;
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_subp) {
+ Expr *ArgA = TheCall->getArg(0);
+ Expr *ArgB = TheCall->getArg(1);
+
+ ExprResult ArgExprA = SemaRef.DefaultFunctionArrayLvalueConversion(ArgA);
+ ExprResult ArgExprB = SemaRef.DefaultFunctionArrayLvalueConversion(ArgB);
+
+ if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
+ return true;
+
+ QualType ArgTypeA = ArgExprA.get()->getType();
+ QualType ArgTypeB = ArgExprB.get()->getType();
+
+ auto isNull = [&] (Expr *E) -> bool {
+ return E->isNullPointerConstant(
+ Context, Expr::NPC_ValueDependentIsNotNull); };
+
+ // argument should be either a pointer or null
+ if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
+ << "first" << ArgTypeA << ArgA->getSourceRange();
+
+ if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
+ << "second" << ArgTypeB << ArgB->getSourceRange();
+
+ // Ensure Pointee types are compatible
+ if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
+ ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
+ QualType pointeeA = ArgTypeA->getPointeeType();
+ QualType pointeeB = ArgTypeB->getPointeeType();
+ if (!Context.typesAreCompatible(
+ Context.getCanonicalType(pointeeA).getUnqualifiedType(),
+ Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
+ return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
+ << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
+ << ArgB->getSourceRange();
+ }
+ }
+
+ // At least one argument should be a pointer type.
+ if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
+ return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
+ << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
+
+ if (isNull(ArgA)) // adopt type of the other pointer
+ ArgExprA = SemaRef.ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);
+
+ if (isNull(ArgB))
+ ArgExprB = SemaRef.ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);
+
+ TheCall->setArg(0, ArgExprA.get());
+ TheCall->setArg(1, ArgExprB.get());
+ TheCall->setType(Context.LongLongTy);
+ return false;
+ }
+ assert(false && "Unhandled ARM MTE intrinsic");
+ return true;
+}
+
+/// BuiltinARMSpecialReg - Check whether argument ArgNum of CallExpr TheCall
+/// is a valid ARM/AArch64 special register string literal.
+bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
+ int ArgNum, unsigned ExpectedFieldNum,
+ bool AllowName) {
+ bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_wsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_rsr ||
+ BuiltinID == ARM::BI__builtin_arm_rsrp ||
+ BuiltinID == ARM::BI__builtin_arm_wsr ||
+ BuiltinID == ARM::BI__builtin_arm_wsrp;
+ bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
+ BuiltinID == AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr ||
+ BuiltinID == AArch64::BI__builtin_arm_wsrp;
+ assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check if the argument is a string literal.
+ if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
+ return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
+ << Arg->getSourceRange();
+
+ // Check the type of special register given.
+ StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
+ SmallVector<StringRef, 6> Fields;
+ Reg.split(Fields, ":");
+
+ if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
+ return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
+ << Arg->getSourceRange();
+
+ // If the string is the name of a register then we cannot check that it is
+ // valid here, but if the string is of one of the forms described in the ACLE
+ // then we can check that the supplied fields are integers and within the
+ // valid ranges.
+ if (Fields.size() > 1) {
+ bool FiveFields = Fields.size() == 5;
+
+ bool ValidString = true;
+ if (IsARMBuiltin) {
+ ValidString &= Fields[0].starts_with_insensitive("cp") ||
+ Fields[0].starts_with_insensitive("p");
+ if (ValidString)
+ Fields[0] = Fields[0].drop_front(
+ Fields[0].starts_with_insensitive("cp") ? 2 : 1);
+
+ ValidString &= Fields[2].starts_with_insensitive("c");
+ if (ValidString)
+ Fields[2] = Fields[2].drop_front(1);
+
+ if (FiveFields) {
+ ValidString &= Fields[3].starts_with_insensitive("c");
+ if (ValidString)
+ Fields[3] = Fields[3].drop_front(1);
+ }
+ }
+
+ SmallVector<int, 5> Ranges;
+ if (FiveFields)
+ Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
+ else
+ Ranges.append({15, 7, 15});
+
+ for (unsigned i=0; i<Fields.size(); ++i) {
+ int IntField;
+ ValidString &= !Fields[i].getAsInteger(10, IntField);
+ ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
+ }
+
+ if (!ValidString)
+ return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
+ << Arg->getSourceRange();
+ } else if (IsAArch64Builtin && Fields.size() == 1) {
+ // This code validates writes to PSTATE registers.
+
+ // Not a write.
+ if (TheCall->getNumArgs() != 2)
+ return false;
+
+ // The 128-bit system register accesses do not touch PSTATE.
+ if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr128)
+ return false;
+
+ // These are the named PSTATE accesses using "MSR (immediate)" instructions,
+ // along with the upper limit on the immediates allowed.
+ auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
+ .CaseLower("spsel", 15)
+ .CaseLower("daifclr", 15)
+ .CaseLower("daifset", 15)
+ .CaseLower("pan", 15)
+ .CaseLower("uao", 15)
+ .CaseLower("dit", 15)
+ .CaseLower("ssbs", 15)
+ .CaseLower("tco", 15)
+ .CaseLower("allint", 1)
+ .CaseLower("pm", 1)
+ .Default(std::nullopt);
+
+    // If this is not a named PSTATE, just continue without validating, as
+    // this will be lowered to an "MSR (register)" instruction directly.
+ if (!MaxLimit)
+ return false;
+
+    // Here we only allow constants in the range for that PSTATE, as required
+    // by the ACLE.
+    //
+    // While clang also accepts the names of system registers in its ACLE
+    // intrinsics, we restrict the PSTATE names used by MSR (immediate) to
+    // this form, because the value written via a register differs from the
+    // immediate needed to have the same effect. E.g., for the instruction
+    // `msr tco, x0`, it is bit 25 of register x0 that is written into
+    // PSTATE.TCO, but with `msr tco, #imm`, it is bit 0 of the immediate
+    // that is written into PSTATE.TCO.
+ //
+ // If a programmer wants to codegen the MSR (register) form of `msr tco,
+ // xN`, they can still do so by specifying the register using five
+ // colon-separated numbers in a string.
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
+ }
+
+ return false;
+}
+
+// Get the valid immediate range for the specified NEON type code.
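+// For lane indices this is the number of lanes minus one, e.g.
+// (8 << IsQuad) - 1 for 8-bit elements; for shifts it is the largest valid
+// shift amount for the element width.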
+static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
+ NeonTypeFlags Type(t);
+ int IsQuad = ForceQuad ? true : Type.isQuad();
+ switch (Type.getEltType()) {
+ case NeonTypeFlags::Int8:
+ case NeonTypeFlags::Poly8:
+ return shift ? 7 : (8 << IsQuad) - 1;
+ case NeonTypeFlags::Int16:
+ case NeonTypeFlags::Poly16:
+ return shift ? 15 : (4 << IsQuad) - 1;
+ case NeonTypeFlags::Int32:
+ return shift ? 31 : (2 << IsQuad) - 1;
+ case NeonTypeFlags::Int64:
+ case NeonTypeFlags::Poly64:
+ return shift ? 63 : (1 << IsQuad) - 1;
+ case NeonTypeFlags::Poly128:
+ return shift ? 127 : (1 << IsQuad) - 1;
+ case NeonTypeFlags::Float16:
+ assert(!shift && "cannot shift float types!");
+ return (4 << IsQuad) - 1;
+ case NeonTypeFlags::Float32:
+ assert(!shift && "cannot shift float types!");
+ return (2 << IsQuad) - 1;
+ case NeonTypeFlags::Float64:
+ assert(!shift && "cannot shift float types!");
+ return (1 << IsQuad) - 1;
+ case NeonTypeFlags::BFloat16:
+ assert(!shift && "cannot shift float types!");
+ return (4 << IsQuad) - 1;
+ }
+ llvm_unreachable("Invalid NeonTypeFlag!");
+}
+
+/// getNeonEltType - Return the QualType corresponding to the elements of
+/// the vector type specified by the NeonTypeFlags. This is used to check
+/// the pointer arguments for Neon load/store intrinsics.
+static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
+ bool IsPolyUnsigned, bool IsInt64Long) {
+ switch (Flags.getEltType()) {
+ case NeonTypeFlags::Int8:
+ return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
+ case NeonTypeFlags::Int16:
+ return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
+ case NeonTypeFlags::Int32:
+ return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
+ case NeonTypeFlags::Int64:
+ if (IsInt64Long)
+ return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
+ else
+ return Flags.isUnsigned() ? Context.UnsignedLongLongTy
+ : Context.LongLongTy;
+ case NeonTypeFlags::Poly8:
+ return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
+ case NeonTypeFlags::Poly16:
+ return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
+ case NeonTypeFlags::Poly64:
+ if (IsInt64Long)
+ return Context.UnsignedLongTy;
+ else
+ return Context.UnsignedLongLongTy;
+ case NeonTypeFlags::Poly128:
+ break;
+ case NeonTypeFlags::Float16:
+ return Context.HalfTy;
+ case NeonTypeFlags::Float32:
+ return Context.FloatTy;
+ case NeonTypeFlags::Float64:
+ return Context.DoubleTy;
+ case NeonTypeFlags::BFloat16:
+ return Context.BFloat16Ty;
+ }
+ llvm_unreachable("Invalid NeonTypeFlag!");
+}
+
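+// Bitmask describing the ZA/ZT0 state used by an SME builtin: bits 0-1 encode
+// the in/out/inout state of ZA, bits 2-3 the in/out/inout state of ZT0.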
+enum ArmSMEState : unsigned {
+ ArmNoState = 0,
+
+ ArmInZA = 0b01,
+ ArmOutZA = 0b10,
+ ArmInOutZA = 0b11,
+ ArmZAMask = 0b11,
+
+ ArmInZT0 = 0b01 << 2,
+ ArmOutZT0 = 0b10 << 2,
+ ArmInOutZT0 = 0b11 << 2,
+ ArmZT0Mask = 0b11 << 2
+};
+
+bool SemaARM::ParseSVEImmChecks(
+ CallExpr *TheCall, SmallVector<std::tuple<int, int, int>, 3> &ImmChecks) {
+ // Perform all the immediate checks for this builtin call.
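+  // Each entry is (argument index, check kind, element size in bits), as
+  // populated from the arm_sve_sema_rangechecks.inc and
+  // arm_sme_sema_rangechecks.inc tables.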
+ bool HasError = false;
+ for (auto &I : ImmChecks) {
+ int ArgNum, CheckTy, ElementSizeInBits;
+ std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
+
+ typedef bool (*OptionSetCheckFnTy)(int64_t Value);
+
+ // Function that checks whether the operand (ArgNum) is an immediate
+ // that is one of the predefined values.
+ auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
+ int ErrDiag) -> bool {
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ llvm::APSInt Imm;
+ if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Imm))
+ return true;
+
+ if (!CheckImm(Imm.getSExtValue()))
+ return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
+ return false;
+ };
+
+ switch ((SVETypeFlags::ImmCheckType)CheckTy) {
+ case SVETypeFlags::ImmCheck0_31:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_13:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck1_16:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_7:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck1_1:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck1_3:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 3))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck1_7:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, 7))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckExtract:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (2048 / ElementSizeInBits) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckShiftRight:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1,
+                                          ElementSizeInBits))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckShiftRightNarrow:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1,
+                                          ElementSizeInBits / 2))
+        HasError = true;
+      break;
+    case SVETypeFlags::ImmCheckShiftLeft:
+      if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
+                                          ElementSizeInBits - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckLaneIndex:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (128 / (1 * ElementSizeInBits)) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckLaneIndexCompRotate:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (128 / (2 * ElementSizeInBits)) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckLaneIndexDot:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
+ (128 / (4 * ElementSizeInBits)) - 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckComplexRot90_270:
+ if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
+ diag::err_rotation_argument_to_cadd))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheckComplexRotAll90:
+ if (CheckImmediateInSet(
+ [](int64_t V) {
+ return V == 0 || V == 90 || V == 180 || V == 270;
+ },
+ diag::err_rotation_argument_to_cmla))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_1:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_2:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_3:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_0:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 0))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_15:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 15))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_255:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 255))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck2_4_Mul2:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 2, 4) ||
+ SemaRef.BuiltinConstantArgMultiple(TheCall, ArgNum, 2))
+ HasError = true;
+ break;
+ }
+ }
+
+ return HasError;
+}
+
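+// Determine the streaming mode of the given function from its
+// __arm_locally_streaming attribute or its SME function type attributes.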
+SemaARM::ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
+ if (FD->hasAttr<ArmLocallyStreamingAttr>())
+ return SemaARM::ArmStreaming;
+ if (const Type *Ty = FD->getType().getTypePtrOrNull()) {
+ if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
+ if (FPT->getAArch64SMEAttributes() &
+ FunctionType::SME_PStateSMEnabledMask)
+ return SemaARM::ArmStreaming;
+ if (FPT->getAArch64SMEAttributes() &
+ FunctionType::SME_PStateSMCompatibleMask)
+ return SemaARM::ArmStreamingCompatible;
+ }
+ }
+ return SemaARM::ArmNonStreaming;
+}
+
+static void checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
+ const FunctionDecl *FD,
+ SemaARM::ArmStreamingType BuiltinType) {
+ SemaARM::ArmStreamingType FnType = getArmStreamingFnType(FD);
+ if (BuiltinType == SemaARM::ArmStreamingOrSVE2p1) {
+ // Check intrinsics that are available in [sve2p1 or sme/sme2].
+ llvm::StringMap<bool> CallerFeatureMap;
+ S.Context.getFunctionFeatureMap(CallerFeatureMap, FD);
+ if (Builtin::evaluateRequiredTargetFeatures("sve2p1", CallerFeatureMap))
+ BuiltinType = SemaARM::ArmStreamingCompatible;
+ else
+ BuiltinType = SemaARM::ArmStreaming;
+ }
+
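+  // Diagnose builtins whose streaming requirements are incompatible with the
+  // streaming mode of the calling function.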
+  if (FnType == SemaARM::ArmStreaming &&
+      BuiltinType == SemaARM::ArmNonStreaming) {
+ S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
+ << TheCall->getSourceRange() << "streaming";
+ }
+
+ if (FnType == SemaARM::ArmStreamingCompatible &&
+ BuiltinType != SemaARM::ArmStreamingCompatible) {
+ S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
+ << TheCall->getSourceRange() << "streaming compatible";
+ return;
+ }
+
+  if (FnType == SemaARM::ArmNonStreaming &&
+      BuiltinType == SemaARM::ArmStreaming) {
+ S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
+ << TheCall->getSourceRange() << "non-streaming";
+ }
+}
+
+static bool hasArmZAState(const FunctionDecl *FD) {
+ const auto *T = FD->getType()->getAs<FunctionProtoType>();
+ return (T && FunctionType::getArmZAState(T->getAArch64SMEAttributes()) !=
+ FunctionType::ARM_None) ||
+ (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZA());
+}
+
+static bool hasArmZT0State(const FunctionDecl *FD) {
+ const auto *T = FD->getType()->getAs<FunctionProtoType>();
+ return (T && FunctionType::getArmZT0State(T->getAArch64SMEAttributes()) !=
+ FunctionType::ARM_None) ||
+ (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZT0());
+}
+
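+// Look up which ZA/ZT0 state a given SME builtin uses, based on the generated
+// arm_sme_builtins_za_state.inc table.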
+static ArmSMEState getSMEState(unsigned BuiltinID) {
+ switch (BuiltinID) {
+ default:
+ return ArmNoState;
+#define GET_SME_BUILTIN_GET_STATE
+#include "clang/Basic/arm_sme_builtins_za_state.inc"
+#undef GET_SME_BUILTIN_GET_STATE
+ }
+}
+
+bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID,
+                                          CallExpr *TheCall) {
+ if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
+ std::optional<ArmStreamingType> BuiltinType;
+
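+    // The generated GET_SME_STREAMING_ATTRS cases set BuiltinType for
+    // builtins that have a known streaming-mode requirement.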
+ switch (BuiltinID) {
+#define GET_SME_STREAMING_ATTRS
+#include "clang/Basic/arm_sme_streaming_attrs.inc"
+#undef GET_SME_STREAMING_ATTRS
+ }
+
+ if (BuiltinType)
+ checkArmStreamingBuiltin(SemaRef, TheCall, FD, *BuiltinType);
+
+ if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
+ Diag(TheCall->getBeginLoc(),
+ diag::warn_attribute_arm_za_builtin_no_za_state)
+ << TheCall->getSourceRange();
+
+ if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
+ Diag(TheCall->getBeginLoc(),
+ diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
+ << TheCall->getSourceRange();
+ }
+
+ // Range check SME intrinsics that take immediate values.
+ SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
+
+ switch (BuiltinID) {
+ default:
+ return false;
+#define GET_SME_IMMEDIATE_CHECK
+#include "clang/Basic/arm_sme_sema_rangechecks.inc"
+#undef GET_SME_IMMEDIATE_CHECK
+ }
+
+ return ParseSVEImmChecks(TheCall, ImmChecks);
+}
+
+bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID,
+                                          CallExpr *TheCall) {
+ if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
+ std::optional<ArmStreamingType> BuiltinType;
+
+ switch (BuiltinID) {
+#define GET_SVE_STREAMING_ATTRS
+#include "clang/Basic/arm_sve_streaming_attrs.inc"
+#undef GET_SVE_STREAMING_ATTRS
+ }
+ if (BuiltinType)
+ checkArmStreamingBuiltin(SemaRef, TheCall, FD, *BuiltinType);
+ }
+ // Range check SVE intrinsics that take immediate values.
+ SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
+
+ switch (BuiltinID) {
+ default:
+ return false;
+#define GET_SVE_IMMEDIATE_CHECK
+#include "clang/Basic/arm_sve_sema_rangechecks.inc"
+#undef GET_SVE_IMMEDIATE_CHECK
+ }
+
+ return ParseSVEImmChecks(TheCall, ImmChecks);
+}
+
+bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
+                                           unsigned BuiltinID,
+                                           CallExpr *TheCall) {
+ if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
+
+ switch (BuiltinID) {
+ default:
+ break;
+#define GET_NEON_BUILTINS
+#define TARGET_BUILTIN(id, ...) case NEON::BI##id:
+#define BUILTIN(id, ...) case NEON::BI##id:
+#include "clang/Basic/arm_neon.inc"
+ checkArmStreamingBuiltin(SemaRef, TheCall, FD, ArmNonStreaming);
+ break;
+#undef TARGET_BUILTIN
+#undef BUILTIN
+#undef GET_NEON_BUILTINS
+ }
+ }
+
+ llvm::APSInt Result;
+ uint64_t mask = 0;
+ unsigned TV = 0;
+ int PtrArgNum = -1;
+ bool HasConstPtr = false;
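+  // The generated GET_NEON_OVERLOAD_CHECK cases fill in the type-code mask
+  // and, where relevant, the pointer argument's index and constness.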
+ switch (BuiltinID) {
+#define GET_NEON_OVERLOAD_CHECK
+#include "clang/Basic/arm_neon.inc"
+#include "clang/Basic/arm_fp16.inc"
+#undef GET_NEON_OVERLOAD_CHECK
+ }
+
+ // For NEON intrinsics which are overloaded on vector element type, validate
+ // the immediate which specifies which variant to emit.
+  unsigned ImmArg = TheCall->getNumArgs() - 1;
+ if (mask) {
+ if (SemaRef.BuiltinConstantArg(TheCall, ImmArg, Result))
+ return true;
+
+ TV = Result.getLimitedValue(64);
+ if ((TV > 63) || (mask & (1ULL << TV)) == 0)
+ return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
+ << TheCall->getArg(ImmArg)->getSourceRange();
+ }
+
+ if (PtrArgNum >= 0) {
+ // Check that pointer arguments have the specified type.
+ Expr *Arg = TheCall->getArg(PtrArgNum);
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
+ Arg = ICE->getSubExpr();
+ ExprResult RHS = SemaRef.DefaultFunctionArrayLvalueConversion(Arg);
+ QualType RHSTy = RHS.get()->getType();
+
+ llvm::Triple::ArchType Arch = TI.getTriple().getArch();
+ bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
+ Arch == llvm::Triple::aarch64_32 ||
+ Arch == llvm::Triple::aarch64_be;
+ bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
+    QualType EltTy = getNeonEltType(NeonTypeFlags(TV), getASTContext(),
+                                    IsPolyUnsigned, IsInt64Long);
+ if (HasConstPtr)
+ EltTy = EltTy.withConst();
+ QualType LHSTy = getASTContext().getPointerType(EltTy);
+ Sema::AssignConvertType ConvTy;
+ ConvTy = SemaRef.CheckSingleAssignmentConstraints(LHSTy, RHS);
+ if (RHS.isInvalid())
+ return true;
+    if (SemaRef.DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy,
+                                         RHSTy, RHS.get(),
+                                         Sema::AA_Assigning))
+ return true;
+ }
+
+ // For NEON intrinsics which take an immediate value as part of the
+ // instruction, range check them here.
+ unsigned i = 0, l = 0, u = 0;
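+  // The generated cases set the argument index i and the bounds l and u; the
+  // accepted range for the immediate is [l, u + l].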
+ switch (BuiltinID) {
+ default:
+ return false;
+ #define GET_NEON_IMMEDIATE_CHECK
+ #include "clang/Basic/arm_neon.inc"
+ #include "clang/Basic/arm_fp16.inc"
+ #undef GET_NEON_IMMEDIATE_CHECK
+ }
+
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u + l);
+}
+
+bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID,
+                                          CallExpr *TheCall) {
+ switch (BuiltinID) {
+ default:
+ return false;
+ #include "clang/Basic/arm_mve_builtin_sema.inc"
+ }
+}
+
+bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI,
+                                          unsigned BuiltinID,
+                                          CallExpr *TheCall) {
+ bool Err = false;
+ switch (BuiltinID) {
+ default:
+ return false;
+#include "clang/Basic/arm_cde_builtin_sema.inc"
+ }
+
+ if (Err)
+ return true;
+
+ return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
+}
+
+bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
+                                           const Expr *CoprocArg,
+                                           bool WantCDE) {
+ ASTContext &Context = getASTContext();
+ if (SemaRef.isConstantEvaluatedContext())
+ return false;
+
+ // We can't check the value of a dependent argument.
+ if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
+ return false;
+
+ llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
+ int64_t CoprocNo = CoprocNoAP.getExtValue();
+ assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
+
+ uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
+ bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
+
+ if (IsCDECoproc != WantCDE)
+ return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
+ << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
+
+ return false;
+}
+
+bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID,
+                                           CallExpr *TheCall,
+                                           unsigned MaxWidth) {
+ assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == ARM::BI__builtin_arm_ldaex ||
+ BuiltinID == ARM::BI__builtin_arm_strex ||
+ BuiltinID == ARM::BI__builtin_arm_stlex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldaex ||
+ BuiltinID == AArch64::BI__builtin_arm_strex ||
+ BuiltinID == AArch64::BI__builtin_arm_stlex) &&
+ "unexpected ARM builtin");
+ bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == ARM::BI__builtin_arm_ldaex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldaex;
+
+ ASTContext &Context = getASTContext();
+  DeclRefExpr *DRE =
+      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+
+ // Ensure that we have the proper number of arguments.
+ if (SemaRef.checkArgCount(TheCall, IsLdrex ? 1 : 2))
+ return true;
+
+ // Inspect the pointer argument of the atomic builtin. This should always be
+ // a pointer type, whose element is an integral scalar or pointer type.
+ // Because it is a pointer type, we don't have to worry about any implicit
+ // casts here.
+ Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
+  ExprResult PointerArgRes =
+      SemaRef.DefaultFunctionArrayLvalueConversion(PointerArg);
+ if (PointerArgRes.isInvalid())
+ return true;
+ PointerArg = PointerArgRes.get();
+
+ const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
+ // task is to insert the appropriate casts into the AST. First work out just
+ // what the appropriate type is.
+ QualType ValType = pointerType->getPointeeType();
+ QualType AddrType = ValType.getUnqualifiedType().withVolatile();
+ if (IsLdrex)
+ AddrType.addConst();
+
+ // Issue a warning if the cast is dodgy.
+ CastKind CastNeeded = CK_NoOp;
+ if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
+ CastNeeded = CK_BitCast;
+ Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
+ << PointerArg->getType() << Context.getPointerType(AddrType)
+ << Sema::AA_Passing << PointerArg->getSourceRange();
+ }
+
+ // Finally, do the cast and replace the argument with the corrected version.
+ AddrType = Context.getPointerType(AddrType);
+ PointerArgRes = SemaRef.ImpCastExprToType(PointerArg, AddrType, CastNeeded);
+ if (PointerArgRes.isInvalid())
+ return true;
+ PointerArg = PointerArgRes.get();
+
+ TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
+
+ // In general, we allow ints, floats and pointers to be loaded and stored.
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
+ Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ // But ARM doesn't have instructions to deal with 128-bit versions.
+ if (Context.getTypeSize(ValType) > MaxWidth) {
+ assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
+ Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ switch (ValType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // okay
+ break;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
+ << ValType << PointerArg->getSourceRange();
+ return true;
+ }
+
+ if (IsLdrex) {
+ TheCall->setType(ValType);
+ return false;
+ }
+
+ // Initialize the argument to be stored.
+ ExprResult ValArg = TheCall->getArg(0);
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, ValType, /*consume*/ false);
+ ValArg = SemaRef.PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+ if (ValArg.isInvalid())
+ return true;
+ TheCall->setArg(0, ValArg.get());
+
+ // __builtin_arm_strex always returns an int. It's marked as such in the .def,
+ // but the custom checker bypasses all default analysis.
+ TheCall->setType(Context.IntTy);
+ return false;
+}
+
+bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI,
+                                          unsigned BuiltinID,
+                                          CallExpr *TheCall) {
+ if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == ARM::BI__builtin_arm_ldaex ||
+ BuiltinID == ARM::BI__builtin_arm_strex ||
+ BuiltinID == ARM::BI__builtin_arm_stlex) {
+ return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 1);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == ARM::BI__builtin_arm_wsr64)
+ return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
+
+ if (BuiltinID == ARM::BI__builtin_arm_rsr ||
+ BuiltinID == ARM::BI__builtin_arm_rsrp ||
+ BuiltinID == ARM::BI__builtin_arm_wsr ||
+ BuiltinID == ARM::BI__builtin_arm_wsrp)
+ return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+
+ if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
+ return true;
+ if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
+ return true;
+ if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
+ return true;
+
+ // For intrinsics which take an immediate value as part of the instruction,
+ // range check them here.
+ // FIXME: VFP Intrinsics should error if VFP not present.
+ switch (BuiltinID) {
+ default: return false;
+ case ARM::BI__builtin_arm_ssat:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 32);
+ case ARM::BI__builtin_arm_usat:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case ARM::BI__builtin_arm_ssat16:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 16);
+ case ARM::BI__builtin_arm_usat16:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case ARM::BI__builtin_arm_vcvtr_f:
+ case ARM::BI__builtin_arm_vcvtr_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case ARM::BI__builtin_arm_dmb:
+ case ARM::BI__builtin_arm_dsb:
+ case ARM::BI__builtin_arm_isb:
+ case ARM::BI__builtin_arm_dbg:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15);
+ case ARM::BI__builtin_arm_cdp:
+ case ARM::BI__builtin_arm_cdp2:
+ case ARM::BI__builtin_arm_mcr:
+ case ARM::BI__builtin_arm_mcr2:
+ case ARM::BI__builtin_arm_mrc:
+ case ARM::BI__builtin_arm_mrc2:
+ case ARM::BI__builtin_arm_mcrr:
+ case ARM::BI__builtin_arm_mcrr2:
+ case ARM::BI__builtin_arm_mrrc:
+ case ARM::BI__builtin_arm_mrrc2:
+ case ARM::BI__builtin_arm_ldc:
+ case ARM::BI__builtin_arm_ldcl:
+ case ARM::BI__builtin_arm_ldc2:
+ case ARM::BI__builtin_arm_ldc2l:
+ case ARM::BI__builtin_arm_stc:
+ case ARM::BI__builtin_arm_stcl:
+ case ARM::BI__builtin_arm_stc2:
+ case ARM::BI__builtin_arm_stc2l:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15) ||
+ CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
+ /*WantCDE*/ false);
+ }
+}
+
+bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == AArch64::BI__builtin_arm_ldaex ||
+ BuiltinID == AArch64::BI__builtin_arm_strex ||
+ BuiltinID == AArch64::BI__builtin_arm_stlex) {
+ return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 1) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 1);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr128)
+ return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+
+ // Memory Tagging Extensions (MTE) Intrinsics
+ if (BuiltinID == AArch64::BI__builtin_arm_irg ||
+ BuiltinID == AArch64::BI__builtin_arm_addg ||
+ BuiltinID == AArch64::BI__builtin_arm_gmi ||
+ BuiltinID == AArch64::BI__builtin_arm_ldg ||
+ BuiltinID == AArch64::BI__builtin_arm_stg ||
+ BuiltinID == AArch64::BI__builtin_arm_subp) {
+ return BuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr ||
+ BuiltinID == AArch64::BI__builtin_arm_wsrp)
+ return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+
+ // Only check the valid encoding range. Any constant in this range would be
+ // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
+ // an exception for incorrect registers. This matches MSVC behavior.
+ if (BuiltinID == AArch64::BI_ReadStatusReg ||
+ BuiltinID == AArch64::BI_WriteStatusReg)
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
+
+ if (BuiltinID == AArch64::BI__getReg)
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 31);
+
+ if (BuiltinID == AArch64::BI__break)
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
+
+ if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
+ return true;
+
+ if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
+ return true;
+
+ if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
+ return true;
+
+ // For intrinsics which take an immediate value as part of the instruction,
+ // range check them here.
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case AArch64::BI__builtin_arm_dmb:
+ case AArch64::BI__builtin_arm_dsb:
+ case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
+ case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
+ }
+
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u + l);
+}
+
+} // namespace clang
diff --git a/clang/lib/Sema/SemaBPF.cpp b/clang/lib/Sema/SemaBPF.cpp
new file mode 100644
index 0000000000000..7e9e9736b6e8f
--- /dev/null
+++ b/clang/lib/Sema/SemaBPF.cpp
@@ -0,0 +1,174 @@
+//===------ SemaBPF.cpp ---------- BPF target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to BPF.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaBPF.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/APSInt.h"
+#include <optional>
+
+namespace clang {
+
+SemaBPF::SemaBPF(Sema &S) : SemaBase(S) {}
+
+static bool isValidPreserveFieldInfoArg(Expr *Arg) {
+ if (Arg->getType()->getAsPlaceholderType())
+ return false;
+
+  // The first argument needs to be a record field access.
+  // If it is an array element access, we delay the decision
+  // to the BPF backend, which checks whether the access is
+  // a field access or not.
+ return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
+ isa<MemberExpr>(Arg->IgnoreParens()) ||
+ isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
+}
+
+static bool isValidPreserveTypeInfoArg(Expr *Arg) {
+ QualType ArgType = Arg->getType();
+ if (ArgType->getAsPlaceholderType())
+ return false;
+
+ // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type
+ // format:
+ // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
+ // 2. <type> var;
+ // __builtin_preserve_type_info(var, flag);
+ if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
+ !isa<UnaryOperator>(Arg->IgnoreParens()))
+ return false;
+
+ // Typedef type.
+ if (ArgType->getAs<TypedefType>())
+ return true;
+
+ // Record type or Enum type.
+ const Type *Ty = ArgType->getUnqualifiedDesugaredType();
+ if (const auto *RT = Ty->getAs<RecordType>()) {
+ if (!RT->getDecl()->getDeclName().isEmpty())
+ return true;
+ } else if (const auto *ET = Ty->getAs<EnumType>()) {
+ if (!ET->getDecl()->getDeclName().isEmpty())
+ return true;
+ }
+
+ return false;
+}
+
+static bool isValidPreserveEnumValueArg(Expr *Arg) {
+ QualType ArgType = Arg->getType();
+ if (ArgType->getAsPlaceholderType())
+ return false;
+
+ // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
+ // format:
+ // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
+ // flag);
+ const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
+ if (!UO)
+ return false;
+
+ const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
+ if (!CE)
+ return false;
+ if (CE->getCastKind() != CK_IntegralToPointer &&
+ CE->getCastKind() != CK_NullToPointer)
+ return false;
+
+ // The integer must be from an EnumConstantDecl.
+ const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
+ if (!DR)
+ return false;
+
+ const EnumConstantDecl *Enumerator =
+ dyn_cast<EnumConstantDecl>(DR->getDecl());
+ if (!Enumerator)
+ return false;
+
+ // The type must be EnumType.
+ const Type *Ty = ArgType->getUnqualifiedDesugaredType();
+ const auto *ET = Ty->getAs<EnumType>();
+ if (!ET)
+ return false;
+
+ // The enum value must be supported.
+ return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
+}
+
+bool SemaBPF::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
+ BuiltinID == BPF::BI__builtin_btf_type_id ||
+ BuiltinID == BPF::BI__builtin_preserve_type_info ||
+ BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
+ "unexpected BPF builtin");
+ ASTContext &Context = getASTContext();
+ if (SemaRef.checkArgCount(TheCall, 2))
+ return true;
+
+ // The second argument needs to be a constant int
+ Expr *Arg = TheCall->getArg(1);
+ std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
+ diag::kind kind;
+ if (!Value) {
+ if (BuiltinID == BPF::BI__builtin_preserve_field_info)
+ kind = diag::err_preserve_field_info_not_const;
+ else if (BuiltinID == BPF::BI__builtin_btf_type_id)
+ kind = diag::err_btf_type_id_not_const;
+ else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
+ kind = diag::err_preserve_type_info_not_const;
+ else
+ kind = diag::err_preserve_enum_value_not_const;
+ Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
+ return true;
+ }
+
+  // Check the first argument.
+ Arg = TheCall->getArg(0);
+ bool InvalidArg = false;
+ bool ReturnUnsignedInt = true;
+ if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
+ if (!isValidPreserveFieldInfoArg(Arg)) {
+ InvalidArg = true;
+ kind = diag::err_preserve_field_info_not_field;
+ }
+ } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
+ if (!isValidPreserveTypeInfoArg(Arg)) {
+ InvalidArg = true;
+ kind = diag::err_preserve_type_info_invalid;
+ }
+ } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
+ if (!isValidPreserveEnumValueArg(Arg)) {
+ InvalidArg = true;
+ kind = diag::err_preserve_enum_value_invalid;
+ }
+ ReturnUnsignedInt = false;
+ } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
+ ReturnUnsignedInt = false;
+ }
+
+ if (InvalidArg) {
+ Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
+ return true;
+ }
+
+ if (ReturnUnsignedInt)
+ TheCall->setType(Context.UnsignedIntTy);
+ else
+ TheCall->setType(Context.UnsignedLongTy);
+ return false;
+}
+
+} // namespace clang
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index dd48490e6dd42..555b65a921533 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -61,9 +61,19 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaAMDGPU.h"
+#include "clang/Sema/SemaARM.h"
+#include "clang/Sema/SemaBPF.h"
+#include "clang/Sema/SemaHexagon.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaLoongArch.h"
+#include "clang/Sema/SemaMIPS.h"
+#include "clang/Sema/SemaNVPTX.h"
#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaPPC.h"
#include "clang/Sema/SemaRISCV.h"
+#include "clang/Sema/SemaSystemZ.h"
+#include "clang/Sema/SemaWasm.h"
#include "clang/Sema/SemaX86.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
@@ -2259,23 +2269,23 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
- return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return ARM().CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_32:
case llvm::Triple::aarch64_be:
- return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return ARM().CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::bpfeb:
case llvm::Triple::bpfel:
- return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
+ return BPF().CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::hexagon:
- return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
+ return Hexagon().CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
- return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return MIPS().CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::systemz:
- return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
+ return SystemZ().CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
return X86().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
@@ -2283,21 +2293,21 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
- return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return PPC().CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::amdgcn:
- return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
+ return AMDGPU().CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
return RISCV().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::loongarch32:
case llvm::Triple::loongarch64:
- return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
+    return LoongArch().CheckLoongArchBuiltinFunctionCall(TI, BuiltinID,
+                                                          TheCall);
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
- return CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return Wasm().CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
- return CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return NVPTX().CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
}
}
@@ -3287,1988 +3297,6 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return TheCallResult;
}
-// Get the valid immediate range for the specified NEON type code.
-static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
- NeonTypeFlags Type(t);
- int IsQuad = ForceQuad ? true : Type.isQuad();
- switch (Type.getEltType()) {
- case NeonTypeFlags::Int8:
- case NeonTypeFlags::Poly8:
- return shift ? 7 : (8 << IsQuad) - 1;
- case NeonTypeFlags::Int16:
- case NeonTypeFlags::Poly16:
- return shift ? 15 : (4 << IsQuad) - 1;
- case NeonTypeFlags::Int32:
- return shift ? 31 : (2 << IsQuad) - 1;
- case NeonTypeFlags::Int64:
- case NeonTypeFlags::Poly64:
- return shift ? 63 : (1 << IsQuad) - 1;
- case NeonTypeFlags::Poly128:
- return shift ? 127 : (1 << IsQuad) - 1;
- case NeonTypeFlags::Float16:
- assert(!shift && "cannot shift float types!");
- return (4 << IsQuad) - 1;
- case NeonTypeFlags::Float32:
- assert(!shift && "cannot shift float types!");
- return (2 << IsQuad) - 1;
- case NeonTypeFlags::Float64:
- assert(!shift && "cannot shift float types!");
- return (1 << IsQuad) - 1;
- case NeonTypeFlags::BFloat16:
- assert(!shift && "cannot shift float types!");
- return (4 << IsQuad) - 1;
- }
- llvm_unreachable("Invalid NeonTypeFlag!");
-}
-
-/// getNeonEltType - Return the QualType corresponding to the elements of
-/// the vector type specified by the NeonTypeFlags. This is used to check
-/// the pointer arguments for Neon load/store intrinsics.
-static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
- bool IsPolyUnsigned, bool IsInt64Long) {
- switch (Flags.getEltType()) {
- case NeonTypeFlags::Int8:
- return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
- case NeonTypeFlags::Int16:
- return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
- case NeonTypeFlags::Int32:
- return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
- case NeonTypeFlags::Int64:
- if (IsInt64Long)
- return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
- else
- return Flags.isUnsigned() ? Context.UnsignedLongLongTy
- : Context.LongLongTy;
- case NeonTypeFlags::Poly8:
- return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
- case NeonTypeFlags::Poly16:
- return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
- case NeonTypeFlags::Poly64:
- if (IsInt64Long)
- return Context.UnsignedLongTy;
- else
- return Context.UnsignedLongLongTy;
- case NeonTypeFlags::Poly128:
- break;
- case NeonTypeFlags::Float16:
- return Context.HalfTy;
- case NeonTypeFlags::Float32:
- return Context.FloatTy;
- case NeonTypeFlags::Float64:
- return Context.DoubleTy;
- case NeonTypeFlags::BFloat16:
- return Context.BFloat16Ty;
- }
- llvm_unreachable("Invalid NeonTypeFlag!");
-}
-
-enum ArmStreamingType {
- ArmNonStreaming,
- ArmStreaming,
- ArmStreamingCompatible,
- ArmStreamingOrSVE2p1
-};
-
-enum ArmSMEState : unsigned {
- ArmNoState = 0,
-
- ArmInZA = 0b01,
- ArmOutZA = 0b10,
- ArmInOutZA = 0b11,
- ArmZAMask = 0b11,
-
- ArmInZT0 = 0b01 << 2,
- ArmOutZT0 = 0b10 << 2,
- ArmInOutZT0 = 0b11 << 2,
- ArmZT0Mask = 0b11 << 2
-};
-
-bool Sema::ParseSVEImmChecks(
- CallExpr *TheCall, SmallVector<std::tuple<int, int, int>, 3> &ImmChecks) {
- // Perform all the immediate checks for this builtin call.
- bool HasError = false;
- for (auto &I : ImmChecks) {
- int ArgNum, CheckTy, ElementSizeInBits;
- std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
-
- typedef bool (*OptionSetCheckFnTy)(int64_t Value);
-
- // Function that checks whether the operand (ArgNum) is an immediate
- // that is one of the predefined values.
- auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
- int ErrDiag) -> bool {
- // We can't check the value of a dependent argument.
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- return false;
-
- // Check constant-ness first.
- llvm::APSInt Imm;
- if (BuiltinConstantArg(TheCall, ArgNum, Imm))
- return true;
-
- if (!CheckImm(Imm.getSExtValue()))
- return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
- return false;
- };
-
- switch ((SVETypeFlags::ImmCheckType)CheckTy) {
- case SVETypeFlags::ImmCheck0_31:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_13:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck1_16:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_7:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck1_1:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 1, 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck1_3:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 1, 3))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck1_7:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 1, 7))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckExtract:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0,
- (2048 / ElementSizeInBits) - 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckShiftRight:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckShiftRightNarrow:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits / 2))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckShiftLeft:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0, ElementSizeInBits - 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckLaneIndex:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0,
- (128 / (1 * ElementSizeInBits)) - 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckLaneIndexCompRotate:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0,
- (128 / (2 * ElementSizeInBits)) - 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckLaneIndexDot:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0,
- (128 / (4 * ElementSizeInBits)) - 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckComplexRot90_270:
- if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
- diag::err_rotation_argument_to_cadd))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheckComplexRotAll90:
- if (CheckImmediateInSet(
- [](int64_t V) {
- return V == 0 || V == 90 || V == 180 || V == 270;
- },
- diag::err_rotation_argument_to_cmla))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_1:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_2:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_3:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_0:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 0))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_15:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 15))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck0_255:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 0, 255))
- HasError = true;
- break;
- case SVETypeFlags::ImmCheck2_4_Mul2:
- if (BuiltinConstantArgRange(TheCall, ArgNum, 2, 4) ||
- BuiltinConstantArgMultiple(TheCall, ArgNum, 2))
- HasError = true;
- break;
- }
- }
-
- return HasError;
-}
-
-static ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
- if (FD->hasAttr<ArmLocallyStreamingAttr>())
- return ArmStreaming;
- if (const Type *Ty = FD->getType().getTypePtrOrNull()) {
- if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
- if (FPT->getAArch64SMEAttributes() &
- FunctionType::SME_PStateSMEnabledMask)
- return ArmStreaming;
- if (FPT->getAArch64SMEAttributes() &
- FunctionType::SME_PStateSMCompatibleMask)
- return ArmStreamingCompatible;
- }
- }
- return ArmNonStreaming;
-}
-
-static void checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
- const FunctionDecl *FD,
- ArmStreamingType BuiltinType) {
- ArmStreamingType FnType = getArmStreamingFnType(FD);
- if (BuiltinType == ArmStreamingOrSVE2p1) {
- // Check intrinsics that are available in [sve2p1 or sme/sme2].
- llvm::StringMap<bool> CallerFeatureMap;
- S.Context.getFunctionFeatureMap(CallerFeatureMap, FD);
- if (Builtin::evaluateRequiredTargetFeatures("sve2p1", CallerFeatureMap))
- BuiltinType = ArmStreamingCompatible;
- else
- BuiltinType = ArmStreaming;
- }
-
- if (FnType == ArmStreaming && BuiltinType == ArmNonStreaming) {
- S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
- << TheCall->getSourceRange() << "streaming";
- }
-
- if (FnType == ArmStreamingCompatible &&
- BuiltinType != ArmStreamingCompatible) {
- S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
- << TheCall->getSourceRange() << "streaming compatible";
- return;
- }
-
- if (FnType == ArmNonStreaming && BuiltinType == ArmStreaming) {
- S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
- << TheCall->getSourceRange() << "non-streaming";
- }
-}
-
-static bool hasArmZAState(const FunctionDecl *FD) {
- const auto *T = FD->getType()->getAs<FunctionProtoType>();
- return (T && FunctionType::getArmZAState(T->getAArch64SMEAttributes()) !=
- FunctionType::ARM_None) ||
- (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZA());
-}
-
-static bool hasArmZT0State(const FunctionDecl *FD) {
- const auto *T = FD->getType()->getAs<FunctionProtoType>();
- return (T && FunctionType::getArmZT0State(T->getAArch64SMEAttributes()) !=
- FunctionType::ARM_None) ||
- (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZT0());
-}
-
-static ArmSMEState getSMEState(unsigned BuiltinID) {
- switch (BuiltinID) {
- default:
- return ArmNoState;
-#define GET_SME_BUILTIN_GET_STATE
-#include "clang/Basic/arm_sme_builtins_za_state.inc"
-#undef GET_SME_BUILTIN_GET_STATE
- }
-}
-
-bool Sema::CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- if (const FunctionDecl *FD = getCurFunctionDecl()) {
- std::optional<ArmStreamingType> BuiltinType;
-
- switch (BuiltinID) {
-#define GET_SME_STREAMING_ATTRS
-#include "clang/Basic/arm_sme_streaming_attrs.inc"
-#undef GET_SME_STREAMING_ATTRS
- }
-
- if (BuiltinType)
- checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);
-
- if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
- Diag(TheCall->getBeginLoc(),
- diag::warn_attribute_arm_za_builtin_no_za_state)
- << TheCall->getSourceRange();
-
- if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
- Diag(TheCall->getBeginLoc(),
- diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
- << TheCall->getSourceRange();
- }
-
- // Range check SME intrinsics that take immediate values.
- SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
-
- switch (BuiltinID) {
- default:
- return false;
-#define GET_SME_IMMEDIATE_CHECK
-#include "clang/Basic/arm_sme_sema_rangechecks.inc"
-#undef GET_SME_IMMEDIATE_CHECK
- }
-
- return ParseSVEImmChecks(TheCall, ImmChecks);
-}
-
-bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- if (const FunctionDecl *FD = getCurFunctionDecl()) {
- std::optional<ArmStreamingType> BuiltinType;
-
- switch (BuiltinID) {
-#define GET_SVE_STREAMING_ATTRS
-#include "clang/Basic/arm_sve_streaming_attrs.inc"
-#undef GET_SVE_STREAMING_ATTRS
- }
- if (BuiltinType)
- checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);
- }
- // Range check SVE intrinsics that take immediate values.
- SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
-
- switch (BuiltinID) {
- default:
- return false;
-#define GET_SVE_IMMEDIATE_CHECK
-#include "clang/Basic/arm_sve_sema_rangechecks.inc"
-#undef GET_SVE_IMMEDIATE_CHECK
- }
-
- return ParseSVEImmChecks(TheCall, ImmChecks);
-}
-
-bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID, CallExpr *TheCall) {
- if (const FunctionDecl *FD = getCurFunctionDecl()) {
-
- switch (BuiltinID) {
- default:
- break;
-#define GET_NEON_BUILTINS
-#define TARGET_BUILTIN(id, ...) case NEON::BI##id:
-#define BUILTIN(id, ...) case NEON::BI##id:
-#include "clang/Basic/arm_neon.inc"
- checkArmStreamingBuiltin(*this, TheCall, FD, ArmNonStreaming);
- break;
-#undef TARGET_BUILTIN
-#undef BUILTIN
-#undef GET_NEON_BUILTINS
- }
- }
-
- llvm::APSInt Result;
- uint64_t mask = 0;
- unsigned TV = 0;
- int PtrArgNum = -1;
- bool HasConstPtr = false;
- switch (BuiltinID) {
-#define GET_NEON_OVERLOAD_CHECK
-#include "clang/Basic/arm_neon.inc"
-#include "clang/Basic/arm_fp16.inc"
-#undef GET_NEON_OVERLOAD_CHECK
- }
-
- // For NEON intrinsics which are overloaded on vector element type, validate
- // the immediate which specifies which variant to emit.
- unsigned ImmArg = TheCall->getNumArgs()-1;
- if (mask) {
- if (BuiltinConstantArg(TheCall, ImmArg, Result))
- return true;
-
- TV = Result.getLimitedValue(64);
- if ((TV > 63) || (mask & (1ULL << TV)) == 0)
- return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
- << TheCall->getArg(ImmArg)->getSourceRange();
- }
-
- if (PtrArgNum >= 0) {
- // Check that pointer arguments have the specified type.
- Expr *Arg = TheCall->getArg(PtrArgNum);
- if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
- Arg = ICE->getSubExpr();
- ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
- QualType RHSTy = RHS.get()->getType();
-
- llvm::Triple::ArchType Arch = TI.getTriple().getArch();
- bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
- Arch == llvm::Triple::aarch64_32 ||
- Arch == llvm::Triple::aarch64_be;
- bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
- QualType EltTy =
- getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
- if (HasConstPtr)
- EltTy = EltTy.withConst();
- QualType LHSTy = Context.getPointerType(EltTy);
- AssignConvertType ConvTy;
- ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
- if (RHS.isInvalid())
- return true;
- if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
- RHS.get(), AA_Assigning))
- return true;
- }
-
- // For NEON intrinsics which take an immediate value as part of the
- // instruction, range check them here.
- unsigned i = 0, l = 0, u = 0;
- switch (BuiltinID) {
- default:
- return false;
- #define GET_NEON_IMMEDIATE_CHECK
- #include "clang/Basic/arm_neon.inc"
- #include "clang/Basic/arm_fp16.inc"
- #undef GET_NEON_IMMEDIATE_CHECK
- }
-
- return BuiltinConstantArgRange(TheCall, i, l, u + l);
-}
-
-bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- switch (BuiltinID) {
- default:
- return false;
- #include "clang/Basic/arm_mve_builtin_sema.inc"
- }
-}
-
-bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
- bool Err = false;
- switch (BuiltinID) {
- default:
- return false;
-#include "clang/Basic/arm_cde_builtin_sema.inc"
- }
-
- if (Err)
- return true;
-
- return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
-}
-
-bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
- const Expr *CoprocArg, bool WantCDE) {
- if (isConstantEvaluatedContext())
- return false;
-
- // We can't check the value of a dependent argument.
- if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
- return false;
-
- llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
- int64_t CoprocNo = CoprocNoAP.getExtValue();
- assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
-
- uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
- bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
-
- if (IsCDECoproc != WantCDE)
- return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
- << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
-
- return false;
-}
-
-bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
- unsigned MaxWidth) {
- assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex ||
- BuiltinID == ARM::BI__builtin_arm_strex ||
- BuiltinID == ARM::BI__builtin_arm_stlex ||
- BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex ||
- BuiltinID == AArch64::BI__builtin_arm_strex ||
- BuiltinID == AArch64::BI__builtin_arm_stlex) &&
- "unexpected ARM builtin");
- bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex ||
- BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex;
-
- DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
-
- // Ensure that we have the proper number of arguments.
- if (checkArgCount(TheCall, IsLdrex ? 1 : 2))
- return true;
-
- // Inspect the pointer argument of the atomic builtin. This should always be
- // a pointer type, whose element is an integral scalar or pointer type.
- // Because it is a pointer type, we don't have to worry about any implicit
- // casts here.
- Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
- ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
- if (PointerArgRes.isInvalid())
- return true;
- PointerArg = PointerArgRes.get();
-
- const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
- if (!pointerType) {
- Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
- // task is to insert the appropriate casts into the AST. First work out just
- // what the appropriate type is.
- QualType ValType = pointerType->getPointeeType();
- QualType AddrType = ValType.getUnqualifiedType().withVolatile();
- if (IsLdrex)
- AddrType.addConst();
-
- // Issue a warning if the cast is dodgy.
- CastKind CastNeeded = CK_NoOp;
- if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
- CastNeeded = CK_BitCast;
- Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
- << PointerArg->getType() << Context.getPointerType(AddrType)
- << AA_Passing << PointerArg->getSourceRange();
- }
-
- // Finally, do the cast and replace the argument with the corrected version.
- AddrType = Context.getPointerType(AddrType);
- PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
- if (PointerArgRes.isInvalid())
- return true;
- PointerArg = PointerArgRes.get();
-
- TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
-
- // In general, we allow ints, floats and pointers to be loaded and stored.
- if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
- !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
- Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- // But ARM doesn't have instructions to deal with 128-bit versions.
- if (Context.getTypeSize(ValType) > MaxWidth) {
- assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
- Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- switch (ValType.getObjCLifetime()) {
- case Qualifiers::OCL_None:
- case Qualifiers::OCL_ExplicitNone:
- // okay
- break;
-
- case Qualifiers::OCL_Weak:
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Autoreleasing:
- Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
- << ValType << PointerArg->getSourceRange();
- return true;
- }
-
- if (IsLdrex) {
- TheCall->setType(ValType);
- return false;
- }
-
- // Initialize the argument to be stored.
- ExprResult ValArg = TheCall->getArg(0);
- InitializedEntity Entity = InitializedEntity::InitializeParameter(
- Context, ValType, /*consume*/ false);
- ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
- if (ValArg.isInvalid())
- return true;
- TheCall->setArg(0, ValArg.get());
-
- // __builtin_arm_strex always returns an int. It's marked as such in the .def,
- // but the custom checker bypasses all default analysis.
- TheCall->setType(Context.IntTy);
- return false;
-}
-
-bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
- if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex ||
- BuiltinID == ARM::BI__builtin_arm_strex ||
- BuiltinID == ARM::BI__builtin_arm_stlex) {
- return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
- }
-
- if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
- return BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 1);
- }
-
- if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_wsr64)
- return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
-
- if (BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsrp ||
- BuiltinID == ARM::BI__builtin_arm_wsr ||
- BuiltinID == ARM::BI__builtin_arm_wsrp)
- return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
-
- if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
- return true;
- if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
- return true;
- if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
- return true;
-
- // For intrinsics which take an immediate value as part of the instruction,
- // range check them here.
- // FIXME: VFP Intrinsics should error if VFP not present.
- switch (BuiltinID) {
- default: return false;
- case ARM::BI__builtin_arm_ssat:
- return BuiltinConstantArgRange(TheCall, 1, 1, 32);
- case ARM::BI__builtin_arm_usat:
- return BuiltinConstantArgRange(TheCall, 1, 0, 31);
- case ARM::BI__builtin_arm_ssat16:
- return BuiltinConstantArgRange(TheCall, 1, 1, 16);
- case ARM::BI__builtin_arm_usat16:
- return BuiltinConstantArgRange(TheCall, 1, 0, 15);
- case ARM::BI__builtin_arm_vcvtr_f:
- case ARM::BI__builtin_arm_vcvtr_d:
- return BuiltinConstantArgRange(TheCall, 1, 0, 1);
- case ARM::BI__builtin_arm_dmb:
- case ARM::BI__builtin_arm_dsb:
- case ARM::BI__builtin_arm_isb:
- case ARM::BI__builtin_arm_dbg:
- return BuiltinConstantArgRange(TheCall, 0, 0, 15);
- case ARM::BI__builtin_arm_cdp:
- case ARM::BI__builtin_arm_cdp2:
- case ARM::BI__builtin_arm_mcr:
- case ARM::BI__builtin_arm_mcr2:
- case ARM::BI__builtin_arm_mrc:
- case ARM::BI__builtin_arm_mrc2:
- case ARM::BI__builtin_arm_mcrr:
- case ARM::BI__builtin_arm_mcrr2:
- case ARM::BI__builtin_arm_mrrc:
- case ARM::BI__builtin_arm_mrrc2:
- case ARM::BI__builtin_arm_ldc:
- case ARM::BI__builtin_arm_ldcl:
- case ARM::BI__builtin_arm_ldc2:
- case ARM::BI__builtin_arm_ldc2l:
- case ARM::BI__builtin_arm_stc:
- case ARM::BI__builtin_arm_stcl:
- case ARM::BI__builtin_arm_stc2:
- case ARM::BI__builtin_arm_stc2l:
- return BuiltinConstantArgRange(TheCall, 0, 0, 15) ||
- CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
- /*WantCDE*/ false);
- }
-}
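A sketch of what the ssat/usat range checks above mean at the call site; the saturation semantics are assumed from the ACLE, while the code above only enforces the constant ranges:

  int clamp_s16(int x)     { return __builtin_arm_ssat(x, 16); } // 1..32 is valid
  unsigned clamp_u8(int x) { return __builtin_arm_usat(x, 8);  } // 0..31 is valid
  // __builtin_arm_ssat(x, 0) or __builtin_arm_usat(x, 32) would be rejected
  // by the BuiltinConstantArgRange calls above.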
-
-bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
- if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex ||
- BuiltinID == AArch64::BI__builtin_arm_strex ||
- BuiltinID == AArch64::BI__builtin_arm_stlex) {
- return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
- return BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 3, 0, 1) ||
- BuiltinConstantArgRange(TheCall, 4, 0, 1);
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
- BuiltinID == AArch64::BI__builtin_arm_wsr128)
- return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
-
- // Memory Tagging Extensions (MTE) Intrinsics
- if (BuiltinID == AArch64::BI__builtin_arm_irg ||
- BuiltinID == AArch64::BI__builtin_arm_addg ||
- BuiltinID == AArch64::BI__builtin_arm_gmi ||
- BuiltinID == AArch64::BI__builtin_arm_ldg ||
- BuiltinID == AArch64::BI__builtin_arm_stg ||
- BuiltinID == AArch64::BI__builtin_arm_subp) {
- return BuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp ||
- BuiltinID == AArch64::BI__builtin_arm_wsr ||
- BuiltinID == AArch64::BI__builtin_arm_wsrp)
- return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
-
- // Only check the valid encoding range. Any constant in this range would be
- // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
- // an exception for incorrect registers. This matches MSVC behavior.
- if (BuiltinID == AArch64::BI_ReadStatusReg ||
- BuiltinID == AArch64::BI_WriteStatusReg)
- return BuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
-
- if (BuiltinID == AArch64::BI__getReg)
- return BuiltinConstantArgRange(TheCall, 0, 0, 31);
-
- if (BuiltinID == AArch64::BI__break)
- return BuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
-
- if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
- return true;
-
- if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
- return true;
-
- if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
- return true;
-
- // For intrinsics which take an immediate value as part of the instruction,
- // range check them here.
- unsigned i = 0, l = 0, u = 0;
- switch (BuiltinID) {
- default: return false;
- case AArch64::BI__builtin_arm_dmb:
- case AArch64::BI__builtin_arm_dsb:
- case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
- case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
- }
-
- return BuiltinConstantArgRange(TheCall, i, l, u + l);
-}
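For the AArch64 prefetch check above, a hedged example; the operand meanings (read/write, cache level, retention policy, data/instruction) are assumptions from the ACLE, and the only constraint enforced here is that arguments 1-4 are constants in [0, 1], [0, 3], [0, 1], [0, 1]:

  void warm(const void *p) {
    __builtin_arm_prefetch(p, /*rw=*/0, /*level=*/3, /*retain=*/0, /*data=*/1);
  }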
-
-static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
- if (Arg->getType()->getAsPlaceholderType())
- return false;
-
- // The first argument needs to be a record field access.
- // If it is an array element access, we delay the decision
- // to the BPF backend to check whether the access is a
- // field access or not.
- return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
- isa<MemberExpr>(Arg->IgnoreParens()) ||
- isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
-}
-
-static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
- QualType ArgType = Arg->getType();
- if (ArgType->getAsPlaceholderType())
- return false;
-
- // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type
- // format:
- // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
- // 2. <type> var;
- // __builtin_preserve_type_info(var, flag);
- if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
- !isa<UnaryOperator>(Arg->IgnoreParens()))
- return false;
-
- // Typedef type.
- if (ArgType->getAs<TypedefType>())
- return true;
-
- // Record type or Enum type.
- const Type *Ty = ArgType->getUnqualifiedDesugaredType();
- if (const auto *RT = Ty->getAs<RecordType>()) {
- if (!RT->getDecl()->getDeclName().isEmpty())
- return true;
- } else if (const auto *ET = Ty->getAs<EnumType>()) {
- if (!ET->getDecl()->getDeclName().isEmpty())
- return true;
- }
-
- return false;
-}
-
-static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
- QualType ArgType = Arg->getType();
- if (ArgType->getAsPlaceholderType())
- return false;
-
- // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
- // format:
- // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
- // flag);
- const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
- if (!UO)
- return false;
-
- const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
- if (!CE)
- return false;
- if (CE->getCastKind() != CK_IntegralToPointer &&
- CE->getCastKind() != CK_NullToPointer)
- return false;
-
- // The integer must be from an EnumConstantDecl.
- const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
- if (!DR)
- return false;
-
- const EnumConstantDecl *Enumerator =
- dyn_cast<EnumConstantDecl>(DR->getDecl());
- if (!Enumerator)
- return false;
-
- // The type must be EnumType.
- const Type *Ty = ArgType->getUnqualifiedDesugaredType();
- const auto *ET = Ty->getAs<EnumType>();
- if (!ET)
- return false;
-
- // The enum value must be supported.
- return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
-}
-
-bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
- assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
- BuiltinID == BPF::BI__builtin_btf_type_id ||
- BuiltinID == BPF::BI__builtin_preserve_type_info ||
- BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
- "unexpected BPF builtin");
-
- if (checkArgCount(TheCall, 2))
- return true;
-
- // The second argument needs to be a constant int
- Expr *Arg = TheCall->getArg(1);
- std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
- diag::kind kind;
- if (!Value) {
- if (BuiltinID == BPF::BI__builtin_preserve_field_info)
- kind = diag::err_preserve_field_info_not_const;
- else if (BuiltinID == BPF::BI__builtin_btf_type_id)
- kind = diag::err_btf_type_id_not_const;
- else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
- kind = diag::err_preserve_type_info_not_const;
- else
- kind = diag::err_preserve_enum_value_not_const;
- Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
- return true;
- }
-
- // Check the first argument.
- Arg = TheCall->getArg(0);
- bool InvalidArg = false;
- bool ReturnUnsignedInt = true;
- if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
- if (!isValidBPFPreserveFieldInfoArg(Arg)) {
- InvalidArg = true;
- kind = diag::err_preserve_field_info_not_field;
- }
- } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
- if (!isValidBPFPreserveTypeInfoArg(Arg)) {
- InvalidArg = true;
- kind = diag::err_preserve_type_info_invalid;
- }
- } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
- if (!isValidBPFPreserveEnumValueArg(Arg)) {
- InvalidArg = true;
- kind = diag::err_preserve_enum_value_invalid;
- }
- ReturnUnsignedInt = false;
- } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
- ReturnUnsignedInt = false;
- }
-
- if (InvalidArg) {
- Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
- return true;
- }
-
- if (ReturnUnsignedInt)
- TheCall->setType(Context.UnsignedIntTy);
- else
- TheCall->setType(Context.UnsignedLongTy);
- return false;
-}
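The argument shapes these BPF builtins expect are spelled out in the helper comments above; a minimal sketch, compiled for a BPF target, with the flag value 1 standing in for the real relocation-kind constants:

  struct S { int a; };
  enum E { FIRST = 1 };
  // Returns unsigned int, per the check above.
  unsigned type_exists(void) {
    return __builtin_preserve_type_info(*(struct S *)0, 1);
  }
  // Returns unsigned long, per the check above.
  unsigned long enum_value(void) {
    return __builtin_preserve_enum_value(*(enum E *)FIRST, 1);
  }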
-
-bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
- struct ArgInfo {
- uint8_t OpNum;
- bool IsSigned;
- uint8_t BitWidth;
- uint8_t Align;
- };
- struct BuiltinInfo {
- unsigned BuiltinID;
- ArgInfo Infos[2];
- };
-
- static BuiltinInfo Infos[] = {
- { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
- { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
- { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
- { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} },
- { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
- { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
- { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
- { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
- { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
- { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
- { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },
-
- { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
- { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
- { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
- { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },
-
- { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
- {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
- {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
- { 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
- { 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
- { 3, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
- { 3, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
- {{ 2, false, 4, 0 },
- { 3, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
- {{ 2, false, 4, 0 },
- { 3, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
- {{ 2, false, 4, 0 },
- { 3, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
- {{ 2, false, 4, 0 },
- { 3, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
- { 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
- { 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
- {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
- {{ 1, false, 4, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
- {{ 3, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
- {{ 3, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
- {{ 3, false, 1, 0 }} },
-
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, {{ 2, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B,
- {{ 2, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx,
- {{ 3, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B,
- {{ 3, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, {{ 2, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B,
- {{ 2, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx,
- {{ 3, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B,
- {{ 3, false, 2, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B,
- {{ 3, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {{ 2, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {{ 3, false, 3, 0 }} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B,
- {{ 3, false, 3, 0 }} },
- };
-
- // Use a dynamically initialized static to sort the table exactly once on
- // first run.
- static const bool SortOnce =
- (llvm::sort(Infos,
- [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
- return LHS.BuiltinID < RHS.BuiltinID;
- }),
- true);
- (void)SortOnce;
-
- const BuiltinInfo *F = llvm::partition_point(
- Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
- if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
- return false;
-
- bool Error = false;
-
- for (const ArgInfo &A : F->Infos) {
- // Ignore empty ArgInfo elements.
- if (A.BitWidth == 0)
- continue;
-
- int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
- int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
- if (!A.Align) {
- Error |= BuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
- } else {
- unsigned M = 1 << A.Align;
- Min *= M;
- Max *= M;
- Error |= BuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
- Error |= BuiltinConstantArgMultiple(TheCall, A.OpNum, M);
- }
- }
- return Error;
-}
-
-bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
- return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
-}
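A worked instance of the Align scaling in CheckHexagonBuiltinArgument, for an ArgInfo of { 1, true, 4, 2 } (the signed 4-bit, 4-byte-aligned immediate used by the L2_loadri_pci entry above):

  // Min = -(1 << 3) = -8 and Max = 7 before scaling; M = 1 << 2 = 4, so the
  // accepted immediates are the multiples of 4 in [-32, 28].
  constexpr int BitWidth = 4, AlignLog2 = 2;
  constexpr int M   = 1 << AlignLog2;                   // 4
  constexpr int Min = -(1 << (BitWidth - 1)) * M;       // -32
  constexpr int Max = ((1 << (BitWidth - 1)) - 1) * M;  // 28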
-
-bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
- switch (BuiltinID) {
- default:
- break;
- // Basic intrinsics.
- case LoongArch::BI__builtin_loongarch_cacop_d:
- case LoongArch::BI__builtin_loongarch_cacop_w: {
- BuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5));
- BuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12), llvm::maxIntN(12));
- break;
- }
- case LoongArch::BI__builtin_loongarch_break:
- case LoongArch::BI__builtin_loongarch_dbar:
- case LoongArch::BI__builtin_loongarch_ibar:
- case LoongArch::BI__builtin_loongarch_syscall:
- // Check if immediate is in [0, 32767].
- return BuiltinConstantArgRange(TheCall, 0, 0, 32767);
- case LoongArch::BI__builtin_loongarch_csrrd_w:
- case LoongArch::BI__builtin_loongarch_csrrd_d:
- return BuiltinConstantArgRange(TheCall, 0, 0, 16383);
- case LoongArch::BI__builtin_loongarch_csrwr_w:
- case LoongArch::BI__builtin_loongarch_csrwr_d:
- return BuiltinConstantArgRange(TheCall, 1, 0, 16383);
- case LoongArch::BI__builtin_loongarch_csrxchg_w:
- case LoongArch::BI__builtin_loongarch_csrxchg_d:
- return BuiltinConstantArgRange(TheCall, 2, 0, 16383);
- case LoongArch::BI__builtin_loongarch_lddir_d:
- case LoongArch::BI__builtin_loongarch_ldpte_d:
- return BuiltinConstantArgRange(TheCall, 1, 0, 31);
- case LoongArch::BI__builtin_loongarch_movfcsr2gr:
- case LoongArch::BI__builtin_loongarch_movgr2fcsr:
- return BuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2));
-
- // LSX intrinsics.
- case LoongArch::BI__builtin_lsx_vbitclri_b:
- case LoongArch::BI__builtin_lsx_vbitrevi_b:
- case LoongArch::BI__builtin_lsx_vbitseti_b:
- case LoongArch::BI__builtin_lsx_vsat_b:
- case LoongArch::BI__builtin_lsx_vsat_bu:
- case LoongArch::BI__builtin_lsx_vslli_b:
- case LoongArch::BI__builtin_lsx_vsrai_b:
- case LoongArch::BI__builtin_lsx_vsrari_b:
- case LoongArch::BI__builtin_lsx_vsrli_b:
- case LoongArch::BI__builtin_lsx_vsllwil_h_b:
- case LoongArch::BI__builtin_lsx_vsllwil_hu_bu:
- case LoongArch::BI__builtin_lsx_vrotri_b:
- case LoongArch::BI__builtin_lsx_vsrlri_b:
- return BuiltinConstantArgRange(TheCall, 1, 0, 7);
- case LoongArch::BI__builtin_lsx_vbitclri_h:
- case LoongArch::BI__builtin_lsx_vbitrevi_h:
- case LoongArch::BI__builtin_lsx_vbitseti_h:
- case LoongArch::BI__builtin_lsx_vsat_h:
- case LoongArch::BI__builtin_lsx_vsat_hu:
- case LoongArch::BI__builtin_lsx_vslli_h:
- case LoongArch::BI__builtin_lsx_vsrai_h:
- case LoongArch::BI__builtin_lsx_vsrari_h:
- case LoongArch::BI__builtin_lsx_vsrli_h:
- case LoongArch::BI__builtin_lsx_vsllwil_w_h:
- case LoongArch::BI__builtin_lsx_vsllwil_wu_hu:
- case LoongArch::BI__builtin_lsx_vrotri_h:
- case LoongArch::BI__builtin_lsx_vsrlri_h:
- return BuiltinConstantArgRange(TheCall, 1, 0, 15);
- case LoongArch::BI__builtin_lsx_vssrarni_b_h:
- case LoongArch::BI__builtin_lsx_vssrarni_bu_h:
- case LoongArch::BI__builtin_lsx_vssrani_b_h:
- case LoongArch::BI__builtin_lsx_vssrani_bu_h:
- case LoongArch::BI__builtin_lsx_vsrarni_b_h:
- case LoongArch::BI__builtin_lsx_vsrlni_b_h:
- case LoongArch::BI__builtin_lsx_vsrlrni_b_h:
- case LoongArch::BI__builtin_lsx_vssrlni_b_h:
- case LoongArch::BI__builtin_lsx_vssrlni_bu_h:
- case LoongArch::BI__builtin_lsx_vssrlrni_b_h:
- case LoongArch::BI__builtin_lsx_vssrlrni_bu_h:
- case LoongArch::BI__builtin_lsx_vsrani_b_h:
- return BuiltinConstantArgRange(TheCall, 2, 0, 15);
- case LoongArch::BI__builtin_lsx_vslei_bu:
- case LoongArch::BI__builtin_lsx_vslei_hu:
- case LoongArch::BI__builtin_lsx_vslei_wu:
- case LoongArch::BI__builtin_lsx_vslei_du:
- case LoongArch::BI__builtin_lsx_vslti_bu:
- case LoongArch::BI__builtin_lsx_vslti_hu:
- case LoongArch::BI__builtin_lsx_vslti_wu:
- case LoongArch::BI__builtin_lsx_vslti_du:
- case LoongArch::BI__builtin_lsx_vmaxi_bu:
- case LoongArch::BI__builtin_lsx_vmaxi_hu:
- case LoongArch::BI__builtin_lsx_vmaxi_wu:
- case LoongArch::BI__builtin_lsx_vmaxi_du:
- case LoongArch::BI__builtin_lsx_vmini_bu:
- case LoongArch::BI__builtin_lsx_vmini_hu:
- case LoongArch::BI__builtin_lsx_vmini_wu:
- case LoongArch::BI__builtin_lsx_vmini_du:
- case LoongArch::BI__builtin_lsx_vaddi_bu:
- case LoongArch::BI__builtin_lsx_vaddi_hu:
- case LoongArch::BI__builtin_lsx_vaddi_wu:
- case LoongArch::BI__builtin_lsx_vaddi_du:
- case LoongArch::BI__builtin_lsx_vbitclri_w:
- case LoongArch::BI__builtin_lsx_vbitrevi_w:
- case LoongArch::BI__builtin_lsx_vbitseti_w:
- case LoongArch::BI__builtin_lsx_vsat_w:
- case LoongArch::BI__builtin_lsx_vsat_wu:
- case LoongArch::BI__builtin_lsx_vslli_w:
- case LoongArch::BI__builtin_lsx_vsrai_w:
- case LoongArch::BI__builtin_lsx_vsrari_w:
- case LoongArch::BI__builtin_lsx_vsrli_w:
- case LoongArch::BI__builtin_lsx_vsllwil_d_w:
- case LoongArch::BI__builtin_lsx_vsllwil_du_wu:
- case LoongArch::BI__builtin_lsx_vsrlri_w:
- case LoongArch::BI__builtin_lsx_vrotri_w:
- case LoongArch::BI__builtin_lsx_vsubi_bu:
- case LoongArch::BI__builtin_lsx_vsubi_hu:
- case LoongArch::BI__builtin_lsx_vbsrl_v:
- case LoongArch::BI__builtin_lsx_vbsll_v:
- case LoongArch::BI__builtin_lsx_vsubi_wu:
- case LoongArch::BI__builtin_lsx_vsubi_du:
- return BuiltinConstantArgRange(TheCall, 1, 0, 31);
- case LoongArch::BI__builtin_lsx_vssrarni_h_w:
- case LoongArch::BI__builtin_lsx_vssrarni_hu_w:
- case LoongArch::BI__builtin_lsx_vssrani_h_w:
- case LoongArch::BI__builtin_lsx_vssrani_hu_w:
- case LoongArch::BI__builtin_lsx_vsrarni_h_w:
- case LoongArch::BI__builtin_lsx_vsrani_h_w:
- case LoongArch::BI__builtin_lsx_vfrstpi_b:
- case LoongArch::BI__builtin_lsx_vfrstpi_h:
- case LoongArch::BI__builtin_lsx_vsrlni_h_w:
- case LoongArch::BI__builtin_lsx_vsrlrni_h_w:
- case LoongArch::BI__builtin_lsx_vssrlni_h_w:
- case LoongArch::BI__builtin_lsx_vssrlni_hu_w:
- case LoongArch::BI__builtin_lsx_vssrlrni_h_w:
- case LoongArch::BI__builtin_lsx_vssrlrni_hu_w:
- return BuiltinConstantArgRange(TheCall, 2, 0, 31);
- case LoongArch::BI__builtin_lsx_vbitclri_d:
- case LoongArch::BI__builtin_lsx_vbitrevi_d:
- case LoongArch::BI__builtin_lsx_vbitseti_d:
- case LoongArch::BI__builtin_lsx_vsat_d:
- case LoongArch::BI__builtin_lsx_vsat_du:
- case LoongArch::BI__builtin_lsx_vslli_d:
- case LoongArch::BI__builtin_lsx_vsrai_d:
- case LoongArch::BI__builtin_lsx_vsrli_d:
- case LoongArch::BI__builtin_lsx_vsrari_d:
- case LoongArch::BI__builtin_lsx_vrotri_d:
- case LoongArch::BI__builtin_lsx_vsrlri_d:
- return BuiltinConstantArgRange(TheCall, 1, 0, 63);
- case LoongArch::BI__builtin_lsx_vssrarni_w_d:
- case LoongArch::BI__builtin_lsx_vssrarni_wu_d:
- case LoongArch::BI__builtin_lsx_vssrani_w_d:
- case LoongArch::BI__builtin_lsx_vssrani_wu_d:
- case LoongArch::BI__builtin_lsx_vsrarni_w_d:
- case LoongArch::BI__builtin_lsx_vsrlni_w_d:
- case LoongArch::BI__builtin_lsx_vsrlrni_w_d:
- case LoongArch::BI__builtin_lsx_vssrlni_w_d:
- case LoongArch::BI__builtin_lsx_vssrlni_wu_d:
- case LoongArch::BI__builtin_lsx_vssrlrni_w_d:
- case LoongArch::BI__builtin_lsx_vssrlrni_wu_d:
- case LoongArch::BI__builtin_lsx_vsrani_w_d:
- return BuiltinConstantArgRange(TheCall, 2, 0, 63);
- case LoongArch::BI__builtin_lsx_vssrarni_d_q:
- case LoongArch::BI__builtin_lsx_vssrarni_du_q:
- case LoongArch::BI__builtin_lsx_vssrani_d_q:
- case LoongArch::BI__builtin_lsx_vssrani_du_q:
- case LoongArch::BI__builtin_lsx_vsrarni_d_q:
- case LoongArch::BI__builtin_lsx_vssrlni_d_q:
- case LoongArch::BI__builtin_lsx_vssrlni_du_q:
- case LoongArch::BI__builtin_lsx_vssrlrni_d_q:
- case LoongArch::BI__builtin_lsx_vssrlrni_du_q:
- case LoongArch::BI__builtin_lsx_vsrani_d_q:
- case LoongArch::BI__builtin_lsx_vsrlrni_d_q:
- case LoongArch::BI__builtin_lsx_vsrlni_d_q:
- return BuiltinConstantArgRange(TheCall, 2, 0, 127);
- case LoongArch::BI__builtin_lsx_vseqi_b:
- case LoongArch::BI__builtin_lsx_vseqi_h:
- case LoongArch::BI__builtin_lsx_vseqi_w:
- case LoongArch::BI__builtin_lsx_vseqi_d:
- case LoongArch::BI__builtin_lsx_vslti_b:
- case LoongArch::BI__builtin_lsx_vslti_h:
- case LoongArch::BI__builtin_lsx_vslti_w:
- case LoongArch::BI__builtin_lsx_vslti_d:
- case LoongArch::BI__builtin_lsx_vslei_b:
- case LoongArch::BI__builtin_lsx_vslei_h:
- case LoongArch::BI__builtin_lsx_vslei_w:
- case LoongArch::BI__builtin_lsx_vslei_d:
- case LoongArch::BI__builtin_lsx_vmaxi_b:
- case LoongArch::BI__builtin_lsx_vmaxi_h:
- case LoongArch::BI__builtin_lsx_vmaxi_w:
- case LoongArch::BI__builtin_lsx_vmaxi_d:
- case LoongArch::BI__builtin_lsx_vmini_b:
- case LoongArch::BI__builtin_lsx_vmini_h:
- case LoongArch::BI__builtin_lsx_vmini_w:
- case LoongArch::BI__builtin_lsx_vmini_d:
- return BuiltinConstantArgRange(TheCall, 1, -16, 15);
- case LoongArch::BI__builtin_lsx_vandi_b:
- case LoongArch::BI__builtin_lsx_vnori_b:
- case LoongArch::BI__builtin_lsx_vori_b:
- case LoongArch::BI__builtin_lsx_vshuf4i_b:
- case LoongArch::BI__builtin_lsx_vshuf4i_h:
- case LoongArch::BI__builtin_lsx_vshuf4i_w:
- case LoongArch::BI__builtin_lsx_vxori_b:
- return BuiltinConstantArgRange(TheCall, 1, 0, 255);
- case LoongArch::BI__builtin_lsx_vbitseli_b:
- case LoongArch::BI__builtin_lsx_vshuf4i_d:
- case LoongArch::BI__builtin_lsx_vextrins_b:
- case LoongArch::BI__builtin_lsx_vextrins_h:
- case LoongArch::BI__builtin_lsx_vextrins_w:
- case LoongArch::BI__builtin_lsx_vextrins_d:
- case LoongArch::BI__builtin_lsx_vpermi_w:
- return BuiltinConstantArgRange(TheCall, 2, 0, 255);
- case LoongArch::BI__builtin_lsx_vpickve2gr_b:
- case LoongArch::BI__builtin_lsx_vpickve2gr_bu:
- case LoongArch::BI__builtin_lsx_vreplvei_b:
- return BuiltinConstantArgRange(TheCall, 1, 0, 15);
- case LoongArch::BI__builtin_lsx_vinsgr2vr_b:
- return BuiltinConstantArgRange(TheCall, 2, 0, 15);
- case LoongArch::BI__builtin_lsx_vpickve2gr_h:
- case LoongArch::BI__builtin_lsx_vpickve2gr_hu:
- case LoongArch::BI__builtin_lsx_vreplvei_h:
- return BuiltinConstantArgRange(TheCall, 1, 0, 7);
- case LoongArch::BI__builtin_lsx_vinsgr2vr_h:
- return BuiltinConstantArgRange(TheCall, 2, 0, 7);
- case LoongArch::BI__builtin_lsx_vpickve2gr_w:
- case LoongArch::BI__builtin_lsx_vpickve2gr_wu:
- case LoongArch::BI__builtin_lsx_vreplvei_w:
- return BuiltinConstantArgRange(TheCall, 1, 0, 3);
- case LoongArch::BI__builtin_lsx_vinsgr2vr_w:
- return BuiltinConstantArgRange(TheCall, 2, 0, 3);
- case LoongArch::BI__builtin_lsx_vpickve2gr_d:
- case LoongArch::BI__builtin_lsx_vpickve2gr_du:
- case LoongArch::BI__builtin_lsx_vreplvei_d:
- return BuiltinConstantArgRange(TheCall, 1, 0, 1);
- case LoongArch::BI__builtin_lsx_vinsgr2vr_d:
- return BuiltinConstantArgRange(TheCall, 2, 0, 1);
- case LoongArch::BI__builtin_lsx_vstelm_b:
- return BuiltinConstantArgRange(TheCall, 2, -128, 127) ||
- BuiltinConstantArgRange(TheCall, 3, 0, 15);
- case LoongArch::BI__builtin_lsx_vstelm_h:
- return BuiltinConstantArgRange(TheCall, 2, -256, 254) ||
- BuiltinConstantArgRange(TheCall, 3, 0, 7);
- case LoongArch::BI__builtin_lsx_vstelm_w:
- return BuiltinConstantArgRange(TheCall, 2, -512, 508) ||
- BuiltinConstantArgRange(TheCall, 3, 0, 3);
- case LoongArch::BI__builtin_lsx_vstelm_d:
- return BuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
- BuiltinConstantArgRange(TheCall, 3, 0, 1);
- case LoongArch::BI__builtin_lsx_vldrepl_b:
- case LoongArch::BI__builtin_lsx_vld:
- return BuiltinConstantArgRange(TheCall, 1, -2048, 2047);
- case LoongArch::BI__builtin_lsx_vldrepl_h:
- return BuiltinConstantArgRange(TheCall, 1, -2048, 2046);
- case LoongArch::BI__builtin_lsx_vldrepl_w:
- return BuiltinConstantArgRange(TheCall, 1, -2048, 2044);
- case LoongArch::BI__builtin_lsx_vldrepl_d:
- return BuiltinConstantArgRange(TheCall, 1, -2048, 2040);
- case LoongArch::BI__builtin_lsx_vst:
- return BuiltinConstantArgRange(TheCall, 2, -2048, 2047);
- case LoongArch::BI__builtin_lsx_vldi:
- return BuiltinConstantArgRange(TheCall, 0, -4096, 4095);
- case LoongArch::BI__builtin_lsx_vrepli_b:
- case LoongArch::BI__builtin_lsx_vrepli_h:
- case LoongArch::BI__builtin_lsx_vrepli_w:
- case LoongArch::BI__builtin_lsx_vrepli_d:
- return BuiltinConstantArgRange(TheCall, 0, -512, 511);
-
- // LASX intrinsics.
- case LoongArch::BI__builtin_lasx_xvbitclri_b:
- case LoongArch::BI__builtin_lasx_xvbitrevi_b:
- case LoongArch::BI__builtin_lasx_xvbitseti_b:
- case LoongArch::BI__builtin_lasx_xvsat_b:
- case LoongArch::BI__builtin_lasx_xvsat_bu:
- case LoongArch::BI__builtin_lasx_xvslli_b:
- case LoongArch::BI__builtin_lasx_xvsrai_b:
- case LoongArch::BI__builtin_lasx_xvsrari_b:
- case LoongArch::BI__builtin_lasx_xvsrli_b:
- case LoongArch::BI__builtin_lasx_xvsllwil_h_b:
- case LoongArch::BI__builtin_lasx_xvsllwil_hu_bu:
- case LoongArch::BI__builtin_lasx_xvrotri_b:
- case LoongArch::BI__builtin_lasx_xvsrlri_b:
- return BuiltinConstantArgRange(TheCall, 1, 0, 7);
- case LoongArch::BI__builtin_lasx_xvbitclri_h:
- case LoongArch::BI__builtin_lasx_xvbitrevi_h:
- case LoongArch::BI__builtin_lasx_xvbitseti_h:
- case LoongArch::BI__builtin_lasx_xvsat_h:
- case LoongArch::BI__builtin_lasx_xvsat_hu:
- case LoongArch::BI__builtin_lasx_xvslli_h:
- case LoongArch::BI__builtin_lasx_xvsrai_h:
- case LoongArch::BI__builtin_lasx_xvsrari_h:
- case LoongArch::BI__builtin_lasx_xvsrli_h:
- case LoongArch::BI__builtin_lasx_xvsllwil_w_h:
- case LoongArch::BI__builtin_lasx_xvsllwil_wu_hu:
- case LoongArch::BI__builtin_lasx_xvrotri_h:
- case LoongArch::BI__builtin_lasx_xvsrlri_h:
- return BuiltinConstantArgRange(TheCall, 1, 0, 15);
- case LoongArch::BI__builtin_lasx_xvssrarni_b_h:
- case LoongArch::BI__builtin_lasx_xvssrarni_bu_h:
- case LoongArch::BI__builtin_lasx_xvssrani_b_h:
- case LoongArch::BI__builtin_lasx_xvssrani_bu_h:
- case LoongArch::BI__builtin_lasx_xvsrarni_b_h:
- case LoongArch::BI__builtin_lasx_xvsrlni_b_h:
- case LoongArch::BI__builtin_lasx_xvsrlrni_b_h:
- case LoongArch::BI__builtin_lasx_xvssrlni_b_h:
- case LoongArch::BI__builtin_lasx_xvssrlni_bu_h:
- case LoongArch::BI__builtin_lasx_xvssrlrni_b_h:
- case LoongArch::BI__builtin_lasx_xvssrlrni_bu_h:
- case LoongArch::BI__builtin_lasx_xvsrani_b_h:
- return BuiltinConstantArgRange(TheCall, 2, 0, 15);
- case LoongArch::BI__builtin_lasx_xvslei_bu:
- case LoongArch::BI__builtin_lasx_xvslei_hu:
- case LoongArch::BI__builtin_lasx_xvslei_wu:
- case LoongArch::BI__builtin_lasx_xvslei_du:
- case LoongArch::BI__builtin_lasx_xvslti_bu:
- case LoongArch::BI__builtin_lasx_xvslti_hu:
- case LoongArch::BI__builtin_lasx_xvslti_wu:
- case LoongArch::BI__builtin_lasx_xvslti_du:
- case LoongArch::BI__builtin_lasx_xvmaxi_bu:
- case LoongArch::BI__builtin_lasx_xvmaxi_hu:
- case LoongArch::BI__builtin_lasx_xvmaxi_wu:
- case LoongArch::BI__builtin_lasx_xvmaxi_du:
- case LoongArch::BI__builtin_lasx_xvmini_bu:
- case LoongArch::BI__builtin_lasx_xvmini_hu:
- case LoongArch::BI__builtin_lasx_xvmini_wu:
- case LoongArch::BI__builtin_lasx_xvmini_du:
- case LoongArch::BI__builtin_lasx_xvaddi_bu:
- case LoongArch::BI__builtin_lasx_xvaddi_hu:
- case LoongArch::BI__builtin_lasx_xvaddi_wu:
- case LoongArch::BI__builtin_lasx_xvaddi_du:
- case LoongArch::BI__builtin_lasx_xvbitclri_w:
- case LoongArch::BI__builtin_lasx_xvbitrevi_w:
- case LoongArch::BI__builtin_lasx_xvbitseti_w:
- case LoongArch::BI__builtin_lasx_xvsat_w:
- case LoongArch::BI__builtin_lasx_xvsat_wu:
- case LoongArch::BI__builtin_lasx_xvslli_w:
- case LoongArch::BI__builtin_lasx_xvsrai_w:
- case LoongArch::BI__builtin_lasx_xvsrari_w:
- case LoongArch::BI__builtin_lasx_xvsrli_w:
- case LoongArch::BI__builtin_lasx_xvsllwil_d_w:
- case LoongArch::BI__builtin_lasx_xvsllwil_du_wu:
- case LoongArch::BI__builtin_lasx_xvsrlri_w:
- case LoongArch::BI__builtin_lasx_xvrotri_w:
- case LoongArch::BI__builtin_lasx_xvsubi_bu:
- case LoongArch::BI__builtin_lasx_xvsubi_hu:
- case LoongArch::BI__builtin_lasx_xvsubi_wu:
- case LoongArch::BI__builtin_lasx_xvsubi_du:
- case LoongArch::BI__builtin_lasx_xvbsrl_v:
- case LoongArch::BI__builtin_lasx_xvbsll_v:
- return BuiltinConstantArgRange(TheCall, 1, 0, 31);
- case LoongArch::BI__builtin_lasx_xvssrarni_h_w:
- case LoongArch::BI__builtin_lasx_xvssrarni_hu_w:
- case LoongArch::BI__builtin_lasx_xvssrani_h_w:
- case LoongArch::BI__builtin_lasx_xvssrani_hu_w:
- case LoongArch::BI__builtin_lasx_xvsrarni_h_w:
- case LoongArch::BI__builtin_lasx_xvsrani_h_w:
- case LoongArch::BI__builtin_lasx_xvfrstpi_b:
- case LoongArch::BI__builtin_lasx_xvfrstpi_h:
- case LoongArch::BI__builtin_lasx_xvsrlni_h_w:
- case LoongArch::BI__builtin_lasx_xvsrlrni_h_w:
- case LoongArch::BI__builtin_lasx_xvssrlni_h_w:
- case LoongArch::BI__builtin_lasx_xvssrlni_hu_w:
- case LoongArch::BI__builtin_lasx_xvssrlrni_h_w:
- case LoongArch::BI__builtin_lasx_xvssrlrni_hu_w:
- return BuiltinConstantArgRange(TheCall, 2, 0, 31);
- case LoongArch::BI__builtin_lasx_xvbitclri_d:
- case LoongArch::BI__builtin_lasx_xvbitrevi_d:
- case LoongArch::BI__builtin_lasx_xvbitseti_d:
- case LoongArch::BI__builtin_lasx_xvsat_d:
- case LoongArch::BI__builtin_lasx_xvsat_du:
- case LoongArch::BI__builtin_lasx_xvslli_d:
- case LoongArch::BI__builtin_lasx_xvsrai_d:
- case LoongArch::BI__builtin_lasx_xvsrli_d:
- case LoongArch::BI__builtin_lasx_xvsrari_d:
- case LoongArch::BI__builtin_lasx_xvrotri_d:
- case LoongArch::BI__builtin_lasx_xvsrlri_d:
- return BuiltinConstantArgRange(TheCall, 1, 0, 63);
- case LoongArch::BI__builtin_lasx_xvssrarni_w_d:
- case LoongArch::BI__builtin_lasx_xvssrarni_wu_d:
- case LoongArch::BI__builtin_lasx_xvssrani_w_d:
- case LoongArch::BI__builtin_lasx_xvssrani_wu_d:
- case LoongArch::BI__builtin_lasx_xvsrarni_w_d:
- case LoongArch::BI__builtin_lasx_xvsrlni_w_d:
- case LoongArch::BI__builtin_lasx_xvsrlrni_w_d:
- case LoongArch::BI__builtin_lasx_xvssrlni_w_d:
- case LoongArch::BI__builtin_lasx_xvssrlni_wu_d:
- case LoongArch::BI__builtin_lasx_xvssrlrni_w_d:
- case LoongArch::BI__builtin_lasx_xvssrlrni_wu_d:
- case LoongArch::BI__builtin_lasx_xvsrani_w_d:
- return BuiltinConstantArgRange(TheCall, 2, 0, 63);
- case LoongArch::BI__builtin_lasx_xvssrarni_d_q:
- case LoongArch::BI__builtin_lasx_xvssrarni_du_q:
- case LoongArch::BI__builtin_lasx_xvssrani_d_q:
- case LoongArch::BI__builtin_lasx_xvssrani_du_q:
- case LoongArch::BI__builtin_lasx_xvsrarni_d_q:
- case LoongArch::BI__builtin_lasx_xvssrlni_d_q:
- case LoongArch::BI__builtin_lasx_xvssrlni_du_q:
- case LoongArch::BI__builtin_lasx_xvssrlrni_d_q:
- case LoongArch::BI__builtin_lasx_xvssrlrni_du_q:
- case LoongArch::BI__builtin_lasx_xvsrani_d_q:
- case LoongArch::BI__builtin_lasx_xvsrlni_d_q:
- case LoongArch::BI__builtin_lasx_xvsrlrni_d_q:
- return BuiltinConstantArgRange(TheCall, 2, 0, 127);
- case LoongArch::BI__builtin_lasx_xvseqi_b:
- case LoongArch::BI__builtin_lasx_xvseqi_h:
- case LoongArch::BI__builtin_lasx_xvseqi_w:
- case LoongArch::BI__builtin_lasx_xvseqi_d:
- case LoongArch::BI__builtin_lasx_xvslti_b:
- case LoongArch::BI__builtin_lasx_xvslti_h:
- case LoongArch::BI__builtin_lasx_xvslti_w:
- case LoongArch::BI__builtin_lasx_xvslti_d:
- case LoongArch::BI__builtin_lasx_xvslei_b:
- case LoongArch::BI__builtin_lasx_xvslei_h:
- case LoongArch::BI__builtin_lasx_xvslei_w:
- case LoongArch::BI__builtin_lasx_xvslei_d:
- case LoongArch::BI__builtin_lasx_xvmaxi_b:
- case LoongArch::BI__builtin_lasx_xvmaxi_h:
- case LoongArch::BI__builtin_lasx_xvmaxi_w:
- case LoongArch::BI__builtin_lasx_xvmaxi_d:
- case LoongArch::BI__builtin_lasx_xvmini_b:
- case LoongArch::BI__builtin_lasx_xvmini_h:
- case LoongArch::BI__builtin_lasx_xvmini_w:
- case LoongArch::BI__builtin_lasx_xvmini_d:
- return BuiltinConstantArgRange(TheCall, 1, -16, 15);
- case LoongArch::BI__builtin_lasx_xvandi_b:
- case LoongArch::BI__builtin_lasx_xvnori_b:
- case LoongArch::BI__builtin_lasx_xvori_b:
- case LoongArch::BI__builtin_lasx_xvshuf4i_b:
- case LoongArch::BI__builtin_lasx_xvshuf4i_h:
- case LoongArch::BI__builtin_lasx_xvshuf4i_w:
- case LoongArch::BI__builtin_lasx_xvxori_b:
- case LoongArch::BI__builtin_lasx_xvpermi_d:
- return BuiltinConstantArgRange(TheCall, 1, 0, 255);
- case LoongArch::BI__builtin_lasx_xvbitseli_b:
- case LoongArch::BI__builtin_lasx_xvshuf4i_d:
- case LoongArch::BI__builtin_lasx_xvextrins_b:
- case LoongArch::BI__builtin_lasx_xvextrins_h:
- case LoongArch::BI__builtin_lasx_xvextrins_w:
- case LoongArch::BI__builtin_lasx_xvextrins_d:
- case LoongArch::BI__builtin_lasx_xvpermi_q:
- case LoongArch::BI__builtin_lasx_xvpermi_w:
- return BuiltinConstantArgRange(TheCall, 2, 0, 255);
- case LoongArch::BI__builtin_lasx_xvrepl128vei_b:
- return BuiltinConstantArgRange(TheCall, 1, 0, 15);
- case LoongArch::BI__builtin_lasx_xvrepl128vei_h:
- case LoongArch::BI__builtin_lasx_xvpickve2gr_w:
- case LoongArch::BI__builtin_lasx_xvpickve2gr_wu:
- case LoongArch::BI__builtin_lasx_xvpickve_w_f:
- case LoongArch::BI__builtin_lasx_xvpickve_w:
- return BuiltinConstantArgRange(TheCall, 1, 0, 7);
- case LoongArch::BI__builtin_lasx_xvinsgr2vr_w:
- case LoongArch::BI__builtin_lasx_xvinsve0_w:
- return BuiltinConstantArgRange(TheCall, 2, 0, 7);
- case LoongArch::BI__builtin_lasx_xvrepl128vei_w:
- case LoongArch::BI__builtin_lasx_xvpickve2gr_d:
- case LoongArch::BI__builtin_lasx_xvpickve2gr_du:
- case LoongArch::BI__builtin_lasx_xvpickve_d_f:
- case LoongArch::BI__builtin_lasx_xvpickve_d:
- return BuiltinConstantArgRange(TheCall, 1, 0, 3);
- case LoongArch::BI__builtin_lasx_xvinsve0_d:
- case LoongArch::BI__builtin_lasx_xvinsgr2vr_d:
- return BuiltinConstantArgRange(TheCall, 2, 0, 3);
- case LoongArch::BI__builtin_lasx_xvstelm_b:
- return BuiltinConstantArgRange(TheCall, 2, -128, 127) ||
- BuiltinConstantArgRange(TheCall, 3, 0, 31);
- case LoongArch::BI__builtin_lasx_xvstelm_h:
- return BuiltinConstantArgRange(TheCall, 2, -256, 254) ||
- BuiltinConstantArgRange(TheCall, 3, 0, 15);
- case LoongArch::BI__builtin_lasx_xvstelm_w:
- return BuiltinConstantArgRange(TheCall, 2, -512, 508) ||
- BuiltinConstantArgRange(TheCall, 3, 0, 7);
- case LoongArch::BI__builtin_lasx_xvstelm_d:
- return BuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
- BuiltinConstantArgRange(TheCall, 3, 0, 3);
- case LoongArch::BI__builtin_lasx_xvrepl128vei_d:
- return BuiltinConstantArgRange(TheCall, 1, 0, 1);
- case LoongArch::BI__builtin_lasx_xvldrepl_b:
- case LoongArch::BI__builtin_lasx_xvld:
- return BuiltinConstantArgRange(TheCall, 1, -2048, 2047);
- case LoongArch::BI__builtin_lasx_xvldrepl_h:
- return BuiltinConstantArgRange(TheCall, 1, -2048, 2046);
- case LoongArch::BI__builtin_lasx_xvldrepl_w:
- return BuiltinConstantArgRange(TheCall, 1, -2048, 2044);
- case LoongArch::BI__builtin_lasx_xvldrepl_d:
- return BuiltinConstantArgRange(TheCall, 1, -2048, 2040);
- case LoongArch::BI__builtin_lasx_xvst:
- return BuiltinConstantArgRange(TheCall, 2, -2048, 2047);
- case LoongArch::BI__builtin_lasx_xvldi:
- return BuiltinConstantArgRange(TheCall, 0, -4096, 4095);
- case LoongArch::BI__builtin_lasx_xvrepli_b:
- case LoongArch::BI__builtin_lasx_xvrepli_h:
- case LoongArch::BI__builtin_lasx_xvrepli_w:
- case LoongArch::BI__builtin_lasx_xvrepli_d:
- return BuiltinConstantArgRange(TheCall, 0, -512, 511);
- }
- return false;
-}
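Two of the simpler LoongArch constraints above, seen from the call site; the hint values themselves are whatever the hardware defines, and only the [0, 32767] immediate range is checked here:

  void full_barrier(void) { __builtin_loongarch_dbar(0); }  // imm in [0, 32767]
  void debug_break(void)  { __builtin_loongarch_break(0); } // imm in [0, 32767]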
-
-bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID, CallExpr *TheCall) {
- return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
- CheckMipsBuiltinArgument(BuiltinID, TheCall);
-}
-
-bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
-
- if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
- BuiltinID <= Mips::BI__builtin_mips_lwx) {
- if (!TI.hasFeature("dsp"))
- return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
- }
-
- if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
- BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
- if (!TI.hasFeature("dspr2"))
- return Diag(TheCall->getBeginLoc(),
- diag::err_mips_builtin_requires_dspr2);
- }
-
- if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
- BuiltinID <= Mips::BI__builtin_msa_xori_b) {
- if (!TI.hasFeature("msa"))
- return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
- }
-
- return false;
-}
-
- // CheckMipsBuiltinArgument - Checks that the constant value passed to the
- // intrinsic is correct. The switch statement is ordered by DSP, then MSA. The
- // ordering for DSP is unspecified. MSA is ordered by the data format used
- // by the underlying instruction, i.e., df/m, df/n, and then by size.
- //
- // FIXME: The size tests here should instead be tablegen'd along with the
- // definitions from include/clang/Basic/BuiltinsMips.def.
- // FIXME: GCC is strict on signedness for some of these intrinsics; we should
- // be too.
-bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
- unsigned i = 0, l = 0, u = 0, m = 0;
- switch (BuiltinID) {
- default: return false;
- case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
- case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
- case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
- case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
- case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
- case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
- case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
- // MSA intrinsics. Instructions (which the intrinsics map to) that use the
- // df/m field.
- // These intrinsics take an unsigned 3 bit immediate.
- case Mips::BI__builtin_msa_bclri_b:
- case Mips::BI__builtin_msa_bnegi_b:
- case Mips::BI__builtin_msa_bseti_b:
- case Mips::BI__builtin_msa_sat_s_b:
- case Mips::BI__builtin_msa_sat_u_b:
- case Mips::BI__builtin_msa_slli_b:
- case Mips::BI__builtin_msa_srai_b:
- case Mips::BI__builtin_msa_srari_b:
- case Mips::BI__builtin_msa_srli_b:
- case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
- case Mips::BI__builtin_msa_binsli_b:
- case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
- // These intrinsics take an unsigned 4 bit immediate.
- case Mips::BI__builtin_msa_bclri_h:
- case Mips::BI__builtin_msa_bnegi_h:
- case Mips::BI__builtin_msa_bseti_h:
- case Mips::BI__builtin_msa_sat_s_h:
- case Mips::BI__builtin_msa_sat_u_h:
- case Mips::BI__builtin_msa_slli_h:
- case Mips::BI__builtin_msa_srai_h:
- case Mips::BI__builtin_msa_srari_h:
- case Mips::BI__builtin_msa_srli_h:
- case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
- case Mips::BI__builtin_msa_binsli_h:
- case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
- // These intrinsics take an unsigned 5 bit immediate.
- // The first block of intrinsics actually has an unsigned 5 bit field,
- // not a df/n field.
- case Mips::BI__builtin_msa_cfcmsa:
- case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
- case Mips::BI__builtin_msa_clei_u_b:
- case Mips::BI__builtin_msa_clei_u_h:
- case Mips::BI__builtin_msa_clei_u_w:
- case Mips::BI__builtin_msa_clei_u_d:
- case Mips::BI__builtin_msa_clti_u_b:
- case Mips::BI__builtin_msa_clti_u_h:
- case Mips::BI__builtin_msa_clti_u_w:
- case Mips::BI__builtin_msa_clti_u_d:
- case Mips::BI__builtin_msa_maxi_u_b:
- case Mips::BI__builtin_msa_maxi_u_h:
- case Mips::BI__builtin_msa_maxi_u_w:
- case Mips::BI__builtin_msa_maxi_u_d:
- case Mips::BI__builtin_msa_mini_u_b:
- case Mips::BI__builtin_msa_mini_u_h:
- case Mips::BI__builtin_msa_mini_u_w:
- case Mips::BI__builtin_msa_mini_u_d:
- case Mips::BI__builtin_msa_addvi_b:
- case Mips::BI__builtin_msa_addvi_h:
- case Mips::BI__builtin_msa_addvi_w:
- case Mips::BI__builtin_msa_addvi_d:
- case Mips::BI__builtin_msa_bclri_w:
- case Mips::BI__builtin_msa_bnegi_w:
- case Mips::BI__builtin_msa_bseti_w:
- case Mips::BI__builtin_msa_sat_s_w:
- case Mips::BI__builtin_msa_sat_u_w:
- case Mips::BI__builtin_msa_slli_w:
- case Mips::BI__builtin_msa_srai_w:
- case Mips::BI__builtin_msa_srari_w:
- case Mips::BI__builtin_msa_srli_w:
- case Mips::BI__builtin_msa_srlri_w:
- case Mips::BI__builtin_msa_subvi_b:
- case Mips::BI__builtin_msa_subvi_h:
- case Mips::BI__builtin_msa_subvi_w:
- case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
- case Mips::BI__builtin_msa_binsli_w:
- case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
- // These intrinsics take an unsigned 6 bit immediate.
- case Mips::BI__builtin_msa_bclri_d:
- case Mips::BI__builtin_msa_bnegi_d:
- case Mips::BI__builtin_msa_bseti_d:
- case Mips::BI__builtin_msa_sat_s_d:
- case Mips::BI__builtin_msa_sat_u_d:
- case Mips::BI__builtin_msa_slli_d:
- case Mips::BI__builtin_msa_srai_d:
- case Mips::BI__builtin_msa_srari_d:
- case Mips::BI__builtin_msa_srli_d:
- case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
- case Mips::BI__builtin_msa_binsli_d:
- case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
- // These intrinsics take a signed 5 bit immediate.
- case Mips::BI__builtin_msa_ceqi_b:
- case Mips::BI__builtin_msa_ceqi_h:
- case Mips::BI__builtin_msa_ceqi_w:
- case Mips::BI__builtin_msa_ceqi_d:
- case Mips::BI__builtin_msa_clti_s_b:
- case Mips::BI__builtin_msa_clti_s_h:
- case Mips::BI__builtin_msa_clti_s_w:
- case Mips::BI__builtin_msa_clti_s_d:
- case Mips::BI__builtin_msa_clei_s_b:
- case Mips::BI__builtin_msa_clei_s_h:
- case Mips::BI__builtin_msa_clei_s_w:
- case Mips::BI__builtin_msa_clei_s_d:
- case Mips::BI__builtin_msa_maxi_s_b:
- case Mips::BI__builtin_msa_maxi_s_h:
- case Mips::BI__builtin_msa_maxi_s_w:
- case Mips::BI__builtin_msa_maxi_s_d:
- case Mips::BI__builtin_msa_mini_s_b:
- case Mips::BI__builtin_msa_mini_s_h:
- case Mips::BI__builtin_msa_mini_s_w:
- case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
- // These intrinsics take an unsigned 8 bit immediate.
- case Mips::BI__builtin_msa_andi_b:
- case Mips::BI__builtin_msa_nori_b:
- case Mips::BI__builtin_msa_ori_b:
- case Mips::BI__builtin_msa_shf_b:
- case Mips::BI__builtin_msa_shf_h:
- case Mips::BI__builtin_msa_shf_w:
- case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
- case Mips::BI__builtin_msa_bseli_b:
- case Mips::BI__builtin_msa_bmnzi_b:
- case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
- // df/n format
- // These intrinsics take an unsigned 4 bit immediate.
- case Mips::BI__builtin_msa_copy_s_b:
- case Mips::BI__builtin_msa_copy_u_b:
- case Mips::BI__builtin_msa_insve_b:
- case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
- case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
- // These intrinsics take an unsigned 3 bit immediate.
- case Mips::BI__builtin_msa_copy_s_h:
- case Mips::BI__builtin_msa_copy_u_h:
- case Mips::BI__builtin_msa_insve_h:
- case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
- case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
- // These intrinsics take an unsigned 2 bit immediate.
- case Mips::BI__builtin_msa_copy_s_w:
- case Mips::BI__builtin_msa_copy_u_w:
- case Mips::BI__builtin_msa_insve_w:
- case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
- case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
- // These intrinsics take an unsigned 1 bit immediate.
- case Mips::BI__builtin_msa_copy_s_d:
- case Mips::BI__builtin_msa_copy_u_d:
- case Mips::BI__builtin_msa_insve_d:
- case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
- case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
- // Memory offsets and immediate loads.
- // These intrinsics take a signed 10 bit immediate.
- case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
- case Mips::BI__builtin_msa_ldi_h:
- case Mips::BI__builtin_msa_ldi_w:
- case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
- case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
- case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
- case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
- case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
- case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
- case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
- case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
- case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
- case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
- case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
- case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
- case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
- }
-
- if (!m)
- return BuiltinConstantArgRange(TheCall, i, l, u);
-
- return BuiltinConstantArgRange(TheCall, i, l, u) ||
- BuiltinConstantArgMultiple(TheCall, i, m);
-}
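For the MSA memory rows above (e.g. __builtin_msa_ld_w with i = 1, l = -2048, u = 2044, m = 4), a sketch of a call that satisfies both the range and the multiple-of-4 checks; the vector typedef is an assumption standing in for the one <msa.h> provides:

  typedef int v4i32 __attribute__((vector_size(16)));
  v4i32 load_at_8(void *p) {
    return __builtin_msa_ld_w(p, 8); // 8 is in [-2048, 2044] and 8 % 4 == 0
  }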
-
-/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
-/// advancing the pointer over the consumed characters. The decoded type is
-/// returned. If the decoded type represents a constant integer with a
-/// constraint on its value then Mask is set to that value. The type descriptors
-/// used in Str are specific to PPC MMA builtins and are documented in the file
-/// defining the PPC builtins.
-static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
- unsigned &Mask) {
- bool RequireICE = false;
- ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
- switch (*Str++) {
- case 'V':
- return Context.getVectorType(Context.UnsignedCharTy, 16,
- VectorKind::AltiVecVector);
- case 'i': {
- char *End;
- unsigned size = strtoul(Str, &End, 10);
- assert(End != Str && "Missing constant parameter constraint");
- Str = End;
- Mask = size;
- return Context.IntTy;
- }
- case 'W': {
- char *End;
- unsigned size = strtoul(Str, &End, 10);
- assert(End != Str && "Missing PowerPC MMA type size");
- Str = End;
- QualType Type;
- switch (size) {
- #define PPC_VECTOR_TYPE(typeName, Id, size) \
- case size: Type = Context.Id##Ty; break;
- #include "clang/Basic/PPCTypes.def"
- default: llvm_unreachable("Invalid PowerPC MMA vector type");
- }
- bool CheckVectorArgs = false;
- while (!CheckVectorArgs) {
- switch (*Str++) {
- case '*':
- Type = Context.getPointerType(Type);
- break;
- case 'C':
- Type = Type.withConst();
- break;
- default:
- CheckVectorArgs = true;
- --Str;
- break;
- }
- }
- return Type;
- }
- default:
- return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
- }
-}
-
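For orientation (a sketch, not part of the patch): the descriptor strings consumed here come from clang/Basic/BuiltinsPPC.def. A string shaped like "vW512*VV" (shown for illustration; consult the .def file for the real entries) would decode roughly as follows.

    // Illustration only; mirrors the cases handled by DecodePPCMMATypeFromStr.
    //   "vW512*VV" -> void f(__vector_quad *, vector unsigned char, vector unsigned char)
    //   'v'    : handled by the generic DecodeTypeStr fallback (void)
    //   'W512' : the 512-bit PPC MMA type; a trailing '*' makes it a pointer,
    //            a trailing 'C' adds const
    //   'V'    : vector unsigned char (AltiVec vector)
    //   'i15'  : int argument constrained via Mask to a constant in [0, 15]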
-static bool isPPC_64Builtin(unsigned BuiltinID) {
- // These builtins only work on PPC 64bit targets.
- switch (BuiltinID) {
- case PPC::BI__builtin_divde:
- case PPC::BI__builtin_divdeu:
- case PPC::BI__builtin_bpermd:
- case PPC::BI__builtin_pdepd:
- case PPC::BI__builtin_pextd:
- case PPC::BI__builtin_ppc_ldarx:
- case PPC::BI__builtin_ppc_stdcx:
- case PPC::BI__builtin_ppc_tdw:
- case PPC::BI__builtin_ppc_trapd:
- case PPC::BI__builtin_ppc_cmpeqb:
- case PPC::BI__builtin_ppc_setb:
- case PPC::BI__builtin_ppc_mulhd:
- case PPC::BI__builtin_ppc_mulhdu:
- case PPC::BI__builtin_ppc_maddhd:
- case PPC::BI__builtin_ppc_maddhdu:
- case PPC::BI__builtin_ppc_maddld:
- case PPC::BI__builtin_ppc_load8r:
- case PPC::BI__builtin_ppc_store8r:
- case PPC::BI__builtin_ppc_insert_exp:
- case PPC::BI__builtin_ppc_extract_sig:
- case PPC::BI__builtin_ppc_addex:
- case PPC::BI__builtin_darn:
- case PPC::BI__builtin_darn_raw:
- case PPC::BI__builtin_ppc_compare_and_swaplp:
- case PPC::BI__builtin_ppc_fetch_and_addlp:
- case PPC::BI__builtin_ppc_fetch_and_andlp:
- case PPC::BI__builtin_ppc_fetch_and_orlp:
- case PPC::BI__builtin_ppc_fetch_and_swaplp:
- return true;
- }
- return false;
-}
-
/// Returns true if the argument consists of one contiguous run of 1s with any
/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
@@ -5287,186 +3315,10 @@ bool Sema::ValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
// Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
if (Result.isShiftedMask() || (~Result).isShiftedMask())
return false;
-
- return Diag(TheCall->getBeginLoc(),
- diag::err_argument_not_contiguous_bit_field)
- << ArgNum << Arg->getSourceRange();
-}
-
-bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
- unsigned i = 0, l = 0, u = 0;
- bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
- llvm::APSInt Result;
-
- if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
- return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
- << TheCall->getSourceRange();
-
- switch (BuiltinID) {
- default: return false;
- case PPC::BI__builtin_altivec_crypto_vshasigmaw:
- case PPC::BI__builtin_altivec_crypto_vshasigmad:
- return BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 15);
- case PPC::BI__builtin_altivec_dss:
- return BuiltinConstantArgRange(TheCall, 0, 0, 3);
- case PPC::BI__builtin_tbegin:
- case PPC::BI__builtin_tend:
- return BuiltinConstantArgRange(TheCall, 0, 0, 1);
- case PPC::BI__builtin_tsr:
- return BuiltinConstantArgRange(TheCall, 0, 0, 7);
- case PPC::BI__builtin_tabortwc:
- case PPC::BI__builtin_tabortdc:
- return BuiltinConstantArgRange(TheCall, 0, 0, 31);
- case PPC::BI__builtin_tabortwci:
- case PPC::BI__builtin_tabortdci:
- return BuiltinConstantArgRange(TheCall, 0, 0, 31) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 31);
- // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
- // __builtin_(un)pack_longdouble are available only if long double uses IBM
- // extended double representation.
- case PPC::BI__builtin_unpack_longdouble:
- if (BuiltinConstantArgRange(TheCall, 1, 0, 1))
- return true;
- [[fallthrough]];
- case PPC::BI__builtin_pack_longdouble:
- if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
- return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
- << "ibmlongdouble";
- return false;
- case PPC::BI__builtin_altivec_dst:
- case PPC::BI__builtin_altivec_dstt:
- case PPC::BI__builtin_altivec_dstst:
- case PPC::BI__builtin_altivec_dststt:
- return BuiltinConstantArgRange(TheCall, 2, 0, 3);
- case PPC::BI__builtin_vsx_xxpermdi:
- case PPC::BI__builtin_vsx_xxsldwi:
- return BuiltinVSX(TheCall);
- case PPC::BI__builtin_unpack_vector_int128:
- return BuiltinConstantArgRange(TheCall, 1, 0, 1);
- case PPC::BI__builtin_altivec_vgnb:
- return BuiltinConstantArgRange(TheCall, 1, 2, 7);
- case PPC::BI__builtin_vsx_xxeval:
- return BuiltinConstantArgRange(TheCall, 3, 0, 255);
- case PPC::BI__builtin_altivec_vsldbi:
- return BuiltinConstantArgRange(TheCall, 2, 0, 7);
- case PPC::BI__builtin_altivec_vsrdbi:
- return BuiltinConstantArgRange(TheCall, 2, 0, 7);
- case PPC::BI__builtin_vsx_xxpermx:
- return BuiltinConstantArgRange(TheCall, 3, 0, 7);
- case PPC::BI__builtin_ppc_tw:
- case PPC::BI__builtin_ppc_tdw:
- return BuiltinConstantArgRange(TheCall, 2, 1, 31);
- case PPC::BI__builtin_ppc_cmprb:
- return BuiltinConstantArgRange(TheCall, 0, 0, 1);
- // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
- // be a constant that represents a contiguous bit field.
- case PPC::BI__builtin_ppc_rlwnm:
- return ValueIsRunOfOnes(TheCall, 2);
- case PPC::BI__builtin_ppc_rlwimi:
- return BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
- ValueIsRunOfOnes(TheCall, 3);
- case PPC::BI__builtin_ppc_rldimi:
- return BuiltinConstantArgRange(TheCall, 2, 0, 63) ||
- ValueIsRunOfOnes(TheCall, 3);
- case PPC::BI__builtin_ppc_addex: {
- if (BuiltinConstantArgRange(TheCall, 2, 0, 3))
- return true;
- // Output warning for reserved values 1 to 3.
- int ArgValue =
- TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
- if (ArgValue != 0)
- Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
- << ArgValue;
- return false;
- }
- case PPC::BI__builtin_ppc_mtfsb0:
- case PPC::BI__builtin_ppc_mtfsb1:
- return BuiltinConstantArgRange(TheCall, 0, 0, 31);
- case PPC::BI__builtin_ppc_mtfsf:
- return BuiltinConstantArgRange(TheCall, 0, 0, 255);
- case PPC::BI__builtin_ppc_mtfsfi:
- return BuiltinConstantArgRange(TheCall, 0, 0, 7) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 15);
- case PPC::BI__builtin_ppc_alignx:
- return BuiltinConstantArgPower2(TheCall, 0);
- case PPC::BI__builtin_ppc_rdlam:
- return ValueIsRunOfOnes(TheCall, 2);
- case PPC::BI__builtin_vsx_ldrmb:
- case PPC::BI__builtin_vsx_strmb:
- return BuiltinConstantArgRange(TheCall, 1, 1, 16);
- case PPC::BI__builtin_altivec_vcntmbb:
- case PPC::BI__builtin_altivec_vcntmbh:
- case PPC::BI__builtin_altivec_vcntmbw:
- case PPC::BI__builtin_altivec_vcntmbd:
- return BuiltinConstantArgRange(TheCall, 1, 0, 1);
- case PPC::BI__builtin_vsx_xxgenpcvbm:
- case PPC::BI__builtin_vsx_xxgenpcvhm:
- case PPC::BI__builtin_vsx_xxgenpcvwm:
- case PPC::BI__builtin_vsx_xxgenpcvdm:
- return BuiltinConstantArgRange(TheCall, 1, 0, 3);
- case PPC::BI__builtin_ppc_test_data_class: {
- // Check if the first argument of the __builtin_ppc_test_data_class call is
- // valid. The argument must be 'float' or 'double' or '__float128'.
- QualType ArgType = TheCall->getArg(0)->getType();
- if (ArgType != QualType(Context.FloatTy) &&
- ArgType != QualType(Context.DoubleTy) &&
- ArgType != QualType(Context.Float128Ty))
- return Diag(TheCall->getBeginLoc(),
- diag::err_ppc_invalid_test_data_class_type);
- return BuiltinConstantArgRange(TheCall, 1, 0, 127);
- }
- case PPC::BI__builtin_ppc_maxfe:
- case PPC::BI__builtin_ppc_minfe:
- case PPC::BI__builtin_ppc_maxfl:
- case PPC::BI__builtin_ppc_minfl:
- case PPC::BI__builtin_ppc_maxfs:
- case PPC::BI__builtin_ppc_minfs: {
- if (Context.getTargetInfo().getTriple().isOSAIX() &&
- (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
- BuiltinID == PPC::BI__builtin_ppc_minfe))
- return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
- << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
- << false << Context.getTargetInfo().getTriple().str();
- // Argument type should be exact.
- QualType ArgType = QualType(Context.LongDoubleTy);
- if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
- BuiltinID == PPC::BI__builtin_ppc_minfl)
- ArgType = QualType(Context.DoubleTy);
- else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
- BuiltinID == PPC::BI__builtin_ppc_minfs)
- ArgType = QualType(Context.FloatTy);
- for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
- if (TheCall->getArg(I)->getType() != ArgType)
- return Diag(TheCall->getBeginLoc(),
- diag::err_typecheck_convert_incompatible)
- << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
- return false;
- }
-#define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
- case PPC::BI__builtin_##Name: \
- return BuiltinPPCMMACall(TheCall, BuiltinID, Types);
-#include "clang/Basic/BuiltinsPPC.def"
- }
- return BuiltinConstantArgRange(TheCall, i, l, u);
-}
-
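To make the checks above concrete (illustrative only; the target assumptions are noted in each comment), out-of-range immediates and 64-bit-only builtins on 32-bit targets are rejected:

    // Hypothetical user code; diagnostic wording paraphrased.
    void f(void) {
      __builtin_ppc_mtfsb0(32);       // rejected: the constant must be in the range [0, 31]
    }
    long long g(long long a, long long b) {
      return __builtin_divde(a, b);   // on a 32-bit PPC target: rejected as a builtin that
                                      // is only available on 64-bit targets
    }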
-// Check if the given type is a non-pointer PPC MMA type. This function is used
-// in Sema to prevent invalid uses of restricted PPC MMA types.
-bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
- if (Type->isPointerType() || Type->isArrayType())
- return false;
-
- QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
-#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
- if (false
-#include "clang/Basic/PPCTypes.def"
- ) {
- Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
- return true;
- }
- return false;
+
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_argument_not_contiguous_bit_field)
+ << ArgNum << Arg->getSourceRange();
}
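As a brief aside (not part of the patch), the "contiguous run of 1s" rule enforced here accepts masks whose set bits form a single block, possibly wrapping from LSB to MSB, and diagnoses everything else. For example, with __builtin_ppc_rlwnm (argument types assumed to be unsigned int):

    // Mask values chosen for illustration.
    unsigned rot(unsigned x, unsigned s) {
      unsigned ok  = __builtin_ppc_rlwnm(x, s, 0xFF0000FF); // wraps around: one run, accepted
      unsigned bad = __builtin_ppc_rlwnm(x, s, 0x0F0F0000); // two separate runs: diagnosed as
                                                            // not a contiguous bit field
      return ok + bad;
    }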
// Helper function for CheckHLSLBuiltinFunctionCall
@@ -5691,179 +3543,6 @@ bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return false;
}
-bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
- // position of memory order and scope arguments in the builtin
- unsigned OrderIndex, ScopeIndex;
- switch (BuiltinID) {
- case AMDGPU::BI__builtin_amdgcn_get_fpenv:
- case AMDGPU::BI__builtin_amdgcn_set_fpenv:
- return false;
- case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
- case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
- case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
- case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
- OrderIndex = 2;
- ScopeIndex = 3;
- break;
- case AMDGPU::BI__builtin_amdgcn_fence:
- OrderIndex = 0;
- ScopeIndex = 1;
- break;
- default:
- return false;
- }
-
- ExprResult Arg = TheCall->getArg(OrderIndex);
- auto ArgExpr = Arg.get();
- Expr::EvalResult ArgResult;
-
- if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
- return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
- << ArgExpr->getType();
- auto Ord = ArgResult.Val.getInt().getZExtValue();
-
-  // Check validity of the memory ordering as per the C11 / C++11 memory model.
-  // Only the fence builtin needs this check; atomic dec/inc allow all memory orders.
- if (!llvm::isValidAtomicOrderingCABI(Ord))
- return Diag(ArgExpr->getBeginLoc(),
- diag::warn_atomic_op_has_invalid_memory_order)
- << 0 << ArgExpr->getSourceRange();
- switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
- case llvm::AtomicOrderingCABI::relaxed:
- case llvm::AtomicOrderingCABI::consume:
- if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
- return Diag(ArgExpr->getBeginLoc(),
- diag::warn_atomic_op_has_invalid_memory_order)
- << 0 << ArgExpr->getSourceRange();
- break;
- case llvm::AtomicOrderingCABI::acquire:
- case llvm::AtomicOrderingCABI::release:
- case llvm::AtomicOrderingCABI::acq_rel:
- case llvm::AtomicOrderingCABI::seq_cst:
- break;
- }
-
- Arg = TheCall->getArg(ScopeIndex);
- ArgExpr = Arg.get();
- Expr::EvalResult ArgResult1;
- // Check that sync scope is a constant literal
- if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
- return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
- << ArgExpr->getType();
-
- return false;
-}
-
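For context (a sketch only, assuming an amdgcn target; the scope strings shown are plausible examples and target-defined): the order argument must be a valid C ABI atomic ordering, relaxed/consume are additionally rejected for the fence builtin, and the scope must be a constant expression.

    // Hypothetical user code.
    void fences(void) {
      __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup"); // accepted
      __builtin_amdgcn_fence(__ATOMIC_RELAXED, "agent");     // diagnosed: invalid memory
                                                             // order for a fence
    }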
-bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
- if (BuiltinID == SystemZ::BI__builtin_tabort) {
- Expr *Arg = TheCall->getArg(0);
- if (std::optional<llvm::APSInt> AbortCode =
- Arg->getIntegerConstantExpr(Context))
- if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
- return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
- << Arg->getSourceRange();
- }
-
- // For intrinsics which take an immediate value as part of the instruction,
- // range check them here.
- unsigned i = 0, l = 0, u = 0;
- switch (BuiltinID) {
- default: return false;
- case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_verimb:
- case SystemZ::BI__builtin_s390_verimh:
- case SystemZ::BI__builtin_s390_verimf:
- case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
- case SystemZ::BI__builtin_s390_vfaeb:
- case SystemZ::BI__builtin_s390_vfaeh:
- case SystemZ::BI__builtin_s390_vfaef:
- case SystemZ::BI__builtin_s390_vfaebs:
- case SystemZ::BI__builtin_s390_vfaehs:
- case SystemZ::BI__builtin_s390_vfaefs:
- case SystemZ::BI__builtin_s390_vfaezb:
- case SystemZ::BI__builtin_s390_vfaezh:
- case SystemZ::BI__builtin_s390_vfaezf:
- case SystemZ::BI__builtin_s390_vfaezbs:
- case SystemZ::BI__builtin_s390_vfaezhs:
- case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vfisb:
- case SystemZ::BI__builtin_s390_vfidb:
- return BuiltinConstantArgRange(TheCall, 1, 0, 15) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 15);
- case SystemZ::BI__builtin_s390_vftcisb:
- case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
- case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vstrcb:
- case SystemZ::BI__builtin_s390_vstrch:
- case SystemZ::BI__builtin_s390_vstrcf:
- case SystemZ::BI__builtin_s390_vstrczb:
- case SystemZ::BI__builtin_s390_vstrczh:
- case SystemZ::BI__builtin_s390_vstrczf:
- case SystemZ::BI__builtin_s390_vstrcbs:
- case SystemZ::BI__builtin_s390_vstrchs:
- case SystemZ::BI__builtin_s390_vstrcfs:
- case SystemZ::BI__builtin_s390_vstrczbs:
- case SystemZ::BI__builtin_s390_vstrczhs:
- case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vfminsb:
- case SystemZ::BI__builtin_s390_vfmaxsb:
- case SystemZ::BI__builtin_s390_vfmindb:
- case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
- case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
- case SystemZ::BI__builtin_s390_vclfnhs:
- case SystemZ::BI__builtin_s390_vclfnls:
- case SystemZ::BI__builtin_s390_vcfn:
- case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
- case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
- }
- return BuiltinConstantArgRange(TheCall, i, l, u);
-}
-
-bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_ref_null_extern:
- return BuiltinWasmRefNullExtern(TheCall);
- case WebAssembly::BI__builtin_wasm_ref_null_func:
- return BuiltinWasmRefNullFunc(TheCall);
- case WebAssembly::BI__builtin_wasm_table_get:
- return BuiltinWasmTableGet(TheCall);
- case WebAssembly::BI__builtin_wasm_table_set:
- return BuiltinWasmTableSet(TheCall);
- case WebAssembly::BI__builtin_wasm_table_size:
- return BuiltinWasmTableSize(TheCall);
- case WebAssembly::BI__builtin_wasm_table_grow:
- return BuiltinWasmTableGrow(TheCall);
- case WebAssembly::BI__builtin_wasm_table_fill:
- return BuiltinWasmTableFill(TheCall);
- case WebAssembly::BI__builtin_wasm_table_copy:
- return BuiltinWasmTableCopy(TheCall);
- }
-
- return false;
-}
-
-bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
- switch (BuiltinID) {
- case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
- case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
- case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
- case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
- return checkArgCountAtMost(TheCall, 3);
- }
-
- return false;
-}
-
 /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
@@ -6091,40 +3770,6 @@ static void CheckNonNullArguments(Sema &S,
}
}
-// 16 byte ByVal alignment not due to a vector member is not honoured by XL
-// on AIX. Emit a warning here that users are generating binary incompatible
-// code to be safe.
-// Here we try to get information about the alignment of the struct member
-// from the struct passed to the caller function. We only warn when the struct
-// is passed byval, hence the series of checks and early returns if we are a not
-// passing a struct byval.
-void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
- const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens());
- if (!ICE)
- return;
-
- const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
- if (!DR)
- return;
-
- const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl());
- if (!PD || !PD->getType()->isRecordType())
- return;
-
- QualType ArgType = Arg->getType();
- for (const FieldDecl *FD :
- ArgType->castAs<RecordType>()->getDecl()->fields()) {
- if (const auto *AA = FD->getAttr<AlignedAttr>()) {
- CharUnits Alignment =
- Context.toCharUnitsFromBits(AA->getAlignment(Context));
- if (Alignment.getQuantity() == 16) {
- Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
- Diag(Loc, diag::note_misaligned_member_used_here) << PD;
- }
- }
- }
-}
-
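To illustrate the warning above (sketch only; the struct and function names are made up), a byval struct whose member carries an explicit 16-byte alignment that does not come from a vector type triggers the XL-compatibility warning on AIX:

    // Hypothetical user code on an AIX target.
    struct S {
      double d __attribute__((aligned(16)));  // 16-byte aligned member, not a vector
    };
    void callee(struct S s);                  // S is passed byval
    void caller(struct S s) { callee(s); }    // warning on the field, note on the parameter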
/// Warn if a pointer or reference argument passed to a function points to an
/// object that is less aligned than the parameter. This can happen when
/// creating a typedef with a lower alignment than the original type and then
@@ -6241,7 +3886,7 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
FDecl->hasLinkage() &&
FDecl->getFormalLinkage() != Linkage::Internal &&
CallType == VariadicDoesNotApply)
- checkAIXMemberAlignment((Arg->getExprLoc()), Arg);
+ PPC().checkAIXMemberAlignment((Arg->getExprLoc()), Arg);
QualType ParamTy = Proto->getParamType(ArgIdx);
if (ParamTy->isSizelessVectorType())
@@ -6277,10 +3922,10 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
bool IsCalleeStreamingCompatible =
ExtInfo.AArch64SMEAttributes &
FunctionType::SME_PStateSMCompatibleMask;
- ArmStreamingType CallerFnType = getArmStreamingFnType(CallerFD);
+ SemaARM::ArmStreamingType CallerFnType = getArmStreamingFnType(CallerFD);
if (!IsCalleeStreamingCompatible &&
- (CallerFnType == ArmStreamingCompatible ||
- ((CallerFnType == ArmStreaming) ^ IsCalleeStreaming))) {
+ (CallerFnType == SemaARM::ArmStreamingCompatible ||
+ ((CallerFnType == SemaARM::ArmStreaming) ^ IsCalleeStreaming))) {
if (IsScalableArg)
Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
<< /*IsArg=*/true;
@@ -7136,35 +4781,6 @@ static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
return false;
}
-bool Sema::BuiltinWasmRefNullExtern(CallExpr *TheCall) {
- if (TheCall->getNumArgs() != 0)
- return true;
-
- TheCall->setType(Context.getWebAssemblyExternrefType());
-
- return false;
-}
-
-bool Sema::BuiltinWasmRefNullFunc(CallExpr *TheCall) {
- if (TheCall->getNumArgs() != 0) {
- Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
- << 0 /*function call*/ << /*expected*/ 0 << TheCall->getNumArgs()
- << /*is non object*/ 0;
- return true;
- }
-
- // This custom type checking code ensures that the nodes are as expected
-  // so that the necessary builtin can be generated later on.
- QualType Pointee = Context.getFunctionType(Context.VoidTy, {}, {});
- QualType Type = Context.getPointerType(Pointee);
- Pointee = Context.getAddrSpaceQualType(Pointee, LangAS::wasm_funcref);
- Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
- Context.getPointerType(Pointee));
- TheCall->setType(Type);
-
- return false;
-}
-
/// We have a call to a function like __sync_fetch_and_add, which is an
/// overloaded function based on the pointer type of its first argument.
/// The main BuildCallExpr routines have already promoted the types of
@@ -8040,55 +5656,6 @@ bool Sema::BuiltinComplex(CallExpr *TheCall) {
return false;
}
-// Customized Sema Checking for VSX builtins that have the following signature:
-// vector [...] builtinName(vector [...], vector [...], const int);
-// Which takes the same type of vectors (any legal vector type) for the first
-// two arguments and takes compile time constant for the third argument.
-// Example builtins are :
-// vector double vec_xxpermdi(vector double, vector double, int);
-// vector short vec_xxsldwi(vector short, vector short, int);
-bool Sema::BuiltinVSX(CallExpr *TheCall) {
- unsigned ExpectedNumArgs = 3;
- if (checkArgCount(TheCall, ExpectedNumArgs))
- return true;
-
- // Check the third argument is a compile time constant
- if (!TheCall->getArg(2)->isIntegerConstantExpr(Context))
- return Diag(TheCall->getBeginLoc(),
- diag::err_vsx_builtin_nonconstant_argument)
- << 3 /* argument index */ << TheCall->getDirectCallee()
- << SourceRange(TheCall->getArg(2)->getBeginLoc(),
- TheCall->getArg(2)->getEndLoc());
-
- QualType Arg1Ty = TheCall->getArg(0)->getType();
- QualType Arg2Ty = TheCall->getArg(1)->getType();
-
- // Check the type of argument 1 and argument 2 are vectors.
- SourceLocation BuiltinLoc = TheCall->getBeginLoc();
- if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
- (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
- return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
- << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ false
- << SourceRange(TheCall->getArg(0)->getBeginLoc(),
- TheCall->getArg(1)->getEndLoc());
- }
-
- // Check the first two arguments are the same type.
- if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
- return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
- << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ false
- << SourceRange(TheCall->getArg(0)->getBeginLoc(),
- TheCall->getArg(1)->getEndLoc());
- }
-
- // When default clang type checking is turned off and the customized type
-  // checking is used, the return type of the function must be explicitly
- // set. Otherwise it is _Bool by default.
- TheCall->setType(Arg1Ty);
-
- return false;
-}
-
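As an illustration of the custom checking above (not part of the patch; assumes a POWER target with Altivec/VSX enabled), the selector must be a compile-time constant and the two vector operands must have the same type, which also becomes the result type:

    // Hypothetical user code.
    vector double permute(vector double a, vector double b, int n) {
      vector double ok  = __builtin_vsx_xxpermdi(a, b, 1); // constant selector: accepted
      vector double bad = __builtin_vsx_xxpermdi(a, b, n); // non-constant selector: rejected
      return ok;
    }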
/// BuiltinShuffleVector - Handle __builtin_shufflevector.
// This is declared to take (...), so we have to check everything.
ExprResult Sema::BuiltinShuffleVector(CallExpr *TheCall) {
@@ -8606,360 +6173,6 @@ bool Sema::BuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
<< Arg->getSourceRange();
}
-/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
-bool Sema::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
- if (BuiltinID == AArch64::BI__builtin_arm_irg) {
- if (checkArgCount(TheCall, 2))
- return true;
- Expr *Arg0 = TheCall->getArg(0);
- Expr *Arg1 = TheCall->getArg(1);
-
- ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
- if (FirstArg.isInvalid())
- return true;
- QualType FirstArgType = FirstArg.get()->getType();
- if (!FirstArgType->isAnyPointerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
- TheCall->setArg(0, FirstArg.get());
-
- ExprResult SecArg = DefaultLvalueConversion(Arg1);
- if (SecArg.isInvalid())
- return true;
- QualType SecArgType = SecArg.get()->getType();
- if (!SecArgType->isIntegerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
- << "second" << SecArgType << Arg1->getSourceRange();
-
- // Derive the return type from the pointer argument.
- TheCall->setType(FirstArgType);
- return false;
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_addg) {
- if (checkArgCount(TheCall, 2))
- return true;
-
- Expr *Arg0 = TheCall->getArg(0);
- ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
- if (FirstArg.isInvalid())
- return true;
- QualType FirstArgType = FirstArg.get()->getType();
- if (!FirstArgType->isAnyPointerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
- TheCall->setArg(0, FirstArg.get());
-
- // Derive the return type from the pointer argument.
- TheCall->setType(FirstArgType);
-
-    // Second arg must be a constant in the range [0,15]
- return BuiltinConstantArgRange(TheCall, 1, 0, 15);
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
- if (checkArgCount(TheCall, 2))
- return true;
- Expr *Arg0 = TheCall->getArg(0);
- Expr *Arg1 = TheCall->getArg(1);
-
- ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
- if (FirstArg.isInvalid())
- return true;
- QualType FirstArgType = FirstArg.get()->getType();
- if (!FirstArgType->isAnyPointerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
-
- QualType SecArgType = Arg1->getType();
- if (!SecArgType->isIntegerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
- << "second" << SecArgType << Arg1->getSourceRange();
- TheCall->setType(Context.IntTy);
- return false;
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
- BuiltinID == AArch64::BI__builtin_arm_stg) {
- if (checkArgCount(TheCall, 1))
- return true;
- Expr *Arg0 = TheCall->getArg(0);
- ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
- if (FirstArg.isInvalid())
- return true;
-
- QualType FirstArgType = FirstArg.get()->getType();
- if (!FirstArgType->isAnyPointerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
- TheCall->setArg(0, FirstArg.get());
-
- // Derive the return type from the pointer argument.
- if (BuiltinID == AArch64::BI__builtin_arm_ldg)
- TheCall->setType(FirstArgType);
- return false;
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_subp) {
- Expr *ArgA = TheCall->getArg(0);
- Expr *ArgB = TheCall->getArg(1);
-
- ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
- ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);
-
- if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
- return true;
-
- QualType ArgTypeA = ArgExprA.get()->getType();
- QualType ArgTypeB = ArgExprB.get()->getType();
-
- auto isNull = [&] (Expr *E) -> bool {
- return E->isNullPointerConstant(
- Context, Expr::NPC_ValueDependentIsNotNull); };
-
- // argument should be either a pointer or null
- if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
- << "first" << ArgTypeA << ArgA->getSourceRange();
-
- if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
- << "second" << ArgTypeB << ArgB->getSourceRange();
-
- // Ensure Pointee types are compatible
- if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
- ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
- QualType pointeeA = ArgTypeA->getPointeeType();
- QualType pointeeB = ArgTypeB->getPointeeType();
- if (!Context.typesAreCompatible(
- Context.getCanonicalType(pointeeA).getUnqualifiedType(),
- Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
- return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
- << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
- << ArgB->getSourceRange();
- }
- }
-
-    // at least one argument should be a pointer type
- if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
- return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
- << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
-
- if (isNull(ArgA)) // adopt type of the other pointer
- ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);
-
- if (isNull(ArgB))
- ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);
-
- TheCall->setArg(0, ArgExprA.get());
- TheCall->setArg(1, ArgExprB.get());
- TheCall->setType(Context.LongLongTy);
- return false;
- }
- assert(false && "Unhandled ARM MTE intrinsic");
- return true;
-}
-
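Illustrative usage of the MTE builtins checked above (assumptions: an AArch64 target with the memory tagging extension, and the raw builtins called directly rather than through arm_acle.h):

    // Hypothetical user code.
    void *tagging(void *p, void *q) {
      void *t = __builtin_arm_irg(p, 0);      // result type is derived from the pointer arg
      t = __builtin_arm_addg(t, 15);          // second arg must be a constant in [0, 15]
      long long d = __builtin_arm_subp(t, q); // result is long long; pointee types must be
      (void)d;                                // compatible and at least one arg a pointer
      return t;
    }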
-/// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
-/// TheCall is an ARM/AArch64 special register string literal.
-bool Sema::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
- int ArgNum, unsigned ExpectedFieldNum,
- bool AllowName) {
- bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_wsr64 ||
- BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsrp ||
- BuiltinID == ARM::BI__builtin_arm_wsr ||
- BuiltinID == ARM::BI__builtin_arm_wsrp;
- bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
- BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
- BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp ||
- BuiltinID == AArch64::BI__builtin_arm_wsr ||
- BuiltinID == AArch64::BI__builtin_arm_wsrp;
- assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
-
- // We can't check the value of a dependent argument.
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- return false;
-
- // Check if the argument is a string literal.
- if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
- return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
- << Arg->getSourceRange();
-
- // Check the type of special register given.
- StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
- SmallVector<StringRef, 6> Fields;
- Reg.split(Fields, ":");
-
- if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
- return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
- << Arg->getSourceRange();
-
- // If the string is the name of a register then we cannot check that it is
-  // valid here, but if the string is of one of the forms described in ACLE then we
- // can check that the supplied fields are integers and within the valid
- // ranges.
- if (Fields.size() > 1) {
- bool FiveFields = Fields.size() == 5;
-
- bool ValidString = true;
- if (IsARMBuiltin) {
- ValidString &= Fields[0].starts_with_insensitive("cp") ||
- Fields[0].starts_with_insensitive("p");
- if (ValidString)
- Fields[0] = Fields[0].drop_front(
- Fields[0].starts_with_insensitive("cp") ? 2 : 1);
-
- ValidString &= Fields[2].starts_with_insensitive("c");
- if (ValidString)
- Fields[2] = Fields[2].drop_front(1);
-
- if (FiveFields) {
- ValidString &= Fields[3].starts_with_insensitive("c");
- if (ValidString)
- Fields[3] = Fields[3].drop_front(1);
- }
- }
-
- SmallVector<int, 5> Ranges;
- if (FiveFields)
- Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
- else
- Ranges.append({15, 7, 15});
-
- for (unsigned i=0; i<Fields.size(); ++i) {
- int IntField;
- ValidString &= !Fields[i].getAsInteger(10, IntField);
- ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
- }
-
- if (!ValidString)
- return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
- << Arg->getSourceRange();
- } else if (IsAArch64Builtin && Fields.size() == 1) {
- // This code validates writes to PSTATE registers.
-
- // Not a write.
- if (TheCall->getNumArgs() != 2)
- return false;
-
- // The 128-bit system register accesses do not touch PSTATE.
- if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
- BuiltinID == AArch64::BI__builtin_arm_wsr128)
- return false;
-
- // These are the named PSTATE accesses using "MSR (immediate)" instructions,
- // along with the upper limit on the immediates allowed.
- auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
- .CaseLower("spsel", 15)
- .CaseLower("daifclr", 15)
- .CaseLower("daifset", 15)
- .CaseLower("pan", 15)
- .CaseLower("uao", 15)
- .CaseLower("dit", 15)
- .CaseLower("ssbs", 15)
- .CaseLower("tco", 15)
- .CaseLower("allint", 1)
- .CaseLower("pm", 1)
- .Default(std::nullopt);
-
- // If this is not a named PSTATE, just continue without validating, as this
- // will be lowered to an "MSR (register)" instruction directly
- if (!MaxLimit)
- return false;
-
- // Here we only allow constants in the range for that pstate, as required by
- // the ACLE.
- //
- // While clang also accepts the names of system registers in its ACLE
-  // intrinsics, we disallow that for the PSTATE names used in MSR (immediate),
-  // because the value written via a register is different from the value used
-  // as an immediate to achieve the same effect. E.g., for the instruction `msr tco,
- // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
- // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
- //
- // If a programmer wants to codegen the MSR (register) form of `msr tco,
- // xN`, they can still do so by specifying the register using five
- // colon-separated numbers in a string.
- return BuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
- }
-
- return false;
-}
-
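For reference (a sketch, not part of the patch; the register names are taken from the PSTATE table above), single-name PSTATE writes are range-checked against the per-register limit, while the colon-separated form bypasses that check and selects MSR (register):

    // Hypothetical AArch64 user code.
    void pstate(void) {
      __builtin_arm_wsr("daifclr", 2);    // named PSTATE: immediate checked against [0, 15]
      __builtin_arm_wsr("pm", 7);         // rejected: "pm" only allows immediates in [0, 1]
      __builtin_arm_wsr("1:3:7:4:5", 1);  // five-field register form: no immediate-range
                                          // check, lowered to MSR (register)
    }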
-/// BuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
-/// Emit an error and return true on failure; return false on success.
-/// TypeStr is a string containing the type descriptor of the value returned by
-/// the builtin and the descriptors of the expected type of the arguments.
-bool Sema::BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
- const char *TypeStr) {
-
- assert((TypeStr[0] != '\0') &&
- "Invalid types in PPC MMA builtin declaration");
-
- unsigned Mask = 0;
- unsigned ArgNum = 0;
-
- // The first type in TypeStr is the type of the value returned by the
- // builtin. So we first read that type and change the type of TheCall.
- QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
- TheCall->setType(type);
-
- while (*TypeStr != '\0') {
- Mask = 0;
- QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
- if (ArgNum >= TheCall->getNumArgs()) {
- ArgNum++;
- break;
- }
-
- Expr *Arg = TheCall->getArg(ArgNum);
- QualType PassedType = Arg->getType();
- QualType StrippedRVType = PassedType.getCanonicalType();
-
- // Strip Restrict/Volatile qualifiers.
- if (StrippedRVType.isRestrictQualified() ||
- StrippedRVType.isVolatileQualified())
- StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();
-
- // The only case where the argument type and expected type are allowed to
- // mismatch is if the argument type is a non-void pointer (or array) and
- // expected type is a void pointer.
- if (StrippedRVType != ExpectedType)
- if (!(ExpectedType->isVoidPointerType() &&
- (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
- return Diag(Arg->getBeginLoc(),
- diag::err_typecheck_convert_incompatible)
- << PassedType << ExpectedType << 1 << 0 << 0;
-
- // If the value of the Mask is not 0, we have a constraint in the size of
- // the integer argument so here we ensure the argument is a constant that
- // is in the valid range.
- if (Mask != 0 && BuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
- return true;
-
- ArgNum++;
- }
-
-  // If we exited early from the previous loop, there are still types left to
-  // read from TypeStr. Read them all so that we know the expected number of
-  // arguments for TheCall, and can emit a better error message if the counts
-  // do not match.
- while (*TypeStr != '\0') {
- (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
- ArgNum++;
- }
- if (checkArgCount(TheCall, ArgNum))
- return true;
-
- return false;
-}
-
/// BuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
/// This checks that the target supports __builtin_longjmp and
/// that val is a constant 1.
@@ -12689,7 +9902,7 @@ Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
  // PPC MMA non-pointer types are not allowed as a return type. Checking the type
  // here prevents the user from using a PPC MMA type as a trailing return type.
if (Context.getTargetInfo().getTriple().isPPC64())
- CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
+ PPC().CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
}
/// Check for comparisons of floating-point values using == and !=. Issue a
@@ -18367,168 +15580,6 @@ ExprResult Sema::BuiltinMatrixColumnMajorStore(CallExpr *TheCall,
return CallResult;
}
-/// Checks the argument at the given index is a WebAssembly table and if it
-/// is, sets ElTy to the element type.
-static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
- QualType &ElTy) {
- Expr *ArgExpr = E->getArg(ArgIndex);
- const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
- if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
- return S.Diag(ArgExpr->getBeginLoc(),
- diag::err_wasm_builtin_arg_must_be_table_type)
- << ArgIndex + 1 << ArgExpr->getSourceRange();
- }
- ElTy = ATy->getElementType();
- return false;
-}
-
-/// Checks the argument at the given index is an integer.
-static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
- unsigned ArgIndex) {
- Expr *ArgExpr = E->getArg(ArgIndex);
- if (!ArgExpr->getType()->isIntegerType()) {
- return S.Diag(ArgExpr->getBeginLoc(),
- diag::err_wasm_builtin_arg_must_be_integer_type)
- << ArgIndex + 1 << ArgExpr->getSourceRange();
- }
- return false;
-}
-
-/// Check that the first argument is a WebAssembly table, and the second
-/// is an index to use as index into the table.
-bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
- if (checkArgCount(TheCall, 2))
- return true;
-
- QualType ElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
- return true;
-
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
- return true;
-
- // If all is well, we set the type of TheCall to be the type of the
- // element of the table.
- // i.e. a table.get on an externref table has type externref,
- // or whatever the type of the table element is.
- TheCall->setType(ElTy);
-
- return false;
-}
-
-/// Check that the first argument is a WebAssembly table, the second is
-/// an index to use as index into the table and the third is the reference
-/// type to set into the table.
-bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
- if (checkArgCount(TheCall, 3))
- return true;
-
- QualType ElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
- return true;
-
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
- return true;
-
- if (!Context.hasSameType(ElTy, TheCall->getArg(2)->getType()))
- return true;
-
- return false;
-}
-
-/// Check that the argument is a WebAssembly table.
-bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
- if (checkArgCount(TheCall, 1))
- return true;
-
- QualType ElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
- return true;
-
- return false;
-}
-
-/// Check that the first argument is a WebAssembly table, the second is the
-/// value to use for new elements (of a type matching the table type), the
-/// third value is an integer.
-bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
- if (checkArgCount(TheCall, 3))
- return true;
-
- QualType ElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
- return true;
-
- Expr *NewElemArg = TheCall->getArg(1);
- if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
- return Diag(NewElemArg->getBeginLoc(),
- diag::err_wasm_builtin_arg_must_match_table_element_type)
- << 2 << 1 << NewElemArg->getSourceRange();
- }
-
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 2))
- return true;
-
- return false;
-}
-
-/// Check that the first argument is a WebAssembly table, the second is an
-/// integer, the third is the value to use to fill the table (of a type
-/// matching the table type), and the fourth is an integer.
-bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
- if (checkArgCount(TheCall, 4))
- return true;
-
- QualType ElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
- return true;
-
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
- return true;
-
- Expr *NewElemArg = TheCall->getArg(2);
- if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
- return Diag(NewElemArg->getBeginLoc(),
- diag::err_wasm_builtin_arg_must_match_table_element_type)
- << 3 << 1 << NewElemArg->getSourceRange();
- }
-
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 3))
- return true;
-
- return false;
-}
-
-/// Check that the first argument is a WebAssembly table, the second is also a
-/// WebAssembly table (of the same element type), and the third to fifth
-/// arguments are integers.
-bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) {
- if (checkArgCount(TheCall, 5))
- return true;
-
- QualType XElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, XElTy))
- return true;
-
- QualType YElTy;
- if (CheckWasmBuiltinArgIsTable(*this, TheCall, 1, YElTy))
- return true;
-
- Expr *TableYArg = TheCall->getArg(1);
- if (!Context.hasSameType(XElTy, YElTy)) {
- return Diag(TableYArg->getBeginLoc(),
- diag::err_wasm_builtin_arg_must_match_table_element_type)
- << 2 << 1 << TableYArg->getSourceRange();
- }
-
- for (int I = 2; I <= 4; I++) {
- if (CheckWasmBuiltinArgIsInteger(*this, TheCall, I))
- return true;
- }
-
- return false;
-}
-
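Finally, a small illustrative example of what the table builtins above enforce (assumes a wasm target with reference types enabled; the zero-length-array spelling of a table mirrors the existing builtins-wasm tests and should be treated as an assumption here):

    // Hypothetical user code for wasm32.
    static __externref_t table[0];                  // a WebAssembly table of externref
    __externref_t get(int i) {
      return __builtin_wasm_table_get(table, i);    // call type becomes the element type
    }
    void set(int i, __externref_t v) {
      __builtin_wasm_table_set(table, i, v);        // third arg must match the element type
    }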
/// \brief Enforce the bounds of a TCB
/// CheckTCBEnforcement - Enforces that every function in a named TCB only
/// directly calls other functions in the same TCB as marked by the enforce_tcb
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index b5c3a27ab06e9..373809751c9c3 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -50,7 +50,9 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPPC.h"
#include "clang/Sema/SemaRISCV.h"
+#include "clang/Sema/SemaWasm.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/SmallString.h"
@@ -2930,9 +2932,9 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
else if (const auto *UA = dyn_cast<UuidAttr>(Attr))
NewAttr = S.mergeUuidAttr(D, *UA, UA->getGuid(), UA->getGuidDecl());
else if (const auto *IMA = dyn_cast<WebAssemblyImportModuleAttr>(Attr))
- NewAttr = S.mergeImportModuleAttr(D, *IMA);
+ NewAttr = S.Wasm().mergeImportModuleAttr(D, *IMA);
else if (const auto *INA = dyn_cast<WebAssemblyImportNameAttr>(Attr))
- NewAttr = S.mergeImportNameAttr(D, *INA);
+ NewAttr = S.Wasm().mergeImportNameAttr(D, *INA);
else if (const auto *TCBA = dyn_cast<EnforceTCBAttr>(Attr))
NewAttr = S.mergeEnforceTCBAttr(D, *TCBA);
else if (const auto *TCBLA = dyn_cast<EnforceTCBLeafAttr>(Attr))
@@ -8896,7 +8898,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
// PPC MMA non-pointer types are not allowed as non-local variable types.
if (Context.getTargetInfo().getTriple().isPPC64() &&
!NewVD->isLocalVarDecl() &&
- CheckPPCMMAType(T, NewVD->getLocation())) {
+ PPC().CheckPPCMMAType(T, NewVD->getLocation())) {
NewVD->setInvalidDecl();
return;
}
@@ -12057,7 +12059,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// PPC MMA non-pointer types are not allowed as function return types.
if (Context.getTargetInfo().getTriple().isPPC64() &&
- CheckPPCMMAType(NewFD->getReturnType(), NewFD->getLocation())) {
+ PPC().CheckPPCMMAType(NewFD->getReturnType(), NewFD->getLocation())) {
NewFD->setInvalidDecl();
}
@@ -15349,7 +15351,7 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
// PPC MMA non-pointer types are not allowed as function argument types.
if (Context.getTargetInfo().getTriple().isPPC64() &&
- CheckPPCMMAType(New->getOriginalType(), New->getLocation())) {
+ PPC().CheckPPCMMAType(New->getOriginalType(), New->getLocation())) {
New->setInvalidDecl();
}
@@ -18764,7 +18766,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
// PPC MMA non-pointer types are not allowed as field types.
if (Context.getTargetInfo().getTriple().isPPC64() &&
- CheckPPCMMAType(T, NewFD->getLocation()))
+ PPC().CheckPPCMMAType(T, NewFD->getLocation()))
NewFD->setInvalidDecl();
NewFD->setAccess(AS);
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index ca5938083917f..cf0c69bc5193c 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -40,10 +40,12 @@
#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaAMDGPU.h"
#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaHLSL.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaWasm.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/StringExtras.h"
@@ -74,13 +76,13 @@ namespace AttributeLangSupport {
/// isFunctionOrMethod - Return true if the given decl has function
/// type (function or function-typed variable) or an Objective-C
/// method.
-static bool isFunctionOrMethod(const Decl *D) {
+bool Sema::isFunctionOrMethod(const Decl *D) {
return (D->getFunctionType() != nullptr) || isa<ObjCMethodDecl>(D);
}
/// Return true if the given decl has function type (function or
/// function-typed variable) or an Objective-C method or a block.
-static bool isFunctionOrMethodOrBlock(const Decl *D) {
+bool Sema::isFunctionOrMethodOrBlock(const Decl *D) {
return isFunctionOrMethod(D) || isa<BlockDecl>(D);
}
@@ -207,52 +209,7 @@ static unsigned getNumAttributeArgs(const ParsedAttr &AL) {
return AL.getNumArgs() + AL.hasParsedType();
}
-/// A helper function to provide Attribute Location for the Attr types
-/// AND the ParsedAttr.
-template <typename AttrInfo>
-static std::enable_if_t<std::is_base_of_v<Attr, AttrInfo>, SourceLocation>
-getAttrLoc(const AttrInfo &AL) {
- return AL.getLocation();
-}
-static SourceLocation getAttrLoc(const ParsedAttr &AL) { return AL.getLoc(); }
-
-/// If Expr is a valid integer constant, get the value of the integer
-/// expression and return success or failure. May output an error.
-///
-/// Negative argument is implicitly converted to unsigned, unless
-/// \p StrictlyUnsigned is true.
-template <typename AttrInfo>
-static bool checkUInt32Argument(Sema &S, const AttrInfo &AI, const Expr *Expr,
- uint32_t &Val, unsigned Idx = UINT_MAX,
- bool StrictlyUnsigned = false) {
- std::optional<llvm::APSInt> I = llvm::APSInt(32);
- if (Expr->isTypeDependent() ||
- !(I = Expr->getIntegerConstantExpr(S.Context))) {
- if (Idx != UINT_MAX)
- S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
- << &AI << Idx << AANT_ArgumentIntegerConstant
- << Expr->getSourceRange();
- else
- S.Diag(getAttrLoc(AI), diag::err_attribute_argument_type)
- << &AI << AANT_ArgumentIntegerConstant << Expr->getSourceRange();
- return false;
- }
-
- if (!I->isIntN(32)) {
- S.Diag(Expr->getExprLoc(), diag::err_ice_too_large)
- << toString(*I, 10, false) << 32 << /* Unsigned */ 1;
- return false;
- }
-
- if (StrictlyUnsigned && I->isSigned() && I->isNegative()) {
- S.Diag(getAttrLoc(AI), diag::err_attribute_requires_positive_integer)
- << &AI << /*non-negative*/ 1;
- return false;
- }
-
- Val = (uint32_t)I->getZExtValue();
- return true;
-}
+SourceLocation Sema::getAttrLoc(const ParsedAttr &AL) { return AL.getLoc(); }
/// Wrapper around checkUInt32Argument, with an extra check to be sure
/// that the result will fit into a regular (signed) int. All args have the same
@@ -261,7 +218,7 @@ template <typename AttrInfo>
static bool checkPositiveIntArgument(Sema &S, const AttrInfo &AI, const Expr *Expr,
int &Val, unsigned Idx = UINT_MAX) {
uint32_t UVal;
- if (!checkUInt32Argument(S, AI, Expr, UVal, Idx))
+ if (!S.checkUInt32Argument(AI, Expr, UVal, Idx))
return false;
if (UVal > (uint32_t)std::numeric_limits<int>::max()) {
@@ -310,7 +267,7 @@ template <typename AttrInfo>
static bool checkFunctionOrMethodParameterIndex(
Sema &S, const Decl *D, const AttrInfo &AI, unsigned AttrArgNum,
const Expr *IdxExpr, ParamIdx &Idx, bool CanIndexImplicitThis = false) {
- assert(isFunctionOrMethodOrBlock(D));
+ assert(S.isFunctionOrMethodOrBlock(D));
// In C++ the implicit 'this' function parameter also counts.
// Parameters are counted from one.
@@ -323,7 +280,7 @@ static bool checkFunctionOrMethodParameterIndex(
std::optional<llvm::APSInt> IdxInt;
if (IdxExpr->isTypeDependent() ||
!(IdxInt = IdxExpr->getIntegerConstantExpr(S.Context))) {
- S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
+ S.Diag(S.getAttrLoc(AI), diag::err_attribute_argument_n_type)
<< &AI << AttrArgNum << AANT_ArgumentIntegerConstant
<< IdxExpr->getSourceRange();
return false;
@@ -331,13 +288,13 @@ static bool checkFunctionOrMethodParameterIndex(
unsigned IdxSource = IdxInt->getLimitedValue(UINT_MAX);
if (IdxSource < 1 || (!IV && IdxSource > NumParams)) {
- S.Diag(getAttrLoc(AI), diag::err_attribute_argument_out_of_bounds)
+ S.Diag(S.getAttrLoc(AI), diag::err_attribute_argument_out_of_bounds)
<< &AI << AttrArgNum << IdxExpr->getSourceRange();
return false;
}
if (HasImplicitThisParam && !CanIndexImplicitThis) {
if (IdxSource == 1) {
- S.Diag(getAttrLoc(AI), diag::err_attribute_invalid_implicit_this_argument)
+ S.Diag(S.getAttrLoc(AI), diag::err_attribute_invalid_implicit_this_argument)
<< &AI << IdxExpr->getSourceRange();
return false;
}
@@ -845,7 +802,7 @@ static void handleAllocSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 2))
return;
- assert(isFunctionOrMethod(D) && hasFunctionProto(D));
+ assert(S.isFunctionOrMethod(D) && hasFunctionProto(D));
QualType RetTy = getFunctionOrMethodResultType(D);
if (!RetTy->isPointerType()) {
@@ -1100,7 +1057,7 @@ static void handleDiagnoseAsBuiltinAttr(Sema &S, Decl *D,
const Expr *IndexExpr = AL.getArgAsExpr(I);
uint32_t Index;
- if (!checkUInt32Argument(S, AL, IndexExpr, Index, I + 1, false))
+ if (!S.checkUInt32Argument(AL, IndexExpr, Index, I + 1, false))
return;
if (Index > DeclFD->getNumParams()) {
@@ -1210,7 +1167,7 @@ static void handlePassObjectSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *E = AL.getArgAsExpr(0);
uint32_t Type;
- if (!checkUInt32Argument(S, AL, E, Type, /*Idx=*/1))
+ if (!S.checkUInt32Argument(AL, E, Type, /*Idx=*/1))
return;
// pass_object_size's argument is passed in as the second argument of
@@ -2295,7 +2252,7 @@ static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// The checking path for 'noreturn' and 'analyzer_noreturn' are different
// because 'analyzer_noreturn' does not impact the type.
- if (!isFunctionOrMethodOrBlock(D)) {
+ if (!S.isFunctionOrMethodOrBlock(D)) {
ValueDecl *VD = dyn_cast<ValueDecl>(D);
if (!VD || (!VD->getType()->isBlockPointerType() &&
!VD->getType()->isFunctionPointerType())) {
@@ -2399,7 +2356,7 @@ static void handleConstructorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
if (AL.getNumArgs() &&
- !checkUInt32Argument(S, AL, AL.getArgAsExpr(0), priority))
+ !S.checkUInt32Argument(AL, AL.getArgAsExpr(0), priority))
return;
D->addAttr(::new (S.Context) ConstructorAttr(S.Context, AL, priority));
@@ -2408,7 +2365,7 @@ static void handleConstructorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleDestructorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t priority = DestructorAttr::DefaultPriority;
if (AL.getNumArgs() &&
- !checkUInt32Argument(S, AL, AL.getArgAsExpr(0), priority))
+ !S.checkUInt32Argument(AL, AL.getArgAsExpr(0), priority))
return;
D->addAttr(::new (S.Context) DestructorAttr(S.Context, AL, priority));
@@ -3297,7 +3254,7 @@ static void handleWorkGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t WGSize[3];
for (unsigned i = 0; i < 3; ++i) {
const Expr *E = AL.getArgAsExpr(i);
- if (!checkUInt32Argument(S, AL, E, WGSize[i], i,
+ if (!S.checkUInt32Argument(AL, E, WGSize[i], i,
/*StrictlyUnsigned=*/true))
return;
if (WGSize[i] == 0) {
@@ -3321,7 +3278,7 @@ static void handleWorkGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleSubGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t SGSize;
const Expr *E = AL.getArgAsExpr(0);
- if (!checkUInt32Argument(S, AL, E, SGSize))
+ if (!S.checkUInt32Argument(AL, E, SGSize))
return;
if (SGSize == 0) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_is_zero)
@@ -3790,7 +3747,7 @@ static void handleTargetClonesAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleMinVectorWidthAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *E = AL.getArgAsExpr(0);
uint32_t VecWidth;
- if (!checkUInt32Argument(S, AL, E, VecWidth)) {
+ if (!S.checkUInt32Argument(AL, E, VecWidth)) {
AL.setInvalid();
return;
}
@@ -4003,7 +3960,7 @@ static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *E = AL.getArgAsExpr(0);
uint32_t prioritynum;
- if (!checkUInt32Argument(S, AL, E, prioritynum)) {
+ if (!S.checkUInt32Argument(AL, E, prioritynum)) {
AL.setInvalid();
return;
}
@@ -4102,7 +4059,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// checks for the 2nd argument
Expr *IdxExpr = AL.getArgAsExpr(1);
uint32_t Idx;
- if (!checkUInt32Argument(S, AL, IdxExpr, Idx, 2))
+ if (!S.checkUInt32Argument(AL, IdxExpr, Idx, 2))
return;
if (Idx < 1 || Idx > NumArgs) {
@@ -4139,7 +4096,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// check the 3rd argument
Expr *FirstArgExpr = AL.getArgAsExpr(2);
uint32_t FirstArg;
- if (!checkUInt32Argument(S, AL, FirstArgExpr, FirstArg, 3))
+ if (!S.checkUInt32Argument(AL, FirstArgExpr, FirstArg, 3))
return;
  // FirstArg == 0 is always valid.
@@ -4227,7 +4184,7 @@ static void handleCallbackAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *IdxExpr = AL.getArgAsExpr(I);
// If the expression is not parseable as an int32_t we have a problem.
- if (!checkUInt32Argument(S, AL, IdxExpr, (uint32_t &)ArgIdx, I + 1,
+ if (!S.checkUInt32Argument(AL, IdxExpr, (uint32_t &)ArgIdx, I + 1,
false)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
<< AL << (I + 1) << IdxExpr->getSourceRange();
@@ -5727,7 +5684,7 @@ bool Sema::CheckRegparmAttr(const ParsedAttr &AL, unsigned &numParams) {
uint32_t NP;
Expr *NumParamsExpr = AL.getArgAsExpr(0);
- if (!checkUInt32Argument(*this, AL, NumParamsExpr, NP)) {
+ if (!checkUInt32Argument(AL, NumParamsExpr, NP)) {
AL.setInvalid();
return true;
}
@@ -5923,14 +5880,14 @@ static void handleXRayLogArgsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handlePatchableFunctionEntryAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
uint32_t Count = 0, Offset = 0;
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(0), Count, 0, true))
+ if (!S.checkUInt32Argument(AL, AL.getArgAsExpr(0), Count, 0, true))
return;
if (AL.getNumArgs() == 2) {
Expr *Arg = AL.getArgAsExpr(1);
- if (!checkUInt32Argument(S, AL, Arg, Offset, 1, true))
+ if (!S.checkUInt32Argument(AL, Arg, Offset, 1, true))
return;
if (Count < Offset) {
- S.Diag(getAttrLoc(AL), diag::err_attribute_argument_out_of_range)
+ S.Diag(S.getAttrLoc(AL), diag::err_attribute_argument_out_of_range)
<< &AL << 0 << Count << Arg->getBeginLoc();
return;
}
@@ -6776,7 +6733,7 @@ static void handleSwiftAsyncError(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
Expr *IdxExpr = AL.getArgAsExpr(1);
- if (!checkUInt32Argument(S, AL, IdxExpr, ParamIdx))
+ if (!S.checkUInt32Argument(AL, IdxExpr, ParamIdx))
return;
break;
}
@@ -7278,7 +7235,7 @@ static void handleHLSLNumThreadsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
uint32_t X;
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(0), X))
+ if (!S.checkUInt32Argument(AL, AL.getArgAsExpr(0), X))
return;
if (X > 1024) {
S.Diag(AL.getArgAsExpr(0)->getExprLoc(),
@@ -7286,7 +7243,7 @@ static void handleHLSLNumThreadsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
uint32_t Y;
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(1), Y))
+ if (!S.checkUInt32Argument(AL, AL.getArgAsExpr(1), Y))
return;
if (Y > 1024) {
S.Diag(AL.getArgAsExpr(1)->getExprLoc(),
@@ -7294,7 +7251,7 @@ static void handleHLSLNumThreadsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
uint32_t Z;
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(2), Z))
+ if (!S.checkUInt32Argument(AL, AL.getArgAsExpr(2), Z))
return;
if (Z > ZMax) {
S.Diag(AL.getArgAsExpr(2)->getExprLoc(),
@@ -7348,10 +7305,10 @@ static void handleHLSLPackOffsetAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
uint32_t SubComponent;
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(0), SubComponent))
+ if (!S.checkUInt32Argument(AL, AL.getArgAsExpr(0), SubComponent))
return;
uint32_t Component;
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(1), Component))
+ if (!S.checkUInt32Argument(AL, AL.getArgAsExpr(1), Component))
return;
QualType T = cast<VarDecl>(D)->getType().getCanonicalType();
@@ -7602,7 +7559,7 @@ static void handleARMInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// MSP430 'interrupt' attribute is applied to
// a function with no parameters and void return type.
- if (!isFunctionOrMethod(D)) {
+ if (!S.isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
<< AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
return;
@@ -7675,7 +7632,7 @@ static void handleMipsInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// e) The attribute itself must either have no argument or one of the
// valid interrupt types, see [MipsInterruptDocs].
- if (!isFunctionOrMethod(D)) {
+ if (!S.isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
<< AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
return;
@@ -7748,7 +7705,7 @@ static void handleAnyX86InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// c) Must take 1 or 2 arguments.
// d) The 1st argument must be a pointer.
// e) The 2nd argument (if any) must be an unsigned integer.
- if (!isFunctionOrMethod(D) || !hasFunctionProto(D) || isInstanceMethod(D) ||
+ if (!S.isFunctionOrMethod(D) || !hasFunctionProto(D) || isInstanceMethod(D) ||
CXXMethodDecl::isStaticOverloadedOperator(
cast<NamedDecl>(D)->getDeclName().getCXXOverloadedOperator())) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
@@ -7807,7 +7764,7 @@ static void handleAnyX86InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleAVRInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!isFunctionOrMethod(D)) {
+ if (!S.isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
<< AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
@@ -7820,7 +7777,7 @@ static void handleAVRInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleAVRSignalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!isFunctionOrMethod(D)) {
+ if (!S.isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
<< AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
@@ -7875,102 +7832,6 @@ BTFDeclTagAttr *Sema::mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL) {
return ::new (Context) BTFDeclTagAttr(Context, AL, AL.getBTFDeclTag());
}
-static void handleWebAssemblyExportNameAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- if (!isFunctionOrMethod(D)) {
- S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
- return;
- }
-
- auto *FD = cast<FunctionDecl>(D);
- if (FD->isThisDeclarationADefinition()) {
- S.Diag(D->getLocation(), diag::err_alias_is_definition) << FD << 0;
- return;
- }
-
- StringRef Str;
- SourceLocation ArgLoc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
- return;
-
- D->addAttr(::new (S.Context) WebAssemblyExportNameAttr(S.Context, AL, Str));
- D->addAttr(UsedAttr::CreateImplicit(S.Context));
-}
-
-WebAssemblyImportModuleAttr *
-Sema::mergeImportModuleAttr(Decl *D, const WebAssemblyImportModuleAttr &AL) {
- auto *FD = cast<FunctionDecl>(D);
-
- if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
- if (ExistingAttr->getImportModule() == AL.getImportModule())
- return nullptr;
- Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 0
- << ExistingAttr->getImportModule() << AL.getImportModule();
- Diag(AL.getLoc(), diag::note_previous_attribute);
- return nullptr;
- }
- if (FD->hasBody()) {
- Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
- return nullptr;
- }
- return ::new (Context) WebAssemblyImportModuleAttr(Context, AL,
- AL.getImportModule());
-}
-
-WebAssemblyImportNameAttr *
-Sema::mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL) {
- auto *FD = cast<FunctionDecl>(D);
-
- if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportNameAttr>()) {
- if (ExistingAttr->getImportName() == AL.getImportName())
- return nullptr;
- Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 1
- << ExistingAttr->getImportName() << AL.getImportName();
- Diag(AL.getLoc(), diag::note_previous_attribute);
- return nullptr;
- }
- if (FD->hasBody()) {
- Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
- return nullptr;
- }
- return ::new (Context) WebAssemblyImportNameAttr(Context, AL,
- AL.getImportName());
-}
-
-static void
-handleWebAssemblyImportModuleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- auto *FD = cast<FunctionDecl>(D);
-
- StringRef Str;
- SourceLocation ArgLoc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
- return;
- if (FD->hasBody()) {
- S.Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
- return;
- }
-
- FD->addAttr(::new (S.Context)
- WebAssemblyImportModuleAttr(S.Context, AL, Str));
-}
-
-static void
-handleWebAssemblyImportNameAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- auto *FD = cast<FunctionDecl>(D);
-
- StringRef Str;
- SourceLocation ArgLoc;
- if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
- return;
- if (FD->hasBody()) {
- S.Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
- return;
- }
-
- FD->addAttr(::new (S.Context) WebAssemblyImportNameAttr(S.Context, AL, Str));
-}
-
static void handleRISCVInterruptAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
// Warn about repeated attributes.
@@ -8059,200 +7920,6 @@ static void handleInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
-static bool
-checkAMDGPUFlatWorkGroupSizeArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr,
- const AMDGPUFlatWorkGroupSizeAttr &Attr) {
- // Accept template arguments for now as they depend on something else.
- // We'll get to check them when they eventually get instantiated.
- if (MinExpr->isValueDependent() || MaxExpr->isValueDependent())
- return false;
-
- uint32_t Min = 0;
- if (!checkUInt32Argument(S, Attr, MinExpr, Min, 0))
- return true;
-
- uint32_t Max = 0;
- if (!checkUInt32Argument(S, Attr, MaxExpr, Max, 1))
- return true;
-
- if (Min == 0 && Max != 0) {
- S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
- << &Attr << 0;
- return true;
- }
- if (Min > Max) {
- S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
- << &Attr << 1;
- return true;
- }
-
- return false;
-}
-
-AMDGPUFlatWorkGroupSizeAttr *
-Sema::CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI,
- Expr *MinExpr, Expr *MaxExpr) {
- AMDGPUFlatWorkGroupSizeAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
-
- if (checkAMDGPUFlatWorkGroupSizeArguments(*this, MinExpr, MaxExpr, TmpAttr))
- return nullptr;
- return ::new (Context)
- AMDGPUFlatWorkGroupSizeAttr(Context, CI, MinExpr, MaxExpr);
-}
-
-void Sema::addAMDGPUFlatWorkGroupSizeAttr(Decl *D,
- const AttributeCommonInfo &CI,
- Expr *MinExpr, Expr *MaxExpr) {
- if (auto *Attr = CreateAMDGPUFlatWorkGroupSizeAttr(CI, MinExpr, MaxExpr))
- D->addAttr(Attr);
-}
-
-static void handleAMDGPUFlatWorkGroupSizeAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- Expr *MinExpr = AL.getArgAsExpr(0);
- Expr *MaxExpr = AL.getArgAsExpr(1);
-
- S.addAMDGPUFlatWorkGroupSizeAttr(D, AL, MinExpr, MaxExpr);
-}
-
-static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr,
- Expr *MaxExpr,
- const AMDGPUWavesPerEUAttr &Attr) {
- if (S.DiagnoseUnexpandedParameterPack(MinExpr) ||
- (MaxExpr && S.DiagnoseUnexpandedParameterPack(MaxExpr)))
- return true;
-
- // Accept template arguments for now as they depend on something else.
- // We'll get to check them when they eventually get instantiated.
- if (MinExpr->isValueDependent() || (MaxExpr && MaxExpr->isValueDependent()))
- return false;
-
- uint32_t Min = 0;
- if (!checkUInt32Argument(S, Attr, MinExpr, Min, 0))
- return true;
-
- uint32_t Max = 0;
- if (MaxExpr && !checkUInt32Argument(S, Attr, MaxExpr, Max, 1))
- return true;
-
- if (Min == 0 && Max != 0) {
- S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
- << &Attr << 0;
- return true;
- }
- if (Max != 0 && Min > Max) {
- S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
- << &Attr << 1;
- return true;
- }
-
- return false;
-}
-
-AMDGPUWavesPerEUAttr *
-Sema::CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI, Expr *MinExpr,
- Expr *MaxExpr) {
- AMDGPUWavesPerEUAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
-
- if (checkAMDGPUWavesPerEUArguments(*this, MinExpr, MaxExpr, TmpAttr))
- return nullptr;
-
- return ::new (Context) AMDGPUWavesPerEUAttr(Context, CI, MinExpr, MaxExpr);
-}
-
-void Sema::addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
- Expr *MinExpr, Expr *MaxExpr) {
- if (auto *Attr = CreateAMDGPUWavesPerEUAttr(CI, MinExpr, MaxExpr))
- D->addAttr(Attr);
-}
-
-static void handleAMDGPUWavesPerEUAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 2))
- return;
-
- Expr *MinExpr = AL.getArgAsExpr(0);
- Expr *MaxExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
-
- S.addAMDGPUWavesPerEUAttr(D, AL, MinExpr, MaxExpr);
-}
-
-static void handleAMDGPUNumSGPRAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- uint32_t NumSGPR = 0;
- Expr *NumSGPRExpr = AL.getArgAsExpr(0);
- if (!checkUInt32Argument(S, AL, NumSGPRExpr, NumSGPR))
- return;
-
- D->addAttr(::new (S.Context) AMDGPUNumSGPRAttr(S.Context, AL, NumSGPR));
-}
-
-static void handleAMDGPUNumVGPRAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- uint32_t NumVGPR = 0;
- Expr *NumVGPRExpr = AL.getArgAsExpr(0);
- if (!checkUInt32Argument(S, AL, NumVGPRExpr, NumVGPR))
- return;
-
- D->addAttr(::new (S.Context) AMDGPUNumVGPRAttr(S.Context, AL, NumVGPR));
-}
-
-static bool
-checkAMDGPUMaxNumWorkGroupsArguments(Sema &S, Expr *XExpr, Expr *YExpr,
- Expr *ZExpr,
- const AMDGPUMaxNumWorkGroupsAttr &Attr) {
- if (S.DiagnoseUnexpandedParameterPack(XExpr) ||
- (YExpr && S.DiagnoseUnexpandedParameterPack(YExpr)) ||
- (ZExpr && S.DiagnoseUnexpandedParameterPack(ZExpr)))
- return true;
-
- // Accept template arguments for now as they depend on something else.
- // We'll get to check them when they eventually get instantiated.
- if (XExpr->isValueDependent() || (YExpr && YExpr->isValueDependent()) ||
- (ZExpr && ZExpr->isValueDependent()))
- return false;
-
- uint32_t NumWG = 0;
- Expr *Exprs[3] = {XExpr, YExpr, ZExpr};
- for (int i = 0; i < 3; i++) {
- if (Exprs[i]) {
- if (!checkUInt32Argument(S, Attr, Exprs[i], NumWG, i,
- /*StrictlyUnsigned=*/true))
- return true;
- if (NumWG == 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
- << &Attr << Exprs[i]->getSourceRange();
- return true;
- }
- }
- }
-
- return false;
-}
-
-AMDGPUMaxNumWorkGroupsAttr *
-Sema::CreateAMDGPUMaxNumWorkGroupsAttr(const AttributeCommonInfo &CI,
- Expr *XExpr, Expr *YExpr, Expr *ZExpr) {
- AMDGPUMaxNumWorkGroupsAttr TmpAttr(Context, CI, XExpr, YExpr, ZExpr);
-
- if (checkAMDGPUMaxNumWorkGroupsArguments(*this, XExpr, YExpr, ZExpr, TmpAttr))
- return nullptr;
-
- return ::new (Context)
- AMDGPUMaxNumWorkGroupsAttr(Context, CI, XExpr, YExpr, ZExpr);
-}
-
-void Sema::addAMDGPUMaxNumWorkGroupsAttr(Decl *D, const AttributeCommonInfo &CI,
- Expr *XExpr, Expr *YExpr,
- Expr *ZExpr) {
- if (auto *Attr = CreateAMDGPUMaxNumWorkGroupsAttr(CI, XExpr, YExpr, ZExpr))
- D->addAttr(Attr);
-}
-
-static void handleAMDGPUMaxNumWorkGroupsAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- Expr *YExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
- Expr *ZExpr = (AL.getNumArgs() > 2) ? AL.getArgAsExpr(2) : nullptr;
- S.addAMDGPUMaxNumWorkGroupsAttr(D, AL, AL.getArgAsExpr(0), YExpr, ZExpr);
-}
-
static void handleX86ForceAlignArgPointerAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
// If we try to apply it to a function pointer, don't warn, but don't
@@ -8279,7 +7946,7 @@ static void handleX86ForceAlignArgPointerAttr(Sema &S, Decl *D,
static void handleLayoutVersion(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t Version;
Expr *VersionExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
- if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(0), Version))
+ if (!S.checkUInt32Argument(AL, AL.getArgAsExpr(0), Version))
return;
// TODO: Investigate what happens with the next major version of MSVC.
@@ -9327,19 +8994,19 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleDLLAttr(S, D, AL);
break;
case ParsedAttr::AT_AMDGPUFlatWorkGroupSize:
- handleAMDGPUFlatWorkGroupSizeAttr(S, D, AL);
+ S.AMDGPU().handleAMDGPUFlatWorkGroupSizeAttr(D, AL);
break;
case ParsedAttr::AT_AMDGPUWavesPerEU:
- handleAMDGPUWavesPerEUAttr(S, D, AL);
+ S.AMDGPU().handleAMDGPUWavesPerEUAttr(D, AL);
break;
case ParsedAttr::AT_AMDGPUNumSGPR:
- handleAMDGPUNumSGPRAttr(S, D, AL);
+ S.AMDGPU().handleAMDGPUNumSGPRAttr(D, AL);
break;
case ParsedAttr::AT_AMDGPUNumVGPR:
- handleAMDGPUNumVGPRAttr(S, D, AL);
+ S.AMDGPU().handleAMDGPUNumVGPRAttr(D, AL);
break;
case ParsedAttr::AT_AMDGPUMaxNumWorkGroups:
- handleAMDGPUMaxNumWorkGroupsAttr(S, D, AL);
+ S.AMDGPU().handleAMDGPUMaxNumWorkGroupsAttr(D, AL);
break;
case ParsedAttr::AT_AVRSignal:
handleAVRSignalAttr(S, D, AL);
@@ -9354,13 +9021,13 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleBTFDeclTagAttr(S, D, AL);
break;
case ParsedAttr::AT_WebAssemblyExportName:
- handleWebAssemblyExportNameAttr(S, D, AL);
+ S.Wasm().handleWebAssemblyExportNameAttr(D, AL);
break;
case ParsedAttr::AT_WebAssemblyImportModule:
- handleWebAssemblyImportModuleAttr(S, D, AL);
+ S.Wasm().handleWebAssemblyImportModuleAttr(D, AL);
break;
case ParsedAttr::AT_WebAssemblyImportName:
- handleWebAssemblyImportNameAttr(S, D, AL);
+ S.Wasm().handleWebAssemblyImportNameAttr(D, AL);
break;
case ParsedAttr::AT_IBOutlet:
handleIBOutlet(S, D, AL);
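
The hunks above all follow the same pattern: a handler drops its leading `Sema &S` parameter, moves into a target-specific component derived from SemaBase, and is reached through an accessor on Sema. A minimal sketch of that pattern, using a hypothetical SemaFoo component (the Foo names are illustrative only and not part of this patch):

#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/SemaBase.h"

namespace clang {
class SemaFoo : public SemaBase {
public:
  SemaFoo(Sema &S) : SemaBase(S) {}

  // Handlers take only the declaration and the parsed attribute; shared
  // state is reached through the SemaRef member inherited from SemaBase.
  void handleFooInterruptAttr(Decl *D, const ParsedAttr &AL);
};
} // namespace clang

// ProcessDeclAttribute then dispatches through the accessor on Sema:
//   case ParsedAttr::AT_FooInterrupt:
//     S.Foo().handleFooInterruptAttr(D, AL);
//     break;
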
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index d3e9dcb4f4399..9678ec4e0d698 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -42,6 +42,7 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaLambda.h"
#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaPPC.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/APInt.h"
@@ -931,7 +932,7 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
// PPC MMA non-pointer types are not allowed as throw expr types.
if (Ex && Context.getTargetInfo().getTriple().isPPC64())
- CheckPPCMMAType(Ex->getType(), Ex->getBeginLoc());
+ PPC().CheckPPCMMAType(Ex->getType(), Ex->getBeginLoc());
return new (Context)
CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope);
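
For context on the call above: inside a Sema member function such as BuildCXXThrow the component is reached through the accessor directly, while free functions keep going through the `Sema &` they are handed. A minimal sketch of the latter shape (the helper itself is hypothetical, mirroring the hunk above):

static void checkThrownTypeForPPC(Sema &S, Expr *Ex) {
  // From outside Sema, the PPC component is reached via the Sema reference
  // instead of a bare PPC() call.
  if (Ex && S.Context.getTargetInfo().getTriple().isPPC64())
    S.PPC().CheckPPCMMAType(Ex->getType(), Ex->getBeginLoc());
}
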
diff --git a/clang/lib/Sema/SemaHexagon.cpp b/clang/lib/Sema/SemaHexagon.cpp
new file mode 100644
index 0000000000000..e18da784fd078
--- /dev/null
+++ b/clang/lib/Sema/SemaHexagon.cpp
@@ -0,0 +1,289 @@
+//===------ SemaHexagon.cpp ------ Hexagon target-specific routines -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to Hexagon.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaHexagon.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/STLExtras.h"
+#include <cstdint>
+#include <iterator>
+
+namespace clang {
+
+SemaHexagon::SemaHexagon(Sema &S) : SemaBase(S) {}
+
+bool SemaHexagon::CheckHexagonBuiltinArgument(unsigned BuiltinID,
+                                              CallExpr *TheCall) {
+ struct ArgInfo {
+ uint8_t OpNum;
+ bool IsSigned;
+ uint8_t BitWidth;
+ uint8_t Align;
+ };
+ struct BuiltinInfo {
+ unsigned BuiltinID;
+ ArgInfo Infos[2];
+ };
+
+ static BuiltinInfo Infos[] = {
+ { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
+ { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
+ { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
+ { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} },
+ { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
+ { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
+ { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
+ { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
+ { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
+ { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
+ { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },
+
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },
+
+ { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
+ {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
+ {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
+ { 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
+ { 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
+ { 3, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
+ { 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
+ { 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
+ {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
+ {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
+ {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
+ {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
+ {{ 3, false, 1, 0 }} },
+
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B,
+ {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B,
+ {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B,
+ {{ 3, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {{ 3, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B,
+ {{ 3, false, 3, 0 }} },
+ };
+
+ // Use a dynamically initialized static to sort the table exactly once on
+ // first run.
+ static const bool SortOnce =
+ (llvm::sort(Infos,
+ [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
+ return LHS.BuiltinID < RHS.BuiltinID;
+ }),
+ true);
+ (void)SortOnce;
+
+ const BuiltinInfo *F = llvm::partition_point(
+ Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
+ if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
+ return false;
+
+ bool Error = false;
+
+ for (const ArgInfo &A : F->Infos) {
+ // Ignore empty ArgInfo elements.
+ if (A.BitWidth == 0)
+ continue;
+
+ int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
+ int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
+ if (!A.Align) {
+ Error |= SemaRef.BuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
+ } else {
+ unsigned M = 1 << A.Align;
+ Min *= M;
+ Max *= M;
+ Error |= SemaRef.BuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
+ Error |= SemaRef.BuiltinConstantArgMultiple(TheCall, A.OpNum, M);
+ }
+ }
+ return Error;
+}
+
+bool SemaHexagon::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
+}
+
+} // namespace clang
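
To make the table concrete, here is the range the loop above derives for a signed, 4-bit, align-3 operand such as operand 3 of __builtin_circ_ldd (a standalone, illustrative check, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  const bool IsSigned = true;
  const uint8_t BitWidth = 4, Align = 3;

  int32_t Min = IsSigned ? -(1 << (BitWidth - 1)) : 0;           // -8
  int32_t Max = (1 << (IsSigned ? BitWidth - 1 : BitWidth)) - 1; // 7
  const int32_t M = 1 << Align;                                  // 8

  Min *= M;
  Max *= M;
  assert(Min == -64 && Max == 56);
  // CheckHexagonBuiltinArgument therefore requires a constant in [-64, 56]
  // that is also a multiple of 8 (BuiltinConstantArgRange plus
  // BuiltinConstantArgMultiple).
  return 0;
}
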
diff --git a/clang/lib/Sema/SemaLoongArch.cpp b/clang/lib/Sema/SemaLoongArch.cpp
new file mode 100644
index 0000000000000..e373f6ba1a06c
--- /dev/null
+++ b/clang/lib/Sema/SemaLoongArch.cpp
@@ -0,0 +1,515 @@
+//===------ SemaLoongArch.cpp ---- LoongArch target-specific routines -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to LoongArch.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaLoongArch.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/Support/MathExtras.h"
+
+namespace clang {
+
+SemaLoongArch::SemaLoongArch(Sema &S) : SemaBase(S) {}
+
+bool SemaLoongArch::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ default:
+ break;
+ // Basic intrinsics.
+ case LoongArch::BI__builtin_loongarch_cacop_d:
+ case LoongArch::BI__builtin_loongarch_cacop_w: {
+ SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5));
+    SemaRef.BuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12),
+                                    llvm::maxIntN(12));
+ break;
+ }
+ case LoongArch::BI__builtin_loongarch_break:
+ case LoongArch::BI__builtin_loongarch_dbar:
+ case LoongArch::BI__builtin_loongarch_ibar:
+ case LoongArch::BI__builtin_loongarch_syscall:
+ // Check if immediate is in [0, 32767].
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 32767);
+ case LoongArch::BI__builtin_loongarch_csrrd_w:
+ case LoongArch::BI__builtin_loongarch_csrrd_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 16383);
+ case LoongArch::BI__builtin_loongarch_csrwr_w:
+ case LoongArch::BI__builtin_loongarch_csrwr_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 16383);
+ case LoongArch::BI__builtin_loongarch_csrxchg_w:
+ case LoongArch::BI__builtin_loongarch_csrxchg_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 16383);
+ case LoongArch::BI__builtin_loongarch_lddir_d:
+ case LoongArch::BI__builtin_loongarch_ldpte_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case LoongArch::BI__builtin_loongarch_movfcsr2gr:
+ case LoongArch::BI__builtin_loongarch_movgr2fcsr:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2));
+
+ // LSX intrinsics.
+ case LoongArch::BI__builtin_lsx_vbitclri_b:
+ case LoongArch::BI__builtin_lsx_vbitrevi_b:
+ case LoongArch::BI__builtin_lsx_vbitseti_b:
+ case LoongArch::BI__builtin_lsx_vsat_b:
+ case LoongArch::BI__builtin_lsx_vsat_bu:
+ case LoongArch::BI__builtin_lsx_vslli_b:
+ case LoongArch::BI__builtin_lsx_vsrai_b:
+ case LoongArch::BI__builtin_lsx_vsrari_b:
+ case LoongArch::BI__builtin_lsx_vsrli_b:
+ case LoongArch::BI__builtin_lsx_vsllwil_h_b:
+ case LoongArch::BI__builtin_lsx_vsllwil_hu_bu:
+ case LoongArch::BI__builtin_lsx_vrotri_b:
+ case LoongArch::BI__builtin_lsx_vsrlri_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lsx_vbitclri_h:
+ case LoongArch::BI__builtin_lsx_vbitrevi_h:
+ case LoongArch::BI__builtin_lsx_vbitseti_h:
+ case LoongArch::BI__builtin_lsx_vsat_h:
+ case LoongArch::BI__builtin_lsx_vsat_hu:
+ case LoongArch::BI__builtin_lsx_vslli_h:
+ case LoongArch::BI__builtin_lsx_vsrai_h:
+ case LoongArch::BI__builtin_lsx_vsrari_h:
+ case LoongArch::BI__builtin_lsx_vsrli_h:
+ case LoongArch::BI__builtin_lsx_vsllwil_w_h:
+ case LoongArch::BI__builtin_lsx_vsllwil_wu_hu:
+ case LoongArch::BI__builtin_lsx_vrotri_h:
+ case LoongArch::BI__builtin_lsx_vsrlri_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lsx_vssrarni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrarni_bu_h:
+ case LoongArch::BI__builtin_lsx_vssrani_b_h:
+ case LoongArch::BI__builtin_lsx_vssrani_bu_h:
+ case LoongArch::BI__builtin_lsx_vsrarni_b_h:
+ case LoongArch::BI__builtin_lsx_vsrlni_b_h:
+ case LoongArch::BI__builtin_lsx_vsrlrni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrlni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrlni_bu_h:
+ case LoongArch::BI__builtin_lsx_vssrlrni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrlrni_bu_h:
+ case LoongArch::BI__builtin_lsx_vsrani_b_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case LoongArch::BI__builtin_lsx_vslei_bu:
+ case LoongArch::BI__builtin_lsx_vslei_hu:
+ case LoongArch::BI__builtin_lsx_vslei_wu:
+ case LoongArch::BI__builtin_lsx_vslei_du:
+ case LoongArch::BI__builtin_lsx_vslti_bu:
+ case LoongArch::BI__builtin_lsx_vslti_hu:
+ case LoongArch::BI__builtin_lsx_vslti_wu:
+ case LoongArch::BI__builtin_lsx_vslti_du:
+ case LoongArch::BI__builtin_lsx_vmaxi_bu:
+ case LoongArch::BI__builtin_lsx_vmaxi_hu:
+ case LoongArch::BI__builtin_lsx_vmaxi_wu:
+ case LoongArch::BI__builtin_lsx_vmaxi_du:
+ case LoongArch::BI__builtin_lsx_vmini_bu:
+ case LoongArch::BI__builtin_lsx_vmini_hu:
+ case LoongArch::BI__builtin_lsx_vmini_wu:
+ case LoongArch::BI__builtin_lsx_vmini_du:
+ case LoongArch::BI__builtin_lsx_vaddi_bu:
+ case LoongArch::BI__builtin_lsx_vaddi_hu:
+ case LoongArch::BI__builtin_lsx_vaddi_wu:
+ case LoongArch::BI__builtin_lsx_vaddi_du:
+ case LoongArch::BI__builtin_lsx_vbitclri_w:
+ case LoongArch::BI__builtin_lsx_vbitrevi_w:
+ case LoongArch::BI__builtin_lsx_vbitseti_w:
+ case LoongArch::BI__builtin_lsx_vsat_w:
+ case LoongArch::BI__builtin_lsx_vsat_wu:
+ case LoongArch::BI__builtin_lsx_vslli_w:
+ case LoongArch::BI__builtin_lsx_vsrai_w:
+ case LoongArch::BI__builtin_lsx_vsrari_w:
+ case LoongArch::BI__builtin_lsx_vsrli_w:
+ case LoongArch::BI__builtin_lsx_vsllwil_d_w:
+ case LoongArch::BI__builtin_lsx_vsllwil_du_wu:
+ case LoongArch::BI__builtin_lsx_vsrlri_w:
+ case LoongArch::BI__builtin_lsx_vrotri_w:
+ case LoongArch::BI__builtin_lsx_vsubi_bu:
+ case LoongArch::BI__builtin_lsx_vsubi_hu:
+ case LoongArch::BI__builtin_lsx_vbsrl_v:
+ case LoongArch::BI__builtin_lsx_vbsll_v:
+ case LoongArch::BI__builtin_lsx_vsubi_wu:
+ case LoongArch::BI__builtin_lsx_vsubi_du:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case LoongArch::BI__builtin_lsx_vssrarni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrarni_hu_w:
+ case LoongArch::BI__builtin_lsx_vssrani_h_w:
+ case LoongArch::BI__builtin_lsx_vssrani_hu_w:
+ case LoongArch::BI__builtin_lsx_vsrarni_h_w:
+ case LoongArch::BI__builtin_lsx_vsrani_h_w:
+ case LoongArch::BI__builtin_lsx_vfrstpi_b:
+ case LoongArch::BI__builtin_lsx_vfrstpi_h:
+ case LoongArch::BI__builtin_lsx_vsrlni_h_w:
+ case LoongArch::BI__builtin_lsx_vsrlrni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrlni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrlni_hu_w:
+ case LoongArch::BI__builtin_lsx_vssrlrni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrlrni_hu_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ case LoongArch::BI__builtin_lsx_vbitclri_d:
+ case LoongArch::BI__builtin_lsx_vbitrevi_d:
+ case LoongArch::BI__builtin_lsx_vbitseti_d:
+ case LoongArch::BI__builtin_lsx_vsat_d:
+ case LoongArch::BI__builtin_lsx_vsat_du:
+ case LoongArch::BI__builtin_lsx_vslli_d:
+ case LoongArch::BI__builtin_lsx_vsrai_d:
+ case LoongArch::BI__builtin_lsx_vsrli_d:
+ case LoongArch::BI__builtin_lsx_vsrari_d:
+ case LoongArch::BI__builtin_lsx_vrotri_d:
+ case LoongArch::BI__builtin_lsx_vsrlri_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 63);
+ case LoongArch::BI__builtin_lsx_vssrarni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrarni_wu_d:
+ case LoongArch::BI__builtin_lsx_vssrani_w_d:
+ case LoongArch::BI__builtin_lsx_vssrani_wu_d:
+ case LoongArch::BI__builtin_lsx_vsrarni_w_d:
+ case LoongArch::BI__builtin_lsx_vsrlni_w_d:
+ case LoongArch::BI__builtin_lsx_vsrlrni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrlni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrlni_wu_d:
+ case LoongArch::BI__builtin_lsx_vssrlrni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrlrni_wu_d:
+ case LoongArch::BI__builtin_lsx_vsrani_w_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 63);
+ case LoongArch::BI__builtin_lsx_vssrarni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrarni_du_q:
+ case LoongArch::BI__builtin_lsx_vssrani_d_q:
+ case LoongArch::BI__builtin_lsx_vssrani_du_q:
+ case LoongArch::BI__builtin_lsx_vsrarni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrlni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrlni_du_q:
+ case LoongArch::BI__builtin_lsx_vssrlrni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrlrni_du_q:
+ case LoongArch::BI__builtin_lsx_vsrani_d_q:
+ case LoongArch::BI__builtin_lsx_vsrlrni_d_q:
+ case LoongArch::BI__builtin_lsx_vsrlni_d_q:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 127);
+ case LoongArch::BI__builtin_lsx_vseqi_b:
+ case LoongArch::BI__builtin_lsx_vseqi_h:
+ case LoongArch::BI__builtin_lsx_vseqi_w:
+ case LoongArch::BI__builtin_lsx_vseqi_d:
+ case LoongArch::BI__builtin_lsx_vslti_b:
+ case LoongArch::BI__builtin_lsx_vslti_h:
+ case LoongArch::BI__builtin_lsx_vslti_w:
+ case LoongArch::BI__builtin_lsx_vslti_d:
+ case LoongArch::BI__builtin_lsx_vslei_b:
+ case LoongArch::BI__builtin_lsx_vslei_h:
+ case LoongArch::BI__builtin_lsx_vslei_w:
+ case LoongArch::BI__builtin_lsx_vslei_d:
+ case LoongArch::BI__builtin_lsx_vmaxi_b:
+ case LoongArch::BI__builtin_lsx_vmaxi_h:
+ case LoongArch::BI__builtin_lsx_vmaxi_w:
+ case LoongArch::BI__builtin_lsx_vmaxi_d:
+ case LoongArch::BI__builtin_lsx_vmini_b:
+ case LoongArch::BI__builtin_lsx_vmini_h:
+ case LoongArch::BI__builtin_lsx_vmini_w:
+ case LoongArch::BI__builtin_lsx_vmini_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -16, 15);
+ case LoongArch::BI__builtin_lsx_vandi_b:
+ case LoongArch::BI__builtin_lsx_vnori_b:
+ case LoongArch::BI__builtin_lsx_vori_b:
+ case LoongArch::BI__builtin_lsx_vshuf4i_b:
+ case LoongArch::BI__builtin_lsx_vshuf4i_h:
+ case LoongArch::BI__builtin_lsx_vshuf4i_w:
+ case LoongArch::BI__builtin_lsx_vxori_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 255);
+ case LoongArch::BI__builtin_lsx_vbitseli_b:
+ case LoongArch::BI__builtin_lsx_vshuf4i_d:
+ case LoongArch::BI__builtin_lsx_vextrins_b:
+ case LoongArch::BI__builtin_lsx_vextrins_h:
+ case LoongArch::BI__builtin_lsx_vextrins_w:
+ case LoongArch::BI__builtin_lsx_vextrins_d:
+ case LoongArch::BI__builtin_lsx_vpermi_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 255);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_b:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_bu:
+ case LoongArch::BI__builtin_lsx_vreplvei_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_h:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_hu:
+ case LoongArch::BI__builtin_lsx_vreplvei_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_w:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_wu:
+ case LoongArch::BI__builtin_lsx_vreplvei_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 3);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_d:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_du:
+ case LoongArch::BI__builtin_lsx_vreplvei_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 1);
+ case LoongArch::BI__builtin_lsx_vstelm_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -128, 127) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 15);
+ case LoongArch::BI__builtin_lsx_vstelm_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -256, 254) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7);
+ case LoongArch::BI__builtin_lsx_vstelm_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -512, 508) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 3);
+ case LoongArch::BI__builtin_lsx_vstelm_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 1);
+ case LoongArch::BI__builtin_lsx_vldrepl_b:
+ case LoongArch::BI__builtin_lsx_vld:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2047);
+ case LoongArch::BI__builtin_lsx_vldrepl_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2046);
+ case LoongArch::BI__builtin_lsx_vldrepl_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2044);
+ case LoongArch::BI__builtin_lsx_vldrepl_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2040);
+ case LoongArch::BI__builtin_lsx_vst:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -2048, 2047);
+ case LoongArch::BI__builtin_lsx_vldi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, -4096, 4095);
+ case LoongArch::BI__builtin_lsx_vrepli_b:
+ case LoongArch::BI__builtin_lsx_vrepli_h:
+ case LoongArch::BI__builtin_lsx_vrepli_w:
+ case LoongArch::BI__builtin_lsx_vrepli_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, -512, 511);
+
+ // LASX intrinsics.
+ case LoongArch::BI__builtin_lasx_xvbitclri_b:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_b:
+ case LoongArch::BI__builtin_lasx_xvbitseti_b:
+ case LoongArch::BI__builtin_lasx_xvsat_b:
+ case LoongArch::BI__builtin_lasx_xvsat_bu:
+ case LoongArch::BI__builtin_lasx_xvslli_b:
+ case LoongArch::BI__builtin_lasx_xvsrai_b:
+ case LoongArch::BI__builtin_lasx_xvsrari_b:
+ case LoongArch::BI__builtin_lasx_xvsrli_b:
+ case LoongArch::BI__builtin_lasx_xvsllwil_h_b:
+ case LoongArch::BI__builtin_lasx_xvsllwil_hu_bu:
+ case LoongArch::BI__builtin_lasx_xvrotri_b:
+ case LoongArch::BI__builtin_lasx_xvsrlri_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvbitclri_h:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_h:
+ case LoongArch::BI__builtin_lasx_xvbitseti_h:
+ case LoongArch::BI__builtin_lasx_xvsat_h:
+ case LoongArch::BI__builtin_lasx_xvsat_hu:
+ case LoongArch::BI__builtin_lasx_xvslli_h:
+ case LoongArch::BI__builtin_lasx_xvsrai_h:
+ case LoongArch::BI__builtin_lasx_xvsrari_h:
+ case LoongArch::BI__builtin_lasx_xvsrli_h:
+ case LoongArch::BI__builtin_lasx_xvsllwil_w_h:
+ case LoongArch::BI__builtin_lasx_xvsllwil_wu_hu:
+ case LoongArch::BI__builtin_lasx_xvrotri_h:
+ case LoongArch::BI__builtin_lasx_xvsrlri_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvssrarni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrarni_bu_h:
+ case LoongArch::BI__builtin_lasx_xvssrani_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrani_bu_h:
+ case LoongArch::BI__builtin_lasx_xvsrarni_b_h:
+ case LoongArch::BI__builtin_lasx_xvsrlni_b_h:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrlni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrlni_bu_h:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_bu_h:
+ case LoongArch::BI__builtin_lasx_xvsrani_b_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvslei_bu:
+ case LoongArch::BI__builtin_lasx_xvslei_hu:
+ case LoongArch::BI__builtin_lasx_xvslei_wu:
+ case LoongArch::BI__builtin_lasx_xvslei_du:
+ case LoongArch::BI__builtin_lasx_xvslti_bu:
+ case LoongArch::BI__builtin_lasx_xvslti_hu:
+ case LoongArch::BI__builtin_lasx_xvslti_wu:
+ case LoongArch::BI__builtin_lasx_xvslti_du:
+ case LoongArch::BI__builtin_lasx_xvmaxi_bu:
+ case LoongArch::BI__builtin_lasx_xvmaxi_hu:
+ case LoongArch::BI__builtin_lasx_xvmaxi_wu:
+ case LoongArch::BI__builtin_lasx_xvmaxi_du:
+ case LoongArch::BI__builtin_lasx_xvmini_bu:
+ case LoongArch::BI__builtin_lasx_xvmini_hu:
+ case LoongArch::BI__builtin_lasx_xvmini_wu:
+ case LoongArch::BI__builtin_lasx_xvmini_du:
+ case LoongArch::BI__builtin_lasx_xvaddi_bu:
+ case LoongArch::BI__builtin_lasx_xvaddi_hu:
+ case LoongArch::BI__builtin_lasx_xvaddi_wu:
+ case LoongArch::BI__builtin_lasx_xvaddi_du:
+ case LoongArch::BI__builtin_lasx_xvbitclri_w:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_w:
+ case LoongArch::BI__builtin_lasx_xvbitseti_w:
+ case LoongArch::BI__builtin_lasx_xvsat_w:
+ case LoongArch::BI__builtin_lasx_xvsat_wu:
+ case LoongArch::BI__builtin_lasx_xvslli_w:
+ case LoongArch::BI__builtin_lasx_xvsrai_w:
+ case LoongArch::BI__builtin_lasx_xvsrari_w:
+ case LoongArch::BI__builtin_lasx_xvsrli_w:
+ case LoongArch::BI__builtin_lasx_xvsllwil_d_w:
+ case LoongArch::BI__builtin_lasx_xvsllwil_du_wu:
+ case LoongArch::BI__builtin_lasx_xvsrlri_w:
+ case LoongArch::BI__builtin_lasx_xvrotri_w:
+ case LoongArch::BI__builtin_lasx_xvsubi_bu:
+ case LoongArch::BI__builtin_lasx_xvsubi_hu:
+ case LoongArch::BI__builtin_lasx_xvsubi_wu:
+ case LoongArch::BI__builtin_lasx_xvsubi_du:
+ case LoongArch::BI__builtin_lasx_xvbsrl_v:
+ case LoongArch::BI__builtin_lasx_xvbsll_v:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case LoongArch::BI__builtin_lasx_xvssrarni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrarni_hu_w:
+ case LoongArch::BI__builtin_lasx_xvssrani_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrani_hu_w:
+ case LoongArch::BI__builtin_lasx_xvsrarni_h_w:
+ case LoongArch::BI__builtin_lasx_xvsrani_h_w:
+ case LoongArch::BI__builtin_lasx_xvfrstpi_b:
+ case LoongArch::BI__builtin_lasx_xvfrstpi_h:
+ case LoongArch::BI__builtin_lasx_xvsrlni_h_w:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrlni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrlni_hu_w:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_hu_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ case LoongArch::BI__builtin_lasx_xvbitclri_d:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_d:
+ case LoongArch::BI__builtin_lasx_xvbitseti_d:
+ case LoongArch::BI__builtin_lasx_xvsat_d:
+ case LoongArch::BI__builtin_lasx_xvsat_du:
+ case LoongArch::BI__builtin_lasx_xvslli_d:
+ case LoongArch::BI__builtin_lasx_xvsrai_d:
+ case LoongArch::BI__builtin_lasx_xvsrli_d:
+ case LoongArch::BI__builtin_lasx_xvsrari_d:
+ case LoongArch::BI__builtin_lasx_xvrotri_d:
+ case LoongArch::BI__builtin_lasx_xvsrlri_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 63);
+ case LoongArch::BI__builtin_lasx_xvssrarni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrarni_wu_d:
+ case LoongArch::BI__builtin_lasx_xvssrani_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrani_wu_d:
+ case LoongArch::BI__builtin_lasx_xvsrarni_w_d:
+ case LoongArch::BI__builtin_lasx_xvsrlni_w_d:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrlni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrlni_wu_d:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_wu_d:
+ case LoongArch::BI__builtin_lasx_xvsrani_w_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 63);
+ case LoongArch::BI__builtin_lasx_xvssrarni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrarni_du_q:
+ case LoongArch::BI__builtin_lasx_xvssrani_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrani_du_q:
+ case LoongArch::BI__builtin_lasx_xvsrarni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrlni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrlni_du_q:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_du_q:
+ case LoongArch::BI__builtin_lasx_xvsrani_d_q:
+ case LoongArch::BI__builtin_lasx_xvsrlni_d_q:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_d_q:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 127);
+ case LoongArch::BI__builtin_lasx_xvseqi_b:
+ case LoongArch::BI__builtin_lasx_xvseqi_h:
+ case LoongArch::BI__builtin_lasx_xvseqi_w:
+ case LoongArch::BI__builtin_lasx_xvseqi_d:
+ case LoongArch::BI__builtin_lasx_xvslti_b:
+ case LoongArch::BI__builtin_lasx_xvslti_h:
+ case LoongArch::BI__builtin_lasx_xvslti_w:
+ case LoongArch::BI__builtin_lasx_xvslti_d:
+ case LoongArch::BI__builtin_lasx_xvslei_b:
+ case LoongArch::BI__builtin_lasx_xvslei_h:
+ case LoongArch::BI__builtin_lasx_xvslei_w:
+ case LoongArch::BI__builtin_lasx_xvslei_d:
+ case LoongArch::BI__builtin_lasx_xvmaxi_b:
+ case LoongArch::BI__builtin_lasx_xvmaxi_h:
+ case LoongArch::BI__builtin_lasx_xvmaxi_w:
+ case LoongArch::BI__builtin_lasx_xvmaxi_d:
+ case LoongArch::BI__builtin_lasx_xvmini_b:
+ case LoongArch::BI__builtin_lasx_xvmini_h:
+ case LoongArch::BI__builtin_lasx_xvmini_w:
+ case LoongArch::BI__builtin_lasx_xvmini_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -16, 15);
+ case LoongArch::BI__builtin_lasx_xvandi_b:
+ case LoongArch::BI__builtin_lasx_xvnori_b:
+ case LoongArch::BI__builtin_lasx_xvori_b:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_b:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_h:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_w:
+ case LoongArch::BI__builtin_lasx_xvxori_b:
+ case LoongArch::BI__builtin_lasx_xvpermi_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 255);
+ case LoongArch::BI__builtin_lasx_xvbitseli_b:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_d:
+ case LoongArch::BI__builtin_lasx_xvextrins_b:
+ case LoongArch::BI__builtin_lasx_xvextrins_h:
+ case LoongArch::BI__builtin_lasx_xvextrins_w:
+ case LoongArch::BI__builtin_lasx_xvextrins_d:
+ case LoongArch::BI__builtin_lasx_xvpermi_q:
+ case LoongArch::BI__builtin_lasx_xvpermi_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 255);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_h:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_w:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_wu:
+ case LoongArch::BI__builtin_lasx_xvpickve_w_f:
+ case LoongArch::BI__builtin_lasx_xvpickve_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvinsgr2vr_w:
+ case LoongArch::BI__builtin_lasx_xvinsve0_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_w:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_d:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_du:
+ case LoongArch::BI__builtin_lasx_xvpickve_d_f:
+ case LoongArch::BI__builtin_lasx_xvpickve_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 3);
+ case LoongArch::BI__builtin_lasx_xvinsve0_d:
+ case LoongArch::BI__builtin_lasx_xvinsgr2vr_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case LoongArch::BI__builtin_lasx_xvstelm_b:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -128, 127) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 31);
+ case LoongArch::BI__builtin_lasx_xvstelm_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -256, 254) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvstelm_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -512, 508) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvstelm_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 3);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case LoongArch::BI__builtin_lasx_xvldrepl_b:
+ case LoongArch::BI__builtin_lasx_xvld:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2047);
+ case LoongArch::BI__builtin_lasx_xvldrepl_h:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2046);
+ case LoongArch::BI__builtin_lasx_xvldrepl_w:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2044);
+ case LoongArch::BI__builtin_lasx_xvldrepl_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, -2048, 2040);
+ case LoongArch::BI__builtin_lasx_xvst:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, -2048, 2047);
+ case LoongArch::BI__builtin_lasx_xvldi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, -4096, 4095);
+ case LoongArch::BI__builtin_lasx_xvrepli_b:
+ case LoongArch::BI__builtin_lasx_xvrepli_h:
+ case LoongArch::BI__builtin_lasx_xvrepli_w:
+ case LoongArch::BI__builtin_lasx_xvrepli_d:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, -512, 511);
+ }
+ return false;
+}
+
+} // namespace clang
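
The hard-coded bounds above track the immediate widths of the underlying LoongArch instructions; a small illustrative check of a few of them, using the same MathExtras helpers the file already includes (not part of the patch):

#include "llvm/Support/MathExtras.h"
#include <cassert>

int main() {
  // cacop_d/cacop_w: 5-bit unsigned op field, signed 12-bit offset.
  assert(llvm::maxUIntN(5) == 31);
  assert(llvm::minIntN(12) == -2048 && llvm::maxIntN(12) == 2047);

  // vstelm_{h,d}: consistent with a signed 8-bit offset scaled by the
  // element size, hence [-256, 254] and [-1024, 1016] above rather than si12.
  assert(llvm::minIntN(8) * 2 == -256 && llvm::maxIntN(8) * 2 == 254);
  assert(llvm::minIntN(8) * 8 == -1024 && llvm::maxIntN(8) * 8 == 1016);
  return 0;
}
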
diff --git a/clang/lib/Sema/SemaMIPS.cpp b/clang/lib/Sema/SemaMIPS.cpp
new file mode 100644
index 0000000000000..df329e9539f27
--- /dev/null
+++ b/clang/lib/Sema/SemaMIPS.cpp
@@ -0,0 +1,239 @@
+//===------ SemaMIPS.cpp -------- MIPS target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to MIPS.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaMIPS.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+
+SemaMIPS::SemaMIPS(Sema &S) : SemaBase(S) {}
+
+bool SemaMIPS::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID, CallExpr *TheCall) {
+ return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
+ CheckMipsBuiltinArgument(BuiltinID, TheCall);
+}
+
+bool SemaMIPS::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
+
+ if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
+ BuiltinID <= Mips::BI__builtin_mips_lwx) {
+ if (!TI.hasFeature("dsp"))
+ return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
+ }
+
+ if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
+ BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
+ if (!TI.hasFeature("dspr2"))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_mips_builtin_requires_dspr2);
+ }
+
+ if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
+ BuiltinID <= Mips::BI__builtin_msa_xori_b) {
+ if (!TI.hasFeature("msa"))
+ return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
+ }
+
+ return false;
+}
+
+// CheckMipsBuiltinArgument - Checks that the constant value passed to the
+// intrinsic is correct. The switch statement is ordered by DSP, then MSA. The
+// ordering for DSP is unspecified. MSA is ordered by the data format used
+// by the underlying instruction, i.e., df/m, df/n, and then by size.
+//
+// FIXME: The size tests here should instead be tablegen'd along with the
+// definitions from include/clang/Basic/BuiltinsMips.def.
+// FIXME: GCC is strict on signedness for some of these intrinsics, we should
+// be too.
+bool SemaMIPS::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
+ unsigned i = 0, l = 0, u = 0, m = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
+ case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
+ case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
+ case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
+ case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
+ case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
+ case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
+  // MSA intrinsics. Instructions (which the intrinsics map to) that use the
+  // df/m field.
+ // These intrinsics take an unsigned 3 bit immediate.
+ case Mips::BI__builtin_msa_bclri_b:
+ case Mips::BI__builtin_msa_bnegi_b:
+ case Mips::BI__builtin_msa_bseti_b:
+ case Mips::BI__builtin_msa_sat_s_b:
+ case Mips::BI__builtin_msa_sat_u_b:
+ case Mips::BI__builtin_msa_slli_b:
+ case Mips::BI__builtin_msa_srai_b:
+ case Mips::BI__builtin_msa_srari_b:
+ case Mips::BI__builtin_msa_srli_b:
+ case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
+ case Mips::BI__builtin_msa_binsli_b:
+ case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
+ // These intrinsics take an unsigned 4 bit immediate.
+ case Mips::BI__builtin_msa_bclri_h:
+ case Mips::BI__builtin_msa_bnegi_h:
+ case Mips::BI__builtin_msa_bseti_h:
+ case Mips::BI__builtin_msa_sat_s_h:
+ case Mips::BI__builtin_msa_sat_u_h:
+ case Mips::BI__builtin_msa_slli_h:
+ case Mips::BI__builtin_msa_srai_h:
+ case Mips::BI__builtin_msa_srari_h:
+ case Mips::BI__builtin_msa_srli_h:
+ case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
+ case Mips::BI__builtin_msa_binsli_h:
+ case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
+ // These intrinsics take an unsigned 5 bit immediate.
+ // The first block of intrinsics actually have an unsigned 5 bit field,
+ // not a df/n field.
+ case Mips::BI__builtin_msa_cfcmsa:
+ case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
+ case Mips::BI__builtin_msa_clei_u_b:
+ case Mips::BI__builtin_msa_clei_u_h:
+ case Mips::BI__builtin_msa_clei_u_w:
+ case Mips::BI__builtin_msa_clei_u_d:
+ case Mips::BI__builtin_msa_clti_u_b:
+ case Mips::BI__builtin_msa_clti_u_h:
+ case Mips::BI__builtin_msa_clti_u_w:
+ case Mips::BI__builtin_msa_clti_u_d:
+ case Mips::BI__builtin_msa_maxi_u_b:
+ case Mips::BI__builtin_msa_maxi_u_h:
+ case Mips::BI__builtin_msa_maxi_u_w:
+ case Mips::BI__builtin_msa_maxi_u_d:
+ case Mips::BI__builtin_msa_mini_u_b:
+ case Mips::BI__builtin_msa_mini_u_h:
+ case Mips::BI__builtin_msa_mini_u_w:
+ case Mips::BI__builtin_msa_mini_u_d:
+ case Mips::BI__builtin_msa_addvi_b:
+ case Mips::BI__builtin_msa_addvi_h:
+ case Mips::BI__builtin_msa_addvi_w:
+ case Mips::BI__builtin_msa_addvi_d:
+ case Mips::BI__builtin_msa_bclri_w:
+ case Mips::BI__builtin_msa_bnegi_w:
+ case Mips::BI__builtin_msa_bseti_w:
+ case Mips::BI__builtin_msa_sat_s_w:
+ case Mips::BI__builtin_msa_sat_u_w:
+ case Mips::BI__builtin_msa_slli_w:
+ case Mips::BI__builtin_msa_srai_w:
+ case Mips::BI__builtin_msa_srari_w:
+ case Mips::BI__builtin_msa_srli_w:
+ case Mips::BI__builtin_msa_srlri_w:
+ case Mips::BI__builtin_msa_subvi_b:
+ case Mips::BI__builtin_msa_subvi_h:
+ case Mips::BI__builtin_msa_subvi_w:
+ case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
+ case Mips::BI__builtin_msa_binsli_w:
+ case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
+ // These intrinsics take an unsigned 6 bit immediate.
+ case Mips::BI__builtin_msa_bclri_d:
+ case Mips::BI__builtin_msa_bnegi_d:
+ case Mips::BI__builtin_msa_bseti_d:
+ case Mips::BI__builtin_msa_sat_s_d:
+ case Mips::BI__builtin_msa_sat_u_d:
+ case Mips::BI__builtin_msa_slli_d:
+ case Mips::BI__builtin_msa_srai_d:
+ case Mips::BI__builtin_msa_srari_d:
+ case Mips::BI__builtin_msa_srli_d:
+ case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
+ case Mips::BI__builtin_msa_binsli_d:
+ case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
+ // These intrinsics take a signed 5 bit immediate.
+ case Mips::BI__builtin_msa_ceqi_b:
+ case Mips::BI__builtin_msa_ceqi_h:
+ case Mips::BI__builtin_msa_ceqi_w:
+ case Mips::BI__builtin_msa_ceqi_d:
+ case Mips::BI__builtin_msa_clti_s_b:
+ case Mips::BI__builtin_msa_clti_s_h:
+ case Mips::BI__builtin_msa_clti_s_w:
+ case Mips::BI__builtin_msa_clti_s_d:
+ case Mips::BI__builtin_msa_clei_s_b:
+ case Mips::BI__builtin_msa_clei_s_h:
+ case Mips::BI__builtin_msa_clei_s_w:
+ case Mips::BI__builtin_msa_clei_s_d:
+ case Mips::BI__builtin_msa_maxi_s_b:
+ case Mips::BI__builtin_msa_maxi_s_h:
+ case Mips::BI__builtin_msa_maxi_s_w:
+ case Mips::BI__builtin_msa_maxi_s_d:
+ case Mips::BI__builtin_msa_mini_s_b:
+ case Mips::BI__builtin_msa_mini_s_h:
+ case Mips::BI__builtin_msa_mini_s_w:
+ case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
+ // These intrinsics take an unsigned 8 bit immediate.
+ case Mips::BI__builtin_msa_andi_b:
+ case Mips::BI__builtin_msa_nori_b:
+ case Mips::BI__builtin_msa_ori_b:
+ case Mips::BI__builtin_msa_shf_b:
+ case Mips::BI__builtin_msa_shf_h:
+ case Mips::BI__builtin_msa_shf_w:
+ case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
+ case Mips::BI__builtin_msa_bseli_b:
+ case Mips::BI__builtin_msa_bmnzi_b:
+ case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
+ // df/n format
+ // These intrinsics take an unsigned 4 bit immediate.
+ case Mips::BI__builtin_msa_copy_s_b:
+ case Mips::BI__builtin_msa_copy_u_b:
+ case Mips::BI__builtin_msa_insve_b:
+ case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
+ case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
+ // These intrinsics take an unsigned 3 bit immediate.
+ case Mips::BI__builtin_msa_copy_s_h:
+ case Mips::BI__builtin_msa_copy_u_h:
+ case Mips::BI__builtin_msa_insve_h:
+ case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
+ case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
+ // These intrinsics take an unsigned 2 bit immediate.
+ case Mips::BI__builtin_msa_copy_s_w:
+ case Mips::BI__builtin_msa_copy_u_w:
+ case Mips::BI__builtin_msa_insve_w:
+ case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
+ case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
+ // These intrinsics take an unsigned 1 bit immediate.
+ case Mips::BI__builtin_msa_copy_s_d:
+ case Mips::BI__builtin_msa_copy_u_d:
+ case Mips::BI__builtin_msa_insve_d:
+ case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
+ case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
+ // Memory offsets and immediate loads.
+ // These intrinsics take a signed 10 bit immediate.
+ case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
+ case Mips::BI__builtin_msa_ldi_h:
+ case Mips::BI__builtin_msa_ldi_w:
+ case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
+ case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
+ case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
+ case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
+ case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
+ case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
+ case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
+ case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
+ case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
+ }
+
+ if (!m)
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u);
+
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u) ||
+ SemaRef.BuiltinConstantArgMultiple(TheCall, i, m);
+}
+
+} // namespace clang
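
As a usage-side sketch of the df/m ranges and the multiple-of constraint (m) above (assumes the v4i32 typedef and raw builtins exposed via msa.h; not part of the patch):

  #include <msa.h>

  v4i32 f(v4i32 v, void *p) {
    v4i32 a = __builtin_msa_slli_w(v, 3); // OK: unsigned 5-bit immediate, [0, 31]
    v4i32 b = __builtin_msa_ld_w(p, 8);   // OK: in [-2048, 2044] and a multiple of 4
    v4i32 c = __builtin_msa_ld_w(p, 6);   // rejected by BuiltinConstantArgMultiple
    return a + b + c;
  }
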
diff --git a/clang/lib/Sema/SemaNVPTX.cpp b/clang/lib/Sema/SemaNVPTX.cpp
new file mode 100644
index 0000000000000..778b0c2c2f306
--- /dev/null
+++ b/clang/lib/Sema/SemaNVPTX.cpp
@@ -0,0 +1,35 @@
+//===------ SemaNVPTX.cpp ------ NVPTX target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to NVPTX.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaNVPTX.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+
+SemaNVPTX::SemaNVPTX(Sema &S) : SemaBase(S) {}
+
+bool SemaNVPTX::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
+ case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
+ return SemaRef.checkArgCountAtMost(TheCall, 3);
+ }
+
+ return false;
+}
+
+} // namespace clang
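
The only NVPTX-specific rule here is the argument-count cap on the cp.async builtins, which accept an optional third argument; roughly (argument expressions elided, dst/src hypothetical):

  __nvvm_cp_async_ca_shared_global_16(dst, src);        // OK: 2 arguments
  __nvvm_cp_async_ca_shared_global_16(dst, src, 8);     // OK: optional src-size
  __nvvm_cp_async_ca_shared_global_16(dst, src, 8, 0);  // rejected by
      // SemaRef.checkArgCountAtMost(TheCall, 3)
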
diff --git a/clang/lib/Sema/SemaPPC.cpp b/clang/lib/Sema/SemaPPC.cpp
new file mode 100644
index 0000000000000..0e5005de39c82
--- /dev/null
+++ b/clang/lib/Sema/SemaPPC.cpp
@@ -0,0 +1,433 @@
+//===------ SemaPPC.cpp ------ PowerPC target-specific routines -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to PowerPC.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaPPC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/APSInt.h"
+
+namespace clang {
+
+SemaPPC::SemaPPC(Sema &S) : SemaBase(S) {}
+
+void SemaPPC::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
+ const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens());
+ if (!ICE)
+ return;
+
+ const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
+ if (!DR)
+ return;
+
+ const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl());
+ if (!PD || !PD->getType()->isRecordType())
+ return;
+
+ QualType ArgType = Arg->getType();
+ for (const FieldDecl *FD :
+ ArgType->castAs<RecordType>()->getDecl()->fields()) {
+ if (const auto *AA = FD->getAttr<AlignedAttr>()) {
+ CharUnits Alignment =
+ getASTContext().toCharUnitsFromBits(AA->getAlignment(getASTContext()));
+ if (Alignment.getQuantity() == 16) {
+ Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
+ Diag(Loc, diag::note_misaligned_member_used_here) << PD;
+ }
+ }
+ }
+}
+
+static bool isPPC_64Builtin(unsigned BuiltinID) {
+ // These builtins only work on PPC 64bit targets.
+ switch (BuiltinID) {
+ case PPC::BI__builtin_divde:
+ case PPC::BI__builtin_divdeu:
+ case PPC::BI__builtin_bpermd:
+ case PPC::BI__builtin_pdepd:
+ case PPC::BI__builtin_pextd:
+ case PPC::BI__builtin_ppc_ldarx:
+ case PPC::BI__builtin_ppc_stdcx:
+ case PPC::BI__builtin_ppc_tdw:
+ case PPC::BI__builtin_ppc_trapd:
+ case PPC::BI__builtin_ppc_cmpeqb:
+ case PPC::BI__builtin_ppc_setb:
+ case PPC::BI__builtin_ppc_mulhd:
+ case PPC::BI__builtin_ppc_mulhdu:
+ case PPC::BI__builtin_ppc_maddhd:
+ case PPC::BI__builtin_ppc_maddhdu:
+ case PPC::BI__builtin_ppc_maddld:
+ case PPC::BI__builtin_ppc_load8r:
+ case PPC::BI__builtin_ppc_store8r:
+ case PPC::BI__builtin_ppc_insert_exp:
+ case PPC::BI__builtin_ppc_extract_sig:
+ case PPC::BI__builtin_ppc_addex:
+ case PPC::BI__builtin_darn:
+ case PPC::BI__builtin_darn_raw:
+ case PPC::BI__builtin_ppc_compare_and_swaplp:
+ case PPC::BI__builtin_ppc_fetch_and_addlp:
+ case PPC::BI__builtin_ppc_fetch_and_andlp:
+ case PPC::BI__builtin_ppc_fetch_and_orlp:
+ case PPC::BI__builtin_ppc_fetch_and_swaplp:
+ return true;
+ }
+ return false;
+}
+
+bool SemaPPC::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall) {
+ ASTContext &Context = getASTContext();
+ unsigned i = 0, l = 0, u = 0;
+ bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
+ llvm::APSInt Result;
+
+ if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
+ return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
+ << TheCall->getSourceRange();
+
+ switch (BuiltinID) {
+ default: return false;
+ case PPC::BI__builtin_altivec_crypto_vshasigmaw:
+ case PPC::BI__builtin_altivec_crypto_vshasigmad:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case PPC::BI__builtin_altivec_dss:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3);
+ case PPC::BI__builtin_tbegin:
+ case PPC::BI__builtin_tend:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 1);
+ case PPC::BI__builtin_tsr:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7);
+ case PPC::BI__builtin_tabortwc:
+ case PPC::BI__builtin_tabortdc:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 31);
+ case PPC::BI__builtin_tabortwci:
+ case PPC::BI__builtin_tabortdci:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
+ // __builtin_(un)pack_longdouble are available only if long double uses IBM
+ // extended double representation.
+ case PPC::BI__builtin_unpack_longdouble:
+ if (SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1))
+ return true;
+ [[fallthrough]];
+ case PPC::BI__builtin_pack_longdouble:
+ if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
+ return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
+ << "ibmlongdouble";
+ return false;
+ case PPC::BI__builtin_altivec_dst:
+ case PPC::BI__builtin_altivec_dstt:
+ case PPC::BI__builtin_altivec_dstst:
+ case PPC::BI__builtin_altivec_dststt:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case PPC::BI__builtin_vsx_xxpermdi:
+ case PPC::BI__builtin_vsx_xxsldwi:
+ return BuiltinVSX(TheCall);
+ case PPC::BI__builtin_unpack_vector_int128:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case PPC::BI__builtin_altivec_vgnb:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 2, 7);
+ case PPC::BI__builtin_vsx_xxeval:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 255);
+ case PPC::BI__builtin_altivec_vsldbi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case PPC::BI__builtin_altivec_vsrdbi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case PPC::BI__builtin_vsx_xxpermx:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7);
+ case PPC::BI__builtin_ppc_tw:
+ case PPC::BI__builtin_ppc_tdw:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 1, 31);
+ case PPC::BI__builtin_ppc_cmprb:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 1);
+ // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
+ // be a constant that represents a contiguous bit field.
+ case PPC::BI__builtin_ppc_rlwnm:
+ return SemaRef.ValueIsRunOfOnes(TheCall, 2);
+ case PPC::BI__builtin_ppc_rlwimi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
+ SemaRef.ValueIsRunOfOnes(TheCall, 3);
+ case PPC::BI__builtin_ppc_rldimi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 63) ||
+ SemaRef.ValueIsRunOfOnes(TheCall, 3);
+ case PPC::BI__builtin_ppc_addex: {
+ if (SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3))
+ return true;
+ // Output warning for reserved values 1 to 3.
+ int ArgValue =
+ TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
+ if (ArgValue != 0)
+ Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
+ << ArgValue;
+ return false;
+ }
+ case PPC::BI__builtin_ppc_mtfsb0:
+ case PPC::BI__builtin_ppc_mtfsb1:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 31);
+ case PPC::BI__builtin_ppc_mtfsf:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 255);
+ case PPC::BI__builtin_ppc_mtfsfi:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case PPC::BI__builtin_ppc_alignx:
+ return SemaRef.BuiltinConstantArgPower2(TheCall, 0);
+ case PPC::BI__builtin_ppc_rdlam:
+ return SemaRef.ValueIsRunOfOnes(TheCall, 2);
+ case PPC::BI__builtin_vsx_ldrmb:
+ case PPC::BI__builtin_vsx_strmb:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 16);
+ case PPC::BI__builtin_altivec_vcntmbb:
+ case PPC::BI__builtin_altivec_vcntmbh:
+ case PPC::BI__builtin_altivec_vcntmbw:
+ case PPC::BI__builtin_altivec_vcntmbd:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case PPC::BI__builtin_vsx_xxgenpcvbm:
+ case PPC::BI__builtin_vsx_xxgenpcvhm:
+ case PPC::BI__builtin_vsx_xxgenpcvwm:
+ case PPC::BI__builtin_vsx_xxgenpcvdm:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 3);
+ case PPC::BI__builtin_ppc_test_data_class: {
+ // Check if the first argument of the __builtin_ppc_test_data_class call is
+ // valid. The argument must be 'float' or 'double' or '__float128'.
+ QualType ArgType = TheCall->getArg(0)->getType();
+ if (ArgType != QualType(Context.FloatTy) &&
+ ArgType != QualType(Context.DoubleTy) &&
+ ArgType != QualType(Context.Float128Ty))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_ppc_invalid_test_data_class_type);
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 127);
+ }
+ case PPC::BI__builtin_ppc_maxfe:
+ case PPC::BI__builtin_ppc_minfe:
+ case PPC::BI__builtin_ppc_maxfl:
+ case PPC::BI__builtin_ppc_minfl:
+ case PPC::BI__builtin_ppc_maxfs:
+ case PPC::BI__builtin_ppc_minfs: {
+ if (Context.getTargetInfo().getTriple().isOSAIX() &&
+ (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
+ BuiltinID == PPC::BI__builtin_ppc_minfe))
+ return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
+ << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
+ << false << Context.getTargetInfo().getTriple().str();
+ // Argument type should be exact.
+ QualType ArgType = QualType(Context.LongDoubleTy);
+ if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
+ BuiltinID == PPC::BI__builtin_ppc_minfl)
+ ArgType = QualType(Context.DoubleTy);
+ else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
+ BuiltinID == PPC::BI__builtin_ppc_minfs)
+ ArgType = QualType(Context.FloatTy);
+ for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
+ if (TheCall->getArg(I)->getType() != ArgType)
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_typecheck_convert_incompatible)
+ << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
+ return false;
+ }
+#define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
+ case PPC::BI__builtin_##Name: \
+ return BuiltinPPCMMACall(TheCall, BuiltinID, Types);
+#include "clang/Basic/BuiltinsPPC.def"
+ }
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u);
+}
+
+// Check if the given type is a non-pointer PPC MMA type. This function is used
+// in Sema to prevent invalid uses of restricted PPC MMA types.
+bool SemaPPC::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
+ ASTContext &Context = getASTContext();
+ if (Type->isPointerType() || Type->isArrayType())
+ return false;
+
+ QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
+#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
+ if (false
+#include "clang/Basic/PPCTypes.def"
+ ) {
+ Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
+ return true;
+ }
+ return false;
+}
+
+/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
+/// advancing the pointer over the consumed characters. The decoded type is
+/// returned. If the decoded type represents a constant integer with a
+/// constraint on its value then Mask is set to that value. The type descriptors
+/// used in Str are specific to PPC MMA builtins and are documented in the file
+/// defining the PPC builtins.
+static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
+ unsigned &Mask) {
+ bool RequireICE = false;
+ ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
+ switch (*Str++) {
+ case 'V':
+ return Context.getVectorType(Context.UnsignedCharTy, 16,
+ VectorKind::AltiVecVector);
+ case 'i': {
+ char *End;
+ unsigned size = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing constant parameter constraint");
+ Str = End;
+ Mask = size;
+ return Context.IntTy;
+ }
+ case 'W': {
+ char *End;
+ unsigned size = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing PowerPC MMA type size");
+ Str = End;
+ QualType Type;
+ switch (size) {
+ #define PPC_VECTOR_TYPE(typeName, Id, size) \
+ case size: Type = Context.Id##Ty; break;
+ #include "clang/Basic/PPCTypes.def"
+ default: llvm_unreachable("Invalid PowerPC MMA vector type");
+ }
+ bool CheckVectorArgs = false;
+ while (!CheckVectorArgs) {
+ switch (*Str++) {
+ case '*':
+ Type = Context.getPointerType(Type);
+ break;
+ case 'C':
+ Type = Type.withConst();
+ break;
+ default:
+ CheckVectorArgs = true;
+ --Str;
+ break;
+ }
+ }
+ return Type;
+ }
+ default:
+ return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
+ }
+}
+
+bool SemaPPC::BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
+ const char *TypeStr) {
+
+ assert((TypeStr[0] != '\0') &&
+ "Invalid types in PPC MMA builtin declaration");
+
+ ASTContext &Context = getASTContext();
+ unsigned Mask = 0;
+ unsigned ArgNum = 0;
+
+ // The first type in TypeStr is the type of the value returned by the
+ // builtin. So we first read that type and change the type of TheCall.
+ QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
+ TheCall->setType(type);
+
+ while (*TypeStr != '\0') {
+ Mask = 0;
+ QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
+ if (ArgNum >= TheCall->getNumArgs()) {
+ ArgNum++;
+ break;
+ }
+
+ Expr *Arg = TheCall->getArg(ArgNum);
+ QualType PassedType = Arg->getType();
+ QualType StrippedRVType = PassedType.getCanonicalType();
+
+ // Strip Restrict/Volatile qualifiers.
+ if (StrippedRVType.isRestrictQualified() ||
+ StrippedRVType.isVolatileQualified())
+ StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();
+
+ // The only case where the argument type and expected type are allowed to
+ // mismatch is if the argument type is a non-void pointer (or array) and
+ // expected type is a void pointer.
+ if (StrippedRVType != ExpectedType)
+ if (!(ExpectedType->isVoidPointerType() &&
+ (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
+ return Diag(Arg->getBeginLoc(),
+ diag::err_typecheck_convert_incompatible)
+ << PassedType << ExpectedType << 1 << 0 << 0;
+
+ // If the value of the Mask is not 0, we have a constraint in the size of
+ // the integer argument so here we ensure the argument is a constant that
+ // is in the valid range.
+ if (Mask != 0 && SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
+ return true;
+
+ ArgNum++;
+ }
+
+  // If we exited the previous loop early, TypeStr still has type descriptors
+  // left. Read them all so that we know the expected number of arguments and
+  // checkArgCount below can emit an accurate diagnostic if TheCall does not
+  // match it.
+ while (*TypeStr != '\0') {
+ (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
+ ArgNum++;
+ }
+ if (SemaRef.checkArgCount(TheCall, ArgNum))
+ return true;
+
+ return false;
+}
+
+bool SemaPPC::BuiltinVSX(CallExpr *TheCall) {
+ unsigned ExpectedNumArgs = 3;
+ if (SemaRef.checkArgCount(TheCall, ExpectedNumArgs))
+ return true;
+
+ // Check the third argument is a compile time constant
+ if (!TheCall->getArg(2)->isIntegerConstantExpr(getASTContext()))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_vsx_builtin_nonconstant_argument)
+ << 3 /* argument index */ << TheCall->getDirectCallee()
+ << SourceRange(TheCall->getArg(2)->getBeginLoc(),
+ TheCall->getArg(2)->getEndLoc());
+
+ QualType Arg1Ty = TheCall->getArg(0)->getType();
+ QualType Arg2Ty = TheCall->getArg(1)->getType();
+
+  // Check that the types of argument 1 and argument 2 are vectors.
+ SourceLocation BuiltinLoc = TheCall->getBeginLoc();
+ if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
+ (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
+ return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
+ << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ false
+ << SourceRange(TheCall->getArg(0)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc());
+ }
+
+ // Check the first two arguments are the same type.
+ if (!getASTContext().hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
+ return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
+ << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ false
+ << SourceRange(TheCall->getArg(0)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc());
+ }
+
+  // When default clang type checking is turned off and the customized type
+  // checking is used, the return type of the function must be set explicitly.
+  // Otherwise it is _Bool by default.
+ TheCall->setType(Arg1Ty);
+
+ return false;
+}
+
+} // namespace clang
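
For readers unfamiliar with the MMA type descriptors consumed by DecodePPCMMATypeFromStr/BuiltinPPCMMACall, a hypothetical descriptor string "W512*Vi15" would be walked as follows (traced against the switch above):

  // 'W512' -> the 512-bit PPC MMA type from PPCTypes.def
  // '*'    -> pointer to the preceding type
  // 'V'    -> vector of 16 unsigned char (AltiVec vector)
  // 'i15'  -> int, with Mask = 15, later enforced via
  //           SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, 15, true)
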
diff --git a/clang/lib/Sema/SemaSystemZ.cpp b/clang/lib/Sema/SemaSystemZ.cpp
new file mode 100644
index 0000000000000..2a6fb6114446b
--- /dev/null
+++ b/clang/lib/Sema/SemaSystemZ.cpp
@@ -0,0 +1,94 @@
+//===------ SemaSystemZ.cpp ------ SystemZ target-specific routines -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to SystemZ.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaSystemZ.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/ADT/APSInt.h"
+#include <optional>
+
+namespace clang {
+
+SemaSystemZ::SemaSystemZ(Sema &S) : SemaBase(S) {}
+
+bool SemaSystemZ::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ if (BuiltinID == SystemZ::BI__builtin_tabort) {
+ Expr *Arg = TheCall->getArg(0);
+ if (std::optional<llvm::APSInt> AbortCode =
+ Arg->getIntegerConstantExpr(getASTContext()))
+ if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
+ return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
+ << Arg->getSourceRange();
+ }
+
+ // For intrinsics which take an immediate value as part of the instruction,
+ // range check them here.
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_verimb:
+ case SystemZ::BI__builtin_s390_verimh:
+ case SystemZ::BI__builtin_s390_verimf:
+ case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
+ case SystemZ::BI__builtin_s390_vfaeb:
+ case SystemZ::BI__builtin_s390_vfaeh:
+ case SystemZ::BI__builtin_s390_vfaef:
+ case SystemZ::BI__builtin_s390_vfaebs:
+ case SystemZ::BI__builtin_s390_vfaehs:
+ case SystemZ::BI__builtin_s390_vfaefs:
+ case SystemZ::BI__builtin_s390_vfaezb:
+ case SystemZ::BI__builtin_s390_vfaezh:
+ case SystemZ::BI__builtin_s390_vfaezf:
+ case SystemZ::BI__builtin_s390_vfaezbs:
+ case SystemZ::BI__builtin_s390_vfaezhs:
+ case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vfisb:
+ case SystemZ::BI__builtin_s390_vfidb:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case SystemZ::BI__builtin_s390_vftcisb:
+ case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
+ case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vstrcb:
+ case SystemZ::BI__builtin_s390_vstrch:
+ case SystemZ::BI__builtin_s390_vstrcf:
+ case SystemZ::BI__builtin_s390_vstrczb:
+ case SystemZ::BI__builtin_s390_vstrczh:
+ case SystemZ::BI__builtin_s390_vstrczf:
+ case SystemZ::BI__builtin_s390_vstrcbs:
+ case SystemZ::BI__builtin_s390_vstrchs:
+ case SystemZ::BI__builtin_s390_vstrcfs:
+ case SystemZ::BI__builtin_s390_vstrczbs:
+ case SystemZ::BI__builtin_s390_vstrczhs:
+ case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vfminsb:
+ case SystemZ::BI__builtin_s390_vfmaxsb:
+ case SystemZ::BI__builtin_s390_vfmindb:
+ case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
+ case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
+ case SystemZ::BI__builtin_s390_vclfnhs:
+ case SystemZ::BI__builtin_s390_vclfnls:
+ case SystemZ::BI__builtin_s390_vcfn:
+ case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
+ }
+ return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u);
+}
+
+} // namespace clang
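
Usage-side effect of the __builtin_tabort check above (sketch; assumes a target with the transactional-execution facility):

  __builtin_tabort(256); // OK: abort codes from 256 up are allowed
  __builtin_tabort(42);  // rejected: constants 0..255 are reserved
                         // (err_systemz_invalid_tabort_code)
  __builtin_tabort(n);   // not diagnosed here: only constant codes are checked
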
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index bb49aae2cb666..4c8eaf2d4ebf6 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -26,6 +26,7 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaAMDGPU.h"
#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaObjC.h"
@@ -563,7 +564,7 @@ static void instantiateDependentAMDGPUFlatWorkGroupSizeAttr(
return;
Expr *MaxExpr = Result.getAs<Expr>();
- S.addAMDGPUFlatWorkGroupSizeAttr(New, Attr, MinExpr, MaxExpr);
+ S.AMDGPU().addAMDGPUFlatWorkGroupSizeAttr(New, Attr, MinExpr, MaxExpr);
}
ExplicitSpecifier Sema::instantiateExplicitSpecifier(
@@ -607,7 +608,7 @@ static void instantiateDependentAMDGPUWavesPerEUAttr(
MaxExpr = Result.getAs<Expr>();
}
- S.addAMDGPUWavesPerEUAttr(New, Attr, MinExpr, MaxExpr);
+ S.AMDGPU().addAMDGPUWavesPerEUAttr(New, Attr, MinExpr, MaxExpr);
}
static void instantiateDependentAMDGPUMaxNumWorkGroupsAttr(
@@ -630,7 +631,7 @@ static void instantiateDependentAMDGPUMaxNumWorkGroupsAttr(
Expr *YExpr = ResultY.getAs<Expr>();
Expr *ZExpr = ResultZ.getAs<Expr>();
- S.addAMDGPUMaxNumWorkGroupsAttr(New, Attr, XExpr, YExpr, ZExpr);
+ S.AMDGPU().addAMDGPUMaxNumWorkGroupsAttr(New, Attr, XExpr, YExpr, ZExpr);
}
// This doesn't take any template parameters, but we have a custom action that
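
The S.AMDGPU() calls above go through the accessor pattern this patch adds to Sema; the shape is roughly the following (sketch, not the literal Sema.h declarations):

  class Sema final : public SemaBase {
  public:
    SemaAMDGPU &AMDGPU() {
      assert(AMDGPUPtr);
      return *AMDGPUPtr;
    }
    // ...one accessor plus one owning pointer per target-specific component...
  private:
    std::unique_ptr<SemaAMDGPU> AMDGPUPtr;
  };
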
diff --git a/clang/lib/Sema/SemaWasm.cpp b/clang/lib/Sema/SemaWasm.cpp
new file mode 100644
index 0000000000000..730fad1dbe208
--- /dev/null
+++ b/clang/lib/Sema/SemaWasm.cpp
@@ -0,0 +1,337 @@
+//===------ SemaWasm.cpp ---- WebAssembly target-specific routines --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to WebAssembly.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaWasm.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/DiagnosticSema.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+
+SemaWasm::SemaWasm(Sema &S) : SemaBase(S) {}
+
+/// Checks that the argument at the given index is a WebAssembly table and,
+/// if it is, sets ElTy to the element type.
+static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
+ QualType &ElTy) {
+ Expr *ArgExpr = E->getArg(ArgIndex);
+ const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
+ if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
+ return S.Diag(ArgExpr->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_be_table_type)
+ << ArgIndex + 1 << ArgExpr->getSourceRange();
+ }
+ ElTy = ATy->getElementType();
+ return false;
+}
+
+/// Checks the argument at the given index is an integer.
+static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
+ unsigned ArgIndex) {
+ Expr *ArgExpr = E->getArg(ArgIndex);
+ if (!ArgExpr->getType()->isIntegerType()) {
+ return S.Diag(ArgExpr->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_be_integer_type)
+ << ArgIndex + 1 << ArgExpr->getSourceRange();
+ }
+ return false;
+}
+
+bool SemaWasm::BuiltinWasmRefNullExtern(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() != 0)
+ return true;
+
+ TheCall->setType(getASTContext().getWebAssemblyExternrefType());
+
+ return false;
+}
+
+bool SemaWasm::BuiltinWasmRefNullFunc(CallExpr *TheCall) {
+ ASTContext &Context = getASTContext();
+ if (TheCall->getNumArgs() != 0) {
+ Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << /*expected*/ 0 << TheCall->getNumArgs()
+ << /*is non object*/ 0;
+ return true;
+ }
+
+ // This custom type checking code ensures that the nodes are as expected
+ // in order to later on generate the necessary builtin.
+ QualType Pointee = Context.getFunctionType(Context.VoidTy, {}, {});
+ QualType Type = Context.getPointerType(Pointee);
+ Pointee = Context.getAddrSpaceQualType(Pointee, LangAS::wasm_funcref);
+ Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
+ Context.getPointerType(Pointee));
+ TheCall->setType(Type);
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, and the second
+/// is an index to use as index into the table.
+bool SemaWasm::BuiltinWasmTableGet(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 2))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, 1))
+ return true;
+
+ // If all is well, we set the type of TheCall to be the type of the
+ // element of the table.
+ // i.e. a table.get on an externref table has type externref,
+ // or whatever the type of the table element is.
+ TheCall->setType(ElTy);
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is an
+/// index into the table, and the third is the reference type to set into
+/// the table.
+bool SemaWasm::BuiltinWasmTableSet(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 3))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, 1))
+ return true;
+
+ if (!getASTContext().hasSameType(ElTy, TheCall->getArg(2)->getType()))
+ return true;
+
+ return false;
+}
+
+/// Check that the argument is a WebAssembly table.
+bool SemaWasm::BuiltinWasmTableSize(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 1))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, ElTy))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is the
+/// value to use for new elements (of a type matching the table type), and
+/// the third is an integer.
+bool SemaWasm::BuiltinWasmTableGrow(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 3))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, ElTy))
+ return true;
+
+ Expr *NewElemArg = TheCall->getArg(1);
+ if (!getASTContext().hasSameType(ElTy, NewElemArg->getType())) {
+ return Diag(NewElemArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 2 << 1 << NewElemArg->getSourceRange();
+ }
+
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, 2))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is an
+/// integer, the third is the value to use to fill the table (of a type
+/// matching the table type), and the fourth is an integer.
+bool SemaWasm::BuiltinWasmTableFill(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 4))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, 1))
+ return true;
+
+ Expr *NewElemArg = TheCall->getArg(2);
+ if (!getASTContext().hasSameType(ElTy, NewElemArg->getType())) {
+ return Diag(NewElemArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 3 << 1 << NewElemArg->getSourceRange();
+ }
+
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, 3))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is also a
+/// WebAssembly table (of the same element type), and the third to fifth
+/// arguments are integers.
+bool SemaWasm::BuiltinWasmTableCopy(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 5))
+ return true;
+
+ QualType XElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 0, XElTy))
+ return true;
+
+ QualType YElTy;
+ if (CheckWasmBuiltinArgIsTable(SemaRef, TheCall, 1, YElTy))
+ return true;
+
+ Expr *TableYArg = TheCall->getArg(1);
+ if (!getASTContext().hasSameType(XElTy, YElTy)) {
+ return Diag(TableYArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 2 << 1 << TableYArg->getSourceRange();
+ }
+
+ for (int I = 2; I <= 4; I++) {
+ if (CheckWasmBuiltinArgIsInteger(SemaRef, TheCall, I))
+ return true;
+ }
+
+ return false;
+}
+
+bool SemaWasm::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_ref_null_extern:
+ return BuiltinWasmRefNullExtern(TheCall);
+ case WebAssembly::BI__builtin_wasm_ref_null_func:
+ return BuiltinWasmRefNullFunc(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_get:
+ return BuiltinWasmTableGet(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_set:
+ return BuiltinWasmTableSet(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_size:
+ return BuiltinWasmTableSize(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_grow:
+ return BuiltinWasmTableGrow(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_fill:
+ return BuiltinWasmTableFill(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_copy:
+ return BuiltinWasmTableCopy(TheCall);
+ }
+
+ return false;
+}
+
+WebAssemblyImportModuleAttr *
+SemaWasm::mergeImportModuleAttr(Decl *D, const WebAssemblyImportModuleAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
+
+ if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
+ if (ExistingAttr->getImportModule() == AL.getImportModule())
+ return nullptr;
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 0
+ << ExistingAttr->getImportModule() << AL.getImportModule();
+ Diag(AL.getLoc(), diag::note_previous_attribute);
+ return nullptr;
+ }
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
+ return nullptr;
+ }
+ return ::new (getASTContext()) WebAssemblyImportModuleAttr(getASTContext(), AL,
+ AL.getImportModule());
+}
+
+WebAssemblyImportNameAttr *
+SemaWasm::mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
+
+ if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportNameAttr>()) {
+ if (ExistingAttr->getImportName() == AL.getImportName())
+ return nullptr;
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 1
+ << ExistingAttr->getImportName() << AL.getImportName();
+ Diag(AL.getLoc(), diag::note_previous_attribute);
+ return nullptr;
+ }
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
+ return nullptr;
+ }
+ return ::new (getASTContext()) WebAssemblyImportNameAttr(getASTContext(), AL,
+ AL.getImportName());
+}
+
+void SemaWasm::handleWebAssemblyImportModuleAttr(Decl *D, const ParsedAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
+ return;
+ }
+
+ FD->addAttr(::new (getASTContext())
+ WebAssemblyImportModuleAttr(getASTContext(), AL, Str));
+}
+
+void SemaWasm::handleWebAssemblyImportNameAttr(Decl *D, const ParsedAttr &AL) {
+ auto *FD = cast<FunctionDecl>(D);
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+ if (FD->hasBody()) {
+ Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
+ return;
+ }
+
+ FD->addAttr(::new (getASTContext()) WebAssemblyImportNameAttr(getASTContext(), AL, Str));
+}
+
+void SemaWasm::handleWebAssemblyExportNameAttr(Decl *D, const ParsedAttr &AL) {
+ ASTContext &Context = getASTContext();
+ if (!SemaRef.isFunctionOrMethod(D)) {
+ Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
+ return;
+ }
+
+ auto *FD = cast<FunctionDecl>(D);
+ if (FD->isThisDeclarationADefinition()) {
+ Diag(D->getLocation(), diag::err_alias_is_definition) << FD << 0;
+ return;
+ }
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+ if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+
+ D->addAttr(::new (Context) WebAssemblyExportNameAttr(Context, AL, Str));
+ D->addAttr(UsedAttr::CreateImplicit(Context));
+}
+
+} // namespace clang
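
A usage-side sketch of the table builtin checks above (assumes the reference-types feature and the zero-length __externref_t table arrays used by the existing WebAssembly tests; the int return type of table_grow is an assumption):

  static __externref_t table[0];

  __externref_t get(int i)          { return __builtin_wasm_table_get(table, i); }
  void set(int i, __externref_t r)  { __builtin_wasm_table_set(table, i, r); }
  int grow(__externref_t r, int n)  { return __builtin_wasm_table_grow(table, r, n); }
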
diff --git a/clang/utils/TableGen/MveEmitter.cpp b/clang/utils/TableGen/MveEmitter.cpp
index c455071ed9da7..70d3dd42eb71d 100644
--- a/clang/utils/TableGen/MveEmitter.cpp
+++ b/clang/utils/TableGen/MveEmitter.cpp
@@ -928,7 +928,7 @@ class ACLEIntrinsic {
llvm::APInt ArgTypeRange = llvm::APInt::getMaxValue(ArgTypeBits).zext(128);
llvm::APInt ActualRange = (hi-lo).trunc(64).sext(128);
if (ActualRange.ult(ArgTypeRange))
- SemaChecks.push_back("BuiltinConstantArgRange(TheCall, " + Index +
+ SemaChecks.push_back("SemaRef.BuiltinConstantArgRange(TheCall, " + Index +
", " + signedHexLiteral(lo) + ", " +
signedHexLiteral(hi) + ")");
@@ -943,7 +943,7 @@ class ACLEIntrinsic {
}
Suffix = (Twine(", ") + Arg).str();
}
- SemaChecks.push_back((Twine("BuiltinConstantArg") + IA.ExtraCheckType +
+ SemaChecks.push_back((Twine("SemaRef.BuiltinConstantArg") + IA.ExtraCheckType +
"(TheCall, " + Index + Suffix + ")")
.str());
}
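
The MveEmitter change only prefixes the generated checks with SemaRef., since the emitted switch is now compiled inside SemaARM rather than Sema itself; a generated entry now reads roughly like this (builtin name and range illustrative):

  case ARM::BI__builtin_arm_mve_vshlq_n_s32:
    return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0x0, 0x1f);
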
>From 6e68c960bde622eb244f726d97982219b08a68bf Mon Sep 17 00:00:00 2001
From: Vlad Serebrennikov <serebrennikov.vladislav at gmail.com>
Date: Thu, 23 May 2024 14:56:22 +0300
Subject: [PATCH 2/4] Run clang-format
---
clang/include/clang/Sema/Sema.h | 8 +-
clang/include/clang/Sema/SemaAMDGPU.h | 2 +-
clang/include/clang/Sema/SemaARM.h | 5 +-
clang/include/clang/Sema/SemaNVPTX.h | 1 -
clang/include/clang/Sema/SemaPPC.h | 12 +-
clang/lib/Sema/SemaAMDGPU.cpp | 43 ++++---
clang/lib/Sema/SemaARM.cpp | 161 +++++++++++++++-----------
clang/lib/Sema/SemaBPF.cpp | 2 +-
clang/lib/Sema/SemaChecking.cpp | 3 +-
clang/lib/Sema/SemaDeclAttr.cpp | 7 +-
clang/lib/Sema/SemaHexagon.cpp | 11 +-
clang/lib/Sema/SemaLoongArch.cpp | 8 +-
clang/lib/Sema/SemaMIPS.cpp | 5 +-
clang/lib/Sema/SemaNVPTX.cpp | 6 +-
clang/lib/Sema/SemaPPC.cpp | 32 ++---
clang/lib/Sema/SemaSystemZ.cpp | 2 +-
clang/lib/Sema/SemaWasm.cpp | 29 ++---
clang/utils/TableGen/MveEmitter.cpp | 9 +-
18 files changed, 193 insertions(+), 153 deletions(-)
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 3ba80ed144436..e19509c811805 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -2144,7 +2144,7 @@ class Sema final : public SemaBase {
bool checkArgCountRange(CallExpr *Call, unsigned MinArgCount,
unsigned MaxArgCount);
bool checkArgCount(CallExpr *Call, unsigned DesiredArgCount);
-
+
bool ValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum);
private:
@@ -3576,9 +3576,9 @@ class Sema final : public SemaBase {
/// Negative argument is implicitly converted to unsigned, unless
/// \p StrictlyUnsigned is true.
template <typename AttrInfo>
- bool checkUInt32Argument(const AttrInfo &AI, const Expr *Expr,
- uint32_t &Val, unsigned Idx = UINT_MAX,
- bool StrictlyUnsigned = false) {
+ bool checkUInt32Argument(const AttrInfo &AI, const Expr *Expr, uint32_t &Val,
+ unsigned Idx = UINT_MAX,
+ bool StrictlyUnsigned = false) {
std::optional<llvm::APSInt> I = llvm::APSInt(32);
if (Expr->isTypeDependent() ||
!(I = Expr->getIntegerConstantExpr(Context))) {
diff --git a/clang/include/clang/Sema/SemaAMDGPU.h b/clang/include/clang/Sema/SemaAMDGPU.h
index 271475323dfeb..969078f552c6a 100644
--- a/clang/include/clang/Sema/SemaAMDGPU.h
+++ b/clang/include/clang/Sema/SemaAMDGPU.h
@@ -27,7 +27,7 @@ class SemaAMDGPU : public SemaBase {
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- /// Create an AMDGPUWavesPerEUAttr attribute.
+  /// Create an AMDGPUFlatWorkGroupSizeAttr attribute.
AMDGPUFlatWorkGroupSizeAttr *
CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI, Expr *Min,
Expr *Max);
diff --git a/clang/include/clang/Sema/SemaARM.h b/clang/include/clang/Sema/SemaARM.h
index f7f29067ce320..02698a33abd55 100644
--- a/clang/include/clang/Sema/SemaARM.h
+++ b/clang/include/clang/Sema/SemaARM.h
@@ -38,8 +38,9 @@ class SemaARM : public SemaBase {
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool ParseSVEImmChecks(CallExpr *TheCall,
- llvm::SmallVector<std::tuple<int, int, int>, 3> &ImmChecks);
+ bool
+ ParseSVEImmChecks(CallExpr *TheCall,
+ llvm::SmallVector<std::tuple<int, int, int>, 3> &ImmChecks);
bool CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
diff --git a/clang/include/clang/Sema/SemaNVPTX.h b/clang/include/clang/Sema/SemaNVPTX.h
index ef1e701824b09..a663c694179d9 100644
--- a/clang/include/clang/Sema/SemaNVPTX.h
+++ b/clang/include/clang/Sema/SemaNVPTX.h
@@ -22,7 +22,6 @@ class SemaNVPTX : public SemaBase {
public:
SemaNVPTX(Sema &S);
-
bool CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
};
diff --git a/clang/include/clang/Sema/SemaPPC.h b/clang/include/clang/Sema/SemaPPC.h
index 394a50bedc3e4..3e8929d5b6ded 100644
--- a/clang/include/clang/Sema/SemaPPC.h
+++ b/clang/include/clang/Sema/SemaPPC.h
@@ -31,21 +31,21 @@ class SemaPPC : public SemaBase {
// code to be safe.
// Here we try to get information about the alignment of the struct member
// from the struct passed to the caller function. We only warn when the struct
- // is passed byval, hence the series of checks and early returns if we are a not
- // passing a struct byval.
+  // is passed byval, hence the series of checks and early returns if we are
+  // not passing a struct byval.
void checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg);
/// BuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
/// Emit an error and return true on failure; return false on success.
- /// TypeStr is a string containing the type descriptor of the value returned by
- /// the builtin and the descriptors of the expected type of the arguments.
+ /// TypeStr is a string containing the type descriptor of the value returned
+ /// by the builtin and the descriptors of the expected type of the arguments.
bool BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
- // Customized Sema Checking for VSX builtins that have the following signature:
- // vector [...] builtinName(vector [...], vector [...], const int);
+ // Customized Sema Checking for VSX builtins that have the following
+ // signature: vector [...] builtinName(vector [...], vector [...], const int);
// Which takes the same type of vectors (any legal vector type) for the first
// two arguments and takes compile time constant for the third argument.
// Example builtins are :
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index 0c101e6a5c00c..0e2fbccb393bd 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -23,7 +23,7 @@ namespace clang {
SemaAMDGPU::SemaAMDGPU(Sema &S) : SemaBase(S) {}
bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
+ CallExpr *TheCall) {
// position of memory order and scope arguments in the builtin
unsigned OrderIndex, ScopeIndex;
switch (BuiltinID) {
@@ -118,7 +118,7 @@ checkAMDGPUFlatWorkGroupSizeArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr,
AMDGPUFlatWorkGroupSizeAttr *
SemaAMDGPU::CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI,
- Expr *MinExpr, Expr *MaxExpr) {
+ Expr *MinExpr, Expr *MaxExpr) {
ASTContext &Context = getASTContext();
AMDGPUFlatWorkGroupSizeAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
@@ -129,13 +129,14 @@ SemaAMDGPU::CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI,
}
void SemaAMDGPU::addAMDGPUFlatWorkGroupSizeAttr(Decl *D,
- const AttributeCommonInfo &CI,
- Expr *MinExpr, Expr *MaxExpr) {
+ const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
if (auto *Attr = CreateAMDGPUFlatWorkGroupSizeAttr(CI, MinExpr, MaxExpr))
D->addAttr(Attr);
}
-void SemaAMDGPU::handleAMDGPUFlatWorkGroupSizeAttr(Decl *D, const ParsedAttr &AL) {
+void SemaAMDGPU::handleAMDGPUFlatWorkGroupSizeAttr(Decl *D,
+ const ParsedAttr &AL) {
Expr *MinExpr = AL.getArgAsExpr(0);
Expr *MaxExpr = AL.getArgAsExpr(1);
@@ -177,8 +178,8 @@ static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr,
}
AMDGPUWavesPerEUAttr *
-SemaAMDGPU::CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI, Expr *MinExpr,
- Expr *MaxExpr) {
+SemaAMDGPU::CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
ASTContext &Context = getASTContext();
AMDGPUWavesPerEUAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
@@ -189,7 +190,7 @@ SemaAMDGPU::CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI, Expr *MinE
}
void SemaAMDGPU::addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
- Expr *MinExpr, Expr *MaxExpr) {
+ Expr *MinExpr, Expr *MaxExpr) {
if (auto *Attr = CreateAMDGPUWavesPerEUAttr(CI, MinExpr, MaxExpr))
D->addAttr(Attr);
}
@@ -210,7 +211,8 @@ void SemaAMDGPU::handleAMDGPUNumSGPRAttr(Decl *D, const ParsedAttr &AL) {
if (!SemaRef.checkUInt32Argument(AL, NumSGPRExpr, NumSGPR))
return;
- D->addAttr(::new (getASTContext()) AMDGPUNumSGPRAttr(getASTContext(), AL, NumSGPR));
+ D->addAttr(::new (getASTContext())
+ AMDGPUNumSGPRAttr(getASTContext(), AL, NumSGPR));
}
void SemaAMDGPU::handleAMDGPUNumVGPRAttr(Decl *D, const ParsedAttr &AL) {
@@ -219,7 +221,8 @@ void SemaAMDGPU::handleAMDGPUNumVGPRAttr(Decl *D, const ParsedAttr &AL) {
if (!SemaRef.checkUInt32Argument(AL, NumVGPRExpr, NumVGPR))
return;
- D->addAttr(::new (getASTContext()) AMDGPUNumVGPRAttr(getASTContext(), AL, NumVGPR));
+ D->addAttr(::new (getASTContext())
+ AMDGPUNumVGPRAttr(getASTContext(), AL, NumVGPR));
}
static bool
@@ -242,7 +245,7 @@ checkAMDGPUMaxNumWorkGroupsArguments(Sema &S, Expr *XExpr, Expr *YExpr,
for (int i = 0; i < 3; i++) {
if (Exprs[i]) {
if (!S.checkUInt32Argument(Attr, Exprs[i], NumWG, i,
- /*StrictlyUnsigned=*/true))
+ /*StrictlyUnsigned=*/true))
return true;
if (NumWG == 0) {
S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
@@ -255,27 +258,29 @@ checkAMDGPUMaxNumWorkGroupsArguments(Sema &S, Expr *XExpr, Expr *YExpr,
return false;
}
-AMDGPUMaxNumWorkGroupsAttr *
-SemaAMDGPU::CreateAMDGPUMaxNumWorkGroupsAttr(const AttributeCommonInfo &CI,
- Expr *XExpr, Expr *YExpr, Expr *ZExpr) {
+AMDGPUMaxNumWorkGroupsAttr *SemaAMDGPU::CreateAMDGPUMaxNumWorkGroupsAttr(
+ const AttributeCommonInfo &CI, Expr *XExpr, Expr *YExpr, Expr *ZExpr) {
ASTContext &Context = getASTContext();
AMDGPUMaxNumWorkGroupsAttr TmpAttr(Context, CI, XExpr, YExpr, ZExpr);
- if (checkAMDGPUMaxNumWorkGroupsArguments(SemaRef, XExpr, YExpr, ZExpr, TmpAttr))
+ if (checkAMDGPUMaxNumWorkGroupsArguments(SemaRef, XExpr, YExpr, ZExpr,
+ TmpAttr))
return nullptr;
return ::new (Context)
AMDGPUMaxNumWorkGroupsAttr(Context, CI, XExpr, YExpr, ZExpr);
}
-void SemaAMDGPU::addAMDGPUMaxNumWorkGroupsAttr(Decl *D, const AttributeCommonInfo &CI,
- Expr *XExpr, Expr *YExpr,
- Expr *ZExpr) {
+void SemaAMDGPU::addAMDGPUMaxNumWorkGroupsAttr(Decl *D,
+ const AttributeCommonInfo &CI,
+ Expr *XExpr, Expr *YExpr,
+ Expr *ZExpr) {
if (auto *Attr = CreateAMDGPUMaxNumWorkGroupsAttr(CI, XExpr, YExpr, ZExpr))
D->addAttr(Attr);
}
-void SemaAMDGPU::handleAMDGPUMaxNumWorkGroupsAttr(Decl *D, const ParsedAttr &AL) {
+void SemaAMDGPU::handleAMDGPUMaxNumWorkGroupsAttr(Decl *D,
+ const ParsedAttr &AL) {
Expr *YExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
Expr *ZExpr = (AL.getNumArgs() > 2) ? AL.getArgAsExpr(2) : nullptr;
addAMDGPUMaxNumWorkGroupsAttr(D, AL, AL.getArgAsExpr(0), YExpr, ZExpr);
diff --git a/clang/lib/Sema/SemaARM.cpp b/clang/lib/Sema/SemaARM.cpp
index ba40b391235f7..c1280190ff1f1 100644
--- a/clang/lib/Sema/SemaARM.cpp
+++ b/clang/lib/Sema/SemaARM.cpp
@@ -21,7 +21,8 @@ namespace clang {
SemaARM::SemaARM(Sema &S) : SemaBase(S) {}
/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
-bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
ASTContext &Context = getASTContext();
if (BuiltinID == AArch64::BI__builtin_arm_irg) {
@@ -36,7 +37,7 @@ bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall)
QualType FirstArgType = FirstArg.get()->getType();
if (!FirstArgType->isAnyPointerType())
return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
+ << "first" << FirstArgType << Arg0->getSourceRange();
TheCall->setArg(0, FirstArg.get());
ExprResult SecArg = SemaRef.DefaultLvalueConversion(Arg1);
@@ -45,7 +46,7 @@ bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall)
QualType SecArgType = SecArg.get()->getType();
if (!SecArgType->isIntegerType())
return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
- << "second" << SecArgType << Arg1->getSourceRange();
+ << "second" << SecArgType << Arg1->getSourceRange();
// Derive the return type from the pointer argument.
TheCall->setType(FirstArgType);
@@ -63,7 +64,7 @@ bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall)
QualType FirstArgType = FirstArg.get()->getType();
if (!FirstArgType->isAnyPointerType())
return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
+ << "first" << FirstArgType << Arg0->getSourceRange();
TheCall->setArg(0, FirstArg.get());
// Derive the return type from the pointer argument.
@@ -85,12 +86,12 @@ bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall)
QualType FirstArgType = FirstArg.get()->getType();
if (!FirstArgType->isAnyPointerType())
return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
+ << "first" << FirstArgType << Arg0->getSourceRange();
QualType SecArgType = Arg1->getType();
if (!SecArgType->isIntegerType())
return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
- << "second" << SecArgType << Arg1->getSourceRange();
+ << "second" << SecArgType << Arg1->getSourceRange();
TheCall->setType(Context.IntTy);
return false;
}
@@ -107,7 +108,7 @@ bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall)
QualType FirstArgType = FirstArg.get()->getType();
if (!FirstArgType->isAnyPointerType())
return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
- << "first" << FirstArgType << Arg0->getSourceRange();
+ << "first" << FirstArgType << Arg0->getSourceRange();
TheCall->setArg(0, FirstArg.get());
// Derive the return type from the pointer argument.
@@ -129,18 +130,19 @@ bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall)
QualType ArgTypeA = ArgExprA.get()->getType();
QualType ArgTypeB = ArgExprB.get()->getType();
- auto isNull = [&] (Expr *E) -> bool {
- return E->isNullPointerConstant(
- Context, Expr::NPC_ValueDependentIsNotNull); };
+ auto isNull = [&](Expr *E) -> bool {
+ return E->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull);
+ };
// argument should be either a pointer or null
if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
- << "first" << ArgTypeA << ArgA->getSourceRange();
+ << "first" << ArgTypeA << ArgA->getSourceRange();
if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
- << "second" << ArgTypeB << ArgB->getSourceRange();
+ << "second" << ArgTypeB << ArgB->getSourceRange();
// Ensure Pointee types are compatible
if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
@@ -148,24 +150,27 @@ bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall)
QualType pointeeA = ArgTypeA->getPointeeType();
QualType pointeeB = ArgTypeB->getPointeeType();
if (!Context.typesAreCompatible(
- Context.getCanonicalType(pointeeA).getUnqualifiedType(),
- Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
- return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
- << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
- << ArgB->getSourceRange();
+ Context.getCanonicalType(pointeeA).getUnqualifiedType(),
+ Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_typecheck_sub_ptr_compatible)
+ << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
+ << ArgB->getSourceRange();
}
}
// at least one argument should be pointer type
if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
- << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
+ << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
if (isNull(ArgA)) // adopt type of the other pointer
- ArgExprA = SemaRef.ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);
+ ArgExprA =
+ SemaRef.ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);
if (isNull(ArgB))
- ArgExprB = SemaRef.ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);
+ ArgExprB =
+ SemaRef.ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);
TheCall->setArg(0, ArgExprA.get());
TheCall->setArg(1, ArgExprB.get());
@@ -179,8 +184,8 @@ bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall)
/// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
- int ArgNum, unsigned ExpectedFieldNum,
- bool AllowName) {
+ int ArgNum, unsigned ExpectedFieldNum,
+ bool AllowName) {
bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
BuiltinID == ARM::BI__builtin_arm_wsr64 ||
BuiltinID == ARM::BI__builtin_arm_rsr ||
@@ -248,7 +253,7 @@ bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
else
Ranges.append({15, 7, 15});
- for (unsigned i=0; i<Fields.size(); ++i) {
+ for (unsigned i = 0; i < Fields.size(); ++i) {
int IntField;
ValidString &= !Fields[i].getAsInteger(10, IntField);
ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
@@ -272,17 +277,17 @@ bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
// These are the named PSTATE accesses using "MSR (immediate)" instructions,
// along with the upper limit on the immediates allowed.
auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
- .CaseLower("spsel", 15)
- .CaseLower("daifclr", 15)
- .CaseLower("daifset", 15)
- .CaseLower("pan", 15)
- .CaseLower("uao", 15)
- .CaseLower("dit", 15)
- .CaseLower("ssbs", 15)
- .CaseLower("tco", 15)
- .CaseLower("allint", 1)
- .CaseLower("pm", 1)
- .Default(std::nullopt);
+ .CaseLower("spsel", 15)
+ .CaseLower("daifclr", 15)
+ .CaseLower("daifset", 15)
+ .CaseLower("pan", 15)
+ .CaseLower("uao", 15)
+ .CaseLower("dit", 15)
+ .CaseLower("ssbs", 15)
+ .CaseLower("tco", 15)
+ .CaseLower("allint", 1)
+ .CaseLower("pm", 1)
+ .Default(std::nullopt);
// If this is not a named PSTATE, just continue without validating, as this
// will be lowered to an "MSR (register)" instruction directly
@@ -457,34 +462,37 @@ bool SemaARM::ParseSVEImmChecks(
break;
case SVETypeFlags::ImmCheckExtract:
if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
- (2048 / ElementSizeInBits) - 1))
+ (2048 / ElementSizeInBits) - 1))
HasError = true;
break;
case SVETypeFlags::ImmCheckShiftRight:
- if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1,
+ ElementSizeInBits))
HasError = true;
break;
case SVETypeFlags::ImmCheckShiftRightNarrow:
- if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits / 2))
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 1,
+ ElementSizeInBits / 2))
HasError = true;
break;
case SVETypeFlags::ImmCheckShiftLeft:
- if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, ElementSizeInBits - 1))
+ if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
+ ElementSizeInBits - 1))
HasError = true;
break;
case SVETypeFlags::ImmCheckLaneIndex:
if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
- (128 / (1 * ElementSizeInBits)) - 1))
+ (128 / (1 * ElementSizeInBits)) - 1))
HasError = true;
break;
case SVETypeFlags::ImmCheckLaneIndexCompRotate:
if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
- (128 / (2 * ElementSizeInBits)) - 1))
+ (128 / (2 * ElementSizeInBits)) - 1))
HasError = true;
break;
case SVETypeFlags::ImmCheckLaneIndexDot:
if (SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0,
- (128 / (4 * ElementSizeInBits)) - 1))
+ (128 / (4 * ElementSizeInBits)) - 1))
HasError = true;
break;
case SVETypeFlags::ImmCheckComplexRot90_270:
@@ -565,7 +573,8 @@ static void checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
BuiltinType = SemaARM::ArmStreaming;
}
- if (FnType == SemaARM::ArmStreaming && BuiltinType == SemaARM::ArmNonStreaming) {
+ if (FnType == SemaARM::ArmStreaming &&
+ BuiltinType == SemaARM::ArmNonStreaming) {
S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
<< TheCall->getSourceRange() << "streaming";
}
@@ -577,7 +586,8 @@ static void checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
return;
}
- if (FnType == SemaARM::ArmNonStreaming && BuiltinType == SemaARM::ArmStreaming) {
+ if (FnType == SemaARM::ArmNonStreaming &&
+ BuiltinType == SemaARM::ArmStreaming) {
S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
<< TheCall->getSourceRange() << "non-streaming";
}
@@ -607,7 +617,8 @@ static ArmSMEState getSMEState(unsigned BuiltinID) {
}
}
-bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
std::optional<ArmStreamingType> BuiltinType;
@@ -645,7 +656,8 @@ bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall)
return ParseSVEImmChecks(TheCall, ImmChecks);
}
-bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
std::optional<ArmStreamingType> BuiltinType;
@@ -672,7 +684,8 @@ bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall)
}
bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID, CallExpr *TheCall) {
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
switch (BuiltinID) {
@@ -697,14 +710,14 @@ bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
bool HasConstPtr = false;
switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
-#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
+#include "clang/Basic/arm_neon.inc"
#undef GET_NEON_OVERLOAD_CHECK
}
// For NEON intrinsics which are overloaded on vector element type, validate
// the immediate which specifies which variant to emit.
- unsigned ImmArg = TheCall->getNumArgs()-1;
+ unsigned ImmArg = TheCall->getNumArgs() - 1;
if (mask) {
if (SemaRef.BuiltinConstantArg(TheCall, ImmArg, Result))
return true;
@@ -728,8 +741,8 @@ bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
Arch == llvm::Triple::aarch64_32 ||
Arch == llvm::Triple::aarch64_be;
bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
- QualType EltTy =
- getNeonEltType(NeonTypeFlags(TV), getASTContext(), IsPolyUnsigned, IsInt64Long);
+ QualType EltTy = getNeonEltType(NeonTypeFlags(TV), getASTContext(),
+ IsPolyUnsigned, IsInt64Long);
if (HasConstPtr)
EltTy = EltTy.withConst();
QualType LHSTy = getASTContext().getPointerType(EltTy);
@@ -737,8 +750,8 @@ bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
ConvTy = SemaRef.CheckSingleAssignmentConstraints(LHSTy, RHS);
if (RHS.isInvalid())
return true;
- if (SemaRef.DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
- RHS.get(), Sema::AA_Assigning))
+ if (SemaRef.DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy,
+ RHSTy, RHS.get(), Sema::AA_Assigning))
return true;
}
@@ -748,25 +761,27 @@ bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
switch (BuiltinID) {
default:
return false;
- #define GET_NEON_IMMEDIATE_CHECK
- #include "clang/Basic/arm_neon.inc"
- #include "clang/Basic/arm_fp16.inc"
- #undef GET_NEON_IMMEDIATE_CHECK
+#define GET_NEON_IMMEDIATE_CHECK
+#include "clang/Basic/arm_fp16.inc"
+#include "clang/Basic/arm_neon.inc"
+#undef GET_NEON_IMMEDIATE_CHECK
}
return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u + l);
}
-bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
switch (BuiltinID) {
default:
return false;
- #include "clang/Basic/arm_mve_builtin_sema.inc"
+#include "clang/Basic/arm_mve_builtin_sema.inc"
}
}
-bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
+bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
bool Err = false;
switch (BuiltinID) {
default:
@@ -781,7 +796,8 @@ bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned Builtin
}
bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
- const Expr *CoprocArg, bool WantCDE) {
+ const Expr *CoprocArg,
+ bool WantCDE) {
ASTContext &Context = getASTContext();
if (SemaRef.isConstantEvaluatedContext())
return false;
@@ -804,8 +820,9 @@ bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
return false;
}
-bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
- unsigned MaxWidth) {
+bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID,
+ CallExpr *TheCall,
+ unsigned MaxWidth) {
assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex ||
BuiltinID == ARM::BI__builtin_arm_strex ||
@@ -821,7 +838,8 @@ bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall
BuiltinID == AArch64::BI__builtin_arm_ldaex;
ASTContext &Context = getASTContext();
- DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ DeclRefExpr *DRE =
+ cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
// Ensure that we have the proper number of arguments.
if (SemaRef.checkArgCount(TheCall, IsLdrex ? 1 : 2))
@@ -832,7 +850,8 @@ bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall
// Because it is a pointer type, we don't have to worry about any implicit
// casts here.
Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
- ExprResult PointerArgRes = SemaRef.DefaultFunctionArrayLvalueConversion(PointerArg);
+ ExprResult PointerArgRes =
+ SemaRef.DefaultFunctionArrayLvalueConversion(PointerArg);
if (PointerArgRes.isInvalid())
return true;
PointerArg = PointerArgRes.get();
@@ -920,8 +939,9 @@ bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall
return false;
}
-bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
+bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex ||
BuiltinID == ARM::BI__builtin_arm_strex ||
@@ -955,7 +975,8 @@ bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned Builtin
// range check them here.
// FIXME: VFP Intrinsics should error if VFP not present.
switch (BuiltinID) {
- default: return false;
+ default:
+ return false;
case ARM::BI__builtin_arm_ssat:
return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 32);
case ARM::BI__builtin_arm_usat:
@@ -997,8 +1018,8 @@ bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned Builtin
}
bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex ||
BuiltinID == AArch64::BI__builtin_arm_strex ||
diff --git a/clang/lib/Sema/SemaBPF.cpp b/clang/lib/Sema/SemaBPF.cpp
index 7e9e9736b6e8f..bde1a26f1ebc0 100644
--- a/clang/lib/Sema/SemaBPF.cpp
+++ b/clang/lib/Sema/SemaBPF.cpp
@@ -108,7 +108,7 @@ static bool isValidPreserveEnumValueArg(Expr *Arg) {
}
bool SemaBPF::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
+ CallExpr *TheCall) {
assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
BuiltinID == BPF::BI__builtin_btf_type_id ||
BuiltinID == BPF::BI__builtin_preserve_type_info ||
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 555b65a921533..776357ef397be 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -2301,7 +2301,8 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
return RISCV().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::loongarch32:
case llvm::Triple::loongarch64:
- return LoongArch().CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return LoongArch().CheckLoongArchBuiltinFunctionCall(TI, BuiltinID,
+ TheCall);
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
return Wasm().CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index cf0c69bc5193c..2b02883dd6303 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -294,7 +294,8 @@ static bool checkFunctionOrMethodParameterIndex(
}
if (HasImplicitThisParam && !CanIndexImplicitThis) {
if (IdxSource == 1) {
- S.Diag(S.getAttrLoc(AI), diag::err_attribute_invalid_implicit_this_argument)
+ S.Diag(S.getAttrLoc(AI),
+ diag::err_attribute_invalid_implicit_this_argument)
<< &AI << IdxExpr->getSourceRange();
return false;
}
@@ -3255,7 +3256,7 @@ static void handleWorkGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
for (unsigned i = 0; i < 3; ++i) {
const Expr *E = AL.getArgAsExpr(i);
if (!S.checkUInt32Argument(AL, E, WGSize[i], i,
- /*StrictlyUnsigned=*/true))
+ /*StrictlyUnsigned=*/true))
return;
if (WGSize[i] == 0) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_is_zero)
@@ -4185,7 +4186,7 @@ static void handleCallbackAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// If the expression is not parseable as an int32_t we have a problem.
if (!S.checkUInt32Argument(AL, IdxExpr, (uint32_t &)ArgIdx, I + 1,
- false)) {
+ false)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
<< AL << (I + 1) << IdxExpr->getSourceRange();
return;
diff --git a/clang/lib/Sema/SemaHexagon.cpp b/clang/lib/Sema/SemaHexagon.cpp
index e18da784fd078..5c921c0bc9e33 100644
--- a/clang/lib/Sema/SemaHexagon.cpp
+++ b/clang/lib/Sema/SemaHexagon.cpp
@@ -21,7 +21,8 @@ namespace clang {
SemaHexagon::SemaHexagon(Sema &S) : SemaBase(S) {}
-bool SemaHexagon::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
+bool SemaHexagon::CheckHexagonBuiltinArgument(unsigned BuiltinID,
+ CallExpr *TheCall) {
struct ArgInfo {
uint8_t OpNum;
bool IsSigned;
@@ -248,9 +249,9 @@ bool SemaHexagon::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheC
// first run.
static const bool SortOnce =
(llvm::sort(Infos,
- [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
- return LHS.BuiltinID < RHS.BuiltinID;
- }),
+ [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
+ return LHS.BuiltinID < RHS.BuiltinID;
+ }),
true);
(void)SortOnce;
@@ -282,7 +283,7 @@ bool SemaHexagon::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheC
}
bool SemaHexagon::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
+ CallExpr *TheCall) {
return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}
diff --git a/clang/lib/Sema/SemaLoongArch.cpp b/clang/lib/Sema/SemaLoongArch.cpp
index e373f6ba1a06c..0a67bf2c77386 100644
--- a/clang/lib/Sema/SemaLoongArch.cpp
+++ b/clang/lib/Sema/SemaLoongArch.cpp
@@ -20,8 +20,8 @@ namespace clang {
SemaLoongArch::SemaLoongArch(Sema &S) : SemaBase(S) {}
bool SemaLoongArch::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
switch (BuiltinID) {
default:
break;
@@ -29,7 +29,8 @@ bool SemaLoongArch::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
case LoongArch::BI__builtin_loongarch_cacop_d:
case LoongArch::BI__builtin_loongarch_cacop_w: {
SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5));
- SemaRef.BuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12), llvm::maxIntN(12));
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12),
+ llvm::maxIntN(12));
break;
}
case LoongArch::BI__builtin_loongarch_break:
@@ -511,5 +512,4 @@ bool SemaLoongArch::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
return false;
}
-
} // namespace clang
diff --git a/clang/lib/Sema/SemaMIPS.cpp b/clang/lib/Sema/SemaMIPS.cpp
index df329e9539f27..df5328fbf6640 100644
--- a/clang/lib/Sema/SemaMIPS.cpp
+++ b/clang/lib/Sema/SemaMIPS.cpp
@@ -20,13 +20,14 @@ namespace clang {
SemaMIPS::SemaMIPS(Sema &S) : SemaBase(S) {}
bool SemaMIPS::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID, CallExpr *TheCall) {
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
CheckMipsBuiltinArgument(BuiltinID, TheCall);
}
bool SemaMIPS::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
+ CallExpr *TheCall) {
if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
BuiltinID <= Mips::BI__builtin_mips_lwx) {
diff --git a/clang/lib/Sema/SemaNVPTX.cpp b/clang/lib/Sema/SemaNVPTX.cpp
index 778b0c2c2f306..cc8941071463d 100644
--- a/clang/lib/Sema/SemaNVPTX.cpp
+++ b/clang/lib/Sema/SemaNVPTX.cpp
@@ -1,4 +1,4 @@
-//===------ SemaNVPTX.cpp -------- NVPTX target-specific routines -----------===//
+//===------ SemaNVPTX.cpp ------- NVPTX target-specific routines ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -19,8 +19,8 @@ namespace clang {
SemaNVPTX::SemaNVPTX(Sema &S) : SemaBase(S) {}
bool SemaNVPTX::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
switch (BuiltinID) {
case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
diff --git a/clang/lib/Sema/SemaPPC.cpp b/clang/lib/Sema/SemaPPC.cpp
index 0e5005de39c82..99f46b12e6968 100644
--- a/clang/lib/Sema/SemaPPC.cpp
+++ b/clang/lib/Sema/SemaPPC.cpp
@@ -43,8 +43,8 @@ void SemaPPC::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
for (const FieldDecl *FD :
ArgType->castAs<RecordType>()->getDecl()->fields()) {
if (const auto *AA = FD->getAttr<AlignedAttr>()) {
- CharUnits Alignment =
- getASTContext().toCharUnitsFromBits(AA->getAlignment(getASTContext()));
+ CharUnits Alignment = getASTContext().toCharUnitsFromBits(
+ AA->getAlignment(getASTContext()));
if (Alignment.getQuantity() == 16) {
Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
Diag(Loc, diag::note_misaligned_member_used_here) << PD;
@@ -89,8 +89,9 @@ static bool isPPC_64Builtin(unsigned BuiltinID) {
return false;
}
-bool SemaPPC::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall) {
+bool SemaPPC::CheckPPCBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
ASTContext &Context = getASTContext();
unsigned i = 0, l = 0, u = 0;
bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
@@ -101,7 +102,8 @@ bool SemaPPC::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned Builtin
<< TheCall->getSourceRange();
switch (BuiltinID) {
- default: return false;
+ default:
+ return false;
case PPC::BI__builtin_altivec_crypto_vshasigmaw:
case PPC::BI__builtin_altivec_crypto_vshasigmad:
return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
@@ -260,7 +262,7 @@ bool SemaPPC::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
if (false
#include "clang/Basic/PPCTypes.def"
- ) {
+ ) {
Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
return true;
}
@@ -296,10 +298,13 @@ static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
Str = End;
QualType Type;
switch (size) {
- #define PPC_VECTOR_TYPE(typeName, Id, size) \
- case size: Type = Context.Id##Ty; break;
- #include "clang/Basic/PPCTypes.def"
- default: llvm_unreachable("Invalid PowerPC MMA vector type");
+#define PPC_VECTOR_TYPE(typeName, Id, size) \
+ case size: \
+ Type = Context.Id##Ty; \
+ break;
+#include "clang/Basic/PPCTypes.def"
+ default:
+ llvm_unreachable("Invalid PowerPC MMA vector type");
}
bool CheckVectorArgs = false;
while (!CheckVectorArgs) {
@@ -324,7 +329,7 @@ static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
}
bool SemaPPC::BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
- const char *TypeStr) {
+ const char *TypeStr) {
assert((TypeStr[0] != '\0') &&
"Invalid types in PPC MMA builtin declaration");
@@ -368,7 +373,8 @@ bool SemaPPC::BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
// If the value of the Mask is not 0, we have a constraint in the size of
// the integer argument so here we ensure the argument is a constant that
// is in the valid range.
- if (Mask != 0 && SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
+ if (Mask != 0 &&
+ SemaRef.BuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
return true;
ArgNum++;
@@ -379,7 +385,7 @@ bool SemaPPC::BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
// number of arguments in TheCall and if it is not the case, to display a
// better error message.
while (*TypeStr != '\0') {
- (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
+ (void)DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
ArgNum++;
}
if (SemaRef.checkArgCount(TheCall, ArgNum))
diff --git a/clang/lib/Sema/SemaSystemZ.cpp b/clang/lib/Sema/SemaSystemZ.cpp
index 2a6fb6114446b..717093b0665f9 100644
--- a/clang/lib/Sema/SemaSystemZ.cpp
+++ b/clang/lib/Sema/SemaSystemZ.cpp
@@ -22,7 +22,7 @@ namespace clang {
SemaSystemZ::SemaSystemZ(Sema &S) : SemaBase(S) {}
bool SemaSystemZ::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
- CallExpr *TheCall) {
+ CallExpr *TheCall) {
if (BuiltinID == SystemZ::BI__builtin_tabort) {
Expr *Arg = TheCall->getArg(0);
if (std::optional<llvm::APSInt> AbortCode =
diff --git a/clang/lib/Sema/SemaWasm.cpp b/clang/lib/Sema/SemaWasm.cpp
index 730fad1dbe208..2f2906c739d2e 100644
--- a/clang/lib/Sema/SemaWasm.cpp
+++ b/clang/lib/Sema/SemaWasm.cpp
@@ -216,8 +216,8 @@ bool SemaWasm::BuiltinWasmTableCopy(CallExpr *TheCall) {
}
bool SemaWasm::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_ref_null_extern:
return BuiltinWasmRefNullExtern(TheCall);
@@ -241,14 +241,15 @@ bool SemaWasm::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
}
WebAssemblyImportModuleAttr *
-SemaWasm::mergeImportModuleAttr(Decl *D, const WebAssemblyImportModuleAttr &AL) {
+SemaWasm::mergeImportModuleAttr(Decl *D,
+ const WebAssemblyImportModuleAttr &AL) {
auto *FD = cast<FunctionDecl>(D);
if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
if (ExistingAttr->getImportModule() == AL.getImportModule())
return nullptr;
- Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 0
- << ExistingAttr->getImportModule() << AL.getImportModule();
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import)
+ << 0 << ExistingAttr->getImportModule() << AL.getImportModule();
Diag(AL.getLoc(), diag::note_previous_attribute);
return nullptr;
}
@@ -256,8 +257,8 @@ SemaWasm::mergeImportModuleAttr(Decl *D, const WebAssemblyImportModuleAttr &AL)
Diag(AL.getLoc(), diag::warn_import_on_definition) << 0;
return nullptr;
}
- return ::new (getASTContext()) WebAssemblyImportModuleAttr(getASTContext(), AL,
- AL.getImportModule());
+ return ::new (getASTContext())
+ WebAssemblyImportModuleAttr(getASTContext(), AL, AL.getImportModule());
}
WebAssemblyImportNameAttr *
@@ -267,8 +268,8 @@ SemaWasm::mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL) {
if (const auto *ExistingAttr = FD->getAttr<WebAssemblyImportNameAttr>()) {
if (ExistingAttr->getImportName() == AL.getImportName())
return nullptr;
- Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import) << 1
- << ExistingAttr->getImportName() << AL.getImportName();
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_import)
+ << 1 << ExistingAttr->getImportName() << AL.getImportName();
Diag(AL.getLoc(), diag::note_previous_attribute);
return nullptr;
}
@@ -276,11 +277,12 @@ SemaWasm::mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL) {
Diag(AL.getLoc(), diag::warn_import_on_definition) << 1;
return nullptr;
}
- return ::new (getASTContext()) WebAssemblyImportNameAttr(getASTContext(), AL,
- AL.getImportName());
+ return ::new (getASTContext())
+ WebAssemblyImportNameAttr(getASTContext(), AL, AL.getImportName());
}
-void SemaWasm::handleWebAssemblyImportModuleAttr(Decl *D, const ParsedAttr &AL) {
+void SemaWasm::handleWebAssemblyImportModuleAttr(Decl *D,
+ const ParsedAttr &AL) {
auto *FD = cast<FunctionDecl>(D);
StringRef Str;
@@ -308,7 +310,8 @@ void SemaWasm::handleWebAssemblyImportNameAttr(Decl *D, const ParsedAttr &AL) {
return;
}
- FD->addAttr(::new (getASTContext()) WebAssemblyImportNameAttr(getASTContext(), AL, Str));
+ FD->addAttr(::new (getASTContext())
+ WebAssemblyImportNameAttr(getASTContext(), AL, Str));
}
void SemaWasm::handleWebAssemblyExportNameAttr(Decl *D, const ParsedAttr &AL) {
diff --git a/clang/utils/TableGen/MveEmitter.cpp b/clang/utils/TableGen/MveEmitter.cpp
index 70d3dd42eb71d..bb4f091604f5e 100644
--- a/clang/utils/TableGen/MveEmitter.cpp
+++ b/clang/utils/TableGen/MveEmitter.cpp
@@ -928,8 +928,8 @@ class ACLEIntrinsic {
llvm::APInt ArgTypeRange = llvm::APInt::getMaxValue(ArgTypeBits).zext(128);
llvm::APInt ActualRange = (hi-lo).trunc(64).sext(128);
if (ActualRange.ult(ArgTypeRange))
- SemaChecks.push_back("SemaRef.BuiltinConstantArgRange(TheCall, " + Index +
- ", " + signedHexLiteral(lo) + ", " +
+ SemaChecks.push_back("SemaRef.BuiltinConstantArgRange(TheCall, " +
+ Index + ", " + signedHexLiteral(lo) + ", " +
signedHexLiteral(hi) + ")");
if (!IA.ExtraCheckType.empty()) {
@@ -943,8 +943,9 @@ class ACLEIntrinsic {
}
Suffix = (Twine(", ") + Arg).str();
}
- SemaChecks.push_back((Twine("SemaRef.BuiltinConstantArg") + IA.ExtraCheckType +
- "(TheCall, " + Index + Suffix + ")")
+ SemaChecks.push_back((Twine("SemaRef.BuiltinConstantArg") +
+ IA.ExtraCheckType + "(TheCall, " + Index +
+ Suffix + ")")
.str());
}
>From d0a080e6720b9c6d92384b81cf9f8628a5309980 Mon Sep 17 00:00:00 2001
From: Vlad Serebrennikov <serebrennikov.vladislav at gmail.com>
Date: Thu, 23 May 2024 15:54:21 +0300
Subject: [PATCH 3/4] Add new files to the PR labeler config
---
.github/new-prs-labeler.yml | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/.github/new-prs-labeler.yml b/.github/new-prs-labeler.yml
index a57ba28faf160..995e2f615f162 100644
--- a/.github/new-prs-labeler.yml
+++ b/.github/new-prs-labeler.yml
@@ -746,6 +746,8 @@ backend:ARM:
- clang/lib/CodeGen/Targets/ARM.cpp
- clang/include/clang/Basic/BuiltinsARM*
- llvm/test/MC/DisasemblerARM/**
+ - clang/include/clang/Sema/SemaARM.h
+ - clang/lib/Sema/SemaARM.cpp
backend:AArch64:
- llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -757,6 +759,8 @@ backend:AArch64:
- clang/lib/CodeGen/Targets/AArch64.cpp
- clang/include/clang/Basic/BuiltinsAArch64*
- llvm/test/MC/Disassembler/AArch64/**
+ - clang/include/clang/Sema/SemaARM.h
+ - clang/lib/Sema/SemaARM.cpp
backend:loongarch:
- llvm/include/llvm/IR/IntrinsicsLoongArch.td
@@ -767,6 +771,8 @@ backend:loongarch:
- clang/lib/Driver/ToolChains/Arch/LoongArch.*
- clang/lib/CodeGen/Targets/LoongArch.cpp
- clang/include/clang/Basic/BuiltinsLoongArch*
+ - clang/include/clang/Sema/SemaLoongArch.h
+ - clang/lib/Sema/SemaLoongArch.cpp
backend:MSP430:
- llvm/include/llvm/IR/IntrinsicsMSP430.td
@@ -814,6 +820,8 @@ backend:WebAssembly:
- llvm/unittests/Target/WebAssembly/**
- llvm/test/DebugInfo/WebAssembly/**
- llvm/test/MC/WebAssembly/**
+ - clang/include/clang/Sema/SemaWasm.h
+ - clang/lib/Sema/SemaWasm.cpp
backend:X86:
- llvm/include/llvm/IR/IntrinsicsX86.td
@@ -833,6 +841,8 @@ backend:X86:
- llvm/include/llvm/TargetParser/X86*
- llvm/lib/TargetParser/X86*
- llvm/utils/TableGen/X86*
+ - clang/include/clang/Sema/SemaX86.h
+ - clang/lib/Sema/SemaX86.cpp
backend:PowerPC:
- llvm/include/llvm/BinaryFormat/ELFRelocs/PowerPC*
@@ -857,6 +867,8 @@ backend:PowerPC:
- clang/lib/Driver/ToolChains/AIX*
- clang/lib/Driver/ToolChains/Arch/PPC.*
- clang/test/CodeGen/PowerPC/**
+ - clang/include/clang/Sema/SemaPPC.h
+ - clang/lib/Sema/SemaPPC.cpp
backend:SystemZ:
- llvm/include/llvm/BinaryFormat/ELFRelocs/SystemZ*
@@ -877,6 +889,8 @@ backend:SystemZ:
- clang/lib/Driver/ToolChains/ZOS*
- clang/lib/Driver/ToolChains/Arch/SystemZ.*
- clang/test/CodeGen/SystemZ/**
+ - clang/include/clang/Sema/SemaSystemZ.h
+ - clang/lib/Sema/SemaSystemZ.cpp
third-party:unittests:
- third-party/unittests/**
>From 70b112ca0a484d96188640ab8d2dbee391f2fab2 Mon Sep 17 00:00:00 2001
From: Vlad Serebrennikov <serebrennikov.vladislav at gmail.com>
Date: Fri, 24 May 2024 17:03:20 +0300
Subject: [PATCH 4/4] Fix description of SemaSystemZ.cpp
---
clang/lib/Sema/SemaSystemZ.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/Sema/SemaSystemZ.cpp b/clang/lib/Sema/SemaSystemZ.cpp
index 717093b0665f9..7e836adbee659 100644
--- a/clang/lib/Sema/SemaSystemZ.cpp
+++ b/clang/lib/Sema/SemaSystemZ.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements semantic analysis functions specific to AIX.
+// This file implements semantic analysis functions specific to SystemZ.
//
//===----------------------------------------------------------------------===//