[clang] [clang] Introduce `SemaRISCV` (PR #92682)
Vlad Serebrennikov via cfe-commits
cfe-commits at lists.llvm.org
Tue May 21 00:40:08 PDT 2024
https://github.com/Endilll updated https://github.com/llvm/llvm-project/pull/92682
From 029a38a838b37d463ca647a5eddd12c9ffcbea8c Mon Sep 17 00:00:00 2001
From: Vlad Serebrennikov <serebrennikov.vladislav at gmail.com>
Date: Sun, 19 May 2024 12:29:01 +0300
Subject: [PATCH 1/5] [clang] Introduce `SemaRISCV`
---
clang/include/clang/Sema/Sema.h | 65 +-
clang/include/clang/Sema/SemaRISCV.h | 52 +
clang/lib/Parse/ParsePragma.cpp | 5 +-
clang/lib/Sema/CMakeLists.txt | 2 +-
clang/lib/Sema/Sema.cpp | 4 +-
clang/lib/Sema/SemaCast.cpp | 5 +-
clang/lib/Sema/SemaChecking.cpp | 1041 ++--------------
clang/lib/Sema/SemaDecl.cpp | 3 +-
clang/lib/Sema/SemaExpr.cpp | 21 -
clang/lib/Sema/SemaLookup.cpp | 11 +-
clang/lib/Sema/SemaRISCV.cpp | 1425 ++++++++++++++++++++++
clang/lib/Sema/SemaRISCVVectorLookup.cpp | 504 --------
12 files changed, 1595 insertions(+), 1543 deletions(-)
create mode 100644 clang/include/clang/Sema/SemaRISCV.h
create mode 100644 clang/lib/Sema/SemaRISCV.cpp
delete mode 100644 clang/lib/Sema/SemaRISCVVectorLookup.cpp
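
For readers skimming the diffstat: a minimal, hypothetical usage sketch (not
part of the patch, and ignored by `git am`) of the interface this change
introduces. RISC-V-specific Sema state and checks that previously lived
directly on `Sema` are now reached through the `Sema::RISCV()` accessor,
mirroring the existing `OpenMP()`/`SYCL()` subsystems. The helper name
`enableRVVBuiltins` below is invented for illustration only.

  #include "clang/Sema/Sema.h"
  #include "clang/Sema/SemaRISCV.h"

  // Sketch only: flip the RVV builtin flag through the new subsystem object.
  void enableRVVBuiltins(clang::Sema &Actions) {
    // Before this patch: Actions.DeclareRISCVVBuiltins = true;
    Actions.RISCV().DeclareRVVBuiltins = true;
    // Target-specific builtin checking is forwarded the same way, e.g.
    //   Actions.RISCV().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
  }
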
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index d4d4a82525a02..8a2427d26a104 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -173,6 +173,7 @@ class SemaHLSL;
class SemaObjC;
class SemaOpenACC;
class SemaOpenMP;
+class SemaRISCV;
class SemaSYCL;
class StandardConversionSequence;
class Stmt;
@@ -1014,6 +1015,11 @@ class Sema final : public SemaBase {
return *OpenMPPtr;
}
+ SemaRISCV &RISCV() {
+ assert(RISCVPtr);
+ return *RISCVPtr;
+ }
+
SemaSYCL &SYCL() {
assert(SYCLPtr);
return *SYCLPtr;
@@ -1055,6 +1061,7 @@ class Sema final : public SemaBase {
std::unique_ptr<SemaObjC> ObjCPtr;
std::unique_ptr<SemaOpenACC> OpenACCPtr;
std::unique_ptr<SemaOpenMP> OpenMPPtr;
+ std::unique_ptr<SemaRISCV> RISCVPtr;
std::unique_ptr<SemaSYCL> SYCLPtr;
///@}
@@ -2030,6 +2037,23 @@ class Sema final : public SemaBase {
void CheckConstrainedAuto(const AutoType *AutoT, SourceLocation Loc);
+ bool BuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result);
+ bool BuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High,
+ bool RangeIsError = true);
+ bool BuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
+ unsigned Multiple);
+ bool BuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
+ bool BuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
+ unsigned ArgBits);
+ bool BuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
+ unsigned ArgBits);
+
+ bool checkArgCountAtLeast(CallExpr *Call, unsigned MinArgCount);
+ bool checkArgCountAtMost(CallExpr *Call, unsigned MaxArgCount);
+ bool checkArgCountRange(CallExpr *Call, unsigned MinArgCount,
+ unsigned MaxArgCount);
+ bool checkArgCount(CallExpr *Call, unsigned DesiredArgCount);
+
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE = nullptr,
@@ -2098,11 +2122,7 @@ class Sema final : public SemaBase {
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
- bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
- void checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
- const llvm::StringMap<bool> &FeatureMap);
+
bool CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
@@ -2132,16 +2152,6 @@ class Sema final : public SemaBase {
ExprResult BuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult AtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
- bool BuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result);
- bool BuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High,
- bool RangeIsError = true);
- bool BuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
- unsigned Multiple);
- bool BuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
- bool BuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
- unsigned ArgBits);
- bool BuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
- unsigned ArgBits);
bool BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum,
unsigned ExpectedFieldNum, bool AllowName);
bool BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
@@ -5885,7 +5895,6 @@ class Sema final : public SemaBase {
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
- bool isValidRVVBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
@@ -11700,27 +11709,6 @@ class Sema final : public SemaBase {
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
- ///@}
- //
- //
- // -------------------------------------------------------------------------
- //
- //
-
- /// \name Name Lookup for RISC-V Vector Intrinsic
- /// Implementations are in SemaRISCVVectorLookup.cpp
- ///@{
-
-public:
- /// Indicate RISC-V vector builtin functions enabled or not.
- bool DeclareRISCVVBuiltins = false;
-
- /// Indicate RISC-V SiFive vector builtin functions enabled or not.
- bool DeclareRISCVSiFiveVectorBuiltins = false;
-
-private:
- std::unique_ptr<sema::RISCVIntrinsicManager> RVIntrinsicManager;
-
///@}
};
@@ -11743,9 +11731,6 @@ void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
-
-std::unique_ptr<sema::RISCVIntrinsicManager>
-CreateRISCVIntrinsicManager(Sema &S);
} // end namespace clang
#endif
diff --git a/clang/include/clang/Sema/SemaRISCV.h b/clang/include/clang/Sema/SemaRISCV.h
new file mode 100644
index 0000000000000..e71c999e15513
--- /dev/null
+++ b/clang/include/clang/Sema/SemaRISCV.h
@@ -0,0 +1,52 @@
+//===----- SemaRISCV.h ------- RISC-V target-specific routines ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis functions specific to RISC-V.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMARISCV_H
+#define LLVM_CLANG_SEMA_SEMARISCV_H
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/RISCVIntrinsicManager.h"
+#include "clang/Sema/SemaBase.h"
+#include "llvm/ADT/StringMap.h"
+#include <memory>
+
+namespace clang {
+class SemaRISCV : public SemaBase {
+public:
+ SemaRISCV(Sema &S);
+
+ bool CheckLMUL(CallExpr *TheCall, unsigned ArgNum);
+ bool CheckBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
+ void checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
+ const llvm::StringMap<bool> &FeatureMap);
+
+ bool isValidRVVBitcast(QualType srcType, QualType destType);
+
+ /// Indicate RISC-V vector builtin functions enabled or not.
+ bool DeclareRVVBuiltins = false;
+
+ /// Indicate RISC-V SiFive vector builtin functions enabled or not.
+ bool DeclareSiFiveVectorBuiltins = false;
+
+ std::unique_ptr<sema::RISCVIntrinsicManager> IntrinsicManager;
+};
+
+std::unique_ptr<sema::RISCVIntrinsicManager>
+CreateRISCVIntrinsicManager(Sema &S);
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMARISCV_H
diff --git a/clang/lib/Parse/ParsePragma.cpp b/clang/lib/Parse/ParsePragma.cpp
index 643fdac287d18..cc6f18b5b319f 100644
--- a/clang/lib/Parse/ParsePragma.cpp
+++ b/clang/lib/Parse/ParsePragma.cpp
@@ -23,6 +23,7 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaCodeCompletion.h"
+#include "clang/Sema/SemaRISCV.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include <optional>
@@ -4154,7 +4155,7 @@ void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
}
if (II->isStr("vector"))
- Actions.DeclareRISCVVBuiltins = true;
+ Actions.RISCV().DeclareRVVBuiltins = true;
else if (II->isStr("sifive_vector"))
- Actions.DeclareRISCVSiFiveVectorBuiltins = true;
+ Actions.RISCV().DeclareSiFiveVectorBuiltins = true;
}
diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt
index 58e0a3b9679b7..6b7742cae2db9 100644
--- a/clang/lib/Sema/CMakeLists.txt
+++ b/clang/lib/Sema/CMakeLists.txt
@@ -60,7 +60,7 @@ add_clang_library(clangSema
SemaOpenMP.cpp
SemaOverload.cpp
SemaPseudoObject.cpp
- SemaRISCVVectorLookup.cpp
+ SemaRISCV.cpp
SemaStmt.cpp
SemaStmtAsm.cpp
SemaStmtAttr.cpp
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index f847c49920cf3..12e5e27cfec45 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -49,6 +49,7 @@
#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/SemaOpenACC.h"
#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaRISCV.h"
#include "clang/Sema/SemaSYCL.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
@@ -210,6 +211,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
ObjCPtr(std::make_unique<SemaObjC>(*this)),
OpenACCPtr(std::make_unique<SemaOpenACC>(*this)),
OpenMPPtr(std::make_unique<SemaOpenMP>(*this)),
+ RISCVPtr(std::make_unique<SemaRISCV>(*this)),
SYCLPtr(std::make_unique<SemaSYCL>(*this)),
MSPointerToMemberRepresentationMethod(
LangOpts.getMSPointerToMemberRepresentationMethod()),
@@ -2049,7 +2051,7 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) {
llvm::StringMap<bool> CallerFeatureMap;
Context.getFunctionFeatureMap(CallerFeatureMap, FD);
- checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap);
+ RISCV().checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap);
}
// Don't allow SVE types in functions without a SVE target.
diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp
index 483ec7e36eaed..7db6b1dfe923b 100644
--- a/clang/lib/Sema/SemaCast.cpp
+++ b/clang/lib/Sema/SemaCast.cpp
@@ -25,6 +25,7 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaRISCV.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include <set>
@@ -2391,7 +2392,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
}
// Allow bitcasting between SVE VLATs and VLSTs, and vice-versa.
- if (Self.isValidRVVBitcast(SrcType, DestType)) {
+ if (Self.RISCV().isValidRVVBitcast(SrcType, DestType)) {
Kind = CK_BitCast;
return TC_Success;
}
@@ -3002,7 +3003,7 @@ void CastOperation::CheckCStyleCast() {
// Allow bitcasting between compatible RVV vector types.
if ((SrcType->isVectorType() || DestType->isVectorType()) &&
- Self.isValidRVVBitcast(SrcType, DestType)) {
+ Self.RISCV().isValidRVVBitcast(SrcType, DestType)) {
Kind = CK_BitCast;
return;
}
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index f2dc8e9dd0050..5d8cbe7e32048 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -63,6 +63,7 @@
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaObjC.h"
+#include "clang/Sema/SemaRISCV.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
@@ -120,13 +121,13 @@ static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
/// Checks that a call expression's argument count is at least the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
-static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
+bool Sema::checkArgCountAtLeast(CallExpr *Call,
unsigned MinArgCount) {
unsigned ArgCount = Call->getNumArgs();
if (ArgCount >= MinArgCount)
return false;
- return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
+ return Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
<< 0 /*function call*/ << MinArgCount << ArgCount
<< /*is non object*/ 0 << Call->getSourceRange();
}
@@ -134,11 +135,11 @@ static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
/// Checks that a call expression's argument count is at most the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
-static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
+bool Sema::checkArgCountAtMost(CallExpr *Call, unsigned MaxArgCount) {
unsigned ArgCount = Call->getNumArgs();
if (ArgCount <= MaxArgCount)
return false;
- return S.Diag(Call->getEndLoc(),
+ return Diag(Call->getEndLoc(),
diag::err_typecheck_call_too_many_args_at_most)
<< 0 /*function call*/ << MaxArgCount << ArgCount
<< /*is non object*/ 0 << Call->getSourceRange();
@@ -147,20 +148,20 @@ static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
/// Checks that a call expression's argument count is in the desired range. This
/// is useful when doing custom type-checking on a variadic function. Returns
/// true on error.
-static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount,
+bool Sema::checkArgCountRange(CallExpr *Call, unsigned MinArgCount,
unsigned MaxArgCount) {
- return checkArgCountAtLeast(S, Call, MinArgCount) ||
- checkArgCountAtMost(S, Call, MaxArgCount);
+ return checkArgCountAtLeast(Call, MinArgCount) ||
+ checkArgCountAtMost(Call, MaxArgCount);
}
/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
-static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
+bool Sema::checkArgCount(CallExpr *Call, unsigned DesiredArgCount) {
unsigned ArgCount = Call->getNumArgs();
if (ArgCount == DesiredArgCount)
return false;
- if (checkArgCountAtLeast(S, Call, DesiredArgCount))
+ if (checkArgCountAtLeast(Call, DesiredArgCount))
return true;
assert(ArgCount > DesiredArgCount && "should have diagnosed this");
@@ -168,7 +169,7 @@ static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
Call->getArg(ArgCount - 1)->getEndLoc());
- return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
+ return Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
<< 0 /*function call*/ << DesiredArgCount << ArgCount
<< /*is non object*/ 0 << Call->getArg(1)->getSourceRange();
}
@@ -190,7 +191,7 @@ static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) {
/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool BuiltinAnnotation(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 2))
+ if (S.checkArgCount(TheCall, 2))
return true;
// First argument should be an integer.
@@ -240,7 +241,7 @@ static bool BuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool BuiltinAddressof(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return true;
ExprResult Arg(TheCall->getArg(0));
@@ -255,7 +256,7 @@ static bool BuiltinAddressof(Sema &S, CallExpr *TheCall) {
/// Check that the argument to __builtin_function_start is a function.
static bool BuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return true;
ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
@@ -279,7 +280,7 @@ static bool BuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
/// Check the number of arguments and set the result type to
/// the argument type.
static bool BuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return true;
TheCall->setType(TheCall->getArg(0)->getType());
@@ -290,7 +291,7 @@ static bool BuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool BuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
- if (checkArgCount(S, TheCall, 2))
+ if (S.checkArgCount(TheCall, 2))
return true;
clang::Expr *Source = TheCall->getArg(0);
@@ -368,7 +369,7 @@ static bool BuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
}
static bool BuiltinOverflow(Sema &S, CallExpr *TheCall, unsigned BuiltinID) {
- if (checkArgCount(S, TheCall, 3))
+ if (S.checkArgCount(TheCall, 3))
return true;
std::pair<unsigned, const char *> Builtins[] = {
@@ -696,7 +697,7 @@ struct BuiltinDumpStructGenerator {
} // namespace
static ExprResult BuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
- if (checkArgCountAtLeast(S, TheCall, 2))
+ if (S.checkArgCountAtLeast(TheCall, 2))
return ExprError();
ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
@@ -762,7 +763,7 @@ static ExprResult BuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
}
static bool BuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
- if (checkArgCount(S, BuiltinCall, 2))
+ if (S.checkArgCount(BuiltinCall, 2))
return true;
SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
@@ -1504,7 +1505,7 @@ static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
}
static bool OpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 2))
+ if (S.checkArgCount(TheCall, 2))
return true;
if (checkOpenCLSubgroupExt(S, TheCall))
@@ -1531,7 +1532,7 @@ static bool OpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool OpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return true;
Expr *BlockArg = TheCall->getArg(0);
@@ -1861,7 +1862,7 @@ static bool BuiltinRWPipe(Sema &S, CallExpr *Call) {
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool BuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
+ if (S.checkArgCount(Call, 2))
return true;
if (checkOpenCLPipeArg(S, Call))
@@ -1890,7 +1891,7 @@ static bool BuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool BuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
+ if (S.checkArgCount(Call, 2))
return true;
if (checkOpenCLPipeArg(S, Call))
@@ -1913,7 +1914,7 @@ static bool BuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool BuiltinPipePackets(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 1))
+ if (S.checkArgCount(Call, 1))
return true;
if (!Call->getArg(0)->getType()->isPipeType()) {
@@ -1932,7 +1933,7 @@ static bool BuiltinPipePackets(Sema &S, CallExpr *Call) {
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool OpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, CallExpr *Call) {
- if (checkArgCount(S, Call, 1))
+ if (S.checkArgCount(Call, 1))
return true;
auto RT = Call->getArg(0)->getType();
@@ -2087,7 +2088,7 @@ static bool checkPointerAuthValue(Sema &S, Expr *&Arg,
}
static ExprResult PointerAuthStrip(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
+ if (S.checkArgCount(Call, 2))
return ExprError();
if (checkPointerAuthEnabled(S, Call))
return ExprError();
@@ -2100,7 +2101,7 @@ static ExprResult PointerAuthStrip(Sema &S, CallExpr *Call) {
}
static ExprResult PointerAuthBlendDiscriminator(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
+ if (S.checkArgCount(Call, 2))
return ExprError();
if (checkPointerAuthEnabled(S, Call))
return ExprError();
@@ -2113,7 +2114,7 @@ static ExprResult PointerAuthBlendDiscriminator(Sema &S, CallExpr *Call) {
}
static ExprResult PointerAuthSignGenericData(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 2))
+ if (S.checkArgCount(Call, 2))
return ExprError();
if (checkPointerAuthEnabled(S, Call))
return ExprError();
@@ -2127,7 +2128,7 @@ static ExprResult PointerAuthSignGenericData(Sema &S, CallExpr *Call) {
static ExprResult PointerAuthSignOrAuth(Sema &S, CallExpr *Call,
PointerAuthOpKind OpKind) {
- if (checkArgCount(S, Call, 3))
+ if (S.checkArgCount(Call, 3))
return ExprError();
if (checkPointerAuthEnabled(S, Call))
return ExprError();
@@ -2141,7 +2142,7 @@ static ExprResult PointerAuthSignOrAuth(Sema &S, CallExpr *Call,
}
static ExprResult PointerAuthAuthAndResign(Sema &S, CallExpr *Call) {
- if (checkArgCount(S, Call, 5))
+ if (S.checkArgCount(Call, 5))
return ExprError();
if (checkPointerAuthEnabled(S, Call))
return ExprError();
@@ -2157,7 +2158,7 @@ static ExprResult PointerAuthAuthAndResign(Sema &S, CallExpr *Call) {
}
static ExprResult BuiltinLaunder(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return ExprError();
// Compute __builtin_launder's parameter type from the argument.
@@ -2288,7 +2289,7 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
- return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ return RISCV().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::loongarch32:
case llvm::Triple::loongarch64:
return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
@@ -2377,7 +2378,7 @@ static bool BuiltinCpu(Sema &S, const TargetInfo &TI, CallExpr *TheCall,
/// Checks that __builtin_popcountg was called with a single argument, which is
/// an unsigned integer.
static bool BuiltinPopcountg(Sema &S, CallExpr *TheCall) {
- if (checkArgCount(S, TheCall, 1))
+ if (S.checkArgCount(TheCall, 1))
return true;
ExprResult ArgRes = S.DefaultLvalueConversion(TheCall->getArg(0));
@@ -2401,7 +2402,7 @@ static bool BuiltinPopcountg(Sema &S, CallExpr *TheCall) {
/// an unsigned integer, and an optional second argument, which is promoted to
/// an 'int'.
static bool BuiltinCountZeroBitsGeneric(Sema &S, CallExpr *TheCall) {
- if (checkArgCountRange(S, TheCall, 1, 2))
+ if (S.checkArgCountRange(TheCall, 1, 2))
return true;
ExprResult Arg0Res = S.DefaultLvalueConversion(TheCall->getArg(0));
@@ -2625,7 +2626,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__builtin_classify_type:
- if (checkArgCount(*this, TheCall, 1)) return true;
+ if (checkArgCount(TheCall, 1)) return true;
TheCall->setType(Context.IntTy);
break;
case Builtin::BI__builtin_complex:
@@ -2633,7 +2634,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__builtin_constant_p: {
- if (checkArgCount(*this, TheCall, 1)) return true;
+ if (checkArgCount(TheCall, 1)) return true;
ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
if (Arg.isInvalid()) return true;
TheCall->setArg(0, Arg.get());
@@ -2822,7 +2823,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return BuiltinDumpStruct(*this, TheCall);
case Builtin::BI__builtin_expect_with_probability: {
// We first want to ensure we are called with 3 arguments
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return ExprError();
// then check probability is constant float in range [0.0, 1.0]
const Expr *ProbArg = TheCall->getArg(2);
@@ -2870,7 +2871,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__GetExceptionInfo:
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return ExprError();
if (CheckCXXThrowOperand(
@@ -2891,7 +2892,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// These are all expected to be of the form
// T &/&&/* f(U &/&&)
// where T and U only differ in qualification.
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return ExprError();
QualType Param = FDecl->getParamDecl(0)->getType();
QualType Result = FDecl->getReturnType();
@@ -3129,7 +3130,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
}
case Builtin::BI__builtin_elementwise_copysign: {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return ExprError();
ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0));
@@ -3806,7 +3807,7 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
// Ensure that we have the proper number of arguments.
- if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
+ if (checkArgCount(TheCall, IsLdrex ? 1 : 2))
return true;
// Inspect the pointer argument of the atomic builtin. This should always be
@@ -4145,7 +4146,7 @@ bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
"unexpected BPF builtin");
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
// The second argument needs to be a constant int
@@ -5589,12 +5590,12 @@ bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
switch (BuiltinID) {
case Builtin::BI__builtin_hlsl_elementwise_all:
case Builtin::BI__builtin_hlsl_elementwise_any: {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
break;
}
case Builtin::BI__builtin_hlsl_elementwise_clamp: {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
if (CheckVectorElementCallArgs(this, TheCall))
return true;
@@ -5605,7 +5606,7 @@ bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
break;
}
case Builtin::BI__builtin_hlsl_dot: {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
if (CheckVectorElementCallArgs(this, TheCall))
return true;
@@ -5639,7 +5640,7 @@ bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
break;
}
case Builtin::BI__builtin_hlsl_lerp: {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
if (CheckVectorElementCallArgs(this, TheCall))
return true;
@@ -5650,7 +5651,7 @@ bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
break;
}
case Builtin::BI__builtin_hlsl_mad: {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
if (CheckVectorElementCallArgs(this, TheCall))
return true;
@@ -5753,866 +5754,6 @@ bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
return false;
}
-bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
- llvm::APSInt Result;
-
- // We can't check the value of a dependent argument.
- Expr *Arg = TheCall->getArg(ArgNum);
- if (Arg->isTypeDependent() || Arg->isValueDependent())
- return false;
-
- // Check constant-ness first.
- if (BuiltinConstantArg(TheCall, ArgNum, Result))
- return true;
-
- int64_t Val = Result.getSExtValue();
- if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
- return false;
-
- return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
- << Arg->getSourceRange();
-}
-
-static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
- Sema &S, QualType Type, int EGW) {
- assert((EGW == 128 || EGW == 256) && "EGW can only be 128 or 256 bits");
-
- // LMUL * VLEN >= EGW
- ASTContext::BuiltinVectorTypeInfo Info =
- S.Context.getBuiltinVectorTypeInfo(Type->castAs<BuiltinType>());
- unsigned ElemSize = S.Context.getTypeSize(Info.ElementType);
- unsigned MinElemCount = Info.EC.getKnownMinValue();
-
- unsigned EGS = EGW / ElemSize;
- // If EGS is less than or equal to the minimum number of elements, then the
- // type is valid.
- if (EGS <= MinElemCount)
- return false;
-
- // Otherwise, we need vscale to be at least EGS / MinElemCont.
- assert(EGS % MinElemCount == 0);
- unsigned VScaleFactor = EGS / MinElemCount;
- // Vscale is VLEN/RVVBitsPerBlock.
- unsigned MinRequiredVLEN = VScaleFactor * llvm::RISCV::RVVBitsPerBlock;
- std::string RequiredExt = "zvl" + std::to_string(MinRequiredVLEN) + "b";
- if (!TI.hasFeature(RequiredExt))
- return S.Diag(TheCall->getBeginLoc(),
- diag::err_riscv_type_requires_extension) << Type << RequiredExt;
-
- return false;
-}
-
-bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
- unsigned BuiltinID,
- CallExpr *TheCall) {
- // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
- // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
- switch (BuiltinID) {
- default:
- break;
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vmulh_vv:
- case RISCVVector::BI__builtin_rvv_vmulh_vx:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
- case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv:
- case RISCVVector::BI__builtin_rvv_vsmul_vx:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
- ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(
- TheCall->getType()->castAs<BuiltinType>());
-
- if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v"))
- return Diag(TheCall->getBeginLoc(),
- diag::err_riscv_builtin_requires_extension)
- << /* IsExtension */ true << TheCall->getSourceRange() << "v";
-
- break;
- }
- }
-
- switch (BuiltinID) {
- case RISCVVector::BI__builtin_rvv_vsetvli:
- return BuiltinConstantArgRange(TheCall, 1, 0, 3) ||
- CheckRISCVLMUL(TheCall, 2);
- case RISCVVector::BI__builtin_rvv_vsetvlimax:
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- CheckRISCVLMUL(TheCall, 1);
- case RISCVVector::BI__builtin_rvv_vget_v: {
- ASTContext::BuiltinVectorTypeInfo ResVecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getType().getCanonicalType().getTypePtr()));
- ASTContext::BuiltinVectorTypeInfo VecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
- unsigned MaxIndex;
- if (VecInfo.NumVectors != 1) // vget for tuple type
- MaxIndex = VecInfo.NumVectors;
- else // vget for non-tuple type
- MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
- (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
- return BuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
- }
- case RISCVVector::BI__builtin_rvv_vset_v: {
- ASTContext::BuiltinVectorTypeInfo ResVecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getType().getCanonicalType().getTypePtr()));
- ASTContext::BuiltinVectorTypeInfo VecInfo =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
- TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
- unsigned MaxIndex;
- if (ResVecInfo.NumVectors != 1) // vset for tuple type
- MaxIndex = ResVecInfo.NumVectors;
- else // vset fo non-tuple type
- MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
- (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
- return BuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
- }
- // Vector Crypto
- case RISCVVector::BI__builtin_rvv_vaeskf1_vi_tu:
- case RISCVVector::BI__builtin_rvv_vaeskf2_vi_tu:
- case RISCVVector::BI__builtin_rvv_vaeskf2_vi:
- case RISCVVector::BI__builtin_rvv_vsm4k_vi_tu: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- QualType Op2Type = TheCall->getArg(1)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 31);
- }
- case RISCVVector::BI__builtin_rvv_vsm3c_vi_tu:
- case RISCVVector::BI__builtin_rvv_vsm3c_vi: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 256) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 31);
- }
- case RISCVVector::BI__builtin_rvv_vaeskf1_vi:
- case RISCVVector::BI__builtin_rvv_vsm4k_vi: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31);
- }
- case RISCVVector::BI__builtin_rvv_vaesdf_vv:
- case RISCVVector::BI__builtin_rvv_vaesdf_vs:
- case RISCVVector::BI__builtin_rvv_vaesdm_vv:
- case RISCVVector::BI__builtin_rvv_vaesdm_vs:
- case RISCVVector::BI__builtin_rvv_vaesef_vv:
- case RISCVVector::BI__builtin_rvv_vaesef_vs:
- case RISCVVector::BI__builtin_rvv_vaesem_vv:
- case RISCVVector::BI__builtin_rvv_vaesem_vs:
- case RISCVVector::BI__builtin_rvv_vaesz_vs:
- case RISCVVector::BI__builtin_rvv_vsm4r_vv:
- case RISCVVector::BI__builtin_rvv_vsm4r_vs:
- case RISCVVector::BI__builtin_rvv_vaesdf_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesdf_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesdm_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesdm_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesef_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesef_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesem_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaesem_vs_tu:
- case RISCVVector::BI__builtin_rvv_vaesz_vs_tu:
- case RISCVVector::BI__builtin_rvv_vsm4r_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsm4r_vs_tu: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- QualType Op2Type = TheCall->getArg(1)->getType();
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128);
- }
- case RISCVVector::BI__builtin_rvv_vsha2ch_vv:
- case RISCVVector::BI__builtin_rvv_vsha2cl_vv:
- case RISCVVector::BI__builtin_rvv_vsha2ms_vv:
- case RISCVVector::BI__builtin_rvv_vsha2ch_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsha2cl_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsha2ms_vv_tu: {
- QualType Op1Type = TheCall->getArg(0)->getType();
- QualType Op2Type = TheCall->getArg(1)->getType();
- QualType Op3Type = TheCall->getArg(2)->getType();
- ASTContext::BuiltinVectorTypeInfo Info =
- Context.getBuiltinVectorTypeInfo(Op1Type->castAs<BuiltinType>());
- uint64_t ElemSize = Context.getTypeSize(Info.ElementType);
- if (ElemSize == 64 && !TI.hasFeature("zvknhb"))
- return Diag(TheCall->getBeginLoc(),
- diag::err_riscv_builtin_requires_extension)
- << /* IsExtension */ true << TheCall->getSourceRange() << "zvknb";
-
- return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, ElemSize * 4) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, ElemSize * 4) ||
- CheckInvalidVLENandLMUL(TI, TheCall, *this, Op3Type, ElemSize * 4);
- }
-
- case RISCVVector::BI__builtin_rvv_sf_vc_i_se:
- // bit_27_26, bit_24_20, bit_11_7, simm5, sew, log2lmul
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
- BuiltinConstantArgRange(TheCall, 3, -16, 15) ||
- CheckRISCVLMUL(TheCall, 5);
- case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
- // bit_27_26, bit_11_7, vs2, simm5
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- BuiltinConstantArgRange(TheCall, 3, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
- // bit_27_26, bit_24_20, simm5
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- BuiltinConstantArgRange(TheCall, 2, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
- // bit_27_26, vs2, simm5
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 2, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
- // bit_27_26, vd, vs2, simm5
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 3, -16, 15);
- case RISCVVector::BI__builtin_rvv_sf_vc_x_se:
- // bit_27_26, bit_24_20, bit_11_7, xs1, sew, log2lmul
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
- BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
- CheckRISCVLMUL(TheCall, 5);
- case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
- // bit_27_26, bit_11_7, vs2, xs1/vs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
- // bit_27_26, bit_24-20, xs1
- return BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31);
- case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
- // bit_27_26, vd, vs2, xs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
- // bit_27_26, vs2, xs1/vs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
- // bit_27_26, vd, vs2, xs1/vs1
- return BuiltinConstantArgRange(TheCall, 0, 0, 3);
- case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
- // bit_26, bit_11_7, vs2, fs1
- return BuiltinConstantArgRange(TheCall, 0, 0, 1) ||
- BuiltinConstantArgRange(TheCall, 1, 0, 31);
- case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
- // bit_26, vd, vs2, fs1
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
- case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
- // bit_26, vs2, fs1
- return BuiltinConstantArgRange(TheCall, 0, 0, 1);
- // Check if byteselect is in [0, 3]
- case RISCV::BI__builtin_riscv_aes32dsi:
- case RISCV::BI__builtin_riscv_aes32dsmi:
- case RISCV::BI__builtin_riscv_aes32esi:
- case RISCV::BI__builtin_riscv_aes32esmi:
- case RISCV::BI__builtin_riscv_sm4ks:
- case RISCV::BI__builtin_riscv_sm4ed:
- return BuiltinConstantArgRange(TheCall, 2, 0, 3);
- // Check if rnum is in [0, 10]
- case RISCV::BI__builtin_riscv_aes64ks1i:
- return BuiltinConstantArgRange(TheCall, 1, 0, 10);
- // Check if value range for vxrm is in [0, 3]
- case RISCVVector::BI__builtin_rvv_vaaddu_vv:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx:
- case RISCVVector::BI__builtin_rvv_vaadd_vv:
- case RISCVVector::BI__builtin_rvv_vaadd_vx:
- case RISCVVector::BI__builtin_rvv_vasubu_vv:
- case RISCVVector::BI__builtin_rvv_vasubu_vx:
- case RISCVVector::BI__builtin_rvv_vasub_vv:
- case RISCVVector::BI__builtin_rvv_vasub_vx:
- case RISCVVector::BI__builtin_rvv_vsmul_vv:
- case RISCVVector::BI__builtin_rvv_vsmul_vx:
- case RISCVVector::BI__builtin_rvv_vssra_vv:
- case RISCVVector::BI__builtin_rvv_vssra_vx:
- case RISCVVector::BI__builtin_rvv_vssrl_vv:
- case RISCVVector::BI__builtin_rvv_vssrl_vx:
- case RISCVVector::BI__builtin_rvv_vnclip_wv:
- case RISCVVector::BI__builtin_rvv_vnclip_wx:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx:
- return BuiltinConstantArgRange(TheCall, 2, 0, 3);
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
- case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
- case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
- case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
- case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
- case RISCVVector::BI__builtin_rvv_vasub_vv_m:
- case RISCVVector::BI__builtin_rvv_vasub_vx_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
- case RISCVVector::BI__builtin_rvv_vssra_vv_m:
- case RISCVVector::BI__builtin_rvv_vssra_vx_m:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
- return BuiltinConstantArgRange(TheCall, 3, 0, 3);
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
- case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
- case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
- case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
- case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
- case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
- case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
- case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
- case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
- case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
- case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
- case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
- case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
- case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
- return BuiltinConstantArgRange(TheCall, 4, 0, 3);
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
- return BuiltinConstantArgRange(TheCall, 1, 0, 4);
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
- return BuiltinConstantArgRange(TheCall, 2, 0, 4);
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
- return BuiltinConstantArgRange(TheCall, 3, 0, 4);
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
- case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
- case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
- return BuiltinConstantArgRange(TheCall, 4, 0, 4);
- case RISCV::BI__builtin_riscv_ntl_load:
- case RISCV::BI__builtin_riscv_ntl_store:
- DeclRefExpr *DRE =
- cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
- assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
- BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
- "Unexpected RISC-V nontemporal load/store builtin!");
- bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
- unsigned NumArgs = IsStore ? 3 : 2;
-
- if (checkArgCountAtLeast(*this, TheCall, NumArgs - 1))
- return true;
-
- if (checkArgCountAtMost(*this, TheCall, NumArgs))
- return true;
-
- // Domain value should be compile-time constant.
- // 2 <= domain <= 5
- if (TheCall->getNumArgs() == NumArgs &&
- BuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
- return true;
-
- Expr *PointerArg = TheCall->getArg(0);
- ExprResult PointerArgResult =
- DefaultFunctionArrayLvalueConversion(PointerArg);
-
- if (PointerArgResult.isInvalid())
- return true;
- PointerArg = PointerArgResult.get();
-
- const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
- if (!PtrType) {
- Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- QualType ValType = PtrType->getPointeeType();
- ValType = ValType.getUnqualifiedType();
- if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
- !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
- !ValType->isVectorType() && !ValType->isRVVSizelessBuiltinType()) {
- Diag(DRE->getBeginLoc(),
- diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
- }
-
- if (!IsStore) {
- TheCall->setType(ValType);
- return false;
- }
-
- ExprResult ValArg = TheCall->getArg(1);
- InitializedEntity Entity = InitializedEntity::InitializeParameter(
- Context, ValType, /*consume*/ false);
- ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
- if (ValArg.isInvalid())
- return true;
-
- TheCall->setArg(1, ValArg.get());
- TheCall->setType(Context.VoidTy);
- return false;
- }
-
- return false;
-}
-
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
if (BuiltinID == SystemZ::BI__builtin_tabort) {
@@ -6708,38 +5849,6 @@ bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
return false;
}
-void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
- const llvm::StringMap<bool> &FeatureMap) {
- ASTContext::BuiltinVectorTypeInfo Info =
- Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
- unsigned EltSize = Context.getTypeSize(Info.ElementType);
- unsigned MinElts = Info.EC.getKnownMinValue();
-
- if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
- !FeatureMap.lookup("zve64d"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
- // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at
- // least zve64x
- else if (((EltSize == 64 && Info.ElementType->isIntegerType()) ||
- MinElts == 1) &&
- !FeatureMap.lookup("zve64x"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
- else if (Info.ElementType->isFloat16Type() && !FeatureMap.lookup("zvfh") &&
- !FeatureMap.lookup("zvfhmin"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D)
- << Ty << "zvfh or zvfhmin";
- else if (Info.ElementType->isBFloat16Type() &&
- !FeatureMap.lookup("experimental-zvfbfmin"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
- else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
- !FeatureMap.lookup("zve32f"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
- // Given that caller already checked isRVVType() before calling this function,
- // if we don't have at least zve32x supported, then we need to emit error.
- else if (!FeatureMap.lookup("zve32x"))
- Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
-}
-
bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID,
CallExpr *TheCall) {
@@ -6748,7 +5857,7 @@ bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
- return checkArgCountAtMost(*this, TheCall, 3);
+ return checkArgCountAtMost(TheCall, 3);
}
return false;
@@ -9302,7 +8411,7 @@ ExprResult Sema::BuiltinNontemporalOverloaded(ExprResult TheCallResult) {
unsigned numArgs = isStore ? 2 : 1;
// Ensure that we have the proper number of arguments.
- if (checkArgCount(*this, TheCall, numArgs))
+ if (checkArgCount(TheCall, numArgs))
return ExprError();
// Inspect the last argument of the nontemporal builtin. This should always
@@ -9467,7 +8576,7 @@ bool Sema::BuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
// In C23 mode, va_start only needs one argument. However, the builtin still
// requires two arguments (which matches the behavior of the GCC builtin),
// <stdarg.h> passes `0` as the second argument in C23 mode.
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
// Type-check the first argument normally.
@@ -9598,7 +8707,7 @@ bool Sema::BuiltinVAStartARMMicrosoft(CallExpr *Call) {
/// BuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends. This is declared to take (...), so we have to check everything.
bool Sema::BuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
if (BuiltinID == Builtin::BI__builtin_isunordered &&
@@ -9642,7 +8751,7 @@ bool Sema::BuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
/// to check everything.
bool Sema::BuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
unsigned BuiltinID) {
- if (checkArgCount(*this, TheCall, NumArgs))
+ if (checkArgCount(TheCall, NumArgs))
return true;
FPOptions FPO = TheCall->getFPFeaturesInEffect(getLangOpts());
@@ -9727,7 +8836,7 @@ bool Sema::BuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
/// Perform semantic analysis for a call to __builtin_complex.
bool Sema::BuiltinComplex(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
bool Dependent = false;
@@ -9789,7 +8898,7 @@ bool Sema::BuiltinComplex(CallExpr *TheCall) {
// vector short vec_xxsldwi(vector short, vector short, int);
bool Sema::BuiltinVSX(CallExpr *TheCall) {
unsigned ExpectedNumArgs = 3;
- if (checkArgCount(*this, TheCall, ExpectedNumArgs))
+ if (checkArgCount(TheCall, ExpectedNumArgs))
return true;
// Check the third argument is a compile time constant
@@ -9976,7 +9085,7 @@ bool Sema::BuiltinArithmeticFence(CallExpr *TheCall) {
if (!Context.getTargetInfo().checkArithmeticFenceSupported())
return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
<< SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
Expr *Arg = TheCall->getArg(0);
if (Arg->isInstantiationDependent())
@@ -10046,7 +9155,7 @@ bool Sema::BuiltinAllocaWithAlign(CallExpr *TheCall) {
/// Handle __builtin_assume_aligned. This is declared
/// as (const void*, size_t, ...) and can take one optional constant int arg.
bool Sema::BuiltinAssumeAligned(CallExpr *TheCall) {
- if (checkArgCountRange(*this, TheCall, 2, 3))
+ if (checkArgCountRange(TheCall, 2, 3))
return true;
unsigned NumArgs = TheCall->getNumArgs();
@@ -10349,7 +9458,7 @@ bool Sema::BuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
/// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
bool Sema::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
if (BuiltinID == AArch64::BI__builtin_arm_irg) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
Expr *Arg0 = TheCall->getArg(0);
Expr *Arg1 = TheCall->getArg(1);
@@ -10377,7 +9486,7 @@ bool Sema::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
}
if (BuiltinID == AArch64::BI__builtin_arm_addg) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
Expr *Arg0 = TheCall->getArg(0);
@@ -10398,7 +9507,7 @@ bool Sema::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
}
if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
Expr *Arg0 = TheCall->getArg(0);
Expr *Arg1 = TheCall->getArg(1);
@@ -10421,7 +9530,7 @@ bool Sema::BuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
BuiltinID == AArch64::BI__builtin_arm_stg) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
Expr *Arg0 = TheCall->getArg(0);
ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
@@ -10694,7 +9803,7 @@ bool Sema::BuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
(void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
ArgNum++;
}
- if (checkArgCount(*this, TheCall, ArgNum))
+ if (checkArgCount(TheCall, ArgNum))
return true;
return false;
@@ -19706,7 +18815,7 @@ void Sema::CheckAddressOfPackedMember(Expr *rhs) {
}
bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
@@ -19745,7 +18854,7 @@ bool Sema::BuiltinVectorToScalarMath(CallExpr *TheCall) {
}
bool Sema::BuiltinVectorMath(CallExpr *TheCall, QualType &Res) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
ExprResult A = TheCall->getArg(0);
@@ -19774,7 +18883,7 @@ bool Sema::BuiltinVectorMath(CallExpr *TheCall, QualType &Res) {
bool Sema::BuiltinElementwiseTernaryMath(CallExpr *TheCall,
bool CheckForFloatArgs) {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
Expr *Args[3];
@@ -19817,7 +18926,7 @@ bool Sema::BuiltinElementwiseTernaryMath(CallExpr *TheCall,
}
bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
@@ -19829,7 +18938,7 @@ bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
}
bool Sema::BuiltinNonDeterministicValue(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
ExprResult Arg = TheCall->getArg(0);
@@ -19845,7 +18954,7 @@ bool Sema::BuiltinNonDeterministicValue(CallExpr *TheCall) {
ExprResult Sema::BuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return ExprError();
ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
@@ -19900,7 +19009,7 @@ ExprResult Sema::BuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
return ExprError();
}
- if (checkArgCount(*this, TheCall, 4))
+ if (checkArgCount(TheCall, 4))
return ExprError();
unsigned PtrArgIdx = 0;
@@ -20011,7 +19120,7 @@ ExprResult Sema::BuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult Sema::BuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult) {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return ExprError();
unsigned PtrArgIdx = 1;
@@ -20137,7 +19246,7 @@ static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
/// Check that the first argument is a WebAssembly table, and the second
/// is an index to use as index into the table.
bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 2))
+ if (checkArgCount(TheCall, 2))
return true;
QualType ElTy;
@@ -20160,7 +19269,7 @@ bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
/// an index to use as index into the table and the third is the reference
/// type to set into the table.
bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
QualType ElTy;
@@ -20178,7 +19287,7 @@ bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
/// Check that the argument is a WebAssembly table.
bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 1))
+ if (checkArgCount(TheCall, 1))
return true;
QualType ElTy;
@@ -20192,7 +19301,7 @@ bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
/// value to use for new elements (of a type matching the table type), the
/// third value is an integer.
bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 3))
+ if (checkArgCount(TheCall, 3))
return true;
QualType ElTy;
@@ -20216,7 +19325,7 @@ bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
/// integer, the third is the value to use to fill the table (of a type
/// matching the table type), and the fourth is an integer.
bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 4))
+ if (checkArgCount(TheCall, 4))
return true;
QualType ElTy;
@@ -20243,7 +19352,7 @@ bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
/// WebAssembly table (of the same element type), and the third to fifth
/// arguments are integers.
bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) {
- if (checkArgCount(*this, TheCall, 5))
+ if (checkArgCount(TheCall, 5))
return true;
QualType XElTy;
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index f2b9202255cd4..74aa63b371fd7 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -50,6 +50,7 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaRISCV.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/SmallString.h"
@@ -8926,7 +8927,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
const FunctionDecl *FD = cast<FunctionDecl>(CurContext);
llvm::StringMap<bool> CallerFeatureMap;
Context.getFunctionFeatureMap(CallerFeatureMap, FD);
- checkRVVTypeSupport(T, NewVD->getLocation(), cast<Decl>(CurContext),
+ RISCV().checkRVVTypeSupport(T, NewVD->getLocation(), cast<Decl>(CurContext),
CallerFeatureMap);
}
}
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 5ecfdee21f09d..e353ded3da53b 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -7585,27 +7585,6 @@ bool Sema::isValidSveBitcast(QualType srcTy, QualType destTy) {
ValidScalableConversion(destTy, srcTy);
}
-/// Are the two types RVV-bitcast-compatible types? I.e. is bitcasting from the
-/// first RVV type (e.g. an RVV scalable type) to the second type (e.g. an RVV
-/// VLS type) allowed?
-///
-/// This will also return false if the two given types do not make sense from
-/// the perspective of RVV bitcasts.
-bool Sema::isValidRVVBitcast(QualType srcTy, QualType destTy) {
- assert(srcTy->isVectorType() || destTy->isVectorType());
-
- auto ValidScalableConversion = [](QualType FirstType, QualType SecondType) {
- if (!FirstType->isRVVSizelessBuiltinType())
- return false;
-
- const auto *VecTy = SecondType->getAs<VectorType>();
- return VecTy && VecTy->getVectorKind() == VectorKind::RVVFixedLengthData;
- };
-
- return ValidScalableConversion(srcTy, destTy) ||
- ValidScalableConversion(destTy, srcTy);
-}
-
/// Are the two types matrix types and do they have the same dimensions i.e.
/// do they have the same number of rows and the same number of columns?
bool Sema::areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy) {
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index 0834db95d42ad..e17899803ad3a 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -34,6 +34,7 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaRISCV.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/STLExtras.h"
@@ -945,13 +946,13 @@ bool Sema::LookupBuiltin(LookupResult &R) {
}
}
- if (DeclareRISCVVBuiltins || DeclareRISCVSiFiveVectorBuiltins) {
- if (!RVIntrinsicManager)
- RVIntrinsicManager = CreateRISCVIntrinsicManager(*this);
+ if (RISCV().DeclareRVVBuiltins || RISCV().DeclareSiFiveVectorBuiltins) {
+ if (!RISCV().IntrinsicManager)
+ RISCV().IntrinsicManager = CreateRISCVIntrinsicManager(*this);
- RVIntrinsicManager->InitIntrinsicList();
+ RISCV().IntrinsicManager->InitIntrinsicList();
- if (RVIntrinsicManager->CreateIntrinsicIfFound(R, II, PP))
+ if (RISCV().IntrinsicManager->CreateIntrinsicIfFound(R, II, PP))
return true;
}
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
new file mode 100644
index 0000000000000..8aae622e6abc4
--- /dev/null
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -0,0 +1,1425 @@
+//===------ SemaRISCV.cpp ------ RISC-V target-specific routines ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis functions specific to RISC-V,
+// including name lookup for the RISC-V vector intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaRISCV.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/RISCVIntrinsicManager.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Support/RISCVVIntrinsicUtils.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
+#include <optional>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+using namespace clang;
+using namespace clang::RISCV;
+
+using IntrinsicKind = sema::RISCVIntrinsicManager::IntrinsicKind;
+
+namespace {
+
+// Function definition of an RVV intrinsic.
+struct RVVIntrinsicDef {
+  /// Name of the corresponding clang built-in, e.g. __builtin_rvv_vadd.
+  std::string BuiltinName;
+
+  /// Function signature; the first element is the return type.
+  RVVTypes Signature;
+};
+
+struct RVVOverloadIntrinsicDef {
+  // Indices into RISCVIntrinsicManagerImpl::IntrinsicList.
+ SmallVector<uint16_t, 8> Indexes;
+};
+
+} // namespace
+
+static const PrototypeDescriptor RVVSignatureTable[] = {
+#define DECL_SIGNATURE_TABLE
+#include "clang/Basic/riscv_vector_builtin_sema.inc"
+#undef DECL_SIGNATURE_TABLE
+};
+
+static const PrototypeDescriptor RVSiFiveVectorSignatureTable[] = {
+#define DECL_SIGNATURE_TABLE
+#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
+#undef DECL_SIGNATURE_TABLE
+};
+
+static const RVVIntrinsicRecord RVVIntrinsicRecords[] = {
+#define DECL_INTRINSIC_RECORDS
+#include "clang/Basic/riscv_vector_builtin_sema.inc"
+#undef DECL_INTRINSIC_RECORDS
+};
+
+static const RVVIntrinsicRecord RVSiFiveVectorIntrinsicRecords[] = {
+#define DECL_INTRINSIC_RECORDS
+#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
+#undef DECL_INTRINSIC_RECORDS
+};
+
+// Get a subsequence of the signature table.
+static ArrayRef<PrototypeDescriptor>
+ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
+ switch (K) {
+ case IntrinsicKind::RVV:
+ return ArrayRef(&RVVSignatureTable[Index], Length);
+ case IntrinsicKind::SIFIVE_VECTOR:
+ return ArrayRef(&RVSiFiveVectorSignatureTable[Index], Length);
+ }
+ llvm_unreachable("Unhandled IntrinsicKind");
+}
+
+static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
+ QualType QT;
+ switch (Type->getScalarType()) {
+ case ScalarTypeKind::Void:
+ QT = Context.VoidTy;
+ break;
+ case ScalarTypeKind::Size_t:
+ QT = Context.getSizeType();
+ break;
+ case ScalarTypeKind::Ptrdiff_t:
+ QT = Context.getPointerDiffType();
+ break;
+ case ScalarTypeKind::UnsignedLong:
+ QT = Context.UnsignedLongTy;
+ break;
+ case ScalarTypeKind::SignedLong:
+ QT = Context.LongTy;
+ break;
+ case ScalarTypeKind::Boolean:
+ QT = Context.BoolTy;
+ break;
+ case ScalarTypeKind::SignedInteger:
+ QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), true);
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), false);
+ break;
+ case ScalarTypeKind::BFloat:
+ QT = Context.BFloat16Ty;
+ break;
+ case ScalarTypeKind::Float:
+ switch (Type->getElementBitwidth()) {
+ case 64:
+ QT = Context.DoubleTy;
+ break;
+ case 32:
+ QT = Context.FloatTy;
+ break;
+ case 16:
+ QT = Context.Float16Ty;
+ break;
+ default:
+ llvm_unreachable("Unsupported floating point width.");
+ }
+ break;
+ case Invalid:
+ case Undefined:
+ llvm_unreachable("Unhandled type.");
+ }
+ if (Type->isVector()) {
+ if (Type->isTuple())
+ QT = Context.getScalableVectorType(QT, *Type->getScale(), Type->getNF());
+ else
+ QT = Context.getScalableVectorType(QT, *Type->getScale());
+ }
+
+ if (Type->isConstant())
+ QT = Context.getConstType(QT);
+
+ // Transform the type to a pointer as the last step, if necessary.
+ if (Type->isPointer())
+ QT = Context.getPointerType(QT);
+
+ return QT;
+}
+
+namespace {
+class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
+private:
+ Sema &S;
+ ASTContext &Context;
+ RVVTypeCache TypeCache;
+ bool ConstructedRISCVVBuiltins;
+ bool ConstructedRISCVSiFiveVectorBuiltins;
+
+  // List of all RVV intrinsics.
+  std::vector<RVVIntrinsicDef> IntrinsicList;
+  // Mapping from function name to an index into IntrinsicList.
+  StringMap<uint16_t> Intrinsics;
+  // Mapping from overloaded function name to RVVOverloadIntrinsicDef.
+  StringMap<RVVOverloadIntrinsicDef> OverloadIntrinsics;
+
+ // Create RVVIntrinsicDef.
+ void InitRVVIntrinsic(const RVVIntrinsicRecord &Record, StringRef SuffixStr,
+ StringRef OverloadedSuffixStr, bool IsMask,
+ RVVTypes &Types, bool HasPolicy, Policy PolicyAttrs);
+
+ // Create FunctionDecl for a vector intrinsic.
+ void CreateRVVIntrinsicDecl(LookupResult &LR, IdentifierInfo *II,
+ Preprocessor &PP, uint32_t Index,
+ bool IsOverload);
+
+ void ConstructRVVIntrinsics(ArrayRef<RVVIntrinsicRecord> Recs,
+ IntrinsicKind K);
+
+public:
+ RISCVIntrinsicManagerImpl(clang::Sema &S) : S(S), Context(S.Context) {
+ ConstructedRISCVVBuiltins = false;
+ ConstructedRISCVSiFiveVectorBuiltins = false;
+ }
+
+  // Initialize IntrinsicList.
+  void InitIntrinsicList() override;
+
+  // If the identifier names a RISC-V vector intrinsic, create its declaration,
+  // insert it into the symbol table, and return true; otherwise return false.
+ bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
+ Preprocessor &PP) override;
+};
+} // namespace
+
+void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
+ ArrayRef<RVVIntrinsicRecord> Recs, IntrinsicKind K) {
+ const TargetInfo &TI = Context.getTargetInfo();
+ static const std::pair<const char *, RVVRequire> FeatureCheckList[] = {
+ {"64bit", RVV_REQ_RV64},
+ {"xsfvcp", RVV_REQ_Xsfvcp},
+ {"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
+ {"xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq},
+ {"xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod},
+ {"xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq},
+ {"zvbb", RVV_REQ_Zvbb},
+ {"zvbc", RVV_REQ_Zvbc},
+ {"zvkb", RVV_REQ_Zvkb},
+ {"zvkg", RVV_REQ_Zvkg},
+ {"zvkned", RVV_REQ_Zvkned},
+ {"zvknha", RVV_REQ_Zvknha},
+ {"zvknhb", RVV_REQ_Zvknhb},
+ {"zvksed", RVV_REQ_Zvksed},
+ {"zvksh", RVV_REQ_Zvksh},
+ {"zvfbfwma", RVV_REQ_Zvfbfwma},
+ {"zvfbfmin", RVV_REQ_Zvfbfmin},
+ {"experimental", RVV_REQ_Experimental}};
+
+  // Construction of RVVIntrinsicRecords needs to be kept in sync with
+  // createRVVIntrinsics in RISCVVEmitter.cpp.
+ for (auto &Record : Recs) {
+ // Check requirements.
+ if (llvm::any_of(FeatureCheckList, [&](const auto &Item) {
+ return (Record.RequiredExtensions & Item.second) == Item.second &&
+ !TI.hasFeature(Item.first);
+ }))
+ continue;
+
+ // Create Intrinsics for each type and LMUL.
+ BasicType BaseType = BasicType::Unknown;
+ ArrayRef<PrototypeDescriptor> BasicProtoSeq =
+ ProtoSeq2ArrayRef(K, Record.PrototypeIndex, Record.PrototypeLength);
+ ArrayRef<PrototypeDescriptor> SuffixProto =
+ ProtoSeq2ArrayRef(K, Record.SuffixIndex, Record.SuffixLength);
+ ArrayRef<PrototypeDescriptor> OverloadedSuffixProto = ProtoSeq2ArrayRef(
+ K, Record.OverloadedSuffixIndex, Record.OverloadedSuffixSize);
+
+ PolicyScheme UnMaskedPolicyScheme =
+ static_cast<PolicyScheme>(Record.UnMaskedPolicyScheme);
+ PolicyScheme MaskedPolicyScheme =
+ static_cast<PolicyScheme>(Record.MaskedPolicyScheme);
+
+ const Policy DefaultPolicy;
+
+ llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/false,
+ /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
+ UnMaskedPolicyScheme, DefaultPolicy, Record.IsTuple);
+
+ llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq;
+ if (Record.HasMasked)
+ ProtoMaskSeq = RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
+ Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy,
+ Record.IsTuple);
+
+ bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
+ bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
+ SmallVector<Policy> SupportedUnMaskedPolicies =
+ RVVIntrinsic::getSupportedUnMaskedPolicies();
+ SmallVector<Policy> SupportedMaskedPolicies =
+ RVVIntrinsic::getSupportedMaskedPolicies(Record.HasTailPolicy,
+ Record.HasMaskPolicy);
+
+ for (unsigned int TypeRangeMaskShift = 0;
+ TypeRangeMaskShift <= static_cast<unsigned int>(BasicType::MaxOffset);
+ ++TypeRangeMaskShift) {
+ unsigned int BaseTypeI = 1 << TypeRangeMaskShift;
+ BaseType = static_cast<BasicType>(BaseTypeI);
+
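+      // Each BasicType is a one-hot bit; only build intrinsics for the element
+      // types selected in this record's TypeRangeMask.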
+ if ((BaseTypeI & Record.TypeRangeMask) != BaseTypeI)
+ continue;
+
+ if (BaseType == BasicType::Float16) {
+ if ((Record.RequiredExtensions & RVV_REQ_Zvfhmin) == RVV_REQ_Zvfhmin) {
+ if (!TI.hasFeature("zvfhmin"))
+ continue;
+ } else if (!TI.hasFeature("zvfh")) {
+ continue;
+ }
+ }
+
+      // Expand for each supported LMUL.
+ for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
+ if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))
+ continue;
+
+ std::optional<RVVTypes> Types =
+ TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoSeq);
+
+        // Skip creating the intrinsic if any of its types are illegal.
+ if (!Types.has_value())
+ continue;
+
+ std::string SuffixStr = RVVIntrinsic::getSuffixStr(
+ TypeCache, BaseType, Log2LMUL, SuffixProto);
+ std::string OverloadedSuffixStr = RVVIntrinsic::getSuffixStr(
+ TypeCache, BaseType, Log2LMUL, OverloadedSuffixProto);
+
+ // Create non-masked intrinsic.
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, false, *Types,
+ UnMaskedHasPolicy, DefaultPolicy);
+
+ // Create non-masked policy intrinsic.
+ if (Record.UnMaskedPolicyScheme != PolicyScheme::SchemeNone) {
+ for (auto P : SupportedUnMaskedPolicies) {
+ llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/false,
+ /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
+ UnMaskedPolicyScheme, P, Record.IsTuple);
+ std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
+ BaseType, Log2LMUL, Record.NF, PolicyPrototype);
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
+ /*IsMask=*/false, *PolicyTypes, UnMaskedHasPolicy,
+ P);
+ }
+ }
+ if (!Record.HasMasked)
+ continue;
+ // Create masked intrinsic.
+ std::optional<RVVTypes> MaskTypes =
+ TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoMaskSeq);
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, true,
+ *MaskTypes, MaskedHasPolicy, DefaultPolicy);
+ if (Record.MaskedPolicyScheme == PolicyScheme::SchemeNone)
+ continue;
+ // Create masked policy intrinsic.
+ for (auto P : SupportedMaskedPolicies) {
+ llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
+ Record.HasVL, Record.NF, MaskedPolicyScheme, P,
+ Record.IsTuple);
+ std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
+ BaseType, Log2LMUL, Record.NF, PolicyPrototype);
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
+ /*IsMask=*/true, *PolicyTypes, MaskedHasPolicy, P);
+ }
+ } // End for different LMUL
+ } // End for different TypeRange
+ }
+}
+
+void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
+ if (S.RISCV().DeclareRVVBuiltins && !ConstructedRISCVVBuiltins) {
+ ConstructedRISCVVBuiltins = true;
+    ConstructRVVIntrinsics(RVVIntrinsicRecords, IntrinsicKind::RVV);
+ }
+ if (S.RISCV().DeclareSiFiveVectorBuiltins &&
+ !ConstructedRISCVSiFiveVectorBuiltins) {
+ ConstructedRISCVSiFiveVectorBuiltins = true;
+ ConstructRVVIntrinsics(RVSiFiveVectorIntrinsicRecords,
+ IntrinsicKind::SIFIVE_VECTOR);
+ }
+}
+
+// Compute the name and signature for an intrinsic with concrete types.
+void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
+ const RVVIntrinsicRecord &Record, StringRef SuffixStr,
+ StringRef OverloadedSuffixStr, bool IsMasked, RVVTypes &Signature,
+ bool HasPolicy, Policy PolicyAttrs) {
+ // Function name, e.g. vadd_vv_i32m1.
+ std::string Name = Record.Name;
+ if (!SuffixStr.empty())
+ Name += "_" + SuffixStr.str();
+
+ // Overloaded function name, e.g. vadd.
+ std::string OverloadedName;
+ if (!Record.OverloadedName)
+ OverloadedName = StringRef(Record.Name).split("_").first.str();
+ else
+ OverloadedName = Record.OverloadedName;
+ if (!OverloadedSuffixStr.empty())
+ OverloadedName += "_" + OverloadedSuffixStr.str();
+
+ // clang built-in function name, e.g. __builtin_rvv_vadd.
+ std::string BuiltinName = std::string(Record.Name);
+
+ RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
+ OverloadedName, PolicyAttrs,
+ Record.HasFRMRoundModeOp);
+
+ // Put into IntrinsicList.
+ uint16_t Index = IntrinsicList.size();
+ assert(IntrinsicList.size() == (size_t)Index &&
+ "Intrinsics indices overflow.");
+ IntrinsicList.push_back({BuiltinName, Signature});
+
+  // Create the mapping from function name to index.
+  Intrinsics.insert({Name, Index});
+
+ // Get the RVVOverloadIntrinsicDef.
+ RVVOverloadIntrinsicDef &OverloadIntrinsicDef =
+ OverloadIntrinsics[OverloadedName];
+
+  // And record this intrinsic's index under the overloaded name.
+  OverloadIntrinsicDef.Indexes.push_back(Index);
+}
+
+void RISCVIntrinsicManagerImpl::CreateRVVIntrinsicDecl(LookupResult &LR,
+ IdentifierInfo *II,
+ Preprocessor &PP,
+ uint32_t Index,
+ bool IsOverload) {
+ ASTContext &Context = S.Context;
+ RVVIntrinsicDef &IDef = IntrinsicList[Index];
+ RVVTypes Sigs = IDef.Signature;
+ size_t SigLength = Sigs.size();
+ RVVType *ReturnType = Sigs[0];
+ QualType RetType = RVVType2Qual(Context, ReturnType);
+ SmallVector<QualType, 8> ArgTypes;
+ QualType BuiltinFuncType;
+
+ // Skip return type, and convert RVVType to QualType for arguments.
+ for (size_t i = 1; i < SigLength; ++i)
+ ArgTypes.push_back(RVVType2Qual(Context, Sigs[i]));
+
+ FunctionProtoType::ExtProtoInfo PI(
+ Context.getDefaultCallingConvention(false, false, true));
+
+ PI.Variadic = false;
+
+ SourceLocation Loc = LR.getNameLoc();
+ BuiltinFuncType = Context.getFunctionType(RetType, ArgTypes, PI);
+ DeclContext *Parent = Context.getTranslationUnitDecl();
+
+ FunctionDecl *RVVIntrinsicDecl = FunctionDecl::Create(
+ Context, Parent, Loc, Loc, II, BuiltinFuncType, /*TInfo=*/nullptr,
+ SC_Extern, S.getCurFPFeatures().isFPConstrained(),
+ /*isInlineSpecified*/ false,
+ /*hasWrittenPrototype*/ true);
+
+ // Create Decl objects for each parameter, adding them to the
+ // FunctionDecl.
+ const auto *FP = cast<FunctionProtoType>(BuiltinFuncType);
+ SmallVector<ParmVarDecl *, 8> ParmList;
+ for (unsigned IParm = 0, E = FP->getNumParams(); IParm != E; ++IParm) {
+ ParmVarDecl *Parm =
+ ParmVarDecl::Create(Context, RVVIntrinsicDecl, Loc, Loc, nullptr,
+ FP->getParamType(IParm), nullptr, SC_None, nullptr);
+ Parm->setScopeInfo(0, IParm);
+ ParmList.push_back(Parm);
+ }
+ RVVIntrinsicDecl->setParams(ParmList);
+
+ // Add function attributes.
+ if (IsOverload)
+ RVVIntrinsicDecl->addAttr(OverloadableAttr::CreateImplicit(Context));
+
+  // Set up an alias to the corresponding __builtin_rvv_* builtin.
+ IdentifierInfo &IntrinsicII =
+ PP.getIdentifierTable().get("__builtin_rvv_" + IDef.BuiltinName);
+ RVVIntrinsicDecl->addAttr(
+ BuiltinAliasAttr::CreateImplicit(S.Context, &IntrinsicII));
+
+ // Add to symbol table.
+ LR.addDecl(RVVIntrinsicDecl);
+}
+
+bool RISCVIntrinsicManagerImpl::CreateIntrinsicIfFound(LookupResult &LR,
+ IdentifierInfo *II,
+ Preprocessor &PP) {
+ StringRef Name = II->getName();
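+  // All user-facing RVV intrinsics are spelled with a "__riscv_" prefix
+  // (e.g. __riscv_vadd); any other name cannot be an RVV intrinsic.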
+ if (!Name.consume_front("__riscv_"))
+ return false;
+
+  // Look up the function name among the overloaded intrinsics first.
+ auto OvIItr = OverloadIntrinsics.find(Name);
+ if (OvIItr != OverloadIntrinsics.end()) {
+ const RVVOverloadIntrinsicDef &OvIntrinsicDef = OvIItr->second;
+ for (auto Index : OvIntrinsicDef.Indexes)
+ CreateRVVIntrinsicDecl(LR, II, PP, Index,
+ /*IsOverload*/ true);
+
+    // If we added overloads, we need to resolve the lookup result.
+ LR.resolveKind();
+ return true;
+ }
+
+  // Then look up the function name among the non-overloaded intrinsics.
+ auto Itr = Intrinsics.find(Name);
+ if (Itr != Intrinsics.end()) {
+ CreateRVVIntrinsicDecl(LR, II, PP, Itr->second,
+ /*IsOverload*/ false);
+ return true;
+ }
+
+  // It's not an RVV intrinsic.
+ return false;
+}
+
+namespace clang {
+std::unique_ptr<clang::sema::RISCVIntrinsicManager>
+CreateRISCVIntrinsicManager(Sema &S) {
+ return std::make_unique<RISCVIntrinsicManagerImpl>(S);
+}
+
+bool SemaRISCV::CheckLMUL(CallExpr *TheCall, unsigned ArgNum) {
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaRef.BuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ int64_t Val = Result.getSExtValue();
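+  // In the vtype encoding, LMUL values 0-3 select m1-m8 and 5-7 select
+  // mf8-mf2; the encoding 4 is reserved and therefore invalid here.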
+ if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
+ return false;
+
+ return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
+ << Arg->getSourceRange();
+}
+
+static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
+ Sema &S, QualType Type, int EGW) {
+ assert((EGW == 128 || EGW == 256) && "EGW can only be 128 or 256 bits");
+
+ // LMUL * VLEN >= EGW
+ ASTContext::BuiltinVectorTypeInfo Info =
+ S.Context.getBuiltinVectorTypeInfo(Type->castAs<BuiltinType>());
+ unsigned ElemSize = S.Context.getTypeSize(Info.ElementType);
+ unsigned MinElemCount = Info.EC.getKnownMinValue();
+
+ unsigned EGS = EGW / ElemSize;
+ // If EGS is less than or equal to the minimum number of elements, then the
+ // type is valid.
+ if (EGS <= MinElemCount)
+ return false;
+
+  // Otherwise, we need vscale to be at least EGS / MinElemCount.
+ assert(EGS % MinElemCount == 0);
+ unsigned VScaleFactor = EGS / MinElemCount;
+ // Vscale is VLEN/RVVBitsPerBlock.
+ unsigned MinRequiredVLEN = VScaleFactor * llvm::RISCV::RVVBitsPerBlock;
+ std::string RequiredExt = "zvl" + std::to_string(MinRequiredVLEN) + "b";
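+  // For example, with EGW == 128 and an i32 LMUL=1 operand (ElemSize == 32,
+  // MinElemCount == 2): EGS == 4, VScaleFactor == 2, and the operand is only
+  // valid when the zvl128b extension is available.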
+ if (!TI.hasFeature(RequiredExt))
+ return S.Diag(TheCall->getBeginLoc(),
+                  diag::err_riscv_type_requires_extension)
+           << Type << RequiredExt;
+
+ return false;
+}
+
+bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ ASTContext &Context = getASTContext();
+ // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
+ // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
+ switch (BuiltinID) {
+ default:
+ break;
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
+ ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(
+ TheCall->getType()->castAs<BuiltinType>());
+
+ if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v"))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_builtin_requires_extension)
+ << /* IsExtension */ true << TheCall->getSourceRange() << "v";
+
+ break;
+ }
+ }
+
+ switch (BuiltinID) {
+ case RISCVVector::BI__builtin_rvv_vsetvli:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 3) ||
+ CheckLMUL(TheCall, 2);
+ case RISCVVector::BI__builtin_rvv_vsetvlimax:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ CheckLMUL(TheCall, 1);
+ case RISCVVector::BI__builtin_rvv_vget_v: {
+ ASTContext::BuiltinVectorTypeInfo ResVecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getType().getCanonicalType().getTypePtr()));
+ ASTContext::BuiltinVectorTypeInfo VecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
+ unsigned MaxIndex;
+ if (VecInfo.NumVectors != 1) // vget for tuple type
+ MaxIndex = VecInfo.NumVectors;
+ else // vget for non-tuple type
+ MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
+ (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
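+    // E.g. a non-tuple vget extracting an m1 result from an m4 source has
+    // four aliasing positions, so MaxIndex is 4 and the index must be in [0, 3].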
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
+ }
+ case RISCVVector::BI__builtin_rvv_vset_v: {
+ ASTContext::BuiltinVectorTypeInfo ResVecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getType().getCanonicalType().getTypePtr()));
+ ASTContext::BuiltinVectorTypeInfo VecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
+ unsigned MaxIndex;
+ if (ResVecInfo.NumVectors != 1) // vset for tuple type
+ MaxIndex = ResVecInfo.NumVectors;
+    else // vset for non-tuple type
+ MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
+ (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
+ }
+ // Vector Crypto
+ case RISCVVector::BI__builtin_rvv_vaeskf1_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vaeskf2_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vaeskf2_vi:
+ case RISCVVector::BI__builtin_rvv_vsm4k_vi_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 128) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op2Type, 128) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vsm3c_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vsm3c_vi: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 256) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vaeskf1_vi:
+ case RISCVVector::BI__builtin_rvv_vsm4k_vi: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 128) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vaesdf_vv:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vs:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vv:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vs:
+ case RISCVVector::BI__builtin_rvv_vaesef_vv:
+ case RISCVVector::BI__builtin_rvv_vaesef_vs:
+ case RISCVVector::BI__builtin_rvv_vaesem_vv:
+ case RISCVVector::BI__builtin_rvv_vaesem_vs:
+ case RISCVVector::BI__builtin_rvv_vaesz_vs:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vv:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vs:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesef_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesef_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesem_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesem_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesz_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vs_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, 128) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op2Type, 128);
+ }
+ case RISCVVector::BI__builtin_rvv_vsha2ch_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2cl_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2ms_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2ch_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsha2cl_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsha2ms_vv_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ QualType Op3Type = TheCall->getArg(2)->getType();
+ ASTContext::BuiltinVectorTypeInfo Info =
+ Context.getBuiltinVectorTypeInfo(Op1Type->castAs<BuiltinType>());
+ uint64_t ElemSize = Context.getTypeSize(Info.ElementType);
+    if (ElemSize == 64 && !TI.hasFeature("zvknhb"))
+      return Diag(TheCall->getBeginLoc(),
+                  diag::err_riscv_builtin_requires_extension)
+             << /* IsExtension */ true << TheCall->getSourceRange() << "zvknhb";
+
+    return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type,
+                                   ElemSize * 4) ||
+           CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op2Type,
+                                   ElemSize * 4) ||
+           CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op3Type,
+                                   ElemSize * 4);
+ }
+
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se:
+ // bit_27_26, bit_24_20, bit_11_7, simm5, sew, log2lmul
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, -16, 15) ||
+ CheckLMUL(TheCall, 5);
+ case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
+ // bit_27_26, bit_11_7, vs2, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
+ // bit_27_26, bit_24_20, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
+ // bit_27_26, vs2, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
+ // bit_27_26, vd, vs2, simm5
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se:
+ // bit_27_26, bit_24_20, bit_11_7, xs1, sew, log2lmul
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 31) ||
+ CheckLMUL(TheCall, 5);
+ case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
+ // bit_27_26, bit_11_7, vs2, xs1/vs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
+    // bit_27_26, bit_24_20, xs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
+ // bit_27_26, vd, vs2, xs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
+ // bit_27_26, vs2, xs1/vs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
+ // bit_27_26, vd, vs2, xs1/vs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3);
+ case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
+ // bit_26, bit_11_7, vs2, fs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 1) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
+ // bit_26, vd, vs2, fs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
+ // bit_26, vs2, fs1
+ return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 1);
+ // Check if byteselect is in [0, 3]
+ case RISCV::BI__builtin_riscv_aes32dsi:
+ case RISCV::BI__builtin_riscv_aes32dsmi:
+ case RISCV::BI__builtin_riscv_aes32esi:
+ case RISCV::BI__builtin_riscv_aes32esmi:
+ case RISCV::BI__builtin_riscv_sm4ks:
+ case RISCV::BI__builtin_riscv_sm4ed:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ // Check if rnum is in [0, 10]
+ case RISCV::BI__builtin_riscv_aes64ks1i:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 10);
+ // Check if vxrm is in [0, 3]
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx:
+ case RISCVVector::BI__builtin_rvv_vasub_vv:
+ case RISCVVector::BI__builtin_rvv_vasub_vx:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx:
+ case RISCVVector::BI__builtin_rvv_vssra_vv:
+ case RISCVVector::BI__builtin_rvv_vssra_vx:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_m:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_m:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_m:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 4);
+ case RISCV::BI__builtin_riscv_ntl_load:
+ case RISCV::BI__builtin_riscv_ntl_store:
+ DeclRefExpr *DRE =
+ cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
+ BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
+ "Unexpected RISC-V nontemporal load/store builtin!");
+ bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
+ unsigned NumArgs = IsStore ? 3 : 2;
+
+ if (SemaRef.checkArgCountAtLeast(TheCall, NumArgs - 1))
+ return true;
+
+ if (SemaRef.checkArgCountAtMost(TheCall, NumArgs))
+ return true;
+
+ // Domain value should be compile-time constant.
+ // 2 <= domain <= 5
+ if (TheCall->getNumArgs() == NumArgs &&
+ SemaRef.BuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
+ return true;
+
+ Expr *PointerArg = TheCall->getArg(0);
+ ExprResult PointerArgResult =
+ SemaRef.DefaultFunctionArrayLvalueConversion(PointerArg);
+
+ if (PointerArgResult.isInvalid())
+ return true;
+ PointerArg = PointerArgResult.get();
+
+ const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
+ if (!PtrType) {
+ Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ QualType ValType = PtrType->getPointeeType();
+ ValType = ValType.getUnqualifiedType();
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
+ !ValType->isVectorType() && !ValType->isRVVSizelessBuiltinType()) {
+ Diag(DRE->getBeginLoc(),
+ diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ if (!IsStore) {
+ TheCall->setType(ValType);
+ return false;
+ }
+
+ ExprResult ValArg = TheCall->getArg(1);
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, ValType, /*consume*/ false);
+ ValArg = SemaRef.PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+ if (ValArg.isInvalid())
+ return true;
+
+ TheCall->setArg(1, ValArg.get());
+ TheCall->setType(Context.VoidTy);
+ return false;
+ }
+
+ return false;
+}
+
+void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
+ const llvm::StringMap<bool> &FeatureMap) {
+ ASTContext::BuiltinVectorTypeInfo Info =
+ SemaRef.Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
+ unsigned EltSize = SemaRef.Context.getTypeSize(Info.ElementType);
+ unsigned MinElts = Info.EC.getKnownMinValue();
+
+ if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
+ !FeatureMap.lookup("zve64d"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
+ // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) require at
+ // least zve64x.
+ else if (((EltSize == 64 && Info.ElementType->isIntegerType()) ||
+ MinElts == 1) &&
+ !FeatureMap.lookup("zve64x"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
+ else if (Info.ElementType->isFloat16Type() && !FeatureMap.lookup("zvfh") &&
+ !FeatureMap.lookup("zvfhmin"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D)
+ << Ty << "zvfh or zvfhmin";
+ else if (Info.ElementType->isBFloat16Type() &&
+ !FeatureMap.lookup("experimental-zvfbfmin"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
+ else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
+ !FeatureMap.lookup("zve32f"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
+ // Given that the caller already checked isRVVType() before calling this
+ // function, if we don't have at least zve32x supported, we need to emit an error.
+ else if (!FeatureMap.lookup("zve32x"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
+}
+
+/// Are the two types RVV-bitcast-compatible types? I.e. is bitcasting from the
+/// first RVV type (e.g. an RVV scalable type) to the second type (e.g. an RVV
+/// VLS type) allowed?
+///
+/// This will also return false if the two given types do not make sense from
+/// the perspective of RVV bitcasts.
+bool SemaRISCV::isValidRVVBitcast(QualType srcTy, QualType destTy) {
+ assert(srcTy->isVectorType() || destTy->isVectorType());
+
+ auto ValidScalableConversion = [](QualType FirstType, QualType SecondType) {
+ if (!FirstType->isRVVSizelessBuiltinType())
+ return false;
+
+ const auto *VecTy = SecondType->getAs<VectorType>();
+ return VecTy && VecTy->getVectorKind() == VectorKind::RVVFixedLengthData;
+ };
+
+ return ValidScalableConversion(srcTy, destTy) ||
+ ValidScalableConversion(destTy, srcTy);
+}
+
+SemaRISCV::SemaRISCV(Sema& S) : SemaBase(S) {}
+
+} // namespace clang
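[Editor's note, not part of the patch] For readers skimming the large switch above, here is a minimal user-level sketch of the kind of code these new SemaRISCV checks accept or reject. The builtin name and the argument position/range come from the `__builtin_riscv_aes64ks1i` case in the patch; the exact diagnostic wording and the target/-march flags needed to enable the builtin are assumptions.

```cpp
// Sketch only: assumes something like -target riscv64 with the scalar crypto
// extension enabled (e.g. -march=rv64gc_zkne); flags are an assumption.
#include <cstdint>

std::uint64_t ok(std::uint64_t rs1) {
  // rnum is argument 1 and must be a constant in [0, 10], so 10 is accepted.
  return __builtin_riscv_aes64ks1i(rs1, 10);
}

std::uint64_t bad(std::uint64_t rs1) {
  // 11 is outside [0, 10]; SemaRISCV::CheckBuiltinFunctionCall is expected to
  // reject this via SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 10).
  return __builtin_riscv_aes64ks1i(rs1, 11);
}
```

The same short-circuit pattern covers the vector cases too: vxrm operands are constrained to [0, 3], frm operands to [0, 4], and the nontemporal load/store builtins require a constant domain in [2, 5].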
diff --git a/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
deleted file mode 100644
index 26e13e87b1d6b..0000000000000
--- a/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ /dev/null
@@ -1,504 +0,0 @@
-//==- SemaRISCVVectorLookup.cpp - Name Lookup for RISC-V Vector Intrinsic -==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements name lookup for RISC-V vector intrinsic.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/AST/ASTContext.h"
-#include "clang/AST/Decl.h"
-#include "clang/Basic/Builtins.h"
-#include "clang/Basic/TargetInfo.h"
-#include "clang/Lex/Preprocessor.h"
-#include "clang/Sema/Lookup.h"
-#include "clang/Sema/RISCVIntrinsicManager.h"
-#include "clang/Sema/Sema.h"
-#include "clang/Support/RISCVVIntrinsicUtils.h"
-#include "llvm/ADT/SmallVector.h"
-#include <optional>
-#include <string>
-#include <vector>
-
-using namespace llvm;
-using namespace clang;
-using namespace clang::RISCV;
-
-using IntrinsicKind = sema::RISCVIntrinsicManager::IntrinsicKind;
-
-namespace {
-
-// Function definition of a RVV intrinsic.
-struct RVVIntrinsicDef {
- /// Mapping to which clang built-in function, e.g. __builtin_rvv_vadd.
- std::string BuiltinName;
-
- /// Function signature, first element is return type.
- RVVTypes Signature;
-};
-
-struct RVVOverloadIntrinsicDef {
- // Indexes of RISCVIntrinsicManagerImpl::IntrinsicList.
- SmallVector<uint16_t, 8> Indexes;
-};
-
-} // namespace
-
-static const PrototypeDescriptor RVVSignatureTable[] = {
-#define DECL_SIGNATURE_TABLE
-#include "clang/Basic/riscv_vector_builtin_sema.inc"
-#undef DECL_SIGNATURE_TABLE
-};
-
-static const PrototypeDescriptor RVSiFiveVectorSignatureTable[] = {
-#define DECL_SIGNATURE_TABLE
-#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
-#undef DECL_SIGNATURE_TABLE
-};
-
-static const RVVIntrinsicRecord RVVIntrinsicRecords[] = {
-#define DECL_INTRINSIC_RECORDS
-#include "clang/Basic/riscv_vector_builtin_sema.inc"
-#undef DECL_INTRINSIC_RECORDS
-};
-
-static const RVVIntrinsicRecord RVSiFiveVectorIntrinsicRecords[] = {
-#define DECL_INTRINSIC_RECORDS
-#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
-#undef DECL_INTRINSIC_RECORDS
-};
-
-// Get subsequence of signature table.
-static ArrayRef<PrototypeDescriptor>
-ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
- switch (K) {
- case IntrinsicKind::RVV:
- return ArrayRef(&RVVSignatureTable[Index], Length);
- case IntrinsicKind::SIFIVE_VECTOR:
- return ArrayRef(&RVSiFiveVectorSignatureTable[Index], Length);
- }
- llvm_unreachable("Unhandled IntrinsicKind");
-}
-
-static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
- QualType QT;
- switch (Type->getScalarType()) {
- case ScalarTypeKind::Void:
- QT = Context.VoidTy;
- break;
- case ScalarTypeKind::Size_t:
- QT = Context.getSizeType();
- break;
- case ScalarTypeKind::Ptrdiff_t:
- QT = Context.getPointerDiffType();
- break;
- case ScalarTypeKind::UnsignedLong:
- QT = Context.UnsignedLongTy;
- break;
- case ScalarTypeKind::SignedLong:
- QT = Context.LongTy;
- break;
- case ScalarTypeKind::Boolean:
- QT = Context.BoolTy;
- break;
- case ScalarTypeKind::SignedInteger:
- QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), true);
- break;
- case ScalarTypeKind::UnsignedInteger:
- QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), false);
- break;
- case ScalarTypeKind::BFloat:
- QT = Context.BFloat16Ty;
- break;
- case ScalarTypeKind::Float:
- switch (Type->getElementBitwidth()) {
- case 64:
- QT = Context.DoubleTy;
- break;
- case 32:
- QT = Context.FloatTy;
- break;
- case 16:
- QT = Context.Float16Ty;
- break;
- default:
- llvm_unreachable("Unsupported floating point width.");
- }
- break;
- case Invalid:
- case Undefined:
- llvm_unreachable("Unhandled type.");
- }
- if (Type->isVector()) {
- if (Type->isTuple())
- QT = Context.getScalableVectorType(QT, *Type->getScale(), Type->getNF());
- else
- QT = Context.getScalableVectorType(QT, *Type->getScale());
- }
-
- if (Type->isConstant())
- QT = Context.getConstType(QT);
-
- // Transform the type to a pointer as the last step, if necessary.
- if (Type->isPointer())
- QT = Context.getPointerType(QT);
-
- return QT;
-}
-
-namespace {
-class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
-private:
- Sema &S;
- ASTContext &Context;
- RVVTypeCache TypeCache;
- bool ConstructedRISCVVBuiltins;
- bool ConstructedRISCVSiFiveVectorBuiltins;
-
- // List of all RVV intrinsic.
- std::vector<RVVIntrinsicDef> IntrinsicList;
- // Mapping function name to index of IntrinsicList.
- StringMap<uint16_t> Intrinsics;
- // Mapping function name to RVVOverloadIntrinsicDef.
- StringMap<RVVOverloadIntrinsicDef> OverloadIntrinsics;
-
-
- // Create RVVIntrinsicDef.
- void InitRVVIntrinsic(const RVVIntrinsicRecord &Record, StringRef SuffixStr,
- StringRef OverloadedSuffixStr, bool IsMask,
- RVVTypes &Types, bool HasPolicy, Policy PolicyAttrs);
-
- // Create FunctionDecl for a vector intrinsic.
- void CreateRVVIntrinsicDecl(LookupResult &LR, IdentifierInfo *II,
- Preprocessor &PP, uint32_t Index,
- bool IsOverload);
-
- void ConstructRVVIntrinsics(ArrayRef<RVVIntrinsicRecord> Recs,
- IntrinsicKind K);
-
-public:
- RISCVIntrinsicManagerImpl(clang::Sema &S) : S(S), Context(S.Context) {
- ConstructedRISCVVBuiltins = false;
- ConstructedRISCVSiFiveVectorBuiltins = false;
- }
-
- // Initialize IntrinsicList
- void InitIntrinsicList() override;
-
- // Create RISC-V vector intrinsic and insert into symbol table if found, and
- // return true, otherwise return false.
- bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
- Preprocessor &PP) override;
-};
-} // namespace
-
-void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
- ArrayRef<RVVIntrinsicRecord> Recs, IntrinsicKind K) {
- const TargetInfo &TI = Context.getTargetInfo();
- static const std::pair<const char *, RVVRequire> FeatureCheckList[] = {
- {"64bit", RVV_REQ_RV64},
- {"xsfvcp", RVV_REQ_Xsfvcp},
- {"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
- {"xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq},
- {"xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod},
- {"xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq},
- {"zvbb", RVV_REQ_Zvbb},
- {"zvbc", RVV_REQ_Zvbc},
- {"zvkb", RVV_REQ_Zvkb},
- {"zvkg", RVV_REQ_Zvkg},
- {"zvkned", RVV_REQ_Zvkned},
- {"zvknha", RVV_REQ_Zvknha},
- {"zvknhb", RVV_REQ_Zvknhb},
- {"zvksed", RVV_REQ_Zvksed},
- {"zvksh", RVV_REQ_Zvksh},
- {"zvfbfwma", RVV_REQ_Zvfbfwma},
- {"zvfbfmin", RVV_REQ_Zvfbfmin},
- {"experimental", RVV_REQ_Experimental}};
-
- // Construction of RVVIntrinsicRecords need to sync with createRVVIntrinsics
- // in RISCVVEmitter.cpp.
- for (auto &Record : Recs) {
- // Check requirements.
- if (llvm::any_of(FeatureCheckList, [&](const auto &Item) {
- return (Record.RequiredExtensions & Item.second) == Item.second &&
- !TI.hasFeature(Item.first);
- }))
- continue;
-
- // Create Intrinsics for each type and LMUL.
- BasicType BaseType = BasicType::Unknown;
- ArrayRef<PrototypeDescriptor> BasicProtoSeq =
- ProtoSeq2ArrayRef(K, Record.PrototypeIndex, Record.PrototypeLength);
- ArrayRef<PrototypeDescriptor> SuffixProto =
- ProtoSeq2ArrayRef(K, Record.SuffixIndex, Record.SuffixLength);
- ArrayRef<PrototypeDescriptor> OverloadedSuffixProto = ProtoSeq2ArrayRef(
- K, Record.OverloadedSuffixIndex, Record.OverloadedSuffixSize);
-
- PolicyScheme UnMaskedPolicyScheme =
- static_cast<PolicyScheme>(Record.UnMaskedPolicyScheme);
- PolicyScheme MaskedPolicyScheme =
- static_cast<PolicyScheme>(Record.MaskedPolicyScheme);
-
- const Policy DefaultPolicy;
-
- llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
- RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/false,
- /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
- UnMaskedPolicyScheme, DefaultPolicy, Record.IsTuple);
-
- llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq;
- if (Record.HasMasked)
- ProtoMaskSeq = RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
- Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy,
- Record.IsTuple);
-
- bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
- bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
- SmallVector<Policy> SupportedUnMaskedPolicies =
- RVVIntrinsic::getSupportedUnMaskedPolicies();
- SmallVector<Policy> SupportedMaskedPolicies =
- RVVIntrinsic::getSupportedMaskedPolicies(Record.HasTailPolicy,
- Record.HasMaskPolicy);
-
- for (unsigned int TypeRangeMaskShift = 0;
- TypeRangeMaskShift <= static_cast<unsigned int>(BasicType::MaxOffset);
- ++TypeRangeMaskShift) {
- unsigned int BaseTypeI = 1 << TypeRangeMaskShift;
- BaseType = static_cast<BasicType>(BaseTypeI);
-
- if ((BaseTypeI & Record.TypeRangeMask) != BaseTypeI)
- continue;
-
- if (BaseType == BasicType::Float16) {
- if ((Record.RequiredExtensions & RVV_REQ_Zvfhmin) == RVV_REQ_Zvfhmin) {
- if (!TI.hasFeature("zvfhmin"))
- continue;
- } else if (!TI.hasFeature("zvfh")) {
- continue;
- }
- }
-
- // Expanded with different LMUL.
- for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
- if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))
- continue;
-
- std::optional<RVVTypes> Types =
- TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoSeq);
-
- // Ignored to create new intrinsic if there are any illegal types.
- if (!Types.has_value())
- continue;
-
- std::string SuffixStr = RVVIntrinsic::getSuffixStr(
- TypeCache, BaseType, Log2LMUL, SuffixProto);
- std::string OverloadedSuffixStr = RVVIntrinsic::getSuffixStr(
- TypeCache, BaseType, Log2LMUL, OverloadedSuffixProto);
-
- // Create non-masked intrinsic.
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, false, *Types,
- UnMaskedHasPolicy, DefaultPolicy);
-
- // Create non-masked policy intrinsic.
- if (Record.UnMaskedPolicyScheme != PolicyScheme::SchemeNone) {
- for (auto P : SupportedUnMaskedPolicies) {
- llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
- RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/false,
- /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
- UnMaskedPolicyScheme, P, Record.IsTuple);
- std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
- BaseType, Log2LMUL, Record.NF, PolicyPrototype);
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
- /*IsMask=*/false, *PolicyTypes, UnMaskedHasPolicy,
- P);
- }
- }
- if (!Record.HasMasked)
- continue;
- // Create masked intrinsic.
- std::optional<RVVTypes> MaskTypes =
- TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoMaskSeq);
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, true,
- *MaskTypes, MaskedHasPolicy, DefaultPolicy);
- if (Record.MaskedPolicyScheme == PolicyScheme::SchemeNone)
- continue;
- // Create masked policy intrinsic.
- for (auto P : SupportedMaskedPolicies) {
- llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
- RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
- Record.HasVL, Record.NF, MaskedPolicyScheme, P,
- Record.IsTuple);
- std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
- BaseType, Log2LMUL, Record.NF, PolicyPrototype);
- InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
- /*IsMask=*/true, *PolicyTypes, MaskedHasPolicy, P);
- }
- } // End for different LMUL
- } // End for different TypeRange
- }
-}
-
-void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
-
- if (S.DeclareRISCVVBuiltins && !ConstructedRISCVVBuiltins) {
- ConstructedRISCVVBuiltins = true;
- ConstructRVVIntrinsics(RVVIntrinsicRecords,
- IntrinsicKind::RVV);
- }
- if (S.DeclareRISCVSiFiveVectorBuiltins &&
- !ConstructedRISCVSiFiveVectorBuiltins) {
- ConstructedRISCVSiFiveVectorBuiltins = true;
- ConstructRVVIntrinsics(RVSiFiveVectorIntrinsicRecords,
- IntrinsicKind::SIFIVE_VECTOR);
- }
-}
-
-// Compute name and signatures for intrinsic with practical types.
-void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
- const RVVIntrinsicRecord &Record, StringRef SuffixStr,
- StringRef OverloadedSuffixStr, bool IsMasked, RVVTypes &Signature,
- bool HasPolicy, Policy PolicyAttrs) {
- // Function name, e.g. vadd_vv_i32m1.
- std::string Name = Record.Name;
- if (!SuffixStr.empty())
- Name += "_" + SuffixStr.str();
-
- // Overloaded function name, e.g. vadd.
- std::string OverloadedName;
- if (!Record.OverloadedName)
- OverloadedName = StringRef(Record.Name).split("_").first.str();
- else
- OverloadedName = Record.OverloadedName;
- if (!OverloadedSuffixStr.empty())
- OverloadedName += "_" + OverloadedSuffixStr.str();
-
- // clang built-in function name, e.g. __builtin_rvv_vadd.
- std::string BuiltinName = std::string(Record.Name);
-
- RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
- OverloadedName, PolicyAttrs,
- Record.HasFRMRoundModeOp);
-
- // Put into IntrinsicList.
- uint16_t Index = IntrinsicList.size();
- assert(IntrinsicList.size() == (size_t)Index &&
- "Intrinsics indices overflow.");
- IntrinsicList.push_back({BuiltinName, Signature});
-
- // Creating mapping to Intrinsics.
- Intrinsics.insert({Name, Index});
-
- // Get the RVVOverloadIntrinsicDef.
- RVVOverloadIntrinsicDef &OverloadIntrinsicDef =
- OverloadIntrinsics[OverloadedName];
-
- // And added the index.
- OverloadIntrinsicDef.Indexes.push_back(Index);
-}
-
-void RISCVIntrinsicManagerImpl::CreateRVVIntrinsicDecl(LookupResult &LR,
- IdentifierInfo *II,
- Preprocessor &PP,
- uint32_t Index,
- bool IsOverload) {
- ASTContext &Context = S.Context;
- RVVIntrinsicDef &IDef = IntrinsicList[Index];
- RVVTypes Sigs = IDef.Signature;
- size_t SigLength = Sigs.size();
- RVVType *ReturnType = Sigs[0];
- QualType RetType = RVVType2Qual(Context, ReturnType);
- SmallVector<QualType, 8> ArgTypes;
- QualType BuiltinFuncType;
-
- // Skip return type, and convert RVVType to QualType for arguments.
- for (size_t i = 1; i < SigLength; ++i)
- ArgTypes.push_back(RVVType2Qual(Context, Sigs[i]));
-
- FunctionProtoType::ExtProtoInfo PI(
- Context.getDefaultCallingConvention(false, false, true));
-
- PI.Variadic = false;
-
- SourceLocation Loc = LR.getNameLoc();
- BuiltinFuncType = Context.getFunctionType(RetType, ArgTypes, PI);
- DeclContext *Parent = Context.getTranslationUnitDecl();
-
- FunctionDecl *RVVIntrinsicDecl = FunctionDecl::Create(
- Context, Parent, Loc, Loc, II, BuiltinFuncType, /*TInfo=*/nullptr,
- SC_Extern, S.getCurFPFeatures().isFPConstrained(),
- /*isInlineSpecified*/ false,
- /*hasWrittenPrototype*/ true);
-
- // Create Decl objects for each parameter, adding them to the
- // FunctionDecl.
- const auto *FP = cast<FunctionProtoType>(BuiltinFuncType);
- SmallVector<ParmVarDecl *, 8> ParmList;
- for (unsigned IParm = 0, E = FP->getNumParams(); IParm != E; ++IParm) {
- ParmVarDecl *Parm =
- ParmVarDecl::Create(Context, RVVIntrinsicDecl, Loc, Loc, nullptr,
- FP->getParamType(IParm), nullptr, SC_None, nullptr);
- Parm->setScopeInfo(0, IParm);
- ParmList.push_back(Parm);
- }
- RVVIntrinsicDecl->setParams(ParmList);
-
- // Add function attributes.
- if (IsOverload)
- RVVIntrinsicDecl->addAttr(OverloadableAttr::CreateImplicit(Context));
-
- // Setup alias to __builtin_rvv_*
- IdentifierInfo &IntrinsicII =
- PP.getIdentifierTable().get("__builtin_rvv_" + IDef.BuiltinName);
- RVVIntrinsicDecl->addAttr(
- BuiltinAliasAttr::CreateImplicit(S.Context, &IntrinsicII));
-
- // Add to symbol table.
- LR.addDecl(RVVIntrinsicDecl);
-}
-
-bool RISCVIntrinsicManagerImpl::CreateIntrinsicIfFound(LookupResult &LR,
- IdentifierInfo *II,
- Preprocessor &PP) {
- StringRef Name = II->getName();
- if (!Name.consume_front("__riscv_"))
- return false;
-
- // Lookup the function name from the overload intrinsics first.
- auto OvIItr = OverloadIntrinsics.find(Name);
- if (OvIItr != OverloadIntrinsics.end()) {
- const RVVOverloadIntrinsicDef &OvIntrinsicDef = OvIItr->second;
- for (auto Index : OvIntrinsicDef.Indexes)
- CreateRVVIntrinsicDecl(LR, II, PP, Index,
- /*IsOverload*/ true);
-
- // If we added overloads, need to resolve the lookup result.
- LR.resolveKind();
- return true;
- }
-
- // Lookup the function name from the intrinsics.
- auto Itr = Intrinsics.find(Name);
- if (Itr != Intrinsics.end()) {
- CreateRVVIntrinsicDecl(LR, II, PP, Itr->second,
- /*IsOverload*/ false);
- return true;
- }
-
- // It's not an RVV intrinsics.
- return false;
-}
-
-namespace clang {
-std::unique_ptr<clang::sema::RISCVIntrinsicManager>
-CreateRISCVIntrinsicManager(Sema &S) {
- return std::make_unique<RISCVIntrinsicManagerImpl>(S);
-}
-} // namespace clang
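[Editor's note, not part of the patch] The file removed above is the lazy-lookup machinery that now lives in SemaRISCV.cpp: nothing is declared up front; when name lookup sees an identifier starting with `__riscv_`, CreateIntrinsicIfFound strips the prefix, consults the overload map and then the exact-name map, and only then materializes a FunctionDecl aliased to the corresponding `__builtin_rvv_*` builtin. The sketch below models that dispatch with plain standard-library containers; the container shapes and function names are simplifications, not clang's actual data structures.

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Simplified stand-ins for IntrinsicList / Intrinsics / OverloadIntrinsics.
struct Registry {
  std::map<std::string, unsigned> Exact;                 // e.g. "vadd_vv_i32m1" -> index
  std::map<std::string, std::vector<unsigned>> Overload; // e.g. "vadd" -> candidate indices
};

// Models CreateIntrinsicIfFound: returns false quickly for non-__riscv_ names,
// otherwise "declares" whatever the maps know about (clang would create
// FunctionDecls aliased to __builtin_rvv_* here).
bool createIntrinsicIfFound(const Registry &R, std::string Name) {
  const std::string Prefix = "__riscv_";
  if (Name.compare(0, Prefix.size(), Prefix) != 0)
    return false;
  Name.erase(0, Prefix.size());

  if (auto It = R.Overload.find(Name); It != R.Overload.end()) {
    for (unsigned Index : It->second)
      std::cout << "declare overload candidate #" << Index << " for " << Name << "\n";
    return true;
  }
  if (auto It = R.Exact.find(Name); It != R.Exact.end()) {
    std::cout << "declare intrinsic #" << It->second << " for " << Name << "\n";
    return true;
  }
  return false; // Not an RVV intrinsic; fall back to normal lookup.
}

int main() {
  Registry R;
  R.Exact["vadd_vv_i32m1"] = 0;
  R.Overload["vadd"] = {0, 1, 2};
  createIntrinsicIfFound(R, "__riscv_vadd_vv_i32m1"); // exact hit
  createIntrinsicIfFound(R, "__riscv_vadd");          // overloaded hit
  createIntrinsicIfFound(R, "printf");                // not RVV, returns false
}
```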
>From 0725cc395704fb213948f57caa075763d9c3caf9 Mon Sep 17 00:00:00 2001
From: Vlad Serebrennikov <serebrennikov.vladislav at gmail.com>
Date: Sun, 19 May 2024 12:29:09 +0300
Subject: [PATCH 2/5] Run clang-format
---
clang/include/clang/Sema/Sema.h | 2 +-
clang/include/clang/Sema/SemaRISCV.h | 2 +-
clang/lib/Sema/SemaChecking.cpp | 14 +++++++-------
clang/lib/Sema/SemaDecl.cpp | 2 +-
clang/lib/Sema/SemaRISCV.cpp | 22 ++++++++++++----------
5 files changed, 22 insertions(+), 20 deletions(-)
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 8a2427d26a104..e01cca9f380a6 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -2051,7 +2051,7 @@ class Sema final : public SemaBase {
bool checkArgCountAtLeast(CallExpr *Call, unsigned MinArgCount);
bool checkArgCountAtMost(CallExpr *Call, unsigned MaxArgCount);
bool checkArgCountRange(CallExpr *Call, unsigned MinArgCount,
- unsigned MaxArgCount);
+ unsigned MaxArgCount);
bool checkArgCount(CallExpr *Call, unsigned DesiredArgCount);
private:
diff --git a/clang/include/clang/Sema/SemaRISCV.h b/clang/include/clang/Sema/SemaRISCV.h
index e71c999e15513..3eee79fcd5ec7 100644
--- a/clang/include/clang/Sema/SemaRISCV.h
+++ b/clang/include/clang/Sema/SemaRISCV.h
@@ -30,7 +30,7 @@ class SemaRISCV : public SemaBase {
bool CheckLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
- CallExpr *TheCall);
+ CallExpr *TheCall);
void checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
const llvm::StringMap<bool> &FeatureMap);
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 5d8cbe7e32048..8c08bf7510c85 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -121,8 +121,7 @@ static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
/// Checks that a call expression's argument count is at least the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
-bool Sema::checkArgCountAtLeast(CallExpr *Call,
- unsigned MinArgCount) {
+bool Sema::checkArgCountAtLeast(CallExpr *Call, unsigned MinArgCount) {
unsigned ArgCount = Call->getNumArgs();
if (ArgCount >= MinArgCount)
return false;
@@ -139,8 +138,7 @@ bool Sema::checkArgCountAtMost(CallExpr *Call, unsigned MaxArgCount) {
unsigned ArgCount = Call->getNumArgs();
if (ArgCount <= MaxArgCount)
return false;
- return Diag(Call->getEndLoc(),
- diag::err_typecheck_call_too_many_args_at_most)
+ return Diag(Call->getEndLoc(), diag::err_typecheck_call_too_many_args_at_most)
<< 0 /*function call*/ << MaxArgCount << ArgCount
<< /*is non object*/ 0 << Call->getSourceRange();
}
@@ -149,7 +147,7 @@ bool Sema::checkArgCountAtMost(CallExpr *Call, unsigned MaxArgCount) {
/// is useful when doing custom type-checking on a variadic function. Returns
/// true on error.
bool Sema::checkArgCountRange(CallExpr *Call, unsigned MinArgCount,
- unsigned MaxArgCount) {
+ unsigned MaxArgCount) {
return checkArgCountAtLeast(Call, MinArgCount) ||
checkArgCountAtMost(Call, MaxArgCount);
}
@@ -2626,7 +2624,8 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__builtin_classify_type:
- if (checkArgCount(TheCall, 1)) return true;
+ if (checkArgCount(TheCall, 1))
+ return true;
TheCall->setType(Context.IntTy);
break;
case Builtin::BI__builtin_complex:
@@ -2634,7 +2633,8 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__builtin_constant_p: {
- if (checkArgCount(TheCall, 1)) return true;
+ if (checkArgCount(TheCall, 1))
+ return true;
ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
if (Arg.isInvalid()) return true;
TheCall->setArg(0, Arg.get());
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 74aa63b371fd7..d884ca446b7d9 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -8928,7 +8928,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
llvm::StringMap<bool> CallerFeatureMap;
Context.getFunctionFeatureMap(CallerFeatureMap, FD);
RISCV().checkRVVTypeSupport(T, NewVD->getLocation(), cast<Decl>(CurContext),
- CallerFeatureMap);
+ CallerFeatureMap);
}
}
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 8aae622e6abc4..40a07da0f2a61 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -170,7 +170,6 @@ class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
// Mapping function name to RVVOverloadIntrinsicDef.
StringMap<RVVOverloadIntrinsicDef> OverloadIntrinsics;
-
// Create RVVIntrinsicDef.
void InitRVVIntrinsic(const RVVIntrinsicRecord &Record, StringRef SuffixStr,
StringRef OverloadedSuffixStr, bool IsMask,
@@ -346,7 +345,7 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
/*IsMask=*/true, *PolicyTypes, MaskedHasPolicy, P);
}
} // End for different LMUL
- } // End for different TypeRange
+ } // End for different TypeRange
}
}
@@ -354,8 +353,7 @@ void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
if (S.RISCV().DeclareRVVBuiltins && !ConstructedRISCVVBuiltins) {
ConstructedRISCVVBuiltins = true;
- ConstructRVVIntrinsics(RVVIntrinsicRecords,
- IntrinsicKind::RVV);
+ ConstructRVVIntrinsics(RVVIntrinsicRecords, IntrinsicKind::RVV);
}
if (S.RISCV().DeclareSiFiveVectorBuiltins &&
!ConstructedRISCVSiFiveVectorBuiltins) {
@@ -550,7 +548,8 @@ static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
std::string RequiredExt = "zvl" + std::to_string(MinRequiredVLEN) + "b";
if (!TI.hasFeature(RequiredExt))
return S.Diag(TheCall->getBeginLoc(),
- diag::err_riscv_type_requires_extension) << Type << RequiredExt;
+ diag::err_riscv_type_requires_extension)
+ << Type << RequiredExt;
return false;
}
@@ -728,8 +727,10 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
diag::err_riscv_builtin_requires_extension)
<< /* IsExtension */ true << TheCall->getSourceRange() << "zvknb";
- return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type, ElemSize * 4) ||
- CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op2Type, ElemSize * 4) ||
+ return CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op1Type,
+ ElemSize * 4) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op2Type,
+ ElemSize * 4) ||
CheckInvalidVLENandLMUL(TI, TheCall, SemaRef, Op3Type, ElemSize * 4);
}
@@ -1355,7 +1356,8 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
ExprResult ValArg = TheCall->getArg(1);
InitializedEntity Entity = InitializedEntity::InitializeParameter(
Context, ValType, /*consume*/ false);
- ValArg = SemaRef.PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+ ValArg =
+ SemaRef.PerformCopyInitialization(Entity, SourceLocation(), ValArg);
if (ValArg.isInvalid())
return true;
@@ -1368,7 +1370,7 @@ bool SemaRISCV::CheckBuiltinFunctionCall(const TargetInfo &TI,
}
void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
- const llvm::StringMap<bool> &FeatureMap) {
+ const llvm::StringMap<bool> &FeatureMap) {
ASTContext::BuiltinVectorTypeInfo Info =
SemaRef.Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
unsigned EltSize = SemaRef.Context.getTypeSize(Info.ElementType);
@@ -1420,6 +1422,6 @@ bool SemaRISCV::isValidRVVBitcast(QualType srcTy, QualType destTy) {
ValidScalableConversion(destTy, srcTy);
}
-SemaRISCV::SemaRISCV(Sema& S) : SemaBase(S) {}
+SemaRISCV::SemaRISCV(Sema &S) : SemaBase(S) {}
} // namespace clang
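[Editor's note, not part of the patch] Aside from whitespace, the hunks above also show the arg-count helpers that the RISC-V checks compose with ||: too few arguments, then too many, then per-argument constant ranges, each returning true on error. Below is a standalone model of that composition; the free functions mirror the helpers' names and conventions but are simplifications, independent of clang's Sema and CallExpr types.

```cpp
#include <iostream>

// Return true on error, mirroring the convention of the helpers in the patch.
bool checkArgCountAtLeast(unsigned ArgCount, unsigned MinArgCount) {
  if (ArgCount >= MinArgCount)
    return false;
  std::cerr << "error: too few arguments (expected at least " << MinArgCount << ")\n";
  return true;
}

bool checkArgCountAtMost(unsigned ArgCount, unsigned MaxArgCount) {
  if (ArgCount <= MaxArgCount)
    return false;
  std::cerr << "error: too many arguments (expected at most " << MaxArgCount << ")\n";
  return true;
}

// checkArgCountRange is just the short-circuit composition of the two.
bool checkArgCountRange(unsigned ArgCount, unsigned MinArgCount, unsigned MaxArgCount) {
  return checkArgCountAtLeast(ArgCount, MinArgCount) ||
         checkArgCountAtMost(ArgCount, MaxArgCount);
}

int main() {
  // The nontemporal load/store checks use exactly this shape:
  // load takes 1-2 arguments, store takes 2-3.
  std::cout << checkArgCountRange(2, 1, 2) << "\n"; // 0: ok
  std::cout << checkArgCountRange(4, 2, 3) << "\n"; // 1: error
}
```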
>From 4d815b231ff4744099f0f21db8adc7562da14c6c Mon Sep 17 00:00:00 2001
From: Vlad Serebrennikov <serebrennikov.vladislav at gmail.com>
Date: Mon, 20 May 2024 20:10:22 +0300
Subject: [PATCH 3/5] Fix `SemaRISCV.cpp` file header
---
clang/lib/Sema/SemaRISCV.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 40a07da0f2a61..403a420d1d0ba 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -1,4 +1,4 @@
-//==- SemaRISCVVectorLookup.cpp - Name Lookup for RISC-V Vector Intrinsic -==//
+//==------- SemaRISCV.cpp ------- RISC-V target-specific routines ----------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements name lookup for RISC-V vector intrinsic.
+// This file implements semantic analysis functions specific to RISC-V.
//
//===----------------------------------------------------------------------===//
>From e6b99223c4d74c08741400eb3e2c0c651a10c360 Mon Sep 17 00:00:00 2001
From: Vlad Serebrennikov <serebrennikov.vladislav at gmail.com>
Date: Mon, 20 May 2024 20:17:14 +0300
Subject: [PATCH 4/5] Fix `SemaRISCV.cpp` file header again
---
clang/lib/Sema/SemaRISCV.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 403a420d1d0ba..ea6e3f75490bc 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -1,4 +1,4 @@
-//==------- SemaRISCV.cpp ------- RISC-V target-specific routines ----------==//
+//===------ SemaRISCV.cpp ------- RISC-V target-specific routines ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
>From 7b6504e60adc7b2d443b3a263ea810c9c1d1b603 Mon Sep 17 00:00:00 2001
From: Vlad Serebrennikov <serebrennikov.vladislav at gmail.com>
Date: Tue, 21 May 2024 10:39:13 +0300
Subject: [PATCH 5/5] Add `-*- C++ -*-` to `SemaRISCV.h` file header
---
clang/include/clang/Sema/SemaRISCV.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/include/clang/Sema/SemaRISCV.h b/clang/include/clang/Sema/SemaRISCV.h
index 3eee79fcd5ec7..b6dd81f8d4d80 100644
--- a/clang/include/clang/Sema/SemaRISCV.h
+++ b/clang/include/clang/Sema/SemaRISCV.h
@@ -1,4 +1,4 @@
-//===----- SemaRISCV.h ------- RISC-V target-specific routines ------------===//
+//===----- SemaRISCV.h ---- RISC-V target-specific routines ---*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.