[clang] [llvm] adding clang codegen (PR #109331)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 19 13:45:33 PDT 2024
https://github.com/joaosaffran created https://github.com/llvm/llvm-project/pull/109331
From 770a7032b8d7fe67c1e44813e7d11c5e3ea81a41 Mon Sep 17 00:00:00 2001
From: Joao Saffran <jderezende at microsoft.com>
Date: Thu, 19 Sep 2024 00:13:51 +0000
Subject: [PATCH] adding clang codegen
---
clang/include/clang/Basic/Builtins.td | 6 +
clang/lib/CodeGen/CGBuiltin.cpp | 4821 ++++++++++-------
clang/lib/CodeGen/CGHLSLRuntime.h | 1 -
clang/lib/Headers/hlsl/hlsl_intrinsics.h | 20 +
clang/lib/Sema/SemaHLSL.cpp | 56 +-
.../builtins/asuint-splitdouble.hlsl | 9 +
llvm/include/llvm/IR/IntrinsicsDirectX.td | 5 +
llvm/lib/Target/DirectX/DXIL.td | 1 +
.../Target/DirectX/DXILIntrinsicExpansion.cpp | 13 +
9 files changed, 2840 insertions(+), 2092 deletions(-)
create mode 100644 clang/test/CodeGenHLSL/builtins/asuint-splitdouble.hlsl
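For context, the patch adds a new HLSL builtin, __builtin_hlsl_asuint_splitdouble, and wires it from Clang (Builtins.td, CGBuiltin.cpp, hlsl_intrinsics.h, SemaHLSL.cpp) through to the DirectX backend (IntrinsicsDirectX.td, DXIL.td, DXILIntrinsicExpansion.cpp), with a new codegen test. Note that most of the CGBuiltin.cpp churn below is formatting-only reflow. A minimal HLSL-level sketch of the intended usage follows; the exact overload is an assumption inferred from the builtin name and the new test asuint-splitdouble.hlsl, and the authoritative declaration is the one this patch adds to hlsl_intrinsics.h:

  // Sketch (assumed overload): asuint with a double splits the value's
  // 64-bit pattern into its low and high 32 bits.
  void fn(double d) {
    uint lowbits, highbits;
    asuint(d, lowbits, highbits); // lowbits = bits 0..31, highbits = bits 32..63
  }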
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 6cf03d27055cd9..c8f0903246b7cd 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -4782,6 +4782,12 @@ def HLSLStep: LangBuiltin<"HLSL_LANG"> {
let Prototype = "void(...)";
}
+def HLSLAsUintSplitDouble: LangBuiltin<"HLSL_LANG"> {
+ let Spellings = ["__builtin_hlsl_asuint_splitdouble"];
+ let Attributes = [NoThrow, Const];
+ let Prototype = "void(...)";
+}
+
// Builtins for XRay.
def XRayCustomEvent : Builtin {
let Spellings = ["__xray_customevent"];
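A note on the prototype in the hunk above: like the neighboring HLSL builtins (e.g. HLSLStep), the new builtin uses the catch-all "void(...)" prototype, so argument count and types are not encoded in the prototype string but enforced in Sema; the SemaHLSL.cpp changes in this patch's diffstat are presumably where that checking for __builtin_hlsl_asuint_splitdouble lives.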
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index a52e880a764252..ec850024190b17 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -11,12 +11,14 @@
//===----------------------------------------------------------------------===//
#include "ABIInfo.h"
+#include "Address.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
+#include "CGValue.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
@@ -27,6 +29,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Type.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
@@ -34,12 +37,16 @@
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
@@ -58,6 +65,7 @@
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
+#include "llvm/IR/Type.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
@@ -65,6 +73,7 @@
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/X86TargetParser.h"
+#include <cassert>
#include <optional>
#include <sstream>
@@ -164,15 +173,15 @@ llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
}
llvm::FunctionType *Ty =
- cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
+ cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
-static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
- QualType T, llvm::IntegerType *IntType) {
+static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T,
+ llvm::IntegerType *IntType) {
V = CGF.EmitToMemory(V, T);
if (V->getType()->isPointerTy())
@@ -182,8 +191,8 @@ static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
return V;
}
-static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
- QualType T, llvm::Type *ResultType) {
+static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T,
+ llvm::Type *ResultType) {
V = CGF.EmitFromMemory(V, T);
if (ResultType->isPointerTy())
@@ -217,8 +226,8 @@ static Value *MakeBinaryAtomicValue(
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
- assert(CGF.getContext().hasSameUnqualifiedType(T,
- E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(
+ T, E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
Address DestAddr = CheckAtomicAlignment(CGF, E);
@@ -265,13 +274,12 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
- const CallExpr *E,
- Instruction::BinaryOps Op,
+ const CallExpr *E, Instruction::BinaryOps Op,
bool Invert = false) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
- assert(CGF.getContext().hasSameUnqualifiedType(T,
- E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(
+ T, E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
Address DestAddr = CheckAtomicAlignment(CGF, E);
@@ -347,8 +355,8 @@ static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.
-static
-Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
+static Value *EmitAtomicCmpXchgForMSIntrin(
+ CodeGenFunction &CGF, const CallExpr *E,
AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(
@@ -364,9 +372,9 @@ Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
// For Release ordering, the failure ordering should be Monotonic.
- auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
- AtomicOrdering::Monotonic :
- SuccessOrdering;
+ auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
+ ? AtomicOrdering::Monotonic
+ : SuccessOrdering;
// The atomic instruction is marked volatile for consistency with MSVC. This
// blocks the few atomics optimizations that LLVM has. If we want to optimize
@@ -443,7 +451,8 @@ static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}
-static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
+static Value *EmitAtomicIncrementValue(
+ CodeGenFunction &CGF, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
assert(E->getArg(0)->getType()->isPointerType());
@@ -493,15 +502,16 @@ static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
-static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
- const CallExpr *E, unsigned IntrinsicID,
- unsigned ConstrainedIntrinsicID) {
+static Value *
+emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+ unsigned IntrinsicID,
+ unsigned ConstrainedIntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
if (CGF.Builder.getIsFPConstrained()) {
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
- return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
+ return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
} else {
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, Src0);
@@ -510,19 +520,20 @@ static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
-static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
- const CallExpr *E, unsigned IntrinsicID,
- unsigned ConstrainedIntrinsicID) {
+static Value *
+emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+ unsigned IntrinsicID,
+ unsigned ConstrainedIntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
if (CGF.Builder.getIsFPConstrained()) {
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
- return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
+ return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
} else {
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, { Src0, Src1 });
+ return CGF.Builder.CreateCall(F, {Src0, Src1});
}
}
@@ -547,9 +558,10 @@ static Value *emitBinaryExpMaybeConstrainedFPBuiltin(
// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
-static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
- const CallExpr *E, unsigned IntrinsicID,
- unsigned ConstrainedIntrinsicID) {
+static Value *
+emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+ unsigned IntrinsicID,
+ unsigned ConstrainedIntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
@@ -557,10 +569,10 @@ static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
if (CGF.Builder.getIsFPConstrained()) {
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
- return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
+ return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1, Src2});
} else {
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
+ return CGF.Builder.CreateCall(F, {Src0, Src1, Src2});
}
}
@@ -599,8 +611,7 @@ Value *emitBuiltinWithOneOverloadedType(CodeGenFunction &CGF, const CallExpr *E,
}
// Emit an intrinsic that has 1 float or double operand, and 1 integer.
-static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
+static Value *emitFPIntBuiltin(CodeGenFunction &CGF, const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
@@ -747,11 +758,11 @@ static Value *emitRangedBuiltin(CodeGenFunction &CGF, unsigned IntrinsicID,
}
namespace {
- struct WidthAndSignedness {
- unsigned Width;
- bool Signed;
- };
-}
+struct WidthAndSignedness {
+ unsigned Width;
+ bool Signed;
+};
+} // namespace
static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
@@ -810,11 +821,9 @@ getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}
-llvm::Value *
-CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
- llvm::IntegerType *ResType,
- llvm::Value *EmittedE,
- bool IsDynamic) {
+llvm::Value *CodeGenFunction::evaluateOrEmitBuiltinObjectSize(
+ const Expr *E, unsigned Type, llvm::IntegerType *ResType,
+ llvm::Value *EmittedE, bool IsDynamic) {
uint64_t ObjectSize;
if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
@@ -1069,10 +1078,11 @@ CodeGenFunction::emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
-llvm::Value *
-CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
- llvm::IntegerType *ResType,
- llvm::Value *EmittedE, bool IsDynamic) {
+llvm::Value *CodeGenFunction::emitBuiltinObjectSize(const Expr *E,
+ unsigned Type,
+ llvm::IntegerType *ResType,
+ llvm::Value *EmittedE,
+ bool IsDynamic) {
// We need to reference an argument if the pointer is a parameter with the
// pass_object_size attribute.
if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
@@ -1190,16 +1200,19 @@ BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
static char bitActionToX86BTCode(BitTest::ActionKind A) {
switch (A) {
- case BitTest::TestOnly: return '\0';
- case BitTest::Complement: return 'c';
- case BitTest::Reset: return 'r';
- case BitTest::Set: return 's';
+ case BitTest::TestOnly:
+ return '\0';
+ case BitTest::Complement:
+ return 'c';
+ case BitTest::Reset:
+ return 'r';
+ case BitTest::Set:
+ return 's';
}
llvm_unreachable("invalid action");
}
-static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
- BitTest BT,
+static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT,
const CallExpr *E, Value *BitBase,
Value *BitPos) {
char Action = bitActionToX86BTCode(BT.Action);
@@ -1236,11 +1249,16 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
switch (I) {
- case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
- case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
- case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
- case BitTest::Release: return llvm::AtomicOrdering::Release;
- case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
+ case BitTest::Unlocked:
+ return llvm::AtomicOrdering::NotAtomic;
+ case BitTest::Sequential:
+ return llvm::AtomicOrdering::SequentiallyConsistent;
+ case BitTest::Acquire:
+ return llvm::AtomicOrdering::Acquire;
+ case BitTest::Release:
+ return llvm::AtomicOrdering::Release;
+ case BitTest::NoFence:
+ return llvm::AtomicOrdering::Monotonic;
}
llvm_unreachable("invalid interlocking");
}
@@ -1376,11 +1394,7 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
}
namespace {
-enum class MSVCSetJmpKind {
- _setjmpex,
- _setjmp3,
- _setjmp
-};
+enum class MSVCSetJmpKind { _setjmpex, _setjmp3, _setjmp };
}
/// MSVC handles setjmp a bit differently on different platforms. On every
@@ -1992,12 +2006,12 @@ struct CallObjCArcUse final : EHScopeStack::Cleanup {
CGF.EmitARCIntrinsicUse(object);
}
};
-}
+} // namespace
Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
BuiltinCheckKind Kind) {
- assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
- && "Unsupported builtin check kind");
+ assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
+ "Unsupported builtin check kind");
Value *ArgValue = EmitScalarExpr(E);
if (!SanOpts.has(SanitizerKind::Builtin))
@@ -2230,8 +2244,8 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
unsigned ArgValSize =
CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
- llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
- ArgValSize);
+ llvm::IntegerType *IntTy =
+ llvm::Type::getIntNTy(getLLVMContext(), ArgValSize);
ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
CanQualType ArgTy = getOSLogArgType(Ctx, Size);
// If ArgVal has type x86_fp80, zero-extend ArgVal.
@@ -2366,8 +2380,7 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
if (ResultInfo.Width < OpWidth) {
- auto IntMax =
- llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
+ auto IntMax = llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
@@ -2436,75 +2449,75 @@ RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
// Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
Function *F = CGM.getIntrinsic(IID, Ty);
- return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
+ return RValue::get(Builder.CreateCall(F, {Src, Src, ShiftAmt}));
}
// Map math builtins for long-double to f128 version.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
switch (BuiltinID) {
-#define MUTATE_LDBL(func) \
- case Builtin::BI__builtin_##func##l: \
+#define MUTATE_LDBL(func) \
+ case Builtin::BI__builtin_##func##l: \
return Builtin::BI__builtin_##func##f128;
- MUTATE_LDBL(sqrt)
- MUTATE_LDBL(cbrt)
- MUTATE_LDBL(fabs)
- MUTATE_LDBL(log)
- MUTATE_LDBL(log2)
- MUTATE_LDBL(log10)
- MUTATE_LDBL(log1p)
- MUTATE_LDBL(logb)
- MUTATE_LDBL(exp)
- MUTATE_LDBL(exp2)
- MUTATE_LDBL(expm1)
- MUTATE_LDBL(fdim)
- MUTATE_LDBL(hypot)
- MUTATE_LDBL(ilogb)
- MUTATE_LDBL(pow)
- MUTATE_LDBL(fmin)
- MUTATE_LDBL(fmax)
- MUTATE_LDBL(ceil)
- MUTATE_LDBL(trunc)
- MUTATE_LDBL(rint)
- MUTATE_LDBL(nearbyint)
- MUTATE_LDBL(round)
- MUTATE_LDBL(floor)
- MUTATE_LDBL(lround)
- MUTATE_LDBL(llround)
- MUTATE_LDBL(lrint)
- MUTATE_LDBL(llrint)
- MUTATE_LDBL(fmod)
- MUTATE_LDBL(modf)
- MUTATE_LDBL(nan)
- MUTATE_LDBL(nans)
- MUTATE_LDBL(inf)
- MUTATE_LDBL(fma)
- MUTATE_LDBL(sin)
- MUTATE_LDBL(cos)
- MUTATE_LDBL(tan)
- MUTATE_LDBL(sinh)
- MUTATE_LDBL(cosh)
- MUTATE_LDBL(tanh)
- MUTATE_LDBL(asin)
- MUTATE_LDBL(acos)
- MUTATE_LDBL(atan)
- MUTATE_LDBL(asinh)
- MUTATE_LDBL(acosh)
- MUTATE_LDBL(atanh)
- MUTATE_LDBL(atan2)
- MUTATE_LDBL(erf)
- MUTATE_LDBL(erfc)
- MUTATE_LDBL(ldexp)
- MUTATE_LDBL(frexp)
- MUTATE_LDBL(huge_val)
- MUTATE_LDBL(copysign)
- MUTATE_LDBL(nextafter)
- MUTATE_LDBL(nexttoward)
- MUTATE_LDBL(remainder)
- MUTATE_LDBL(remquo)
- MUTATE_LDBL(scalbln)
- MUTATE_LDBL(scalbn)
- MUTATE_LDBL(tgamma)
- MUTATE_LDBL(lgamma)
+ MUTATE_LDBL(sqrt)
+ MUTATE_LDBL(cbrt)
+ MUTATE_LDBL(fabs)
+ MUTATE_LDBL(log)
+ MUTATE_LDBL(log2)
+ MUTATE_LDBL(log10)
+ MUTATE_LDBL(log1p)
+ MUTATE_LDBL(logb)
+ MUTATE_LDBL(exp)
+ MUTATE_LDBL(exp2)
+ MUTATE_LDBL(expm1)
+ MUTATE_LDBL(fdim)
+ MUTATE_LDBL(hypot)
+ MUTATE_LDBL(ilogb)
+ MUTATE_LDBL(pow)
+ MUTATE_LDBL(fmin)
+ MUTATE_LDBL(fmax)
+ MUTATE_LDBL(ceil)
+ MUTATE_LDBL(trunc)
+ MUTATE_LDBL(rint)
+ MUTATE_LDBL(nearbyint)
+ MUTATE_LDBL(round)
+ MUTATE_LDBL(floor)
+ MUTATE_LDBL(lround)
+ MUTATE_LDBL(llround)
+ MUTATE_LDBL(lrint)
+ MUTATE_LDBL(llrint)
+ MUTATE_LDBL(fmod)
+ MUTATE_LDBL(modf)
+ MUTATE_LDBL(nan)
+ MUTATE_LDBL(nans)
+ MUTATE_LDBL(inf)
+ MUTATE_LDBL(fma)
+ MUTATE_LDBL(sin)
+ MUTATE_LDBL(cos)
+ MUTATE_LDBL(tan)
+ MUTATE_LDBL(sinh)
+ MUTATE_LDBL(cosh)
+ MUTATE_LDBL(tanh)
+ MUTATE_LDBL(asin)
+ MUTATE_LDBL(acos)
+ MUTATE_LDBL(atan)
+ MUTATE_LDBL(asinh)
+ MUTATE_LDBL(acosh)
+ MUTATE_LDBL(atanh)
+ MUTATE_LDBL(atan2)
+ MUTATE_LDBL(erf)
+ MUTATE_LDBL(erfc)
+ MUTATE_LDBL(ldexp)
+ MUTATE_LDBL(frexp)
+ MUTATE_LDBL(huge_val)
+ MUTATE_LDBL(copysign)
+ MUTATE_LDBL(nextafter)
+ MUTATE_LDBL(nexttoward)
+ MUTATE_LDBL(remainder)
+ MUTATE_LDBL(remquo)
+ MUTATE_LDBL(scalbln)
+ MUTATE_LDBL(scalbn)
+ MUTATE_LDBL(tgamma)
+ MUTATE_LDBL(lgamma)
#undef MUTATE_LDBL
default:
return BuiltinID;
@@ -2548,11 +2561,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
!Result.hasSideEffects()) {
if (Result.Val.isInt())
- return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
- Result.Val.getInt()));
+ return RValue::get(
+ llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()));
if (Result.Val.isFloat())
- return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
- Result.Val.getFloat()));
+ return RValue::get(
+ llvm::ConstantFP::get(getLLVMContext(), Result.Val.getFloat()));
}
// If current long-double semantics is IEEE 128-bit, replace math builtins
@@ -2587,8 +2600,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// using the '#pragma float_control(precise, off)', and
// attribute opt-none hasn't been seen.
bool ErrnoOverridenToFalseWithOpt =
- ErrnoOverriden.has_value() && !ErrnoOverriden.value() && !OptNone &&
- CGM.getCodeGenOpts().OptimizationLevel != 0;
+ ErrnoOverriden.has_value() && !ErrnoOverriden.value() && !OptNone &&
+ CGM.getCodeGenOpts().OptimizationLevel != 0;
// There are LLVM math intrinsics/instructions corresponding to math library
// functions except the LLVM op will never set errno while the math library
@@ -2597,8 +2610,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// LLVM counterparts if the call is marked 'const' (known to never set errno).
// In case FP exceptions are enabled, the experimental versions of the
// intrinsics model those.
- bool ConstAlways =
- getContext().BuiltinInfo.isConst(BuiltinID);
+ bool ConstAlways = getContext().BuiltinInfo.isConst(BuiltinID);
// There's a special case with the fma builtins where they are always const
// if the target environment is GNU or the target is OS is Windows and we're
@@ -2703,9 +2715,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_ceilf16:
case Builtin::BI__builtin_ceill:
case Builtin::BI__builtin_ceilf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::ceil,
- Intrinsic::experimental_constrained_ceil));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::ceil, Intrinsic::experimental_constrained_ceil));
case Builtin::BIcopysign:
case Builtin::BIcopysignf:
@@ -2726,9 +2737,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_cosf16:
case Builtin::BI__builtin_cosl:
case Builtin::BI__builtin_cosf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::cos,
- Intrinsic::experimental_constrained_cos));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::cos, Intrinsic::experimental_constrained_cos));
case Builtin::BIcosh:
case Builtin::BIcoshf:
@@ -2749,9 +2759,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_expf16:
case Builtin::BI__builtin_expl:
case Builtin::BI__builtin_expf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::exp,
- Intrinsic::experimental_constrained_exp));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::exp, Intrinsic::experimental_constrained_exp));
case Builtin::BIexp2:
case Builtin::BIexp2f:
@@ -2761,9 +2770,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_exp2f16:
case Builtin::BI__builtin_exp2l:
case Builtin::BI__builtin_exp2f128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::exp2,
- Intrinsic::experimental_constrained_exp2));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::exp2, Intrinsic::experimental_constrained_exp2));
case Builtin::BI__builtin_exp10:
case Builtin::BI__builtin_exp10f:
case Builtin::BI__builtin_exp10f16:
@@ -2794,9 +2802,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_floorf16:
case Builtin::BI__builtin_floorl:
case Builtin::BI__builtin_floorf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::floor,
- Intrinsic::experimental_constrained_floor));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::floor,
+ Intrinsic::experimental_constrained_floor));
case Builtin::BIfma:
case Builtin::BIfmaf:
@@ -2806,9 +2814,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fmaf16:
case Builtin::BI__builtin_fmal:
case Builtin::BI__builtin_fmaf128:
- return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::fma,
- Intrinsic::experimental_constrained_fma));
+ return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::fma, Intrinsic::experimental_constrained_fma));
case Builtin::BIfmax:
case Builtin::BIfmaxf:
@@ -2818,9 +2825,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fmaxf16:
case Builtin::BI__builtin_fmaxl:
case Builtin::BI__builtin_fmaxf128:
- return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::maxnum,
- Intrinsic::experimental_constrained_maxnum));
+ return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::maxnum,
+ Intrinsic::experimental_constrained_maxnum));
case Builtin::BIfmin:
case Builtin::BIfminf:
@@ -2830,9 +2837,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_fminf16:
case Builtin::BI__builtin_fminl:
case Builtin::BI__builtin_fminf128:
- return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::minnum,
- Intrinsic::experimental_constrained_minnum));
+ return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::minnum,
+ Intrinsic::experimental_constrained_minnum));
// fmod() is a special-case. It maps to the frem instruction rather than an
// LLVM intrinsic.
@@ -2858,9 +2865,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_logf16:
case Builtin::BI__builtin_logl:
case Builtin::BI__builtin_logf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::log,
- Intrinsic::experimental_constrained_log));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::log, Intrinsic::experimental_constrained_log));
case Builtin::BIlog10:
case Builtin::BIlog10f:
@@ -2870,9 +2876,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_log10f16:
case Builtin::BI__builtin_log10l:
case Builtin::BI__builtin_log10f128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::log10,
- Intrinsic::experimental_constrained_log10));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::log10,
+ Intrinsic::experimental_constrained_log10));
case Builtin::BIlog2:
case Builtin::BIlog2f:
@@ -2882,9 +2888,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_log2f16:
case Builtin::BI__builtin_log2l:
case Builtin::BI__builtin_log2f128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::log2,
- Intrinsic::experimental_constrained_log2));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::log2, Intrinsic::experimental_constrained_log2));
case Builtin::BInearbyint:
case Builtin::BInearbyintf:
@@ -2893,9 +2898,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_nearbyintf:
case Builtin::BI__builtin_nearbyintl:
case Builtin::BI__builtin_nearbyintf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::nearbyint,
- Intrinsic::experimental_constrained_nearbyint));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::nearbyint,
+ Intrinsic::experimental_constrained_nearbyint));
case Builtin::BIpow:
case Builtin::BIpowf:
@@ -2905,9 +2910,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_powf16:
case Builtin::BI__builtin_powl:
case Builtin::BI__builtin_powf128:
- return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::pow,
- Intrinsic::experimental_constrained_pow));
+ return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::pow, Intrinsic::experimental_constrained_pow));
case Builtin::BIrint:
case Builtin::BIrintf:
@@ -2917,9 +2921,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_rintf16:
case Builtin::BI__builtin_rintl:
case Builtin::BI__builtin_rintf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::rint,
- Intrinsic::experimental_constrained_rint));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::rint, Intrinsic::experimental_constrained_rint));
case Builtin::BIround:
case Builtin::BIroundf:
@@ -2929,9 +2932,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_roundf16:
case Builtin::BI__builtin_roundl:
case Builtin::BI__builtin_roundf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::round,
- Intrinsic::experimental_constrained_round));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::round,
+ Intrinsic::experimental_constrained_round));
case Builtin::BIroundeven:
case Builtin::BIroundevenf:
@@ -2941,9 +2944,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_roundevenf16:
case Builtin::BI__builtin_roundevenl:
case Builtin::BI__builtin_roundevenf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::roundeven,
- Intrinsic::experimental_constrained_roundeven));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::roundeven,
+ Intrinsic::experimental_constrained_roundeven));
case Builtin::BIsin:
case Builtin::BIsinf:
@@ -2953,9 +2956,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_sinf16:
case Builtin::BI__builtin_sinl:
case Builtin::BI__builtin_sinf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::sin,
- Intrinsic::experimental_constrained_sin));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::sin, Intrinsic::experimental_constrained_sin));
case Builtin::BIsinh:
case Builtin::BIsinhf:
@@ -3013,9 +3015,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_truncf16:
case Builtin::BI__builtin_truncl:
case Builtin::BI__builtin_truncf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::trunc,
- Intrinsic::experimental_constrained_trunc));
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::trunc,
+ Intrinsic::experimental_constrained_trunc));
case Builtin::BIlround:
case Builtin::BIlroundf:
@@ -3096,7 +3098,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
};
switch (BuiltinIDIfNoAsmLabel) {
- default: break;
+ default:
+ break;
case Builtin::BI__builtin___CFStringMakeConstantString:
case Builtin::BI__builtin___NSStringMakeConstantString:
return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
@@ -3174,13 +3177,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// is available as debuginfo is needed to preserve user-level
// access pattern.
if (!getDebugInfo()) {
- CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
+ CGM.Error(E->getExprLoc(),
+ "using builtin_preserve_access_index() without -g");
return RValue::get(EmitScalarExpr(E->getArg(0)));
}
// Nested builtin_preserve_access_index() not supported
if (IsInPreservedAIRegion) {
- CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
+ CGM.Error(E->getExprLoc(),
+ "nested builtin_preserve_access_index() not supported");
return RValue::get(EmitScalarExpr(E->getArg(0)));
}
@@ -3216,8 +3221,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
+ Result =
+ Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_ctzs:
@@ -3301,8 +3306,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
+ Result =
+ Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_parity:
@@ -3318,8 +3323,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *Tmp = Builder.CreateCall(F, ArgValue);
Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
+ Result =
+ Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast");
return RValue::get(Result);
}
case Builtin::BI__lzcnt16:
@@ -3333,8 +3338,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
+ Result =
+ Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast");
return RValue::get(Result);
}
case Builtin::BI__popcnt16:
@@ -3409,7 +3414,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const Expr *Ptr = E->getArg(0);
Value *PtrValue = EmitScalarExpr(Ptr);
Value *OffsetValue =
- (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
+ (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
@@ -3550,7 +3555,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false);
return RValue::get(Result);
}
case Builtin::BI__builtin_dynamic_object_size:
@@ -3568,10 +3573,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
// FIXME: Technically these constants should of type 'int', yes?
- RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
- llvm::ConstantInt::get(Int32Ty, 0);
- Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
- llvm::ConstantInt::get(Int32Ty, 3);
+ RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1))
+ : llvm::ConstantInt::get(Int32Ty, 0);
+ Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2))
+ : llvm::ConstantInt::get(Int32Ty, 3);
Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
Builder.CreateCall(F, {Address, RW, Locality, Data});
@@ -3630,12 +3635,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
Src0->getType());
- return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
+ return RValue::get(Builder.CreateConstrainedFPCall(F, {Src0, Src1}));
}
- Function *F = CGM.getIntrinsic(Intrinsic::powi,
- { Src0->getType(), Src1->getType() });
- return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::powi, {Src0->getType(), Src1->getType()});
+ return RValue::get(Builder.CreateCall(F, {Src0, Src1}));
}
case Builtin::BI__builtin_frexpl: {
// Linux PPC will not be adding additional PPCDoubleDouble support.
@@ -3663,7 +3668,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *RHS = EmitScalarExpr(E->getArg(1));
switch (BuiltinID) {
- default: llvm_unreachable("Unknown ordered comparison");
+ default:
+ llvm_unreachable("Unknown ordered comparison");
case Builtin::BI__builtin_isgreater:
LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
break;
@@ -4050,8 +4056,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F);
if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
+ Result =
+ Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast");
return RValue::get(Result);
}
@@ -4073,14 +4079,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
BasicBlock *Begin = Builder.GetInsertBlock();
BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
Builder.SetInsertPoint(End);
- PHINode *Result =
- Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
- "fpclassify_result");
+ PHINode *Result = Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
+ "fpclassify_result");
// if (V==0) return FP_ZERO
Builder.SetInsertPoint(Begin);
- Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
- "iszero");
+ Value *IsZero =
+ Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), "iszero");
Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
Builder.CreateCondBr(IsZero, End, NotZero);
@@ -4097,9 +4102,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// if (fabs(V) == infinity) return FP_INFINITY
Builder.SetInsertPoint(NotNan);
Value *VAbs = EmitFAbs(*this, V);
- Value *IsInf =
- Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
- "isinf");
+ Value *IsInf = Builder.CreateFCmpOEQ(
+ VAbs, ConstantFP::getInfinity(V->getType()), "isinf");
Value *InfLiteral = EmitScalarExpr(E->getArg(1));
BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
Builder.CreateCondBr(IsInf, End, NotInf);
@@ -4109,12 +4113,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.SetInsertPoint(NotInf);
APFloat Smallest = APFloat::getSmallestNormalized(
getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
- Value *IsNormal =
- Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
- "isnormal");
- Value *NormalResult =
- Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)));
+ Value *IsNormal = Builder.CreateFCmpUGE(
+ VAbs, ConstantFP::get(V->getContext(), Smallest), "isnormal");
+ Value *NormalResult = Builder.CreateSelect(
+ IsNormal, EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)));
Builder.CreateBr(End);
Result->addIncoming(NormalResult, NotInf);
@@ -4148,8 +4150,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
if (AAS != EAS) {
llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
- return RValue::get(getTargetHooks().performAddrSpaceCast(*this, AI, AAS,
- EAS, Ty));
+ return RValue::get(
+ getTargetHooks().performAddrSpaceCast(*this, AI, AAS, EAS, Ty));
}
return RValue::get(AI);
}
@@ -4170,8 +4172,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
if (AAS != EAS) {
llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
- return RValue::get(getTargetHooks().performAddrSpaceCast(*this, AI, AAS,
- EAS, Ty));
+ return RValue::get(
+ getTargetHooks().performAddrSpaceCast(*this, AI, AAS, EAS, Ty));
}
return RValue::get(AI);
}
@@ -4255,8 +4257,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
- DestAddr, SrcAddr, SizeVal);
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestAddr, SrcAddr,
+ SizeVal);
return RValue::get(DestAddr, *this);
}
@@ -4290,8 +4292,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
- Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
- Builder.getInt8Ty());
+ Value *ByteVal =
+ Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
@@ -4321,8 +4323,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
- Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
- Builder.getInt8Ty());
+ Value *ByteVal =
+ Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
return RValue::get(Dest, *this);
@@ -4442,12 +4444,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
int32_t Offset = 0;
Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
- return RValue::get(Builder.CreateCall(F,
- llvm::ConstantInt::get(Int32Ty, Offset)));
+ return RValue::get(
+ Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
- Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
- getContext().UnsignedIntTy);
+ Value *Depth = ConstantEmitter(*this).emitAbstract(
+ E->getArg(0), getContext().UnsignedIntTy);
Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
@@ -4456,8 +4458,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
}
case Builtin::BI__builtin_frame_address: {
- Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
- getContext().UnsignedIntTy);
+ Value *Depth = ConstantEmitter(*this).emitAbstract(
+ E->getArg(0), getContext().UnsignedIntTy);
Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
return RValue::get(Builder.CreateCall(F, Depth));
}
@@ -4472,8 +4474,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Result);
}
case Builtin::BI__builtin_dwarf_sp_column: {
- llvm::IntegerType *Ty
- = cast<llvm::IntegerType>(ConvertType(E->getType()));
+ llvm::IntegerType *Ty = cast<llvm::IntegerType>(ConvertType(E->getType()));
int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
if (Column == -1) {
CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
@@ -4766,8 +4767,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
- return EmitCall(FuncInfo, CGCallee::forDirect(Func),
- ReturnValueSlot(), Args);
+ return EmitCall(FuncInfo, CGCallee::forDirect(Func), ReturnValueSlot(),
+ Args);
}
case Builtin::BI__atomic_test_and_set: {
@@ -4818,12 +4819,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
llvm::BasicBlock *BBs[5] = {
- createBasicBlock("monotonic", CurFn),
- createBasicBlock("acquire", CurFn),
- createBasicBlock("release", CurFn),
- createBasicBlock("acqrel", CurFn),
- createBasicBlock("seqcst", CurFn)
- };
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("acquire", CurFn), createBasicBlock("release", CurFn),
+ createBasicBlock("acqrel", CurFn), createBasicBlock("seqcst", CurFn)};
llvm::AtomicOrdering Orders[5] = {
llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
@@ -4872,10 +4870,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
default: // invalid order
Store->setOrdering(llvm::AtomicOrdering::Monotonic);
break;
- case 3: // memory_order_release
+ case 3: // memory_order_release
Store->setOrdering(llvm::AtomicOrdering::Release);
break;
- case 5: // memory_order_seq_cst
+ case 5: // memory_order_seq_cst
Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
break;
}
@@ -4884,11 +4882,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
- llvm::BasicBlock *BBs[3] = {
- createBasicBlock("monotonic", CurFn),
- createBasicBlock("release", CurFn),
- createBasicBlock("seqcst", CurFn)
- };
+ llvm::BasicBlock *BBs[3] = {createBasicBlock("monotonic", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("seqcst", CurFn)};
llvm::AtomicOrdering Orders[3] = {
llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
llvm::AtomicOrdering::SequentiallyConsistent};
@@ -4928,17 +4924,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case 0: // memory_order_relaxed
default: // invalid order
break;
- case 1: // memory_order_consume
- case 2: // memory_order_acquire
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
break;
- case 3: // memory_order_release
+ case 3: // memory_order_release
Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
break;
- case 4: // memory_order_acq_rel
+ case 4: // memory_order_acq_rel
Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
break;
- case 5: // memory_order_seq_cst
+ case 5: // memory_order_seq_cst
Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
break;
}
@@ -5062,7 +5058,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
- default: llvm_unreachable("Unknown multiprecision builtin id.");
+ default:
+ llvm_unreachable("Unknown multiprecision builtin id.");
case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
case Builtin::BI__builtin_addc:
@@ -5081,13 +5078,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Construct our resulting LLVM IR expression.
llvm::Value *Carry1;
- llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
- X, Y, Carry1);
+ llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry1);
llvm::Value *Carry2;
- llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
- Sum1, Carryin, Carry2);
- llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
- X->getType());
+ llvm::Value *Sum2 =
+ EmitOverflowIntrinsic(*this, IntrinsicId, Sum1, Carryin, Carry2);
+ llvm::Value *CarryOut =
+ Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2), X->getType());
Builder.CreateStore(CarryOut, CarryOutPtr);
return RValue::get(Sum2);
}
@@ -5181,7 +5177,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Finally, store the result using the pointer.
bool isVolatile =
- ResultArg->getType()->getPointeeType().isVolatileQualified();
+ ResultArg->getType()->getPointeeType().isVolatileQualified();
Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
return RValue::get(Overflow);
@@ -5216,7 +5212,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Decide which of the overflow intrinsics we are lowering to:
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
- default: llvm_unreachable("Unknown overflow builtin id.");
+ default:
+ llvm_unreachable("Unknown overflow builtin id.");
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
@@ -5249,7 +5246,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
break;
}
-
llvm::Value *Carry;
llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
Builder.CreateStore(Sum, SumOutPtr);
@@ -5284,9 +5280,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_call_with_static_chain: {
const CallExpr *Call = cast<CallExpr>(E->getArg(0));
const Expr *Chain = E->getArg(1);
- return EmitCall(Call->getCallee()->getType(),
- EmitCallee(Call->getCallee()), Call, ReturnValue,
- EmitScalarExpr(Chain));
+ return EmitCall(Call->getCallee()->getType(), EmitCallee(Call->getCallee()),
+ Call, ReturnValue, EmitScalarExpr(Chain));
}
case Builtin::BI_InterlockedExchange8:
case Builtin::BI_InterlockedExchange16:
@@ -5307,19 +5302,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Exchange = Builder.CreatePtrToInt(Exchange, IntType);
llvm::Value *Comparand =
- Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
+ Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
auto Ordering =
- BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
- AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
+ BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf
+ ? AtomicOrdering::Monotonic
+ : AtomicOrdering::SequentiallyConsistent;
auto Result = Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
Ordering, Ordering);
Result->setVolatile(true);
- return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
- 0),
- RTy));
+ return RValue::get(
+ Builder.CreateIntToPtr(Builder.CreateExtractValue(Result, 0), RTy));
}
case Builtin::BI_InterlockedCompareExchange8:
case Builtin::BI_InterlockedCompareExchange16:
@@ -5566,8 +5561,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
: "__write_pipe_4";
- llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
- Int32Ty, Int32Ty};
+ llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty,
+ I8PTy, Int32Ty, Int32Ty};
Value *Arg2 = EmitScalarExpr(E->getArg(2)),
*Arg3 = EmitScalarExpr(E->getArg(3));
llvm::FunctionType *FTy = llvm::FunctionType::get(
@@ -5707,8 +5702,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
auto NewCall =
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
- return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
- ConvertType(E->getType())));
+ return RValue::get(
+ Builder.CreateBitOrPointerCast(NewCall, ConvertType(E->getType())));
}
// OpenCL v2.0, s6.13.17 - Enqueue kernel function.
@@ -5781,8 +5776,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto *Zero = llvm::ConstantInt::get(IntTy, 0);
for (unsigned I = First; I < NumArgs; ++I) {
auto *Index = llvm::ConstantInt::get(IntTy, I - First);
- auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr,
- {Zero, Index});
+ auto *GEP =
+ Builder.CreateGEP(Tmp.getElementType(), TmpPtr, {Zero, Index});
if (I == First)
ElemPtr = GEP;
auto *V =
@@ -6124,7 +6119,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// can move this up to the beginning of the function.
checkTargetFeatures(E, FD);
- if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
+ if (unsigned VectorWidth =
+ getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
// See if we have a target specific intrinsic.
@@ -6145,7 +6141,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
if (IntrinsicID != Intrinsic::not_intrinsic) {
- SmallVector<Value*, 16> Args;
+ SmallVector<Value *, 16> Args;
// Find out if any arguments are required to be integer constant
// expressions.
@@ -6195,7 +6191,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
if (RetTy != V->getType()) {
// XXX - vector of pointers?
if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
- if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
+ if (PtrTy->getAddressSpace() !=
+ V->getType()->getPointerAddressSpace()) {
V = Builder.CreateAddrSpaceCast(
V, llvm::PointerType::get(getLLVMContext(),
PtrTy->getAddressSpace()));
@@ -6357,7 +6354,8 @@ static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::BFloat16:
if (AllowBFloatArgsAndRet)
- return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
+ return llvm::FixedVectorType::get(CGF->BFloatTy,
+ V1Ty ? 1 : (4 << IsQuad));
else
return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Float16:
@@ -6409,9 +6407,9 @@ Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
return EmitNeonSplat(V, C, EC);
}
-Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
- const char *name,
- unsigned shift, bool rightshift) {
+Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value *> &Ops,
+ const char *name, unsigned shift,
+ bool rightshift) {
unsigned j = 0;
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
ai != ae; ++ai, ++j) {
@@ -6507,996 +6505,1359 @@ struct ARMVectorIntrinsicInfo {
};
} // end anonymous namespace
-#define NEONMAP0(NameBase) \
- { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
-
-#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
- { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
- Intrinsic::LLVMIntrinsic, 0, TypeModifier }
-
-#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
- { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
- Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
- TypeModifier }
-
-static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
- NEONMAP1(__a32_vcvt_bf16_f32, arm_neon_vcvtfp2bf, 0),
- NEONMAP0(splat_lane_v),
- NEONMAP0(splat_laneq_v),
- NEONMAP0(splatq_lane_v),
- NEONMAP0(splatq_laneq_v),
- NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
- NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
- NEONMAP1(vabs_v, arm_neon_vabs, 0),
- NEONMAP1(vabsq_v, arm_neon_vabs, 0),
- NEONMAP0(vadd_v),
- NEONMAP0(vaddhn_v),
- NEONMAP0(vaddq_v),
- NEONMAP1(vaesdq_u8, arm_neon_aesd, 0),
- NEONMAP1(vaeseq_u8, arm_neon_aese, 0),
- NEONMAP1(vaesimcq_u8, arm_neon_aesimc, 0),
- NEONMAP1(vaesmcq_u8, arm_neon_aesmc, 0),
- NEONMAP1(vbfdot_f32, arm_neon_bfdot, 0),
- NEONMAP1(vbfdotq_f32, arm_neon_bfdot, 0),
- NEONMAP1(vbfmlalbq_f32, arm_neon_bfmlalb, 0),
- NEONMAP1(vbfmlaltq_f32, arm_neon_bfmlalt, 0),
- NEONMAP1(vbfmmlaq_f32, arm_neon_bfmmla, 0),
- NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
- NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
- NEONMAP1(vcadd_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcadd_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcadd_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcadd_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcaddq_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcaddq_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcaddq_rot270_f64, arm_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcaddq_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcaddq_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcaddq_rot90_f64, arm_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcage_v, arm_neon_vacge, 0),
- NEONMAP1(vcageq_v, arm_neon_vacge, 0),
- NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
- NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
- NEONMAP1(vcale_v, arm_neon_vacge, 0),
- NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
- NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
- NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
- NEONMAP0(vceqz_v),
- NEONMAP0(vceqzq_v),
- NEONMAP0(vcgez_v),
- NEONMAP0(vcgezq_v),
- NEONMAP0(vcgtz_v),
- NEONMAP0(vcgtzq_v),
- NEONMAP0(vclez_v),
- NEONMAP0(vclezq_v),
- NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
- NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
- NEONMAP0(vcltz_v),
- NEONMAP0(vcltzq_v),
- NEONMAP1(vclz_v, ctlz, Add1ArgType),
- NEONMAP1(vclzq_v, ctlz, Add1ArgType),
- NEONMAP1(vcnt_v, ctpop, Add1ArgType),
- NEONMAP1(vcntq_v, ctpop, Add1ArgType),
- NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
- NEONMAP0(vcvt_f16_s16),
- NEONMAP0(vcvt_f16_u16),
- NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
- NEONMAP0(vcvt_f32_v),
- NEONMAP1(vcvt_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvt_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
- NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvt_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP0(vcvt_s16_f16),
- NEONMAP0(vcvt_s32_v),
- NEONMAP0(vcvt_s64_v),
- NEONMAP0(vcvt_u16_f16),
- NEONMAP0(vcvt_u32_v),
- NEONMAP0(vcvt_u64_v),
- NEONMAP1(vcvta_s16_f16, arm_neon_vcvtas, 0),
- NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvta_u16_f16, arm_neon_vcvtau, 0),
- NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
- NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
- NEONMAP1(vcvtaq_s16_f16, arm_neon_vcvtas, 0),
- NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvtaq_u16_f16, arm_neon_vcvtau, 0),
- NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
- NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
- NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
- NEONMAP1(vcvtm_s16_f16, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtm_u16_f16, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtmq_s16_f16, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtmq_u16_f16, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtn_s16_f16, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtn_u16_f16, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtnq_s16_f16, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtnq_u16_f16, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtp_s16_f16, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtp_u16_f16, arm_neon_vcvtpu, 0),
- NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
- NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
- NEONMAP1(vcvtpq_s16_f16, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtpq_u16_f16, arm_neon_vcvtpu, 0),
- NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
- NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
- NEONMAP0(vcvtq_f16_s16),
- NEONMAP0(vcvtq_f16_u16),
- NEONMAP0(vcvtq_f32_v),
- NEONMAP1(vcvtq_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvtq_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
- NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvtq_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP0(vcvtq_s16_f16),
- NEONMAP0(vcvtq_s32_v),
- NEONMAP0(vcvtq_s64_v),
- NEONMAP0(vcvtq_u16_f16),
- NEONMAP0(vcvtq_u32_v),
- NEONMAP0(vcvtq_u64_v),
- NEONMAP1(vdot_s32, arm_neon_sdot, 0),
- NEONMAP1(vdot_u32, arm_neon_udot, 0),
- NEONMAP1(vdotq_s32, arm_neon_sdot, 0),
- NEONMAP1(vdotq_u32, arm_neon_udot, 0),
- NEONMAP0(vext_v),
- NEONMAP0(vextq_v),
- NEONMAP0(vfma_v),
- NEONMAP0(vfmaq_v),
- NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
- NEONMAP0(vld1_dup_v),
- NEONMAP1(vld1_v, arm_neon_vld1, 0),
- NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
- NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
- NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
- NEONMAP0(vld1q_dup_v),
- NEONMAP1(vld1q_v, arm_neon_vld1, 0),
- NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
- NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
- NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
- NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
- NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
- NEONMAP1(vld2_v, arm_neon_vld2, 0),
- NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
- NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
- NEONMAP1(vld2q_v, arm_neon_vld2, 0),
- NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
- NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
- NEONMAP1(vld3_v, arm_neon_vld3, 0),
- NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
- NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
- NEONMAP1(vld3q_v, arm_neon_vld3, 0),
- NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
- NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
- NEONMAP1(vld4_v, arm_neon_vld4, 0),
- NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
- NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
- NEONMAP1(vld4q_v, arm_neon_vld4, 0),
- NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
- NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
- NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
- NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
- NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
- NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
- NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
- NEONMAP1(vmmlaq_s32, arm_neon_smmla, 0),
- NEONMAP1(vmmlaq_u32, arm_neon_ummla, 0),
- NEONMAP0(vmovl_v),
- NEONMAP0(vmovn_v),
- NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
- NEONMAP0(vmull_v),
- NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
- NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
- NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
- NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
- NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
- NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
- NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
- NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
- NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
- NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
- NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
- NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
- NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
- NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
- NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
- NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
- NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
- NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
- NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
- NEONMAP1(vqrdmlah_s16, arm_neon_vqrdmlah, Add1ArgType),
- NEONMAP1(vqrdmlah_s32, arm_neon_vqrdmlah, Add1ArgType),
- NEONMAP1(vqrdmlahq_s16, arm_neon_vqrdmlah, Add1ArgType),
- NEONMAP1(vqrdmlahq_s32, arm_neon_vqrdmlah, Add1ArgType),
- NEONMAP1(vqrdmlsh_s16, arm_neon_vqrdmlsh, Add1ArgType),
- NEONMAP1(vqrdmlsh_s32, arm_neon_vqrdmlsh, Add1ArgType),
- NEONMAP1(vqrdmlshq_s16, arm_neon_vqrdmlsh, Add1ArgType),
- NEONMAP1(vqrdmlshq_s32, arm_neon_vqrdmlsh, Add1ArgType),
- NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
- NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
- NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
- NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
- NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
- NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
- NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
- NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
- NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
- NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
- NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
- NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
- NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
- NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
- NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
- NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
- NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
- NEONMAP0(vrndi_v),
- NEONMAP0(vrndiq_v),
- NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
- NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
- NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
- NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
- NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
- NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
- NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
- NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
- NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
- NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
- NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
- NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
- NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
- NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
- NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
- NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
- NEONMAP1(vsha1su0q_u32, arm_neon_sha1su0, 0),
- NEONMAP1(vsha1su1q_u32, arm_neon_sha1su1, 0),
- NEONMAP1(vsha256h2q_u32, arm_neon_sha256h2, 0),
- NEONMAP1(vsha256hq_u32, arm_neon_sha256h, 0),
- NEONMAP1(vsha256su0q_u32, arm_neon_sha256su0, 0),
- NEONMAP1(vsha256su1q_u32, arm_neon_sha256su1, 0),
- NEONMAP0(vshl_n_v),
- NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
- NEONMAP0(vshll_n_v),
- NEONMAP0(vshlq_n_v),
- NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
- NEONMAP0(vshr_n_v),
- NEONMAP0(vshrn_n_v),
- NEONMAP0(vshrq_n_v),
- NEONMAP1(vst1_v, arm_neon_vst1, 0),
- NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
- NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
- NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
- NEONMAP1(vst1q_v, arm_neon_vst1, 0),
- NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
- NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
- NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
- NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
- NEONMAP1(vst2_v, arm_neon_vst2, 0),
- NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
- NEONMAP1(vst2q_v, arm_neon_vst2, 0),
- NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
- NEONMAP1(vst3_v, arm_neon_vst3, 0),
- NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
- NEONMAP1(vst3q_v, arm_neon_vst3, 0),
- NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
- NEONMAP1(vst4_v, arm_neon_vst4, 0),
- NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
- NEONMAP1(vst4q_v, arm_neon_vst4, 0),
- NEONMAP0(vsubhn_v),
- NEONMAP0(vtrn_v),
- NEONMAP0(vtrnq_v),
- NEONMAP0(vtst_v),
- NEONMAP0(vtstq_v),
- NEONMAP1(vusdot_s32, arm_neon_usdot, 0),
- NEONMAP1(vusdotq_s32, arm_neon_usdot, 0),
- NEONMAP1(vusmmlaq_s32, arm_neon_usmmla, 0),
- NEONMAP0(vuzp_v),
- NEONMAP0(vuzpq_v),
- NEONMAP0(vzip_v),
- NEONMAP0(vzipq_v)
-};
+#define NEONMAP0(NameBase) \
+ { #NameBase, NEON::BI__builtin_neon_##NameBase, 0, 0, 0 }
+
+#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
+ { \
+#NameBase, NEON::BI__builtin_neon_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
+ TypeModifier \
+ }
+
+#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
+ { \
+#NameBase, NEON::BI__builtin_neon_##NameBase, Intrinsic::LLVMIntrinsic, \
+ Intrinsic::AltLLVMIntrinsic, TypeModifier \
+ }
+
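+// For reference, an entry such as NEONMAP1(vabs_v, arm_neon_vabs, 0)
+// expands to an aggregate initializer roughly of the form
+//
+//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs,
+//     /*AltLLVMIntrinsic=*/0, /*TypeModifier=*/0 }
+//
+// i.e. a name string, the clang builtin ID, the primary LLVM intrinsic,
+// an optional alternate intrinsic, and type-modifier flags. The
+// clang-format churn below changes none of these values.
+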
+static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap[] = {
+ NEONMAP1(__a32_vcvt_bf16_f32, arm_neon_vcvtfp2bf, 0),
+ NEONMAP0(splat_lane_v),
+ NEONMAP0(splat_laneq_v),
+ NEONMAP0(splatq_lane_v),
+ NEONMAP0(splatq_laneq_v),
+ NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vabs_v, arm_neon_vabs, 0),
+ NEONMAP1(vabsq_v, arm_neon_vabs, 0),
+ NEONMAP0(vadd_v),
+ NEONMAP0(vaddhn_v),
+ NEONMAP0(vaddq_v),
+ NEONMAP1(vaesdq_u8, arm_neon_aesd, 0),
+ NEONMAP1(vaeseq_u8, arm_neon_aese, 0),
+ NEONMAP1(vaesimcq_u8, arm_neon_aesimc, 0),
+ NEONMAP1(vaesmcq_u8, arm_neon_aesmc, 0),
+ NEONMAP1(vbfdot_f32, arm_neon_bfdot, 0),
+ NEONMAP1(vbfdotq_f32, arm_neon_bfdot, 0),
+ NEONMAP1(vbfmlalbq_f32, arm_neon_bfmlalb, 0),
+ NEONMAP1(vbfmlaltq_f32, arm_neon_bfmlalt, 0),
+ NEONMAP1(vbfmmlaq_f32, arm_neon_bfmmla, 0),
+ NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
+ NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
+ NEONMAP1(vcadd_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcadd_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcadd_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcadd_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f64, arm_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f64, arm_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcage_v, arm_neon_vacge, 0),
+ NEONMAP1(vcageq_v, arm_neon_vacge, 0),
+ NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
+ NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
+ NEONMAP1(vcale_v, arm_neon_vacge, 0),
+ NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
+ NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
+ NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
+ NEONMAP0(vceqz_v),
+ NEONMAP0(vceqzq_v),
+ NEONMAP0(vcgez_v),
+ NEONMAP0(vcgezq_v),
+ NEONMAP0(vcgtz_v),
+ NEONMAP0(vcgtzq_v),
+ NEONMAP0(vclez_v),
+ NEONMAP0(vclezq_v),
+ NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
+ NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
+ NEONMAP0(vcltz_v),
+ NEONMAP0(vcltzq_v),
+ NEONMAP1(vclz_v, ctlz, Add1ArgType),
+ NEONMAP1(vclzq_v, ctlz, Add1ArgType),
+ NEONMAP1(vcnt_v, ctpop, Add1ArgType),
+ NEONMAP1(vcntq_v, ctpop, Add1ArgType),
+ NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
+ NEONMAP0(vcvt_f16_s16),
+ NEONMAP0(vcvt_f16_u16),
+ NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
+ NEONMAP0(vcvt_f32_v),
+ NEONMAP1(vcvt_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
+ NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP0(vcvt_s16_f16),
+ NEONMAP0(vcvt_s32_v),
+ NEONMAP0(vcvt_s64_v),
+ NEONMAP0(vcvt_u16_f16),
+ NEONMAP0(vcvt_u32_v),
+ NEONMAP0(vcvt_u64_v),
+ NEONMAP1(vcvta_s16_f16, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvta_u16_f16, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvtaq_s16_f16, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvtaq_u16_f16, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
+ NEONMAP1(vcvtm_s16_f16, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtm_u16_f16, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtmq_s16_f16, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtmq_u16_f16, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtn_s16_f16, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtn_u16_f16, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtnq_s16_f16, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtnq_u16_f16, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtp_s16_f16, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtp_u16_f16, arm_neon_vcvtpu, 0),
+ NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
+ NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
+ NEONMAP1(vcvtpq_s16_f16, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtpq_u16_f16, arm_neon_vcvtpu, 0),
+ NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
+ NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
+ NEONMAP0(vcvtq_f16_s16),
+ NEONMAP0(vcvtq_f16_u16),
+ NEONMAP0(vcvtq_f32_v),
+ NEONMAP1(vcvtq_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvtq_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
+ NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvtq_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP0(vcvtq_s16_f16),
+ NEONMAP0(vcvtq_s32_v),
+ NEONMAP0(vcvtq_s64_v),
+ NEONMAP0(vcvtq_u16_f16),
+ NEONMAP0(vcvtq_u32_v),
+ NEONMAP0(vcvtq_u64_v),
+ NEONMAP1(vdot_s32, arm_neon_sdot, 0),
+ NEONMAP1(vdot_u32, arm_neon_udot, 0),
+ NEONMAP1(vdotq_s32, arm_neon_sdot, 0),
+ NEONMAP1(vdotq_u32, arm_neon_udot, 0),
+ NEONMAP0(vext_v),
+ NEONMAP0(vextq_v),
+ NEONMAP0(vfma_v),
+ NEONMAP0(vfmaq_v),
+ NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP0(vld1_dup_v),
+ NEONMAP1(vld1_v, arm_neon_vld1, 0),
+ NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
+ NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
+ NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
+ NEONMAP0(vld1q_dup_v),
+ NEONMAP1(vld1q_v, arm_neon_vld1, 0),
+ NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
+ NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
+ NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
+ NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
+ NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
+ NEONMAP1(vld2_v, arm_neon_vld2, 0),
+ NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
+ NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
+ NEONMAP1(vld2q_v, arm_neon_vld2, 0),
+ NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
+ NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
+ NEONMAP1(vld3_v, arm_neon_vld3, 0),
+ NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
+ NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
+ NEONMAP1(vld3q_v, arm_neon_vld3, 0),
+ NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
+ NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
+ NEONMAP1(vld4_v, arm_neon_vld4, 0),
+ NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
+ NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
+ NEONMAP1(vld4q_v, arm_neon_vld4, 0),
+ NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
+ NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
+ NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
+ NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
+ NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vmmlaq_s32, arm_neon_smmla, 0),
+ NEONMAP1(vmmlaq_u32, arm_neon_ummla, 0),
+ NEONMAP0(vmovl_v),
+ NEONMAP0(vmovn_v),
+ NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
+ NEONMAP0(vmull_v),
+ NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
+ NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
+ NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
+ NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
+ NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
+ NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
+ NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
+ NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
+ NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
+ NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
+ NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
+ NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
+ NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
+ NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
+ NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
+ NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
+ NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
+ NEONMAP1(vqrdmlah_s16, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlah_s32, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_s16, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_s32, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlsh_s16, arm_neon_vqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlsh_s32, arm_neon_vqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_s16, arm_neon_vqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_s32, arm_neon_vqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
+ NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
+ NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
+ NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
+ NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
+ NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
+ NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
+ NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
+ NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
+ NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
+ NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
+ NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
+ NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
+ NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
+ NEONMAP0(vrndi_v),
+ NEONMAP0(vrndiq_v),
+ NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
+ NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
+ NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
+ NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
+ NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
+ NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
+ NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
+ NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
+ NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
+ NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
+ NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
+ NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
+ NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
+ NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
+ NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
+ NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
+ NEONMAP1(vsha1su0q_u32, arm_neon_sha1su0, 0),
+ NEONMAP1(vsha1su1q_u32, arm_neon_sha1su1, 0),
+ NEONMAP1(vsha256h2q_u32, arm_neon_sha256h2, 0),
+ NEONMAP1(vsha256hq_u32, arm_neon_sha256h, 0),
+ NEONMAP1(vsha256su0q_u32, arm_neon_sha256su0, 0),
+ NEONMAP1(vsha256su1q_u32, arm_neon_sha256su1, 0),
+ NEONMAP0(vshl_n_v),
+ NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP0(vshll_n_v),
+ NEONMAP0(vshlq_n_v),
+ NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP0(vshr_n_v),
+ NEONMAP0(vshrn_n_v),
+ NEONMAP0(vshrq_n_v),
+ NEONMAP1(vst1_v, arm_neon_vst1, 0),
+ NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
+ NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
+ NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
+ NEONMAP1(vst1q_v, arm_neon_vst1, 0),
+ NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
+ NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
+ NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
+ NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
+ NEONMAP1(vst2_v, arm_neon_vst2, 0),
+ NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
+ NEONMAP1(vst2q_v, arm_neon_vst2, 0),
+ NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
+ NEONMAP1(vst3_v, arm_neon_vst3, 0),
+ NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
+ NEONMAP1(vst3q_v, arm_neon_vst3, 0),
+ NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
+ NEONMAP1(vst4_v, arm_neon_vst4, 0),
+ NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
+ NEONMAP1(vst4q_v, arm_neon_vst4, 0),
+ NEONMAP0(vsubhn_v),
+ NEONMAP0(vtrn_v),
+ NEONMAP0(vtrnq_v),
+ NEONMAP0(vtst_v),
+ NEONMAP0(vtstq_v),
+ NEONMAP1(vusdot_s32, arm_neon_usdot, 0),
+ NEONMAP1(vusdotq_s32, arm_neon_usdot, 0),
+ NEONMAP1(vusmmlaq_s32, arm_neon_usmmla, 0),
+ NEONMAP0(vuzp_v),
+ NEONMAP0(vuzpq_v),
+ NEONMAP0(vzip_v),
+ NEONMAP0(vzipq_v)};
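
[Nothing functional changes in the table above: it remains a static array searched by builtin ID. A minimal sketch of that lookup pattern, assuming the entries are kept sorted by BuiltinID as the surrounding code requires — the struct and function names here are approximations, not the patch's own:

  #include <algorithm>
  #include <cstdint>

  // Shape of one table row (mirrors ARMVectorIntrinsicInfo).
  struct IntrinsicInfo {
    const char *NameHint;
    unsigned BuiltinID;
    unsigned LLVMIntrinsic;
    unsigned AltLLVMIntrinsic;
    uint64_t TypeModifier;
  };

  // Binary-search the statically sorted table for a builtin ID;
  // returns nullptr when the builtin has no entry.
  static const IntrinsicInfo *findInMap(const IntrinsicInfo *Begin,
                                        const IntrinsicInfo *End,
                                        unsigned BuiltinID) {
    const IntrinsicInfo *It =
        std::lower_bound(Begin, End, BuiltinID,
                         [](const IntrinsicInfo &Info, unsigned ID) {
                           return Info.BuiltinID < ID;
                         });
    return (It != End && It->BuiltinID == BuiltinID) ? It : nullptr;
  }
]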
static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
- NEONMAP1(__a64_vcvtq_low_bf16_f32, aarch64_neon_bfcvtn, 0),
- NEONMAP0(splat_lane_v),
- NEONMAP0(splat_laneq_v),
- NEONMAP0(splatq_lane_v),
- NEONMAP0(splatq_laneq_v),
- NEONMAP1(vabs_v, aarch64_neon_abs, 0),
- NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
- NEONMAP0(vadd_v),
- NEONMAP0(vaddhn_v),
- NEONMAP0(vaddq_p128),
- NEONMAP0(vaddq_v),
- NEONMAP1(vaesdq_u8, aarch64_crypto_aesd, 0),
- NEONMAP1(vaeseq_u8, aarch64_crypto_aese, 0),
- NEONMAP1(vaesimcq_u8, aarch64_crypto_aesimc, 0),
- NEONMAP1(vaesmcq_u8, aarch64_crypto_aesmc, 0),
- NEONMAP2(vbcaxq_s16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vbcaxq_s32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vbcaxq_s64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vbcaxq_s8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vbcaxq_u16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vbcaxq_u32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vbcaxq_u64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vbcaxq_u8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
- NEONMAP1(vbfdot_f32, aarch64_neon_bfdot, 0),
- NEONMAP1(vbfdotq_f32, aarch64_neon_bfdot, 0),
- NEONMAP1(vbfmlalbq_f32, aarch64_neon_bfmlalb, 0),
- NEONMAP1(vbfmlaltq_f32, aarch64_neon_bfmlalt, 0),
- NEONMAP1(vbfmmlaq_f32, aarch64_neon_bfmmla, 0),
- NEONMAP1(vcadd_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcadd_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcadd_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcadd_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcaddq_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcaddq_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcaddq_rot270_f64, aarch64_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcaddq_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcaddq_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcaddq_rot90_f64, aarch64_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcage_v, aarch64_neon_facge, 0),
- NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
- NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
- NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
- NEONMAP1(vcale_v, aarch64_neon_facge, 0),
- NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
- NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
- NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
- NEONMAP0(vceqz_v),
- NEONMAP0(vceqzq_v),
- NEONMAP0(vcgez_v),
- NEONMAP0(vcgezq_v),
- NEONMAP0(vcgtz_v),
- NEONMAP0(vcgtzq_v),
- NEONMAP0(vclez_v),
- NEONMAP0(vclezq_v),
- NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
- NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
- NEONMAP0(vcltz_v),
- NEONMAP0(vcltzq_v),
- NEONMAP1(vclz_v, ctlz, Add1ArgType),
- NEONMAP1(vclzq_v, ctlz, Add1ArgType),
- NEONMAP1(vcmla_f16, aarch64_neon_vcmla_rot0, Add1ArgType),
- NEONMAP1(vcmla_f32, aarch64_neon_vcmla_rot0, Add1ArgType),
- NEONMAP1(vcmla_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType),
- NEONMAP1(vcmla_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType),
- NEONMAP1(vcmla_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType),
- NEONMAP1(vcmla_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType),
- NEONMAP1(vcmla_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType),
- NEONMAP1(vcmla_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType),
- NEONMAP1(vcmlaq_f16, aarch64_neon_vcmla_rot0, Add1ArgType),
- NEONMAP1(vcmlaq_f32, aarch64_neon_vcmla_rot0, Add1ArgType),
- NEONMAP1(vcmlaq_f64, aarch64_neon_vcmla_rot0, Add1ArgType),
- NEONMAP1(vcmlaq_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType),
- NEONMAP1(vcmlaq_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType),
- NEONMAP1(vcmlaq_rot180_f64, aarch64_neon_vcmla_rot180, Add1ArgType),
- NEONMAP1(vcmlaq_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType),
- NEONMAP1(vcmlaq_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType),
- NEONMAP1(vcmlaq_rot270_f64, aarch64_neon_vcmla_rot270, Add1ArgType),
- NEONMAP1(vcmlaq_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType),
- NEONMAP1(vcmlaq_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType),
- NEONMAP1(vcmlaq_rot90_f64, aarch64_neon_vcmla_rot90, Add1ArgType),
- NEONMAP1(vcnt_v, ctpop, Add1ArgType),
- NEONMAP1(vcntq_v, ctpop, Add1ArgType),
- NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
- NEONMAP0(vcvt_f16_s16),
- NEONMAP0(vcvt_f16_u16),
- NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
- NEONMAP0(vcvt_f32_v),
- NEONMAP1(vcvt_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvt_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
- NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvt_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP0(vcvtq_f16_s16),
- NEONMAP0(vcvtq_f16_u16),
- NEONMAP0(vcvtq_f32_v),
- NEONMAP1(vcvtq_high_bf16_f32, aarch64_neon_bfcvtn2, 0),
- NEONMAP1(vcvtq_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvtq_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
- NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvtq_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
- NEONMAP1(vdot_s32, aarch64_neon_sdot, 0),
- NEONMAP1(vdot_u32, aarch64_neon_udot, 0),
- NEONMAP1(vdotq_s32, aarch64_neon_sdot, 0),
- NEONMAP1(vdotq_u32, aarch64_neon_udot, 0),
- NEONMAP2(veor3q_s16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
- NEONMAP2(veor3q_s32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
- NEONMAP2(veor3q_s64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
- NEONMAP2(veor3q_s8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
- NEONMAP2(veor3q_u16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
- NEONMAP2(veor3q_u32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
- NEONMAP2(veor3q_u64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
- NEONMAP2(veor3q_u8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
- NEONMAP0(vext_v),
- NEONMAP0(vextq_v),
- NEONMAP0(vfma_v),
- NEONMAP0(vfmaq_v),
- NEONMAP1(vfmlal_high_f16, aarch64_neon_fmlal2, 0),
- NEONMAP1(vfmlal_low_f16, aarch64_neon_fmlal, 0),
- NEONMAP1(vfmlalq_high_f16, aarch64_neon_fmlal2, 0),
- NEONMAP1(vfmlalq_low_f16, aarch64_neon_fmlal, 0),
- NEONMAP1(vfmlsl_high_f16, aarch64_neon_fmlsl2, 0),
- NEONMAP1(vfmlsl_low_f16, aarch64_neon_fmlsl, 0),
- NEONMAP1(vfmlslq_high_f16, aarch64_neon_fmlsl2, 0),
- NEONMAP1(vfmlslq_low_f16, aarch64_neon_fmlsl, 0),
- NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
- NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
- NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
- NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
- NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
- NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
- NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
- NEONMAP1(vmmlaq_s32, aarch64_neon_smmla, 0),
- NEONMAP1(vmmlaq_u32, aarch64_neon_ummla, 0),
- NEONMAP0(vmovl_v),
- NEONMAP0(vmovn_v),
- NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
- NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
- NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
- NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
- NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
- NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
- NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
- NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
- NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
- NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
- NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
- NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
- NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
- NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
- NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
- NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
- NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
- NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
- NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
- NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
- NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
- NEONMAP1(vqrdmlah_s16, aarch64_neon_sqrdmlah, Add1ArgType),
- NEONMAP1(vqrdmlah_s32, aarch64_neon_sqrdmlah, Add1ArgType),
- NEONMAP1(vqrdmlahq_s16, aarch64_neon_sqrdmlah, Add1ArgType),
- NEONMAP1(vqrdmlahq_s32, aarch64_neon_sqrdmlah, Add1ArgType),
- NEONMAP1(vqrdmlsh_s16, aarch64_neon_sqrdmlsh, Add1ArgType),
- NEONMAP1(vqrdmlsh_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
- NEONMAP1(vqrdmlshq_s16, aarch64_neon_sqrdmlsh, Add1ArgType),
- NEONMAP1(vqrdmlshq_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
- NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
- NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
- NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
- NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
- NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
- NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
- NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
- NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl,UnsignedAlts),
- NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
- NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
- NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
- NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
- NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
- NEONMAP1(vrax1q_u64, aarch64_crypto_rax1, 0),
- NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
- NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
- NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
- NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
- NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
- NEONMAP1(vrnd32x_f32, aarch64_neon_frint32x, Add1ArgType),
- NEONMAP1(vrnd32x_f64, aarch64_neon_frint32x, Add1ArgType),
- NEONMAP1(vrnd32xq_f32, aarch64_neon_frint32x, Add1ArgType),
- NEONMAP1(vrnd32xq_f64, aarch64_neon_frint32x, Add1ArgType),
- NEONMAP1(vrnd32z_f32, aarch64_neon_frint32z, Add1ArgType),
- NEONMAP1(vrnd32z_f64, aarch64_neon_frint32z, Add1ArgType),
- NEONMAP1(vrnd32zq_f32, aarch64_neon_frint32z, Add1ArgType),
- NEONMAP1(vrnd32zq_f64, aarch64_neon_frint32z, Add1ArgType),
- NEONMAP1(vrnd64x_f32, aarch64_neon_frint64x, Add1ArgType),
- NEONMAP1(vrnd64x_f64, aarch64_neon_frint64x, Add1ArgType),
- NEONMAP1(vrnd64xq_f32, aarch64_neon_frint64x, Add1ArgType),
- NEONMAP1(vrnd64xq_f64, aarch64_neon_frint64x, Add1ArgType),
- NEONMAP1(vrnd64z_f32, aarch64_neon_frint64z, Add1ArgType),
- NEONMAP1(vrnd64z_f64, aarch64_neon_frint64z, Add1ArgType),
- NEONMAP1(vrnd64zq_f32, aarch64_neon_frint64z, Add1ArgType),
- NEONMAP1(vrnd64zq_f64, aarch64_neon_frint64z, Add1ArgType),
- NEONMAP0(vrndi_v),
- NEONMAP0(vrndiq_v),
- NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
- NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
- NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
- NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
- NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
- NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
- NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
- NEONMAP1(vsha1su0q_u32, aarch64_crypto_sha1su0, 0),
- NEONMAP1(vsha1su1q_u32, aarch64_crypto_sha1su1, 0),
- NEONMAP1(vsha256h2q_u32, aarch64_crypto_sha256h2, 0),
- NEONMAP1(vsha256hq_u32, aarch64_crypto_sha256h, 0),
- NEONMAP1(vsha256su0q_u32, aarch64_crypto_sha256su0, 0),
- NEONMAP1(vsha256su1q_u32, aarch64_crypto_sha256su1, 0),
- NEONMAP1(vsha512h2q_u64, aarch64_crypto_sha512h2, 0),
- NEONMAP1(vsha512hq_u64, aarch64_crypto_sha512h, 0),
- NEONMAP1(vsha512su0q_u64, aarch64_crypto_sha512su0, 0),
- NEONMAP1(vsha512su1q_u64, aarch64_crypto_sha512su1, 0),
- NEONMAP0(vshl_n_v),
- NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
- NEONMAP0(vshll_n_v),
- NEONMAP0(vshlq_n_v),
- NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
- NEONMAP0(vshr_n_v),
- NEONMAP0(vshrn_n_v),
- NEONMAP0(vshrq_n_v),
- NEONMAP1(vsm3partw1q_u32, aarch64_crypto_sm3partw1, 0),
- NEONMAP1(vsm3partw2q_u32, aarch64_crypto_sm3partw2, 0),
- NEONMAP1(vsm3ss1q_u32, aarch64_crypto_sm3ss1, 0),
- NEONMAP1(vsm3tt1aq_u32, aarch64_crypto_sm3tt1a, 0),
- NEONMAP1(vsm3tt1bq_u32, aarch64_crypto_sm3tt1b, 0),
- NEONMAP1(vsm3tt2aq_u32, aarch64_crypto_sm3tt2a, 0),
- NEONMAP1(vsm3tt2bq_u32, aarch64_crypto_sm3tt2b, 0),
- NEONMAP1(vsm4ekeyq_u32, aarch64_crypto_sm4ekey, 0),
- NEONMAP1(vsm4eq_u32, aarch64_crypto_sm4e, 0),
- NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
- NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
- NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
- NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
- NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
- NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
- NEONMAP0(vsubhn_v),
- NEONMAP0(vtst_v),
- NEONMAP0(vtstq_v),
- NEONMAP1(vusdot_s32, aarch64_neon_usdot, 0),
- NEONMAP1(vusdotq_s32, aarch64_neon_usdot, 0),
- NEONMAP1(vusmmlaq_s32, aarch64_neon_usmmla, 0),
- NEONMAP1(vxarq_u64, aarch64_crypto_xar, 0),
+ NEONMAP1(__a64_vcvtq_low_bf16_f32, aarch64_neon_bfcvtn, 0),
+ NEONMAP0(splat_lane_v),
+ NEONMAP0(splat_laneq_v),
+ NEONMAP0(splatq_lane_v),
+ NEONMAP0(splatq_laneq_v),
+ NEONMAP1(vabs_v, aarch64_neon_abs, 0),
+ NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
+ NEONMAP0(vadd_v),
+ NEONMAP0(vaddhn_v),
+ NEONMAP0(vaddq_p128),
+ NEONMAP0(vaddq_v),
+ NEONMAP1(vaesdq_u8, aarch64_crypto_aesd, 0),
+ NEONMAP1(vaeseq_u8, aarch64_crypto_aese, 0),
+ NEONMAP1(vaesimcq_u8, aarch64_crypto_aesimc, 0),
+ NEONMAP1(vaesmcq_u8, aarch64_crypto_aesmc, 0),
+ NEONMAP2(vbcaxq_s16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_s32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_s64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_s8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_u16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_u32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_u64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_u8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vbfdot_f32, aarch64_neon_bfdot, 0),
+ NEONMAP1(vbfdotq_f32, aarch64_neon_bfdot, 0),
+ NEONMAP1(vbfmlalbq_f32, aarch64_neon_bfmlalb, 0),
+ NEONMAP1(vbfmlaltq_f32, aarch64_neon_bfmlalt, 0),
+ NEONMAP1(vbfmmlaq_f32, aarch64_neon_bfmmla, 0),
+ NEONMAP1(vcadd_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcadd_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcadd_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcadd_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f64, aarch64_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f64, aarch64_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcage_v, aarch64_neon_facge, 0),
+ NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
+ NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
+ NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
+ NEONMAP1(vcale_v, aarch64_neon_facge, 0),
+ NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
+ NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
+ NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
+ NEONMAP0(vceqz_v),
+ NEONMAP0(vceqzq_v),
+ NEONMAP0(vcgez_v),
+ NEONMAP0(vcgezq_v),
+ NEONMAP0(vcgtz_v),
+ NEONMAP0(vcgtzq_v),
+ NEONMAP0(vclez_v),
+ NEONMAP0(vclezq_v),
+ NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
+ NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
+ NEONMAP0(vcltz_v),
+ NEONMAP0(vcltzq_v),
+ NEONMAP1(vclz_v, ctlz, Add1ArgType),
+ NEONMAP1(vclzq_v, ctlz, Add1ArgType),
+ NEONMAP1(vcmla_f16, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmla_f32, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmla_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmla_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmla_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmla_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmla_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType),
+ NEONMAP1(vcmla_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType),
+ NEONMAP1(vcmlaq_f16, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmlaq_f32, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmlaq_f64, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmlaq_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmlaq_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmlaq_rot180_f64, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmlaq_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmlaq_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmlaq_rot270_f64, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmlaq_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType),
+ NEONMAP1(vcmlaq_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType),
+ NEONMAP1(vcmlaq_rot90_f64, aarch64_neon_vcmla_rot90, Add1ArgType),
+ NEONMAP1(vcnt_v, ctpop, Add1ArgType),
+ NEONMAP1(vcntq_v, ctpop, Add1ArgType),
+ NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
+ NEONMAP0(vcvt_f16_s16),
+ NEONMAP0(vcvt_f16_u16),
+ NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
+ NEONMAP0(vcvt_f32_v),
+ NEONMAP1(vcvt_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
+ NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP0(vcvtq_f16_s16),
+ NEONMAP0(vcvtq_f16_u16),
+ NEONMAP0(vcvtq_f32_v),
+ NEONMAP1(vcvtq_high_bf16_f32, aarch64_neon_bfcvtn2, 0),
+ NEONMAP1(vcvtq_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvtq_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
+ NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp,
+ 0),
+ NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp,
+ 0),
+ NEONMAP1(vcvtq_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
+ NEONMAP1(vdot_s32, aarch64_neon_sdot, 0),
+ NEONMAP1(vdot_u32, aarch64_neon_udot, 0),
+ NEONMAP1(vdotq_s32, aarch64_neon_sdot, 0),
+ NEONMAP1(vdotq_u32, aarch64_neon_udot, 0),
+ NEONMAP2(veor3q_s16, aarch64_crypto_eor3u, aarch64_crypto_eor3s,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_s32, aarch64_crypto_eor3u, aarch64_crypto_eor3s,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_s64, aarch64_crypto_eor3u, aarch64_crypto_eor3s,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_s8, aarch64_crypto_eor3u, aarch64_crypto_eor3s,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_u16, aarch64_crypto_eor3u, aarch64_crypto_eor3s,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_u32, aarch64_crypto_eor3u, aarch64_crypto_eor3s,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_u64, aarch64_crypto_eor3u, aarch64_crypto_eor3s,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_u8, aarch64_crypto_eor3u, aarch64_crypto_eor3s,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP0(vext_v),
+ NEONMAP0(vextq_v),
+ NEONMAP0(vfma_v),
+ NEONMAP0(vfmaq_v),
+ NEONMAP1(vfmlal_high_f16, aarch64_neon_fmlal2, 0),
+ NEONMAP1(vfmlal_low_f16, aarch64_neon_fmlal, 0),
+ NEONMAP1(vfmlalq_high_f16, aarch64_neon_fmlal2, 0),
+ NEONMAP1(vfmlalq_low_f16, aarch64_neon_fmlal, 0),
+ NEONMAP1(vfmlsl_high_f16, aarch64_neon_fmlsl2, 0),
+ NEONMAP1(vfmlsl_low_f16, aarch64_neon_fmlsl, 0),
+ NEONMAP1(vfmlslq_high_f16, aarch64_neon_fmlsl2, 0),
+ NEONMAP1(vfmlslq_low_f16, aarch64_neon_fmlsl, 0),
+ NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
+ NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
+ NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
+ NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
+ NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
+ NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
+ NEONMAP1(vmmlaq_s32, aarch64_neon_smmla, 0),
+ NEONMAP1(vmmlaq_u32, aarch64_neon_ummla, 0),
+ NEONMAP0(vmovl_v),
+ NEONMAP0(vmovn_v),
+ NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
+ NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
+ NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
+ NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
+ NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
+ NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
+ NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
+ NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
+ NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
+ NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
+ NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
+ NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
+ NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
+ NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
+ NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
+ NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
+ NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
+ NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
+ NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmlah_s16, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlah_s32, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_s16, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_s32, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlsh_s16, aarch64_neon_sqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlsh_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_s16, aarch64_neon_sqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
+ NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
+ NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
+ NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
+ NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
+ NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
+ NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
+ NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
+ NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
+ NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
+ NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
+ NEONMAP1(vrax1q_u64, aarch64_crypto_rax1, 0),
+ NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
+ NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
+ NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
+ NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
+ NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP1(vrnd32x_f32, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32x_f64, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32xq_f32, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32xq_f64, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32z_f32, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd32z_f64, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd32zq_f32, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd32zq_f64, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd64x_f32, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64x_f64, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64xq_f32, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64xq_f64, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64z_f32, aarch64_neon_frint64z, Add1ArgType),
+ NEONMAP1(vrnd64z_f64, aarch64_neon_frint64z, Add1ArgType),
+ NEONMAP1(vrnd64zq_f32, aarch64_neon_frint64z, Add1ArgType),
+ NEONMAP1(vrnd64zq_f64, aarch64_neon_frint64z, Add1ArgType),
+ NEONMAP0(vrndi_v),
+ NEONMAP0(vrndiq_v),
+ NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
+ NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
+ NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
+ NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
+ NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
+ NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
+ NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
+ NEONMAP1(vsha1su0q_u32, aarch64_crypto_sha1su0, 0),
+ NEONMAP1(vsha1su1q_u32, aarch64_crypto_sha1su1, 0),
+ NEONMAP1(vsha256h2q_u32, aarch64_crypto_sha256h2, 0),
+ NEONMAP1(vsha256hq_u32, aarch64_crypto_sha256h, 0),
+ NEONMAP1(vsha256su0q_u32, aarch64_crypto_sha256su0, 0),
+ NEONMAP1(vsha256su1q_u32, aarch64_crypto_sha256su1, 0),
+ NEONMAP1(vsha512h2q_u64, aarch64_crypto_sha512h2, 0),
+ NEONMAP1(vsha512hq_u64, aarch64_crypto_sha512h, 0),
+ NEONMAP1(vsha512su0q_u64, aarch64_crypto_sha512su0, 0),
+ NEONMAP1(vsha512su1q_u64, aarch64_crypto_sha512su1, 0),
+ NEONMAP0(vshl_n_v),
+ NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP0(vshll_n_v),
+ NEONMAP0(vshlq_n_v),
+ NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl,
+ Add1ArgType | UnsignedAlts),
+ NEONMAP0(vshr_n_v),
+ NEONMAP0(vshrn_n_v),
+ NEONMAP0(vshrq_n_v),
+ NEONMAP1(vsm3partw1q_u32, aarch64_crypto_sm3partw1, 0),
+ NEONMAP1(vsm3partw2q_u32, aarch64_crypto_sm3partw2, 0),
+ NEONMAP1(vsm3ss1q_u32, aarch64_crypto_sm3ss1, 0),
+ NEONMAP1(vsm3tt1aq_u32, aarch64_crypto_sm3tt1a, 0),
+ NEONMAP1(vsm3tt1bq_u32, aarch64_crypto_sm3tt1b, 0),
+ NEONMAP1(vsm3tt2aq_u32, aarch64_crypto_sm3tt2a, 0),
+ NEONMAP1(vsm3tt2bq_u32, aarch64_crypto_sm3tt2b, 0),
+ NEONMAP1(vsm4ekeyq_u32, aarch64_crypto_sm4ekey, 0),
+ NEONMAP1(vsm4eq_u32, aarch64_crypto_sm4e, 0),
+ NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
+ NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
+ NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
+ NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
+ NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
+ NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
+ NEONMAP0(vsubhn_v),
+ NEONMAP0(vtst_v),
+ NEONMAP0(vtstq_v),
+ NEONMAP1(vusdot_s32, aarch64_neon_usdot, 0),
+ NEONMAP1(vusdotq_s32, aarch64_neon_usdot, 0),
+ NEONMAP1(vusmmlaq_s32, aarch64_neon_usmmla, 0),
+ NEONMAP1(vxarq_u64, aarch64_crypto_xar, 0),
};
static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
- NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
- NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
- NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
- NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
- NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
- NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
- NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
- NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
- NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
- NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
- NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
- NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
- NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
- NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
- NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
- NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
- NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
- NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
- NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
- NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
- NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
- NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
- NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
- NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
- NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
- NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
- NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
- NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
- NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
- NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
- NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
- NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
- NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
- NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
- NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
- NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
- NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
- NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
- NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
- NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
- NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
- NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
- NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
- NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
- NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
- NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
- NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
- NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
- NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
- NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
- NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
- NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
- NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
- NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
- NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
- NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
- NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
- NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
- NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
- NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
- NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
- NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
- NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
- NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
- NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
- NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType),
- NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
- NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
- NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
- NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
- NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
- NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
- NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
- NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
- NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
- NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
- NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
- NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
- NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
- NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
- NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
- NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
- NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
- NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
- NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
- NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
- NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
- NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
- NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
- NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
- NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
- NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
- NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
- NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
- NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
- NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
- NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
- NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
- NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
- NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
- NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
- NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
- NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
- NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
- NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
- NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
- NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
- NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
- NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
- NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
- NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
- NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
- NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
- NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
- NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
- NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
- NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
- // FP16 scalar intrinisics go here.
- NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
- NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
- NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
- NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
- NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
- NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
- NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
- NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
- NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
- NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
- NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
- NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
- NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
- NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
+ NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
+ NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
+ NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
+ NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
+ NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
+ NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
+ NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
+ NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
+ NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
+ NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
+ NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
+ NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
+ NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
+ NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
+ NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
+ NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
+ NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
+ NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
+ NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
+ NEONMAP1(vqabsb_s8, aarch64_neon_sqabs,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
+ NEONMAP1(vqabsh_s16, aarch64_neon_sqabs,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
+ NEONMAP1(vqaddb_s8, aarch64_neon_sqadd,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqaddb_u8, aarch64_neon_uqadd,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
+ NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
+ NEONMAP1(vqaddh_s16, aarch64_neon_sqadd,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqaddh_u16, aarch64_neon_uqadd,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
+ NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
+ NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
+ NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
+ NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
+ NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
+ NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
+ NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
+ NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
+ NEONMAP1(vqnegb_s8, aarch64_neon_sqneg,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqnegh_s16, aarch64_neon_sqneg,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
+ NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
+ NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
+ NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
+ NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
+ NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
+ NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
+ NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
+ NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun,
+ VectorRet | Use64BitVectors),
+ NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun,
+ VectorRet | Use64BitVectors),
+ NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlb_s8, aarch64_neon_sqshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlb_u8, aarch64_neon_uqshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
+ NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
+ NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlh_s16, aarch64_neon_sqshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlh_u16, aarch64_neon_uqshl,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
+ NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
+ NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
+ NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
+ NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
+ NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
+ NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
+ NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
+ NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
+ NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
+ NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
+ NEONMAP1(vqsubb_s8, aarch64_neon_sqsub,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqsubb_u8, aarch64_neon_uqsub,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
+ NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
+ NEONMAP1(vqsubh_s16, aarch64_neon_sqsub,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqsubh_u16, aarch64_neon_uqsub,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
+ NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
+ NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
+ NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
+ NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
+ NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
+ NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
+ NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
+ NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
+ NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
+ NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
+ NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
+ NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
+ NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
+ NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
+ NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
+ NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
+ NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
+ NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
+ NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
+ NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
+ NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
+ NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
+ NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
+ NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
+ NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd,
+ Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
+ // FP16 scalar intrinsics go here.
+ NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
+ NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu,
+ AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+ NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
+ NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
+ NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
+ NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
+ NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
};
// Some intrinsics are equivalent for codegen.
static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = {
- { NEON::BI__builtin_neon_splat_lane_bf16, NEON::BI__builtin_neon_splat_lane_v, },
- { NEON::BI__builtin_neon_splat_laneq_bf16, NEON::BI__builtin_neon_splat_laneq_v, },
- { NEON::BI__builtin_neon_splatq_lane_bf16, NEON::BI__builtin_neon_splatq_lane_v, },
- { NEON::BI__builtin_neon_splatq_laneq_bf16, NEON::BI__builtin_neon_splatq_laneq_v, },
- { NEON::BI__builtin_neon_vabd_f16, NEON::BI__builtin_neon_vabd_v, },
- { NEON::BI__builtin_neon_vabdq_f16, NEON::BI__builtin_neon_vabdq_v, },
- { NEON::BI__builtin_neon_vabs_f16, NEON::BI__builtin_neon_vabs_v, },
- { NEON::BI__builtin_neon_vabsq_f16, NEON::BI__builtin_neon_vabsq_v, },
- { NEON::BI__builtin_neon_vcage_f16, NEON::BI__builtin_neon_vcage_v, },
- { NEON::BI__builtin_neon_vcageq_f16, NEON::BI__builtin_neon_vcageq_v, },
- { NEON::BI__builtin_neon_vcagt_f16, NEON::BI__builtin_neon_vcagt_v, },
- { NEON::BI__builtin_neon_vcagtq_f16, NEON::BI__builtin_neon_vcagtq_v, },
- { NEON::BI__builtin_neon_vcale_f16, NEON::BI__builtin_neon_vcale_v, },
- { NEON::BI__builtin_neon_vcaleq_f16, NEON::BI__builtin_neon_vcaleq_v, },
- { NEON::BI__builtin_neon_vcalt_f16, NEON::BI__builtin_neon_vcalt_v, },
- { NEON::BI__builtin_neon_vcaltq_f16, NEON::BI__builtin_neon_vcaltq_v, },
- { NEON::BI__builtin_neon_vceqz_f16, NEON::BI__builtin_neon_vceqz_v, },
- { NEON::BI__builtin_neon_vceqzq_f16, NEON::BI__builtin_neon_vceqzq_v, },
- { NEON::BI__builtin_neon_vcgez_f16, NEON::BI__builtin_neon_vcgez_v, },
- { NEON::BI__builtin_neon_vcgezq_f16, NEON::BI__builtin_neon_vcgezq_v, },
- { NEON::BI__builtin_neon_vcgtz_f16, NEON::BI__builtin_neon_vcgtz_v, },
- { NEON::BI__builtin_neon_vcgtzq_f16, NEON::BI__builtin_neon_vcgtzq_v, },
- { NEON::BI__builtin_neon_vclez_f16, NEON::BI__builtin_neon_vclez_v, },
- { NEON::BI__builtin_neon_vclezq_f16, NEON::BI__builtin_neon_vclezq_v, },
- { NEON::BI__builtin_neon_vcltz_f16, NEON::BI__builtin_neon_vcltz_v, },
- { NEON::BI__builtin_neon_vcltzq_f16, NEON::BI__builtin_neon_vcltzq_v, },
- { NEON::BI__builtin_neon_vfma_f16, NEON::BI__builtin_neon_vfma_v, },
- { NEON::BI__builtin_neon_vfma_lane_f16, NEON::BI__builtin_neon_vfma_lane_v, },
- { NEON::BI__builtin_neon_vfma_laneq_f16, NEON::BI__builtin_neon_vfma_laneq_v, },
- { NEON::BI__builtin_neon_vfmaq_f16, NEON::BI__builtin_neon_vfmaq_v, },
- { NEON::BI__builtin_neon_vfmaq_lane_f16, NEON::BI__builtin_neon_vfmaq_lane_v, },
- { NEON::BI__builtin_neon_vfmaq_laneq_f16, NEON::BI__builtin_neon_vfmaq_laneq_v, },
- { NEON::BI__builtin_neon_vld1_bf16_x2, NEON::BI__builtin_neon_vld1_x2_v },
- { NEON::BI__builtin_neon_vld1_bf16_x3, NEON::BI__builtin_neon_vld1_x3_v },
- { NEON::BI__builtin_neon_vld1_bf16_x4, NEON::BI__builtin_neon_vld1_x4_v },
- { NEON::BI__builtin_neon_vld1_bf16, NEON::BI__builtin_neon_vld1_v },
- { NEON::BI__builtin_neon_vld1_dup_bf16, NEON::BI__builtin_neon_vld1_dup_v },
- { NEON::BI__builtin_neon_vld1_lane_bf16, NEON::BI__builtin_neon_vld1_lane_v },
- { NEON::BI__builtin_neon_vld1q_bf16_x2, NEON::BI__builtin_neon_vld1q_x2_v },
- { NEON::BI__builtin_neon_vld1q_bf16_x3, NEON::BI__builtin_neon_vld1q_x3_v },
- { NEON::BI__builtin_neon_vld1q_bf16_x4, NEON::BI__builtin_neon_vld1q_x4_v },
- { NEON::BI__builtin_neon_vld1q_bf16, NEON::BI__builtin_neon_vld1q_v },
- { NEON::BI__builtin_neon_vld1q_dup_bf16, NEON::BI__builtin_neon_vld1q_dup_v },
- { NEON::BI__builtin_neon_vld1q_lane_bf16, NEON::BI__builtin_neon_vld1q_lane_v },
- { NEON::BI__builtin_neon_vld2_bf16, NEON::BI__builtin_neon_vld2_v },
- { NEON::BI__builtin_neon_vld2_dup_bf16, NEON::BI__builtin_neon_vld2_dup_v },
- { NEON::BI__builtin_neon_vld2_lane_bf16, NEON::BI__builtin_neon_vld2_lane_v },
- { NEON::BI__builtin_neon_vld2q_bf16, NEON::BI__builtin_neon_vld2q_v },
- { NEON::BI__builtin_neon_vld2q_dup_bf16, NEON::BI__builtin_neon_vld2q_dup_v },
- { NEON::BI__builtin_neon_vld2q_lane_bf16, NEON::BI__builtin_neon_vld2q_lane_v },
- { NEON::BI__builtin_neon_vld3_bf16, NEON::BI__builtin_neon_vld3_v },
- { NEON::BI__builtin_neon_vld3_dup_bf16, NEON::BI__builtin_neon_vld3_dup_v },
- { NEON::BI__builtin_neon_vld3_lane_bf16, NEON::BI__builtin_neon_vld3_lane_v },
- { NEON::BI__builtin_neon_vld3q_bf16, NEON::BI__builtin_neon_vld3q_v },
- { NEON::BI__builtin_neon_vld3q_dup_bf16, NEON::BI__builtin_neon_vld3q_dup_v },
- { NEON::BI__builtin_neon_vld3q_lane_bf16, NEON::BI__builtin_neon_vld3q_lane_v },
- { NEON::BI__builtin_neon_vld4_bf16, NEON::BI__builtin_neon_vld4_v },
- { NEON::BI__builtin_neon_vld4_dup_bf16, NEON::BI__builtin_neon_vld4_dup_v },
- { NEON::BI__builtin_neon_vld4_lane_bf16, NEON::BI__builtin_neon_vld4_lane_v },
- { NEON::BI__builtin_neon_vld4q_bf16, NEON::BI__builtin_neon_vld4q_v },
- { NEON::BI__builtin_neon_vld4q_dup_bf16, NEON::BI__builtin_neon_vld4q_dup_v },
- { NEON::BI__builtin_neon_vld4q_lane_bf16, NEON::BI__builtin_neon_vld4q_lane_v },
- { NEON::BI__builtin_neon_vmax_f16, NEON::BI__builtin_neon_vmax_v, },
- { NEON::BI__builtin_neon_vmaxnm_f16, NEON::BI__builtin_neon_vmaxnm_v, },
- { NEON::BI__builtin_neon_vmaxnmq_f16, NEON::BI__builtin_neon_vmaxnmq_v, },
- { NEON::BI__builtin_neon_vmaxq_f16, NEON::BI__builtin_neon_vmaxq_v, },
- { NEON::BI__builtin_neon_vmin_f16, NEON::BI__builtin_neon_vmin_v, },
- { NEON::BI__builtin_neon_vminnm_f16, NEON::BI__builtin_neon_vminnm_v, },
- { NEON::BI__builtin_neon_vminnmq_f16, NEON::BI__builtin_neon_vminnmq_v, },
- { NEON::BI__builtin_neon_vminq_f16, NEON::BI__builtin_neon_vminq_v, },
- { NEON::BI__builtin_neon_vmulx_f16, NEON::BI__builtin_neon_vmulx_v, },
- { NEON::BI__builtin_neon_vmulxq_f16, NEON::BI__builtin_neon_vmulxq_v, },
- { NEON::BI__builtin_neon_vpadd_f16, NEON::BI__builtin_neon_vpadd_v, },
- { NEON::BI__builtin_neon_vpaddq_f16, NEON::BI__builtin_neon_vpaddq_v, },
- { NEON::BI__builtin_neon_vpmax_f16, NEON::BI__builtin_neon_vpmax_v, },
- { NEON::BI__builtin_neon_vpmaxnm_f16, NEON::BI__builtin_neon_vpmaxnm_v, },
- { NEON::BI__builtin_neon_vpmaxnmq_f16, NEON::BI__builtin_neon_vpmaxnmq_v, },
- { NEON::BI__builtin_neon_vpmaxq_f16, NEON::BI__builtin_neon_vpmaxq_v, },
- { NEON::BI__builtin_neon_vpmin_f16, NEON::BI__builtin_neon_vpmin_v, },
- { NEON::BI__builtin_neon_vpminnm_f16, NEON::BI__builtin_neon_vpminnm_v, },
- { NEON::BI__builtin_neon_vpminnmq_f16, NEON::BI__builtin_neon_vpminnmq_v, },
- { NEON::BI__builtin_neon_vpminq_f16, NEON::BI__builtin_neon_vpminq_v, },
- { NEON::BI__builtin_neon_vrecpe_f16, NEON::BI__builtin_neon_vrecpe_v, },
- { NEON::BI__builtin_neon_vrecpeq_f16, NEON::BI__builtin_neon_vrecpeq_v, },
- { NEON::BI__builtin_neon_vrecps_f16, NEON::BI__builtin_neon_vrecps_v, },
- { NEON::BI__builtin_neon_vrecpsq_f16, NEON::BI__builtin_neon_vrecpsq_v, },
- { NEON::BI__builtin_neon_vrnd_f16, NEON::BI__builtin_neon_vrnd_v, },
- { NEON::BI__builtin_neon_vrnda_f16, NEON::BI__builtin_neon_vrnda_v, },
- { NEON::BI__builtin_neon_vrndaq_f16, NEON::BI__builtin_neon_vrndaq_v, },
- { NEON::BI__builtin_neon_vrndi_f16, NEON::BI__builtin_neon_vrndi_v, },
- { NEON::BI__builtin_neon_vrndiq_f16, NEON::BI__builtin_neon_vrndiq_v, },
- { NEON::BI__builtin_neon_vrndm_f16, NEON::BI__builtin_neon_vrndm_v, },
- { NEON::BI__builtin_neon_vrndmq_f16, NEON::BI__builtin_neon_vrndmq_v, },
- { NEON::BI__builtin_neon_vrndn_f16, NEON::BI__builtin_neon_vrndn_v, },
- { NEON::BI__builtin_neon_vrndnq_f16, NEON::BI__builtin_neon_vrndnq_v, },
- { NEON::BI__builtin_neon_vrndp_f16, NEON::BI__builtin_neon_vrndp_v, },
- { NEON::BI__builtin_neon_vrndpq_f16, NEON::BI__builtin_neon_vrndpq_v, },
- { NEON::BI__builtin_neon_vrndq_f16, NEON::BI__builtin_neon_vrndq_v, },
- { NEON::BI__builtin_neon_vrndx_f16, NEON::BI__builtin_neon_vrndx_v, },
- { NEON::BI__builtin_neon_vrndxq_f16, NEON::BI__builtin_neon_vrndxq_v, },
- { NEON::BI__builtin_neon_vrsqrte_f16, NEON::BI__builtin_neon_vrsqrte_v, },
- { NEON::BI__builtin_neon_vrsqrteq_f16, NEON::BI__builtin_neon_vrsqrteq_v, },
- { NEON::BI__builtin_neon_vrsqrts_f16, NEON::BI__builtin_neon_vrsqrts_v, },
- { NEON::BI__builtin_neon_vrsqrtsq_f16, NEON::BI__builtin_neon_vrsqrtsq_v, },
- { NEON::BI__builtin_neon_vsqrt_f16, NEON::BI__builtin_neon_vsqrt_v, },
- { NEON::BI__builtin_neon_vsqrtq_f16, NEON::BI__builtin_neon_vsqrtq_v, },
- { NEON::BI__builtin_neon_vst1_bf16_x2, NEON::BI__builtin_neon_vst1_x2_v },
- { NEON::BI__builtin_neon_vst1_bf16_x3, NEON::BI__builtin_neon_vst1_x3_v },
- { NEON::BI__builtin_neon_vst1_bf16_x4, NEON::BI__builtin_neon_vst1_x4_v },
- { NEON::BI__builtin_neon_vst1_bf16, NEON::BI__builtin_neon_vst1_v },
- { NEON::BI__builtin_neon_vst1_lane_bf16, NEON::BI__builtin_neon_vst1_lane_v },
- { NEON::BI__builtin_neon_vst1q_bf16_x2, NEON::BI__builtin_neon_vst1q_x2_v },
- { NEON::BI__builtin_neon_vst1q_bf16_x3, NEON::BI__builtin_neon_vst1q_x3_v },
- { NEON::BI__builtin_neon_vst1q_bf16_x4, NEON::BI__builtin_neon_vst1q_x4_v },
- { NEON::BI__builtin_neon_vst1q_bf16, NEON::BI__builtin_neon_vst1q_v },
- { NEON::BI__builtin_neon_vst1q_lane_bf16, NEON::BI__builtin_neon_vst1q_lane_v },
- { NEON::BI__builtin_neon_vst2_bf16, NEON::BI__builtin_neon_vst2_v },
- { NEON::BI__builtin_neon_vst2_lane_bf16, NEON::BI__builtin_neon_vst2_lane_v },
- { NEON::BI__builtin_neon_vst2q_bf16, NEON::BI__builtin_neon_vst2q_v },
- { NEON::BI__builtin_neon_vst2q_lane_bf16, NEON::BI__builtin_neon_vst2q_lane_v },
- { NEON::BI__builtin_neon_vst3_bf16, NEON::BI__builtin_neon_vst3_v },
- { NEON::BI__builtin_neon_vst3_lane_bf16, NEON::BI__builtin_neon_vst3_lane_v },
- { NEON::BI__builtin_neon_vst3q_bf16, NEON::BI__builtin_neon_vst3q_v },
- { NEON::BI__builtin_neon_vst3q_lane_bf16, NEON::BI__builtin_neon_vst3q_lane_v },
- { NEON::BI__builtin_neon_vst4_bf16, NEON::BI__builtin_neon_vst4_v },
- { NEON::BI__builtin_neon_vst4_lane_bf16, NEON::BI__builtin_neon_vst4_lane_v },
- { NEON::BI__builtin_neon_vst4q_bf16, NEON::BI__builtin_neon_vst4q_v },
- { NEON::BI__builtin_neon_vst4q_lane_bf16, NEON::BI__builtin_neon_vst4q_lane_v },
- // The mangling rules cause us to have one ID for each type for vldap1(q)_lane
- // and vstl1(q)_lane, but codegen is equivalent for all of them. Choose an
- // arbitrary one to be handled as tha canonical variation.
- { NEON::BI__builtin_neon_vldap1_lane_u64, NEON::BI__builtin_neon_vldap1_lane_s64 },
- { NEON::BI__builtin_neon_vldap1_lane_f64, NEON::BI__builtin_neon_vldap1_lane_s64 },
- { NEON::BI__builtin_neon_vldap1_lane_p64, NEON::BI__builtin_neon_vldap1_lane_s64 },
- { NEON::BI__builtin_neon_vldap1q_lane_u64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
- { NEON::BI__builtin_neon_vldap1q_lane_f64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
- { NEON::BI__builtin_neon_vldap1q_lane_p64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
- { NEON::BI__builtin_neon_vstl1_lane_u64, NEON::BI__builtin_neon_vstl1_lane_s64 },
- { NEON::BI__builtin_neon_vstl1_lane_f64, NEON::BI__builtin_neon_vstl1_lane_s64 },
- { NEON::BI__builtin_neon_vstl1_lane_p64, NEON::BI__builtin_neon_vstl1_lane_s64 },
- { NEON::BI__builtin_neon_vstl1q_lane_u64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
- { NEON::BI__builtin_neon_vstl1q_lane_f64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
- { NEON::BI__builtin_neon_vstl1q_lane_p64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
+ {
+ NEON::BI__builtin_neon_splat_lane_bf16,
+ NEON::BI__builtin_neon_splat_lane_v,
+ },
+ {
+ NEON::BI__builtin_neon_splat_laneq_bf16,
+ NEON::BI__builtin_neon_splat_laneq_v,
+ },
+ {
+ NEON::BI__builtin_neon_splatq_lane_bf16,
+ NEON::BI__builtin_neon_splatq_lane_v,
+ },
+ {
+ NEON::BI__builtin_neon_splatq_laneq_bf16,
+ NEON::BI__builtin_neon_splatq_laneq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vabd_f16,
+ NEON::BI__builtin_neon_vabd_v,
+ },
+ {
+ NEON::BI__builtin_neon_vabdq_f16,
+ NEON::BI__builtin_neon_vabdq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vabs_f16,
+ NEON::BI__builtin_neon_vabs_v,
+ },
+ {
+ NEON::BI__builtin_neon_vabsq_f16,
+ NEON::BI__builtin_neon_vabsq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcage_f16,
+ NEON::BI__builtin_neon_vcage_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcageq_f16,
+ NEON::BI__builtin_neon_vcageq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcagt_f16,
+ NEON::BI__builtin_neon_vcagt_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcagtq_f16,
+ NEON::BI__builtin_neon_vcagtq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcale_f16,
+ NEON::BI__builtin_neon_vcale_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcaleq_f16,
+ NEON::BI__builtin_neon_vcaleq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcalt_f16,
+ NEON::BI__builtin_neon_vcalt_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcaltq_f16,
+ NEON::BI__builtin_neon_vcaltq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vceqz_f16,
+ NEON::BI__builtin_neon_vceqz_v,
+ },
+ {
+ NEON::BI__builtin_neon_vceqzq_f16,
+ NEON::BI__builtin_neon_vceqzq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcgez_f16,
+ NEON::BI__builtin_neon_vcgez_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcgezq_f16,
+ NEON::BI__builtin_neon_vcgezq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcgtz_f16,
+ NEON::BI__builtin_neon_vcgtz_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcgtzq_f16,
+ NEON::BI__builtin_neon_vcgtzq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vclez_f16,
+ NEON::BI__builtin_neon_vclez_v,
+ },
+ {
+ NEON::BI__builtin_neon_vclezq_f16,
+ NEON::BI__builtin_neon_vclezq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcltz_f16,
+ NEON::BI__builtin_neon_vcltz_v,
+ },
+ {
+ NEON::BI__builtin_neon_vcltzq_f16,
+ NEON::BI__builtin_neon_vcltzq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vfma_f16,
+ NEON::BI__builtin_neon_vfma_v,
+ },
+ {
+ NEON::BI__builtin_neon_vfma_lane_f16,
+ NEON::BI__builtin_neon_vfma_lane_v,
+ },
+ {
+ NEON::BI__builtin_neon_vfma_laneq_f16,
+ NEON::BI__builtin_neon_vfma_laneq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vfmaq_f16,
+ NEON::BI__builtin_neon_vfmaq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vfmaq_lane_f16,
+ NEON::BI__builtin_neon_vfmaq_lane_v,
+ },
+ {
+ NEON::BI__builtin_neon_vfmaq_laneq_f16,
+ NEON::BI__builtin_neon_vfmaq_laneq_v,
+ },
+ {NEON::BI__builtin_neon_vld1_bf16_x2, NEON::BI__builtin_neon_vld1_x2_v},
+ {NEON::BI__builtin_neon_vld1_bf16_x3, NEON::BI__builtin_neon_vld1_x3_v},
+ {NEON::BI__builtin_neon_vld1_bf16_x4, NEON::BI__builtin_neon_vld1_x4_v},
+ {NEON::BI__builtin_neon_vld1_bf16, NEON::BI__builtin_neon_vld1_v},
+ {NEON::BI__builtin_neon_vld1_dup_bf16, NEON::BI__builtin_neon_vld1_dup_v},
+ {NEON::BI__builtin_neon_vld1_lane_bf16, NEON::BI__builtin_neon_vld1_lane_v},
+ {NEON::BI__builtin_neon_vld1q_bf16_x2, NEON::BI__builtin_neon_vld1q_x2_v},
+ {NEON::BI__builtin_neon_vld1q_bf16_x3, NEON::BI__builtin_neon_vld1q_x3_v},
+ {NEON::BI__builtin_neon_vld1q_bf16_x4, NEON::BI__builtin_neon_vld1q_x4_v},
+ {NEON::BI__builtin_neon_vld1q_bf16, NEON::BI__builtin_neon_vld1q_v},
+ {NEON::BI__builtin_neon_vld1q_dup_bf16, NEON::BI__builtin_neon_vld1q_dup_v},
+ {NEON::BI__builtin_neon_vld1q_lane_bf16,
+ NEON::BI__builtin_neon_vld1q_lane_v},
+ {NEON::BI__builtin_neon_vld2_bf16, NEON::BI__builtin_neon_vld2_v},
+ {NEON::BI__builtin_neon_vld2_dup_bf16, NEON::BI__builtin_neon_vld2_dup_v},
+ {NEON::BI__builtin_neon_vld2_lane_bf16, NEON::BI__builtin_neon_vld2_lane_v},
+ {NEON::BI__builtin_neon_vld2q_bf16, NEON::BI__builtin_neon_vld2q_v},
+ {NEON::BI__builtin_neon_vld2q_dup_bf16, NEON::BI__builtin_neon_vld2q_dup_v},
+ {NEON::BI__builtin_neon_vld2q_lane_bf16,
+ NEON::BI__builtin_neon_vld2q_lane_v},
+ {NEON::BI__builtin_neon_vld3_bf16, NEON::BI__builtin_neon_vld3_v},
+ {NEON::BI__builtin_neon_vld3_dup_bf16, NEON::BI__builtin_neon_vld3_dup_v},
+ {NEON::BI__builtin_neon_vld3_lane_bf16, NEON::BI__builtin_neon_vld3_lane_v},
+ {NEON::BI__builtin_neon_vld3q_bf16, NEON::BI__builtin_neon_vld3q_v},
+ {NEON::BI__builtin_neon_vld3q_dup_bf16, NEON::BI__builtin_neon_vld3q_dup_v},
+ {NEON::BI__builtin_neon_vld3q_lane_bf16,
+ NEON::BI__builtin_neon_vld3q_lane_v},
+ {NEON::BI__builtin_neon_vld4_bf16, NEON::BI__builtin_neon_vld4_v},
+ {NEON::BI__builtin_neon_vld4_dup_bf16, NEON::BI__builtin_neon_vld4_dup_v},
+ {NEON::BI__builtin_neon_vld4_lane_bf16, NEON::BI__builtin_neon_vld4_lane_v},
+ {NEON::BI__builtin_neon_vld4q_bf16, NEON::BI__builtin_neon_vld4q_v},
+ {NEON::BI__builtin_neon_vld4q_dup_bf16, NEON::BI__builtin_neon_vld4q_dup_v},
+ {NEON::BI__builtin_neon_vld4q_lane_bf16,
+ NEON::BI__builtin_neon_vld4q_lane_v},
+ {
+ NEON::BI__builtin_neon_vmax_f16,
+ NEON::BI__builtin_neon_vmax_v,
+ },
+ {
+ NEON::BI__builtin_neon_vmaxnm_f16,
+ NEON::BI__builtin_neon_vmaxnm_v,
+ },
+ {
+ NEON::BI__builtin_neon_vmaxnmq_f16,
+ NEON::BI__builtin_neon_vmaxnmq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vmaxq_f16,
+ NEON::BI__builtin_neon_vmaxq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vmin_f16,
+ NEON::BI__builtin_neon_vmin_v,
+ },
+ {
+ NEON::BI__builtin_neon_vminnm_f16,
+ NEON::BI__builtin_neon_vminnm_v,
+ },
+ {
+ NEON::BI__builtin_neon_vminnmq_f16,
+ NEON::BI__builtin_neon_vminnmq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vminq_f16,
+ NEON::BI__builtin_neon_vminq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vmulx_f16,
+ NEON::BI__builtin_neon_vmulx_v,
+ },
+ {
+ NEON::BI__builtin_neon_vmulxq_f16,
+ NEON::BI__builtin_neon_vmulxq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vpadd_f16,
+ NEON::BI__builtin_neon_vpadd_v,
+ },
+ {
+ NEON::BI__builtin_neon_vpaddq_f16,
+ NEON::BI__builtin_neon_vpaddq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vpmax_f16,
+ NEON::BI__builtin_neon_vpmax_v,
+ },
+ {
+ NEON::BI__builtin_neon_vpmaxnm_f16,
+ NEON::BI__builtin_neon_vpmaxnm_v,
+ },
+ {
+ NEON::BI__builtin_neon_vpmaxnmq_f16,
+ NEON::BI__builtin_neon_vpmaxnmq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vpmaxq_f16,
+ NEON::BI__builtin_neon_vpmaxq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vpmin_f16,
+ NEON::BI__builtin_neon_vpmin_v,
+ },
+ {
+ NEON::BI__builtin_neon_vpminnm_f16,
+ NEON::BI__builtin_neon_vpminnm_v,
+ },
+ {
+ NEON::BI__builtin_neon_vpminnmq_f16,
+ NEON::BI__builtin_neon_vpminnmq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vpminq_f16,
+ NEON::BI__builtin_neon_vpminq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrecpe_f16,
+ NEON::BI__builtin_neon_vrecpe_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrecpeq_f16,
+ NEON::BI__builtin_neon_vrecpeq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrecps_f16,
+ NEON::BI__builtin_neon_vrecps_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrecpsq_f16,
+ NEON::BI__builtin_neon_vrecpsq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrnd_f16,
+ NEON::BI__builtin_neon_vrnd_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrnda_f16,
+ NEON::BI__builtin_neon_vrnda_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndaq_f16,
+ NEON::BI__builtin_neon_vrndaq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndi_f16,
+ NEON::BI__builtin_neon_vrndi_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndiq_f16,
+ NEON::BI__builtin_neon_vrndiq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndm_f16,
+ NEON::BI__builtin_neon_vrndm_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndmq_f16,
+ NEON::BI__builtin_neon_vrndmq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndn_f16,
+ NEON::BI__builtin_neon_vrndn_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndnq_f16,
+ NEON::BI__builtin_neon_vrndnq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndp_f16,
+ NEON::BI__builtin_neon_vrndp_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndpq_f16,
+ NEON::BI__builtin_neon_vrndpq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndq_f16,
+ NEON::BI__builtin_neon_vrndq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndx_f16,
+ NEON::BI__builtin_neon_vrndx_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrndxq_f16,
+ NEON::BI__builtin_neon_vrndxq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrsqrte_f16,
+ NEON::BI__builtin_neon_vrsqrte_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrsqrteq_f16,
+ NEON::BI__builtin_neon_vrsqrteq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrsqrts_f16,
+ NEON::BI__builtin_neon_vrsqrts_v,
+ },
+ {
+ NEON::BI__builtin_neon_vrsqrtsq_f16,
+ NEON::BI__builtin_neon_vrsqrtsq_v,
+ },
+ {
+ NEON::BI__builtin_neon_vsqrt_f16,
+ NEON::BI__builtin_neon_vsqrt_v,
+ },
+ {
+ NEON::BI__builtin_neon_vsqrtq_f16,
+ NEON::BI__builtin_neon_vsqrtq_v,
+ },
+ {NEON::BI__builtin_neon_vst1_bf16_x2, NEON::BI__builtin_neon_vst1_x2_v},
+ {NEON::BI__builtin_neon_vst1_bf16_x3, NEON::BI__builtin_neon_vst1_x3_v},
+ {NEON::BI__builtin_neon_vst1_bf16_x4, NEON::BI__builtin_neon_vst1_x4_v},
+ {NEON::BI__builtin_neon_vst1_bf16, NEON::BI__builtin_neon_vst1_v},
+ {NEON::BI__builtin_neon_vst1_lane_bf16, NEON::BI__builtin_neon_vst1_lane_v},
+ {NEON::BI__builtin_neon_vst1q_bf16_x2, NEON::BI__builtin_neon_vst1q_x2_v},
+ {NEON::BI__builtin_neon_vst1q_bf16_x3, NEON::BI__builtin_neon_vst1q_x3_v},
+ {NEON::BI__builtin_neon_vst1q_bf16_x4, NEON::BI__builtin_neon_vst1q_x4_v},
+ {NEON::BI__builtin_neon_vst1q_bf16, NEON::BI__builtin_neon_vst1q_v},
+ {NEON::BI__builtin_neon_vst1q_lane_bf16,
+ NEON::BI__builtin_neon_vst1q_lane_v},
+ {NEON::BI__builtin_neon_vst2_bf16, NEON::BI__builtin_neon_vst2_v},
+ {NEON::BI__builtin_neon_vst2_lane_bf16, NEON::BI__builtin_neon_vst2_lane_v},
+ {NEON::BI__builtin_neon_vst2q_bf16, NEON::BI__builtin_neon_vst2q_v},
+ {NEON::BI__builtin_neon_vst2q_lane_bf16,
+ NEON::BI__builtin_neon_vst2q_lane_v},
+ {NEON::BI__builtin_neon_vst3_bf16, NEON::BI__builtin_neon_vst3_v},
+ {NEON::BI__builtin_neon_vst3_lane_bf16, NEON::BI__builtin_neon_vst3_lane_v},
+ {NEON::BI__builtin_neon_vst3q_bf16, NEON::BI__builtin_neon_vst3q_v},
+ {NEON::BI__builtin_neon_vst3q_lane_bf16,
+ NEON::BI__builtin_neon_vst3q_lane_v},
+ {NEON::BI__builtin_neon_vst4_bf16, NEON::BI__builtin_neon_vst4_v},
+ {NEON::BI__builtin_neon_vst4_lane_bf16, NEON::BI__builtin_neon_vst4_lane_v},
+ {NEON::BI__builtin_neon_vst4q_bf16, NEON::BI__builtin_neon_vst4q_v},
+ {NEON::BI__builtin_neon_vst4q_lane_bf16,
+ NEON::BI__builtin_neon_vst4q_lane_v},
+ // The mangling rules cause us to have one ID for each type for
+ // vldap1(q)_lane and vstl1(q)_lane, but codegen is equivalent for all of
+ them. Choose an arbitrary one to be handled as the canonical variation.
+ {NEON::BI__builtin_neon_vldap1_lane_u64,
+ NEON::BI__builtin_neon_vldap1_lane_s64},
+ {NEON::BI__builtin_neon_vldap1_lane_f64,
+ NEON::BI__builtin_neon_vldap1_lane_s64},
+ {NEON::BI__builtin_neon_vldap1_lane_p64,
+ NEON::BI__builtin_neon_vldap1_lane_s64},
+ {NEON::BI__builtin_neon_vldap1q_lane_u64,
+ NEON::BI__builtin_neon_vldap1q_lane_s64},
+ {NEON::BI__builtin_neon_vldap1q_lane_f64,
+ NEON::BI__builtin_neon_vldap1q_lane_s64},
+ {NEON::BI__builtin_neon_vldap1q_lane_p64,
+ NEON::BI__builtin_neon_vldap1q_lane_s64},
+ {NEON::BI__builtin_neon_vstl1_lane_u64,
+ NEON::BI__builtin_neon_vstl1_lane_s64},
+ {NEON::BI__builtin_neon_vstl1_lane_f64,
+ NEON::BI__builtin_neon_vstl1_lane_s64},
+ {NEON::BI__builtin_neon_vstl1_lane_p64,
+ NEON::BI__builtin_neon_vstl1_lane_s64},
+ {NEON::BI__builtin_neon_vstl1q_lane_u64,
+ NEON::BI__builtin_neon_vstl1q_lane_s64},
+ {NEON::BI__builtin_neon_vstl1q_lane_f64,
+ NEON::BI__builtin_neon_vstl1q_lane_s64},
+ {NEON::BI__builtin_neon_vstl1q_lane_p64,
+ NEON::BI__builtin_neon_vstl1q_lane_s64},
};
#undef NEONMAP0
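The table this hunk reflows, NEONEquivalentIntrinsicMap, exists because name
mangling produces one builtin ID per element type even when codegen is
identical; mapping every variant onto one canonical ID keeps the main codegen
switch small. A minimal sketch of how such a remap table is typically
consulted (illustrative only — the patch merely reformats the initializer,
and the helper below is hypothetical):

    #include <cstddef>
    #include <utility>

    // Remap a per-type builtin ID to the canonical entry it shares codegen
    // with, so a single switch case covers every type variant.
    unsigned remapToCanonical(const std::pair<unsigned, unsigned> *Map,
                              std::size_t N, unsigned BuiltinID) {
      for (std::size_t I = 0; I != N; ++I)  // small table; linear scan is fine
        if (Map[I].first == BuiltinID)
          return Map[I].second; // e.g. vldap1_lane_f64 -> vldap1_lane_s64
      return BuiltinID;         // not an alias; handle the ID directly
    }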
@@ -7513,8 +7874,8 @@ static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = {
{ #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
#define GET_SVE_LLVM_INTRINSIC_MAP
-#include "clang/Basic/arm_sve_builtin_cg.inc"
#include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def"
+#include "clang/Basic/arm_sve_builtin_cg.inc"
#undef GET_SVE_LLVM_INTRINSIC_MAP
};
@@ -7645,7 +8006,7 @@ static Value *EmitCommonNeonSISDBuiltinExpr(
ai != ae; ++ai, ++j) {
llvm::Type *ArgTy = ai->getType();
if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
- ArgTy->getPrimitiveSizeInBits())
+ ArgTy->getPrimitiveSizeInBits())
continue;
assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
@@ -7692,7 +8053,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
if (!Ty)
return nullptr;
- auto getAlignmentValue32 = [&](Address addr) -> Value* {
+ auto getAlignmentValue32 = [&](Address addr) -> Value * {
return Builder.getInt32(addr.getAlignment().getQuantity());
};
@@ -7701,7 +8062,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Int = AltLLVMIntrinsic;
switch (BuiltinID) {
- default: break;
+ default:
+ break;
case NEON::BI__builtin_neon_splat_lane_v:
case NEON::BI__builtin_neon_splat_laneq_v:
case NEON::BI__builtin_neon_splatq_lane_v:
@@ -7732,7 +8094,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8);
Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
- Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
+ Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
return Builder.CreateBitCast(Ops[0], Ty);
}
case NEON::BI__builtin_neon_vaddhn_v: {
@@ -7764,7 +8126,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcagtq_v: {
llvm::Type *Ty;
switch (VTy->getScalarSizeInBits()) {
- default: llvm_unreachable("unexpected type");
+ default:
+ llvm_unreachable("unexpected type");
case 32:
Ty = FloatTy;
break;
@@ -7776,7 +8139,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
break;
}
auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
- llvm::Type *Tys[] = { VTy, VecFlt };
+ llvm::Type *Tys[] = {VTy, VecFlt};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, NameHint);
}
@@ -7826,7 +8189,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvt_n_f16_u16:
case NEON::BI__builtin_neon_vcvtq_n_f16_s16:
case NEON::BI__builtin_neon_vcvtq_n_f16_u16: {
- llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
+ llvm::Type *Tys[2] = {GetFloatNeonType(this, Type), Ty};
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
@@ -7834,7 +8197,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvt_n_f64_v:
case NEON::BI__builtin_neon_vcvtq_n_f32_v:
case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
- llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
+ llvm::Type *Tys[2] = {GetFloatNeonType(this, Type), Ty};
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
@@ -7851,7 +8214,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvtq_n_u32_v:
case NEON::BI__builtin_neon_vcvtq_n_s64_v:
case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
@@ -7919,20 +8282,19 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvtmq_u16_f16:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vcvtx_f32_v: {
- llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
+ llvm::Type *Tys[2] = {VTy->getTruncatedElementVectorType(VTy), Ty};
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
-
}
case NEON::BI__builtin_neon_vext_v:
case NEON::BI__builtin_neon_vextq_v: {
int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
- Indices.push_back(i+CV);
+ Indices.push_back(i + CV);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -8034,11 +8396,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vpadalq_v: {
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
- llvm::Type *EltTy =
- llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
auto *NarrowTy =
llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
- llvm::Type *Tys[2] = { Ty, NarrowTy };
+ llvm::Type *Tys[2] = {Ty, NarrowTy};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vpaddl_v:
@@ -8048,7 +8409,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
auto *NarrowTy =
llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
- llvm::Type *Tys[2] = { Ty, NarrowTy };
+ llvm::Type *Tys[2] = {Ty, NarrowTy};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
}
case NEON::BI__builtin_neon_vqdmlal_v:
@@ -8084,12 +8445,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vqshl_n_v:
case NEON::BI__builtin_neon_vqshlq_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
- 1, false);
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n", 1, false);
case NEON::BI__builtin_neon_vqshlu_n_v:
case NEON::BI__builtin_neon_vqshluq_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
- 1, false);
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", 1, false);
case NEON::BI__builtin_neon_vrecpe_v:
case NEON::BI__builtin_neon_vrecpeq_v:
case NEON::BI__builtin_neon_vrsqrte_v:
@@ -8104,8 +8463,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
case NEON::BI__builtin_neon_vrshr_n_v:
case NEON::BI__builtin_neon_vrshrq_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
- 1, true);
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
case NEON::BI__builtin_neon_vsha512hq_u64:
case NEON::BI__builtin_neon_vsha512h2q_u64:
case NEON::BI__builtin_neon_vsha512su0q_u64:
@@ -8116,7 +8474,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vshl_n_v:
case NEON::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
- return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
+ return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
"vshl_n");
case NEON::BI__builtin_neon_vshll_n_v: {
llvm::FixedVectorType *SrcTy =
@@ -8220,8 +8578,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
- Indices.push_back(i+vi);
- Indices.push_back(i+e+vi);
+ Indices.push_back(i + vi);
+ Indices.push_back(i + e + vi);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
@@ -8247,7 +8605,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
- Indices.push_back(2*i+vi);
+ Indices.push_back(2 * i + vi);
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
@@ -8269,8 +8627,8 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
- Indices.push_back((i + vi*e) >> 1);
- Indices.push_back(((i + vi*e) >> 1)+e);
+ Indices.push_back((i + vi * e) >> 1);
+ Indices.push_back(((i + vi * e) >> 1) + e);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
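The vzip index arithmetic reflowed above is easy to sanity-check by hand. A
tiny standalone program (illustrative, not part of the patch) reproduces the
two shuffle masks for a four-lane vector:

    #include <cstdio>

    // Prints "0 4 1 5" then "2 6 3 7": the classic zip interleave of
    // vectors a and b into {a0,b0,a1,b1} and {a2,b2,a3,b3}.
    int main() {
      const unsigned e = 4;
      for (unsigned vi = 0; vi != 2; ++vi) {
        for (unsigned i = 0; i != e; i += 2)
          std::printf("%u %u ", (i + vi * e) >> 1, ((i + vi * e) >> 1) + e);
        std::printf("\n");
      }
    }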
@@ -8284,70 +8642,69 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vdotq_u32: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
- llvm::Type *Tys[2] = { Ty, InputTy };
+ llvm::Type *Tys[2] = {Ty, InputTy};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
}
case NEON::BI__builtin_neon_vfmlal_low_f16:
case NEON::BI__builtin_neon_vfmlalq_low_f16: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
- llvm::Type *Tys[2] = { Ty, InputTy };
+ llvm::Type *Tys[2] = {Ty, InputTy};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
}
case NEON::BI__builtin_neon_vfmlsl_low_f16:
case NEON::BI__builtin_neon_vfmlslq_low_f16: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
- llvm::Type *Tys[2] = { Ty, InputTy };
+ llvm::Type *Tys[2] = {Ty, InputTy};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
}
case NEON::BI__builtin_neon_vfmlal_high_f16:
case NEON::BI__builtin_neon_vfmlalq_high_f16: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
- llvm::Type *Tys[2] = { Ty, InputTy };
+ llvm::Type *Tys[2] = {Ty, InputTy};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
}
case NEON::BI__builtin_neon_vfmlsl_high_f16:
case NEON::BI__builtin_neon_vfmlslq_high_f16: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
- llvm::Type *Tys[2] = { Ty, InputTy };
+ llvm::Type *Tys[2] = {Ty, InputTy};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
}
case NEON::BI__builtin_neon_vmmlaq_s32:
case NEON::BI__builtin_neon_vmmlaq_u32: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
- llvm::Type *Tys[2] = { Ty, InputTy };
+ llvm::Type *Tys[2] = {Ty, InputTy};
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vmmla");
}
case NEON::BI__builtin_neon_vusmmlaq_s32: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
- llvm::Type *Tys[2] = { Ty, InputTy };
+ llvm::Type *Tys[2] = {Ty, InputTy};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
}
case NEON::BI__builtin_neon_vusdot_s32:
case NEON::BI__builtin_neon_vusdotq_s32: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
- llvm::Type *Tys[2] = { Ty, InputTy };
+ llvm::Type *Tys[2] = {Ty, InputTy};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
}
case NEON::BI__builtin_neon_vbfdot_f32:
case NEON::BI__builtin_neon_vbfdotq_f32: {
llvm::Type *InputTy =
llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
- llvm::Type *Tys[2] = { Ty, InputTy };
+ llvm::Type *Tys[2] = {Ty, InputTy};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
}
case NEON::BI__builtin_neon___a32_vcvt_bf16_f32: {
- llvm::Type *Tys[1] = { Ty };
+ llvm::Type *Tys[1] = {Ty};
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvtfp2bf");
}
-
}
assert(Int && "Expected valid intrinsic number");
@@ -8398,15 +8755,14 @@ static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
SmallVector<int, 16> Indices;
auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
- Indices.push_back(2*i);
- Indices.push_back(2*i+1);
+ Indices.push_back(2 * i);
+ Indices.push_back(2 * i + 1);
}
int PairPos = 0, End = Ops.size() - 1;
while (PairPos < End) {
- TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
- Ops[PairPos+1], Indices,
- Name));
+ TblOps.push_back(CGF.Builder.CreateShuffleVector(
+ Ops[PairPos], Ops[PairPos + 1], Indices, Name));
PairPos += 2;
}
@@ -8414,8 +8770,8 @@ static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
// of the 128-bit lookup table with zero.
if (PairPos == End) {
Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
- TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
- ZeroTbl, Indices, Name));
+ TblOps.push_back(
+ CGF.Builder.CreateShuffleVector(Ops[PairPos], ZeroTbl, Indices, Name));
}
Function *TblF;
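For context on the shuffles reformatted above: with e lanes per 64-bit table
register, the mask {2*i, 2*i+1 : i in [0, e)} simply enumerates 0..2e-1, so
each CreateShuffleVector call is a plain concatenation packing two d-register
tables into the 128-bit table that the AArch64 TBL intrinsics operate on. A
quick standalone check of that claim (illustrative only):

    #include <cstdio>

    // For e = 8 (e.g. 8 x i8 per 64-bit register) this prints 0 1 2 ... 15,
    // i.e. shufflevector(A, B, mask) is the concatenation A ++ B.
    int main() {
      const unsigned e = 8;
      for (unsigned i = 0; i != e; ++i)
        std::printf("%u %u ", 2 * i, 2 * i + 1);
      std::printf("\n");
    }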
@@ -8508,15 +8864,15 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
}
- llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
+ llvm::Metadata *Ops[] = {llvm::MDString::get(Context, SysReg)};
llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
- llvm::Type *Types[] = { RegisterType };
+ llvm::Type *Types[] = {RegisterType};
bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
- assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
- && "Can't fit 64-bit value in 32-bit register");
+ assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64)) &&
+ "Can't fit 64-bit value in 32-bit register");
if (AccessKind != Write) {
assert(AccessKind == NormalRead || AccessKind == VolatileRead);
@@ -8542,23 +8898,24 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
if (MixedTypes) {
// Extend 32 bit write value to 64 bit to pass to write.
ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
- return Builder.CreateCall(F, { Metadata, ArgValue });
+ return Builder.CreateCall(F, {Metadata, ArgValue});
}
if (ValueType->isPointerTy()) {
// Have VoidPtrTy ArgValue but want to return an i32/i64.
ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
- return Builder.CreateCall(F, { Metadata, ArgValue });
+ return Builder.CreateCall(F, {Metadata, ArgValue});
}
- return Builder.CreateCall(F, { Metadata, ArgValue });
+ return Builder.CreateCall(F, {Metadata, ArgValue});
}
/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
/// argument that specifies the vector type.
static bool HasExtraNeonArgument(unsigned BuiltinID) {
switch (BuiltinID) {
- default: break;
+ default:
+ break;
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vget_lane_bf16:
@@ -8632,8 +8989,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (BuiltinID == clang::ARM::BI__builtin_arm_prefetch) {
Value *Address = EmitScalarExpr(E->getArg(0));
- Value *RW = EmitScalarExpr(E->getArg(1));
- Value *IsData = EmitScalarExpr(E->getArg(2));
+ Value *RW = EmitScalarExpr(E->getArg(1));
+ Value *IsData = EmitScalarExpr(E->getArg(2));
// Locality is not supported on ARM target
Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
@@ -8658,7 +9015,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Res;
}
-
if (BuiltinID == clang::ARM::BI__builtin_arm_cls) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
@@ -8686,7 +9042,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F;
switch (BuiltinID) {
- default: llvm_unreachable("unexpected builtin");
+ default:
+ llvm_unreachable("unexpected builtin");
case clang::ARM::BI__builtin_arm_mcrr:
F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
break;
@@ -8720,7 +9077,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F;
switch (BuiltinID) {
- default: llvm_unreachable("unexpected builtin");
+ default:
+ llvm_unreachable("unexpected builtin");
case clang::ARM::BI__builtin_arm_mrrc:
F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
break;
@@ -8731,7 +9089,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Value *Coproc = EmitScalarExpr(E->getArg(0));
Value *Opc1 = EmitScalarExpr(E->getArg(1));
- Value *CRm = EmitScalarExpr(E->getArg(2));
+ Value *CRm = EmitScalarExpr(E->getArg(2));
Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
// Returns an unsigned 64 bit integer, represented
@@ -8757,7 +9115,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F;
switch (BuiltinID) {
- default: llvm_unreachable("unexpected builtin");
+ default:
+ llvm_unreachable("unexpected builtin");
case clang::ARM::BI__builtin_arm_ldaex:
F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
break;
@@ -8870,19 +9229,25 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case clang::ARM::BI__builtin_arm_crc32b:
- CRCIntrinsicID = Intrinsic::arm_crc32b; break;
+ CRCIntrinsicID = Intrinsic::arm_crc32b;
+ break;
case clang::ARM::BI__builtin_arm_crc32cb:
- CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
+ CRCIntrinsicID = Intrinsic::arm_crc32cb;
+ break;
case clang::ARM::BI__builtin_arm_crc32h:
- CRCIntrinsicID = Intrinsic::arm_crc32h; break;
+ CRCIntrinsicID = Intrinsic::arm_crc32h;
+ break;
case clang::ARM::BI__builtin_arm_crc32ch:
- CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
+ CRCIntrinsicID = Intrinsic::arm_crc32ch;
+ break;
case clang::ARM::BI__builtin_arm_crc32w:
case clang::ARM::BI__builtin_arm_crc32d:
- CRCIntrinsicID = Intrinsic::arm_crc32w; break;
+ CRCIntrinsicID = Intrinsic::arm_crc32w;
+ break;
case clang::ARM::BI__builtin_arm_crc32cw:
case clang::ARM::BI__builtin_arm_crc32cd:
- CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
+ CRCIntrinsicID = Intrinsic::arm_crc32cw;
+ break;
}
if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
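One detail worth noting in the switch above: both 64-bit builtins (crc32d and
crc32cd) map to the 32-bit intrinsics, since AArch32 has no 64-bit CRC
instruction; the emission that follows is expected to apply the 32-bit step
twice, low word then high word. A rough sketch of that idea (hedged — this
mirrors the mapping rather than the patch's exact emission code, and it
compiles only for an ARM target with the CRC extension):

    #include <stdint.h>

    // Two 32-bit CRC steps standing in for one 64-bit step.
    uint32_t crc32_64(uint32_t crc, uint64_t data) {
      crc = __builtin_arm_crc32w(crc, (uint32_t)data);          // low word
      return __builtin_arm_crc32w(crc, (uint32_t)(data >> 32)); // high word
    }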
@@ -8974,13 +9339,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
- auto getAlignmentValue32 = [&](Address addr) -> Value* {
+ auto getAlignmentValue32 = [&](Address addr) -> Value * {
return Builder.getInt32(addr.getAlignment().getQuantity());
};
Address PtrOp0 = Address::invalid();
Address PtrOp1 = Address::invalid();
- SmallVector<Value*, 4> Ops;
+ SmallVector<Value *, 4> Ops;
bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
for (unsigned i = 0, e = NumArgs; i != e; i++) {
@@ -9047,7 +9412,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
switch (BuiltinID) {
- default: break;
+ default:
+ break;
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vget_lane_i16:
@@ -9069,7 +9435,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Tys[] = {Arg->getType()};
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
- return Builder.CreateCall(F, {Arg}, "vrndn"); }
+ return Builder.CreateCall(F, {Arg}, "vrndn");
+ }
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
@@ -9110,14 +9477,14 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(BuiltinID == clang::ARM::BI_MoveToCoprocessor
? Intrinsic::arm_mcr
: Intrinsic::arm_mcr2);
- return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
- Ops[3], Ops[4], Ops[5]});
+ return Builder.CreateCall(F,
+ {Ops[1], Ops[2], Ops[0], Ops[3], Ops[4], Ops[5]});
}
}
// Get the last argument, which specifies the vector type.
assert(HasExtraArg);
- const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ const Expr *Arg = E->getArg(E->getNumArgs() - 1);
std::optional<llvm::APSInt> Result =
Arg->getIntegerConstantExpr(getContext());
if (!Result)
@@ -9165,7 +9532,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
unsigned Int;
switch (BuiltinID) {
- default: return nullptr;
+ default:
+ return nullptr;
case NEON::BI__builtin_neon_vld1q_lane_v:
// Handle 64-bit integer elements as a special case. Use shuffles of
// one-element vectors to avoid poor code for i64 in the backend.
@@ -9173,7 +9541,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Extract the other lane.
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
- Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
+ Value *SV =
+ llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1 - Lane));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
// Load the value as a one-element vector.
Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
@@ -9194,26 +9563,24 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int =
- usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
- 1, true);
+ usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n", 1, true);
case NEON::BI__builtin_neon_vqrshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
Ops, "vqrshrun_n", 1, true);
case NEON::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
- 1, true);
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n", 1, true);
case NEON::BI__builtin_neon_vqshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
Ops, "vqshrun_n", 1, true);
case NEON::BI__builtin_neon_vrecpe_v:
case NEON::BI__builtin_neon_vrecpeq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
- Ops, "vrecpe");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty), Ops,
+ "vrecpe");
case NEON::BI__builtin_neon_vrshrn_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
- Ops, "vrshrn_n", 1, true);
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty), Ops,
+ "vrshrn_n", 1, true);
case NEON::BI__builtin_neon_vrsra_n_v:
case NEON::BI__builtin_neon_vrsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -9245,8 +9612,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
Ops[2] = getAlignmentValue32(PtrOp0);
llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
- Tys), Ops);
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Tys),
+ Ops);
}
[[fallthrough]];
case NEON::BI__builtin_neon_vst1_lane_v: {
@@ -9256,33 +9623,33 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
PtrOp0.withElementType(Ops[1]->getType()));
}
case NEON::BI__builtin_neon_vtbl1_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
- Ops, "vtbl1");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), Ops,
+ "vtbl1");
case NEON::BI__builtin_neon_vtbl2_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
- Ops, "vtbl2");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), Ops,
+ "vtbl2");
case NEON::BI__builtin_neon_vtbl3_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
- Ops, "vtbl3");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), Ops,
+ "vtbl3");
case NEON::BI__builtin_neon_vtbl4_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
- Ops, "vtbl4");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), Ops,
+ "vtbl4");
case NEON::BI__builtin_neon_vtbx1_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
- Ops, "vtbx1");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), Ops,
+ "vtbx1");
case NEON::BI__builtin_neon_vtbx2_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
- Ops, "vtbx2");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), Ops,
+ "vtbx2");
case NEON::BI__builtin_neon_vtbx3_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
- Ops, "vtbx3");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), Ops,
+ "vtbx3");
case NEON::BI__builtin_neon_vtbx4_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
- Ops, "vtbx4");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), Ops,
+ "vtbx4");
}
}
-template<typename Integer>
+template <typename Integer>
static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
return E->getIntegerConstantExpr(Context)->getExtValue();
}
@@ -9350,7 +9717,8 @@ static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
}
}
-static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
+static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V,
+ bool Odd) {
// Make a shufflevector that extracts every other element of a vector (evens
// or odds, as desired).
SmallVector<int, 16> Indices;
@@ -9375,7 +9743,7 @@ static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
return Builder.CreateShuffleVector(V0, V1, Indices);
}
-template<unsigned HighBit, unsigned OtherBits>
+template <unsigned HighBit, unsigned OtherBits>
static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
// MVE-specific helper function to make a vector splat of a constant such as
// UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
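The two template parameters deserve a concrete reading: the splatted lane has
its top bit equal to HighBit and every lower bit equal to OtherBits, which is
exactly how INT_MIN, INT_MAX, and UINT_MAX patterns arise. A small standalone
check for 32-bit lanes (illustrative; the helper name is made up):

    #include <cstdint>

    // Top bit = HighBit, all lower bits = OtherBits, for a 32-bit lane.
    template <unsigned HighBit, unsigned OtherBits>
    constexpr uint32_t laneValue32() {
      uint32_t Lower = OtherBits ? 0x7fffffffu : 0u;
      return (uint32_t(HighBit) << 31) | Lower;
    }

    static_assert(laneValue32<1, 0>() == 0x80000000u, "INT_MIN pattern");
    static_assert(laneValue32<0, 1>() == 0x7fffffffu, "INT_MAX pattern");
    static_assert(laneValue32<1, 1>() == 0xffffffffu, "UINT_MAX pattern");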
@@ -9412,7 +9780,7 @@ Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
// Code autogenerated by Tablegen will handle all the simple builtins.
switch (BuiltinID) {
- #include "clang/Basic/arm_mve_builtin_cg.inc"
+#include "clang/Basic/arm_mve_builtin_cg.inc"
// If we didn't match an MVE builtin id at all, go back to the
// main EmitARMBuiltinExpr.
@@ -9514,10 +9882,10 @@ Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
}
}
-static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
- const CallExpr *E,
- SmallVectorImpl<Value *> &Ops,
- llvm::Triple::ArchType Arch) {
+static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF,
+ unsigned BuiltinID, const CallExpr *E,
+ SmallVectorImpl<Value *> &Ops,
+ llvm::Triple::ArchType Arch) {
unsigned int Int = 0;
const char *s = nullptr;
@@ -9611,8 +9979,7 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
Intrinsic::aarch64_neon_tbl2, "vtbl2");
llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
- Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
- TwentyFourV);
+ Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4], TwentyFourV);
CmpRes = Builder.CreateSExt(CmpRes, Ty);
Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
@@ -9625,28 +9992,44 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
}
case NEON::BI__builtin_neon_vqtbl1_v:
case NEON::BI__builtin_neon_vqtbl1q_v:
- Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
+ Int = Intrinsic::aarch64_neon_tbl1;
+ s = "vtbl1";
+ break;
case NEON::BI__builtin_neon_vqtbl2_v:
case NEON::BI__builtin_neon_vqtbl2q_v: {
- Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
+ Int = Intrinsic::aarch64_neon_tbl2;
+ s = "vtbl2";
+ break;
case NEON::BI__builtin_neon_vqtbl3_v:
case NEON::BI__builtin_neon_vqtbl3q_v:
- Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
+ Int = Intrinsic::aarch64_neon_tbl3;
+ s = "vtbl3";
+ break;
case NEON::BI__builtin_neon_vqtbl4_v:
case NEON::BI__builtin_neon_vqtbl4q_v:
- Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
+ Int = Intrinsic::aarch64_neon_tbl4;
+ s = "vtbl4";
+ break;
case NEON::BI__builtin_neon_vqtbx1_v:
case NEON::BI__builtin_neon_vqtbx1q_v:
- Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
+ Int = Intrinsic::aarch64_neon_tbx1;
+ s = "vtbx1";
+ break;
case NEON::BI__builtin_neon_vqtbx2_v:
case NEON::BI__builtin_neon_vqtbx2q_v:
- Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
+ Int = Intrinsic::aarch64_neon_tbx2;
+ s = "vtbx2";
+ break;
case NEON::BI__builtin_neon_vqtbx3_v:
case NEON::BI__builtin_neon_vqtbx3q_v:
- Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
+ Int = Intrinsic::aarch64_neon_tbx3;
+ s = "vtbx3";
+ break;
case NEON::BI__builtin_neon_vqtbx4_v:
case NEON::BI__builtin_neon_vqtbx4q_v:
- Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
+ Int = Intrinsic::aarch64_neon_tbx4;
+ s = "vtbx4";
+ break;
}
}
@@ -9724,7 +10107,8 @@ llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) {
llvm::ScalableVectorType *
CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) {
switch (TypeFlags.getEltType()) {
- default: llvm_unreachable("Unhandled SVETypeFlag!");
+ default:
+ llvm_unreachable("Unhandled SVETypeFlag!");
case SVETypeFlags::EltTyInt8:
return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
@@ -9987,7 +10371,7 @@ Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
}
Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
- SmallVectorImpl<Value*> &Ops,
+ SmallVectorImpl<Value *> &Ops,
unsigned IntID) {
llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
@@ -10002,7 +10386,7 @@ Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
}
Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags,
- SmallVectorImpl<Value*> &Ops,
+ SmallVectorImpl<Value *> &Ops,
unsigned IntID) {
llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
@@ -10037,11 +10421,11 @@ Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags,
// The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
// need to break up the tuple vector.
- SmallVector<llvm::Value*, 5> Operands;
+ SmallVector<llvm::Value *, 5> Operands;
for (unsigned I = Ops.size() - N; I < Ops.size(); ++I)
Operands.push_back(Ops[I]);
Operands.append({Predicate, BasePtr});
- Function *F = CGM.getIntrinsic(IntID, { VTy });
+ Function *F = CGM.getIntrinsic(IntID, {VTy});
return Builder.CreateCall(F, Operands);
}
@@ -10260,7 +10644,7 @@ Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
cast<llvm::VectorType>(Ty)->getElementCount(), Scalar);
}
-Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
+Value *CodeGenFunction::EmitSVEDupX(Value *Scalar) {
return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
}
@@ -10570,7 +10954,7 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
case SVE::BI__builtin_sve_svmov_b_z: {
// svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
SVETypeFlags TypeFlags(Builtin->TypeModifier);
- llvm::Type* OverloadedTy = getSVEType(TypeFlags);
+ llvm::Type *OverloadedTy = getSVEType(TypeFlags);
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
}
@@ -10578,7 +10962,7 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
case SVE::BI__builtin_sve_svnot_b_z: {
// svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
SVETypeFlags TypeFlags(Builtin->TypeModifier);
- llvm::Type* OverloadedTy = getSVEType(TypeFlags);
+ llvm::Type *OverloadedTy = getSVEType(TypeFlags);
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
}
@@ -10642,8 +11026,8 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
case SVE::BI__builtin_sve_svdupq_n_u32:
case SVE::BI__builtin_sve_svdupq_n_f32:
case SVE::BI__builtin_sve_svdupq_n_s32: {
- // These builtins are implemented by storing each element to an array and using
- // ld1rq to materialize a vector.
+ // These builtins are implemented by storing each element to an array and
+ // using ld1rq to materialize a vector.
unsigned NumOpnds = Ops.size();
bool IsBoolTy =
@@ -10658,7 +11042,7 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
SmallVector<llvm::Value *, 16> VecOps;
for (unsigned I = 0; I < NumOpnds; ++I)
- VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
+ VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
Value *Vec = BuildVector(VecOps);
llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
@@ -10871,7 +11255,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
- default: break;
+ default:
+ break;
case clang::AArch64::BI__builtin_arm_nop:
HintID = 0;
break;
@@ -10998,8 +11383,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
assert((getContext().getTypeSize(E->getType()) == 32) &&
"__jcvt of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
- return Builder.CreateCall(
- CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs),
+ Arg);
}
if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b ||
@@ -11214,21 +11599,29 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case clang::AArch64::BI__builtin_arm_crc32b:
- CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
+ CRCIntrinsicID = Intrinsic::aarch64_crc32b;
+ break;
case clang::AArch64::BI__builtin_arm_crc32cb:
- CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
+ CRCIntrinsicID = Intrinsic::aarch64_crc32cb;
+ break;
case clang::AArch64::BI__builtin_arm_crc32h:
- CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
+ CRCIntrinsicID = Intrinsic::aarch64_crc32h;
+ break;
case clang::AArch64::BI__builtin_arm_crc32ch:
- CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
+ CRCIntrinsicID = Intrinsic::aarch64_crc32ch;
+ break;
case clang::AArch64::BI__builtin_arm_crc32w:
- CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
+ CRCIntrinsicID = Intrinsic::aarch64_crc32w;
+ break;
case clang::AArch64::BI__builtin_arm_crc32cw:
- CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
+ CRCIntrinsicID = Intrinsic::aarch64_crc32cw;
+ break;
case clang::AArch64::BI__builtin_arm_crc32d:
- CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
+ CRCIntrinsicID = Intrinsic::aarch64_crc32x;
+ break;
case clang::AArch64::BI__builtin_arm_crc32cd:
- CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
+ CRCIntrinsicID = Intrinsic::aarch64_crc32cx;
+ break;
}
if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
@@ -11258,17 +11651,23 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case clang::AArch64::BI__builtin_arm_irg:
- MTEIntrinsicID = Intrinsic::aarch64_irg; break;
+ MTEIntrinsicID = Intrinsic::aarch64_irg;
+ break;
case clang::AArch64::BI__builtin_arm_addg:
- MTEIntrinsicID = Intrinsic::aarch64_addg; break;
+ MTEIntrinsicID = Intrinsic::aarch64_addg;
+ break;
case clang::AArch64::BI__builtin_arm_gmi:
- MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
+ MTEIntrinsicID = Intrinsic::aarch64_gmi;
+ break;
case clang::AArch64::BI__builtin_arm_ldg:
- MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
+ MTEIntrinsicID = Intrinsic::aarch64_ldg;
+ break;
case clang::AArch64::BI__builtin_arm_stg:
- MTEIntrinsicID = Intrinsic::aarch64_stg; break;
+ MTEIntrinsicID = Intrinsic::aarch64_stg;
+ break;
case clang::AArch64::BI__builtin_arm_subp:
- MTEIntrinsicID = Intrinsic::aarch64_subp; break;
+ MTEIntrinsicID = Intrinsic::aarch64_subp;
+ break;
}
if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
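For readers unfamiliar with the MTE builtins being reflowed in the hunks
above, a hypothetical usage sketch (assumes an AArch64 target with +mte; the
snippet is not from the patch):

    // Tag an allocation, publish the tag, then read it back.
    void tag_buffer(void *buf) {
      void *p = __builtin_arm_irg(buf, 0); // insert a random tag, excluding none
      __builtin_arm_stg(p);                // store p's tag as the allocation tag
      void *q = __builtin_arm_ldg(p);      // reload the pointer's current tag
      (void)q;
    }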
@@ -11280,9 +11679,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
Mask = Builder.CreateZExt(Mask, Int64Ty);
- Value *RV = Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
- return Builder.CreatePointerCast(RV, T);
+ Value *RV =
+ Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
+ return Builder.CreatePointerCast(RV, T);
}
if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
Value *Pointer = EmitScalarExpr(E->getArg(0));
@@ -11290,8 +11689,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
- Value *RV = Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
+ Value *RV = Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
+ {Pointer, TagOffset});
return Builder.CreatePointerCast(RV, T);
}
if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
@@ -11300,8 +11699,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
- return Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
+ return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
+ {Pointer, ExcludedMask});
}
// Although it is possible to supply a different return
// address (first arg) to this intrinsic, for now we set
@@ -11309,26 +11708,26 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
Value *TagAddress = EmitScalarExpr(E->getArg(0));
TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
- Value *RV = Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
+ Value *RV = Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
+ {TagAddress, TagAddress});
return Builder.CreatePointerCast(RV, T);
}
// Although it is possible to supply a different tag (to set)
// to this intrinsic (as first arg), for now we supply
// the tag that is in input address arg (common use case).
if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
- Value *TagAddress = EmitScalarExpr(E->getArg(0));
- TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
- return Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
+ Value *TagAddress = EmitScalarExpr(E->getArg(0));
+ TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
+ return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
+ {TagAddress, TagAddress});
}
if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
Value *PointerA = EmitScalarExpr(E->getArg(0));
Value *PointerB = EmitScalarExpr(E->getArg(1));
PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
- return Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
+ return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
+ {PointerA, PointerB});
}
}
@@ -11381,33 +11780,33 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
LLVMContext &Context = CGM.getLLVMContext();
unsigned SysReg =
- E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
+ E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
std::string SysRegStr;
- llvm::raw_string_ostream(SysRegStr) <<
- ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
- ((SysReg >> 11) & 7) << ":" <<
- ((SysReg >> 7) & 15) << ":" <<
- ((SysReg >> 3) & 15) << ":" <<
- ( SysReg & 7);
-
- llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
+ llvm::raw_string_ostream(SysRegStr)
+ << ((1 << 1) | ((SysReg >> 14) & 1)) << ":" << ((SysReg >> 11) & 7)
+ << ":" << ((SysReg >> 7) & 15) << ":" << ((SysReg >> 3) & 15) << ":"
+ << (SysReg & 7);
+
+ llvm::Metadata *Ops[] = {llvm::MDString::get(Context, SysRegStr)};
llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
llvm::Type *RegisterType = Int64Ty;
- llvm::Type *Types[] = { RegisterType };
+ llvm::Type *Types[] = {RegisterType};
if (BuiltinID == clang::AArch64::BI_ReadStatusReg) {
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
return Builder.CreateCall(F, Metadata);
}
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
- return Builder.CreateCall(F, { Metadata, ArgValue });
+ return Builder.CreateCall(F, {Metadata, ArgValue});
}
if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) {
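The stream expression reflowed above unpacks MSVC's packed system-register
number into the "op0:op1:CRn:CRm:op2" string that the read_register and
write_register metadata uses; note that only op0's low bit is stored (bit 14)
while its high bit is implied by the (1 << 1) term. A worked decode
(illustrative; assumes the usual S3_3_C13_C0_2 encoding of TPIDR_EL0, packed
as 0x5E82):

    #include <cstdio>

    // Prints "3:3:13:0:2", i.e. op0=3, op1=3, CRn=13, CRm=0, op2=2.
    int main() {
      unsigned SysReg = 0x5E82;
      std::printf("%u:%u:%u:%u:%u\n",
                  (1u << 1) | ((SysReg >> 14) & 1), // op0: high bit implied
                  (SysReg >> 11) & 7,               // op1
                  (SysReg >> 7) & 15,               // CRn
                  (SysReg >> 3) & 15,               // CRm
                  SysReg & 7);                      // op2
    }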
@@ -11583,7 +11982,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
- llvm::SmallVector<Value*, 4> Ops;
+ llvm::SmallVector<Value *, 4> Ops;
Address PtrOp0 = Address::invalid();
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
if (i == 0) {
@@ -11623,7 +12022,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Result;
}
- const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ const Expr *Arg = E->getArg(E->getNumArgs() - 1);
NeonTypeFlags Type(0);
if (std::optional<llvm::APSInt> Result =
Arg->getIntegerConstantExpr(getContext()))
@@ -11635,7 +12034,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
// Handle non-overloaded intrinsics first.
switch (BuiltinID) {
- default: break;
+ default:
+ break;
case NEON::BI__builtin_neon_vabsh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
@@ -11644,7 +12044,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
+ Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
return Builder.CreateBitCast(Ops[0], Int128Ty);
}
@@ -11706,32 +12106,43 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvtph_s16_f16:
case NEON::BI__builtin_neon_vcvth_s16_f16: {
unsigned Int;
- llvm::Type* InTy = Int32Ty;
- llvm::Type* FTy = HalfTy;
+ llvm::Type *InTy = Int32Ty;
+ llvm::Type *FTy = HalfTy;
llvm::Type *Tys[2] = {InTy, FTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
+ default:
+ llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcvtah_u16_f16:
- Int = Intrinsic::aarch64_neon_fcvtau; break;
+ Int = Intrinsic::aarch64_neon_fcvtau;
+ break;
case NEON::BI__builtin_neon_vcvtmh_u16_f16:
- Int = Intrinsic::aarch64_neon_fcvtmu; break;
+ Int = Intrinsic::aarch64_neon_fcvtmu;
+ break;
case NEON::BI__builtin_neon_vcvtnh_u16_f16:
- Int = Intrinsic::aarch64_neon_fcvtnu; break;
+ Int = Intrinsic::aarch64_neon_fcvtnu;
+ break;
case NEON::BI__builtin_neon_vcvtph_u16_f16:
- Int = Intrinsic::aarch64_neon_fcvtpu; break;
+ Int = Intrinsic::aarch64_neon_fcvtpu;
+ break;
case NEON::BI__builtin_neon_vcvth_u16_f16:
- Int = Intrinsic::aarch64_neon_fcvtzu; break;
+ Int = Intrinsic::aarch64_neon_fcvtzu;
+ break;
case NEON::BI__builtin_neon_vcvtah_s16_f16:
- Int = Intrinsic::aarch64_neon_fcvtas; break;
+ Int = Intrinsic::aarch64_neon_fcvtas;
+ break;
case NEON::BI__builtin_neon_vcvtmh_s16_f16:
- Int = Intrinsic::aarch64_neon_fcvtms; break;
+ Int = Intrinsic::aarch64_neon_fcvtms;
+ break;
case NEON::BI__builtin_neon_vcvtnh_s16_f16:
- Int = Intrinsic::aarch64_neon_fcvtns; break;
+ Int = Intrinsic::aarch64_neon_fcvtns;
+ break;
case NEON::BI__builtin_neon_vcvtph_s16_f16:
- Int = Intrinsic::aarch64_neon_fcvtps; break;
+ Int = Intrinsic::aarch64_neon_fcvtps;
+ break;
case NEON::BI__builtin_neon_vcvth_s16_f16:
- Int = Intrinsic::aarch64_neon_fcvtzs; break;
+ Int = Intrinsic::aarch64_neon_fcvtzs;
+ break;
}
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -11741,20 +12152,27 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcageh_f16:
case NEON::BI__builtin_neon_vcagth_f16: {
unsigned Int;
- llvm::Type* InTy = Int32Ty;
- llvm::Type* FTy = HalfTy;
+ llvm::Type *InTy = Int32Ty;
+ llvm::Type *FTy = HalfTy;
llvm::Type *Tys[2] = {InTy, FTy};
Ops.push_back(EmitScalarExpr(E->getArg(1)));
switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
+ default:
+ llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcageh_f16:
- Int = Intrinsic::aarch64_neon_facge; break;
+ Int = Intrinsic::aarch64_neon_facge;
+ break;
case NEON::BI__builtin_neon_vcagth_f16:
- Int = Intrinsic::aarch64_neon_facgt; break;
+ Int = Intrinsic::aarch64_neon_facgt;
+ break;
case NEON::BI__builtin_neon_vcaleh_f16:
- Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
+ Int = Intrinsic::aarch64_neon_facge;
+ std::swap(Ops[0], Ops[1]);
+ break;
case NEON::BI__builtin_neon_vcalth_f16:
- Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
+ Int = Intrinsic::aarch64_neon_facgt;
+ std::swap(Ops[0], Ops[1]);
+ break;
}
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -11762,16 +12180,19 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvth_n_s16_f16:
case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
unsigned Int;
- llvm::Type* InTy = Int32Ty;
- llvm::Type* FTy = HalfTy;
+ llvm::Type *InTy = Int32Ty;
+ llvm::Type *FTy = HalfTy;
llvm::Type *Tys[2] = {InTy, FTy};
Ops.push_back(EmitScalarExpr(E->getArg(1)));
switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
+ default:
+ llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcvth_n_s16_f16:
- Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
+ Int = Intrinsic::aarch64_neon_vcvtfp2fxs;
+ break;
case NEON::BI__builtin_neon_vcvth_n_u16_f16:
- Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
+ Int = Intrinsic::aarch64_neon_vcvtfp2fxu;
+ break;
}
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -11779,12 +12200,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvth_n_f16_s16:
case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
unsigned Int;
- llvm::Type* FTy = HalfTy;
- llvm::Type* InTy = Int32Ty;
+ llvm::Type *FTy = HalfTy;
+ llvm::Type *InTy = Int32Ty;
llvm::Type *Tys[2] = {FTy, InTy};
Ops.push_back(EmitScalarExpr(E->getArg(1)));
switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
+ default:
+ llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcvth_n_f16_s16:
Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
@@ -11887,12 +12309,23 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcgtd_f64: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
- case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
- case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
- case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
- case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
- case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
+ default:
+ llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vceqd_f64:
+ P = llvm::FCmpInst::FCMP_OEQ;
+ break;
+ case NEON::BI__builtin_neon_vcled_f64:
+ P = llvm::FCmpInst::FCMP_OLE;
+ break;
+ case NEON::BI__builtin_neon_vcltd_f64:
+ P = llvm::FCmpInst::FCMP_OLT;
+ break;
+ case NEON::BI__builtin_neon_vcged_f64:
+ P = llvm::FCmpInst::FCMP_OGE;
+ break;
+ case NEON::BI__builtin_neon_vcgtd_f64:
+ P = llvm::FCmpInst::FCMP_OGT;
+ break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
@@ -11910,12 +12343,23 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcgts_f32: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
- case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
- case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
- case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
- case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
- case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
+ default:
+ llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vceqs_f32:
+ P = llvm::FCmpInst::FCMP_OEQ;
+ break;
+ case NEON::BI__builtin_neon_vcles_f32:
+ P = llvm::FCmpInst::FCMP_OLE;
+ break;
+ case NEON::BI__builtin_neon_vclts_f32:
+ P = llvm::FCmpInst::FCMP_OLT;
+ break;
+ case NEON::BI__builtin_neon_vcges_f32:
+ P = llvm::FCmpInst::FCMP_OGE;
+ break;
+ case NEON::BI__builtin_neon_vcgts_f32:
+ P = llvm::FCmpInst::FCMP_OGT;
+ break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
@@ -11933,12 +12377,23 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcgth_f16: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
- case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
- case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
- case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
- case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
- case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
+ default:
+ llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vceqh_f16:
+ P = llvm::FCmpInst::FCMP_OEQ;
+ break;
+ case NEON::BI__builtin_neon_vcleh_f16:
+ P = llvm::FCmpInst::FCMP_OLE;
+ break;
+ case NEON::BI__builtin_neon_vclth_f16:
+ P = llvm::FCmpInst::FCMP_OLT;
+ break;
+ case NEON::BI__builtin_neon_vcgeh_f16:
+ P = llvm::FCmpInst::FCMP_OGE;
+ break;
+ case NEON::BI__builtin_neon_vcgth_f16:
+ P = llvm::FCmpInst::FCMP_OGT;
+ break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
@@ -11961,17 +12416,36 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcled_s64: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
+ default:
+ llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqd_s64:
- case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
- case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
- case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
- case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
- case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
- case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
- case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
- case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
- case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
+ case NEON::BI__builtin_neon_vceqd_u64:
+ P = llvm::ICmpInst::ICMP_EQ;
+ break;
+ case NEON::BI__builtin_neon_vcgtd_s64:
+ P = llvm::ICmpInst::ICMP_SGT;
+ break;
+ case NEON::BI__builtin_neon_vcgtd_u64:
+ P = llvm::ICmpInst::ICMP_UGT;
+ break;
+ case NEON::BI__builtin_neon_vcltd_s64:
+ P = llvm::ICmpInst::ICMP_SLT;
+ break;
+ case NEON::BI__builtin_neon_vcltd_u64:
+ P = llvm::ICmpInst::ICMP_ULT;
+ break;
+ case NEON::BI__builtin_neon_vcged_u64:
+ P = llvm::ICmpInst::ICMP_UGE;
+ break;
+ case NEON::BI__builtin_neon_vcged_s64:
+ P = llvm::ICmpInst::ICMP_SGE;
+ break;
+ case NEON::BI__builtin_neon_vcled_u64:
+ P = llvm::ICmpInst::ICMP_ULE;
+ break;
+ case NEON::BI__builtin_neon_vcled_s64:
+ P = llvm::ICmpInst::ICMP_SLE;
+ break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
@@ -12114,7 +12588,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
{EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
case NEON::BI__builtin_neon_vfmsh_f16: {
- Value* Neg = Builder.CreateFNeg(EmitScalarExpr(E->getArg(1)), "vsubh");
+ Value *Neg = Builder.CreateFNeg(EmitScalarExpr(E->getArg(1)), "vsubh");
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
return emitCallMaybeConstrainedFPBuiltin(
@@ -12133,27 +12607,29 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
- Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
- ProductOps, "vqdmlXl");
+ Ops[1] =
+ EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
+ ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
- ? Intrinsic::aarch64_neon_sqadd
- : Intrinsic::aarch64_neon_sqsub;
+ ? Intrinsic::aarch64_neon_sqadd
+ : Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqshlud_n_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
- Ops, "vqshlu_n");
+ return EmitNeonCall(
+ CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty), Ops,
+ "vqshlu_n");
}
case NEON::BI__builtin_neon_vqshld_n_u64:
case NEON::BI__builtin_neon_vqshld_n_s64: {
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
- ? Intrinsic::aarch64_neon_uqshl
- : Intrinsic::aarch64_neon_sqshl;
+ ? Intrinsic::aarch64_neon_uqshl
+ : Intrinsic::aarch64_neon_sqshl;
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
@@ -12161,8 +12637,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vrshrd_n_u64:
case NEON::BI__builtin_neon_vrshrd_n_s64: {
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
- ? Intrinsic::aarch64_neon_urshl
- : Intrinsic::aarch64_neon_srshl;
+ ? Intrinsic::aarch64_neon_urshl
+ : Intrinsic::aarch64_neon_srshl;
Ops.push_back(EmitScalarExpr(E->getArg(1)));
int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
Ops[1] = ConstantInt::get(Int64Ty, -SV);
@@ -12171,8 +12647,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vrsrad_n_u64:
case NEON::BI__builtin_neon_vrsrad_n_s64: {
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
- ? Intrinsic::aarch64_neon_urshl
- : Intrinsic::aarch64_neon_srshl;
+ ? Intrinsic::aarch64_neon_urshl
+ : Intrinsic::aarch64_neon_srshl;
Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
@@ -12188,8 +12664,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vshrd_n_s64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
return Builder.CreateAShr(
- Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
- Amt->getZExtValue())),
+ Ops[0],
+ ConstantInt::get(
+ Int64Ty, std::min(static_cast<uint64_t>(63), Amt->getZExtValue())),
"shrd_n");
}
case NEON::BI__builtin_neon_vshrd_n_u64: {
@@ -12204,8 +12681,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vsrad_n_s64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
Ops[1] = Builder.CreateAShr(
- Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
- Amt->getZExtValue())),
+ Ops[1],
+ ConstantInt::get(
+ Int64Ty, std::min(static_cast<uint64_t>(63), Amt->getZExtValue())),
"shrd_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
}
@@ -12230,8 +12708,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(Ops[2]));
auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
- Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
- ProductOps, "vqdmlXl");
+ Ops[1] =
+ EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
+ ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
Ops.pop_back();
@@ -12252,8 +12731,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
ProductOps, "vqdmlXl");
unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
- ? Intrinsic::aarch64_neon_sqadd
- : Intrinsic::aarch64_neon_sqsub;
+ ? Intrinsic::aarch64_neon_sqadd
+ : Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqdmlals_lane_s32:
@@ -12321,7 +12800,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
unsigned Int;
switch (BuiltinID) {
- default: return nullptr;
+ default:
+ return nullptr;
case NEON::BI__builtin_neon_vbsl_v:
case NEON::BI__builtin_neon_vbslq_v: {
llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
@@ -12416,13 +12896,15 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vmull_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
- if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
+ if (Type.isPoly())
+ Int = Intrinsic::aarch64_neon_pmull;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case NEON::BI__builtin_neon_vmax_v:
case NEON::BI__builtin_neon_vmaxq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
- if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
+ if (Ty->isFPOrFPVectorTy())
+ Int = Intrinsic::aarch64_neon_fmax;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
case NEON::BI__builtin_neon_vmaxh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
@@ -12433,7 +12915,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
- if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
+ if (Ty->isFPOrFPVectorTy())
+ Int = Intrinsic::aarch64_neon_fmin;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
case NEON::BI__builtin_neon_vminh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
@@ -12444,7 +12927,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vabdq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
- if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
+ if (Ty->isFPOrFPVectorTy())
+ Int = Intrinsic::aarch64_neon_fabd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
case NEON::BI__builtin_neon_vpadal_v:
case NEON::BI__builtin_neon_vpadalq_v: {
@@ -12453,9 +12937,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
unsigned BitWidth = EltTy->getBitWidth();
auto *ArgTy = llvm::FixedVectorType::get(
llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
- llvm::Type* Tys[2] = { VTy, ArgTy };
- Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
- SmallVector<llvm::Value*, 1> TmpOps;
+ llvm::Type *Tys[2] = {VTy, ArgTy};
+ Int =
+ usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
+ SmallVector<llvm::Value *, 1> TmpOps;
TmpOps.push_back(Ops[1]);
Function *F = CGM.getIntrinsic(Int, Tys);
llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
@@ -12466,13 +12951,15 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vpminq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
- if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
+ if (Ty->isFPOrFPVectorTy())
+ Int = Intrinsic::aarch64_neon_fminp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
case NEON::BI__builtin_neon_vpmax_v:
case NEON::BI__builtin_neon_vpmaxq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
- if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
+ if (Ty->isFPOrFPVectorTy())
+ Int = Intrinsic::aarch64_neon_fmaxp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
case NEON::BI__builtin_neon_vminnm_v:
case NEON::BI__builtin_neon_vminnmq_v:
@@ -12492,17 +12979,20 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
case NEON::BI__builtin_neon_vrecpss_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
- Ops, "vrecps");
+ return EmitNeonCall(
+ CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy), Ops,
+ "vrecps");
}
case NEON::BI__builtin_neon_vrecpsd_f64:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
- Ops, "vrecps");
+ return EmitNeonCall(
+ CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy), Ops,
+ "vrecps");
case NEON::BI__builtin_neon_vrecpsh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
- Ops, "vrecps");
+ return EmitNeonCall(
+ CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy), Ops,
+ "vrecps");
case NEON::BI__builtin_neon_vqshrun_n_v:
Int = Intrinsic::aarch64_neon_sqshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
@@ -12510,13 +13000,15 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_sqrshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
case NEON::BI__builtin_neon_vqshrn_n_v:
- Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
+ Int =
+ usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
case NEON::BI__builtin_neon_vrshrn_n_v:
Int = Intrinsic::aarch64_neon_rshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
case NEON::BI__builtin_neon_vqrshrn_n_v:
- Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
+ Int = usgn ? Intrinsic::aarch64_neon_uqrshrn
+ : Intrinsic::aarch64_neon_sqrshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
case NEON::BI__builtin_neon_vrndah_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
@@ -12699,8 +13191,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvtaq_s64_v:
case NEON::BI__builtin_neon_vcvta_u64_v:
case NEON::BI__builtin_neon_vcvtaq_u64_v: {
- Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ Int =
+ usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
+ llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
}
case NEON::BI__builtin_neon_vcvtm_s16_f16:
@@ -12715,8 +13208,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvtmq_s64_v:
case NEON::BI__builtin_neon_vcvtm_u64_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
- Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ Int =
+ usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
+ llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
}
case NEON::BI__builtin_neon_vcvtn_s16_f16:
@@ -12731,8 +13225,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvtnq_s64_v:
case NEON::BI__builtin_neon_vcvtn_u64_v:
case NEON::BI__builtin_neon_vcvtnq_u64_v: {
- Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ Int =
+ usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
+ llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
}
case NEON::BI__builtin_neon_vcvtp_s16_f16:
@@ -12747,8 +13242,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvtpq_s64_v:
case NEON::BI__builtin_neon_vcvtp_u64_v:
case NEON::BI__builtin_neon_vcvtpq_u64_v: {
- Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
+ Int =
+ usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
+ llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
}
case NEON::BI__builtin_neon_vmulx_v:
@@ -12822,7 +13318,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
@@ -12834,7 +13330,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -12846,7 +13342,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
@@ -12858,7 +13354,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -12867,7 +13363,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
@@ -12876,7 +13372,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -12885,7 +13381,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
@@ -12894,7 +13390,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -12903,7 +13399,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
@@ -12912,7 +13408,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -12921,7 +13417,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
@@ -12930,7 +13426,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -12939,7 +13435,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], HalfTy);
@@ -12948,7 +13444,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], HalfTy);
@@ -12957,7 +13453,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
@@ -12966,7 +13462,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -12975,7 +13471,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
@@ -12984,7 +13480,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -12993,7 +13489,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
@@ -13002,7 +13498,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -13011,7 +13507,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
@@ -13020,7 +13516,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -13029,7 +13525,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], HalfTy);
@@ -13038,7 +13534,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], HalfTy);
@@ -13047,7 +13543,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fmaxnmv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
@@ -13056,7 +13552,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fmaxnmv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
@@ -13065,7 +13561,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fminnmv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
@@ -13074,7 +13570,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fminnmv;
Ty = HalfTy;
VTy = llvm::FixedVectorType::get(HalfTy, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
@@ -13088,7 +13584,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -13097,7 +13593,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
@@ -13105,7 +13601,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -13114,7 +13610,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
@@ -13122,7 +13618,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -13131,7 +13627,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
@@ -13139,7 +13635,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
@@ -13148,7 +13644,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
+ llvm::Type *Tys[2] = {Ty, VTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
@@ -13172,10 +13668,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vrsra_n_v:
case NEON::BI__builtin_neon_vrsraq_n_v: {
Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
- SmallVector<llvm::Value*,2> TmpOps;
+ SmallVector<llvm::Value *, 2> TmpOps;
TmpOps.push_back(Ops[1]);
TmpOps.push_back(Ops[2]);
- Function* F = CGM.getIntrinsic(Int, Ty);
+ Function *F = CGM.getIntrinsic(Int, Ty);
llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
return Builder.CreateAdd(Ops[0], tmp);
@@ -13271,7 +13767,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v: {
- llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
+ llvm::Type *Tys[2] = {VTy, Ops[1]->getType()};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -13282,7 +13778,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v: {
- llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
+ llvm::Type *Tys[2] = {VTy, Ops[1]->getType()};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -13294,7 +13790,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v: {
- llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
+ llvm::Type *Tys[2] = {VTy, Ops[1]->getType()};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -13308,45 +13804,45 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
- llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
- Ops, "");
+ llvm::Type *Tys[2] = {VTy, Ops[2]->getType()};
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys), Ops,
+ "");
}
case NEON::BI__builtin_neon_vst2_lane_v:
case NEON::BI__builtin_neon_vst2q_lane_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
- llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
+ llvm::Type *Tys[2] = {VTy, Ops[3]->getType()};
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst3_v:
case NEON::BI__builtin_neon_vst3q_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
- llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
- Ops, "");
+ llvm::Type *Tys[2] = {VTy, Ops[3]->getType()};
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys), Ops,
+ "");
}
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
- llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
+ llvm::Type *Tys[2] = {VTy, Ops[4]->getType()};
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst4_v:
case NEON::BI__builtin_neon_vst4q_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
- llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
- Ops, "");
+ llvm::Type *Tys[2] = {VTy, Ops[4]->getType()};
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys), Ops,
+ "");
}
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v: {
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
- llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
+ llvm::Type *Tys[2] = {VTy, Ops[5]->getType()};
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
Ops, "");
}
@@ -13359,8 +13855,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
- Indices.push_back(i+vi);
- Indices.push_back(i+e+vi);
+ Indices.push_back(i + vi);
+ Indices.push_back(i + e + vi);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
@@ -13377,7 +13873,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
- Indices.push_back(2*i+vi);
+ Indices.push_back(2 * i + vi);
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
@@ -13394,8 +13890,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<int, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
- Indices.push_back((i + vi*e) >> 1);
- Indices.push_back(((i + vi*e) >> 1)+e);
+ Indices.push_back((i + vi * e) >> 1);
+ Indices.push_back(((i + vi * e) >> 1) + e);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
@@ -13404,36 +13900,36 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return SV;
}
case NEON::BI__builtin_neon_vqtbl1q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
- Ops, "vtbl1");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty), Ops,
+ "vtbl1");
}
case NEON::BI__builtin_neon_vqtbl2q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
- Ops, "vtbl2");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty), Ops,
+ "vtbl2");
}
case NEON::BI__builtin_neon_vqtbl3q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
- Ops, "vtbl3");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty), Ops,
+ "vtbl3");
}
case NEON::BI__builtin_neon_vqtbl4q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
- Ops, "vtbl4");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty), Ops,
+ "vtbl4");
}
case NEON::BI__builtin_neon_vqtbx1q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
- Ops, "vtbx1");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty), Ops,
+ "vtbx1");
}
case NEON::BI__builtin_neon_vqtbx2q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
- Ops, "vtbx2");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty), Ops,
+ "vtbx2");
}
case NEON::BI__builtin_neon_vqtbx3q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
- Ops, "vtbx3");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty), Ops,
+ "vtbx3");
}
case NEON::BI__builtin_neon_vqtbx4q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
- Ops, "vtbx4");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty), Ops,
+ "vtbx4");
}
case NEON::BI__builtin_neon_vsqadd_v:
case NEON::BI__builtin_neon_vsqaddq_v: {
@@ -13662,8 +14158,7 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
}
}
-llvm::Value *CodeGenFunction::
-BuildVector(ArrayRef<llvm::Value*> Ops) {
+llvm::Value *CodeGenFunction::BuildVector(ArrayRef<llvm::Value *> Ops) {
assert((Ops.size() & (Ops.size() - 1)) == 0 &&
"Not a power-of-two sized vector!");
bool AllConstants = true;
@@ -13672,7 +14167,7 @@ BuildVector(ArrayRef<llvm::Value*> Ops) {
// If this is a constant vector, create a ConstantVector.
if (AllConstants) {
- SmallVector<llvm::Constant*, 16> CstOps;
+ SmallVector<llvm::Constant *, 16> CstOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
CstOps.push_back(cast<Constant>(Ops[i]));
return llvm::ConstantVector::get(CstOps);
@@ -13731,21 +14226,19 @@ static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]);
}
-static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops) {
+static Value *EmitX86ExpandLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {
auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
Value *Ptr = Ops[0];
Value *MaskVec = getMaskVecValue(
CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
- llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
- ResultTy);
- return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
+ llvm::Function *F =
+ CGF.CGM.getIntrinsic(Intrinsic::masked_expandload, ResultTy);
+ return CGF.Builder.CreateCall(F, {Ptr, MaskVec, Ops[1]});
}
-static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops,
+static Value *EmitX86CompressExpand(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
bool IsCompress) {
auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
@@ -13754,7 +14247,7 @@ static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
: Intrinsic::x86_avx512_mask_expand;
llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
- return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
+ return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], MaskVec});
}
static Value *EmitX86CompressStore(CodeGenFunction &CGF,
@@ -13764,14 +14257,13 @@ static Value *EmitX86CompressStore(CodeGenFunction &CGF,
Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
- llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
- ResultTy);
- return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
+ llvm::Function *F =
+ CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore, ResultTy);
+ return CGF.Builder.CreateCall(F, {Ops[1], Ptr, MaskVec});
}
static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
- ArrayRef<Value *> Ops,
- bool InvertLHS = false) {
+ ArrayRef<Value *> Ops, bool InvertLHS = false) {
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
@@ -13841,8 +14333,8 @@ static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
return Res;
}
-static Value *EmitX86Select(CodeGenFunction &CGF,
- Value *Mask, Value *Op0, Value *Op1) {
+static Value *EmitX86Select(CodeGenFunction &CGF, Value *Mask, Value *Op0,
+ Value *Op1) {
// If the mask is all ones just return first argument.
if (const auto *C = dyn_cast<Constant>(Mask))
@@ -13855,8 +14347,8 @@ static Value *EmitX86Select(CodeGenFunction &CGF,
return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}
-static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
- Value *Mask, Value *Op0, Value *Op1) {
+static Value *EmitX86ScalarSelect(CodeGenFunction &CGF, Value *Mask, Value *Op0,
+ Value *Op1) {
// If the mask is all ones just return first argument.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
@@ -13887,9 +14379,8 @@ static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
}
- return CGF.Builder.CreateBitCast(Cmp,
- IntegerType::get(CGF.getLLVMContext(),
- std::max(NumElts, 8U)));
+ return CGF.Builder.CreateBitCast(
+ Cmp, IntegerType::get(CGF.getLLVMContext(), std::max(NumElts, 8U)));
}
static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
@@ -13909,13 +14400,26 @@ static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
} else {
ICmpInst::Predicate Pred;
switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case 0: Pred = ICmpInst::ICMP_EQ; break;
- case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
- case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
- case 4: Pred = ICmpInst::ICMP_NE; break;
- case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
- case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
+ default:
+ llvm_unreachable("Unknown condition code");
+ case 0:
+ Pred = ICmpInst::ICMP_EQ;
+ break;
+ case 1:
+ Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
+ break;
+ case 2:
+ Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
+ break;
+ case 4:
+ Pred = ICmpInst::ICMP_NE;
+ break;
+ case 5:
+ Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
+ break;
+ case 6:
+ Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+ break;
}
Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
}
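The predicate table in the switch above can be read as a small pure function. A sketch of the mapping it implements (ccToPredicate is a hypothetical name, not part of the patch; the always-false/always-true encodings, CC 3 and 7, are handled before this switch is reached):

static llvm::ICmpInst::Predicate ccToPredicate(unsigned CC, bool Signed) {
  switch (CC) {
  case 0: return llvm::ICmpInst::ICMP_EQ;
  case 1: return Signed ? llvm::ICmpInst::ICMP_SLT : llvm::ICmpInst::ICMP_ULT;
  case 2: return Signed ? llvm::ICmpInst::ICMP_SLE : llvm::ICmpInst::ICMP_ULE;
  case 4: return llvm::ICmpInst::ICMP_NE;
  case 5: return Signed ? llvm::ICmpInst::ICMP_SGE : llvm::ICmpInst::ICMP_UGE;
  case 6: return Signed ? llvm::ICmpInst::ICMP_SGT : llvm::ICmpInst::ICMP_UGT;
  default: llvm_unreachable("Unknown condition code");
  }
}
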
@@ -13929,7 +14433,7 @@ static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
Value *Zero = Constant::getNullValue(In->getType());
- return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
+ return EmitX86MaskedCompare(CGF, 1, true, {In, Zero});
}
static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
@@ -13941,8 +14445,8 @@ static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
if (Rnd != 4) {
Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
: Intrinsic::x86_avx512_uitofp_round;
- Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
- Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
+ Function *F = CGF.CGM.getIntrinsic(IID, {Ty, Ops[0]->getType()});
+ Res = CGF.Builder.CreateCall(F, {Ops[0], Ops[3]});
} else {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
@@ -13960,7 +14464,8 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
bool Subtract = false;
Intrinsic::ID IID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- default: break;
+ default:
+ break;
case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
Subtract = true;
[[fallthrough]];
@@ -13983,14 +14488,16 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
- IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
+ IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512;
+ break;
case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
Subtract = true;
[[fallthrough]];
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
- IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
+ IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512;
+ break;
case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
Subtract = true;
[[fallthrough]];
@@ -14071,7 +14578,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
(cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
IsAddSub)) {
Function *Intr = CGF.CGM.getIntrinsic(IID);
- Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
+ Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back()});
} else {
llvm::Type *Ty = A->getType();
Function *FMA;
@@ -14194,8 +14701,8 @@ static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
}
// If we have more than 3 arguments, we need to do masking.
if (Ops.size() > 3) {
- Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
- : Ops[PTIdx];
+ Value *PassThru =
+ ZeroMask ? Constant::getNullValue(Res->getType()) : Ops[PTIdx];
// If we negated the accumulator and it's the PassThru value, we need to

// bypass the negate. Conveniently Upper should be the same thing in this
@@ -14259,8 +14766,8 @@ static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
else
llvm_unreachable("Unexpected intrinsic");
- Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
- Ops.drop_back());
+ Value *Ternlog =
+ CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), Ops.drop_back());
Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
}
@@ -14356,12 +14863,11 @@ Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
ConstantInt::get(Int32Ty, Index)};
llvm::Value *CpuValue = Builder.CreateInBoundsGEP(STy, CpuModel, Idxs);
- CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue,
- CharUnits::fromQuantity(4));
+ CpuValue =
+ Builder.CreateAlignedLoad(Int32Ty, CpuValue, CharUnits::fromQuantity(4));
// Check the value of the field against the requested value.
- return Builder.CreateICmpEQ(CpuValue,
- llvm::ConstantInt::get(Int32Ty, Value));
+ return Builder.CreateICmpEQ(CpuValue, llvm::ConstantInt::get(Int32Ty, Value));
}
Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
@@ -14578,7 +15084,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
if (std::optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
return EmitMSVCBuiltinExpr(*MsvcIntId, E);
- SmallVector<Value*, 4> Ops;
+ SmallVector<Value *, 4> Ops;
bool IsMaskFCmp = false;
bool IsConjFMA = false;
@@ -14624,7 +15130,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
};
switch (BuiltinID) {
- default: return nullptr;
+ default:
+ return nullptr;
case X86::BI_mm_prefetch: {
Value *Address = Ops[0];
ConstantInt *C = cast<ConstantInt>(Ops[1]);
@@ -14744,32 +15251,33 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_xsetbv:
case X86::BI_xsetbv: {
Intrinsic::ID ID;
-#define INTRINSIC_X86_XSAVE_ID(NAME) \
- case X86::BI__builtin_ia32_##NAME: \
- ID = Intrinsic::x86_##NAME; \
- break
+#define INTRINSIC_X86_XSAVE_ID(NAME) \
+ case X86::BI__builtin_ia32_##NAME: \
+ ID = Intrinsic::x86_##NAME; \
+ break
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- INTRINSIC_X86_XSAVE_ID(xsave);
- INTRINSIC_X86_XSAVE_ID(xsave64);
- INTRINSIC_X86_XSAVE_ID(xrstor);
- INTRINSIC_X86_XSAVE_ID(xrstor64);
- INTRINSIC_X86_XSAVE_ID(xsaveopt);
- INTRINSIC_X86_XSAVE_ID(xsaveopt64);
- INTRINSIC_X86_XSAVE_ID(xrstors);
- INTRINSIC_X86_XSAVE_ID(xrstors64);
- INTRINSIC_X86_XSAVE_ID(xsavec);
- INTRINSIC_X86_XSAVE_ID(xsavec64);
- INTRINSIC_X86_XSAVE_ID(xsaves);
- INTRINSIC_X86_XSAVE_ID(xsaves64);
- INTRINSIC_X86_XSAVE_ID(xsetbv);
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
+ INTRINSIC_X86_XSAVE_ID(xsave);
+ INTRINSIC_X86_XSAVE_ID(xsave64);
+ INTRINSIC_X86_XSAVE_ID(xrstor);
+ INTRINSIC_X86_XSAVE_ID(xrstor64);
+ INTRINSIC_X86_XSAVE_ID(xsaveopt);
+ INTRINSIC_X86_XSAVE_ID(xsaveopt64);
+ INTRINSIC_X86_XSAVE_ID(xrstors);
+ INTRINSIC_X86_XSAVE_ID(xrstors64);
+ INTRINSIC_X86_XSAVE_ID(xsavec);
+ INTRINSIC_X86_XSAVE_ID(xsavec64);
+ INTRINSIC_X86_XSAVE_ID(xsaves);
+ INTRINSIC_X86_XSAVE_ID(xsaves64);
+ INTRINSIC_X86_XSAVE_ID(xsetbv);
case X86::BI_xsetbv:
ID = Intrinsic::x86_xsetbv;
break;
}
#undef INTRINSIC_X86_XSAVE_ID
Value *Mhi = Builder.CreateTrunc(
- Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
+ Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
Ops[1] = Mhi;
Ops.push_back(Mlo);
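The Mhi/Mlo split above reflects how the xsave-family intrinsics take their argument: the builtin receives one i64 feature mask, but the intrinsic wants two i32 halves, high word first. As a standalone sketch with a hypothetical builder B and i64 mask value Mask64:

llvm::Value *Hi =
    B.CreateTrunc(B.CreateLShr(Mask64, B.getInt64(32)), B.getInt32Ty());
llvm::Value *Lo = B.CreateTrunc(Mask64, B.getInt32Ty());
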
@@ -15074,7 +15582,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_expandqi128_mask:
case X86::BI__builtin_ia32_expandqi256_mask:
case X86::BI__builtin_ia32_expandqi512_mask:
- return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
+ return EmitX86CompressExpand(*this, Ops, /*IsCompress*/ false);
case X86::BI__builtin_ia32_compressdf128_mask:
case X86::BI__builtin_ia32_compressdf256_mask:
@@ -15094,7 +15602,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_compressqi128_mask:
case X86::BI__builtin_ia32_compressqi256_mask:
case X86::BI__builtin_ia32_compressqi512_mask:
- return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
+ return EmitX86CompressExpand(*this, Ops, /*IsCompress*/ true);
case X86::BI__builtin_ia32_gather3div2df:
case X86::BI__builtin_ia32_gather3div2di:
@@ -15122,7 +15630,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_gatherdiv16si: {
Intrinsic::ID IID;
switch (BuiltinID) {
- default: llvm_unreachable("Unexpected builtin");
+ default:
+ llvm_unreachable("Unexpected builtin");
case X86::BI__builtin_ia32_gather3div2df:
IID = Intrinsic::x86_avx512_mask_gather3div2_df;
break;
@@ -15231,7 +15740,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_scattersiv8si: {
Intrinsic::ID IID;
switch (BuiltinID) {
- default: llvm_unreachable("Unexpected builtin");
+ default:
+ llvm_unreachable("Unexpected builtin");
case X86::BI__builtin_ia32_scattersiv8df:
IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
break;
@@ -15409,7 +15919,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Intrinsic::ID IID;
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_pmovdb512_mask:
IID = Intrinsic::x86_avx512_mask_pmov_db_512;
break;
@@ -15675,14 +16186,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
else
OutOps[l] = Ops[0];
- for (unsigned i = 0; i != NumElts/2; ++i) {
+ for (unsigned i = 0; i != NumElts / 2; ++i) {
// Start with ith element of the source for this lane.
unsigned Idx = (l * NumElts) + i;
// If bit 0 of the immediate half is set, switch to the high half of
// the source.
if (Imm & (1 << (l * 4)))
- Idx += NumElts/2;
- Indices[(l * (NumElts/2)) + i] = Idx;
+ Idx += NumElts / 2;
+ Indices[(l * (NumElts / 2)) + i] = Idx;
}
}
@@ -15707,7 +16218,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = NumElts + i - ShiftVal;
- if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
+ if (Idx < NumElts)
+ Idx -= NumElts - 16; // end of lane, switch operand.
Indices[l + i] = Idx + l;
}
}
@@ -15736,7 +16248,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = i + ShiftVal;
- if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
+ if (Idx >= 16)
+ Idx += NumElts - 16; // end of lane, switch operand.
Indices[l + i] = Idx + l;
}
}
@@ -15950,7 +16463,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_ktestzdi: {
Intrinsic::ID IID;
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_ktestcqi:
IID = Intrinsic::x86_avx512_ktestc_b;
break;
@@ -15990,7 +16504,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_kadddi: {
Intrinsic::ID IID;
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_kaddqi:
IID = Intrinsic::x86_avx512_kadd_b;
break;
@@ -16036,15 +16551,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_kxorhi:
case X86::BI__builtin_ia32_kxorsi:
case X86::BI__builtin_ia32_kxordi:
- return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
+ return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
case X86::BI__builtin_ia32_knotqi:
case X86::BI__builtin_ia32_knothi:
case X86::BI__builtin_ia32_knotsi:
case X86::BI__builtin_ia32_knotdi: {
unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
- return Builder.CreateBitCast(Builder.CreateNot(Res),
- Ops[0]->getType());
+ return Builder.CreateBitCast(Builder.CreateNot(Res), Ops[0]->getType());
}
case X86::BI__builtin_ia32_kmovb:
case X86::BI__builtin_ia32_kmovw:
@@ -16086,7 +16600,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vplzcntq_256:
case X86::BI__builtin_ia32_vplzcntq_512: {
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
+ return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
}
case X86::BI__builtin_ia32_sqrtss:
case X86::BI__builtin_ia32_sqrtsd: {
@@ -16191,12 +16705,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pmuludq128:
case X86::BI__builtin_ia32_pmuludq256:
case X86::BI__builtin_ia32_pmuludq512:
- return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
+ return EmitX86Muldq(*this, /*IsSigned*/ false, Ops);
case X86::BI__builtin_ia32_pmuldq128:
case X86::BI__builtin_ia32_pmuldq256:
case X86::BI__builtin_ia32_pmuldq512:
- return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
+ return EmitX86Muldq(*this, /*IsSigned*/ true, Ops);
case X86::BI__builtin_ia32_pternlogd512_mask:
case X86::BI__builtin_ia32_pternlogq512_mask:
@@ -16204,7 +16718,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pternlogd256_mask:
case X86::BI__builtin_ia32_pternlogq128_mask:
case X86::BI__builtin_ia32_pternlogq256_mask:
- return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
+ return EmitX86Ternlog(*this, /*ZeroMask*/ false, Ops);
case X86::BI__builtin_ia32_pternlogd512_maskz:
case X86::BI__builtin_ia32_pternlogq512_maskz:
@@ -16212,7 +16726,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pternlogd256_maskz:
case X86::BI__builtin_ia32_pternlogq128_maskz:
case X86::BI__builtin_ia32_pternlogq256_maskz:
- return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
+ return EmitX86Ternlog(*this, /*ZeroMask*/ true, Ops);
case X86::BI__builtin_ia32_vpshldd128:
case X86::BI__builtin_ia32_vpshldd256:
@@ -16314,7 +16828,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_rdseed64_step: {
Intrinsic::ID ID;
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_rdrand16_step:
ID = Intrinsic::x86_rdrand_16;
break;
@@ -16346,7 +16861,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_subborrow_u64: {
Intrinsic::ID IID;
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_addcarryx_u32:
IID = Intrinsic::x86_addcarry_32;
break;
@@ -16361,8 +16877,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
- Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
- { Ops[0], Ops[1], Ops[2] });
+ Value *Call =
+ Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
Ops[3]);
return Builder.CreateExtractValue(Call, 0);
@@ -16387,7 +16903,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID;
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_vfpclasspbf16128_mask:
ID = Intrinsic::x86_avx10_fpclass_nepbf16_128;
break;
@@ -16441,7 +16958,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID;
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_vp2intersect_q_512:
ID = Intrinsic::x86_avx512_vp2intersect_q_512;
break;
@@ -16477,7 +16995,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vpmultishiftqb512: {
Intrinsic::ID ID;
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_vpmultishiftqb128:
ID = Intrinsic::x86_avx512_pmultishift_qb_128;
break;
@@ -16502,7 +17021,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID;
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
break;
@@ -16521,28 +17041,28 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// packed comparison intrinsics
case X86::BI__builtin_ia32_cmpeqps:
case X86::BI__builtin_ia32_cmpeqpd:
- return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
+ return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/ false);
case X86::BI__builtin_ia32_cmpltps:
case X86::BI__builtin_ia32_cmpltpd:
- return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
+ return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/ true);
case X86::BI__builtin_ia32_cmpleps:
case X86::BI__builtin_ia32_cmplepd:
- return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
+ return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/ true);
case X86::BI__builtin_ia32_cmpunordps:
case X86::BI__builtin_ia32_cmpunordpd:
- return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
+ return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/ false);
case X86::BI__builtin_ia32_cmpneqps:
case X86::BI__builtin_ia32_cmpneqpd:
- return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
+ return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/ false);
case X86::BI__builtin_ia32_cmpnltps:
case X86::BI__builtin_ia32_cmpnltpd:
- return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
+ return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/ true);
case X86::BI__builtin_ia32_cmpnleps:
case X86::BI__builtin_ia32_cmpnlepd:
- return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
+ return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/ true);
case X86::BI__builtin_ia32_cmpordps:
case X86::BI__builtin_ia32_cmpordpd:
- return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
+ return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/ false);
case X86::BI__builtin_ia32_cmpph128_mask:
case X86::BI__builtin_ia32_cmpph256_mask:
case X86::BI__builtin_ia32_cmpph512_mask:
@@ -16581,23 +17101,72 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Predicates for 16-31 repeat the 0-15 predicates. Only the signalling
// behavior is inverted. We'll handle that after the switch.
switch (CC & 0xf) {
- case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
- case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
- case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
- case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
- case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
- case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
- case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
- case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
- case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
- case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
- case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
- case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
- case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
- case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
- case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
- case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
- default: llvm_unreachable("Unhandled CC");
+ case 0x00:
+ Pred = FCmpInst::FCMP_OEQ;
+ IsSignaling = false;
+ break;
+ case 0x01:
+ Pred = FCmpInst::FCMP_OLT;
+ IsSignaling = true;
+ break;
+ case 0x02:
+ Pred = FCmpInst::FCMP_OLE;
+ IsSignaling = true;
+ break;
+ case 0x03:
+ Pred = FCmpInst::FCMP_UNO;
+ IsSignaling = false;
+ break;
+ case 0x04:
+ Pred = FCmpInst::FCMP_UNE;
+ IsSignaling = false;
+ break;
+ case 0x05:
+ Pred = FCmpInst::FCMP_UGE;
+ IsSignaling = true;
+ break;
+ case 0x06:
+ Pred = FCmpInst::FCMP_UGT;
+ IsSignaling = true;
+ break;
+ case 0x07:
+ Pred = FCmpInst::FCMP_ORD;
+ IsSignaling = false;
+ break;
+ case 0x08:
+ Pred = FCmpInst::FCMP_UEQ;
+ IsSignaling = false;
+ break;
+ case 0x09:
+ Pred = FCmpInst::FCMP_ULT;
+ IsSignaling = true;
+ break;
+ case 0x0a:
+ Pred = FCmpInst::FCMP_ULE;
+ IsSignaling = true;
+ break;
+ case 0x0b:
+ Pred = FCmpInst::FCMP_FALSE;
+ IsSignaling = false;
+ break;
+ case 0x0c:
+ Pred = FCmpInst::FCMP_ONE;
+ IsSignaling = false;
+ break;
+ case 0x0d:
+ Pred = FCmpInst::FCMP_OGE;
+ IsSignaling = true;
+ break;
+ case 0x0e:
+ Pred = FCmpInst::FCMP_OGT;
+ IsSignaling = true;
+ break;
+ case 0x0f:
+ Pred = FCmpInst::FCMP_TRUE;
+ IsSignaling = false;
+ break;
+ default:
+ llvm_unreachable("Unhandled CC");
}
// Invert the signalling behavior for 16-31.
@@ -16615,7 +17184,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Intrinsic::ID IID;
switch (BuiltinID) {
- default: llvm_unreachable("Unexpected builtin");
+ default:
+ llvm_unreachable("Unexpected builtin");
case X86::BI__builtin_ia32_cmpps:
IID = Intrinsic::x86_sse_cmp_ps;
break;
@@ -16748,7 +17318,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
Intrinsic::ID IID;
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
break;
@@ -17166,7 +17737,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
};
switch (BuiltinID) {
- default: return nullptr;
+ default:
+ return nullptr;
case Builtin::BI__builtin_cpu_is: {
const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
@@ -17218,13 +17790,13 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
unsigned>
CPUSupportType;
std::tie(SupportMethod, FieldIdx, Mask, CompOp, Value) =
- static_cast<CPUSupportType>(StringSwitch<CPUSupportType>(CPUStr)
+ static_cast<CPUSupportType>(
+ StringSwitch<CPUSupportType>(CPUStr)
#define PPC_AIX_FEATURE(NAME, DESC, SUPPORT_METHOD, INDEX, MASK, COMP_OP, \
VALUE) \
.Case(NAME, {SUPPORT_METHOD, INDEX, MASK, COMP_OP, VALUE})
#include "llvm/TargetParser/PPCTargetParser.def"
- .Default({BUILTIN_PPC_FALSE, 0, 0,
- CmpInst::Predicate(), 0}));
+ .Default({BUILTIN_PPC_FALSE, 0, 0, CmpInst::Predicate(), 0}));
return GenAIXPPCBuiltinCpuExpr(SupportMethod, FieldIdx, Mask, CompOp,
Value);
}
@@ -17270,8 +17842,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_lxvd2x_be:
case PPC::BI__builtin_vsx_lxvw4x_be:
case PPC::BI__builtin_vsx_lxvl:
- case PPC::BI__builtin_vsx_lxvll:
- {
+ case PPC::BI__builtin_vsx_lxvll: {
SmallVector<Value *, 2> Ops;
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops.push_back(EmitScalarExpr(E->getArg(1)));
@@ -17282,7 +17853,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
}
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
+ default:
+ llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
case PPC::BI__builtin_altivec_lvx:
ID = Intrinsic::ppc_altivec_lvx;
break;
@@ -17338,8 +17910,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_stxvd2x_be:
case PPC::BI__builtin_vsx_stxvw4x_be:
case PPC::BI__builtin_vsx_stxvl:
- case PPC::BI__builtin_vsx_stxvll:
- {
+ case PPC::BI__builtin_vsx_stxvll: {
SmallVector<Value *, 3> Ops;
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops.push_back(EmitScalarExpr(E->getArg(1)));
@@ -17351,7 +17922,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
}
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported st intrinsic!");
+ default:
+ llvm_unreachable("Unsupported st intrinsic!");
case PPC::BI__builtin_altivec_stvx:
ID = Intrinsic::ppc_altivec_stvx;
break;
@@ -17426,7 +17998,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Op0 = IsLE ? HiLd : LoLd;
Op1 = IsLE ? LoLd : HiLd;
Value *AllElts = Builder.CreateCall(Vperm, {Op0, Op1, Mask1}, "shuffle1");
- Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType());
+ Constant *Zero =
+ llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType());
if (IsLE) {
SmallVector<int, 16> Consts;
@@ -17726,8 +18299,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
+ Result =
+ Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast");
return Result;
}
case PPC::BI__builtin_ppc_cmpb: {
@@ -17903,39 +18476,38 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
else
F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
switch (BuiltinID) {
- case PPC::BI__builtin_vsx_xvmaddadp:
- case PPC::BI__builtin_vsx_xvmaddasp:
- if (Builder.getIsFPConstrained())
- return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
- else
- return Builder.CreateCall(F, {X, Y, Z});
- case PPC::BI__builtin_vsx_xvnmaddadp:
- case PPC::BI__builtin_vsx_xvnmaddasp:
- if (Builder.getIsFPConstrained())
- return Builder.CreateFNeg(
- Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
- else
- return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
- case PPC::BI__builtin_vsx_xvmsubadp:
- case PPC::BI__builtin_vsx_xvmsubasp:
- if (Builder.getIsFPConstrained())
- return Builder.CreateConstrainedFPCall(
- F, {X, Y, Builder.CreateFNeg(Z, "neg")});
- else
- return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
- case PPC::BI__builtin_ppc_fnmsub:
- case PPC::BI__builtin_ppc_fnmsubs:
- case PPC::BI__builtin_vsx_xvnmsubadp:
- case PPC::BI__builtin_vsx_xvnmsubasp:
- if (Builder.getIsFPConstrained())
- return Builder.CreateFNeg(
- Builder.CreateConstrainedFPCall(
- F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
- "neg");
- else
- return Builder.CreateCall(
- CGM.getIntrinsic(Intrinsic::ppc_fnmsub, ResultType), {X, Y, Z});
- }
+ case PPC::BI__builtin_vsx_xvmaddadp:
+ case PPC::BI__builtin_vsx_xvmaddasp:
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
+ else
+ return Builder.CreateCall(F, {X, Y, Z});
+ case PPC::BI__builtin_vsx_xvnmaddadp:
+ case PPC::BI__builtin_vsx_xvnmaddasp:
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}),
+ "neg");
+ else
+ return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
+ case PPC::BI__builtin_vsx_xvmsubadp:
+ case PPC::BI__builtin_vsx_xvmsubasp:
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateConstrainedFPCall(
+ F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ else
+ return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ case PPC::BI__builtin_ppc_fnmsub:
+ case PPC::BI__builtin_ppc_fnmsubs:
+ case PPC::BI__builtin_vsx_xvnmsubadp:
+ case PPC::BI__builtin_vsx_xvnmsubasp:
+ if (Builder.getIsFPConstrained())
+ return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(
+ F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
+ "neg");
+ else
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::ppc_fnmsub, ResultType), {X, Y, Z});
+ }
llvm_unreachable("Unknown FMA operation");
return nullptr; // Suppress no-return warning
}
@@ -18115,90 +18687,90 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// use custom code generation to expand a builtin call with a pointer to a
// load (if the corresponding instruction accumulates its result) followed by
// the call to the intrinsic and a store of the result.
-#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate, Feature) \
+#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate, Feature) \
case PPC::BI__builtin_##Name:
#include "clang/Basic/BuiltinsPPC.def"
- {
- SmallVector<Value *, 4> Ops;
- for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
- if (E->getArg(i)->getType()->isArrayType())
- Ops.push_back(
- EmitArrayToPointerDecay(E->getArg(i)).emitRawPointer(*this));
- else
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
- // The first argument of these two builtins is a pointer used to store their
- // result. However, the llvm intrinsics return their result in multiple
- // return values. So, here we emit code extracting these values from the
- // intrinsic results and storing them using that pointer.
- if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
- BuiltinID == PPC::BI__builtin_vsx_disassemble_pair ||
- BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
- unsigned NumVecs = 2;
- auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
- if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
- NumVecs = 4;
- Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
+ {
+ SmallVector<Value *, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ if (E->getArg(i)->getType()->isArrayType())
+ Ops.push_back(
+ EmitArrayToPointerDecay(E->getArg(i)).emitRawPointer(*this));
+ else
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ // The first argument of these two builtins is a pointer used to store
+ // their result. However, the llvm intrinsics return their result in
+ // multiple return values. So, here we emit code extracting these values
+ // from the intrinsic results and storing them using that pointer.
+ if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
+ BuiltinID == PPC::BI__builtin_vsx_disassemble_pair ||
+ BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
+ unsigned NumVecs = 2;
+ auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
+ if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
+ NumVecs = 4;
+ Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
+ }
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic);
+ Address Addr = EmitPointerWithAlignment(E->getArg(1));
+ Value *Vec = Builder.CreateLoad(Addr);
+ Value *Call = Builder.CreateCall(F, {Vec});
+ llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
+ Value *Ptr = Ops[0];
+ for (unsigned i = 0; i < NumVecs; i++) {
+ Value *Vec = Builder.CreateExtractValue(Call, i);
+ llvm::ConstantInt *Index = llvm::ConstantInt::get(IntTy, i);
+ Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index);
+ Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
+ }
+ return Call;
}
- llvm::Function *F = CGM.getIntrinsic(Intrinsic);
- Address Addr = EmitPointerWithAlignment(E->getArg(1));
- Value *Vec = Builder.CreateLoad(Addr);
- Value *Call = Builder.CreateCall(F, {Vec});
- llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- Value *Ptr = Ops[0];
- for (unsigned i=0; i<NumVecs; i++) {
- Value *Vec = Builder.CreateExtractValue(Call, i);
- llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i);
- Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index);
- Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
+ if (BuiltinID == PPC::BI__builtin_vsx_build_pair ||
+ BuiltinID == PPC::BI__builtin_mma_build_acc) {
+ // Reverse the order of the operands for LE, so the
+ // same builtin call can be used on both LE and BE
+ // without the need for the programmer to swap operands.
+ // The operands are reversed starting from the second argument,
+ // the first operand is the pointer to the pair/accumulator
+ // that is being built.
+ if (getTarget().isLittleEndian())
+ std::reverse(Ops.begin() + 1, Ops.end());
+ }
+ bool Accumulate;
+ switch (BuiltinID) {
+#define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
+ case PPC::BI__builtin_##Name: \
+ ID = Intrinsic::ppc_##Intr; \
+ Accumulate = Acc; \
+ break;
+#include "clang/Basic/BuiltinsPPC.def"
}
- return Call;
- }
- if (BuiltinID == PPC::BI__builtin_vsx_build_pair ||
- BuiltinID == PPC::BI__builtin_mma_build_acc) {
- // Reverse the order of the operands for LE, so the
- // same builtin call can be used on both LE and BE
- // without the need for the programmer to swap operands.
- // The operands are reversed starting from the second argument,
- // the first operand is the pointer to the pair/accumulator
- // that is being built.
- if (getTarget().isLittleEndian())
- std::reverse(Ops.begin() + 1, Ops.end());
- }
- bool Accumulate;
- switch (BuiltinID) {
- #define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
- case PPC::BI__builtin_##Name: \
- ID = Intrinsic::ppc_##Intr; \
- Accumulate = Acc; \
- break;
- #include "clang/Basic/BuiltinsPPC.def"
- }
- if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
- BuiltinID == PPC::BI__builtin_vsx_stxvp ||
- BuiltinID == PPC::BI__builtin_mma_lxvp ||
- BuiltinID == PPC::BI__builtin_mma_stxvp) {
if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
- BuiltinID == PPC::BI__builtin_mma_lxvp) {
- Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
- } else {
- Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
+ BuiltinID == PPC::BI__builtin_vsx_stxvp ||
+ BuiltinID == PPC::BI__builtin_mma_lxvp ||
+ BuiltinID == PPC::BI__builtin_mma_stxvp) {
+ if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
+ BuiltinID == PPC::BI__builtin_mma_lxvp) {
+ Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
+ } else {
+ Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
+ }
+ Ops.pop_back();
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
}
- Ops.pop_back();
+ SmallVector<Value *, 4> CallOps;
+ if (Accumulate) {
+ Address Addr = EmitPointerWithAlignment(E->getArg(0));
+ Value *Acc = Builder.CreateLoad(Addr);
+ CallOps.push_back(Acc);
+ }
+ for (unsigned i = 1; i < Ops.size(); i++)
+ CallOps.push_back(Ops[i]);
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, Ops, "");
- }
- SmallVector<Value*, 4> CallOps;
- if (Accumulate) {
- Address Addr = EmitPointerWithAlignment(E->getArg(0));
- Value *Acc = Builder.CreateLoad(Addr);
- CallOps.push_back(Acc);
+ Value *Call = Builder.CreateCall(F, CallOps);
+ return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
}
- for (unsigned i=1; i<Ops.size(); i++)
- CallOps.push_back(Ops[i]);
- llvm::Function *F = CGM.getIntrinsic(ID);
- Value *Call = Builder.CreateCall(F, CallOps);
- return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
- }
case PPC::BI__builtin_ppc_compare_and_swap:
case PPC::BI__builtin_ppc_compare_and_swaplp: {
@@ -18485,8 +19057,8 @@ Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
}
llvm::MDBuilder MDHelper(CGF.getLLVMContext());
- llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
- APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
+ llvm::MDNode *RNode = MDHelper.createRange(
+ APInt(16, 1), APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
LD->setMetadata(llvm::LLVMContext::MD_noundef,
llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
@@ -18716,8 +19288,8 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
return Builder.CreateIntrinsic(
/*ReturnType=*/Op0->getType(), CGM.getHLSLRuntime().getFracIntrinsic(),
ArrayRef<Value *>{Op0}, nullptr, "hlsl.frac");
-}
-case Builtin::BI__builtin_hlsl_elementwise_isinf: {
+ }
+ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
llvm::Type *Xty = Op0->getType();
llvm::Type *retType = llvm::Type::getInt1Ty(this->getLLVMContext());
@@ -18846,6 +19418,40 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
retType, CGM.getHLSLRuntime().getSignIntrinsic(),
ArrayRef<Value *>{Op0}, nullptr, "hlsl.sign");
}
+ // This should only be called when targeting DXIL
+ case Builtin::BI__builtin_hlsl_asuint_splitdouble: {
+ assert((E->getArg(0)->getType()->isDoubleType() &&
+ E->getArg(1)->getType()->isUnsignedIntegerType() &&
+ E->getArg(2)->getType()->isUnsignedIntegerType()) &&
+ "asuint operands types mismatch");
+
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *retType = llvm::StructType::get(Int32Ty, Int32Ty);
+ if (Op0->getType()->isVectorTy()) {
+ auto *XVecTy = E->getArg(0)->getType()->getAs<VectorType>();
+
+ llvm::VectorType *i32VecTy = llvm::VectorType::get(
+ Int32Ty, ElementCount::getFixed(XVecTy->getNumElements()));
+
+ retType = llvm::StructType::get(i32VecTy, i32VecTy);
+ }
+
+ CallInst *CI = Builder.CreateIntrinsic(
+ retType, llvm::Intrinsic::dx_asuint_splitdouble, {Op0}, nullptr,
+ "hlsl.asuint");
+
+ // Write the two result halves through the out parameters. The out
+ // arguments are assumed to be addressable uint lvalues here.
+ Value *LowBits = Builder.CreateExtractValue(CI, 0);
+ Value *HighBits = Builder.CreateExtractValue(CI, 1);
+
+ Address LowAddr = EmitPointerWithAlignment(E->getArg(1));
+ Address HighAddr = EmitPointerWithAlignment(E->getArg(2));
+
+ Builder.CreateStore(LowBits, LowAddr);
+ return Builder.CreateStore(HighBits, HighAddr);
+ }
}
return nullptr;
}
@@ -18889,8 +19495,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
llvm::Value *Z = EmitScalarExpr(E->getArg(2));
- llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
- X->getType());
+ llvm::Function *Callee =
+ CGM.getIntrinsic(Intrinsic::amdgcn_div_scale, X->getType());
llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
@@ -18910,8 +19516,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
- Src0->getType());
+ llvm::Function *F =
+ CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas, Src0->getType());
llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
}
@@ -19024,13 +19630,13 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
Value *Src0 = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
- { Builder.getInt32Ty(), Src0->getType() });
+ {Builder.getInt32Ty(), Src0->getType()});
return Builder.CreateCall(F, Src0);
}
case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
Value *Src0 = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
- { Builder.getInt16Ty(), Src0->getType() });
+ {Builder.getInt16Ty(), Src0->getType()});
return Builder.CreateCall(F, Src0);
}
case AMDGPU::BI__builtin_amdgcn_fract:
@@ -19051,8 +19657,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_ballot_w64: {
llvm::Type *ResultType = ConvertType(E->getType());
llvm::Value *Src = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, { ResultType });
- return Builder.CreateCall(F, { Src });
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, {ResultType});
+ return Builder.CreateCall(F, {Src});
}
case AMDGPU::BI__builtin_amdgcn_uicmp:
case AMDGPU::BI__builtin_amdgcn_uicmpl:
@@ -19064,8 +19670,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
// FIXME-GFX10: How should 32 bit mask be handled?
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
- { Builder.getInt64Ty(), Src0->getType() });
- return Builder.CreateCall(F, { Src0, Src1, Src2 });
+ {Builder.getInt64Ty(), Src0->getType()});
+ return Builder.CreateCall(F, {Src0, Src1, Src2});
}
case AMDGPU::BI__builtin_amdgcn_fcmp:
case AMDGPU::BI__builtin_amdgcn_fcmpf: {
@@ -19075,8 +19681,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
// FIXME-GFX10: How should 32 bit mask be handled?
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
- { Builder.getInt64Ty(), Src0->getType() });
- return Builder.CreateCall(F, { Src0, Src1, Src2 });
+ {Builder.getInt64Ty(), Src0->getType()});
+ return Builder.CreateCall(F, {Src0, Src1, Src2});
}
case AMDGPU::BI__builtin_amdgcn_class:
case AMDGPU::BI__builtin_amdgcn_classf:
@@ -19088,11 +19694,12 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
Intrinsic::amdgcn_fmed3);
case AMDGPU::BI__builtin_amdgcn_ds_append:
case AMDGPU::BI__builtin_amdgcn_ds_consume: {
- Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
- Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
+ Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append
+ ? Intrinsic::amdgcn_ds_append
+ : Intrinsic::amdgcn_ds_consume;
Value *Src0 = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
- return Builder.CreateCall(F, { Src0, Builder.getFalse() });
+ Function *F = CGM.getIntrinsic(Intrin, {Src0->getType()});
+ return Builder.CreateCall(F, {Src0, Builder.getFalse()});
}
case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_i32:
case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_v2i32:
@@ -19176,8 +19783,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
Value *Rtn = Builder.CreateExtractValue(Call, 0);
Value *A = Builder.CreateExtractValue(Call, 1);
llvm::Type *RetTy = ConvertType(E->getType());
- Value *I0 = Builder.CreateInsertElement(PoisonValue::get(RetTy), Rtn,
- (uint64_t)0);
+ Value *I0 =
+ Builder.CreateInsertElement(PoisonValue::get(RetTy), Rtn, (uint64_t)0);
return Builder.CreateInsertElement(I0, A, 1);
}
@@ -19449,7 +20056,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
- return Builder.CreateCall(F, { Src0, Src1, Src2 });
+ return Builder.CreateCall(F, {Src0, Src1, Src2});
}
case AMDGPU::BI__builtin_amdgcn_fence: {
ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
@@ -19694,10 +20301,10 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Data, Address});
}
- // Vector builtins. Note that most vector builtins are mapped automatically
- // to target-specific LLVM intrinsics. The ones handled specially here can
- // be represented via standard LLVM IR, which is preferable to enable common
- // LLVM optimizations.
+ // Vector builtins. Note that most vector builtins are mapped automatically
+ // to target-specific LLVM intrinsics. The ones handled specially here can
+ // be represented via standard LLVM IR, which is preferable to enable common
+ // LLVM optimizations.
case SystemZ::BI__builtin_s390_vpopctb:
case SystemZ::BI__builtin_s390_vpopcth:
@@ -19739,11 +20346,12 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
llvm::Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Value *Amt = EmitScalarExpr(E->getArg(1));
// Splat scalar rotate amount to vector type.
- unsigned NumElts = cast<llvm::FixedVectorType>(ResultType)->getNumElements();
+ unsigned NumElts =
+ cast<llvm::FixedVectorType>(ResultType)->getNumElements();
Amt = Builder.CreateIntCast(Amt, ResultType->getScalarType(), false);
Amt = Builder.CreateVectorSplat(NumElts, Amt);
Function *F = CGM.getIntrinsic(Intrinsic::fshl, ResultType);
- return Builder.CreateCall(F, { Src, Src, Amt });
+ return Builder.CreateCall(F, {Src, Src, Amt});
}
case SystemZ::BI__builtin_s390_verllvb:
@@ -19754,7 +20362,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
llvm::Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Value *Amt = EmitScalarExpr(E->getArg(1));
Function *F = CGM.getIntrinsic(Intrinsic::fshl, ResultType);
- return Builder.CreateCall(F, { Src, Src, Amt });
+ return Builder.CreateCall(F, {Src, Src, Amt});
}
case SystemZ::BI__builtin_s390_vfsqsb:
@@ -19762,8 +20370,9 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
if (Builder.getIsFPConstrained()) {
- Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
- return Builder.CreateConstrainedFPCall(F, { X });
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+ ResultType);
+ return Builder.CreateConstrainedFPCall(F, {X});
} else {
Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
return Builder.CreateCall(F, X);
@@ -19776,7 +20385,8 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
if (Builder.getIsFPConstrained()) {
- Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
} else {
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
@@ -19790,8 +20400,10 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
if (Builder.getIsFPConstrained()) {
- Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
- return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ return Builder.CreateConstrainedFPCall(
+ F, {X, Y, Builder.CreateFNeg(Z, "neg")});
} else {
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
@@ -19804,8 +20416,10 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
if (Builder.getIsFPConstrained()) {
- Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
- return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}),
+ "neg");
} else {
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
@@ -19818,9 +20432,11 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
if (Builder.getIsFPConstrained()) {
- Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
Value *NegZ = Builder.CreateFNeg(Z, "sub");
- return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
+ return Builder.CreateFNeg(
+ Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
} else {
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
Value *NegZ = Builder.CreateFNeg(Z, "neg");
@@ -19853,27 +20469,42 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID = Intrinsic::not_intrinsic;
Intrinsic::ID CI;
switch (M4.getZExtValue()) {
- default: break;
- case 0: // IEEE-inexact exception allowed
+ default:
+ break;
+ case 0: // IEEE-inexact exception allowed
switch (M5.getZExtValue()) {
- default: break;
- case 0: ID = Intrinsic::rint;
- CI = Intrinsic::experimental_constrained_rint; break;
+ default:
+ break;
+ case 0:
+ ID = Intrinsic::rint;
+ CI = Intrinsic::experimental_constrained_rint;
+ break;
}
break;
- case 4: // IEEE-inexact exception suppressed
+ case 4: // IEEE-inexact exception suppressed
switch (M5.getZExtValue()) {
- default: break;
- case 0: ID = Intrinsic::nearbyint;
- CI = Intrinsic::experimental_constrained_nearbyint; break;
- case 1: ID = Intrinsic::round;
- CI = Intrinsic::experimental_constrained_round; break;
- case 5: ID = Intrinsic::trunc;
- CI = Intrinsic::experimental_constrained_trunc; break;
- case 6: ID = Intrinsic::ceil;
- CI = Intrinsic::experimental_constrained_ceil; break;
- case 7: ID = Intrinsic::floor;
- CI = Intrinsic::experimental_constrained_floor; break;
+ default:
+ break;
+ case 0:
+ ID = Intrinsic::nearbyint;
+ CI = Intrinsic::experimental_constrained_nearbyint;
+ break;
+ case 1:
+ ID = Intrinsic::round;
+ CI = Intrinsic::experimental_constrained_round;
+ break;
+ case 5:
+ ID = Intrinsic::trunc;
+ CI = Intrinsic::experimental_constrained_trunc;
+ break;
+ case 6:
+ ID = Intrinsic::ceil;
+ CI = Intrinsic::experimental_constrained_ceil;
+ break;
+ case 7:
+ ID = Intrinsic::floor;
+ CI = Intrinsic::experimental_constrained_floor;
+ break;
}
break;
}
@@ -19887,9 +20518,14 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
}
}
switch (BuiltinID) { // FIXME: constrained version?
- case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
- case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
- default: llvm_unreachable("Unknown BuiltinID");
+ case SystemZ::BI__builtin_s390_vfisb:
+ ID = Intrinsic::s390_vfisb;
+ break;
+ case SystemZ::BI__builtin_s390_vfidb:
+ ID = Intrinsic::s390_vfidb;
+ break;
+ default:
+ llvm_unreachable("Unknown BuiltinID");
}
Function *F = CGM.getIntrinsic(ID);
Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
@@ -19908,9 +20544,12 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID = Intrinsic::not_intrinsic;
Intrinsic::ID CI;
switch (M4.getZExtValue()) {
- default: break;
- case 4: ID = Intrinsic::maxnum;
- CI = Intrinsic::experimental_constrained_maxnum; break;
+ default:
+ break;
+ case 4:
+ ID = Intrinsic::maxnum;
+ CI = Intrinsic::experimental_constrained_maxnum;
+ break;
}
if (ID != Intrinsic::not_intrinsic) {
if (Builder.getIsFPConstrained()) {
@@ -19922,9 +20561,14 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
}
}
switch (BuiltinID) {
- case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
- case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
- default: llvm_unreachable("Unknown BuiltinID");
+ case SystemZ::BI__builtin_s390_vfmaxsb:
+ ID = Intrinsic::s390_vfmaxsb;
+ break;
+ case SystemZ::BI__builtin_s390_vfmaxdb:
+ ID = Intrinsic::s390_vfmaxdb;
+ break;
+ default:
+ llvm_unreachable("Unknown BuiltinID");
}
Function *F = CGM.getIntrinsic(ID);
Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
@@ -19942,9 +20586,12 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID = Intrinsic::not_intrinsic;
Intrinsic::ID CI;
switch (M4.getZExtValue()) {
- default: break;
- case 4: ID = Intrinsic::minnum;
- CI = Intrinsic::experimental_constrained_minnum; break;
+ default:
+ break;
+ case 4:
+ ID = Intrinsic::minnum;
+ CI = Intrinsic::experimental_constrained_minnum;
+ break;
}
if (ID != Intrinsic::not_intrinsic) {
if (Builder.getIsFPConstrained()) {
@@ -19956,9 +20603,14 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
}
}
switch (BuiltinID) {
- case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
- case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
- default: llvm_unreachable("Unknown BuiltinID");
+ case SystemZ::BI__builtin_s390_vfminsb:
+ ID = Intrinsic::s390_vfminsb;
+ break;
+ case SystemZ::BI__builtin_s390_vfmindb:
+ ID = Intrinsic::s390_vfmindb;
+ break;
+ default:
+ llvm_unreachable("Unknown BuiltinID");
}
Function *F = CGM.getIntrinsic(ID);
Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
@@ -19976,86 +20628,86 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
// Vector intrinsics that output the post-instruction CC value.
-#define INTRINSIC_WITH_CC(NAME) \
- case SystemZ::BI__builtin_##NAME: \
- return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
+#define INTRINSIC_WITH_CC(NAME) \
+ case SystemZ::BI__builtin_##NAME: \
+ return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
- INTRINSIC_WITH_CC(s390_vpkshs);
- INTRINSIC_WITH_CC(s390_vpksfs);
- INTRINSIC_WITH_CC(s390_vpksgs);
+ INTRINSIC_WITH_CC(s390_vpkshs);
+ INTRINSIC_WITH_CC(s390_vpksfs);
+ INTRINSIC_WITH_CC(s390_vpksgs);
- INTRINSIC_WITH_CC(s390_vpklshs);
- INTRINSIC_WITH_CC(s390_vpklsfs);
- INTRINSIC_WITH_CC(s390_vpklsgs);
+ INTRINSIC_WITH_CC(s390_vpklshs);
+ INTRINSIC_WITH_CC(s390_vpklsfs);
+ INTRINSIC_WITH_CC(s390_vpklsgs);
- INTRINSIC_WITH_CC(s390_vceqbs);
- INTRINSIC_WITH_CC(s390_vceqhs);
- INTRINSIC_WITH_CC(s390_vceqfs);
- INTRINSIC_WITH_CC(s390_vceqgs);
+ INTRINSIC_WITH_CC(s390_vceqbs);
+ INTRINSIC_WITH_CC(s390_vceqhs);
+ INTRINSIC_WITH_CC(s390_vceqfs);
+ INTRINSIC_WITH_CC(s390_vceqgs);
- INTRINSIC_WITH_CC(s390_vchbs);
- INTRINSIC_WITH_CC(s390_vchhs);
- INTRINSIC_WITH_CC(s390_vchfs);
- INTRINSIC_WITH_CC(s390_vchgs);
+ INTRINSIC_WITH_CC(s390_vchbs);
+ INTRINSIC_WITH_CC(s390_vchhs);
+ INTRINSIC_WITH_CC(s390_vchfs);
+ INTRINSIC_WITH_CC(s390_vchgs);
- INTRINSIC_WITH_CC(s390_vchlbs);
- INTRINSIC_WITH_CC(s390_vchlhs);
- INTRINSIC_WITH_CC(s390_vchlfs);
- INTRINSIC_WITH_CC(s390_vchlgs);
+ INTRINSIC_WITH_CC(s390_vchlbs);
+ INTRINSIC_WITH_CC(s390_vchlhs);
+ INTRINSIC_WITH_CC(s390_vchlfs);
+ INTRINSIC_WITH_CC(s390_vchlgs);
- INTRINSIC_WITH_CC(s390_vfaebs);
- INTRINSIC_WITH_CC(s390_vfaehs);
- INTRINSIC_WITH_CC(s390_vfaefs);
+ INTRINSIC_WITH_CC(s390_vfaebs);
+ INTRINSIC_WITH_CC(s390_vfaehs);
+ INTRINSIC_WITH_CC(s390_vfaefs);
- INTRINSIC_WITH_CC(s390_vfaezbs);
- INTRINSIC_WITH_CC(s390_vfaezhs);
- INTRINSIC_WITH_CC(s390_vfaezfs);
+ INTRINSIC_WITH_CC(s390_vfaezbs);
+ INTRINSIC_WITH_CC(s390_vfaezhs);
+ INTRINSIC_WITH_CC(s390_vfaezfs);
- INTRINSIC_WITH_CC(s390_vfeebs);
- INTRINSIC_WITH_CC(s390_vfeehs);
- INTRINSIC_WITH_CC(s390_vfeefs);
+ INTRINSIC_WITH_CC(s390_vfeebs);
+ INTRINSIC_WITH_CC(s390_vfeehs);
+ INTRINSIC_WITH_CC(s390_vfeefs);
- INTRINSIC_WITH_CC(s390_vfeezbs);
- INTRINSIC_WITH_CC(s390_vfeezhs);
- INTRINSIC_WITH_CC(s390_vfeezfs);
+ INTRINSIC_WITH_CC(s390_vfeezbs);
+ INTRINSIC_WITH_CC(s390_vfeezhs);
+ INTRINSIC_WITH_CC(s390_vfeezfs);
- INTRINSIC_WITH_CC(s390_vfenebs);
- INTRINSIC_WITH_CC(s390_vfenehs);
- INTRINSIC_WITH_CC(s390_vfenefs);
+ INTRINSIC_WITH_CC(s390_vfenebs);
+ INTRINSIC_WITH_CC(s390_vfenehs);
+ INTRINSIC_WITH_CC(s390_vfenefs);
- INTRINSIC_WITH_CC(s390_vfenezbs);
- INTRINSIC_WITH_CC(s390_vfenezhs);
- INTRINSIC_WITH_CC(s390_vfenezfs);
+ INTRINSIC_WITH_CC(s390_vfenezbs);
+ INTRINSIC_WITH_CC(s390_vfenezhs);
+ INTRINSIC_WITH_CC(s390_vfenezfs);
- INTRINSIC_WITH_CC(s390_vistrbs);
- INTRINSIC_WITH_CC(s390_vistrhs);
- INTRINSIC_WITH_CC(s390_vistrfs);
+ INTRINSIC_WITH_CC(s390_vistrbs);
+ INTRINSIC_WITH_CC(s390_vistrhs);
+ INTRINSIC_WITH_CC(s390_vistrfs);
- INTRINSIC_WITH_CC(s390_vstrcbs);
- INTRINSIC_WITH_CC(s390_vstrchs);
- INTRINSIC_WITH_CC(s390_vstrcfs);
+ INTRINSIC_WITH_CC(s390_vstrcbs);
+ INTRINSIC_WITH_CC(s390_vstrchs);
+ INTRINSIC_WITH_CC(s390_vstrcfs);
- INTRINSIC_WITH_CC(s390_vstrczbs);
- INTRINSIC_WITH_CC(s390_vstrczhs);
- INTRINSIC_WITH_CC(s390_vstrczfs);
+ INTRINSIC_WITH_CC(s390_vstrczbs);
+ INTRINSIC_WITH_CC(s390_vstrczhs);
+ INTRINSIC_WITH_CC(s390_vstrczfs);
- INTRINSIC_WITH_CC(s390_vfcesbs);
- INTRINSIC_WITH_CC(s390_vfcedbs);
- INTRINSIC_WITH_CC(s390_vfchsbs);
- INTRINSIC_WITH_CC(s390_vfchdbs);
- INTRINSIC_WITH_CC(s390_vfchesbs);
- INTRINSIC_WITH_CC(s390_vfchedbs);
+ INTRINSIC_WITH_CC(s390_vfcesbs);
+ INTRINSIC_WITH_CC(s390_vfcedbs);
+ INTRINSIC_WITH_CC(s390_vfchsbs);
+ INTRINSIC_WITH_CC(s390_vfchdbs);
+ INTRINSIC_WITH_CC(s390_vfchesbs);
+ INTRINSIC_WITH_CC(s390_vfchedbs);
- INTRINSIC_WITH_CC(s390_vftcisb);
- INTRINSIC_WITH_CC(s390_vftcidb);
+ INTRINSIC_WITH_CC(s390_vftcisb);
+ INTRINSIC_WITH_CC(s390_vftcidb);
- INTRINSIC_WITH_CC(s390_vstrsb);
- INTRINSIC_WITH_CC(s390_vstrsh);
- INTRINSIC_WITH_CC(s390_vstrsf);
+ INTRINSIC_WITH_CC(s390_vstrsb);
+ INTRINSIC_WITH_CC(s390_vstrsh);
+ INTRINSIC_WITH_CC(s390_vstrsf);
- INTRINSIC_WITH_CC(s390_vstrszb);
- INTRINSIC_WITH_CC(s390_vstrszh);
- INTRINSIC_WITH_CC(s390_vstrszf);
+ INTRINSIC_WITH_CC(s390_vstrszb);
+ INTRINSIC_WITH_CC(s390_vstrszh);
+ INTRINSIC_WITH_CC(s390_vstrszf);
#undef INTRINSIC_WITH_CC
@@ -20067,13 +20719,13 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
namespace {
// Helper classes for mapping MMA builtins to particular LLVM intrinsic variant.
struct NVPTXMmaLdstInfo {
- unsigned NumResults; // Number of elements to load/store
+ unsigned NumResults; // Number of elements to load/store
// Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
unsigned IID_col;
unsigned IID_row;
};
-#define MMA_INTR(geom_op_type, layout) \
+#define MMA_INTR(geom_op_type, layout) \
Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
#define MMA_LDST(n, geom_op_type) \
{ n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
@@ -20231,7 +20883,6 @@ static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
#undef MMA_LDST
#undef MMA_INTR
-
struct NVPTXMmaInfo {
unsigned NumEltsA;
unsigned NumEltsB;
@@ -20251,8 +20902,8 @@ struct NVPTXMmaInfo {
}
};
- // Returns an intrinsic that matches Layout and Satf for valid combinations of
- // Layout and Satf, 0 otherwise.
+// Returns an intrinsic that matches Layout and Satf for valid combinations of
+// Layout and Satf, 0 otherwise.
static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
// clang-format off
#define MMA_VARIANTS(geom, type) \
@@ -20693,8 +21344,8 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
llvm::Type *ElemTy =
ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
return Builder.CreateCall(
- CGM.getIntrinsic(
- Intrinsic::nvvm_atomic_cas_gen_i_cta, {ElemTy, Ptr->getType()}),
+ CGM.getIntrinsic(Intrinsic::nvvm_atomic_cas_gen_i_cta,
+ {ElemTy, Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
}
case NVPTX::BI__nvvm_atom_sys_cas_gen_us:
@@ -20705,8 +21356,8 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
llvm::Type *ElemTy =
ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
return Builder.CreateCall(
- CGM.getIntrinsic(
- Intrinsic::nvvm_atomic_cas_gen_i_sys, {ElemTy, Ptr->getType()}),
+ CGM.getIntrinsic(Intrinsic::nvvm_atomic_cas_gen_i_sys,
+ {ElemTy, Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
}
case NVPTX::BI__nvvm_match_all_sync_i32p:
@@ -20836,8 +21487,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
unsigned IID = isColMajor ? II.IID_col : II.IID_row;
if (IID == 0)
return nullptr;
- Function *Intrinsic =
- CGM.getIntrinsic(IID, Dst->getType());
+ Function *Intrinsic = CGM.getIntrinsic(IID, Dst->getType());
llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
SmallVector<Value *, 10> Values = {Dst};
for (unsigned i = 0; i < II.NumResults; ++i) {
@@ -20896,7 +21546,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
llvm::APSInt SatfArg;
if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 ||
BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1)
- SatfArg = 0; // .b1 does not have satf argument.
+ SatfArg = 0; // .b1 does not have satf argument.
else if (std::optional<llvm::APSInt> OptSatfArg =
E->getArg(5)->getIntegerConstantExpr(getContext()))
SatfArg = *OptSatfArg;
@@ -20905,7 +21555,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
bool Satf = SatfArg.getSExtValue();
NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
- if (IID == 0) // Unsupported combination of Layout/Satf.
+ if (IID == 0) // Unsupported combination of Layout/Satf.
return nullptr;
SmallVector<Value *, 24> Values;
@@ -21169,9 +21819,8 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::nvvm_mapa_shared_cluster),
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
case NVPTX::BI__nvvm_getctarank:
- return Builder.CreateCall(
- CGM.getIntrinsic(Intrinsic::nvvm_getctarank),
- EmitScalarExpr(E->getArg(0)));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::nvvm_getctarank),
+ EmitScalarExpr(E->getArg(0)));
case NVPTX::BI__nvvm_getctarank_shared_cluster:
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::nvvm_getctarank_shared_cluster),
@@ -21964,44 +22613,46 @@ getIntrinsicForHexagonNonClangBuiltin(unsigned BuiltinID) {
unsigned VecLen;
};
static Info Infos[] = {
-#define CUSTOM_BUILTIN_MAPPING(x,s) \
- { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
- CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
- CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
- CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
- CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
- CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
- CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
- CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
- CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
- CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
- CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
- CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
- CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
- CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
- CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
- CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
- CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
- CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
- CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
- CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
- CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
- CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
- CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
- // Legacy builtins that take a vector in place of a vector predicate.
- CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
- CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
- CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
- CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
- CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
- CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
- CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
- CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
+#define CUSTOM_BUILTIN_MAPPING(x, s) \
+ {Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s},
+ CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0) CUSTOM_BUILTIN_MAPPING(
+ L2_loadrb_pci,
+ 0) CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci,
+ 0) CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0) CUSTOM_BUILTIN_MAPPING(
+ L2_loadrd_pci, 0) CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0) CUSTOM_BUILTIN_MAPPING(
+ L2_loadruh_pcr, 0) CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(
+ L2_loadri_pcr, 0) CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(
+ S2_storerb_pci,
+ 0) CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr,
+ 0)
+ CUSTOM_BUILTIN_MAPPING(
+ S2_storerf_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(
+ S2_storeri_pcr, 0)
+ CUSTOM_BUILTIN_MAPPING(
+ S2_storerd_pcr, 0)
+ // Legacy builtins that take a vector in place of a vector predicate.
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64) CUSTOM_BUILTIN_MAPPING(
+ V6_vmaskedstorenq, 64) CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
#undef CUSTOM_BUILTIN_MAPPING
};
- auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
+ auto CmpInfo = [](Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
(void)SortOnce;
@@ -22031,15 +22682,15 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// Store:
// builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
// builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
- SmallVector<llvm::Value*,5> Ops = { Base };
+ SmallVector<llvm::Value *, 5> Ops = {Base};
for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
// The load intrinsics generate two results (Value, NewBase), stores
// generate one (NewBase). The new base address needs to be stored.
- llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
- : Result;
+ llvm::Value *NewBase =
+ IsLoad ? Builder.CreateExtractValue(Result, 1) : Result;
llvm::Value *LV = EmitScalarExpr(E->getArg(0));
Address Dest = EmitPointerWithAlignment(E->getArg(0));
llvm::Value *RetVal =
@@ -22084,13 +22735,13 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractValue(Result, 1);
};
- auto V2Q = [this, VecLen] (llvm::Value *Vec) {
+ auto V2Q = [this, VecLen](llvm::Value *Vec) {
Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
: Intrinsic::hexagon_V6_vandvrt;
return Builder.CreateCall(CGM.getIntrinsic(ID),
{Vec, Builder.getInt32(-1)});
};
- auto Q2V = [this, VecLen] (llvm::Value *Pred) {
+ auto Q2V = [this, VecLen](llvm::Value *Pred) {
Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
: Intrinsic::hexagon_V6_vandqrt;
return Builder.CreateCall(CGM.getIntrinsic(ID),
@@ -22110,7 +22761,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
Address PredAddr =
EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
- llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
+ llvm::Value *Result = Builder.CreateCall(
+ CGM.getIntrinsic(ID),
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
@@ -22129,7 +22781,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
Address PredAddr =
EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
- llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
+ llvm::Value *Result = Builder.CreateCall(
+ CGM.getIntrinsic(ID),
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
@@ -22146,7 +22799,7 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq_128B:
case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq_128B:
case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq_128B: {
- SmallVector<llvm::Value*,4> Ops;
+ SmallVector<llvm::Value *, 4> Ops;
const Expr *PredOp = E->getArg(0);
// There will be an implicit cast to a boolean vector. Strip it.
if (auto *Cast = dyn_cast<ImplicitCastExpr>(PredOp)) {
@@ -22257,7 +22910,8 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
// Required for overloaded intrinsics.
llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
switch (BuiltinID) {
- default: llvm_unreachable("unexpected builtin ID");
+ default:
+ llvm_unreachable("unexpected builtin ID");
case RISCV::BI__builtin_riscv_orc_b_32:
case RISCV::BI__builtin_riscv_orc_b_64:
case RISCV::BI__builtin_riscv_clz_32:
@@ -22279,7 +22933,8 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_zip_32:
case RISCV::BI__builtin_riscv_unzip_32: {
switch (BuiltinID) {
- default: llvm_unreachable("unexpected builtin ID");
+ default:
+ llvm_unreachable("unexpected builtin ID");
// Zbb
case RISCV::BI__builtin_riscv_orc_b_32:
case RISCV::BI__builtin_riscv_orc_b_64:
@@ -22290,7 +22945,7 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true,
"cast");
return Result;
}
@@ -22299,7 +22954,7 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true,
"cast");
return Result;
}
@@ -22391,7 +23046,7 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
int Width;
- if(ResTy->isScalableTy()) {
+ if (ResTy->isScalableTy()) {
const ScalableVectorType *SVTy = cast<ScalableVectorType>(ResTy);
llvm::Type *ScalarTy = ResTy->getScalarType();
Width = ScalarTy->getPrimitiveSizeInBits() *
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h
index a8aabca7348ffb..74876657646d07 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -87,7 +87,6 @@ class CGHLSLRuntime {
GENERATE_HLSL_INTRINSIC_FUNCTION(SDot, sdot)
GENERATE_HLSL_INTRINSIC_FUNCTION(UDot, udot)
GENERATE_HLSL_INTRINSIC_FUNCTION(WaveIsFirstLane, wave_is_first_lane)
-
//===----------------------------------------------------------------------===//
// End of reserved area for HLSL intrinsic getters.
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
index 6a50d50ebd3479..9385fdcd7f2863 100644
--- a/clang/lib/Headers/hlsl/hlsl_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -423,6 +423,33 @@ template <typename T> _HLSL_INLINE uint asuint(T F) {
return __detail::bit_cast<uint, T>(F);
}
+//===----------------------------------------------------------------------===//
+// asuint splitdouble builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn void asuint(double D, out uint lowbits, out uint highbits)
+/// \brief Splits double D into its low and high 32 bits, interpreted as uints.
+/// \param D The input double.
+/// \param lowbits The output low 32 bits of D.
+/// \param highbits The output high 32 bits of D.
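+///
+/// Example (illustrative only):
+/// \code
+/// double d = 2.5;
+/// uint lo, hi;
+/// asuint(d, lo, hi); // lo and hi receive the low/high 32 bits of d
+/// \endcode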
+#if __is_target_arch(dxil)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_asuint_splitdouble)
+void asuint(double, out uint, out uint);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_asuint_splitdouble)
+void asuint(double2, out uint2, out uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_asuint_splitdouble)
+void asuint(double3, out uint3, out uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_asuint_splitdouble)
+void asuint(double4, out uint4, out uint4);
+#endif
+
//===----------------------------------------------------------------------===//
// atan builtins
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 67792be994fa89..0a29be1fdc75c7 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -1531,18 +1531,27 @@ bool CheckVectorElementCallArgs(Sema *S, CallExpr *TheCall) {
return true;
}
+bool CheckArgTypeIsCorrect(
+ Sema *S, Expr *Arg, QualType ExpectedType,
+ llvm::function_ref<bool(clang::QualType PassedType)> Check) {
+ QualType PassedType = Arg->getType();
+ if (Check(PassedType)) {
+ if (auto *VecTyA = PassedType->getAs<VectorType>())
+ ExpectedType = S->Context.getVectorType(
+ ExpectedType, VecTyA->getNumElements(), VecTyA->getVectorKind());
+ S->Diag(Arg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
+ << PassedType << ExpectedType << 1 << 0 << 0;
+ return true;
+ }
+ return false;
+}
+
bool CheckArgsTypesAreCorrect(
Sema *S, CallExpr *TheCall, QualType ExpectedType,
llvm::function_ref<bool(clang::QualType PassedType)> Check) {
for (unsigned i = 0; i < TheCall->getNumArgs(); ++i) {
- QualType PassedType = TheCall->getArg(i)->getType();
- if (Check(PassedType)) {
- if (auto *VecTyA = PassedType->getAs<VectorType>())
- ExpectedType = S->Context.getVectorType(
- ExpectedType, VecTyA->getNumElements(), VecTyA->getVectorKind());
- S->Diag(TheCall->getArg(0)->getBeginLoc(),
- diag::err_typecheck_convert_incompatible)
- << PassedType << ExpectedType << 1 << 0 << 0;
+ Expr *Arg = TheCall->getArg(i);
+ if (CheckArgTypeIsCorrect(S, Arg, ExpectedType, Check)) {
return true;
}
}
@@ -1826,6 +1835,37 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return true;
break;
}
+ case Builtin::BI__builtin_hlsl_asuint_splitdouble: {
+ if (SemaRef.checkArgCount(TheCall, 3))
+ return true;
+
+ Expr *Op0 = TheCall->getArg(0);
+
+ auto CheckIsNotDouble = [](clang::QualType PassedType) -> bool {
+ return !PassedType->isDoubleType();
+ };
+
+ if (CheckArgTypeIsCorrect(&SemaRef, Op0, SemaRef.Context.DoubleTy,
+ CheckIsNotDouble)) {
+ return true;
+ }
+
+ Expr *Op1 = TheCall->getArg(1);
+ Expr *Op2 = TheCall->getArg(2);
+
+ auto CheckIsNotUint = [](clang::QualType PassedType) -> bool {
+ return !PassedType->isUnsignedIntegerType();
+ };
+
+ if (CheckArgTypeIsCorrect(&SemaRef, Op1, SemaRef.Context.UnsignedIntTy,
+ CheckIsNotUint) ||
+ CheckArgTypeIsCorrect(&SemaRef, Op2, SemaRef.Context.UnsignedIntTy,
+ CheckIsNotUint)) {
+ return true;
+ }
+
+ break;
+ }
case Builtin::BI__builtin_elementwise_acos:
case Builtin::BI__builtin_elementwise_asin:
case Builtin::BI__builtin_elementwise_atan:
diff --git a/clang/test/CodeGenHLSL/builtins/asuint-splitdouble.hlsl b/clang/test/CodeGenHLSL/builtins/asuint-splitdouble.hlsl
new file mode 100644
index 00000000000000..1dc8017cfd9421
--- /dev/null
+++ b/clang/test/CodeGenHLSL/builtins/asuint-splitdouble.hlsl
@@ -0,0 +1,18 @@
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple dxil-pc-shadermodel6.3-library %s -fnative-half-type -emit-llvm -O0 -o - | FileCheck %s
+
+// CHECK: define {{.*}}test_scalar{{.*}}(double {{.*}} [[VAL1:%.*]], i32 {{.*}} [[VAL2:%.*]], i32 {{.*}} [[VAL3:%.*]]){{.*}}
+// CHECK: [[VALD:%.*]] = load double, ptr [[VAL1]].addr{{.*}}
+// CHECK: call { i32, i32 } @llvm.dx.asuint.splitdouble.{{.*}}(double [[VALD]])
+uint test_scalar(double d, uint lb, uint hb) {
+ asuint(d, lb, hb);
+ return lb;
+}
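+
+// A minimal vector sanity check; this assumes the vector overload lowers to
+// the same intrinsic with <3 x i32> result payloads.
+// CHECK: define {{.*}}test_vector{{.*}}
+// CHECK: call { <3 x i32>, <3 x i32> } @llvm.dx.asuint.splitdouble.{{.*}}(<3 x double> {{.*}})
+uint3 test_vector(double3 d, uint3 lb, uint3 hb) {
+ asuint(d, lb, hb);
+ return lb;
+}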
\ No newline at end of file
diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td
index 3ce7b8b987ef86..d8092397881550 100644
--- a/llvm/include/llvm/IR/IntrinsicsDirectX.td
+++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td
@@ -88,4 +88,11 @@ def int_dx_rsqrt : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]
def int_dx_wave_is_first_lane : DefaultAttrsIntrinsic<[llvm_i1_ty], [], [IntrConvergent]>;
def int_dx_sign : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_any_ty], [IntrNoMem]>;
def int_dx_step : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty, LLVMMatchType<0>], [IntrNoMem]>;
+
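+// Splits a double (or a vector of doubles) into two uint results of matching
+// element count: the first result carries the low 32 bits, the second the
+// high 32 bits.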
+def int_dx_asuint_splitdouble : DefaultAttrsIntrinsic<
+ [llvm_anyint_ty, LLVMMatchType<0>],
+ [LLVMScalarOrSameVectorWidth<0, llvm_double_ty>],
+ [IntrNoMem, IntrWillReturn]>;
}
diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td
index 902ab37bf741ed..d5f869e7f4bd08 100644
--- a/llvm/lib/Target/DirectX/DXIL.td
+++ b/llvm/lib/Target/DirectX/DXIL.td
@@ -767,6 +767,7 @@ def FlattenedThreadIdInGroup : DXILOp<96, flattenedThreadIdInGroup> {
let stages = [Stages<DXIL1_0, [compute, mesh, amplification, node]>];
let attributes = [Attributes<DXIL1_0, [ReadNone]>];
}
+//
def AnnotateHandle : DXILOp<217, annotateHandle> {
let Doc = "annotate handle with resource properties";
diff --git a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
index dd73b895b14d37..2bc9ebc962e71a 100644
--- a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
+++ b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
@@ -12,6 +12,7 @@
#include "DXILIntrinsicExpansion.h"
#include "DirectX.h"
+#include "llvm-c/Core.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DXILResource.h"
@@ -346,6 +347,22 @@ static Value *expandStepIntrinsic(CallInst *Orig) {
return Builder.CreateSelect(Cond, Zero, One);
}
+// Sketch of a possible expansion for dx.asuint.splitdouble: bitcast the
+// double to i64 and split it into its low and high i32 halves (scalar case
+// only; vectors would need splatted types). Disabled until lowering settles.
+// static Value *expandSplitdoubleIntrinsic(CallInst *Orig) {
+// Value *X = Orig->getOperand(0);
+// IRBuilder<> Builder(Orig);
+// Value *Bits = Builder.CreateBitCast(X, Builder.getInt64Ty());
+// Value *Low = Builder.CreateTrunc(Bits, Builder.getInt32Ty());
+// Value *High = Builder.CreateTrunc(
+// Builder.CreateLShr(Bits, Builder.getInt64(32)), Builder.getInt32Ty());
+// Value *Ret = PoisonValue::get(Orig->getType());
+// Ret = Builder.CreateInsertValue(Ret, Low, 0);
+// return Builder.CreateInsertValue(Ret, High, 1);
+// }
+
static Intrinsic::ID getMaxForClamp(Type *ElemTy,
Intrinsic::ID ClampIntrinsic) {
if (ClampIntrinsic == Intrinsic::dx_uclamp)
@@ -459,6 +469,9 @@ static bool expandIntrinsic(Function &F, CallInst *Orig) {
break;
case Intrinsic::dx_step:
Result = expandStepIntrinsic(Orig);
+ break;
+ // case Intrinsic::dx_asuint_splitdouble:
+ // Result = expandSplitdoubleIntrinsic(Orig);
}
if (Result) {
Orig->replaceAllUsesWith(Result);