[clang] [Clang][NFC] Code cleanup in CGBuiltin.cpp (PR #132060)
Rahul Joshi via cfe-commits
cfe-commits at lists.llvm.org
Wed Mar 19 10:07:41 PDT 2025
https://github.com/jurahul created https://github.com/llvm/llvm-project/pull/132060
- Use `Intrinsic::` directly instead of `llvm::Intrinsic::`.
- Eliminate redundant `nullptr` for some `CreateIntrinsic` calls.
- Eliminate redundant `ArrayRef` casts.
- Use C++17 structured binding instead of `std::tie`.
From 17c36a35d4cb54773e808ca0876ba4f42265f379 Mon Sep 17 00:00:00 2001
From: Rahul Joshi <rjoshi at nvidia.com>
Date: Wed, 19 Mar 2025 09:54:12 -0700
Subject: [PATCH] [Clang][NFC] Code cleanup in CGBuiltin.cpp
- Use `Intrinsic::` directly instead of `llvm::Intrinsic::`.
- Eliminate redundant `nullptr` for some `CreateIntrinsic` calls.
- Eliminate redundant `ArrayRef` casts.
- Use C++17 structured binding instead of `std::tie`.
---
clang/lib/CodeGen/CGBuiltin.cpp | 497 +++++++++++++++-----------------
1 file changed, 232 insertions(+), 265 deletions(-)
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index c126f88b9e3a5..3536bdd91ebc6 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -113,13 +113,13 @@ static Value *handleHlslClip(const CallExpr *E, CodeGenFunction *CGF) {
auto *FCompInst = CGF->Builder.CreateFCmpOLT(Op0, FZeroConst);
CMP = CGF->Builder.CreateIntrinsic(
CGF->Builder.getInt1Ty(), CGF->CGM.getHLSLRuntime().getAnyIntrinsic(),
- {FCompInst}, nullptr);
+ {FCompInst});
} else
CMP = CGF->Builder.CreateFCmpOLT(Op0, FZeroConst);
if (CGF->CGM.getTarget().getTriple().isDXIL())
- LastInstr = CGF->Builder.CreateIntrinsic(
- CGF->VoidTy, llvm::Intrinsic::dx_discard, {CMP}, nullptr);
+ LastInstr =
+ CGF->Builder.CreateIntrinsic(CGF->VoidTy, Intrinsic::dx_discard, {CMP});
else if (CGF->CGM.getTarget().getTriple().isSPIRV()) {
BasicBlock *LT0 = CGF->createBasicBlock("lt0", CGF->CurFn);
BasicBlock *End = CGF->createBasicBlock("end", CGF->CurFn);
@@ -127,12 +127,9 @@ static Value *handleHlslClip(const CallExpr *E, CodeGenFunction *CGF) {
CGF->Builder.CreateCondBr(CMP, LT0, End);
CGF->Builder.SetInsertPoint(LT0);
-
- CGF->Builder.CreateIntrinsic(CGF->VoidTy, llvm::Intrinsic::spv_discard, {},
- nullptr);
+ CGF->Builder.CreateIntrinsic(CGF->VoidTy, Intrinsic::spv_discard, {});
LastInstr = CGF->Builder.CreateBr(End);
-
CGF->Builder.SetInsertPoint(End);
} else {
llvm_unreachable("Backend Codegen not supported.");
@@ -228,7 +225,7 @@ static Value *handleAsDoubleBuiltin(CodeGenFunction &CGF, const CallExpr *E) {
if (CGF.CGM.getTarget().getTriple().isDXIL())
return CGF.Builder.CreateIntrinsic(
/*ReturnType=*/ResultType, Intrinsic::dx_asdouble,
- ArrayRef<Value *>{OpLowBits, OpHighBits}, nullptr, "hlsl.asdouble");
+ {OpLowBits, OpHighBits}, nullptr, "hlsl.asdouble");
if (!E->getArg(0)->getType()->isVectorType()) {
OpLowBits = CGF.Builder.CreateVectorSplat(1, OpLowBits);
@@ -254,7 +251,7 @@ Value *readX18AsPtr(CodeGenFunction &CGF) {
llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
llvm::Function *F =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::read_register, {CGF.Int64Ty});
+ CGF.CGM.getIntrinsic(Intrinsic::read_register, {CGF.Int64Ty});
llvm::Value *X18 = CGF.Builder.CreateCall(F, Metadata);
return CGF.Builder.CreateIntToPtr(X18, CGF.Int8PtrTy);
}
@@ -706,9 +703,10 @@ static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
}
// Has second type mangled argument.
-static Value *emitBinaryExpMaybeConstrainedFPBuiltin(
- CodeGenFunction &CGF, const CallExpr *E, llvm::Intrinsic::ID IntrinsicID,
- llvm::Intrinsic::ID ConstrainedIntrinsicID) {
+static Value *
+emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+ Intrinsic::ID IntrinsicID,
+ Intrinsic::ID ConstrainedIntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
@@ -810,7 +808,7 @@ emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
}
static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
- llvm::Intrinsic::ID IntrinsicID) {
+ Intrinsic::ID IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
@@ -828,7 +826,7 @@ static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
}
static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
- llvm::Intrinsic::ID IntrinsicID) {
+ Intrinsic::ID IntrinsicID) {
llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Dest0 = CGF.EmitScalarExpr(E->getArg(1));
llvm::Value *Dest1 = CGF.EmitScalarExpr(E->getArg(2));
@@ -860,7 +858,7 @@ static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
}
static llvm::Value *emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E,
- llvm::Intrinsic::ID IntrinsicID) {
+ Intrinsic::ID IntrinsicID) {
llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *IntPartDest = CGF.EmitScalarExpr(E->getArg(1));
@@ -971,7 +969,7 @@ static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
- const llvm::Intrinsic::ID IntrinsicID,
+ const Intrinsic::ID IntrinsicID,
llvm::Value *X, llvm::Value *Y,
llvm::Value *&Carry) {
// Make sure we have integers of the same width.
@@ -2661,7 +2659,7 @@ static RValue EmitCheckedUnsignedMultiplySignedResult(
llvm::Value *HasOverflow;
llvm::Value *Result = EmitOverflowIntrinsic(
- CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
+ CGF, Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
// The intrinsic call will detect overflow when the value is > UINT_MAX,
// however, since the original builtin had a signed result, we need to report
@@ -2731,7 +2729,7 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
// Perform a checked unsigned multiplication.
llvm::Value *UnsignedOverflow;
llvm::Value *UnsignedResult =
- EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
+ EmitOverflowIntrinsic(CGF, Intrinsic::umul_with_overflow, AbsSigned,
Unsigned, UnsignedOverflow);
llvm::Value *Overflow, *Result;
@@ -3909,7 +3907,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
LLVMContext &Ctx = CGM.getLLVMContext();
llvm::Value *Allow = Builder.CreateCall(
- CGM.getIntrinsic(llvm::Intrinsic::allow_runtime_check),
+ CGM.getIntrinsic(Intrinsic::allow_runtime_check),
llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
return RValue::get(Allow);
}
@@ -4281,102 +4279,102 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
QT = VecTy->getElementType();
if (QT->isIntegerType())
Result = Builder.CreateBinaryIntrinsic(
- llvm::Intrinsic::abs, EmitScalarExpr(E->getArg(0)),
- Builder.getFalse(), nullptr, "elt.abs");
+ Intrinsic::abs, EmitScalarExpr(E->getArg(0)), Builder.getFalse(),
+ nullptr, "elt.abs");
else
- Result = emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::fabs, "elt.abs");
+ Result = emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs,
+ "elt.abs");
return RValue::get(Result);
}
case Builtin::BI__builtin_elementwise_acos:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::acos, "elt.acos"));
+ *this, E, Intrinsic::acos, "elt.acos"));
case Builtin::BI__builtin_elementwise_asin:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::asin, "elt.asin"));
+ *this, E, Intrinsic::asin, "elt.asin"));
case Builtin::BI__builtin_elementwise_atan:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::atan, "elt.atan"));
+ *this, E, Intrinsic::atan, "elt.atan"));
case Builtin::BI__builtin_elementwise_atan2:
return RValue::get(emitBuiltinWithOneOverloadedType<2>(
- *this, E, llvm::Intrinsic::atan2, "elt.atan2"));
+ *this, E, Intrinsic::atan2, "elt.atan2"));
case Builtin::BI__builtin_elementwise_ceil:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::ceil, "elt.ceil"));
+ *this, E, Intrinsic::ceil, "elt.ceil"));
case Builtin::BI__builtin_elementwise_exp:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::exp, "elt.exp"));
+ *this, E, Intrinsic::exp, "elt.exp"));
case Builtin::BI__builtin_elementwise_exp2:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::exp2, "elt.exp2"));
+ *this, E, Intrinsic::exp2, "elt.exp2"));
case Builtin::BI__builtin_elementwise_exp10:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::exp10, "elt.exp10"));
+ *this, E, Intrinsic::exp10, "elt.exp10"));
case Builtin::BI__builtin_elementwise_log:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::log, "elt.log"));
+ *this, E, Intrinsic::log, "elt.log"));
case Builtin::BI__builtin_elementwise_log2:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::log2, "elt.log2"));
+ *this, E, Intrinsic::log2, "elt.log2"));
case Builtin::BI__builtin_elementwise_log10:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::log10, "elt.log10"));
+ *this, E, Intrinsic::log10, "elt.log10"));
case Builtin::BI__builtin_elementwise_pow: {
return RValue::get(
- emitBuiltinWithOneOverloadedType<2>(*this, E, llvm::Intrinsic::pow));
+ emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::pow));
}
case Builtin::BI__builtin_elementwise_bitreverse:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::bitreverse, "elt.bitreverse"));
+ *this, E, Intrinsic::bitreverse, "elt.bitreverse"));
case Builtin::BI__builtin_elementwise_cos:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::cos, "elt.cos"));
+ *this, E, Intrinsic::cos, "elt.cos"));
case Builtin::BI__builtin_elementwise_cosh:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::cosh, "elt.cosh"));
+ *this, E, Intrinsic::cosh, "elt.cosh"));
case Builtin::BI__builtin_elementwise_floor:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::floor, "elt.floor"));
+ *this, E, Intrinsic::floor, "elt.floor"));
case Builtin::BI__builtin_elementwise_popcount:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::ctpop, "elt.ctpop"));
+ *this, E, Intrinsic::ctpop, "elt.ctpop"));
case Builtin::BI__builtin_elementwise_roundeven:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::roundeven, "elt.roundeven"));
+ *this, E, Intrinsic::roundeven, "elt.roundeven"));
case Builtin::BI__builtin_elementwise_round:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::round, "elt.round"));
+ *this, E, Intrinsic::round, "elt.round"));
case Builtin::BI__builtin_elementwise_rint:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::rint, "elt.rint"));
+ *this, E, Intrinsic::rint, "elt.rint"));
case Builtin::BI__builtin_elementwise_nearbyint:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::nearbyint, "elt.nearbyint"));
+ *this, E, Intrinsic::nearbyint, "elt.nearbyint"));
case Builtin::BI__builtin_elementwise_sin:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::sin, "elt.sin"));
+ *this, E, Intrinsic::sin, "elt.sin"));
case Builtin::BI__builtin_elementwise_sinh:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::sinh, "elt.sinh"));
+ *this, E, Intrinsic::sinh, "elt.sinh"));
case Builtin::BI__builtin_elementwise_tan:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::tan, "elt.tan"));
+ *this, E, Intrinsic::tan, "elt.tan"));
case Builtin::BI__builtin_elementwise_tanh:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::tanh, "elt.tanh"));
+ *this, E, Intrinsic::tanh, "elt.tanh"));
case Builtin::BI__builtin_elementwise_trunc:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::trunc, "elt.trunc"));
+ *this, E, Intrinsic::trunc, "elt.trunc"));
case Builtin::BI__builtin_elementwise_canonicalize:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::canonicalize, "elt.canonicalize"));
+ *this, E, Intrinsic::canonicalize, "elt.canonicalize"));
case Builtin::BI__builtin_elementwise_copysign:
- return RValue::get(emitBuiltinWithOneOverloadedType<2>(
- *this, E, llvm::Intrinsic::copysign));
+ return RValue::get(
+ emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
case Builtin::BI__builtin_elementwise_fma:
return RValue::get(
- emitBuiltinWithOneOverloadedType<3>(*this, E, llvm::Intrinsic::fma));
+ emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fma));
case Builtin::BI__builtin_elementwise_add_sat:
case Builtin::BI__builtin_elementwise_sub_sat: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
@@ -4389,9 +4387,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
bool IsSigned = Ty->isSignedIntegerType();
unsigned Opc;
if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
- Opc = IsSigned ? llvm::Intrinsic::sadd_sat : llvm::Intrinsic::uadd_sat;
+ Opc = IsSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
else
- Opc = IsSigned ? llvm::Intrinsic::ssub_sat : llvm::Intrinsic::usub_sat;
+ Opc = IsSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
return RValue::get(Result);
}
@@ -4404,10 +4402,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
QualType Ty = E->getArg(0)->getType();
if (auto *VecTy = Ty->getAs<VectorType>())
Ty = VecTy->getElementType();
- Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
- ? llvm::Intrinsic::smax
- : llvm::Intrinsic::umax,
- Op0, Op1, nullptr, "elt.max");
+ Result = Builder.CreateBinaryIntrinsic(
+ Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
+ Op1, nullptr, "elt.max");
} else
Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
return RValue::get(Result);
@@ -4420,10 +4417,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
QualType Ty = E->getArg(0)->getType();
if (auto *VecTy = Ty->getAs<VectorType>())
Ty = VecTy->getElementType();
- Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
- ? llvm::Intrinsic::smin
- : llvm::Intrinsic::umin,
- Op0, Op1, nullptr, "elt.min");
+ Result = Builder.CreateBinaryIntrinsic(
+ Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
+ Op1, nullptr, "elt.min");
} else
Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
return RValue::get(Result);
@@ -4432,16 +4428,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_elementwise_maximum: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Op1 = EmitScalarExpr(E->getArg(1));
- Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maximum, Op0,
- Op1, nullptr, "elt.maximum");
+ Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Op0, Op1,
+ nullptr, "elt.maximum");
return RValue::get(Result);
}
case Builtin::BI__builtin_elementwise_minimum: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Op1 = EmitScalarExpr(E->getArg(1));
- Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::minimum, Op0,
- Op1, nullptr, "elt.minimum");
+ Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Op0, Op1,
+ nullptr, "elt.minimum");
return RValue::get(Result);
}
@@ -4453,11 +4449,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
QT = QT->getSizelessVectorEltType(CGM.getContext());
if (QT->isSignedIntegerType())
- return llvm::Intrinsic::vector_reduce_smax;
+ return Intrinsic::vector_reduce_smax;
if (QT->isUnsignedIntegerType())
- return llvm::Intrinsic::vector_reduce_umax;
+ return Intrinsic::vector_reduce_umax;
assert(QT->isFloatingType() && "must have a float here");
- return llvm::Intrinsic::vector_reduce_fmax;
+ return Intrinsic::vector_reduce_fmax;
};
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
*this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
@@ -4471,11 +4467,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
QT = QT->getSizelessVectorEltType(CGM.getContext());
if (QT->isSignedIntegerType())
- return llvm::Intrinsic::vector_reduce_smin;
+ return Intrinsic::vector_reduce_smin;
if (QT->isUnsignedIntegerType())
- return llvm::Intrinsic::vector_reduce_umin;
+ return Intrinsic::vector_reduce_umin;
assert(QT->isFloatingType() && "must have a float here");
- return llvm::Intrinsic::vector_reduce_fmin;
+ return Intrinsic::vector_reduce_fmin;
};
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
@@ -4484,25 +4480,25 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_reduce_add:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::vector_reduce_add, "rdx.add"));
+ *this, E, Intrinsic::vector_reduce_add, "rdx.add"));
case Builtin::BI__builtin_reduce_mul:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::vector_reduce_mul, "rdx.mul"));
+ *this, E, Intrinsic::vector_reduce_mul, "rdx.mul"));
case Builtin::BI__builtin_reduce_xor:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::vector_reduce_xor, "rdx.xor"));
+ *this, E, Intrinsic::vector_reduce_xor, "rdx.xor"));
case Builtin::BI__builtin_reduce_or:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::vector_reduce_or, "rdx.or"));
+ *this, E, Intrinsic::vector_reduce_or, "rdx.or"));
case Builtin::BI__builtin_reduce_and:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::vector_reduce_and, "rdx.and"));
+ *this, E, Intrinsic::vector_reduce_and, "rdx.and"));
case Builtin::BI__builtin_reduce_maximum:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
+ *this, E, Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
case Builtin::BI__builtin_reduce_minimum:
return RValue::get(emitBuiltinWithOneOverloadedType<1>(
- *this, E, llvm::Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
+ *this, E, Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
case Builtin::BI__builtin_matrix_transpose: {
auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
@@ -5532,17 +5528,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
// Build and MDTuple of MDStrings and emit the intrinsic call.
- llvm::Function *F =
- CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::codeview_annotation, {});
MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
return RValue::getIgnored();
}
case Builtin::BI__builtin_annotation: {
llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
- llvm::Function *F =
- CGM.getIntrinsic(llvm::Intrinsic::annotation,
- {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
+ llvm::Function *F = CGM.getIntrinsic(
+ Intrinsic::annotation, {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
// Get the annotation string, go through casts. Sema requires this to be a
// non-wide string literal, potentially casted, so the cast<> is safe.
@@ -5586,7 +5580,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
// Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
- llvm::Intrinsic::ID IntrinsicId;
+ Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default: llvm_unreachable("Unknown multiprecision builtin id.");
case Builtin::BI__builtin_addcb:
@@ -5594,14 +5588,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_addc:
case Builtin::BI__builtin_addcl:
case Builtin::BI__builtin_addcll:
- IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
+ IntrinsicId = Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_subcb:
case Builtin::BI__builtin_subcs:
case Builtin::BI__builtin_subc:
case Builtin::BI__builtin_subcl:
case Builtin::BI__builtin_subcll:
- IntrinsicId = llvm::Intrinsic::usub_with_overflow;
+ IntrinsicId = Intrinsic::usub_with_overflow;
break;
}
@@ -5656,24 +5650,21 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
- llvm::Intrinsic::ID IntrinsicId;
+ Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default:
llvm_unreachable("Unknown overflow builtin id.");
case Builtin::BI__builtin_add_overflow:
- IntrinsicId = EncompassingInfo.Signed
- ? llvm::Intrinsic::sadd_with_overflow
- : llvm::Intrinsic::uadd_with_overflow;
+ IntrinsicId = EncompassingInfo.Signed ? Intrinsic::sadd_with_overflow
+ : Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_sub_overflow:
- IntrinsicId = EncompassingInfo.Signed
- ? llvm::Intrinsic::ssub_with_overflow
- : llvm::Intrinsic::usub_with_overflow;
+ IntrinsicId = EncompassingInfo.Signed ? Intrinsic::ssub_with_overflow
+ : Intrinsic::usub_with_overflow;
break;
case Builtin::BI__builtin_mul_overflow:
- IntrinsicId = EncompassingInfo.Signed
- ? llvm::Intrinsic::smul_with_overflow
- : llvm::Intrinsic::umul_with_overflow;
+ IntrinsicId = EncompassingInfo.Signed ? Intrinsic::smul_with_overflow
+ : Intrinsic::umul_with_overflow;
break;
}
@@ -5740,38 +5731,38 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
// Decide which of the overflow intrinsics we are lowering to:
- llvm::Intrinsic::ID IntrinsicId;
+ Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default: llvm_unreachable("Unknown overflow builtin id.");
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
- IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
+ IntrinsicId = Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_usub_overflow:
case Builtin::BI__builtin_usubl_overflow:
case Builtin::BI__builtin_usubll_overflow:
- IntrinsicId = llvm::Intrinsic::usub_with_overflow;
+ IntrinsicId = Intrinsic::usub_with_overflow;
break;
case Builtin::BI__builtin_umul_overflow:
case Builtin::BI__builtin_umull_overflow:
case Builtin::BI__builtin_umulll_overflow:
- IntrinsicId = llvm::Intrinsic::umul_with_overflow;
+ IntrinsicId = Intrinsic::umul_with_overflow;
break;
case Builtin::BI__builtin_sadd_overflow:
case Builtin::BI__builtin_saddl_overflow:
case Builtin::BI__builtin_saddll_overflow:
- IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
+ IntrinsicId = Intrinsic::sadd_with_overflow;
break;
case Builtin::BI__builtin_ssub_overflow:
case Builtin::BI__builtin_ssubl_overflow:
case Builtin::BI__builtin_ssubll_overflow:
- IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
+ IntrinsicId = Intrinsic::ssub_with_overflow;
break;
case Builtin::BI__builtin_smul_overflow:
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow:
- IntrinsicId = llvm::Intrinsic::smul_with_overflow;
+ IntrinsicId = Intrinsic::smul_with_overflow;
break;
}
@@ -5940,17 +5931,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto IntrinsicID = [&]() -> unsigned {
switch (BuiltinID) {
case Builtin::BI__builtin_ptrauth_auth:
- return llvm::Intrinsic::ptrauth_auth;
+ return Intrinsic::ptrauth_auth;
case Builtin::BI__builtin_ptrauth_auth_and_resign:
- return llvm::Intrinsic::ptrauth_resign;
+ return Intrinsic::ptrauth_resign;
case Builtin::BI__builtin_ptrauth_blend_discriminator:
- return llvm::Intrinsic::ptrauth_blend;
+ return Intrinsic::ptrauth_blend;
case Builtin::BI__builtin_ptrauth_sign_generic_data:
- return llvm::Intrinsic::ptrauth_sign_generic;
+ return Intrinsic::ptrauth_sign_generic;
case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
- return llvm::Intrinsic::ptrauth_sign;
+ return Intrinsic::ptrauth_sign;
case Builtin::BI__builtin_ptrauth_strip:
- return llvm::Intrinsic::ptrauth_strip;
+ return Intrinsic::ptrauth_strip;
}
llvm_unreachable("bad ptrauth intrinsic");
}();
@@ -6059,8 +6050,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Creating a generic function type to be able to call with any builtin or
// user defined type.
llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
return RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
@@ -6075,8 +6065,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Int32Ty, Int32Ty};
Value *Arg2 = EmitScalarExpr(E->getArg(2)),
*Arg3 = EmitScalarExpr(E->getArg(3));
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
Value *ACast = Builder.CreateAddrSpaceCast(Arg3, I8PTy);
// We know the third argument is an integer type, but we may need to cast
// it to i32.
@@ -6119,8 +6108,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Building the generic function prototype.
llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ReservedIDTy, ArgTys, false);
// We know the second argument is an integer type, but we may need to cast
// it to i32.
if (Arg1->getType() != Int32Ty)
@@ -6158,9 +6147,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Building the generic function prototype.
llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- llvm::ArrayRef<llvm::Type *>(ArgTys), false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(
+ llvm::Type::getVoidTy(getLLVMContext()), ArgTys, false);
return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, Arg1, PacketSize, PacketAlign}));
@@ -6183,8 +6171,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, PacketSize, PacketAlign}));
@@ -6256,8 +6243,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Name = "__enqueue_kernel_basic";
llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangePtrTy, GenericVoidPtrTy,
GenericVoidPtrTy};
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
@@ -6307,7 +6293,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
// Return the Alloca itself rather than a potential ascast as this is only
// used by the paired EmitLifetimeEnd.
- return std::tie(ElemPtr, TmpSize, Alloca);
+ return {ElemPtr, TmpSize, Alloca};
};
// Could have events and/or varargs.
@@ -6319,8 +6305,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Kernel =
Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
- std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
+ auto [ElemPtr, TmpSize, TmpPtr] = CreateArrayForSizeVar(4);
// Create a vector of the arguments, as well as a constant value to
// express to the runtime the number of variadic arguments.
@@ -6390,11 +6375,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
if (NumArgs == 7) {
// Has events but no variadics.
Name = "__enqueue_kernel_basic_events";
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(Int32Ty, ArgTys, false);
return RValue::get(
- EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
- llvm::ArrayRef<llvm::Value *>(Args)));
+ EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
}
// Has event info and variadics
// Pass the number of variadics to the runtime function too.
@@ -6402,16 +6386,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
ArgTys.push_back(Int32Ty);
Name = "__enqueue_kernel_events_varargs";
- llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
- std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
+ auto [ElemPtr, TmpSize, TmpPtr] = CreateArrayForSizeVar(7);
Args.push_back(ElemPtr);
ArgTys.push_back(ElemPtr->getType());
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- auto Call =
- RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
- llvm::ArrayRef<llvm::Value *>(Args)));
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
+ auto Call = RValue::get(
+ EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
if (TmpSize)
EmitLifetimeEnd(TmpSize, TmpPtr);
return Call;
@@ -9096,10 +9077,10 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
if (AccessKind != Write) {
assert(AccessKind == NormalRead || AccessKind == VolatileRead);
- llvm::Function *F = CGM.getIntrinsic(
- AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
- : llvm::Intrinsic::read_register,
- Types);
+ llvm::Function *F = CGM.getIntrinsic(AccessKind == VolatileRead
+ ? Intrinsic::read_volatile_register
+ : Intrinsic::read_register,
+ Types);
llvm::Value *Call = Builder.CreateCall(F, Metadata);
if (MixedTypes)
@@ -9113,7 +9094,7 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
return Call;
}
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::write_register, Types);
llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
if (MixedTypes) {
// Extend 32 bit write value to 64 bit to pass to write.
@@ -10024,7 +10005,7 @@ Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
Ops.push_back(EmitScalarExpr(Addr));
Tys.push_back(ConvertType(Addr->getType()));
- Function *F = CGM.getIntrinsic(IRIntr, ArrayRef(Tys));
+ Function *F = CGM.getIntrinsic(IRIntr, Tys);
Value *LoadResult = Builder.CreateCall(F, Ops);
Value *MvecOut = PoisonValue::get(MvecLType);
for (unsigned i = 0; i < NumVectors; ++i) {
@@ -10066,7 +10047,7 @@ Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < NumVectors; i++)
Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
- Function *F = CGM.getIntrinsic(IRIntr, ArrayRef(Tys));
+ Function *F = CGM.getIntrinsic(IRIntr, Tys);
Value *ToReturn = nullptr;
for (unsigned i = 0; i < NumVectors; i++) {
Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
@@ -11783,8 +11764,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
- llvm::Function *F =
- CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::read_register, {Int64Ty});
return Builder.CreateCall(F, Metadata);
}
@@ -11793,7 +11773,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
llvm_unreachable("Sema will ensure that the parameter is constant");
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::aarch64_break);
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::aarch64_break);
return Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))});
}
@@ -11982,12 +11962,12 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Type *Types[] = { RegisterType };
if (BuiltinID == clang::AArch64::BI_ReadStatusReg) {
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::read_register, Types);
return Builder.CreateCall(F, Metadata);
}
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::write_register, Types);
llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
return Builder.CreateCall(F, { Metadata, ArgValue });
@@ -14404,8 +14384,8 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
// Built the IR for the preserve_field_info intrinsic.
- llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getOrInsertDeclaration(
- &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
+ llvm::Function *FnGetFieldInfo = Intrinsic::getOrInsertDeclaration(
+ &CGM.getModule(), Intrinsic::bpf_preserve_field_info,
{FieldAddr->getType()});
return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
}
@@ -14426,11 +14406,11 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
llvm::Function *FnDecl;
if (BuiltinID == BPF::BI__builtin_btf_type_id)
- FnDecl = llvm::Intrinsic::getOrInsertDeclaration(
- &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
+ FnDecl = Intrinsic::getOrInsertDeclaration(
+ &CGM.getModule(), Intrinsic::bpf_btf_type_id, {});
else
- FnDecl = llvm::Intrinsic::getOrInsertDeclaration(
- &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
+ FnDecl = Intrinsic::getOrInsertDeclaration(
+ &CGM.getModule(), Intrinsic::bpf_preserve_type_info, {});
CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
return Fn;
@@ -14464,8 +14444,8 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
- llvm::Function *IntrinsicFn = llvm::Intrinsic::getOrInsertDeclaration(
- &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
+ llvm::Function *IntrinsicFn = Intrinsic::getOrInsertDeclaration(
+ &CGM.getModule(), Intrinsic::bpf_preserve_enum_value, {});
CallInst *Fn =
Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
@@ -14779,7 +14759,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
- IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512;
+ IID = Intrinsic::x86_avx512fp16_vfmadd_ph_512;
break;
case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
Subtract = true;
@@ -14787,7 +14767,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
- IID = llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512;
+ IID = Intrinsic::x86_avx512fp16_vfmaddsub_ph_512;
break;
case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
Subtract = true;
@@ -14795,21 +14775,23 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
- IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
+ IID = Intrinsic::x86_avx512_vfmadd_ps_512;
+ break;
case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
Subtract = true;
[[fallthrough]];
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
- IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
+ IID = Intrinsic::x86_avx512_vfmadd_pd_512;
+ break;
case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
Subtract = true;
[[fallthrough]];
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
- IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
+ IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
break;
case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
Subtract = true;
@@ -14817,7 +14799,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
- IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
+ IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;
break;
case clang::X86::BI__builtin_ia32_vfmsubph256_round_mask3:
Subtract = true;
@@ -14825,7 +14807,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
case clang::X86::BI__builtin_ia32_vfmaddph256_round_mask:
case clang::X86::BI__builtin_ia32_vfmaddph256_round_maskz:
case clang::X86::BI__builtin_ia32_vfmaddph256_round_mask3:
- IID = llvm::Intrinsic::x86_avx10_vfmaddph256;
+ IID = Intrinsic::x86_avx10_vfmaddph256;
break;
case clang::X86::BI__builtin_ia32_vfmsubaddph256_round_mask3:
Subtract = true;
@@ -14833,7 +14815,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
case clang::X86::BI__builtin_ia32_vfmaddsubph256_round_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubph256_round_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubph256_round_mask3:
- IID = llvm::Intrinsic::x86_avx10_vfmaddsubph256;
+ IID = Intrinsic::x86_avx10_vfmaddsubph256;
break;
case clang::X86::BI__builtin_ia32_vfmsubps256_round_mask3:
Subtract = true;
@@ -14841,7 +14823,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
case clang::X86::BI__builtin_ia32_vfmaddps256_round_mask:
case clang::X86::BI__builtin_ia32_vfmaddps256_round_maskz:
case clang::X86::BI__builtin_ia32_vfmaddps256_round_mask3:
- IID = llvm::Intrinsic::x86_avx10_vfmaddps256;
+ IID = Intrinsic::x86_avx10_vfmaddps256;
break;
case clang::X86::BI__builtin_ia32_vfmsubpd256_round_mask3:
Subtract = true;
@@ -14849,7 +14831,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
case clang::X86::BI__builtin_ia32_vfmaddpd256_round_mask:
case clang::X86::BI__builtin_ia32_vfmaddpd256_round_maskz:
case clang::X86::BI__builtin_ia32_vfmaddpd256_round_mask3:
- IID = llvm::Intrinsic::x86_avx10_vfmaddpd256;
+ IID = Intrinsic::x86_avx10_vfmaddpd256;
break;
case clang::X86::BI__builtin_ia32_vfmsubaddps256_round_mask3:
Subtract = true;
@@ -14857,7 +14839,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
case clang::X86::BI__builtin_ia32_vfmaddsubps256_round_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubps256_round_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubps256_round_mask3:
- IID = llvm::Intrinsic::x86_avx10_vfmaddsubps256;
+ IID = Intrinsic::x86_avx10_vfmaddsubps256;
break;
case clang::X86::BI__builtin_ia32_vfmsubaddpd256_round_mask3:
Subtract = true;
@@ -14865,7 +14847,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
case clang::X86::BI__builtin_ia32_vfmaddsubpd256_round_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubpd256_round_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubpd256_round_mask3:
- IID = llvm::Intrinsic::x86_avx10_vfmaddsubpd256;
+ IID = Intrinsic::x86_avx10_vfmaddsubpd256;
break;
}
@@ -15112,7 +15094,7 @@ static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
if (NumDstElts !=
cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
assert(NumDstElts == 4 && "Unexpected vector size");
- Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3});
+ Src = CGF.Builder.CreateShuffleVector(Src, {0, 1, 2, 3});
}
// Bitcast from vXi16 to vXf16.
@@ -15147,9 +15129,7 @@ Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
// Calculate the index needed to access the correct field based on the
// range. Also adjust the expected value.
- unsigned Index;
- unsigned Value;
- std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
+ auto [Index, Value] = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
#define X86_VENDOR(ENUM, STRING) \
.Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
@@ -15161,7 +15141,7 @@ Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
#define X86_CPU_SUBTYPE(ENUM, STR) \
.Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
#include "llvm/TargetParser/X86TargetParser.def"
- .Default({0, 0});
+ .Default({0, 0});
assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
// Grab the appropriate field from __cpu_model.
@@ -18037,10 +18017,9 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
llvm::Triple Triple = getTarget().getTriple();
- unsigned LinuxSupportMethod, LinuxIDValue, AIXSupportMethod, AIXIDValue;
typedef std::tuple<unsigned, unsigned, unsigned, unsigned> CPUInfo;
- std::tie(LinuxSupportMethod, LinuxIDValue, AIXSupportMethod, AIXIDValue) =
+ auto [LinuxSupportMethod, LinuxIDValue, AIXSupportMethod, AIXIDValue] =
static_cast<CPUInfo>(StringSwitch<CPUInfo>(CPUStr)
#define PPC_CPU(NAME, Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, \
AIXID) \
@@ -18076,28 +18055,24 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
if (Triple.isOSAIX()) {
- unsigned SupportMethod, FieldIdx, Mask, Value;
- CmpInst::Predicate CompOp;
typedef std::tuple<unsigned, unsigned, unsigned, CmpInst::Predicate,
unsigned>
CPUSupportType;
- std::tie(SupportMethod, FieldIdx, Mask, CompOp, Value) =
- static_cast<CPUSupportType>(StringSwitch<CPUSupportType>(CPUStr)
+ auto [SupportMethod, FieldIdx, Mask, CompOp, Value] =
+ static_cast<CPUSupportType>(
+ StringSwitch<CPUSupportType>(CPUStr)
#define PPC_AIX_FEATURE(NAME, DESC, SUPPORT_METHOD, INDEX, MASK, COMP_OP, \
VALUE) \
.Case(NAME, {SUPPORT_METHOD, INDEX, MASK, COMP_OP, VALUE})
#include "llvm/TargetParser/PPCTargetParser.def"
- .Default({BUILTIN_PPC_FALSE, 0, 0,
- CmpInst::Predicate(), 0}));
+ .Default({BUILTIN_PPC_FALSE, 0, 0, CmpInst::Predicate(), 0}));
return GenAIXPPCBuiltinCpuExpr(SupportMethod, FieldIdx, Mask, CompOp,
Value);
}
assert(Triple.isOSLinux() &&
"__builtin_cpu_supports() is only supported for AIX and Linux.");
- unsigned FeatureWord;
- unsigned BitMask;
- std::tie(FeatureWord, BitMask) =
+ auto [FeatureWord, BitMask] =
StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
#define PPC_LNX_FEATURE(Name, Description, EnumName, Bitmask, FA_WORD) \
.Case(Name, {FA_WORD, Bitmask})
@@ -18823,7 +18798,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
if (getTarget().isLittleEndian()) {
// Reverse the double words in the vector we will extract from.
Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
- Op0 = Builder.CreateShuffleVector(Op0, Op0, ArrayRef<int>{1, 0});
+ Op0 = Builder.CreateShuffleVector(Op0, Op0, {1, 0});
// Reverse the index.
Index = MaxIndex - Index;
@@ -18859,8 +18834,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// Emit the call, then reverse the double words of the results vector.
Value *Call = Builder.CreateCall(F, {Op0, Op1});
- Value *ShuffleCall =
- Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
+ Value *ShuffleCall = Builder.CreateShuffleVector(Call, Call, {1, 0});
return ShuffleCall;
} else {
Op1 = ConstantInt::getSigned(Int32Ty, Index);
@@ -19475,11 +19449,11 @@ static Intrinsic::ID getWaveActiveSumIntrinsic(llvm::Triple::ArchType Arch,
CGHLSLRuntime &RT, QualType QT) {
switch (Arch) {
case llvm::Triple::spirv:
- return llvm::Intrinsic::spv_wave_reduce_sum;
+ return Intrinsic::spv_wave_reduce_sum;
case llvm::Triple::dxil: {
if (QT->isUnsignedIntegerType())
- return llvm::Intrinsic::dx_wave_reduce_usum;
- return llvm::Intrinsic::dx_wave_reduce_sum;
+ return Intrinsic::dx_wave_reduce_usum;
+ return Intrinsic::dx_wave_reduce_sum;
}
default:
llvm_unreachable("Intrinsic WaveActiveSum"
@@ -19493,12 +19467,12 @@ static Intrinsic::ID getWaveActiveMaxIntrinsic(llvm::Triple::ArchType Arch,
switch (Arch) {
case llvm::Triple::spirv:
if (QT->isUnsignedIntegerType())
- return llvm::Intrinsic::spv_wave_reduce_umax;
- return llvm::Intrinsic::spv_wave_reduce_max;
+ return Intrinsic::spv_wave_reduce_umax;
+ return Intrinsic::spv_wave_reduce_max;
case llvm::Triple::dxil: {
if (QT->isUnsignedIntegerType())
- return llvm::Intrinsic::dx_wave_reduce_umax;
- return llvm::Intrinsic::dx_wave_reduce_max;
+ return Intrinsic::dx_wave_reduce_umax;
+ return Intrinsic::dx_wave_reduce_max;
}
default:
llvm_unreachable("Intrinsic WaveActiveMax"
@@ -19537,17 +19511,17 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
LowB = Builder.CreateExtractElement(OpB, (uint64_t)0, "LowB");
HighB = Builder.CreateExtractElement(OpB, (uint64_t)1, "HighB");
} else {
- LowA = Builder.CreateShuffleVector(OpA, ArrayRef<int>{0, 2}, "LowA");
- HighA = Builder.CreateShuffleVector(OpA, ArrayRef<int>{1, 3}, "HighA");
- LowB = Builder.CreateShuffleVector(OpB, ArrayRef<int>{0, 2}, "LowB");
- HighB = Builder.CreateShuffleVector(OpB, ArrayRef<int>{1, 3}, "HighB");
+ LowA = Builder.CreateShuffleVector(OpA, {0, 2}, "LowA");
+ HighA = Builder.CreateShuffleVector(OpA, {1, 3}, "HighA");
+ LowB = Builder.CreateShuffleVector(OpB, {0, 2}, "LowB");
+ HighB = Builder.CreateShuffleVector(OpB, {1, 3}, "HighB");
}
// Use an uadd_with_overflow to compute the sum of low words and obtain a
// carry value
llvm::Value *Carry;
llvm::Value *LowSum = EmitOverflowIntrinsic(
- *this, llvm::Intrinsic::uadd_with_overflow, LowA, LowB, Carry);
+ *this, Intrinsic::uadd_with_overflow, LowA, LowB, Carry);
llvm::Value *ZExtCarry =
Builder.CreateZExt(Carry, HighA->getType(), "CarryZExt");
@@ -19557,8 +19531,7 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
Builder.CreateAdd(HighSum, ZExtCarry, "HighSumPlusCarry");
if (NumElements == 4) {
- return Builder.CreateShuffleVector(LowSum, HighSumPlusCarry,
- ArrayRef<int>{0, 2, 1, 3},
+ return Builder.CreateShuffleVector(LowSum, HighSumPlusCarry, {0, 2, 1, 3},
"hlsl.AddUint64");
}
@@ -19578,14 +19551,13 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
return Builder.CreateIntrinsic(
RetTy, CGM.getHLSLRuntime().getCreateResourceGetPointerIntrinsic(),
- ArrayRef<Value *>{HandleOp, IndexOp});
+ {HandleOp, IndexOp});
}
case Builtin::BI__builtin_hlsl_all: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
return Builder.CreateIntrinsic(
/*ReturnType=*/llvm::Type::getInt1Ty(getLLVMContext()),
- CGM.getHLSLRuntime().getAllIntrinsic(), ArrayRef<Value *>{Op0}, nullptr,
- "hlsl.all");
+ CGM.getHLSLRuntime().getAllIntrinsic(), {Op0}, nullptr, "hlsl.all");
}
case Builtin::BI__builtin_hlsl_and: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
@@ -19601,8 +19573,7 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
Value *Op0 = EmitScalarExpr(E->getArg(0));
return Builder.CreateIntrinsic(
/*ReturnType=*/llvm::Type::getInt1Ty(getLLVMContext()),
- CGM.getHLSLRuntime().getAnyIntrinsic(), ArrayRef<Value *>{Op0}, nullptr,
- "hlsl.any");
+ CGM.getHLSLRuntime().getAnyIntrinsic(), {Op0}, nullptr, "hlsl.any");
}
case Builtin::BI__builtin_hlsl_asdouble:
return handleAsDoubleBuiltin(*this, E);
@@ -19625,8 +19596,8 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
Intr = CGM.getHLSLRuntime().getSClampIntrinsic();
}
return Builder.CreateIntrinsic(
- /*ReturnType=*/OpX->getType(), Intr,
- ArrayRef<Value *>{OpX, OpMin, OpMax}, nullptr, "hlsl.clamp");
+ /*ReturnType=*/OpX->getType(), Intr, {OpX, OpMin, OpMax}, nullptr,
+ "hlsl.clamp");
}
case Builtin::BI__builtin_hlsl_cross: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
@@ -19641,7 +19612,7 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
"input vectors must have 3 elements each");
return Builder.CreateIntrinsic(
/*ReturnType=*/Op0->getType(), CGM.getHLSLRuntime().getCrossIntrinsic(),
- ArrayRef<Value *>{Op0, Op1}, nullptr, "hlsl.cross");
+ {Op0, Op1}, nullptr, "hlsl.cross");
}
case Builtin::BI__builtin_hlsl_dot: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
@@ -19679,7 +19650,7 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
return Builder.CreateIntrinsic(
/*ReturnType=*/T0->getScalarType(),
getDotProductIntrinsic(CGM.getHLSLRuntime(), VecTy0->getElementType()),
- ArrayRef<Value *>{Op0, Op1}, nullptr, "hlsl.dot");
+ {Op0, Op1}, nullptr, "hlsl.dot");
}
case Builtin::BI__builtin_hlsl_dot4add_i8packed: {
Value *A = EmitScalarExpr(E->getArg(0));
@@ -19688,7 +19659,7 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID = CGM.getHLSLRuntime().getDot4AddI8PackedIntrinsic();
return Builder.CreateIntrinsic(
- /*ReturnType=*/C->getType(), ID, ArrayRef<Value *>{A, B, C}, nullptr,
+ /*ReturnType=*/C->getType(), ID, {A, B, C}, nullptr,
"hlsl.dot4add.i8packed");
}
case Builtin::BI__builtin_hlsl_dot4add_u8packed: {
@@ -19698,7 +19669,7 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID = CGM.getHLSLRuntime().getDot4AddU8PackedIntrinsic();
return Builder.CreateIntrinsic(
- /*ReturnType=*/C->getType(), ID, ArrayRef<Value *>{A, B, C}, nullptr,
+ /*ReturnType=*/C->getType(), ID, {A, B, C}, nullptr,
"hlsl.dot4add.u8packed");
}
case Builtin::BI__builtin_hlsl_elementwise_firstbithigh: {
@@ -19707,15 +19678,15 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
return Builder.CreateIntrinsic(
/*ReturnType=*/ConvertType(E->getType()),
getFirstBitHighIntrinsic(CGM.getHLSLRuntime(), E->getArg(0)->getType()),
- ArrayRef<Value *>{X}, nullptr, "hlsl.firstbithigh");
+ {X}, nullptr, "hlsl.firstbithigh");
}
case Builtin::BI__builtin_hlsl_elementwise_firstbitlow: {
Value *X = EmitScalarExpr(E->getArg(0));
return Builder.CreateIntrinsic(
/*ReturnType=*/ConvertType(E->getType()),
- CGM.getHLSLRuntime().getFirstBitLowIntrinsic(), ArrayRef<Value *>{X},
- nullptr, "hlsl.firstbitlow");
+ CGM.getHLSLRuntime().getFirstBitLowIntrinsic(), {X}, nullptr,
+ "hlsl.firstbitlow");
}
case Builtin::BI__builtin_hlsl_lerp: {
Value *X = EmitScalarExpr(E->getArg(0));
@@ -19725,7 +19696,7 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
llvm_unreachable("lerp operand must have a float representation");
return Builder.CreateIntrinsic(
/*ReturnType=*/X->getType(), CGM.getHLSLRuntime().getLerpIntrinsic(),
- ArrayRef<Value *>{X, Y, S}, nullptr, "hlsl.lerp");
+ {X, Y, S}, nullptr, "hlsl.lerp");
}
case Builtin::BI__builtin_hlsl_normalize: {
Value *X = EmitScalarExpr(E->getArg(0));
@@ -19735,8 +19706,8 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
return Builder.CreateIntrinsic(
/*ReturnType=*/X->getType(),
- CGM.getHLSLRuntime().getNormalizeIntrinsic(), ArrayRef<Value *>{X},
- nullptr, "hlsl.normalize");
+ CGM.getHLSLRuntime().getNormalizeIntrinsic(), {X}, nullptr,
+ "hlsl.normalize");
}
case Builtin::BI__builtin_hlsl_elementwise_degrees: {
Value *X = EmitScalarExpr(E->getArg(0));
@@ -19746,7 +19717,7 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
return Builder.CreateIntrinsic(
/*ReturnType=*/X->getType(), CGM.getHLSLRuntime().getDegreesIntrinsic(),
- ArrayRef<Value *>{X}, nullptr, "hlsl.degrees");
+ {X}, nullptr, "hlsl.degrees");
}
case Builtin::BI__builtin_hlsl_elementwise_frac: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
@@ -19754,7 +19725,7 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
llvm_unreachable("frac operand must have a float representation");
return Builder.CreateIntrinsic(
/*ReturnType=*/Op0->getType(), CGM.getHLSLRuntime().getFracIntrinsic(),
- ArrayRef<Value *>{Op0}, nullptr, "hlsl.frac");
+ {Op0}, nullptr, "hlsl.frac");
}
case Builtin::BI__builtin_hlsl_elementwise_isinf: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
@@ -19767,8 +19738,8 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
}
if (!E->getArg(0)->getType()->hasFloatingRepresentation())
llvm_unreachable("isinf operand must have a float representation");
- return Builder.CreateIntrinsic(retType, Intrinsic::dx_isinf,
- ArrayRef<Value *>{Op0}, nullptr, "dx.isinf");
+ return Builder.CreateIntrinsic(retType, Intrinsic::dx_isinf, {Op0}, nullptr,
+ "dx.isinf");
}
case Builtin::BI__builtin_hlsl_mad: {
Value *M = EmitScalarExpr(E->getArg(0));
@@ -19776,14 +19747,14 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
Value *B = EmitScalarExpr(E->getArg(2));
if (E->getArg(0)->getType()->hasFloatingRepresentation())
return Builder.CreateIntrinsic(
- /*ReturnType*/ M->getType(), Intrinsic::fmuladd,
- ArrayRef<Value *>{M, A, B}, nullptr, "hlsl.fmad");
+ /*ReturnType*/ M->getType(), Intrinsic::fmuladd, {M, A, B}, nullptr,
+ "hlsl.fmad");
if (E->getArg(0)->getType()->hasSignedIntegerRepresentation()) {
if (CGM.getTarget().getTriple().getArch() == llvm::Triple::dxil)
return Builder.CreateIntrinsic(
- /*ReturnType*/ M->getType(), Intrinsic::dx_imad,
- ArrayRef<Value *>{M, A, B}, nullptr, "dx.imad");
+ /*ReturnType*/ M->getType(), Intrinsic::dx_imad, {M, A, B}, nullptr,
+ "dx.imad");
Value *Mul = Builder.CreateNSWMul(M, A);
return Builder.CreateNSWAdd(Mul, B);
@@ -19791,8 +19762,8 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
assert(E->getArg(0)->getType()->hasUnsignedIntegerRepresentation());
if (CGM.getTarget().getTriple().getArch() == llvm::Triple::dxil)
return Builder.CreateIntrinsic(
- /*ReturnType=*/M->getType(), Intrinsic::dx_umad,
- ArrayRef<Value *>{M, A, B}, nullptr, "dx.umad");
+ /*ReturnType=*/M->getType(), Intrinsic::dx_umad, {M, A, B}, nullptr,
+ "dx.umad");
Value *Mul = Builder.CreateNUWMul(M, A);
return Builder.CreateNUWAdd(Mul, B);
@@ -19817,7 +19788,7 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
llvm_unreachable("rsqrt operand must have a float representation");
return Builder.CreateIntrinsic(
/*ReturnType=*/Op0->getType(), CGM.getHLSLRuntime().getRsqrtIntrinsic(),
- ArrayRef<Value *>{Op0}, nullptr, "hlsl.rsqrt");
+ {Op0}, nullptr, "hlsl.rsqrt");
}
case Builtin::BI__builtin_hlsl_elementwise_saturate: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
@@ -19825,8 +19796,8 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
"saturate operand must have a float representation");
return Builder.CreateIntrinsic(
/*ReturnType=*/Op0->getType(),
- CGM.getHLSLRuntime().getSaturateIntrinsic(), ArrayRef<Value *>{Op0},
- nullptr, "hlsl.saturate");
+ CGM.getHLSLRuntime().getSaturateIntrinsic(), {Op0}, nullptr,
+ "hlsl.saturate");
}
case Builtin::BI__builtin_hlsl_select: {
Value *OpCond = EmitScalarExpr(E->getArg(0));
@@ -19865,7 +19836,7 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
"step operands must have a float representation");
return Builder.CreateIntrinsic(
/*ReturnType=*/Op0->getType(), CGM.getHLSLRuntime().getStepIntrinsic(),
- ArrayRef<Value *>{Op0, Op1}, nullptr, "hlsl.step");
+ {Op0, Op1}, nullptr, "hlsl.step");
}
case Builtin::BI__builtin_hlsl_wave_active_all_true: {
Value *Op = EmitScalarExpr(E->getArg(0));
@@ -19889,42 +19860,41 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
Value *OpExpr = EmitScalarExpr(E->getArg(0));
Intrinsic::ID ID = CGM.getHLSLRuntime().getWaveActiveCountBitsIntrinsic();
return EmitRuntimeCall(
- Intrinsic::getOrInsertDeclaration(&CGM.getModule(), ID),
- ArrayRef{OpExpr});
+ Intrinsic::getOrInsertDeclaration(&CGM.getModule(), ID), {OpExpr});
}
case Builtin::BI__builtin_hlsl_wave_active_sum: {
// Due to the use of variadic arguments, explicitly retreive argument
Value *OpExpr = EmitScalarExpr(E->getArg(0));
- llvm::FunctionType *FT = llvm::FunctionType::get(
- OpExpr->getType(), ArrayRef{OpExpr->getType()}, false);
+ llvm::FunctionType *FT =
+ llvm::FunctionType::get(OpExpr->getType(), {OpExpr->getType()}, false);
Intrinsic::ID IID = getWaveActiveSumIntrinsic(
getTarget().getTriple().getArch(), CGM.getHLSLRuntime(),
E->getArg(0)->getType());
// Get overloaded name
std::string Name =
- Intrinsic::getName(IID, ArrayRef{OpExpr->getType()}, &CGM.getModule());
+ Intrinsic::getName(IID, {OpExpr->getType()}, &CGM.getModule());
return EmitRuntimeCall(CGM.CreateRuntimeFunction(FT, Name, {},
/*Local=*/false,
/*AssumeConvergent=*/true),
- ArrayRef{OpExpr}, "hlsl.wave.active.sum");
+ {OpExpr}, "hlsl.wave.active.sum");
}
case Builtin::BI__builtin_hlsl_wave_active_max: {
// Due to the use of variadic arguments, explicitly retreive argument
Value *OpExpr = EmitScalarExpr(E->getArg(0));
- llvm::FunctionType *FT = llvm::FunctionType::get(
- OpExpr->getType(), ArrayRef{OpExpr->getType()}, false);
+ llvm::FunctionType *FT =
+ llvm::FunctionType::get(OpExpr->getType(), {OpExpr->getType()}, false);
Intrinsic::ID IID = getWaveActiveMaxIntrinsic(
getTarget().getTriple().getArch(), CGM.getHLSLRuntime(),
E->getArg(0)->getType());
// Get overloaded name
std::string Name =
- Intrinsic::getName(IID, ArrayRef{OpExpr->getType()}, &CGM.getModule());
+ Intrinsic::getName(IID, {OpExpr->getType()}, &CGM.getModule());
return EmitRuntimeCall(CGM.CreateRuntimeFunction(FT, Name, {},
/*Local=*/false,
/*AssumeConvergent=*/true),
- ArrayRef{OpExpr}, "hlsl.wave.active.max");
+ {OpExpr}, "hlsl.wave.active.max");
}
case Builtin::BI__builtin_hlsl_wave_get_lane_index: {
// We don't define a SPIR-V intrinsic, instead it is a SPIR-V built-in
@@ -19954,17 +19924,16 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
Value *OpExpr = EmitScalarExpr(E->getArg(0));
Value *OpIndex = EmitScalarExpr(E->getArg(1));
llvm::FunctionType *FT = llvm::FunctionType::get(
- OpExpr->getType(), ArrayRef{OpExpr->getType(), OpIndex->getType()},
- false);
+ OpExpr->getType(), {OpExpr->getType(), OpIndex->getType()}, false);
// Get overloaded name
std::string Name =
Intrinsic::getName(CGM.getHLSLRuntime().getWaveReadLaneAtIntrinsic(),
- ArrayRef{OpExpr->getType()}, &CGM.getModule());
+ {OpExpr->getType()}, &CGM.getModule());
return EmitRuntimeCall(CGM.CreateRuntimeFunction(FT, Name, {},
/*Local=*/false,
/*AssumeConvergent=*/true),
- ArrayRef{OpExpr, OpIndex}, "hlsl.wave.readlane");
+ {OpExpr, OpIndex}, "hlsl.wave.readlane");
}
case Builtin::BI__builtin_hlsl_elementwise_sign: {
auto *Arg0 = E->getArg(0);
@@ -19986,9 +19955,9 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
ConstantInt::get(retType, 1), "hlsl.sign");
}
- return Builder.CreateIntrinsic(
- retType, CGM.getHLSLRuntime().getSignIntrinsic(),
- ArrayRef<Value *>{Op0}, nullptr, "hlsl.sign");
+ return Builder.CreateIntrinsic(retType,
+ CGM.getHLSLRuntime().getSignIntrinsic(),
+ {Op0}, nullptr, "hlsl.sign");
}
case Builtin::BI__builtin_hlsl_elementwise_radians: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
@@ -19996,8 +19965,8 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
"radians operand must have a float representation");
return Builder.CreateIntrinsic(
/*ReturnType=*/Op0->getType(),
- CGM.getHLSLRuntime().getRadiansIntrinsic(), ArrayRef<Value *>{Op0},
- nullptr, "hlsl.radians");
+ CGM.getHLSLRuntime().getRadiansIntrinsic(), {Op0}, nullptr,
+ "hlsl.radians");
}
case Builtin::BI__builtin_hlsl_buffer_update_counter: {
Value *ResHandle = EmitScalarExpr(E->getArg(0));
@@ -20006,7 +19975,7 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
return Builder.CreateIntrinsic(
/*ReturnType=*/Offset->getType(),
CGM.getHLSLRuntime().getBufferUpdateCounterIntrinsic(),
- ArrayRef<Value *>{ResHandle, OffsetI8}, nullptr);
+ {ResHandle, OffsetI8}, nullptr);
}
case Builtin::BI__builtin_hlsl_elementwise_splitdouble: {
@@ -20373,12 +20342,10 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
// The builtins take these arguments as vec4 where the last element is
// ignored. The intrinsic takes them as vec3.
- RayOrigin = Builder.CreateShuffleVector(RayOrigin, RayOrigin,
- ArrayRef<int>{0, 1, 2});
- RayDir =
- Builder.CreateShuffleVector(RayDir, RayDir, ArrayRef<int>{0, 1, 2});
- RayInverseDir = Builder.CreateShuffleVector(RayInverseDir, RayInverseDir,
- ArrayRef<int>{0, 1, 2});
+ RayOrigin = Builder.CreateShuffleVector(RayOrigin, RayOrigin, {0, 1, 2});
+ RayDir = Builder.CreateShuffleVector(RayDir, RayDir, {0, 1, 2});
+ RayInverseDir =
+ Builder.CreateShuffleVector(RayInverseDir, RayInverseDir, {0, 1, 2});
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
{NodePtr->getType(), RayDir->getType()});
@@ -20930,7 +20897,7 @@ Value *CodeGenFunction::EmitSPIRVBuiltinExpr(unsigned BuiltinID,
"Distance operands must be a vector");
return Builder.CreateIntrinsic(
/*ReturnType=*/X->getType()->getScalarType(), Intrinsic::spv_distance,
- ArrayRef<Value *>{X, Y}, nullptr, "spv.distance");
+ {X, Y}, nullptr, "spv.distance");
}
case SPIRV::BI__builtin_spirv_length: {
Value *X = EmitScalarExpr(E->getArg(0));
@@ -20940,7 +20907,7 @@ Value *CodeGenFunction::EmitSPIRVBuiltinExpr(unsigned BuiltinID,
"length operand must be a vector");
return Builder.CreateIntrinsic(
/*ReturnType=*/X->getType()->getScalarType(), Intrinsic::spv_length,
- ArrayRef<Value *>{X}, nullptr, "spv.length");
+ {X}, nullptr, "spv.length");
}
case SPIRV::BI__builtin_spirv_reflect: {
Value *I = EmitScalarExpr(E->getArg(0));
@@ -20952,8 +20919,8 @@ Value *CodeGenFunction::EmitSPIRVBuiltinExpr(unsigned BuiltinID,
E->getArg(1)->getType()->isVectorType() &&
"Reflect operands must be a vector");
return Builder.CreateIntrinsic(
- /*ReturnType=*/I->getType(), Intrinsic::spv_reflect,
- ArrayRef<Value *>{I, N}, nullptr, "spv.reflect");
+ /*ReturnType=*/I->getType(), Intrinsic::spv_reflect, {I, N}, nullptr,
+ "spv.reflect");
}
}
return nullptr;
@@ -22950,7 +22917,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
Value *Trunc = Builder.CreateCall(Callee, Vec);
Value *Splat = Constant::getNullValue(TruncT);
- return Builder.CreateShuffleVector(Trunc, Splat, ArrayRef<int>{0, 1, 2, 3});
+ return Builder.CreateShuffleVector(Trunc, Splat, {0, 1, 2, 3});
}
case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
Value *Ops[18];
More information about the cfe-commits
mailing list