[llvm] [NFC] Rename internal fns (PR #77994)
via llvm-commits
llvm-commits at lists.llvm.org
Sat Jan 20 11:23:33 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-llvm-ir
Author: Nathan Sidwell (urnathan)
Changes
IIUC, internal functions should use a lowerCaseName. This patch renames them accordingly and uses an anonymous namespace rather than `static`.
I left the existing 'llvm::Name' entry points as they are, rather than move all the definitions inside a `namespace llvm {` block.
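For context, a minimal hypothetical sketch of the convention the description refers to (the `Widget` names and the `llvm_example` namespace are invented, not taken from the patch): file-local helpers get lowerCamelCase names and internal linkage, while the public `llvm::UpperCamelCase` entry points keep their spelling and simply forward to them.

```cpp
#include <string>

namespace {

// Internal helper: lowerCamelCase name, visible only in this translation unit.
// (Invented example; in the hunks quoted below the real helpers stay `static`,
// the anonymous namespace being the alternative the description mentions.)
bool upgradeWidgetIntrinsic(const std::string &Name) {
  // starts_with-style check without requiring C++20.
  return Name.rfind("widget.old.", 0) == 0;
}

} // end anonymous namespace

namespace llvm_example { // stand-in for the real `llvm` namespace

// Existing public entry point keeps its UpperCamelCase name and forwards to
// the renamed internal helper, so callers outside the file are unaffected.
bool UpgradeWidgetIntrinsic(const std::string &Name) {
  return upgradeWidgetIntrinsic(Name);
}

} // namespace llvm_example
```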
---
Patch is 53.31 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/77994.diff
1 File Affected:
- (modified) llvm/lib/IR/AutoUpgrade.cpp (+130-138)
``````````diff
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 3a3b41fb786c2d8..848beda834c68e1 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -53,7 +53,7 @@ static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
-static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
+static bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
Function *&NewFn) {
// Check whether this is an old version of the function, which received
// v4f32 arguments.
@@ -69,7 +69,7 @@ static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
-static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
+static bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
Function *&NewFn) {
// Check that the last argument is an i32.
Type *LastArgType = F->getFunctionType()->getParamType(
@@ -85,7 +85,7 @@ static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
// Upgrade the declaration of fp compare intrinsics that change return type
// from scalar to vXi1 mask.
-static bool UpgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
+static bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
Function *&NewFn) {
// Check if the return type is a vector.
if (F->getReturnType()->isVectorTy())
@@ -96,7 +96,7 @@ static bool UpgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
return true;
}
-static bool UpgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
+static bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
Function *&NewFn) {
if (F->getReturnType()->getScalarType()->isBFloatTy())
return false;
@@ -106,7 +106,7 @@ static bool UpgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
return true;
}
-static bool UpgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
+static bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
Function *&NewFn) {
if (F->getFunctionType()->getParamType(1)->getScalarType()->isBFloatTy())
return false;
@@ -116,7 +116,7 @@ static bool UpgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
return true;
}
-static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
+static bool shouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
// All of the intrinsics matches below should be marked with which llvm
// version started autoupgrading them. At some point in the future we would
// like to use this information to remove upgrade code for some older
@@ -483,13 +483,13 @@ static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
Name.starts_with("vcvtph2ps.")); // Added in 11.0
}
-static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
+static bool upgradeX86IntrinsicFunction(Function *F, StringRef Name,
Function *&NewFn) {
// Only handle intrinsics that start with "x86.".
if (!Name.consume_front("x86."))
return false;
- if (ShouldUpgradeX86Intrinsic(F, Name)) {
+ if (shouldUpgradeX86Intrinsic(F, Name)) {
NewFn = nullptr;
return true;
}
@@ -515,7 +515,7 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
.Case("nzc", Intrinsic::x86_sse41_ptestnzc)
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic)
- return UpgradePTESTIntrinsic(F, ID, NewFn);
+ return upgradePTESTIntrinsic(F, ID, NewFn);
return false;
}
@@ -533,7 +533,7 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
.Case("avx2.mpsadbw", Intrinsic::x86_avx2_mpsadbw)
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic)
- return UpgradeX86IntrinsicsWith8BitMask(F, ID, NewFn);
+ return upgradeX86IntrinsicsWith8BitMask(F, ID, NewFn);
if (Name.consume_front("avx512.mask.cmp.")) {
// Added in 7.0
@@ -546,7 +546,7 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
.Case("ps.512", Intrinsic::x86_avx512_mask_cmp_ps_512)
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic)
- return UpgradeX86MaskedFPCompare(F, ID, NewFn);
+ return upgradeX86MaskedFPCompare(F, ID, NewFn);
return false; // No other 'x86.avx523.mask.cmp.*'.
}
@@ -567,7 +567,7 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
Intrinsic::x86_avx512bf16_cvtneps2bf16_512)
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic)
- return UpgradeX86BF16Intrinsic(F, ID, NewFn);
+ return upgradeX86BF16Intrinsic(F, ID, NewFn);
// Added in 9.0
ID = StringSwitch<Intrinsic::ID>(Name)
@@ -576,7 +576,7 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
.Case("dpbf16ps.512", Intrinsic::x86_avx512bf16_dpbf16ps_512)
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic)
- return UpgradeX86BF16DPIntrinsic(F, ID, NewFn);
+ return upgradeX86BF16DPIntrinsic(F, ID, NewFn);
return false; // No other 'x86.avx512bf16.*'.
}
@@ -623,7 +623,7 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
// Upgrade ARM (IsArm) or Aarch64 (!IsArm) intrinsic fns. Return true iff so.
// IsArm: 'arm.*', !IsArm: 'aarch64.*'.
-static bool UpgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
+static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
StringRef Name,
Function *&NewFn) {
if (Name.starts_with("rbit")) {
@@ -899,7 +899,7 @@ static bool UpgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
return false; // No other 'arm.*', 'aarch64.*'.
}
-static Intrinsic::ID ShouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
+static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
if (Name.consume_front("abs."))
return StringSwitch<Intrinsic::ID>(Name)
.Case("bf16", Intrinsic::nvvm_abs_bf16)
@@ -979,7 +979,7 @@ static Intrinsic::ID ShouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
return Intrinsic::not_intrinsic;
}
-static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
+static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
assert(F && "Illegal to upgrade a non-existent Function.");
StringRef Name = F->getName();
@@ -993,7 +993,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
case 'a': {
bool IsArm = Name.consume_front("arm.");
if (IsArm || Name.consume_front("aarch64.")) {
- if (UpgradeArmOrAarch64IntrinsicFunction(IsArm, F, Name, NewFn))
+ if (upgradeArmOrAarch64IntrinsicFunction(IsArm, F, Name, NewFn))
return true;
break;
}
@@ -1190,7 +1190,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
// Check for nvvm intrinsics that need a return type adjustment.
if (!F->getReturnType()->getScalarType()->isBFloatTy()) {
- Intrinsic::ID IID = ShouldUpgradeNVPTXBF16Intrinsic(Name);
+ Intrinsic::ID IID = shouldUpgradeNVPTXBF16Intrinsic(Name);
if (IID != Intrinsic::not_intrinsic) {
NewFn = nullptr;
return true;
@@ -1353,7 +1353,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
break;
case 'x':
- if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
+ if (upgradeX86IntrinsicFunction(F, Name, NewFn))
return true;
}
@@ -1398,7 +1398,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
NewFn = nullptr;
- bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
+ bool Upgraded = upgradeIntrinsicFunction1(F, NewFn);
assert(F != NewFn && "Intrinsic function upgraded to the same function");
// Upgrade intrinsic attributes. This does not change the function.
@@ -1442,8 +1442,8 @@ GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
-static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
- Value *Op, unsigned Shift) {
+static Value *upgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
+ unsigned Shift) {
auto *ResultTy = cast<FixedVectorType>(Op->getType());
unsigned NumElts = ResultTy->getNumElements() * 8;
@@ -1476,7 +1476,7 @@ static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
-static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
+static Value *upgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
unsigned Shift) {
auto *ResultTy = cast<FixedVectorType>(Op->getType());
unsigned NumElts = ResultTy->getNumElements() * 8;
@@ -1528,8 +1528,8 @@ static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
return Mask;
}
-static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
- Value *Op0, Value *Op1) {
+static Value *emitX86Select(IRBuilder<> &Builder, Value *Mask, Value *Op0,
+ Value *Op1) {
// If the mask is all ones just emit the first operation.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
@@ -1540,8 +1540,8 @@ static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
return Builder.CreateSelect(Mask, Op0, Op1);
}
-static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
- Value *Op0, Value *Op1) {
+static Value *emitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask, Value *Op0,
+ Value *Op1) {
// If the mask is all ones just emit the first operation.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
@@ -1557,7 +1557,7 @@ static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting while VALIGN masks the immediate
// so we need to handle both cases. VALIGN also doesn't have 128-bit lanes.
-static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
+static Value *upgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
Value *Op1, Value *Shift,
Value *Passthru, Value *Mask,
bool IsVALIGN) {
@@ -1599,10 +1599,10 @@ static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
Value *Align = Builder.CreateShuffleVector(
Op1, Op0, ArrayRef(Indices, NumElts), "palignr");
- return EmitX86Select(Builder, Mask, Align, Passthru);
+ return emitX86Select(Builder, Mask, Align, Passthru);
}
-static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
+static Value *upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
bool ZeroMask, bool IndexForm) {
Type *Ty = CI.getType();
unsigned VecWidth = Ty->getPrimitiveSizeInBits();
@@ -1660,10 +1660,10 @@ static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
: Builder.CreateBitCast(CI.getArgOperand(1),
Ty);
- return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
+ return emitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
}
-static Value *UpgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
+static Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
Intrinsic::ID IID) {
Type *Ty = CI.getType();
Value *Op0 = CI.getOperand(0);
@@ -1674,7 +1674,7 @@ static Value *UpgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
if (CI.arg_size() == 4) { // For masked intrinsics.
Value *VecSrc = CI.getOperand(2);
Value *Mask = CI.getOperand(3);
- Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+ Res = emitX86Select(Builder, Mask, Res, VecSrc);
}
return Res;
}
@@ -1701,7 +1701,7 @@ static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
if (CI.arg_size() == 4) { // For masked intrinsics.
Value *VecSrc = CI.getOperand(2);
Value *Mask = CI.getOperand(3);
- Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+ Res = emitX86Select(Builder, Mask, Res, VecSrc);
}
return Res;
}
@@ -1774,14 +1774,13 @@ static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
CI.getArgOperand(0);
Value *Mask = CI.getOperand(NumArgs - 1);
- Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+ Res = emitX86Select(Builder, Mask, Res, VecSrc);
}
return Res;
}
-static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
- Value *Ptr, Value *Data, Value *Mask,
- bool Aligned) {
+static Value *upgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr, Value *Data,
+ Value *Mask, bool Aligned) {
// Cast the pointer to the right type.
Ptr = Builder.CreateBitCast(Ptr,
llvm::PointerType::getUnqual(Data->getType()));
@@ -1801,9 +1800,8 @@ static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
}
-static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
- Value *Ptr, Value *Passthru, Value *Mask,
- bool Aligned) {
+static Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr,
+ Value *Passthru, Value *Mask, bool Aligned) {
Type *ValTy = Passthru->getType();
// Cast the pointer to the right type.
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
@@ -1831,7 +1829,7 @@ static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
if (CI.arg_size() == 3)
- Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
+ Res = emitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
return Res;
}
@@ -1859,13 +1857,13 @@ static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
Value *Res = Builder.CreateMul(LHS, RHS);
if (CI.arg_size() == 4)
- Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
+ Res = emitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
return Res;
}
// Applying mask on vector of i1's and make sure result is at least 8 bits wide.
-static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
+static Value *applyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
Value *Mask) {
unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
if (Mask) {
@@ -1915,19 +1913,19 @@ static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
Value *Mask = CI.getArgOperand(CI.arg_size() - 1);
- return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
+ return applyX86MaskOn1BitsVec(Builder, Cmp, Mask);
}
// Replace a masked intrinsic with an older unmasked intrinsic.
-static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
+static Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
Intrinsic::ID IID) {
Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
Value *Rep = Builder.CreateCall(Intrin,
{ CI.getArgOperand(0), CI.getArgOperand(1) });
- return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
+ return emitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
}
-static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
+static Value *upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
Value* A = CI.getArgOperand(0);
Value* B = CI.getArgOperand(1);
Value* Src = CI.getArgOperand(2);
@@ -1941,8 +1939,7 @@ static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
return Builder.CreateInsertElement(A, Select, (uint64_t)0);
}
-
-static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
+static Value *upgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
Value* Op = CI.getArgOperand(0);
Type* ReturnOp = CI.getType();
unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
@@ -2184,7 +2181,7 @@ static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
Args);
unsigned NumArgs = CI.arg_size();
- Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
+ Rep = emitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
CI.getArgOperand(NumArgs - 2));
return true;
}
@@ -2200,7 +2197,7 @@ void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
}
}
-static Value *UpgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
+static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
IRBuilder<> &Builder) {
if (Name == "mve.vctp64.old") {
// Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
@@ -2294,7 +2291,7 @@ static Value *UpgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
llvm_unreachable("Unknown function for ARM CallBase upgrade.");
}
-static Value *UpgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI,
+static Value *upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI,
Function *F, IRBuilder<> &Builder) {
const bool IsInc = Name.starts_with("atomic.inc.");
if (IsInc || Name.starts_with("atomic.dec.")) {
@@ -2448,7 +2445,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
if (IsX86 && Name == "avx512.mask.store.ss") {
Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
- UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
+ upgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
Mask, false);
// Remove intrinsic.
@@ -2456,10 +2453,10 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
return;
}
- if (IsX86 && (Name.starts_with("avx512.mask.store"))) {
+ if (IsX86 && Name.starts_with("avx512.mask.store")) {
// "avx512.mask.storeu." or "avx512.mask.store."
bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
- UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
+ upgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2), Aligned);
// Remove intrinsic.
@@ -2515,7 +2512,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
CI->getType()),
{CI->getArgOperand(0)});
}
- Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ Rep = emitX86Select(Builder, CI->getArgOperand(2),...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/77994
More information about the llvm-commits mailing list