[llvm] [NFC] Rename internal fns (PR #77994)

Nathan Sidwell via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 19 10:30:31 PST 2024


https://github.com/urnathan updated https://github.com/llvm/llvm-project/pull/77994

From e9cc4f3270290ebb1b63acc836775f7b47693c5a Mon Sep 17 00:00:00 2001
From: Nathan Sidwell <nathan at acm.org>
Date: Fri, 12 Jan 2024 17:29:14 -0500
Subject: [PATCH 1/2] [NFC] Rename internal fns

IIUC, internal functions should use lowerCamelCase names. This renames them
and puts them in an anonymous namespace rather than marking them 'static'.
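
For anyone unfamiliar with the convention, here is a minimal sketch of the
pattern being applied (the helper below is a made-up example, not something
from AutoUpgrade.cpp): both 'static' and an anonymous namespace give a
function internal linkage, so the change only affects the naming style and
how that linkage is spelled.

  // Illustrative only -- 'UpgradeWidget' is not from the patch.
  // Before: internal linkage spelled with 'static', UpperCamelCase name:
  //   static bool UpgradeWidget(int Kind) { return Kind != 0; }
  //
  // After: same internal linkage via an anonymous namespace, lowerCamelCase name.
  #include <cassert>

  namespace {

  bool upgradeWidget(int Kind) { return Kind != 0; }

  } // namespace

  int main() {
    assert(upgradeWidget(1) && !upgradeWidget(0));
    return 0;
  }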
---
 llvm/lib/IR/AutoUpgrade.cpp | 354 ++++++++++++++++++------------------
 1 file changed, 180 insertions(+), 174 deletions(-)

diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 3a3b41fb786c2d..745a9496f387db 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -49,12 +49,13 @@ static cl::opt<bool>
     DisableAutoUpgradeDebugInfo("disable-auto-upgrade-debug-info",
                                 cl::desc("Disable autoupgrade of debug info"));
 
-static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
+namespace {
+
+void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
 
 // Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
 // changed their type from v4f32 to v2i64.
-static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
-                                  Function *&NewFn) {
+bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn) {
   // Check whether this is an old version of the function, which received
   // v4f32 arguments.
   Type *Arg0Type = F->getFunctionType()->getParamType(0);
@@ -69,8 +70,8 @@ static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
 
 // Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
 // arguments have changed their type from i32 to i8.
-static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
-                                             Function *&NewFn) {
+bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
+                                      Function *&NewFn) {
   // Check that the last argument is an i32.
   Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
@@ -85,8 +86,8 @@ static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
 
 // Upgrade the declaration of fp compare intrinsics that change return type
 // from scalar to vXi1 mask.
-static bool UpgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
-                                      Function *&NewFn) {
+bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
+                               Function *&NewFn) {
   // Check if the return type is a vector.
   if (F->getReturnType()->isVectorTy())
     return false;
@@ -96,8 +97,7 @@ static bool UpgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
   return true;
 }
 
-static bool UpgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
-                                    Function *&NewFn) {
+bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn) {
   if (F->getReturnType()->getScalarType()->isBFloatTy())
     return false;
 
@@ -106,8 +106,8 @@ static bool UpgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
   return true;
 }
 
-static bool UpgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
-                                      Function *&NewFn) {
+bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
+                               Function *&NewFn) {
   if (F->getFunctionType()->getParamType(1)->getScalarType()->isBFloatTy())
     return false;
 
@@ -116,7 +116,7 @@ static bool UpgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
   return true;
 }
 
-static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
+bool shouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
   // All of the intrinsics matches below should be marked with which llvm
   // version started autoupgrading them. At some point in the future we would
   // like to use this information to remove upgrade code for some older
@@ -483,13 +483,13 @@ static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
           Name.starts_with("vcvtph2ps.")); // Added in 11.0
 }
 
-static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
-                                        Function *&NewFn) {
+bool upgradeX86IntrinsicFunction(Function *F, StringRef Name,
+                                 Function *&NewFn) {
   // Only handle intrinsics that start with "x86.".
   if (!Name.consume_front("x86."))
     return false;
 
-  if (ShouldUpgradeX86Intrinsic(F, Name)) {
+  if (shouldUpgradeX86Intrinsic(F, Name)) {
     NewFn = nullptr;
     return true;
   }
@@ -515,7 +515,7 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
              .Case("nzc", Intrinsic::x86_sse41_ptestnzc)
              .Default(Intrinsic::not_intrinsic);
     if (ID != Intrinsic::not_intrinsic)
-      return UpgradePTESTIntrinsic(F, ID, NewFn);
+      return upgradePTESTIntrinsic(F, ID, NewFn);
 
     return false;
   }
@@ -533,7 +533,7 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
            .Case("avx2.mpsadbw", Intrinsic::x86_avx2_mpsadbw)
            .Default(Intrinsic::not_intrinsic);
   if (ID != Intrinsic::not_intrinsic)
-    return UpgradeX86IntrinsicsWith8BitMask(F, ID, NewFn);
+    return upgradeX86IntrinsicsWith8BitMask(F, ID, NewFn);
 
   if (Name.consume_front("avx512.mask.cmp.")) {
     // Added in 7.0
@@ -546,7 +546,7 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
              .Case("ps.512", Intrinsic::x86_avx512_mask_cmp_ps_512)
              .Default(Intrinsic::not_intrinsic);
     if (ID != Intrinsic::not_intrinsic)
-      return UpgradeX86MaskedFPCompare(F, ID, NewFn);
+      return upgradeX86MaskedFPCompare(F, ID, NewFn);
     return false; // No other 'x86.avx523.mask.cmp.*'.
   }
 
@@ -567,7 +567,7 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
                    Intrinsic::x86_avx512bf16_cvtneps2bf16_512)
              .Default(Intrinsic::not_intrinsic);
     if (ID != Intrinsic::not_intrinsic)
-      return UpgradeX86BF16Intrinsic(F, ID, NewFn);
+      return upgradeX86BF16Intrinsic(F, ID, NewFn);
 
     // Added in 9.0
     ID = StringSwitch<Intrinsic::ID>(Name)
@@ -576,7 +576,7 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
              .Case("dpbf16ps.512", Intrinsic::x86_avx512bf16_dpbf16ps_512)
              .Default(Intrinsic::not_intrinsic);
     if (ID != Intrinsic::not_intrinsic)
-      return UpgradeX86BF16DPIntrinsic(F, ID, NewFn);
+      return upgradeX86BF16DPIntrinsic(F, ID, NewFn);
     return false; // No other 'x86.avx512bf16.*'.
   }
 
@@ -623,9 +623,8 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
 
 // Upgrade ARM (IsArm) or Aarch64 (!IsArm) intrinsic fns. Return true iff so.
 // IsArm: 'arm.*', !IsArm: 'aarch64.*'.
-static bool UpgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
-                                                 StringRef Name,
-                                                 Function *&NewFn) {
+bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
+                                          StringRef Name, Function *&NewFn) {
   if (Name.starts_with("rbit")) {
     // '(arm|aarch64).rbit'.
     NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
@@ -899,7 +898,7 @@ static bool UpgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
   return false; // No other 'arm.*', 'aarch64.*'.
 }
 
-static Intrinsic::ID ShouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
+Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
   if (Name.consume_front("abs."))
     return StringSwitch<Intrinsic::ID>(Name)
         .Case("bf16", Intrinsic::nvvm_abs_bf16)
@@ -979,7 +978,7 @@ static Intrinsic::ID ShouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
   return Intrinsic::not_intrinsic;
 }
 
-static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
+bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
   assert(F && "Illegal to upgrade a non-existent Function.");
 
   StringRef Name = F->getName();
@@ -993,7 +992,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
   case 'a': {
     bool IsArm = Name.consume_front("arm.");
     if (IsArm || Name.consume_front("aarch64.")) {
-      if (UpgradeArmOrAarch64IntrinsicFunction(IsArm, F, Name, NewFn))
+      if (upgradeArmOrAarch64IntrinsicFunction(IsArm, F, Name, NewFn))
         return true;
       break;
     }
@@ -1190,7 +1189,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
 
       // Check for nvvm intrinsics that need a return type adjustment.
       if (!F->getReturnType()->getScalarType()->isBFloatTy()) {
-        Intrinsic::ID IID = ShouldUpgradeNVPTXBF16Intrinsic(Name);
+        Intrinsic::ID IID = shouldUpgradeNVPTXBF16Intrinsic(Name);
         if (IID != Intrinsic::not_intrinsic) {
           NewFn = nullptr;
           return true;
@@ -1353,7 +1352,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
     break;
 
   case 'x':
-    if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
+    if (upgradeX86IntrinsicFunction(F, Name, NewFn))
       return true;
   }
 
@@ -1396,9 +1395,11 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
   return false;
 }
 
+} // namespace
+
 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
   NewFn = nullptr;
-  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
+  bool Upgraded = upgradeIntrinsicFunction1(F, NewFn);
   assert(F != NewFn && "Intrinsic function upgraded to the same function");
 
   // Upgrade intrinsic attributes.  This does not change the function.
@@ -1440,10 +1441,12 @@ GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
                             NewInit, GV->getName());
 }
 
+namespace {
+
 // Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
 // to byte shuffles.
-static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
-                                         Value *Op, unsigned Shift) {
+Value *upgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
+                                  unsigned Shift) {
   auto *ResultTy = cast<FixedVectorType>(Op->getType());
   unsigned NumElts = ResultTy->getNumElements() * 8;
 
@@ -1476,8 +1479,8 @@ static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
 
 // Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
 // to byte shuffles.
-static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
-                                         unsigned Shift) {
+Value *upgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
+                                  unsigned Shift) {
   auto *ResultTy = cast<FixedVectorType>(Op->getType());
   unsigned NumElts = ResultTy->getNumElements() * 8;
 
@@ -1508,8 +1511,7 @@ static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
   return Builder.CreateBitCast(Res, ResultTy, "cast");
 }
 
-static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
-                            unsigned NumElts) {
+Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask, unsigned NumElts) {
   assert(isPowerOf2_32(NumElts) && "Expected power-of-2 mask elements");
   llvm::VectorType *MaskTy = FixedVectorType::get(
       Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
@@ -1528,8 +1530,8 @@ static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
   return Mask;
 }
 
-static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
-                            Value *Op0, Value *Op1) {
+Value *emitX86Select(IRBuilder<> &Builder, Value *Mask, Value *Op0,
+                     Value *Op1) {
   // If the mask is all ones just emit the first operation.
   if (const auto *C = dyn_cast<Constant>(Mask))
     if (C->isAllOnesValue())
@@ -1540,8 +1542,8 @@ static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
   return Builder.CreateSelect(Mask, Op0, Op1);
 }
 
-static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
-                                  Value *Op0, Value *Op1) {
+Value *emitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask, Value *Op0,
+                           Value *Op1) {
   // If the mask is all ones just emit the first operation.
   if (const auto *C = dyn_cast<Constant>(Mask))
     if (C->isAllOnesValue())
@@ -1557,10 +1559,9 @@ static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
 // Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
 // PALIGNR handles large immediates by shifting while VALIGN masks the immediate
 // so we need to handle both cases. VALIGN also doesn't have 128-bit lanes.
-static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
-                                        Value *Op1, Value *Shift,
-                                        Value *Passthru, Value *Mask,
-                                        bool IsVALIGN) {
+Value *upgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0, Value *Op1,
+                                 Value *Shift, Value *Passthru, Value *Mask,
+                                 bool IsVALIGN) {
   unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();
 
   unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
@@ -1599,11 +1600,11 @@ static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
   Value *Align = Builder.CreateShuffleVector(
       Op1, Op0, ArrayRef(Indices, NumElts), "palignr");
 
-  return EmitX86Select(Builder, Mask, Align, Passthru);
+  return emitX86Select(Builder, Mask, Align, Passthru);
 }
 
-static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
-                                          bool ZeroMask, bool IndexForm) {
+Value *upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
+                                   bool ZeroMask, bool IndexForm) {
   Type *Ty = CI.getType();
   unsigned VecWidth = Ty->getPrimitiveSizeInBits();
   unsigned EltWidth = Ty->getScalarSizeInBits();
@@ -1660,11 +1661,11 @@ static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
   Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
                              : Builder.CreateBitCast(CI.getArgOperand(1),
                                                      Ty);
-  return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
+  return emitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
 }
 
-static Value *UpgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
-                                         Intrinsic::ID IID) {
+Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
+                                  Intrinsic::ID IID) {
   Type *Ty = CI.getType();
   Value *Op0 = CI.getOperand(0);
   Value *Op1 = CI.getOperand(1);
@@ -1674,13 +1675,13 @@ static Value *UpgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
   if (CI.arg_size() == 4) { // For masked intrinsics.
     Value *VecSrc = CI.getOperand(2);
     Value *Mask = CI.getOperand(3);
-    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+    Res = emitX86Select(Builder, Mask, Res, VecSrc);
   }
   return Res;
 }
 
-static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
-                               bool IsRotateRight) {
+Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
+                        bool IsRotateRight) {
   Type *Ty = CI.getType();
   Value *Src = CI.getArgOperand(0);
   Value *Amt = CI.getArgOperand(1);
@@ -1701,13 +1702,13 @@ static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
   if (CI.arg_size() == 4) { // For masked intrinsics.
     Value *VecSrc = CI.getOperand(2);
     Value *Mask = CI.getOperand(3);
-    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+    Res = emitX86Select(Builder, Mask, Res, VecSrc);
   }
   return Res;
 }
 
-static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
-                              bool IsSigned) {
+Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
+                       bool IsSigned) {
   Type *Ty = CI.getType();
   Value *LHS = CI.getArgOperand(0);
   Value *RHS = CI.getArgOperand(1);
@@ -1745,8 +1746,8 @@ static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
   return Ext;
 }
 
-static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
-                                    bool IsShiftRight, bool ZeroMask) {
+Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
+                             bool IsShiftRight, bool ZeroMask) {
   Type *Ty = CI.getType();
   Value *Op0 = CI.getArgOperand(0);
   Value *Op1 = CI.getArgOperand(1);
@@ -1774,14 +1775,13 @@ static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
                     ZeroMask     ? ConstantAggregateZero::get(CI.getType()) :
                                    CI.getArgOperand(0);
     Value *Mask = CI.getOperand(NumArgs - 1);
-    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+    Res = emitX86Select(Builder, Mask, Res, VecSrc);
   }
   return Res;
 }
 
-static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
-                                 Value *Ptr, Value *Data, Value *Mask,
-                                 bool Aligned) {
+Value *upgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr, Value *Data,
+                          Value *Mask, bool Aligned) {
   // Cast the pointer to the right type.
   Ptr = Builder.CreateBitCast(Ptr,
                               llvm::PointerType::getUnqual(Data->getType()));
@@ -1801,9 +1801,8 @@ static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
   return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
 }
 
-static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
-                                Value *Ptr, Value *Passthru, Value *Mask,
-                                bool Aligned) {
+Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr, Value *Passthru,
+                         Value *Mask, bool Aligned) {
   Type *ValTy = Passthru->getType();
   // Cast the pointer to the right type.
   Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
@@ -1825,17 +1824,17 @@ static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
   return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
 }
 
-static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
+Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
   Type *Ty = CI.getType();
   Value *Op0 = CI.getArgOperand(0);
   Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
   Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
   if (CI.arg_size() == 3)
-    Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
+    Res = emitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
   return Res;
 }
 
-static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
+Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
   Type *Ty = CI.getType();
 
   // Arguments have a vXi32 type so cast to vXi64.
@@ -1859,14 +1858,13 @@ static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
   Value *Res = Builder.CreateMul(LHS, RHS);
 
   if (CI.arg_size() == 4)
-    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
+    Res = emitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
 
   return Res;
 }
 
 // Applying mask on vector of i1's and make sure result is at least 8 bits wide.
-static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
-                                     Value *Mask) {
+Value *applyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec, Value *Mask) {
   unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
   if (Mask) {
     const auto *C = dyn_cast<Constant>(Mask);
@@ -1887,8 +1885,8 @@ static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
   return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
 }
 
-static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
-                                   unsigned CC, bool Signed) {
+Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI, unsigned CC,
+                            bool Signed) {
   Value *Op0 = CI.getArgOperand(0);
   unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
 
@@ -1915,19 +1913,19 @@ static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
 
   Value *Mask = CI.getArgOperand(CI.arg_size() - 1);
 
-  return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
+  return applyX86MaskOn1BitsVec(Builder, Cmp, Mask);
 }
 
 // Replace a masked intrinsic with an older unmasked intrinsic.
-static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
-                                    Intrinsic::ID IID) {
+Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
+                             Intrinsic::ID IID) {
   Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
   Value *Rep = Builder.CreateCall(Intrin,
                                  { CI.getArgOperand(0), CI.getArgOperand(1) });
-  return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
+  return emitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
 }
 
-static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
+Value *upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
   Value* A = CI.getArgOperand(0);
   Value* B = CI.getArgOperand(1);
   Value* Src = CI.getArgOperand(2);
@@ -1941,8 +1939,7 @@ static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
   return Builder.CreateInsertElement(A, Select, (uint64_t)0);
 }
 
-
-static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
+Value *upgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
   Value* Op = CI.getArgOperand(0);
   Type* ReturnOp = CI.getType();
   unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
@@ -1951,8 +1948,8 @@ static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
 }
 
 // Replace intrinsic with unmasked version and a select.
-static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
-                                      CallBase &CI, Value *&Rep) {
+bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
+                               CallBase &CI, Value *&Rep) {
   Name = Name.substr(12); // Remove avx512.mask.
 
   unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
@@ -2184,11 +2181,13 @@ static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
   Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
                            Args);
   unsigned NumArgs = CI.arg_size();
-  Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
+  Rep = emitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
                       CI.getArgOperand(NumArgs - 2));
   return true;
 }
 
+} // namespace
+
 /// Upgrade comment in call to inline asm that represents an objc retain release
 /// marker.
 void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
@@ -2200,8 +2199,10 @@ void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
   }
 }
 
-static Value *UpgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
-                                      IRBuilder<> &Builder) {
+namespace {
+
+Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
+                               IRBuilder<> &Builder) {
   if (Name == "mve.vctp64.old") {
     // Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
     // correct type.
@@ -2294,8 +2295,8 @@ static Value *UpgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
   llvm_unreachable("Unknown function for ARM CallBase upgrade.");
 }
 
-static Value *UpgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI,
-                                         Function *F, IRBuilder<> &Builder) {
+Value *upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
+                                  IRBuilder<> &Builder) {
   const bool IsInc = Name.starts_with("atomic.inc.");
   if (IsInc || Name.starts_with("atomic.dec.")) {
     if (CI->getNumOperands() != 6) // Malformed bitcode.
@@ -2330,6 +2331,8 @@ static Value *UpgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI,
   llvm_unreachable("Unknown function for AMDGPU intrinsic upgrade.");
 }
 
+} // namespace
+
 /// Upgrade a call to an old intrinsic. All argument and return casting must be
 /// provided to seamlessly integrate with existing context.
 void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
@@ -2448,7 +2451,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
 
     if (IsX86 && Name == "avx512.mask.store.ss") {
       Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
-      UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
+      upgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
                          Mask, false);
 
       // Remove intrinsic.
@@ -2456,10 +2459,10 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       return;
     }
 
-    if (IsX86 && (Name.starts_with("avx512.mask.store"))) {
+    if (IsX86 && Name.starts_with("avx512.mask.store")) {
       // "avx512.mask.storeu." or "avx512.mask.store."
       bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
-      UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
+      upgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
                          CI->getArgOperand(2), Aligned);
 
       // Remove intrinsic.
@@ -2515,7 +2518,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                                                            CI->getType()),
                                  {CI->getArgOperand(0)});
       }
-      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
                           CI->getArgOperand(1));
     } else if (IsX86 && (Name.starts_with("avx512.ptestm") ||
                          Name.starts_with("avx512.ptestnm"))) {
@@ -2528,12 +2531,12 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       ICmpInst::Predicate Pred =
         Name.starts_with("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
       Rep = Builder.CreateICmp(Pred, Rep, Zero);
-      Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask);
+      Rep = applyX86MaskOn1BitsVec(Builder, Rep, Mask);
     } else if (IsX86 && (Name.starts_with("avx512.mask.pbroadcast"))){
       unsigned NumElts = cast<FixedVectorType>(CI->getArgOperand(1)->getType())
                              ->getNumElements();
       Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
-      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
                           CI->getArgOperand(1));
     } else if (IsX86 && (Name.starts_with("avx512.kunpck"))) {
       unsigned NumElts = CI->getType()->getScalarSizeInBits();
@@ -2634,7 +2637,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
 
       Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                                { CI->getOperand(0), CI->getArgOperand(1) });
-      Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
+      Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
     } else if (IsX86 && Name.starts_with("avx512.mask.fpclass.p")) {
       Type *OpTy = CI->getArgOperand(0)->getType();
       unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
@@ -2657,7 +2660,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
 
       Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                                { CI->getOperand(0), CI->getArgOperand(1) });
-      Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
+      Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
     } else if (IsX86 && Name.starts_with("avx512.cmp.p")) {
       SmallVector<Value *, 4> Args(CI->args());
       Type *OpTy = Args[0]->getType();
@@ -2700,7 +2703,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Value *Op = CI->getArgOperand(0);
       Value *Zero = llvm::Constant::getNullValue(Op->getType());
       Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
-      Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr);
+      Rep = applyX86MaskOn1BitsVec(Builder, Rep, nullptr);
     } else if(IsX86 && (Name == "ssse3.pabs.b.128" ||
                         Name == "ssse3.pabs.w.128" ||
                         Name == "ssse3.pabs.d.128" ||
@@ -2712,25 +2715,25 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                          Name == "sse41.pmaxsd" ||
                          Name.starts_with("avx2.pmaxs") ||
                          Name.starts_with("avx512.mask.pmaxs"))) {
-      Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smax);
+      Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smax);
     } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
                          Name == "sse41.pmaxuw" ||
                          Name == "sse41.pmaxud" ||
                          Name.starts_with("avx2.pmaxu") ||
                          Name.starts_with("avx512.mask.pmaxu"))) {
-      Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umax);
+      Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umax);
     } else if (IsX86 && (Name == "sse41.pminsb" ||
                          Name == "sse2.pmins.w" ||
                          Name == "sse41.pminsd" ||
                          Name.starts_with("avx2.pmins") ||
                          Name.starts_with("avx512.mask.pmins"))) {
-      Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smin);
+      Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smin);
     } else if (IsX86 && (Name == "sse2.pminu.b" ||
                          Name == "sse41.pminuw" ||
                          Name == "sse41.pminud" ||
                          Name.starts_with("avx2.pminu") ||
                          Name.starts_with("avx512.mask.pminu"))) {
-      Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umin);
+      Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umin);
     } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
                          Name == "avx2.pmulu.dq" ||
                          Name == "avx512.pmulu.dq.512" ||
@@ -2805,7 +2808,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       }
 
       if (CI->arg_size() >= 3)
-        Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+        Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
                             CI->getArgOperand(1));
     } else if (IsX86 && (Name.starts_with("avx512.mask.vcvtph2ps.") ||
                          Name.starts_with("vcvtph2ps."))) {
@@ -2821,13 +2824,13 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
           Rep, FixedVectorType::get(Type::getHalfTy(C), NumDstElts));
       Rep = Builder.CreateFPExt(Rep, DstTy, "cvtph2ps");
       if (CI->arg_size() >= 3)
-        Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+        Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
                             CI->getArgOperand(1));
     } else if (IsX86 && Name.starts_with("avx512.mask.load")) {
       // "avx512.mask.loadu." or "avx512.mask.load."
       bool Aligned = Name[16] != 'u'; // "avx512.mask.loadu".
       Rep =
-          UpgradeMaskedLoad(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
+          upgradeMaskedLoad(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
                             CI->getArgOperand(2), Aligned);
     } else if (IsX86 && Name.starts_with("avx512.mask.expand.load.")) {
       auto *ResultTy = cast<FixedVectorType>(CI->getType());
@@ -2973,7 +2976,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                    : Builder.CreateZExt(SV, DstTy);
       // If there are 3 arguments, it's a masked intrinsic so we need a select.
       if (CI->arg_size() == 3)
-        Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+        Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
                             CI->getArgOperand(1));
     } else if (Name == "avx512.mask.pmov.qd.256" ||
                Name == "avx512.mask.pmov.qd.512" ||
@@ -2981,7 +2984,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                Name == "avx512.mask.pmov.wb.512") {
       Type *Ty = CI->getArgOperand(1)->getType();
       Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty);
-      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
                           CI->getArgOperand(1));
     } else if (IsX86 && (Name.starts_with("avx.vbroadcastf128") ||
                          Name == "avx2.vbroadcasti128")) {
@@ -3017,7 +3020,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       }
       Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
                                         CI->getArgOperand(1), ShuffleMask);
-      Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
                           CI->getArgOperand(3));
     }else if (IsX86 && (Name.starts_with("avx512.mask.broadcastf") ||
                          Name.starts_with("avx512.mask.broadcasti"))) {
@@ -3034,7 +3037,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
                                         CI->getArgOperand(0),
                                         ShuffleMask);
-      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
                           CI->getArgOperand(1));
     } else if (IsX86 && (Name.starts_with("avx2.pbroadcast") ||
                          Name.starts_with("avx2.vbroadcast") ||
@@ -3049,64 +3052,60 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Rep = Builder.CreateShuffleVector(Op, M);
 
       if (CI->arg_size() == 3)
-        Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+        Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
                             CI->getArgOperand(1));
     } else if (IsX86 && (Name.starts_with("sse2.padds.") ||
                          Name.starts_with("avx2.padds.") ||
                          Name.starts_with("avx512.padds.") ||
                          Name.starts_with("avx512.mask.padds."))) {
-      Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::sadd_sat);
+      Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::sadd_sat);
     } else if (IsX86 && (Name.starts_with("sse2.psubs.") ||
                          Name.starts_with("avx2.psubs.") ||
                          Name.starts_with("avx512.psubs.") ||
                          Name.starts_with("avx512.mask.psubs."))) {
-      Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::ssub_sat);
+      Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::ssub_sat);
     } else if (IsX86 && (Name.starts_with("sse2.paddus.") ||
                          Name.starts_with("avx2.paddus.") ||
                          Name.starts_with("avx512.mask.paddus."))) {
-      Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::uadd_sat);
+      Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::uadd_sat);
     } else if (IsX86 && (Name.starts_with("sse2.psubus.") ||
                          Name.starts_with("avx2.psubus.") ||
                          Name.starts_with("avx512.mask.psubus."))) {
-      Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::usub_sat);
+      Rep = upgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::usub_sat);
     } else if (IsX86 && Name.starts_with("avx512.mask.palignr.")) {
-      Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
-                                      CI->getArgOperand(1),
-                                      CI->getArgOperand(2),
-                                      CI->getArgOperand(3),
-                                      CI->getArgOperand(4),
-                                      false);
+      Rep = upgradeX86ALIGNIntrinsics(
+          Builder, CI->getArgOperand(0), CI->getArgOperand(1),
+          CI->getArgOperand(2), CI->getArgOperand(3), CI->getArgOperand(4),
+          false);
     } else if (IsX86 && Name.starts_with("avx512.mask.valign.")) {
-      Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
-                                      CI->getArgOperand(1),
-                                      CI->getArgOperand(2),
-                                      CI->getArgOperand(3),
-                                      CI->getArgOperand(4),
-                                      true);
+      Rep = upgradeX86ALIGNIntrinsics(
+          Builder, CI->getArgOperand(0), CI->getArgOperand(1),
+          CI->getArgOperand(2), CI->getArgOperand(3), CI->getArgOperand(4),
+          true);
     } else if (IsX86 && (Name == "sse2.psll.dq" ||
                          Name == "avx2.psll.dq")) {
       // 128/256-bit shift left specified in bits.
       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-      Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
+      Rep = upgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
                                        Shift / 8); // Shift is in bits.
     } else if (IsX86 && (Name == "sse2.psrl.dq" ||
                          Name == "avx2.psrl.dq")) {
       // 128/256-bit shift right specified in bits.
       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-      Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
+      Rep = upgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
                                        Shift / 8); // Shift is in bits.
     } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
                          Name == "avx2.psll.dq.bs" ||
                          Name == "avx512.psll.dq.512")) {
       // 128/256/512-bit shift left specified in bytes.
       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-      Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
+      Rep = upgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
     } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
                          Name == "avx2.psrl.dq.bs" ||
                          Name == "avx512.psrl.dq.512")) {
       // 128/256/512-bit shift right specified in bytes.
       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-      Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
+      Rep = upgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
     } else if (IsX86 && (Name == "sse41.pblendw" ||
                          Name.starts_with("sse41.blendp") ||
                          Name.starts_with("avx.blend.p") ||
@@ -3167,7 +3166,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
 
       // If the intrinsic has a mask operand, handle that.
       if (CI->arg_size() == 5)
-        Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
+        Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
                             CI->getArgOperand(3));
     } else if (IsX86 && (Name.starts_with("avx.vextractf128.") ||
                          Name == "avx2.vextracti128" ||
@@ -3192,7 +3191,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
 
       // If the intrinsic has a mask operand, handle that.
       if (CI->arg_size() == 4)
-        Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+        Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                             CI->getArgOperand(2));
     } else if (!IsX86 && Name == "stackprotectorcheck") {
       Rep = nullptr;
@@ -3210,7 +3209,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
 
       if (CI->arg_size() == 4)
-        Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+        Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                             CI->getArgOperand(2));
     } else if (IsX86 && (Name.starts_with("avx.vperm2f128.") ||
                          Name == "avx2.vperm2i128")) {
@@ -3270,7 +3269,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
 
       if (CI->arg_size() == 4)
-        Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+        Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                             CI->getArgOperand(2));
     } else if (IsX86 && (Name == "sse2.pshufl.w" ||
                          Name.starts_with("avx512.mask.pshufl.w."))) {
@@ -3289,7 +3288,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
 
       if (CI->arg_size() == 4)
-        Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+        Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                             CI->getArgOperand(2));
     } else if (IsX86 && (Name == "sse2.pshufh.w" ||
                          Name.starts_with("avx512.mask.pshufh.w."))) {
@@ -3308,7 +3307,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
 
       if (CI->arg_size() == 4)
-        Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+        Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                             CI->getArgOperand(2));
     } else if (IsX86 && Name.starts_with("avx512.mask.shuf.p")) {
       Value *Op0 = CI->getArgOperand(0);
@@ -3333,7 +3332,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
 
       Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
 
-      Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep,
                           CI->getArgOperand(3));
     } else if (IsX86 && (Name.starts_with("avx512.mask.movddup") ||
                          Name.starts_with("avx512.mask.movshdup") ||
@@ -3355,7 +3354,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
 
       Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
 
-      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
                           CI->getArgOperand(1));
     } else if (IsX86 && (Name.starts_with("avx512.mask.punpckl") ||
                          Name.starts_with("avx512.mask.unpckl."))) {
@@ -3371,7 +3370,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
 
       Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
 
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && (Name.starts_with("avx512.mask.punpckh") ||
                          Name.starts_with("avx512.mask.unpckh."))) {
@@ -3387,7 +3386,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
 
       Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
 
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && (Name.starts_with("avx512.mask.and.") ||
                          Name.starts_with("avx512.mask.pand."))) {
@@ -3396,7 +3395,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                               Builder.CreateBitCast(CI->getArgOperand(1), ITy));
       Rep = Builder.CreateBitCast(Rep, FTy);
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && (Name.starts_with("avx512.mask.andn.") ||
                          Name.starts_with("avx512.mask.pandn."))) {
@@ -3406,7 +3405,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Rep = Builder.CreateAnd(Rep,
                               Builder.CreateBitCast(CI->getArgOperand(1), ITy));
       Rep = Builder.CreateBitCast(Rep, FTy);
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && (Name.starts_with("avx512.mask.or.") ||
                          Name.starts_with("avx512.mask.por."))) {
@@ -3415,7 +3414,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                              Builder.CreateBitCast(CI->getArgOperand(1), ITy));
       Rep = Builder.CreateBitCast(Rep, FTy);
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && (Name.starts_with("avx512.mask.xor.") ||
                          Name.starts_with("avx512.mask.pxor."))) {
@@ -3424,19 +3423,19 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                               Builder.CreateBitCast(CI->getArgOperand(1), ITy));
       Rep = Builder.CreateBitCast(Rep, FTy);
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && Name.starts_with("avx512.mask.padd.")) {
       Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && Name.starts_with("avx512.mask.psub.")) {
       Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && Name.starts_with("avx512.mask.pmull.")) {
       Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && Name.starts_with("avx512.mask.add.p")) {
       if (Name.ends_with(".512")) {
@@ -3452,7 +3451,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       } else {
         Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
       }
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && Name.starts_with("avx512.mask.div.p")) {
       if (Name.ends_with(".512")) {
@@ -3468,7 +3467,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       } else {
         Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
       }
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && Name.starts_with("avx512.mask.mul.p")) {
       if (Name.ends_with(".512")) {
@@ -3484,7 +3483,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       } else {
         Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
       }
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && Name.starts_with("avx512.mask.sub.p")) {
       if (Name.ends_with(".512")) {
@@ -3500,7 +3499,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       } else {
         Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
       }
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && (Name.starts_with("avx512.mask.max.p") ||
                          Name.starts_with("avx512.mask.min.p")) &&
@@ -3516,14 +3515,14 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
       Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                                { CI->getArgOperand(0), CI->getArgOperand(1),
                                  CI->getArgOperand(4) });
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep,
                           CI->getArgOperand(2));
     } else if (IsX86 && Name.starts_with("avx512.mask.lzcnt.")) {
       Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
                                                          Intrinsic::ctlz,
                                                          CI->getType()),
                                { CI->getArgOperand(0), Builder.getInt1(false) });
-      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+      Rep = emitX86Select(Builder, CI->getArgOperand(2), Rep,
                           CI->getArgOperand(1));
     } else if (IsX86 && Name.starts_with("avx512.mask.psll")) {
       bool IsImmediate = Name[16] == 'i' ||
@@ -3592,7 +3591,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
           llvm_unreachable("Unexpected size");
       }
 
-      Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
+      Rep = upgradeX86MaskedShift(Builder, *CI, IID);
     } else if (IsX86 && Name.starts_with("avx512.mask.psrl")) {
       bool IsImmediate = Name[16] == 'i' ||
                          (Name.size() > 18 && Name[18] == 'i');
@@ -3660,7 +3659,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
           llvm_unreachable("Unexpected size");
       }
 
-      Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
+      Rep = upgradeX86MaskedShift(Builder, *CI, IID);
     } else if (IsX86 && Name.starts_with("avx512.mask.psra")) {
       bool IsImmediate = Name[16] == 'i' ||
                          (Name.size() > 18 && Name[18] == 'i');
@@ -3726,11 +3725,11 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
           llvm_unreachable("Unexpected size");
       }
 
-      Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
+      Rep = upgradeX86MaskedShift(Builder, *CI, IID);
     } else if (IsX86 && Name.starts_with("avx512.mask.move.s")) {
       Rep = upgradeMaskedMove(Builder, *CI);
     } else if (IsX86 && Name.starts_with("avx512.cvtmask2")) {
-      Rep = UpgradeMaskToInt(Builder, *CI);
+      Rep = upgradeMaskToInt(Builder, *CI);
     } else if (IsX86 && Name.ends_with(".movntdqa")) {
       MDNode *Node = MDNode::get(
           C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
@@ -3846,8 +3845,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
         PassThru = Builder.CreateExtractElement(CI->getArgOperand(2),
                                                 (uint64_t)0);
 
-      Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3),
-                                Rep, PassThru);
+      Rep = emitX86ScalarSelect(Builder, CI->getArgOperand(3), Rep, PassThru);
       Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0),
                                         Rep, (uint64_t)0);
     } else if (IsX86 && (Name.starts_with("avx512.mask.vfmadd.p") ||
@@ -3898,7 +3896,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                         IsMask3 ? CI->getArgOperand(2) :
                                   CI->getArgOperand(0);
 
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
     } else if (IsX86 &&  Name.starts_with("fma.vfmsubadd.p")) {
       unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
       unsigned EltWidth = CI->getType()->getScalarSizeInBits();
@@ -3969,7 +3967,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                         IsMask3 ? CI->getArgOperand(2) :
                                   CI->getArgOperand(0);
 
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
     } else if (IsX86 && (Name.starts_with("avx512.mask.pternlog.") ||
                          Name.starts_with("avx512.maskz.pternlog."))) {
       bool ZeroMask = Name[11] == 'z';
@@ -3997,7 +3995,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                                Args);
       Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                  : CI->getArgOperand(0);
-      Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
+      Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
     } else if (IsX86 && (Name.starts_with("avx512.mask.vpmadd52") ||
                          Name.starts_with("avx512.maskz.vpmadd52"))) {
       bool ZeroMask = Name[11] == 'z';
@@ -4025,13 +4023,13 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                                Args);
       Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                  : CI->getArgOperand(0);
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
     } else if (IsX86 && (Name.starts_with("avx512.mask.vpermi2var.") ||
                          Name.starts_with("avx512.mask.vpermt2var.") ||
                          Name.starts_with("avx512.maskz.vpermt2var."))) {
       bool ZeroMask = Name[11] == 'z';
       bool IndexForm = Name[17] == 'i';
-      Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
+      Rep = upgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
     } else if (IsX86 && (Name.starts_with("avx512.mask.vpdpbusd.") ||
                          Name.starts_with("avx512.maskz.vpdpbusd.") ||
                          Name.starts_with("avx512.mask.vpdpbusds.") ||
@@ -4061,7 +4059,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                                Args);
       Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                  : CI->getArgOperand(0);
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
     } else if (IsX86 && (Name.starts_with("avx512.mask.vpdpwssd.") ||
                          Name.starts_with("avx512.maskz.vpdpwssd.") ||
                          Name.starts_with("avx512.mask.vpdpwssds.") ||
@@ -4091,7 +4089,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                                Args);
       Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                  : CI->getArgOperand(0);
-      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+      Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
     } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" ||
                          Name == "addcarry.u32" || Name == "addcarry.u64" ||
                          Name == "subborrow.u32" || Name == "subborrow.u64")) {
@@ -4157,7 +4155,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                        : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
       Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
     } else if (IsNVVM && Name == "clz.ll") {
-      // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 and returns an i64.
+      // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
       Value *Arg = CI->getArgOperand(0);
       Value *Ctlz = Builder.CreateCall(
           Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
@@ -4165,7 +4163,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
           {Arg, Builder.getFalse()}, "ctlz");
       Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
     } else if (IsNVVM && Name == "popc.ll") {
-      // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 and returns an
+      // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an
       // i64.
       Value *Arg = CI->getArgOperand(0);
       Value *Popc = Builder.CreateCall(
@@ -4181,7 +4179,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
                                    {Builder.getFloatTy()}),
                                CI->getArgOperand(0), "h2f");
       } else {
-        Intrinsic::ID IID = ShouldUpgradeNVPTXBF16Intrinsic(Name);
+        Intrinsic::ID IID = shouldUpgradeNVPTXBF16Intrinsic(Name);
         if (IID != Intrinsic::not_intrinsic &&
             !F->getReturnType()->getScalarType()->isBFloatTy()) {
           rename(F);
@@ -4202,9 +4200,9 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
         }
       }
     } else if (IsARM) {
-      Rep = UpgradeARMIntrinsicCall(Name, CI, F, Builder);
+      Rep = upgradeARMIntrinsicCall(Name, CI, F, Builder);
     } else if (IsAMDGCN) {
-      Rep = UpgradeAMDGCNIntrinsicCall(Name, CI, F, Builder);
+      Rep = upgradeAMDGCNIntrinsicCall(Name, CI, F, Builder);
     } else {
       llvm_unreachable("Unknown function for CallBase upgrade.");
     }
@@ -4623,7 +4621,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
     Args[3] = getX86MaskVec(Builder, Args[3], NumElts);
 
     NewCall = Builder.CreateCall(NewFn, Args);
-    Value *Res = ApplyX86MaskOn1BitsVec(Builder, NewCall, nullptr);
+    Value *Res = applyX86MaskOn1BitsVec(Builder, NewCall, nullptr);
 
     NewCall->takeName(CI);
     CI->replaceAllUsesWith(Res);
@@ -4830,9 +4828,11 @@ bool llvm::UpgradeDebugInfo(Module &M) {
   return Modified;
 }
 
+namespace {
+
 /// This checks for objc retain release marker which should be upgraded. It
 /// returns true if module is modified.
-static bool UpgradeRetainReleaseMarker(Module &M) {
+bool upgradeRetainReleaseMarker(Module &M) {
   bool Changed = false;
   const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
   NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
@@ -4856,6 +4856,8 @@ static bool UpgradeRetainReleaseMarker(Module &M) {
   return Changed;
 }
 
+} // namespace
+
 void llvm::UpgradeARCRuntime(Module &M) {
   // This lambda converts normal function calls to ARC runtime functions to
   // intrinsic calls.
@@ -4931,7 +4933,7 @@ void llvm::UpgradeARCRuntime(Module &M) {
   // Upgrade the retain release marker. If there is no need to upgrade
   // the marker, that means either the module is already new enough to contain
   // new intrinsics or it is not ARC. There is no need to upgrade runtime call.
-  if (!UpgradeRetainReleaseMarker(M))
+  if (!upgradeRetainReleaseMarker(M))
     return;
 
   std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = {
@@ -5182,7 +5184,9 @@ void llvm::UpgradeFunctionAttributes(Function &F) {
     Arg.removeAttrs(AttributeFuncs::typeIncompatible(Arg.getType()));
 }
 
-static bool isOldLoopArgument(Metadata *MD) {
+namespace {
+
+bool isOldLoopArgument(Metadata *MD) {
   auto *T = dyn_cast_or_null<MDTuple>(MD);
   if (!T)
     return false;
@@ -5194,7 +5198,7 @@ static bool isOldLoopArgument(Metadata *MD) {
   return S->getString().starts_with("llvm.vectorizer.");
 }
 
-static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
+MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
   StringRef OldPrefix = "llvm.vectorizer.";
   assert(OldTag.starts_with(OldPrefix) && "Expected old prefix");
 
@@ -5206,7 +5210,7 @@ static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
              .str());
 }
 
-static Metadata *upgradeLoopArgument(Metadata *MD) {
+Metadata *upgradeLoopArgument(Metadata *MD) {
   auto *T = dyn_cast_or_null<MDTuple>(MD);
   if (!T)
     return MD;
@@ -5228,6 +5232,8 @@ static Metadata *upgradeLoopArgument(Metadata *MD) {
   return MDTuple::get(T->getContext(), Ops);
 }
 
+} // namespace
+
 MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
   auto *T = dyn_cast<MDTuple>(&N);
   if (!T)

>From 9d9b8f52f890f511e747dd05788152d8336851b2 Mon Sep 17 00:00:00 2001
From: Nathan Sidwell <nathan at acm.org>
Date: Fri, 19 Jan 2024 11:59:34 -0500
Subject: [PATCH 2/2] Don't use anonymous namespace

---
 llvm/lib/IR/AutoUpgrade.cpp | 148 ++++++++++++++++--------------------
 1 file changed, 67 insertions(+), 81 deletions(-)
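
For context, a minimal standalone C++ sketch (not LLVM code; the file and
function names below are hypothetical) of why this follow-up is
behavior-preserving: a free function marked 'static' and one defined in an
unnamed namespace both get internal linkage, so swapping one spelling for
the other changes nothing about what the object file exports.

  // demo.cpp - hypothetical example, not part of this patch.
  // Both helpers have internal linkage and are invisible to other
  // translation units, so neither can clash at link time.

  static int helperStatic(int X) { return X + 1; } // internal via 'static'

  namespace {
  int helperAnon(int X) { return X + 1; } // internal via unnamed namespace
  } // namespace

  int callBoth(int X) { return helperStatic(X) + helperAnon(X); }

As I understand the LLVM coding standards, 'static' is the preferred
spelling for free functions, with unnamed namespaces reserved for type
definitions, which is what this second patch restores.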

diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 745a9496f387db..848beda834c68e 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -49,13 +49,12 @@ static cl::opt<bool>
     DisableAutoUpgradeDebugInfo("disable-auto-upgrade-debug-info",
                                 cl::desc("Disable autoupgrade of debug info"));
 
-namespace {
-
-void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
+static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
 
 // Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
 // changed their type from v4f32 to v2i64.
-bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn) {
+static bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
+                                  Function *&NewFn) {
   // Check whether this is an old version of the function, which received
   // v4f32 arguments.
   Type *Arg0Type = F->getFunctionType()->getParamType(0);
@@ -70,8 +69,8 @@ bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn) {
 
 // Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
 // arguments have changed their type from i32 to i8.
-bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
-                                      Function *&NewFn) {
+static bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
+                                             Function *&NewFn) {
   // Check that the last argument is an i32.
   Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
@@ -86,8 +85,8 @@ bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
 
 // Upgrade the declaration of fp compare intrinsics that change return type
 // from scalar to vXi1 mask.
-bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
-                               Function *&NewFn) {
+static bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
+                                      Function *&NewFn) {
   // Check if the return type is a vector.
   if (F->getReturnType()->isVectorTy())
     return false;
@@ -97,7 +96,8 @@ bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
   return true;
 }
 
-bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn) {
+static bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
+                                    Function *&NewFn) {
   if (F->getReturnType()->getScalarType()->isBFloatTy())
     return false;
 
@@ -106,8 +106,8 @@ bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID, Function *&NewFn) {
   return true;
 }
 
-bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
-                               Function *&NewFn) {
+static bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
+                                      Function *&NewFn) {
   if (F->getFunctionType()->getParamType(1)->getScalarType()->isBFloatTy())
     return false;
 
@@ -116,7 +116,7 @@ bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
   return true;
 }
 
-bool shouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
+static bool shouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
   // All of the intrinsics matches below should be marked with which llvm
   // version started autoupgrading them. At some point in the future we would
   // like to use this information to remove upgrade code for some older
@@ -483,8 +483,8 @@ bool shouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
           Name.starts_with("vcvtph2ps.")); // Added in 11.0
 }
 
-bool upgradeX86IntrinsicFunction(Function *F, StringRef Name,
-                                 Function *&NewFn) {
+static bool upgradeX86IntrinsicFunction(Function *F, StringRef Name,
+                                        Function *&NewFn) {
   // Only handle intrinsics that start with "x86.".
   if (!Name.consume_front("x86."))
     return false;
@@ -623,8 +623,9 @@ bool upgradeX86IntrinsicFunction(Function *F, StringRef Name,
 
 // Upgrade ARM (IsArm) or Aarch64 (!IsArm) intrinsic fns. Return true iff so.
 // IsArm: 'arm.*', !IsArm: 'aarch64.*'.
-bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
-                                          StringRef Name, Function *&NewFn) {
+static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
+                                                 StringRef Name,
+                                                 Function *&NewFn) {
   if (Name.starts_with("rbit")) {
     // '(arm|aarch64).rbit'.
     NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
@@ -898,7 +899,7 @@ bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
   return false; // No other 'arm.*', 'aarch64.*'.
 }
 
-Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
+static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
   if (Name.consume_front("abs."))
     return StringSwitch<Intrinsic::ID>(Name)
         .Case("bf16", Intrinsic::nvvm_abs_bf16)
@@ -978,7 +979,7 @@ Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
   return Intrinsic::not_intrinsic;
 }
 
-bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
+static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
   assert(F && "Illegal to upgrade a non-existent Function.");
 
   StringRef Name = F->getName();
@@ -1395,8 +1396,6 @@ bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
   return false;
 }
 
-} // namespace
-
 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
   NewFn = nullptr;
   bool Upgraded = upgradeIntrinsicFunction1(F, NewFn);
@@ -1441,12 +1440,10 @@ GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
                             NewInit, GV->getName());
 }
 
-namespace {
-
 // Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
 // to byte shuffles.
-Value *upgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
-                                  unsigned Shift) {
+static Value *upgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
+                                         unsigned Shift) {
   auto *ResultTy = cast<FixedVectorType>(Op->getType());
   unsigned NumElts = ResultTy->getNumElements() * 8;
 
@@ -1479,8 +1476,8 @@ Value *upgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
 
 // Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
 // to byte shuffles.
-Value *upgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
-                                  unsigned Shift) {
+static Value *upgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
+                                         unsigned Shift) {
   auto *ResultTy = cast<FixedVectorType>(Op->getType());
   unsigned NumElts = ResultTy->getNumElements() * 8;
 
@@ -1511,7 +1508,8 @@ Value *upgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
   return Builder.CreateBitCast(Res, ResultTy, "cast");
 }
 
-Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask, unsigned NumElts) {
+static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
+                            unsigned NumElts) {
   assert(isPowerOf2_32(NumElts) && "Expected power-of-2 mask elements");
   llvm::VectorType *MaskTy = FixedVectorType::get(
       Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
@@ -1530,8 +1528,8 @@ Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask, unsigned NumElts) {
   return Mask;
 }
 
-Value *emitX86Select(IRBuilder<> &Builder, Value *Mask, Value *Op0,
-                     Value *Op1) {
+static Value *emitX86Select(IRBuilder<> &Builder, Value *Mask, Value *Op0,
+                            Value *Op1) {
   // If the mask is all ones just emit the first operation.
   if (const auto *C = dyn_cast<Constant>(Mask))
     if (C->isAllOnesValue())
@@ -1542,8 +1540,8 @@ Value *emitX86Select(IRBuilder<> &Builder, Value *Mask, Value *Op0,
   return Builder.CreateSelect(Mask, Op0, Op1);
 }
 
-Value *emitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask, Value *Op0,
-                           Value *Op1) {
+static Value *emitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask, Value *Op0,
+                                  Value *Op1) {
   // If the mask is all ones just emit the first operation.
   if (const auto *C = dyn_cast<Constant>(Mask))
     if (C->isAllOnesValue())
@@ -1559,9 +1557,10 @@ Value *emitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask, Value *Op0,
 // Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
 // PALIGNR handles large immediates by shifting while VALIGN masks the immediate
 // so we need to handle both cases. VALIGN also doesn't have 128-bit lanes.
-Value *upgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0, Value *Op1,
-                                 Value *Shift, Value *Passthru, Value *Mask,
-                                 bool IsVALIGN) {
+static Value *upgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
+                                        Value *Op1, Value *Shift,
+                                        Value *Passthru, Value *Mask,
+                                        bool IsVALIGN) {
   unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();
 
   unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
@@ -1603,8 +1602,8 @@ Value *upgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0, Value *Op1,
   return emitX86Select(Builder, Mask, Align, Passthru);
 }
 
-Value *upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
-                                   bool ZeroMask, bool IndexForm) {
+static Value *upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
+                                          bool ZeroMask, bool IndexForm) {
   Type *Ty = CI.getType();
   unsigned VecWidth = Ty->getPrimitiveSizeInBits();
   unsigned EltWidth = Ty->getScalarSizeInBits();
@@ -1664,8 +1663,8 @@ Value *upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
   return emitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
 }
 
-Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
-                                  Intrinsic::ID IID) {
+static Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
+                                         Intrinsic::ID IID) {
   Type *Ty = CI.getType();
   Value *Op0 = CI.getOperand(0);
   Value *Op1 = CI.getOperand(1);
@@ -1680,8 +1679,8 @@ Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
   return Res;
 }
 
-Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
-                        bool IsRotateRight) {
+static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
+                               bool IsRotateRight) {
   Type *Ty = CI.getType();
   Value *Src = CI.getArgOperand(0);
   Value *Amt = CI.getArgOperand(1);
@@ -1707,8 +1706,8 @@ Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
   return Res;
 }
 
-Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
-                       bool IsSigned) {
+static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
+                              bool IsSigned) {
   Type *Ty = CI.getType();
   Value *LHS = CI.getArgOperand(0);
   Value *RHS = CI.getArgOperand(1);
@@ -1746,8 +1745,8 @@ Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
   return Ext;
 }
 
-Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
-                             bool IsShiftRight, bool ZeroMask) {
+static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
+                                    bool IsShiftRight, bool ZeroMask) {
   Type *Ty = CI.getType();
   Value *Op0 = CI.getArgOperand(0);
   Value *Op1 = CI.getArgOperand(1);
@@ -1780,8 +1779,8 @@ Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
   return Res;
 }
 
-Value *upgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr, Value *Data,
-                          Value *Mask, bool Aligned) {
+static Value *upgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr, Value *Data,
+                                 Value *Mask, bool Aligned) {
   // Cast the pointer to the right type.
   Ptr = Builder.CreateBitCast(Ptr,
                               llvm::PointerType::getUnqual(Data->getType()));
@@ -1801,8 +1800,8 @@ Value *upgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr, Value *Data,
   return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
 }
 
-Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr, Value *Passthru,
-                         Value *Mask, bool Aligned) {
+static Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr,
+                                Value *Passthru, Value *Mask, bool Aligned) {
   Type *ValTy = Passthru->getType();
   // Cast the pointer to the right type.
   Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
@@ -1824,7 +1823,7 @@ Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr, Value *Passthru,
   return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
 }
 
-Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
+static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
   Type *Ty = CI.getType();
   Value *Op0 = CI.getArgOperand(0);
   Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
@@ -1834,7 +1833,7 @@ Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
   return Res;
 }
 
-Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
+static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
   Type *Ty = CI.getType();
 
   // Arguments have a vXi32 type so cast to vXi64.
@@ -1864,7 +1863,8 @@ Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
 }
 
 // Applying mask on vector of i1's and make sure result is at least 8 bits wide.
-Value *applyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec, Value *Mask) {
+static Value *applyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
+                                     Value *Mask) {
   unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
   if (Mask) {
     const auto *C = dyn_cast<Constant>(Mask);
@@ -1885,8 +1885,8 @@ Value *applyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec, Value *Mask) {
   return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
 }
 
-Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI, unsigned CC,
-                            bool Signed) {
+static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
+                                   unsigned CC, bool Signed) {
   Value *Op0 = CI.getArgOperand(0);
   unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
 
@@ -1917,15 +1917,15 @@ Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI, unsigned CC,
 }
 
 // Replace a masked intrinsic with an older unmasked intrinsic.
-Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
-                             Intrinsic::ID IID) {
+static Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
+                                    Intrinsic::ID IID) {
   Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
   Value *Rep = Builder.CreateCall(Intrin,
                                  { CI.getArgOperand(0), CI.getArgOperand(1) });
   return emitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
 }
 
-Value *upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
+static Value *upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
   Value* A = CI.getArgOperand(0);
   Value* B = CI.getArgOperand(1);
   Value* Src = CI.getArgOperand(2);
@@ -1939,7 +1939,7 @@ Value *upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
   return Builder.CreateInsertElement(A, Select, (uint64_t)0);
 }
 
-Value *upgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
+static Value *upgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
   Value* Op = CI.getArgOperand(0);
   Type* ReturnOp = CI.getType();
   unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
@@ -1948,8 +1948,8 @@ Value *upgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
 }
 
 // Replace intrinsic with unmasked version and a select.
-bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
-                               CallBase &CI, Value *&Rep) {
+static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
+                                      CallBase &CI, Value *&Rep) {
   Name = Name.substr(12); // Remove avx512.mask.
 
   unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
@@ -2186,8 +2186,6 @@ bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
   return true;
 }
 
-} // namespace
-
 /// Upgrade comment in call to inline asm that represents an objc retain release
 /// marker.
 void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
@@ -2199,10 +2197,8 @@ void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
   }
 }
 
-namespace {
-
-Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
-                               IRBuilder<> &Builder) {
+static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
+                                      IRBuilder<> &Builder) {
   if (Name == "mve.vctp64.old") {
     // Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
     // correct type.
@@ -2295,8 +2291,8 @@ Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
   llvm_unreachable("Unknown function for ARM CallBase upgrade.");
 }
 
-Value *upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
-                                  IRBuilder<> &Builder) {
+static Value *upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI,
+                                         Function *F, IRBuilder<> &Builder) {
   const bool IsInc = Name.starts_with("atomic.inc.");
   if (IsInc || Name.starts_with("atomic.dec.")) {
     if (CI->getNumOperands() != 6) // Malformed bitcode.
@@ -2331,8 +2327,6 @@ Value *upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
   llvm_unreachable("Unknown function for AMDGPU intrinsic upgrade.");
 }
 
-} // namespace
-
 /// Upgrade a call to an old intrinsic. All argument and return casting must be
 /// provided to seamlessly integrate with existing context.
 void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
@@ -4828,11 +4822,9 @@ bool llvm::UpgradeDebugInfo(Module &M) {
   return Modified;
 }
 
-namespace {
-
 /// This checks for objc retain release marker which should be upgraded. It
 /// returns true if module is modified.
-bool upgradeRetainReleaseMarker(Module &M) {
+static bool upgradeRetainReleaseMarker(Module &M) {
   bool Changed = false;
   const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
   NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
@@ -4856,8 +4848,6 @@ bool upgradeRetainReleaseMarker(Module &M) {
   return Changed;
 }
 
-} // namespace
-
 void llvm::UpgradeARCRuntime(Module &M) {
   // This lambda converts normal function calls to ARC runtime functions to
   // intrinsic calls.
@@ -5184,9 +5174,7 @@ void llvm::UpgradeFunctionAttributes(Function &F) {
     Arg.removeAttrs(AttributeFuncs::typeIncompatible(Arg.getType()));
 }
 
-namespace {
-
-bool isOldLoopArgument(Metadata *MD) {
+static bool isOldLoopArgument(Metadata *MD) {
   auto *T = dyn_cast_or_null<MDTuple>(MD);
   if (!T)
     return false;
@@ -5198,7 +5186,7 @@ bool isOldLoopArgument(Metadata *MD) {
   return S->getString().starts_with("llvm.vectorizer.");
 }
 
-MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
+static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
   StringRef OldPrefix = "llvm.vectorizer.";
   assert(OldTag.starts_with(OldPrefix) && "Expected old prefix");
 
@@ -5210,7 +5198,7 @@ MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
              .str());
 }
 
-Metadata *upgradeLoopArgument(Metadata *MD) {
+static Metadata *upgradeLoopArgument(Metadata *MD) {
   auto *T = dyn_cast_or_null<MDTuple>(MD);
   if (!T)
     return MD;
@@ -5232,8 +5220,6 @@ Metadata *upgradeLoopArgument(Metadata *MD) {
   return MDTuple::get(T->getContext(), Ops);
 }
 
-} // namespace
-
 MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
   auto *T = dyn_cast<MDTuple>(&N);
   if (!T)


