[llvm] 350dada - Give helpers internal linkage. NFC.

Benjamin Kramer via llvm-commits <llvm-commits at lists.llvm.org>
Tue May 19 13:16:54 PDT 2020


Author: Benjamin Kramer
Date: 2020-05-19T22:16:37+02:00
New Revision: 350dadaa8ab3db34ff41d7291f43442c57719de3

URL: https://github.com/llvm/llvm-project/commit/350dadaa8ab3db34ff41d7291f43442c57719de3
DIFF: https://github.com/llvm/llvm-project/commit/350dadaa8ab3db34ff41d7291f43442c57719de3.diff

LOG: Give helpers internal linkage. NFC.
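
The patch uses the two standard C++ ways of giving a helper internal linkage: file-local free functions are marked static, and helper classes are wrapped in an anonymous namespace, since a class cannot be declared static at namespace scope. A minimal sketch with hypothetical names, not taken from the patch:

// Before: external linkage, visible to every other translation unit.
// int helperValue(int X) { return X + 1; }

// After: internal linkage via static.
static int helperValue(int X) { return X + 1; }

// After: internal linkage via an anonymous namespace, the idiomatic way to
// localize a class, which cannot take the static keyword itself.
namespace {
class HelperClass {
public:
  explicit HelperClass(int V) : Value(V) {}
  int get() const { return Value; }

private:
  int Value;
};
} // namespace

// External-linkage user so the helpers above are not dead code.
int useHelpers() { return helperValue(HelperClass(41).get()); }

Either way the symbols stay invisible outside their translation unit, which avoids accidental clashes with identically named helpers elsewhere and lets the compiler warn about unused helpers and optimize them more aggressively.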

Added: 
    

Modified: 
    clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
    clang/lib/StaticAnalyzer/Core/CallEvent.cpp
    llvm/lib/IR/Instructions.cpp
    llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
    mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
    mlir/lib/Dialect/Affine/IR/AffineOps.cpp
    mlir/lib/Dialect/StandardOps/IR/Ops.cpp
    mlir/lib/Dialect/Vector/VectorOps.cpp
    mlir/test/lib/IR/TestMatchers.cpp
    mlir/test/lib/Transforms/TestLinalgTransforms.cpp

Removed: 
    


################################################################################
diff --git a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index b8d52f096e1c..aefcad374596 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -568,21 +568,6 @@ StdLibraryFunctionsChecker::findFunctionSummary(const CallEvent &Call,
   return findFunctionSummary(FD, C);
 }
 
-llvm::Optional<const FunctionDecl *>
-lookupGlobalCFunction(StringRef Name, const ASTContext &ACtx) {
-  IdentifierInfo &II = ACtx.Idents.get(Name);
-  auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
-  if (LookupRes.size() == 0)
-    return None;
-
-  assert(LookupRes.size() == 1 && "In C, identifiers should be unique");
-  Decl *D = LookupRes.front()->getCanonicalDecl();
-  auto *FD = dyn_cast<FunctionDecl>(D);
-  if (!FD)
-    return None;
-  return FD->getCanonicalDecl();
-}
-
 void StdLibraryFunctionsChecker::initFunctionSummaries(
     CheckerContext &C) const {
   if (!FunctionSummaryMap.empty())

diff --git a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index cd15cd872d9d..1ea7c26dc76b 100644
--- a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -1202,7 +1202,7 @@ template <> struct DenseMapInfo<PrivateMethodKey> {
 };
 } // end namespace llvm
 
-const ObjCMethodDecl *
+static const ObjCMethodDecl *
 lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface,
                         Selector LookupSelector, bool InstanceMethod) {
   // Repeatedly calling lookupPrivateMethod() is expensive, especially

diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 7ddf25b99a6a..957db32d6085 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1246,12 +1246,12 @@ static Value *getAISize(LLVMContext &Context, Value *Amt) {
   return Amt;
 }
 
-Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
+static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
   const DataLayout &DL = BB->getModule()->getDataLayout();
   return DL.getPrefTypeAlign(Ty);
 }
 
-Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
+static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
   return computeAllocaDefaultAlign(Ty, I->getParent());
 }
 
@@ -1333,12 +1333,12 @@ void LoadInst::AssertOK() {
          "Alignment required for atomic load");
 }
 
-Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
+static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
   const DataLayout &DL = BB->getModule()->getDataLayout();
   return DL.getABITypeAlign(Ty);
 }
 
-Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
+static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
   return computeLoadStoreDefaultAlign(Ty, I->getParent());
 }
 

diff --git a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index f908f883fb24..49056d783028 100644
--- a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -960,9 +960,9 @@ void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
 // S0-S31 + FPSCR + 8 more bytes (VPR + pad, or just pad)
 static const int CMSE_FP_SAVE_SIZE = 136;
 
-void determineGPRegsToClear(const MachineInstr &MI,
-                            const std::initializer_list<unsigned> &Regs,
-                            SmallVectorImpl<unsigned> &ClearRegs) {
+static void determineGPRegsToClear(const MachineInstr &MI,
+                                   const std::initializer_list<unsigned> &Regs,
+                                   SmallVectorImpl<unsigned> &ClearRegs) {
   SmallVector<unsigned, 4> OpRegs;
   for (const MachineOperand &Op : MI.operands()) {
     if (!Op.isReg() || !Op.isUse())

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 655147076a40..08f455b8bf2c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3330,6 +3330,7 @@ static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
 }
 #endif
 
+namespace {
 /// This is a helper class for lowering variable arguments parameters.
 class VarArgsLoweringHelper {
 public:
@@ -3367,6 +3368,7 @@ class VarArgsLoweringHelper {
   CallingConv::ID CallConv;
   CCState &CCInfo;
 };
+} // namespace
 
 void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
     SDValue &Chain, unsigned StackSize) {

diff --git a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
index ee5e142ccf43..77d0edea7597 100644
--- a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
+++ b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
@@ -447,6 +447,7 @@ PreservedAnalyses AssumeSimplifyPass::run(Function &F,
   return PreservedAnalyses::all();
 }
 
+namespace {
 class AssumeSimplifyPassLegacyPass : public FunctionPass {
 public:
   static char ID;
@@ -469,6 +470,7 @@ class AssumeSimplifyPassLegacyPass : public FunctionPass {
     AU.setPreservesAll();
   }
 };
+} // namespace
 
 char AssumeSimplifyPassLegacyPass::ID = 0;
 

diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
index a8d615097d1d..bfe0c44a5d90 100644
--- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
+++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
@@ -35,6 +35,7 @@ using namespace mlir::edsc::intrinsics;
 using vector::TransferReadOp;
 using vector::TransferWriteOp;
 
+namespace {
 /// Helper class captures the common information needed to lower N>1-D vector
 /// transfer operations (read and write).
 /// On construction, this class opens an edsc::ScopedContext for simpler IR
@@ -132,6 +133,7 @@ class NDTransferOpHelper {
   VectorType minorVectorType; // vector<(minor_dims) x type>
   MemRefType memRefMinorVectorType; // memref<vector<(minor_dims) x type>>
 };
+} // namespace
 
 template <typename ConcreteOp>
 template <typename Lambda>

diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index 27f4450924b6..93378dddca8b 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -181,8 +181,8 @@ bool mlir::isValidDim(Value value, Region *region) {
 /// `memrefDefOp` is a statically  shaped one or defined using a valid symbol
 /// for `region`.
 template <typename AnyMemRefDefOp>
-bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp, unsigned index,
-                             Region *region) {
+static bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp, unsigned index,
+                                    Region *region) {
   auto memRefType = memrefDefOp.getType();
   // Statically shaped.
   if (!memRefType.isDynamicDim(index))
@@ -1882,7 +1882,8 @@ void AffineLoadOp::build(OpBuilder &builder, OperationState &result,
   build(builder, result, memref, map, indices);
 }
 
-ParseResult parseAffineLoadOp(OpAsmParser &parser, OperationState &result) {
+static ParseResult parseAffineLoadOp(OpAsmParser &parser,
+                                     OperationState &result) {
   auto &builder = parser.getBuilder();
   auto indexTy = builder.getIndexType();
 
@@ -1902,7 +1903,7 @@ ParseResult parseAffineLoadOp(OpAsmParser &parser, OperationState &result) {
       parser.addTypeToList(type.getElementType(), result.types));
 }
 
-void print(OpAsmPrinter &p, AffineLoadOp op) {
+static void print(OpAsmPrinter &p, AffineLoadOp op) {
   p << "affine.load " << op.getMemRef() << '[';
   if (AffineMapAttr mapAttr =
           op.getAttrOfType<AffineMapAttr>(op.getMapAttrName()))
@@ -1995,7 +1996,8 @@ void AffineStoreOp::build(OpBuilder &builder, OperationState &result,
   build(builder, result, valueToStore, memref, map, indices);
 }
 
-ParseResult parseAffineStoreOp(OpAsmParser &parser, OperationState &result) {
+static ParseResult parseAffineStoreOp(OpAsmParser &parser,
+                                      OperationState &result) {
   auto indexTy = parser.getBuilder().getIndexType();
 
   MemRefType type;
@@ -2016,7 +2018,7 @@ ParseResult parseAffineStoreOp(OpAsmParser &parser, OperationState &result) {
                  parser.resolveOperands(mapOperands, indexTy, result.operands));
 }
 
-void print(OpAsmPrinter &p, AffineStoreOp op) {
+static void print(OpAsmPrinter &p, AffineStoreOp op) {
   p << "affine.store " << op.getValueToStore();
   p << ", " << op.getMemRef() << '[';
   if (AffineMapAttr mapAttr =
@@ -2104,7 +2106,7 @@ static ParseResult parseAffineMinMaxOp(OpAsmParser &parser,
 /// list may contain nulls, which are interpreted as the operand not being a
 /// constant.
 template <typename T>
-OpFoldResult foldMinMaxOp(T op, ArrayRef<Attribute> operands) {
+static OpFoldResult foldMinMaxOp(T op, ArrayRef<Attribute> operands) {
   static_assert(llvm::is_one_of<T, AffineMinOp, AffineMaxOp>::value,
                 "expected affine min or max op");
 
@@ -2499,8 +2501,8 @@ static ParseResult parseAffineParallelOp(OpAsmParser &parser,
 // AffineVectorLoadOp
 //===----------------------------------------------------------------------===//
 
-ParseResult parseAffineVectorLoadOp(OpAsmParser &parser,
-                                    OperationState &result) {
+static ParseResult parseAffineVectorLoadOp(OpAsmParser &parser,
+                                           OperationState &result) {
   auto &builder = parser.getBuilder();
   auto indexTy = builder.getIndexType();
 
@@ -2522,7 +2524,7 @@ ParseResult parseAffineVectorLoadOp(OpAsmParser &parser,
       parser.addTypeToList(resultType, result.types));
 }
 
-void print(OpAsmPrinter &p, AffineVectorLoadOp op) {
+static void print(OpAsmPrinter &p, AffineVectorLoadOp op) {
   p << "affine.vector_load " << op.getMemRef() << '[';
   if (AffineMapAttr mapAttr =
           op.getAttrOfType<AffineMapAttr>(op.getMapAttrName()))
@@ -2563,8 +2565,8 @@ static LogicalResult verify(AffineVectorLoadOp op) {
 // AffineVectorStoreOp
 //===----------------------------------------------------------------------===//
 
-ParseResult parseAffineVectorStoreOp(OpAsmParser &parser,
-                                     OperationState &result) {
+static ParseResult parseAffineVectorStoreOp(OpAsmParser &parser,
+                                            OperationState &result) {
   auto indexTy = parser.getBuilder().getIndexType();
 
   MemRefType memrefType;
@@ -2587,7 +2589,7 @@ ParseResult parseAffineVectorStoreOp(OpAsmParser &parser,
       parser.resolveOperands(mapOperands, indexTy, result.operands));
 }
 
-void print(OpAsmPrinter &p, AffineVectorStoreOp op) {
+static void print(OpAsmPrinter &p, AffineVectorStoreOp op) {
   p << "affine.vector_store " << op.getValueToStore();
   p << ", " << op.getMemRef() << '[';
   if (AffineMapAttr mapAttr =

diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
index 7fc598a36421..90a1945a825a 100644
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -2727,6 +2727,7 @@ bool mlir::canFoldIntoConsumerOp(MemRefCastOp castOp) {
   return true;
 }
 
+namespace {
 /// Pattern to rewrite a subview op with MemRefCast arguments.
 /// This essentially pushes memref_cast past its consuming subview when
 /// `canFoldIntoConsumerOp` is true.
@@ -2779,6 +2780,7 @@ class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
     return success();
   }
 };
+} // namespace
 
 void SubViewOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                             MLIRContext *context) {

diff --git a/mlir/lib/Dialect/Vector/VectorOps.cpp b/mlir/lib/Dialect/Vector/VectorOps.cpp
index f347a564f446..01894d1ad7d1 100644
--- a/mlir/lib/Dialect/Vector/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/VectorOps.cpp
@@ -1338,7 +1338,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result,
 }
 
 template <typename TransferOp>
-void printTransferAttrs(OpAsmPrinter &p, TransferOp op) {
+static void printTransferAttrs(OpAsmPrinter &p, TransferOp op) {
   SmallVector<StringRef, 2> elidedAttrs;
   if (op.permutation_map() == TransferOp::getTransferMinorIdentityMap(
                                   op.getMemRefType(), op.getVectorType()))

diff --git a/mlir/test/lib/IR/TestMatchers.cpp b/mlir/test/lib/IR/TestMatchers.cpp
index 8af91506f639..a065325c4196 100644
--- a/mlir/test/lib/IR/TestMatchers.cpp
+++ b/mlir/test/lib/IR/TestMatchers.cpp
@@ -21,7 +21,8 @@ struct TestMatchers : public PassWrapper<TestMatchers, FunctionPass> {
 } // end anonymous namespace
 
 // This could be done better but is not worth the variadic template trouble.
-template <typename Matcher> unsigned countMatches(FuncOp f, Matcher &matcher) {
+template <typename Matcher>
+static unsigned countMatches(FuncOp f, Matcher &matcher) {
   unsigned count = 0;
   f.walk([&count, &matcher](Operation *op) {
     if (matcher.match(op))

diff --git a/mlir/test/lib/Transforms/TestLinalgTransforms.cpp b/mlir/test/lib/Transforms/TestLinalgTransforms.cpp
index 87191d3e87d2..70ba828e4953 100644
--- a/mlir/test/lib/Transforms/TestLinalgTransforms.cpp
+++ b/mlir/test/lib/Transforms/TestLinalgTransforms.cpp
@@ -156,7 +156,7 @@ static void applyPatterns(FuncOp funcOp) {
   });
 }
 
-OwningRewritePatternList
+static OwningRewritePatternList
 getMatmulToVectorCanonicalizationPatterns(MLIRContext *context) {
   OwningRewritePatternList patterns;
   AffineApplyOp::getCanonicalizationPatterns(patterns, context);
@@ -169,7 +169,7 @@ getMatmulToVectorCanonicalizationPatterns(MLIRContext *context) {
   return patterns;
 }
 
-void fillL1TilingAndMatmulToVectorPatterns(
+static void fillL1TilingAndMatmulToVectorPatterns(
     MLIRContext *context, StringRef startMarker,
     SmallVectorImpl<OwningRewritePatternList> &patternsVector) {
   patternsVector.emplace_back(LinalgTilingPattern<MatmulOp>(
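
Several hunks apply the same treatment to function templates, for example foldMinMaxOp in AffineOps.cpp, printTransferAttrs in VectorOps.cpp, and countMatches in TestMatchers.cpp: a function template can carry the static keyword directly, and every instantiation then gets internal linkage as well. Another minimal sketch with hypothetical names:

// Hypothetical example: a static function template; each instantiation
// stays local to this translation unit.
template <typename T>
static T doubled(T Value) {
  return Value + Value;
}

int useDoubled() { return doubled(21); }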


        

