[Mlir-commits] [mlir] 02b6fb2 - Fix clang-tidy issues in mlir/ (NFC)

Mehdi Amini <llvmlistbot@llvm.org>
Mon Dec 20 12:38:17 PST 2021


Author: Mehdi Amini
Date: 2021-12-20T20:25:01Z
New Revision: 02b6fb218e44490f3ea1597e35df1b1b66c6b869

URL: https://github.com/llvm/llvm-project/commit/02b6fb218e44490f3ea1597e35df1b1b66c6b869
DIFF: https://github.com/llvm/llvm-project/commit/02b6fb218e44490f3ea1597e35df1b1b66c6b869.diff

LOG: Fix clang-tidy issues in mlir/ (NFC)

Reviewed By: ftynse

Differential Revision: https://reviews.llvm.org/D115956

Added: 
    

Modified: 
    mlir/examples/toy/Ch1/parser/AST.cpp
    mlir/examples/toy/Ch1/toyc.cpp
    mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
    mlir/examples/toy/Ch2/parser/AST.cpp
    mlir/examples/toy/Ch2/toyc.cpp
    mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
    mlir/examples/toy/Ch3/parser/AST.cpp
    mlir/examples/toy/Ch3/toyc.cpp
    mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
    mlir/examples/toy/Ch4/parser/AST.cpp
    mlir/examples/toy/Ch4/toyc.cpp
    mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
    mlir/examples/toy/Ch5/parser/AST.cpp
    mlir/examples/toy/Ch5/toyc.cpp
    mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
    mlir/examples/toy/Ch6/parser/AST.cpp
    mlir/examples/toy/Ch6/toyc.cpp
    mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
    mlir/examples/toy/Ch7/parser/AST.cpp
    mlir/examples/toy/Ch7/toyc.cpp
    mlir/lib/Analysis/SliceAnalysis.cpp
    mlir/lib/Analysis/Utils.cpp
    mlir/lib/Bindings/Python/IRAttributes.cpp
    mlir/lib/Bindings/Python/IRCore.cpp
    mlir/lib/Bindings/Python/IRModule.cpp
    mlir/lib/Bindings/Python/PybindUtils.cpp
    mlir/lib/Bindings/Python/Transforms/Transforms.cpp
    mlir/lib/CAPI/IR/IR.cpp
    mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
    mlir/lib/Conversion/PDLToPDLInterp/RootOrdering.cpp
    mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
    mlir/lib/Conversion/SCFToStandard/SCFToStandard.cpp
    mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
    mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
    mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
    mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
    mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp
    mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
    mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
    mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
    mlir/lib/Dialect/PDL/IR/PDL.cpp
    mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp
    mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
    mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
    mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp
    mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp
    mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
    mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
    mlir/lib/Dialect/Shape/IR/Shape.cpp
    mlir/lib/Dialect/StandardOps/IR/Ops.cpp
    mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
    mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
    mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp
    mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
    mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
    mlir/lib/Dialect/Vector/VectorTransforms.cpp
    mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
    mlir/lib/ExecutionEngine/AsyncRuntime.cpp
    mlir/lib/ExecutionEngine/ExecutionEngine.cpp
    mlir/lib/ExecutionEngine/JitRunner.cpp
    mlir/lib/ExecutionEngine/RunnerUtils.cpp
    mlir/lib/IR/AffineMap.cpp
    mlir/lib/IR/AsmPrinter.cpp
    mlir/lib/IR/Block.cpp
    mlir/lib/IR/BuiltinAttributes.cpp
    mlir/lib/IR/MLIRContext.cpp
    mlir/lib/IR/Operation.cpp
    mlir/lib/IR/OperationSupport.cpp
    mlir/lib/IR/Region.cpp
    mlir/lib/Interfaces/SideEffectInterfaces.cpp
    mlir/lib/Parser/AffineParser.cpp
    mlir/lib/Pass/Pass.cpp
    mlir/lib/TableGen/Attribute.cpp
    mlir/lib/TableGen/Dialect.cpp
    mlir/lib/TableGen/Operator.cpp
    mlir/lib/TableGen/Pattern.cpp
    mlir/lib/TableGen/Predicate.cpp
    mlir/lib/TableGen/Trait.cpp
    mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
    mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
    mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
    mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp
    mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
    mlir/lib/Tools/PDLL/Parser/Parser.cpp
    mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
    mlir/lib/Tools/mlir-reduce/MlirReduceMain.cpp
    mlir/lib/Transforms/LoopFusion.cpp
    mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
    mlir/lib/Transforms/NormalizeMemRefs.cpp
    mlir/lib/Transforms/PipelineDataTransfer.cpp
    mlir/lib/Transforms/Utils/FoldUtils.cpp
    mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
    mlir/lib/Transforms/Utils/LoopUtils.cpp
    mlir/test/lib/Analysis/TestAliasAnalysis.cpp
    mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp
    mlir/test/lib/Dialect/Test/TestDialect.cpp
    mlir/test/lib/Dialect/Test/TestOps.td
    mlir/test/lib/Dialect/Test/TestPatterns.cpp
    mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
    mlir/test/lib/IR/TestMatchers.cpp
    mlir/test/lib/IR/TestOpaqueLoc.cpp
    mlir/test/lib/Transforms/TestLoopFusion.cpp
    mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp
    mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
    mlir/tools/mlir-tblgen/DialectGen.cpp
    mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
    mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
    mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
    mlir/tools/mlir-tblgen/mlir-tblgen.cpp
    mlir/unittests/ExecutionEngine/Invoke.cpp
    mlir/unittests/IR/OperationSupportTest.cpp
    mlir/unittests/TableGen/StructsGenTest.cpp

Removed: 
    


################################################################################
diff --git a/mlir/examples/toy/Ch1/parser/AST.cpp b/mlir/examples/toy/Ch1/parser/AST.cpp
index 9315bb1d2ada6..7b98b017d82dd 100644
--- a/mlir/examples/toy/Ch1/parser/AST.cpp
+++ b/mlir/examples/toy/Ch1/parser/AST.cpp
@@ -118,7 +118,7 @@ void ASTDumper::dump(NumberExprAST *num) {
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }
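
The "auto" to "auto *" change above is clang-tidy's llvm-qualified-auto check: when auto deduces a pointer type, LLVM style wants the * spelled out so pointer-ness is visible at the declaration. A minimal standalone sketch, not taken from the patch:

    #include <string>

    int main() {
      std::string s = "hi";
      // Flagged: 'p' is a char *, but nothing at the declaration says so.
      //   auto p = s.data();
      // Preferred by llvm-qualified-auto: spell the pointer explicitly.
      auto *p = s.data();
      return *p == 'h' ? 0 : 1;
    }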

diff --git a/mlir/examples/toy/Ch1/toyc.cpp b/mlir/examples/toy/Ch1/toyc.cpp
index b89fe0ecacfdf..ca2a1a1fa8adf 100644
--- a/mlir/examples/toy/Ch1/toyc.cpp
+++ b/mlir/examples/toy/Ch1/toyc.cpp
@@ -27,7 +27,7 @@ static cl::opt<std::string> inputFilename(cl::Positional,
                                           cl::value_desc("filename"));
 namespace {
 enum Action { None, DumpAST };
-}
+} // namespace
 
 static cl::opt<enum Action>
     emitAction("emit", cl::desc("Select the kind of output desired"),
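
The bare "}" to "} // namespace" edits in this and the other toyc.cpp files come from the llvm-namespace-comment check, which requires a closing comment naming the namespace (plain "// namespace" for an anonymous one). A tiny self-contained sketch:

    namespace toy {
    enum Action { None, DumpAST };
    } // namespace toy

    namespace {
    constexpr int kDefault = toy::None;
    } // namespace

    int main() { return kDefault + toy::DumpAST - 1; }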

diff --git a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
index e4df32932003e..591ae48db4f25 100644
--- a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@ class MLIRGenImpl {
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@ class MLIRGenImpl {
 
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -371,7 +371,7 @@ class MLIRGenImpl {
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -398,7 +398,7 @@ class MLIRGenImpl {
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested
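
Most of the churn across these MLIRGen.cpp files is readability-identifier-naming enforcing MLIR's lowerCamelCase convention for variables and functions (arg_types -> argTypes, F -> f, EC -> ec, and later DFSPostorder -> dfsPostorder). A standalone before/after sketch with hypothetical names:

    #include <vector>

    // Flagged spellings: snake_case or leading upper case, e.g.
    //   std::vector<int> arg_types;  int EC = 0;
    int main() {
      // MLIR style: lowerCamelCase, starting with a lower-case letter.
      std::vector<int> argTypes;
      int ec = 0;
      argTypes.push_back(ec);
      return static_cast<int>(argTypes.size()) - 1;
    }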

diff --git a/mlir/examples/toy/Ch2/parser/AST.cpp b/mlir/examples/toy/Ch2/parser/AST.cpp
index 9315bb1d2ada6..7b98b017d82dd 100644
--- a/mlir/examples/toy/Ch2/parser/AST.cpp
+++ b/mlir/examples/toy/Ch2/parser/AST.cpp
@@ -118,7 +118,7 @@ void ASTDumper::dump(NumberExprAST *num) {
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }

diff --git a/mlir/examples/toy/Ch2/toyc.cpp b/mlir/examples/toy/Ch2/toyc.cpp
index 82667eca21342..9936851ba2ced 100644
--- a/mlir/examples/toy/Ch2/toyc.cpp
+++ b/mlir/examples/toy/Ch2/toyc.cpp
@@ -38,7 +38,7 @@ static cl::opt<std::string> inputFilename(cl::Positional,
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -47,7 +47,7 @@ static cl::opt<enum InputType> inputType(
 
 namespace {
 enum Action { None, DumpAST, DumpMLIR };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -89,8 +89,8 @@ int dumpMLIR() {
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
 

diff --git a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
index e4df32932003e..591ae48db4f25 100644
--- a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@ class MLIRGenImpl {
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@ class MLIRGenImpl {
 
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -371,7 +371,7 @@ class MLIRGenImpl {
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -398,7 +398,7 @@ class MLIRGenImpl {
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested

diff --git a/mlir/examples/toy/Ch3/parser/AST.cpp b/mlir/examples/toy/Ch3/parser/AST.cpp
index 9315bb1d2ada6..7b98b017d82dd 100644
--- a/mlir/examples/toy/Ch3/parser/AST.cpp
+++ b/mlir/examples/toy/Ch3/parser/AST.cpp
@@ -118,7 +118,7 @@ void ASTDumper::dump(NumberExprAST *num) {
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }

diff --git a/mlir/examples/toy/Ch3/toyc.cpp b/mlir/examples/toy/Ch3/toyc.cpp
index 9327aed8804c2..daa59908df71a 100644
--- a/mlir/examples/toy/Ch3/toyc.cpp
+++ b/mlir/examples/toy/Ch3/toyc.cpp
@@ -40,7 +40,7 @@ static cl::opt<std::string> inputFilename(cl::Positional,
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -49,7 +49,7 @@ static cl::opt<enum InputType> inputType(
 
 namespace {
 enum Action { None, DumpAST, DumpMLIR };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -86,8 +86,8 @@ int loadMLIR(llvm::SourceMgr &sourceMgr, mlir::MLIRContext &context,
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
 

diff --git a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
index add53ce9816f8..35cd0bc106c2d 100644
--- a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@ class MLIRGenImpl {
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@ class MLIRGenImpl {
 
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -375,7 +375,7 @@ class MLIRGenImpl {
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -402,7 +402,7 @@ class MLIRGenImpl {
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested

diff --git a/mlir/examples/toy/Ch4/parser/AST.cpp b/mlir/examples/toy/Ch4/parser/AST.cpp
index 9315bb1d2ada6..7b98b017d82dd 100644
--- a/mlir/examples/toy/Ch4/parser/AST.cpp
+++ b/mlir/examples/toy/Ch4/parser/AST.cpp
@@ -118,7 +118,7 @@ void ASTDumper::dump(NumberExprAST *num) {
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }

diff --git a/mlir/examples/toy/Ch4/toyc.cpp b/mlir/examples/toy/Ch4/toyc.cpp
index 0c2c9ebbb34d2..1a4923fb86539 100644
--- a/mlir/examples/toy/Ch4/toyc.cpp
+++ b/mlir/examples/toy/Ch4/toyc.cpp
@@ -41,7 +41,7 @@ static cl::opt<std::string> inputFilename(cl::Positional,
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -50,7 +50,7 @@ static cl::opt<enum InputType> inputType(
 
 namespace {
 enum Action { None, DumpAST, DumpMLIR };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -87,8 +87,8 @@ int loadMLIR(llvm::SourceMgr &sourceMgr, mlir::MLIRContext &context,
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
 

diff --git a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
index add53ce9816f8..35cd0bc106c2d 100644
--- a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@ class MLIRGenImpl {
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@ class MLIRGenImpl {
 
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -375,7 +375,7 @@ class MLIRGenImpl {
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -402,7 +402,7 @@ class MLIRGenImpl {
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested

diff --git a/mlir/examples/toy/Ch5/parser/AST.cpp b/mlir/examples/toy/Ch5/parser/AST.cpp
index 9315bb1d2ada6..7b98b017d82dd 100644
--- a/mlir/examples/toy/Ch5/parser/AST.cpp
+++ b/mlir/examples/toy/Ch5/parser/AST.cpp
@@ -118,7 +118,7 @@ void ASTDumper::dump(NumberExprAST *num) {
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }

diff --git a/mlir/examples/toy/Ch5/toyc.cpp b/mlir/examples/toy/Ch5/toyc.cpp
index 776dec4d4f8c4..c0431cc52fc7f 100644
--- a/mlir/examples/toy/Ch5/toyc.cpp
+++ b/mlir/examples/toy/Ch5/toyc.cpp
@@ -43,7 +43,7 @@ static cl::opt<std::string> inputFilename(cl::Positional,
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -52,7 +52,7 @@ static cl::opt<enum InputType> inputType(
 
 namespace {
 enum Action { None, DumpAST, DumpMLIR, DumpMLIRAffine };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -91,8 +91,8 @@ int loadMLIR(llvm::SourceMgr &sourceMgr, mlir::MLIRContext &context,
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
 

diff --git a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
index add53ce9816f8..35cd0bc106c2d 100644
--- a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
@@ -58,8 +58,8 @@ class MLIRGenImpl {
     // add them to the module.
     theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-    for (FunctionAST &F : moduleAST) {
-      auto func = mlirGen(F);
+    for (FunctionAST &f : moduleAST) {
+      auto func = mlirGen(f);
       if (!func)
         return nullptr;
       theModule.push_back(func);
@@ -113,16 +113,16 @@ class MLIRGenImpl {
 
     // This is a generic function, the return type will be inferred later.
     // Arguments type are uniformly unranked tensors.
-    llvm::SmallVector<mlir::Type, 4> arg_types(proto.getArgs().size(),
-                                               getType(VarType{}));
-    auto func_type = builder.getFunctionType(arg_types, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
+                                              getType(VarType{}));
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    ScopedHashTableScope<llvm::StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -375,7 +375,7 @@ class MLIRGenImpl {
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -402,7 +402,7 @@ class MLIRGenImpl {
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    ScopedHashTableScope<StringRef, mlir::Value> var_scope(symbolTable);
+    ScopedHashTableScope<StringRef, mlir::Value> varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested

diff --git a/mlir/examples/toy/Ch6/parser/AST.cpp b/mlir/examples/toy/Ch6/parser/AST.cpp
index 9315bb1d2ada6..7b98b017d82dd 100644
--- a/mlir/examples/toy/Ch6/parser/AST.cpp
+++ b/mlir/examples/toy/Ch6/parser/AST.cpp
@@ -118,7 +118,7 @@ void ASTDumper::dump(NumberExprAST *num) {
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }

diff --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
index 2cc8a33ddba8a..54c91d2d00b57 100644
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -49,7 +49,7 @@ static cl::opt<std::string> inputFilename(cl::Positional,
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -66,7 +66,7 @@ enum Action {
   DumpLLVMIR,
   RunJIT
 };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -110,8 +110,8 @@ int loadMLIR(mlir::MLIRContext &context, mlir::OwningModuleRef &module) {
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
 

diff --git a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
index 23cacb6d7c1e5..4663c94ee85b5 100644
--- a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
@@ -169,14 +169,14 @@ class MLIRGenImpl {
         return nullptr;
       argTypes.push_back(type);
     }
-    auto func_type = builder.getFunctionType(argTypes, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), func_type);
+    auto funcType = builder.getFunctionType(argTypes, llvm::None);
+    return mlir::FuncOp::create(location, proto.getName(), funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
   mlir::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
-    SymbolTableScopeT var_scope(symbolTable);
+    SymbolTableScopeT varScope(symbolTable);
 
     // Create an MLIR function for the given prototype.
     mlir::FuncOp function(mlirGen(*funcAST.getProto()));
@@ -286,7 +286,7 @@ class MLIRGenImpl {
       return llvm::None;
 
     auto structVars = structAST->getVariables();
-    auto it = llvm::find_if(structVars, [&](auto &var) {
+    const auto *it = llvm::find_if(structVars, [&](auto &var) {
       return var->getName() == name->getName();
     });
     if (it == structVars.end())
@@ -569,7 +569,7 @@ class MLIRGenImpl {
   /// Future expressions will be able to reference this variable through symbol
   /// table lookup.
   mlir::Value mlirGen(VarDeclExprAST &vardecl) {
-    auto init = vardecl.getInitVal();
+    auto *init = vardecl.getInitVal();
     if (!init) {
       emitError(loc(vardecl.loc()),
                 "missing initializer in variable declaration");
@@ -612,7 +612,7 @@ class MLIRGenImpl {
 
   /// Codegen a list of expression, return failure if one of them hit an error.
   mlir::LogicalResult mlirGen(ExprASTList &blockAST) {
-    SymbolTableScopeT var_scope(symbolTable);
+    SymbolTableScopeT varScope(symbolTable);
     for (auto &expr : blockAST) {
       // Specific handling for variable declarations, return statement, and
       // print. These can only appear in block list and not in nested

diff --git a/mlir/examples/toy/Ch7/parser/AST.cpp b/mlir/examples/toy/Ch7/parser/AST.cpp
index 901d2f21edcdc..80c87dc21b48f 100644
--- a/mlir/examples/toy/Ch7/parser/AST.cpp
+++ b/mlir/examples/toy/Ch7/parser/AST.cpp
@@ -121,7 +121,7 @@ void ASTDumper::dump(NumberExprAST *num) {
 ///    <2,2>[<2>[ 1, 2 ], <2>[ 3, 4 ] ]
 void printLitHelper(ExprAST *litOrNum) {
   // Inside a literal expression we can have either a number or another literal
-  if (auto num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
+  if (auto *num = llvm::dyn_cast<NumberExprAST>(litOrNum)) {
     llvm::errs() << num->getValue();
     return;
   }

diff --git a/mlir/examples/toy/Ch7/toyc.cpp b/mlir/examples/toy/Ch7/toyc.cpp
index f10a95b724cac..1c3b756f02a9d 100644
--- a/mlir/examples/toy/Ch7/toyc.cpp
+++ b/mlir/examples/toy/Ch7/toyc.cpp
@@ -49,7 +49,7 @@ static cl::opt<std::string> inputFilename(cl::Positional,
 
 namespace {
 enum InputType { Toy, MLIR };
-}
+} // namespace
 static cl::opt<enum InputType> inputType(
     "x", cl::init(Toy), cl::desc("Decided the kind of output desired"),
     cl::values(clEnumValN(Toy, "toy", "load the input file as a Toy source.")),
@@ -66,7 +66,7 @@ enum Action {
   DumpLLVMIR,
   RunJIT
 };
-}
+} // namespace
 static cl::opt<enum Action> emitAction(
     "emit", cl::desc("Select the kind of output desired"),
     cl::values(clEnumValN(DumpAST, "ast", "output the AST dump")),
@@ -110,8 +110,8 @@ int loadMLIR(mlir::MLIRContext &context, mlir::OwningModuleRef &module) {
   // Otherwise, the input is '.mlir'.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
       llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
-  if (std::error_code EC = fileOrErr.getError()) {
-    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
+  if (std::error_code ec = fileOrErr.getError()) {
+    llvm::errs() << "Could not open input file: " << ec.message() << "\n";
     return -1;
   }
 

diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp
index a29315a3938ce..b45ee4c0faae4 100644
--- a/mlir/lib/Analysis/SliceAnalysis.cpp
+++ b/mlir/lib/Analysis/SliceAnalysis.cpp
@@ -168,7 +168,7 @@ struct DFSState {
 };
 } // namespace
 
-static void DFSPostorder(Operation *root, DFSState *state) {
+static void dfsPostorder(Operation *root, DFSState *state) {
   SmallVector<Operation *> queue(1, root);
   std::vector<Operation *> ops;
   while (!queue.empty()) {
@@ -200,7 +200,7 @@ mlir::topologicalSort(const SetVector<Operation *> &toSort) {
   DFSState state(toSort);
   for (auto *s : toSort) {
     assert(toSort.count(s) == 1 && "NYI: multi-sets not supported");
-    DFSPostorder(s, &state);
+    dfsPostorder(s, &state);
   }
 
   // Reorder and return.

diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index 17c499a0453bf..097828e077e75 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -1278,10 +1278,10 @@ bool MemRefAccess::operator==(const MemRefAccess &rhs) const {
 
 /// Returns the number of surrounding loops common to 'loopsA' and 'loopsB',
 /// where each lists loops from outer-most to inner-most in loop nest.
-unsigned mlir::getNumCommonSurroundingLoops(Operation &A, Operation &B) {
+unsigned mlir::getNumCommonSurroundingLoops(Operation &a, Operation &b) {
   SmallVector<AffineForOp, 4> loopsA, loopsB;
-  getLoopIVs(A, &loopsA);
-  getLoopIVs(B, &loopsB);
+  getLoopIVs(a, &loopsA);
+  getLoopIVs(b, &loopsB);
 
   unsigned minNumLoops = std::min(loopsA.size(), loopsB.size());
   unsigned numCommonLoops = 0;

diff --git a/mlir/lib/Bindings/Python/IRAttributes.cpp b/mlir/lib/Bindings/Python/IRAttributes.cpp
index 17b3b34a2ea30..eed6369acb2fb 100644
--- a/mlir/lib/Bindings/Python/IRAttributes.cpp
+++ b/mlir/lib/Bindings/Python/IRAttributes.cpp
@@ -17,7 +17,6 @@ namespace py = pybind11;
 using namespace mlir;
 using namespace mlir::python;
 
-using llvm::None;
 using llvm::Optional;
 using llvm::SmallVector;
 using llvm::Twine;
@@ -510,7 +509,8 @@ class PyDenseElementsAttribute
     if (mlirTypeIsAF32(elementType)) {
       // f32
       return bufferInfo<float>(shapedType);
-    } else if (mlirTypeIsAF64(elementType)) {
+    }
+    if (mlirTypeIsAF64(elementType)) {
       // f64
       return bufferInfo<double>(shapedType);
     } else if (mlirTypeIsAF16(elementType)) {
@@ -712,12 +712,12 @@ class PyDictAttribute : public PyConcreteAttribute<PyDictAttribute> {
           SmallVector<MlirNamedAttribute> mlirNamedAttributes;
           mlirNamedAttributes.reserve(attributes.size());
           for (auto &it : attributes) {
-            auto &mlir_attr = it.second.cast<PyAttribute &>();
+            auto &mlirAttr = it.second.cast<PyAttribute &>();
             auto name = it.first.cast<std::string>();
             mlirNamedAttributes.push_back(mlirNamedAttributeGet(
-                mlirIdentifierGet(mlirAttributeGetContext(mlir_attr),
+                mlirIdentifierGet(mlirAttributeGetContext(mlirAttr),
                                   toMlirStringRef(name)),
-                mlir_attr));
+                mlirAttr));
           }
           MlirAttribute attr =
               mlirDictionaryAttrGet(context->get(), mlirNamedAttributes.size(),

diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp
index 3640a15e3407b..864144226d457 100644
--- a/mlir/lib/Bindings/Python/IRCore.cpp
+++ b/mlir/lib/Bindings/Python/IRCore.cpp
@@ -1267,7 +1267,7 @@ PyOpView::buildGeneric(py::object cls, py::list resultTypeList,
       if (segmentSpec == 1 || segmentSpec == 0) {
         // Unpack unary element.
         try {
-          auto operandValue = py::cast<PyValue *>(std::get<0>(it.value()));
+          auto *operandValue = py::cast<PyValue *>(std::get<0>(it.value()));
           if (operandValue) {
             operands.push_back(operandValue);
             operandSegmentLengths.push_back(1);
@@ -2286,10 +2286,10 @@ void mlir::python::populateIRCore(py::module &m) {
       .def_property_readonly(
           "body",
           [](PyModule &self) {
-            PyOperationRef module_op = PyOperation::forOperation(
+            PyOperationRef moduleOp = PyOperation::forOperation(
                 self.getContext(), mlirModuleGetOperation(self.get()),
                 self.getRef().releaseObject());
-            PyBlock returnBlock(module_op, mlirModuleGetBody(self.get()));
+            PyBlock returnBlock(moduleOp, mlirModuleGetBody(self.get()));
             return returnBlock;
           },
           "Return the block for this module")

diff --git a/mlir/lib/Bindings/Python/IRModule.cpp b/mlir/lib/Bindings/Python/IRModule.cpp
index 9f853eb92df18..7008e54bd0460 100644
--- a/mlir/lib/Bindings/Python/IRModule.cpp
+++ b/mlir/lib/Bindings/Python/IRModule.cpp
@@ -51,9 +51,8 @@ void PyGlobals::loadDialectModule(llvm::StringRef dialectNamespace) {
     } catch (py::error_already_set &e) {
       if (e.matches(PyExc_ModuleNotFoundError)) {
         continue;
-      } else {
-        throw;
       }
+      throw;
     }
     break;
   }
@@ -136,11 +135,10 @@ PyGlobals::lookupRawOpViewClass(llvm::StringRef operationName) {
       // Positive cache.
       rawOpViewClassMapCache[operationName] = foundIt->second;
       return foundIt->second;
-    } else {
-      // Negative cache.
-      rawOpViewClassMap[operationName] = py::none();
-      return llvm::None;
     }
+    // Negative cache.
+    rawOpViewClassMap[operationName] = py::none();
+    return llvm::None;
   }
 }
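
These two hunks (like the IRAttributes.cpp one above) apply readability-else-after-return: when the if branch unconditionally returns, throws, or continues, the else keyword only adds nesting, so its body is hoisted out. A minimal sketch:

    int sign(int x) {
      // Before (flagged): if (x < 0) return -1; else return 1;
      if (x < 0)
        return -1;
      return 1; // the fall-through path is the else path
    }

    int main() { return sign(-5) + sign(5); }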
 

diff --git a/mlir/lib/Bindings/Python/PybindUtils.cpp b/mlir/lib/Bindings/Python/PybindUtils.cpp
index bd80b8c147025..d243307f12c1e 100644
--- a/mlir/lib/Bindings/Python/PybindUtils.cpp
+++ b/mlir/lib/Bindings/Python/PybindUtils.cpp
@@ -8,8 +8,6 @@
 
 #include "PybindUtils.h"
 
-namespace py = pybind11;
-
 pybind11::error_already_set
 mlir::python::SetPyError(PyObject *excClass, const llvm::Twine &message) {
   auto messageStr = message.str();

diff --git a/mlir/lib/Bindings/Python/Transforms/Transforms.cpp b/mlir/lib/Bindings/Python/Transforms/Transforms.cpp
index 46c4691923c72..944b191bc12cb 100644
--- a/mlir/lib/Bindings/Python/Transforms/Transforms.cpp
+++ b/mlir/lib/Bindings/Python/Transforms/Transforms.cpp
@@ -10,8 +10,6 @@
 
 #include <pybind11/pybind11.h>
 
-namespace py = pybind11;
-
 // -----------------------------------------------------------------------------
 // Module initialization.
 // -----------------------------------------------------------------------------
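
Both pybind11 files above drop a "namespace py = pybind11;" alias that nothing in the translation unit referenced (the misc-unused-alias-decls check). An alias is best declared only where it is actually used; a small sketch:

    #include <cstdio>

    namespace deeply::nested::library {
    inline int answer() { return 42; }
    } // namespace deeply::nested::library

    int main() {
      // Scope the alias to the code that needs it.
      namespace lib = deeply::nested::library;
      std::printf("%d\n", lib::answer());
      return 0;
    }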

diff --git a/mlir/lib/CAPI/IR/IR.cpp b/mlir/lib/CAPI/IR/IR.cpp
index 424bbae179c33..955f5e0c1eb6c 100644
--- a/mlir/lib/CAPI/IR/IR.cpp
+++ b/mlir/lib/CAPI/IR/IR.cpp
@@ -818,7 +818,7 @@ void mlirSymbolTableErase(MlirSymbolTable symbolTable,
 MlirLogicalResult mlirSymbolTableReplaceAllSymbolUses(MlirStringRef oldSymbol,
                                                       MlirStringRef newSymbol,
                                                       MlirOperation from) {
-  auto cppFrom = unwrap(from);
+  auto *cppFrom = unwrap(from);
   auto *context = cppFrom->getContext();
   auto oldSymbolAttr = StringAttr::get(unwrap(oldSymbol), context);
   auto newSymbolAttr = StringAttr::get(unwrap(newSymbol), context);

diff --git a/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp b/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
index ac346dc5794df..10ce877e24fa4 100644
--- a/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp
@@ -468,10 +468,10 @@ Value UnrankedMemRefDescriptor::sizeBasePtr(
   Value structPtr =
       builder.create<LLVM::BitcastOp>(loc, structPtrTy, memRefDescPtr);
 
-  Type int32_type = typeConverter.convertType(builder.getI32Type());
+  Type int32Type = typeConverter.convertType(builder.getI32Type());
   Value zero =
       createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 0);
-  Value three = builder.create<LLVM::ConstantOp>(loc, int32_type,
+  Value three = builder.create<LLVM::ConstantOp>(loc, int32Type,
                                                  builder.getI32IntegerAttr(3));
   return builder.create<LLVM::GEPOp>(loc, LLVM::LLVMPointerType::get(indexTy),
                                      structPtr, ValueRange({zero, three}));

diff --git a/mlir/lib/Conversion/PDLToPDLInterp/RootOrdering.cpp b/mlir/lib/Conversion/PDLToPDLInterp/RootOrdering.cpp
index 4382753458644..a4d68b1343f72 100644
--- a/mlir/lib/Conversion/PDLToPDLInterp/RootOrdering.cpp
+++ b/mlir/lib/Conversion/PDLToPDLInterp/RootOrdering.cpp
@@ -90,8 +90,8 @@ static void contract(RootOrderingGraph &graph, ArrayRef<Value> cycle,
       DenseMap<Value, RootOrderingCost> &costs = outer->second;
       Value bestSource;
       std::pair<unsigned, unsigned> bestCost;
-      auto inner = costs.begin(), inner_e = costs.end();
-      while (inner != inner_e) {
+      auto inner = costs.begin(), innerE = costs.end();
+      while (inner != innerE) {
         Value source = inner->first;
         if (cycleSet.contains(source)) {
           // Going-away edge => get its cost and erase it.

diff --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
index 7243c2516a3de..f3547e580501e 100644
--- a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
+++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
@@ -259,8 +259,8 @@ void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
   // from 0 to N with step 1.  Therefore, loop induction variables are replaced
   // with (gpu-thread/block-id * S) + LB.
   builder.setInsertionPointToStart(&launchOp.body().front());
-  auto lbArgumentIt = lbs.begin();
-  auto stepArgumentIt = steps.begin();
+  auto *lbArgumentIt = lbs.begin();
+  auto *stepArgumentIt = steps.begin();
   for (auto en : llvm::enumerate(ivs)) {
     Value id =
         en.index() < numBlockDims
@@ -640,7 +640,7 @@ ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
     } else if (op == launchOp.getOperation()) {
       // Found our sentinel value. We have finished the operations from one
       // nesting level, pop one level back up.
-      auto parent = rewriter.getInsertionPoint()->getParentOp();
+      auto *parent = rewriter.getInsertionPoint()->getParentOp();
       rewriter.setInsertionPointAfter(parent);
       leftNestingScope = true;
       seenSideeffects = false;

diff --git a/mlir/lib/Conversion/SCFToStandard/SCFToStandard.cpp b/mlir/lib/Conversion/SCFToStandard/SCFToStandard.cpp
index 1994006e88cf1..8ceaa864721c0 100644
--- a/mlir/lib/Conversion/SCFToStandard/SCFToStandard.cpp
+++ b/mlir/lib/Conversion/SCFToStandard/SCFToStandard.cpp
@@ -455,11 +455,11 @@ ParallelLowering::matchAndRewrite(ParallelOp parallelOp,
   ivs.reserve(parallelOp.getNumLoops());
   bool first = true;
   SmallVector<Value, 4> loopResults(iterArgs);
-  for (auto loop_operands :
+  for (auto loopOperands :
        llvm::zip(parallelOp.getInductionVars(), parallelOp.getLowerBound(),
                  parallelOp.getUpperBound(), parallelOp.getStep())) {
     Value iv, lower, upper, step;
-    std::tie(iv, lower, upper, step) = loop_operands;
+    std::tie(iv, lower, upper, step) = loopOperands;
     ForOp forOp = rewriter.create<ForOp>(loc, lower, upper, step, iterArgs);
     ivs.push_back(forOp.getInductionVar());
     auto iterRange = forOp.getRegionIterArgs();

diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
index b416c303ad51a..14ae384aa2dc9 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -1390,7 +1390,7 @@ class VectorShufflePattern
     auto dstType = typeConverter.convertType(op.getType());
     auto scalarType = dstType.cast<VectorType>().getElementType();
     auto componentsArray = components.getValue();
-    auto context = rewriter.getContext();
+    auto *context = rewriter.getContext();
     auto llvmI32Type = IntegerType::get(context, 32);
     Value targetOp = rewriter.create<LLVM::UndefOp>(loc, dstType);
     for (unsigned i = 0; i < componentsArray.size(); i++) {

diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index f05226d6a4645..abff8b57ccdc2 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -2173,16 +2173,16 @@ class ResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
 
           rewriter.create<linalg::YieldOp>(loc, result);
           return success();
-        } else {
-          y0x0 = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, y0x0);
-          y0x1 = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, y0x1);
-          y1x0 = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, y1x0);
-          y1x1 = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, y1x1);
-
-          if (resultElementTy.getIntOrFloatBitWidth() > 32) {
-            dx = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, dx);
-            dy = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, dy);
-          }
+        }
+        y0x0 = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, y0x0);
+        y0x1 = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, y0x1);
+        y1x0 = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, y1x0);
+        y1x1 = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, y1x1);
+
+        if (resultElementTy.getIntOrFloatBitWidth() > 32) {
+          dx = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, dx);
+          dy = rewriter.create<arith::ExtSIOp>(loc, resultElementTy, dy);
+        }
 
           auto unitVal = rewriter.create<arith::ConstantOp>(
               loc, rewriter.getIntegerAttr(resultElementTy, 1 << shift));
@@ -2206,7 +2206,6 @@ class ResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
 
           rewriter.create<linalg::YieldOp>(loc, result);
           return success();
-        }
       }
 
       return failure();

diff --git a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
index 1aebd90a2e660..a7f449c12c73d 100644
--- a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
@@ -28,9 +28,9 @@ namespace {
 struct GpuAllReduceRewriter {
   using AccumulatorFactory = std::function<Value(Value, Value)>;
 
-  GpuAllReduceRewriter(gpu::GPUFuncOp funcOp_, gpu::AllReduceOp reduceOp_,
-                       PatternRewriter &rewriter_)
-      : funcOp(funcOp_), reduceOp(reduceOp_), rewriter(rewriter_),
+  GpuAllReduceRewriter(gpu::GPUFuncOp funcOp, gpu::AllReduceOp reduceOp,
+                       PatternRewriter &rewriter)
+      : funcOp(funcOp), reduceOp(reduceOp), rewriter(rewriter),
         loc(reduceOp.getLoc()), valueType(reduceOp.value().getType()),
         indexType(IndexType::get(reduceOp.getContext())),
         int32Type(IntegerType::get(reduceOp.getContext(), /*width=*/32)) {}

diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
index b586b2a295be4..1a03867a18cba 100644
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -313,7 +313,7 @@ class GpuKernelOutliningPass
     // a SymbolTable by the caller. SymbolTable needs to be refactored to
     // prevent manual building of Ops with symbols in code using SymbolTables
     // and then this needs to use the OpBuilder.
-    auto context = getOperation().getContext();
+    auto *context = getOperation().getContext();
     OpBuilder builder(context);
     auto kernelModule = builder.create<gpu::GPUModuleOp>(kernelFunc.getLoc(),
                                                          kernelFunc.getName());

diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
index b129ed99758a4..e7a09956a90e4 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -266,13 +266,14 @@ bool LLVMPointerType::areCompatible(DataLayoutEntryListRef oldLayout,
     unsigned size = kDefaultPointerSizeBits;
     unsigned abi = kDefaultPointerAlignment;
     auto newType = newEntry.getKey().get<Type>().cast<LLVMPointerType>();
-    auto it = llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
-      if (auto type = entry.getKey().dyn_cast<Type>()) {
-        return type.cast<LLVMPointerType>().getAddressSpace() ==
-               newType.getAddressSpace();
-      }
-      return false;
-    });
+    const auto *it =
+        llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
+          if (auto type = entry.getKey().dyn_cast<Type>()) {
+            return type.cast<LLVMPointerType>().getAddressSpace() ==
+                   newType.getAddressSpace();
+          }
+          return false;
+        });
     if (it == oldLayout.end()) {
       llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
         if (auto type = entry.getKey().dyn_cast<Type>()) {
@@ -440,14 +441,15 @@ LLVMStructType::getTypeSizeInBits(const DataLayout &dataLayout,
 
 namespace {
 enum class StructDLEntryPos { Abi = 0, Preferred = 1 };
-}
+} // namespace
 
 static Optional<unsigned>
 getStructDataLayoutEntry(DataLayoutEntryListRef params, LLVMStructType type,
                          StructDLEntryPos pos) {
-  auto currentEntry = llvm::find_if(params, [](DataLayoutEntryInterface entry) {
-    return entry.isTypeEntry();
-  });
+  const auto *currentEntry =
+      llvm::find_if(params, [](DataLayoutEntryInterface entry) {
+        return entry.isTypeEntry();
+      });
   if (currentEntry == params.end())
     return llvm::None;
 
@@ -509,7 +511,7 @@ bool LLVMStructType::areCompatible(DataLayoutEntryListRef oldLayout,
     if (!newEntry.isTypeEntry())
       continue;
 
-    auto previousEntry =
+    const auto *previousEntry =
         llvm::find_if(oldLayout, [](DataLayoutEntryInterface entry) {
           return entry.isTypeEntry();
         });

diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 26a0c9277b327..7e864ab4722e4 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -228,6 +228,7 @@ class RegionBuilderHelper {
     return operand;
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__add(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -237,6 +238,7 @@ class RegionBuilderHelper {
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__exp(Value x) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(x))
@@ -244,6 +246,7 @@ class RegionBuilderHelper {
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__log(Value x) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(x))
@@ -251,6 +254,7 @@ class RegionBuilderHelper {
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__sub(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -260,6 +264,7 @@ class RegionBuilderHelper {
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__mul(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -269,6 +274,7 @@ class RegionBuilderHelper {
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__max(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -278,6 +284,7 @@ class RegionBuilderHelper {
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__max_unsigned(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -287,6 +294,7 @@ class RegionBuilderHelper {
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__min(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -296,6 +304,7 @@ class RegionBuilderHelper {
     llvm_unreachable("unsupported non numeric type");
   }
 
+  // NOLINTNEXTLINE(*-identifier-naming): externally called.
   Value applyfn__min_unsigned(Value lhs, Value rhs) {
     OpBuilder builder = getBuilder();
     if (isFloatingPoint(lhs))
@@ -1829,12 +1838,12 @@ static ParseResult parseTiledLoopOp(OpAsmParser &parser,
     return failure();
 
   // Parse input tensors.
-  SmallVector<OpAsmParser::OperandType, 4> inputs, input_region_args;
+  SmallVector<OpAsmParser::OperandType, 4> inputs, inputRegionArgs;
   SmallVector<Type, 4> inputTypes;
   if (succeeded(parser.parseOptionalKeyword("ins"))) {
     llvm::SMLoc inputsOperandsLoc = parser.getCurrentLocation();
 
-    if (parser.parseAssignmentListWithTypes(input_region_args, inputs,
+    if (parser.parseAssignmentListWithTypes(inputRegionArgs, inputs,
                                             inputTypes))
       return failure();
 
@@ -1844,12 +1853,12 @@ static ParseResult parseTiledLoopOp(OpAsmParser &parser,
   }
 
   // Parse output tensors.
-  SmallVector<OpAsmParser::OperandType, 4> outputs, output_region_args;
+  SmallVector<OpAsmParser::OperandType, 4> outputs, outputRegionArgs;
   SmallVector<Type, 4> outputTypes;
   if (succeeded(parser.parseOptionalKeyword("outs"))) {
     llvm::SMLoc outputsOperandsLoc = parser.getCurrentLocation();
 
-    if (parser.parseAssignmentListWithTypes(output_region_args, outputs,
+    if (parser.parseAssignmentListWithTypes(outputRegionArgs, outputs,
                                             outputTypes))
       return failure();
 
@@ -1905,15 +1914,15 @@ static ParseResult parseTiledLoopOp(OpAsmParser &parser,
   // Parse the body.
   Region *body = result.addRegion();
 
-  SmallVector<Type, 4> region_types(ivs.size(), builder.getIndexType());
-  region_types.append(inputTypes);
-  region_types.append(outputTypes);
+  SmallVector<Type, 4> regionTypes(ivs.size(), builder.getIndexType());
+  regionTypes.append(inputTypes);
+  regionTypes.append(outputTypes);
 
-  SmallVector<OpAsmParser::OperandType, 4> region_args(ivs);
-  region_args.append(input_region_args);
-  region_args.append(output_region_args);
+  SmallVector<OpAsmParser::OperandType, 4> regionArgs(ivs);
+  regionArgs.append(inputRegionArgs);
+  regionArgs.append(outputRegionArgs);
 
-  if (parser.parseRegion(*body, region_args, region_types))
+  if (parser.parseRegion(*body, regionArgs, regionTypes))
     return failure();
 
   // Parse optional attributes.
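
Two distinct clang-tidy fixes appear in the hunks above: names whose spelling
is an external contract get a NOLINTNEXTLINE(*-identifier-naming) suppression
(the applyfn__* builders are, per the added comments, called externally),
while purely local names are simply renamed to camelCase. A minimal sketch of
both, with a hypothetical builder:

  // NOLINTNEXTLINE(*-identifier-naming): spelling referenced externally.
  Value applyfn__tanh(Value x); // hypothetical: keeps its snake_case name

  // Local-only name: no suppression needed, just rename it.
  SmallVector<OpAsmParser::OperandType, 4> inputRegionArgs; // was input_region_args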

diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp
index d7c45e0b96fe1..84d27f2176312 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseToLinalg.cpp
@@ -127,7 +127,7 @@ class ConvertElementwiseToLinalgPass
     : public ConvertElementwiseToLinalgBase<ConvertElementwiseToLinalgPass> {
 
   void runOnOperation() final {
-    auto func = getOperation();
+    auto *func = getOperation();
     auto *context = &getContext();
     ConversionTarget target(*context);
     RewritePatternSet patterns(context);
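
The one-character change above (auto to auto *) is the qualified-auto fix:
when the initializer is known to yield a pointer, spelling out the * keeps the
pointerness visible at the declaration site. Schematically (the value-typed
third line is an assumed getter, shown only for contrast):

  auto *func = getOperation();   // initializer yields Operation *
  auto *context = &getContext(); // address-of always yields a pointer
  auto width = type.getWidth();  // plain value: unqualified auto stays fine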

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index d068174f3d48a..17e78d085d069 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1426,9 +1426,9 @@ namespace {
 ///   Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}}
 /// ```
 /// kw is unrolled, w is unrolled iff dilationW > 1.
-struct Conv1D_NWC_Generator : public StructuredGenerator<LinalgOp> {
-  Conv1D_NWC_Generator(OpBuilder &builder, LinalgOp linalgOp, int strideW,
-                       int dilationW)
+struct Conv1DNwcGenerator : public StructuredGenerator<LinalgOp> {
+  Conv1DNwcGenerator(OpBuilder &builder, LinalgOp linalgOp, int strideW,
+                     int dilationW)
       : StructuredGenerator<LinalgOp>(builder, linalgOp), valid(false),
         strideW(strideW), dilationW(dilationW) {
     // Determine whether `linalgOp` can be generated with this generator
@@ -1594,7 +1594,7 @@ struct Conv1D_NWC_Generator : public StructuredGenerator<LinalgOp> {
   /// ```
   /// kw is always unrolled.
  /// TODO: w (resp. kw) is unrolled when the strideW (resp. dilationW) is > 1.
-  FailureOr<Operation *> dilated_conv() {
+  FailureOr<Operation *> dilatedConv() {
     if (!valid)
       return failure();
 
@@ -1730,7 +1730,7 @@ struct Conv1D_NWC_Generator : public StructuredGenerator<LinalgOp> {
     if (layout({/*lhsIndex*/ {n, strideW * w + dilationW * kw, c},
                 /*rhsIndex*/ {kw, c},
                 /*resIndex*/ {n, w, c}}))
-      return dilated_conv();
+      return dilatedConv();
     return failure();
   }
 
@@ -1752,7 +1752,7 @@ vectorizeConvolution(OpBuilder &b, ConvolutionOpInterface convOp) {
   auto stride = strides ? *strides.getValues<uint64_t>().begin() : 1;
   auto dilation = dilations ? *dilations.getValues<uint64_t>().begin() : 1;
   LinalgOp linalgOp = cast<LinalgOp>(convOp.getOperation());
-  Conv1D_NWC_Generator e(b, linalgOp, stride, dilation);
+  Conv1DNwcGenerator e(b, linalgOp, stride, dilation);
   auto res = e.generateConv();
   if (succeeded(res))
     return res;
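
For orientation on the renamed generator: the layout triple in its doc comment
reads as indexing maps, i.e.
res[n][w][c] += lhs[n][strideW * w + dilationW * kw][c] * rhs[kw][c], with c
appearing in all three operands (so it is not contracted). A scalar model of
one fixed (n, c) slice, kw outermost as in the unrolled lowering (the
signature is illustrative, not the MLIR code):

  void conv1dSlice(int w, int kw, int strideW, int dilationW,
                   const float *lhs, const float *rhs, float *res) {
    for (int k = 0; k < kw; ++k)   // kw is unrolled by the generator
      for (int i = 0; i < w; ++i)
        res[i] += lhs[strideW * i + dilationW * k] * rhs[k];
  }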

diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
index c15d61773e0dc..a42dfe79b39c8 100644
--- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
@@ -195,7 +195,7 @@ static Value clamp(ImplicitLocOpBuilder &builder, Value value, Value lowerBound,
 // Decomposes given floating point value `arg` into a normalized fraction and
 // an integral power of two (see std::frexp). Returned values have float type.
 static std::pair<Value, Value> frexp(ImplicitLocOpBuilder &builder, Value arg,
-                                     bool is_positive = false) {
+                                     bool isPositive = false) {
   assert(getElementTypeOrSelf(arg).isF32() && "arg must be f32 type");
   ArrayRef<int64_t> shape = vectorShape(arg);
 
@@ -222,7 +222,7 @@ static std::pair<Value, Value> frexp(ImplicitLocOpBuilder &builder, Value arg,
   Value normalizedFraction = builder.create<arith::BitcastOp>(f32Vec, tmp1);
 
   // Compute exponent.
-  Value arg0 = is_positive ? arg : builder.create<math::AbsOp>(arg);
+  Value arg0 = isPositive ? arg : builder.create<math::AbsOp>(arg);
   Value biasedExponentBits = builder.create<arith::ShRUIOp>(
       builder.create<arith::BitcastOp>(i32Vec, arg0),
       bcast(i32Cst(builder, 23)));
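
As a reminder of the contract the renamed parameter belongs to: frexp splits
arg into fraction * 2^exponent with the fraction in [0.5, 1) for finite
nonzero inputs, mirroring std::frexp. Scalar reference:

  #include <cmath>
  int exp = 0;
  float frac = std::frexp(8.0f, &exp); // frac == 0.5f, exp == 4 (8 == 0.5 * 2^4)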

diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index a4bdfd69d202a..f5bb64b0f9110 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -375,13 +375,13 @@ parseReductionVarList(OpAsmParser &parser,
 /// Print Reduction clause
 static void printReductionVarList(OpAsmPrinter &p,
                                   Optional<ArrayAttr> reductions,
-                                  OperandRange reduction_vars) {
+                                  OperandRange reductionVars) {
   p << "reduction(";
   for (unsigned i = 0, e = reductions->size(); i < e; ++i) {
     if (i != 0)
       p << ", ";
-    p << (*reductions)[i] << " -> " << reduction_vars[i] << " : "
-      << reduction_vars[i].getType();
+    p << (*reductions)[i] << " -> " << reductionVars[i] << " : "
+      << reductionVars[i].getType();
   }
   p << ") ";
 }
@@ -389,9 +389,9 @@ static void printReductionVarList(OpAsmPrinter &p,
 /// Verifies Reduction Clause
 static LogicalResult verifyReductionVarList(Operation *op,
                                             Optional<ArrayAttr> reductions,
-                                            OperandRange reduction_vars) {
-  if (reduction_vars.size() != 0) {
-    if (!reductions || reductions->size() != reduction_vars.size())
+                                            OperandRange reductionVars) {
+  if (reductionVars.size() != 0) {
+    if (!reductions || reductions->size() != reductionVars.size())
       return op->emitOpError()
              << "expected as many reduction symbol references "
                 "as reduction variables";
@@ -402,7 +402,7 @@ static LogicalResult verifyReductionVarList(Operation *op,
   }
 
   DenseSet<Value> accumulators;
-  for (auto args : llvm::zip(reduction_vars, *reductions)) {
+  for (auto args : llvm::zip(reductionVars, *reductions)) {
     Value accum = std::get<0>(args);
 
     if (!accumulators.insert(accum).second)

diff --git a/mlir/lib/Dialect/PDL/IR/PDL.cpp b/mlir/lib/Dialect/PDL/IR/PDL.cpp
index 2363668618e5e..b9e5415dadcc9 100644
--- a/mlir/lib/Dialect/PDL/IR/PDL.cpp
+++ b/mlir/lib/Dialect/PDL/IR/PDL.cpp
@@ -271,8 +271,8 @@ bool OperationOp::hasTypeInference() {
 static LogicalResult verify(PatternOp pattern) {
   Region &body = pattern.body();
   Operation *term = body.front().getTerminator();
-  auto rewrite_op = dyn_cast<RewriteOp>(term);
-  if (!rewrite_op) {
+  auto rewriteOp = dyn_cast<RewriteOp>(term);
+  if (!rewriteOp) {
     return pattern.emitOpError("expected body to terminate with `pdl.rewrite`")
         .attachNote(term->getLoc())
         .append("see terminator defined here");

diff --git a/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp b/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp
index 5a14fff1c90da..9bb3b6232dccd 100644
--- a/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp
+++ b/mlir/lib/Dialect/PDLInterp/IR/PDLInterp.cpp
@@ -74,9 +74,9 @@ void ForEachOp::build(::mlir::OpBuilder &builder, ::mlir::OperationState &state,
   build(builder, state, range, successor);
   if (initLoop) {
     // Create the block and the loop variable.
-    auto range_type = range.getType().cast<pdl::RangeType>();
+    auto rangeType = range.getType().cast<pdl::RangeType>();
     state.regions.front()->emplaceBlock();
-    state.regions.front()->addArgument(range_type.getElementType());
+    state.regions.front()->addArgument(rangeType.getElementType());
   }
 }
 

diff --git a/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp b/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
index 7901b6d01e506..a17b77dea2f35 100644
--- a/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
+++ b/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
@@ -104,11 +104,13 @@ Type QuantizedType::castFromStorageType(Type candidateType) {
   if (candidateType == getStorageType()) {
     // i.e. i32 -> quant<"uniform[i8:f32]{1.0}">
     return *this;
-  } else if (candidateType.isa<RankedTensorType>()) {
+  }
+  if (candidateType.isa<RankedTensorType>()) {
     // i.e. tensor<4xi8> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
     return RankedTensorType::get(
         candidateType.cast<RankedTensorType>().getShape(), getStorageType());
-  } else if (candidateType.isa<UnrankedTensorType>()) {
+  }
+  if (candidateType.isa<UnrankedTensorType>()) {
     // i.e. tensor<i8> -> tensor<!quant<"uniform[i8:f32]{1.0}">>
     return UnrankedTensorType::get(getStorageType());
   } else if (candidateType.isa<VectorType>()) {
@@ -124,7 +126,8 @@ Type QuantizedType::castToStorageType(Type quantizedType) {
   if (quantizedType.isa<QuantizedType>()) {
     // i.e. quant<"uniform[i8:f32]{1.0}"> -> i8
     return quantizedType.cast<QuantizedType>().getStorageType();
-  } else if (quantizedType.isa<ShapedType>()) {
+  }
+  if (quantizedType.isa<ShapedType>()) {
     // i.e. tensor<4xi8> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
     ShapedType sType = quantizedType.cast<ShapedType>();
     if (!sType.getElementType().isa<QuantizedType>()) {
@@ -134,7 +137,8 @@ Type QuantizedType::castToStorageType(Type quantizedType) {
         sType.getElementType().cast<QuantizedType>().getStorageType();
     if (quantizedType.isa<RankedTensorType>()) {
       return RankedTensorType::get(sType.getShape(), storageType);
-    } else if (quantizedType.isa<UnrankedTensorType>()) {
+    }
+    if (quantizedType.isa<UnrankedTensorType>()) {
       return UnrankedTensorType::get(storageType);
     } else if (quantizedType.isa<VectorType>()) {
       return VectorType::get(sType.getShape(), storageType);
@@ -148,7 +152,8 @@ Type QuantizedType::castFromExpressedType(Type candidateType) {
   if (candidateType == getExpressedType()) {
     // i.e. f32 -> quant<"uniform[i8:f32]{1.0}">
     return *this;
-  } else if (candidateType.isa<ShapedType>()) {
+  }
+  if (candidateType.isa<ShapedType>()) {
     ShapedType candidateShapedType = candidateType.cast<ShapedType>();
     if (candidateShapedType.getElementType() != getExpressedType()) {
       return nullptr;
@@ -157,7 +162,8 @@ Type QuantizedType::castFromExpressedType(Type candidateType) {
     if (candidateType.isa<RankedTensorType>()) {
       // i.e. tensor<4xf32> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
       return RankedTensorType::get(candidateShapedType.getShape(), *this);
-    } else if (candidateType.isa<UnrankedTensorType>()) {
+    }
+    if (candidateType.isa<UnrankedTensorType>()) {
       // i.e. tensor<xf32> -> tensor<x!quant<"uniform[i8:f32]{1.0}">>
       return UnrankedTensorType::get(*this);
     } else if (candidateType.isa<VectorType>()) {
@@ -173,7 +179,8 @@ Type QuantizedType::castToExpressedType(Type quantizedType) {
   if (quantizedType.isa<QuantizedType>()) {
     // i.e. quant<"uniform[i8:f32]{1.0}"> -> f32
     return quantizedType.cast<QuantizedType>().getExpressedType();
-  } else if (quantizedType.isa<ShapedType>()) {
+  }
+  if (quantizedType.isa<ShapedType>()) {
     // i.e. tensor<4xi8> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
     ShapedType sType = quantizedType.cast<ShapedType>();
     if (!sType.getElementType().isa<QuantizedType>()) {
@@ -183,7 +190,8 @@ Type QuantizedType::castToExpressedType(Type quantizedType) {
         sType.getElementType().cast<QuantizedType>().getExpressedType();
     if (quantizedType.isa<RankedTensorType>()) {
       return RankedTensorType::get(sType.getShape(), expressedType);
-    } else if (quantizedType.isa<UnrankedTensorType>()) {
+    }
+    if (quantizedType.isa<UnrankedTensorType>()) {
       return UnrankedTensorType::get(expressedType);
     } else if (quantizedType.isa<VectorType>()) {
       return VectorType::get(sType.getShape(), expressedType);
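
The restructuring throughout this file is the else-after-return cleanup: a
branch that ends in return makes the following else redundant, so the chain
flattens into early returns with identical behavior. The shape, on an
illustrative type switch (handleRanked and handleUnranked are placeholders):

  // Before: redundant else after a returning branch.
  if (type.isa<RankedTensorType>()) {
    return handleRanked(type);
  } else if (type.isa<UnrankedTensorType>()) {
    return handleUnranked(type);
  }
  return nullptr;

  // After: flat early returns.
  if (type.isa<RankedTensorType>())
    return handleRanked(type);
  if (type.isa<UnrankedTensorType>())
    return handleUnranked(type);
  return nullptr;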

diff --git a/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp b/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
index c50d09a2c0653..7920bee01c51f 100644
--- a/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
+++ b/mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
@@ -126,7 +126,7 @@ void ConvertSimulatedQuantPass::runOnFunction() {
   bool hadFailure = false;
   auto func = getFunction();
   RewritePatternSet patterns(func.getContext());
-  auto ctx = func.getContext();
+  auto *ctx = func.getContext();
   patterns.add<ConstFakeQuantRewrite, ConstFakeQuantPerAxisRewrite>(
       ctx, &hadFailure);
   (void)applyPatternsAndFoldGreedily(func, std::move(patterns));

diff --git a/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp b/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp
index 7750ad478f9d1..8c69729824691 100644
--- a/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp
+++ b/mlir/lib/Dialect/Quant/Utils/FakeQuantSupport.cpp
@@ -140,10 +140,10 @@ UniformQuantizedPerAxisType mlir::quant::fakeQuantAttrsToType(
     Location loc, unsigned numBits, int32_t quantizedDimension,
     ArrayRef<double> rmins, ArrayRef<double> rmaxs, bool narrowRange,
     Type expressedType, bool isSigned) {
-  size_t axis_size = rmins.size();
-  if (axis_size != rmaxs.size()) {
+  size_t axisSize = rmins.size();
+  if (axisSize != rmaxs.size()) {
     return (emitError(loc, "mismatched per-axis min and max size: ")
-                << axis_size << " vs. " << rmaxs.size(),
+                << axisSize << " vs. " << rmaxs.size(),
             nullptr);
   }
 
@@ -159,9 +159,9 @@ UniformQuantizedPerAxisType mlir::quant::fakeQuantAttrsToType(
 
   SmallVector<double, 4> scales;
   SmallVector<int64_t, 4> zeroPoints;
-  scales.reserve(axis_size);
-  zeroPoints.reserve(axis_size);
-  for (size_t axis = 0; axis != axis_size; ++axis) {
+  scales.reserve(axisSize);
+  zeroPoints.reserve(axisSize);
+  for (size_t axis = 0; axis != axisSize; ++axis) {
     double rmin = rmins[axis];
     double rmax = rmaxs[axis];
     if (std::fabs(rmax - rmin) < std::numeric_limits<double>::epsilon()) {

diff --git a/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp b/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp
index 220e8cea75bfc..66885fb7a5fc1 100644
--- a/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp
+++ b/mlir/lib/Dialect/Quant/Utils/QuantizeUtils.cpp
@@ -106,17 +106,17 @@ Attribute mlir::quant::quantizeAttrUniform(
         realValue.cast<DenseFPElementsAttr>(), quantizedElementType, converter);
     outConvertedType = converted.getType();
     return converted;
-  } else if (realValue.isa<SparseElementsAttr>()) {
+  }
+  if (realValue.isa<SparseElementsAttr>()) {
     // Sparse tensor or vector constant.
     auto converted = convertSparseElementsAttr(
         realValue.cast<SparseElementsAttr>(), quantizedElementType, converter);
     outConvertedType = converted.getType();
     return converted;
-  } else {
-    // Nothing else matched: try to convert a primitive.
-    return convertPrimitiveValueAttr(realValue, quantizedElementType, converter,
-                                     outConvertedType);
   }
+  // Nothing else matched: try to convert a primitive.
+  return convertPrimitiveValueAttr(realValue, quantizedElementType, converter,
+                                   outConvertedType);
 }
 
 /// Convert an attribute from a type based on
@@ -132,9 +132,9 @@ Attribute mlir::quant::quantizeAttr(Attribute realValue,
     UniformQuantizedValueConverter converter(uniformQuantized);
     return quantizeAttrUniform(realValue, uniformQuantized, converter,
                                outConvertedType);
-
-  } else if (auto uniformQuantizedPerAxis =
-                 quantizedElementType.dyn_cast<UniformQuantizedPerAxisType>()) {
+  }
+  if (auto uniformQuantizedPerAxis =
+          quantizedElementType.dyn_cast<UniformQuantizedPerAxisType>()) {
     UniformQuantizedPerAxisValueConverter converter(uniformQuantizedPerAxis);
     auto converted = converter.convert(realValue);
    // TODO: why do we need this outConvertedType? Remove it?
@@ -142,7 +142,6 @@ Attribute mlir::quant::quantizeAttr(Attribute realValue,
       outConvertedType = converted.getType();
     }
     return converted;
-  } else {
-    return nullptr;
   }
+  return nullptr;
 }

diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
index 0588549210018..d1bd271d389bc 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
@@ -74,7 +74,7 @@ static Attribute extractCompositeElement(Attribute composite,
 
 namespace {
 #include "SPIRVCanonicalization.inc"
-}
+} // namespace
 
 //===----------------------------------------------------------------------===//
 // spv.AccessChainOp

diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
index c8c03c8de8774..8090b235cc46e 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
@@ -3250,13 +3250,13 @@ static ParseResult parseCooperativeMatrixLoadNVOp(OpAsmParser &parser,
   return success();
 }
 
-static void print(spirv::CooperativeMatrixLoadNVOp M, OpAsmPrinter &printer) {
-  printer << " " << M.pointer() << ", " << M.stride() << ", "
-          << M.columnmajor();
+static void print(spirv::CooperativeMatrixLoadNVOp m, OpAsmPrinter &printer) {
+  printer << " " << m.pointer() << ", " << m.stride() << ", "
+          << m.columnmajor();
   // Print optional memory access attribute.
-  if (auto memAccess = M.memory_access())
+  if (auto memAccess = m.memory_access())
     printer << " [\"" << stringifyMemoryAccess(*memAccess) << "\"]";
-  printer << " : " << M.pointer().getType() << " as " << M.getType();
+  printer << " : " << m.pointer().getType() << " as " << m.getType();
 }
 
 static LogicalResult verifyPointerAndCoopMatrixType(Operation *op, Type pointer,

diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
index 0764b0920db67..80c46a13b7611 100644
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -31,7 +31,7 @@ using namespace mlir::shape;
 
 namespace {
 #include "ShapeCanonicalization.inc"
-}
+} // namespace
 
 RankedTensorType shape::getExtentTensorType(MLIRContext *ctx, int64_t rank) {
   return RankedTensorType::get({rank}, IndexType::get(ctx));
@@ -50,7 +50,8 @@ LogicalResult shape::getShapeVec(Value input,
       return failure();
     shapeValues = llvm::to_vector<6>(type.getShape());
     return success();
-  } else if (auto inputOp = input.getDefiningOp<ConstShapeOp>()) {
+  }
+  if (auto inputOp = input.getDefiningOp<ConstShapeOp>()) {
     shapeValues = llvm::to_vector<6>(inputOp.getShape().getValues<int64_t>());
     return success();
   } else if (auto inputOp = input.getDefiningOp<arith::ConstantOp>()) {

diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
index 2283174dc55d2..4c0f69e2ce0a8 100644
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -540,7 +540,8 @@ struct SimplifyConstCondBranchPred : public OpRewritePattern<CondBranchOp> {
       rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.getTrueDest(),
                                             condbr.getTrueOperands());
       return success();
-    } else if (matchPattern(condbr.getCondition(), m_Zero())) {
+    }
+    if (matchPattern(condbr.getCondition(), m_Zero())) {
       // False branch taken.
       rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.getFalseDest(),
                                             condbr.getFalseOperands());

diff --git a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
index ad840fc903749..588b635805893 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
@@ -152,11 +152,11 @@ struct ReifyExpandOrCollapseShapeOp
   reifyResultShapes(Operation *op, OpBuilder &b,
                     ReifiedRankedShapedTypeDims &reifiedReturnShapes) const {
     auto loc = op->getLoc();
-    auto reshape_op = cast<OpTy>(op);
-    auto result_shape = getReshapeOutputShapeFromInputShape(
-        b, loc, reshape_op.src(), reshape_op.getResultType().getShape(),
-        reshape_op.getReassociationMaps());
-    reifiedReturnShapes.push_back(getAsValues(b, loc, result_shape));
+    auto reshapeOp = cast<OpTy>(op);
+    auto resultShape = getReshapeOutputShapeFromInputShape(
+        b, loc, reshapeOp.src(), reshapeOp.getResultType().getShape(),
+        reshapeOp.getReassociationMaps());
+    reifiedReturnShapes.push_back(getAsValues(b, loc, resultShape));
     return success();
   }
 };

diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 49db8688c0da3..cec8b2c18754a 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -634,7 +634,7 @@ OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
 // ReshapeOp
 //===----------------------------------------------------------------------===//
 
-static int64_t GetNumElements(ShapedType type) {
+static int64_t getNumElements(ShapedType type) {
   int64_t numElements = 1;
   for (auto dim : type.getShape())
     numElements *= dim;
@@ -657,7 +657,7 @@ static LogicalResult verify(ReshapeOp op) {
   if (resultRankedType) {
     if (operandRankedType && resultRankedType.hasStaticShape() &&
         operandRankedType.hasStaticShape()) {
-      if (GetNumElements(operandRankedType) != GetNumElements(resultRankedType))
+      if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
         return op.emitOpError("source and destination tensor should have the "
                               "same number of elements");
     }

diff --git a/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp
index 1fd790430593f..e4369c53112d3 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp
@@ -97,9 +97,9 @@ struct BufferizeFromElementsOp
 
     // Traverse all `elements` and create `memref.store` ops.
     ImplicitLocOpBuilder b(loc, rewriter);
-    auto element_it = adaptor.elements().begin();
+    auto elementIt = adaptor.elements().begin();
     SmallVector<Value, 2> indices(tensorType.getRank(), constants[0]);
-    CreateStores(/*dim=*/0, buffer, shape, constants, element_it, indices, b);
+    createStores(/*dim=*/0, buffer, shape, constants, elementIt, indices, b);
 
     rewriter.replaceOp(op, {buffer});
     return success();
@@ -108,21 +108,21 @@ struct BufferizeFromElementsOp
 private:
   // Implements backtracking to traverse indices of the output buffer while
   // iterating over op.elements().
-  void CreateStores(int dim, Value buffer, ArrayRef<int64_t> shape,
-                    ArrayRef<Value> constants, ValueRange::iterator &element_it,
+  void createStores(int dim, Value buffer, ArrayRef<int64_t> shape,
+                    ArrayRef<Value> constants, ValueRange::iterator &elementIt,
                     SmallVectorImpl<Value> &indices,
                     ImplicitLocOpBuilder b) const {
     if (dim == static_cast<int>(shape.size()) - 1) {
       for (int i = 0; i < shape.back(); ++i) {
         indices.back() = constants[i];
-        b.create<memref::StoreOp>(*element_it, buffer, indices);
-        ++element_it;
+        b.create<memref::StoreOp>(*elementIt, buffer, indices);
+        ++elementIt;
       }
       return;
     }
     for (int i = 0; i < shape[dim]; ++i) {
       indices[dim] = constants[i];
-      CreateStores(dim + 1, buffer, shape, constants, element_it, indices, b);
+      createStores(dim + 1, buffer, shape, constants, elementIt, indices, b);
     }
   }
 };
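
Stripped of the IR construction, the renamed createStores is a depth-first,
row-major enumeration of the buffer's index tuples, consuming one scalar from
the adaptor per leaf visit. A self-contained model of just the traversal (the
emit callback stands in for the memref.store the pass creates):

  #include <vector>

  template <typename Fn>
  void walkIndices(const std::vector<int> &shape, std::vector<int> &idx,
                   unsigned dim, Fn emit) {
    if (dim == shape.size()) {
      emit(idx); // one store per visit in the real pass
      return;
    }
    for (int i = 0; i < shape[dim]; ++i) {
      idx[dim] = i;
      walkIndices(shape, idx, dim + 1, emit);
    }
  }

For shape {2, 3} this visits (0,0), (0,1), (0,2), (1,0), (1,1), (1,2), the
order in which the op's elements() are consumed.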

diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index f61ce68893d69..e56f83b8044e6 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -771,8 +771,8 @@ static void buildExplicitValuePadOpWithQuantInfo(OpBuilder &builder,
                                                  OperationState &result,
                                                  Type outputType, Value input,
                                                  Value paddings,
-                                                 Value pad_const) {
-  result.addOperands({input, paddings, pad_const});
+                                                 Value padConst) {
+  result.addOperands({input, paddings, padConst});
   auto quantAttr = buildPadOpQuantizationAttr(builder, input);
   if (quantAttr)
     result.addAttribute("quantization_info", quantAttr);

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
index e623089f50f04..341e78d527925 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeTransposeConv.cpp
@@ -33,9 +33,9 @@ static void getValuesFromIntArrayAttribute(ArrayAttr attr,
 }
 
 template <typename TosaOp, typename... Args>
-TosaOp CreateOpAndInfer(PatternRewriter &rewriter, Location loc, Type result_ty,
+TosaOp createOpAndInfer(PatternRewriter &rewriter, Location loc, Type resultTy,
                         Args &&...args) {
-  auto op = rewriter.create<TosaOp>(loc, result_ty, args...);
+  auto op = rewriter.create<TosaOp>(loc, resultTy, args...);
 
   InferShapedTypeOpInterface shapeInterface =
       dyn_cast<InferShapedTypeOpInterface>(op.getOperation());
@@ -57,12 +57,12 @@ TosaOp CreateOpAndInfer(PatternRewriter &rewriter, Location loc, Type result_ty,
   auto result = op->getResult(0);
   auto predictedShape = returnedShapes[0];
   auto currentKnowledge =
-      mlir::tosa::ValueKnowledge::getKnowledgeFromType(result_ty);
+      mlir::tosa::ValueKnowledge::getKnowledgeFromType(resultTy);
 
   // Compute the knowledge based on the inferred type.
   auto inferredKnowledge =
       mlir::tosa::ValueKnowledge::getPessimisticValueState();
-  inferredKnowledge.dtype = result_ty.cast<ShapedType>().getElementType();
+  inferredKnowledge.dtype = resultTy.cast<ShapedType>().getElementType();
   inferredKnowledge.hasRank = predictedShape.hasRank();
   if (predictedShape.hasRank()) {
     for (auto dim : predictedShape.getDims()) {
@@ -73,8 +73,8 @@ TosaOp CreateOpAndInfer(PatternRewriter &rewriter, Location loc, Type result_ty,
   // Compute the new type based on the joined version.
   auto newKnowledge =
       mlir::tosa::ValueKnowledge::join(currentKnowledge, inferredKnowledge);
-  auto new_ty = newKnowledge.getType();
-  result.setType(new_ty);
+  auto newTy = newKnowledge.getType();
+  result.setType(newTy);
   return op;
 }
 
@@ -205,19 +205,19 @@ class TransposeConvStridedConverter
         weightWidth % stride[1] ? stride[1] - weightWidth % stride[1] : 0;
     DenseElementsAttr weightPaddingAttr = DenseIntElementsAttr::get(
         RankedTensorType::get({4, 2}, rewriter.getI32Type()), weightPadding);
-    Value weightPaddingVal = CreateOpAndInfer<tosa::ConstOp>(
+    Value weightPaddingVal = createOpAndInfer<tosa::ConstOp>(
         rewriter, loc, weightPaddingAttr.getType(), weightPaddingAttr);
 
     if (op.quantization_info().hasValue()) {
       auto quantInfo = op.quantization_info().getValue();
-      weight = CreateOpAndInfer<tosa::PadOp>(
+      weight = createOpAndInfer<tosa::PadOp>(
           rewriter, loc, UnrankedTensorType::get(weightETy), weight,
           weightPaddingVal, nullptr,
           PadOpQuantizationAttr::get(quantInfo.weight_zp(),
                                      rewriter.getContext()));
 
     } else {
-      weight = CreateOpAndInfer<tosa::PadOp>(rewriter, loc,
+      weight = createOpAndInfer<tosa::PadOp>(rewriter, loc,
                                              UnrankedTensorType::get(weightETy),
                                              weight, weightPaddingVal);
     }
@@ -231,7 +231,7 @@ class TransposeConvStridedConverter
         outputChannels, weightHeight / stride[0],
         stride[0],      weightWidth / stride[1],
         stride[1],      inputChannels};
-    weight = CreateOpAndInfer<tosa::ReshapeOp>(
+    weight = createOpAndInfer<tosa::ReshapeOp>(
         rewriter, loc, UnrankedTensorType::get(weightETy), weight,
         rewriter.getI64ArrayAttr(weightReshapeDims0));
 
@@ -240,7 +240,7 @@ class TransposeConvStridedConverter
         loc, RankedTensorType::get({6}, rewriter.getI32Type()),
         rewriter.getI32TensorAttr({2, 4, 0, 1, 3, 5}));
 
-    weight = CreateOpAndInfer<tosa::TransposeOp>(
+    weight = createOpAndInfer<tosa::TransposeOp>(
         rewriter, loc, UnrankedTensorType::get(weightETy), weight,
         transposeWeightVal);
 
@@ -248,15 +248,15 @@ class TransposeConvStridedConverter
     llvm::SmallVector<int64_t, 6> weightReshapeDims1 = {
         outputChannels * stride[0] * stride[1], weightHeight / stride[0],
         weightWidth / stride[1], inputChannels};
-    weight = CreateOpAndInfer<tosa::ReshapeOp>(
+    weight = createOpAndInfer<tosa::ReshapeOp>(
         rewriter, loc, UnrankedTensorType::get(weightETy), weight,
         rewriter.getI64ArrayAttr(weightReshapeDims1));
     ShapedType restridedWeightTy = weight.getType().cast<ShapedType>();
 
-    weight = CreateOpAndInfer<tosa::ReverseOp>(
+    weight = createOpAndInfer<tosa::ReverseOp>(
         rewriter, loc, UnrankedTensorType::get(weightETy), weight,
         rewriter.getI64IntegerAttr(1));
-    weight = CreateOpAndInfer<tosa::ReverseOp>(
+    weight = createOpAndInfer<tosa::ReverseOp>(
         rewriter, loc, UnrankedTensorType::get(weightETy), weight,
         rewriter.getI64IntegerAttr(2));
 
@@ -270,18 +270,18 @@ class TransposeConvStridedConverter
     DenseElementsAttr inputPaddingAttr = DenseIntElementsAttr::get(
         RankedTensorType::get({4, 2}, rewriter.getI32Type()), inputPadding);
 
-    Value inputPaddingVal = CreateOpAndInfer<tosa::ConstOp>(
+    Value inputPaddingVal = createOpAndInfer<tosa::ConstOp>(
         rewriter, loc, inputPaddingAttr.getType(), inputPaddingAttr);
 
     if (op.quantization_info().hasValue()) {
       auto quantInfo = op.quantization_info().getValue();
-      input = CreateOpAndInfer<tosa::PadOp>(
+      input = createOpAndInfer<tosa::PadOp>(
           rewriter, loc, UnrankedTensorType::get(inputETy), input,
           inputPaddingVal, nullptr,
           PadOpQuantizationAttr::get(quantInfo.input_zp(),
                                      rewriter.getContext()));
     } else {
-      input = CreateOpAndInfer<tosa::PadOp>(rewriter, loc,
+      input = createOpAndInfer<tosa::PadOp>(rewriter, loc,
                                             UnrankedTensorType::get(inputETy),
                                             input, inputPaddingVal);
     }
@@ -299,7 +299,7 @@ class TransposeConvStridedConverter
     // Perform the convolution using the zero bias.
     Value conv2d;
     if (op.quantization_info().hasValue()) {
-      conv2d = CreateOpAndInfer<tosa::Conv2DOp>(
+      conv2d = createOpAndInfer<tosa::Conv2DOp>(
                    rewriter, loc, UnrankedTensorType::get(resultETy), input,
                    weight, zeroBias,
                    /*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
@@ -308,7 +308,7 @@ class TransposeConvStridedConverter
                    op.quantization_info().getValue())
                    .getResult();
     } else {
-      conv2d = CreateOpAndInfer<tosa::Conv2DOp>(
+      conv2d = createOpAndInfer<tosa::Conv2DOp>(
                    rewriter, loc, UnrankedTensorType::get(resultETy), input,
                    weight, zeroBias,
                    /*pad=*/rewriter.getI64ArrayAttr({0, 0, 0, 0}),
@@ -327,7 +327,7 @@ class TransposeConvStridedConverter
     // Factor striding out of the convolution result.
     llvm::SmallVector<int64_t, 6> convReshapeDims0 = {
         batch, convHeight, convWidth, stride[0], stride[1], outputChannels};
-    conv2d = CreateOpAndInfer<tosa::ReshapeOp>(
+    conv2d = createOpAndInfer<tosa::ReshapeOp>(
         rewriter, loc, UnrankedTensorType::get(resultETy), conv2d,
         rewriter.getI64ArrayAttr(convReshapeDims0));
 
@@ -336,14 +336,14 @@ class TransposeConvStridedConverter
         loc, RankedTensorType::get({6}, rewriter.getI32Type()),
         rewriter.getI32TensorAttr({0, 1, 3, 2, 4, 5}));
 
-    conv2d = CreateOpAndInfer<tosa::TransposeOp>(
+    conv2d = createOpAndInfer<tosa::TransposeOp>(
         rewriter, loc, UnrankedTensorType::get(convETy), conv2d,
         transposeConvVal);
 
     // Fuse striding behavior back into width / height.
     llvm::SmallVector<int64_t, 6> convReshapeDims1 = {
         batch, convHeight * stride[0], convWidth * stride[1], outputChannels};
-    conv2d = CreateOpAndInfer<tosa::ReshapeOp>(
+    conv2d = createOpAndInfer<tosa::ReshapeOp>(
         rewriter, loc, UnrankedTensorType::get(resultETy), conv2d,
         rewriter.getI64ArrayAttr(convReshapeDims1));
 
@@ -354,14 +354,14 @@ class TransposeConvStridedConverter
     sliceBegin[1] = pad[0];
     sliceBegin[2] = pad[1];
 
-    auto slice = CreateOpAndInfer<tosa::SliceOp>(
+    auto slice = createOpAndInfer<tosa::SliceOp>(
                      rewriter, loc, UnrankedTensorType::get(resultETy), conv2d,
                      rewriter.getI64ArrayAttr(sliceBegin),
                      rewriter.getI64ArrayAttr(resultTy.getShape()))
                      .getResult();
 
     auto addBias =
-        CreateOpAndInfer<tosa::AddOp>(rewriter, loc, op.getType(), slice, bias);
+        createOpAndInfer<tosa::AddOp>(rewriter, loc, op.getType(), slice, bias);
 
     rewriter.replaceOp(op, addBias.getResult());
 

diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
index 33a1e34d2415a..34d480c3917e0 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
@@ -223,7 +223,7 @@ void propagateShapesInRegion(Region &region) {
           // Check whether this use case is replaceable. We define an op as
           // being replaceable if it is used by a ReturnOp or a TosaOp.
           bool replaceable = true;
-          for (auto user : result.getUsers()) {
+          for (auto *user : result.getUsers()) {
             if (isa<ReturnOp>(user))
               continue;
             if (user->getDialect()->getNamespace() ==

diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
index edd561fb49bd5..3cac3302af32a 100644
--- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
@@ -1179,7 +1179,7 @@ struct UnrolledOuterProductGenerator
     return builder.create<vector::TransposeOp>(loc, v, perm);
   }
 
-  Value outer_prod(Value lhs, Value rhs, Value res, int reductionSize) {
+  Value outerProd(Value lhs, Value rhs, Value res, int reductionSize) {
     assert(reductionSize > 0);
     for (int64_t k = 0; k < reductionSize; ++k) {
       Value a = builder.create<vector::ExtractOp>(loc, lhs, k);
@@ -1199,31 +1199,31 @@ struct UnrolledOuterProductGenerator
     bindDims(builder.getContext(), m, n, k);
     // Classical row-major matmul:  Just permute the lhs.
     if (layout({{m, k}, {k, n}, {m, n}}))
-      return outer_prod(t(lhs), rhs, res, lhsType.getDimSize(1));
+      return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1));
     // TODO: may be better to fail and use some vector<k> -> scalar reduction.
     if (layout({{m, k}, {n, k}, {m, n}})) {
       Value tlhs = t(lhs);
-      return outer_prod(tlhs, t(rhs), res, lhsType.getDimSize(1));
+      return outerProd(tlhs, t(rhs), res, lhsType.getDimSize(1));
     }
     // No need to permute anything.
     if (layout({{k, m}, {k, n}, {m, n}}))
-      return outer_prod(lhs, rhs, res, lhsType.getDimSize(0));
+      return outerProd(lhs, rhs, res, lhsType.getDimSize(0));
     // Just permute the rhs.
     if (layout({{k, m}, {n, k}, {m, n}}))
-      return outer_prod(lhs, t(rhs), res, lhsType.getDimSize(0));
+      return outerProd(lhs, t(rhs), res, lhsType.getDimSize(0));
     // Transposed output: swap RHS and LHS.
     // Classical row-major matmul: permute the lhs.
     if (layout({{m, k}, {k, n}, {n, m}}))
-      return outer_prod(rhs, t(lhs), res, lhsType.getDimSize(1));
+      return outerProd(rhs, t(lhs), res, lhsType.getDimSize(1));
     // TODO: may be better to fail and use some vector<k> -> scalar reduction.
     if (layout({{m, k}, {n, k}, {n, m}})) {
       Value trhs = t(rhs);
-      return outer_prod(trhs, t(lhs), res, lhsType.getDimSize(1));
+      return outerProd(trhs, t(lhs), res, lhsType.getDimSize(1));
     }
     if (layout({{k, m}, {k, n}, {n, m}}))
-      return outer_prod(rhs, lhs, res, lhsType.getDimSize(0));
+      return outerProd(rhs, lhs, res, lhsType.getDimSize(0));
     if (layout({{k, m}, {n, k}, {n, m}}))
-      return outer_prod(t(rhs), lhs, res, lhsType.getDimSize(0));
+      return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0));
     return failure();
   }
 
@@ -1236,16 +1236,16 @@ struct UnrolledOuterProductGenerator
 
     // Case mat-vec: transpose.
     if (layout({{m, k}, {k}, {m}}))
-      return outer_prod(t(lhs), rhs, res, lhsType.getDimSize(1));
+      return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1));
     // Case mat-trans-vec: ready to go.
     if (layout({{k, m}, {k}, {m}}))
-      return outer_prod(lhs, rhs, res, lhsType.getDimSize(0));
+      return outerProd(lhs, rhs, res, lhsType.getDimSize(0));
     // Case vec-mat: swap and transpose.
     if (layout({{k}, {m, k}, {m}}))
-      return outer_prod(t(rhs), lhs, res, lhsType.getDimSize(0));
+      return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0));
     // Case vec-mat-trans: swap and ready to go.
     if (layout({{k}, {k, m}, {m}}))
-      return outer_prod(rhs, lhs, res, lhsType.getDimSize(0));
+      return outerProd(rhs, lhs, res, lhsType.getDimSize(0));
     return failure();
   }
 
@@ -1260,16 +1260,16 @@ struct UnrolledOuterProductGenerator
 
     // Case mat-vec: transpose.
     if (layout({{m, k}, {k}, {m}}))
-      return outer_prod(t(lhs), rhs, res, lhsType.getDimSize(1));
+      return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1));
     // Case mat-trans-vec: ready to go.
     if (layout({{k, m}, {k}, {m}}))
-      return outer_prod(lhs, rhs, res, lhsType.getDimSize(0));
+      return outerProd(lhs, rhs, res, lhsType.getDimSize(0));
     // Case vec-mat: swap and transpose.
     if (layout({{k}, {m, k}, {m}}))
-      return outer_prod(t(rhs), lhs, res, lhsType.getDimSize(0));
+      return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0));
     // Case vec-mat-trans: swap and ready to go.
     if (layout({{k}, {k, m}, {m}}))
-      return outer_prod(rhs, lhs, res, lhsType.getDimSize(0));
+      return outerProd(rhs, lhs, res, lhsType.getDimSize(0));
     return failure();
   }
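
Background for the outerProd renames: the generator lowers a contraction to
reductionSize rank-1 updates, one vector.outerproduct per slice along k, after
permuting operands so that k is outermost. The scalar equivalent for the
row-major case (an illustrative free function, not the vector IR):

  void matmulAsOuterProducts(int m, int n, int k, const float *lhs,
                             const float *rhs, float *res) {
    for (int kk = 0; kk < k; ++kk)  // one rank-1 (outer product) update per k
      for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j)
          res[i * n + j] += lhs[i * k + kk] * rhs[kk * n + j];
  }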
 

diff --git a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
index 38088e17bd4f4..3f2db00b388dc 100644
--- a/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
+++ b/mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
@@ -31,8 +31,9 @@ Value mlir::x86vector::avx2::inline_asm::mm256BlendPsAsm(
     ImplicitLocOpBuilder &b, Value v1, Value v2, uint8_t mask) {
   auto asmDialectAttr =
       LLVM::AsmDialectAttr::get(b.getContext(), LLVM::AsmDialect::AD_Intel);
-  auto asmTp = "vblendps $0, $1, $2, {0}";
-  auto asmCstr = "=x,x,x"; // Careful: constraint parser is very brittle: no ws!
+  const auto *asmTp = "vblendps $0, $1, $2, {0}";
+  const auto *asmCstr =
+      "=x,x,x"; // Careful: constraint parser is very brittle: no ws!
   SmallVector<Value> asmVals{v1, v2};
   auto asmStr = llvm::formatv(asmTp, llvm::format_hex(mask, /*width=*/2)).str();
   auto asmOp = b.create<LLVM::InlineAsmOp>(
@@ -116,18 +117,18 @@ void mlir::x86vector::avx2::transpose4x8xf32(ImplicitLocOpBuilder &ib,
          "expects all types to be vector<8xf32>");
 #endif
 
-  Value T0 = mm256UnpackLoPs(ib, vs[0], vs[1]);
-  Value T1 = mm256UnpackHiPs(ib, vs[0], vs[1]);
-  Value T2 = mm256UnpackLoPs(ib, vs[2], vs[3]);
-  Value T3 = mm256UnpackHiPs(ib, vs[2], vs[3]);
-  Value S0 = mm256ShufflePs(ib, T0, T2, MaskHelper::shuffle<1, 0, 1, 0>());
-  Value S1 = mm256ShufflePs(ib, T0, T2, MaskHelper::shuffle<3, 2, 3, 2>());
-  Value S2 = mm256ShufflePs(ib, T1, T3, MaskHelper::shuffle<1, 0, 1, 0>());
-  Value S3 = mm256ShufflePs(ib, T1, T3, MaskHelper::shuffle<3, 2, 3, 2>());
-  vs[0] = mm256Permute2f128Ps(ib, S0, S1, MaskHelper::permute<2, 0>());
-  vs[1] = mm256Permute2f128Ps(ib, S2, S3, MaskHelper::permute<2, 0>());
-  vs[2] = mm256Permute2f128Ps(ib, S0, S1, MaskHelper::permute<3, 1>());
-  vs[3] = mm256Permute2f128Ps(ib, S2, S3, MaskHelper::permute<3, 1>());
+  Value t0 = mm256UnpackLoPs(ib, vs[0], vs[1]);
+  Value t1 = mm256UnpackHiPs(ib, vs[0], vs[1]);
+  Value t2 = mm256UnpackLoPs(ib, vs[2], vs[3]);
+  Value t3 = mm256UnpackHiPs(ib, vs[2], vs[3]);
+  Value s0 = mm256ShufflePs(ib, t0, t2, MaskHelper::shuffle<1, 0, 1, 0>());
+  Value s1 = mm256ShufflePs(ib, t0, t2, MaskHelper::shuffle<3, 2, 3, 2>());
+  Value s2 = mm256ShufflePs(ib, t1, t3, MaskHelper::shuffle<1, 0, 1, 0>());
+  Value s3 = mm256ShufflePs(ib, t1, t3, MaskHelper::shuffle<3, 2, 3, 2>());
+  vs[0] = mm256Permute2f128Ps(ib, s0, s1, MaskHelper::permute<2, 0>());
+  vs[1] = mm256Permute2f128Ps(ib, s2, s3, MaskHelper::permute<2, 0>());
+  vs[2] = mm256Permute2f128Ps(ib, s0, s1, MaskHelper::permute<3, 1>());
+  vs[3] = mm256Permute2f128Ps(ib, s2, s3, MaskHelper::permute<3, 1>());
 }
 
 /// AVX2 8x8xf32-specific transpose lowering using a "C intrinsics" model.
@@ -140,46 +141,46 @@ void mlir::x86vector::avx2::transpose8x8xf32(ImplicitLocOpBuilder &ib,
                       [&](Type t) { return t == vt; }) &&
          "expects all types to be vector<8xf32>");
 
-  Value T0 = mm256UnpackLoPs(ib, vs[0], vs[1]);
-  Value T1 = mm256UnpackHiPs(ib, vs[0], vs[1]);
-  Value T2 = mm256UnpackLoPs(ib, vs[2], vs[3]);
-  Value T3 = mm256UnpackHiPs(ib, vs[2], vs[3]);
-  Value T4 = mm256UnpackLoPs(ib, vs[4], vs[5]);
-  Value T5 = mm256UnpackHiPs(ib, vs[4], vs[5]);
-  Value T6 = mm256UnpackLoPs(ib, vs[6], vs[7]);
-  Value T7 = mm256UnpackHiPs(ib, vs[6], vs[7]);
+  Value t0 = mm256UnpackLoPs(ib, vs[0], vs[1]);
+  Value t1 = mm256UnpackHiPs(ib, vs[0], vs[1]);
+  Value t2 = mm256UnpackLoPs(ib, vs[2], vs[3]);
+  Value t3 = mm256UnpackHiPs(ib, vs[2], vs[3]);
+  Value t4 = mm256UnpackLoPs(ib, vs[4], vs[5]);
+  Value t5 = mm256UnpackHiPs(ib, vs[4], vs[5]);
+  Value t6 = mm256UnpackLoPs(ib, vs[6], vs[7]);
+  Value t7 = mm256UnpackHiPs(ib, vs[6], vs[7]);
 
   using inline_asm::mm256BlendPsAsm;
-  Value sh0 = mm256ShufflePs(ib, T0, T2, MaskHelper::shuffle<1, 0, 3, 2>());
-  Value sh2 = mm256ShufflePs(ib, T1, T3, MaskHelper::shuffle<1, 0, 3, 2>());
-  Value sh4 = mm256ShufflePs(ib, T4, T6, MaskHelper::shuffle<1, 0, 3, 2>());
-  Value sh6 = mm256ShufflePs(ib, T5, T7, MaskHelper::shuffle<1, 0, 3, 2>());
-
-  Value S0 =
-      mm256BlendPsAsm(ib, T0, sh0, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
-  Value S1 =
-      mm256BlendPsAsm(ib, T2, sh0, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
-  Value S2 =
-      mm256BlendPsAsm(ib, T1, sh2, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
-  Value S3 =
-      mm256BlendPsAsm(ib, T3, sh2, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
-  Value S4 =
-      mm256BlendPsAsm(ib, T4, sh4, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
-  Value S5 =
-      mm256BlendPsAsm(ib, T6, sh4, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
-  Value S6 =
-      mm256BlendPsAsm(ib, T5, sh6, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
-  Value S7 =
-      mm256BlendPsAsm(ib, T7, sh6, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
-
-  vs[0] = mm256Permute2f128Ps(ib, S0, S4, MaskHelper::permute<2, 0>());
-  vs[1] = mm256Permute2f128Ps(ib, S1, S5, MaskHelper::permute<2, 0>());
-  vs[2] = mm256Permute2f128Ps(ib, S2, S6, MaskHelper::permute<2, 0>());
-  vs[3] = mm256Permute2f128Ps(ib, S3, S7, MaskHelper::permute<2, 0>());
-  vs[4] = mm256Permute2f128Ps(ib, S0, S4, MaskHelper::permute<3, 1>());
-  vs[5] = mm256Permute2f128Ps(ib, S1, S5, MaskHelper::permute<3, 1>());
-  vs[6] = mm256Permute2f128Ps(ib, S2, S6, MaskHelper::permute<3, 1>());
-  vs[7] = mm256Permute2f128Ps(ib, S3, S7, MaskHelper::permute<3, 1>());
+  Value sh0 = mm256ShufflePs(ib, t0, t2, MaskHelper::shuffle<1, 0, 3, 2>());
+  Value sh2 = mm256ShufflePs(ib, t1, t3, MaskHelper::shuffle<1, 0, 3, 2>());
+  Value sh4 = mm256ShufflePs(ib, t4, t6, MaskHelper::shuffle<1, 0, 3, 2>());
+  Value sh6 = mm256ShufflePs(ib, t5, t7, MaskHelper::shuffle<1, 0, 3, 2>());
+
+  Value s0 =
+      mm256BlendPsAsm(ib, t0, sh0, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
+  Value s1 =
+      mm256BlendPsAsm(ib, t2, sh0, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
+  Value s2 =
+      mm256BlendPsAsm(ib, t1, sh2, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
+  Value s3 =
+      mm256BlendPsAsm(ib, t3, sh2, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
+  Value s4 =
+      mm256BlendPsAsm(ib, t4, sh4, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
+  Value s5 =
+      mm256BlendPsAsm(ib, t6, sh4, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
+  Value s6 =
+      mm256BlendPsAsm(ib, t5, sh6, MaskHelper::blend<0, 0, 1, 1, 0, 0, 1, 1>());
+  Value s7 =
+      mm256BlendPsAsm(ib, t7, sh6, MaskHelper::blend<1, 1, 0, 0, 1, 1, 0, 0>());
+
+  vs[0] = mm256Permute2f128Ps(ib, s0, s4, MaskHelper::permute<2, 0>());
+  vs[1] = mm256Permute2f128Ps(ib, s1, s5, MaskHelper::permute<2, 0>());
+  vs[2] = mm256Permute2f128Ps(ib, s2, s6, MaskHelper::permute<2, 0>());
+  vs[3] = mm256Permute2f128Ps(ib, s3, s7, MaskHelper::permute<2, 0>());
+  vs[4] = mm256Permute2f128Ps(ib, s0, s4, MaskHelper::permute<3, 1>());
+  vs[5] = mm256Permute2f128Ps(ib, s1, s5, MaskHelper::permute<3, 1>());
+  vs[6] = mm256Permute2f128Ps(ib, s2, s6, MaskHelper::permute<3, 1>());
+  vs[7] = mm256Permute2f128Ps(ib, s3, s7, MaskHelper::permute<3, 1>());
 }
 
 /// Rewrite avx2-specific 2-D vector.transpose, for the supported cases and

diff --git a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
index d38967c4c258b..1cf593bd4a4bc 100644
--- a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
@@ -463,8 +463,10 @@ extern "C" void mlirAsyncRuntimePrintCurrentThreadId() {
 //   https://developercommunity.visualstudio.com/content/problem/475494/clexe-error-with-lambda-inside-function-templates.html
 // The bug is fixed in VS2019 16.1. Separating the declaration and definition is
// a workaround for older versions of Visual Studio.
+// NOLINTNEXTLINE(*-identifier-naming): externally called.
 extern "C" API void __mlir_runner_init(llvm::StringMap<void *> &exportSymbols);
 
+// NOLINTNEXTLINE(*-identifier-naming): externally called.
 void __mlir_runner_init(llvm::StringMap<void *> &exportSymbols) {
   auto exportSymbol = [&](llvm::StringRef name, auto ptr) {
     assert(exportSymbols.count(name) == 0 && "symbol already exists");
@@ -517,6 +519,7 @@ void __mlir_runner_init(llvm::StringMap<void *> &exportSymbols) {
                &mlir::runtime::mlirAsyncRuntimePrintCurrentThreadId);
 }
 
+// NOLINTNEXTLINE(*-identifier-naming): externally called.
 extern "C" API void __mlir_runner_destroy() { resetDefaultAsyncRuntime(); }
 
 } // namespace runtime
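
The suppressions above sit on a declaration/definition split that is itself a
workaround, per the surrounding comment: VS2019 before 16.1 miscompiled
lambdas inside function templates, and declaring the extern "C" entry point
separately from its out-of-line definition sidesteps that. The shape, with a
hypothetical symbol:

  // NOLINTNEXTLINE(*-identifier-naming): name is an external contract.
  extern "C" void __example_runner_init(llvm::StringMap<void *> &syms);

  void __example_runner_init(llvm::StringMap<void *> &syms) {
    auto exportSymbol = [&](llvm::StringRef name, auto ptr) { /*...*/ };
    (void)exportSymbol; // the generic lambda that triggered the old MSVC bug
  }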

diff --git a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
index 19459f2310991..d0556e13cf3b7 100644
--- a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -58,27 +58,27 @@ using llvm::orc::ThreadSafeModule;
 using llvm::orc::TMOwningSimpleCompiler;
 
 /// Wrap a string into an llvm::StringError.
-static Error make_string_error(const Twine &message) {
+static Error makeStringError(const Twine &message) {
   return llvm::make_error<StringError>(message.str(),
                                        llvm::inconvertibleErrorCode());
 }
 
-void SimpleObjectCache::notifyObjectCompiled(const Module *M,
-                                             MemoryBufferRef ObjBuffer) {
-  cachedObjects[M->getModuleIdentifier()] = MemoryBuffer::getMemBufferCopy(
-      ObjBuffer.getBuffer(), ObjBuffer.getBufferIdentifier());
+void SimpleObjectCache::notifyObjectCompiled(const Module *m,
+                                             MemoryBufferRef objBuffer) {
+  cachedObjects[m->getModuleIdentifier()] = MemoryBuffer::getMemBufferCopy(
+      objBuffer.getBuffer(), objBuffer.getBufferIdentifier());
 }
 
-std::unique_ptr<MemoryBuffer> SimpleObjectCache::getObject(const Module *M) {
-  auto I = cachedObjects.find(M->getModuleIdentifier());
-  if (I == cachedObjects.end()) {
-    LLVM_DEBUG(dbgs() << "No object for " << M->getModuleIdentifier()
+std::unique_ptr<MemoryBuffer> SimpleObjectCache::getObject(const Module *m) {
+  auto i = cachedObjects.find(m->getModuleIdentifier());
+  if (i == cachedObjects.end()) {
+    LLVM_DEBUG(dbgs() << "No object for " << m->getModuleIdentifier()
                       << " in cache. Compiling.\n");
     return nullptr;
   }
-  LLVM_DEBUG(dbgs() << "Object for " << M->getModuleIdentifier()
+  LLVM_DEBUG(dbgs() << "Object for " << m->getModuleIdentifier()
                     << " loaded from cache.\n");
-  return MemoryBuffer::getMemBuffer(I->second->getMemBufferRef());
+  return MemoryBuffer::getMemBuffer(i->second->getMemBufferRef());
 }
 
 void SimpleObjectCache::dumpToObjectFile(StringRef outputFilename) {
@@ -114,7 +114,8 @@ bool ExecutionEngine::setupTargetTriple(Module *llvmModule) {
   // Setup the machine properties from the current architecture.
   auto targetTriple = llvm::sys::getDefaultTargetTriple();
   std::string errorMessage;
-  auto target = llvm::TargetRegistry::lookupTarget(targetTriple, errorMessage);
+  const auto *target =
+      llvm::TargetRegistry::lookupTarget(targetTriple, errorMessage);
   if (!target) {
     errs() << "NO target: " << errorMessage << "\n";
     return true;
@@ -160,7 +161,7 @@ static void packFunctionArguments(Module *module) {
 
     // Given a function `foo(<...>)`, define the interface function
     // `mlir_foo(i8**)`.
-    auto newType = llvm::FunctionType::get(
+    auto *newType = llvm::FunctionType::get(
         builder.getVoidTy(), builder.getInt8PtrTy()->getPointerTo(),
         /*isVarArg=*/false);
     auto newName = makePackedFunctionName(func.getName());
@@ -170,7 +171,7 @@ static void packFunctionArguments(Module *module) {
 
     // Extract the arguments from the type-erased argument list and cast them to
     // the proper types.
-    auto bb = llvm::BasicBlock::Create(ctx);
+    auto *bb = llvm::BasicBlock::Create(ctx);
     bb->insertInto(interfaceFunc);
     builder.SetInsertPoint(bb);
     llvm::Value *argList = interfaceFunc->arg_begin();
@@ -237,7 +238,7 @@ Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
   auto llvmModule = llvmModuleBuilder ? llvmModuleBuilder(m, *ctx)
                                       : translateModuleToLLVMIR(m, *ctx);
   if (!llvmModule)
-    return make_string_error("could not convert to LLVM IR");
+    return makeStringError("could not convert to LLVM IR");
   // FIXME: the triple should be passed to the translation or dialect conversion
   // instead of this.  Currently, the LLVM module created above has no triple
   // associated with it.
@@ -249,7 +250,7 @@ Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
   // Callback to create the object layer with symbol resolution to current
   // process and dynamically linked libraries.
   auto objectLinkingLayerCreator = [&](ExecutionSession &session,
-                                       const Triple &TT) {
+                                       const Triple &tt) {
     auto objectLayer = std::make_unique<RTDyldObjectLinkingLayer>(
         session, []() { return std::make_unique<SectionMemoryManager>(); });
 
@@ -276,7 +277,7 @@ Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
                << "\nError: " << mb.getError().message() << "\n";
         continue;
       }
-      auto &JD = session.createBareJITDylib(std::string(libPath));
+      auto &jd = session.createBareJITDylib(std::string(libPath));
       auto loaded = DynamicLibrarySearchGenerator::Load(
           libPath.data(), dataLayout.getGlobalPrefix());
       if (!loaded) {
@@ -284,8 +285,8 @@ Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
                << "\n";
         continue;
       }
-      JD.addGenerator(std::move(*loaded));
-      cantFail(objectLayer->add(JD, std::move(mb.get())));
+      jd.addGenerator(std::move(*loaded));
+      cantFail(objectLayer->add(jd, std::move(mb.get())));
     }
 
     return objectLayer;
@@ -293,14 +294,14 @@ Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
 
   // Callback to inspect the cache and recompile on demand. This follows Lang's
   // LLJITWithObjectCache example.
-  auto compileFunctionCreator = [&](JITTargetMachineBuilder JTMB)
+  auto compileFunctionCreator = [&](JITTargetMachineBuilder jtmb)
       -> Expected<std::unique_ptr<IRCompileLayer::IRCompiler>> {
     if (jitCodeGenOptLevel)
-      JTMB.setCodeGenOptLevel(jitCodeGenOptLevel.getValue());
-    auto TM = JTMB.createTargetMachine();
-    if (!TM)
-      return TM.takeError();
-    return std::make_unique<TMOwningSimpleCompiler>(std::move(*TM),
+      jtmb.setCodeGenOptLevel(jitCodeGenOptLevel.getValue());
+    auto tm = jtmb.createTargetMachine();
+    if (!tm)
+      return tm.takeError();
+    return std::make_unique<TMOwningSimpleCompiler>(std::move(*tm),
                                                     engine->cache.get());
   };
 
@@ -350,13 +351,13 @@ Expected<void *> ExecutionEngine::lookup(StringRef name) const {
     llvm::raw_string_ostream os(errorMessage);
     llvm::handleAllErrors(expectedSymbol.takeError(),
                           [&os](llvm::ErrorInfoBase &ei) { ei.log(os); });
-    return make_string_error(os.str());
+    return makeStringError(os.str());
   }
 
   auto rawFPtr = expectedSymbol->getAddress();
-  auto fptr = reinterpret_cast<void *>(rawFPtr);
+  auto *fptr = reinterpret_cast<void *>(rawFPtr);
   if (!fptr)
-    return make_string_error("looked up function is null");
+    return makeStringError("looked up function is null");
   return fptr;
 }
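
For context on the packing that packFunctionArguments performs (only touched
here for auto * and renames): the synthesized mlir_foo(i8**) wrapper takes a
single flat array of opaque pointers covering the arguments and the result
slot. A hypothetical call site for an i32 foo(i32); the wrapper name follows
the comment above, but the slot order is an assumption of this sketch:

  extern "C" void mlir_foo(void **args); // hypothetical synthesized wrapper
  int32_t arg0 = 42;
  int32_t result = 0;
  void *packed[] = {&arg0, &result};     // assumed layout: arguments, then result
  mlir_foo(packed);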
 

diff --git a/mlir/lib/ExecutionEngine/JitRunner.cpp b/mlir/lib/ExecutionEngine/JitRunner.cpp
index e45e5c9391a65..37072981a481c 100644
--- a/mlir/lib/ExecutionEngine/JitRunner.cpp
+++ b/mlir/lib/ExecutionEngine/JitRunner.cpp
@@ -125,7 +125,7 @@ static OwningModuleRef parseMLIRInput(StringRef inputFilename,
   return OwningModuleRef(parseSourceFile(sourceMgr, context));
 }
 
-static inline Error make_string_error(const Twine &message) {
+static inline Error makeStringError(const Twine &message) {
   return llvm::make_error<llvm::StringError>(message.str(),
                                              llvm::inconvertibleErrorCode());
 }
@@ -239,7 +239,7 @@ static Error compileAndExecuteVoidFunction(Options &options, ModuleOp module,
                                            CompileAndExecuteConfig config) {
   auto mainFunction = module.lookupSymbol<LLVM::LLVMFuncOp>(entryPoint);
   if (!mainFunction || mainFunction.empty())
-    return make_string_error("entry point not found");
+    return makeStringError("entry point not found");
   void *empty = nullptr;
   return compileAndExecute(options, module, entryPoint, config, &empty);
 }
@@ -253,7 +253,7 @@ Error checkCompatibleReturnType<int32_t>(LLVM::LLVMFuncOp mainFunction) {
                         .getReturnType()
                         .dyn_cast<IntegerType>();
   if (!resultType || resultType.getWidth() != 32)
-    return make_string_error("only single i32 function result supported");
+    return makeStringError("only single i32 function result supported");
   return Error::success();
 }
 template <>
@@ -263,7 +263,7 @@ Error checkCompatibleReturnType<int64_t>(LLVM::LLVMFuncOp mainFunction) {
                         .getReturnType()
                         .dyn_cast<IntegerType>();
   if (!resultType || resultType.getWidth() != 64)
-    return make_string_error("only single i64 function result supported");
+    return makeStringError("only single i64 function result supported");
   return Error::success();
 }
 template <>
@@ -272,7 +272,7 @@ Error checkCompatibleReturnType<float>(LLVM::LLVMFuncOp mainFunction) {
            .cast<LLVM::LLVMFunctionType>()
            .getReturnType()
            .isa<Float32Type>())
-    return make_string_error("only single f32 function result supported");
+    return makeStringError("only single f32 function result supported");
   return Error::success();
 }
 template <typename Type>
@@ -281,10 +281,10 @@ Error compileAndExecuteSingleReturnFunction(Options &options, ModuleOp module,
                                             CompileAndExecuteConfig config) {
   auto mainFunction = module.lookupSymbol<LLVM::LLVMFuncOp>(entryPoint);
   if (!mainFunction || mainFunction.isExternal())
-    return make_string_error("entry point not found");
+    return makeStringError("entry point not found");
 
   if (mainFunction.getType().cast<LLVM::LLVMFunctionType>().getNumParams() != 0)
-    return make_string_error("function inputs not supported");
+    return makeStringError("function inputs not supported");
 
   if (Error error = checkCompatibleReturnType<Type>(mainFunction))
     return error;
@@ -384,7 +384,7 @@ int mlir::JitRunnerMain(int argc, char **argv, const DialectRegistry &registry,
                     ? compileAndExecuteFn(options, m.get(),
                                           options.mainFuncName.getValue(),
                                           compileAndExecuteConfig)
-                    : make_string_error("unsupported function type");
+                    : makeStringError("unsupported function type");
 
   int exitCode = EXIT_SUCCESS;
   llvm::handleAllErrors(std::move(error),

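The checkCompatibleReturnType hunks above all touch the same device: a
declared-but-undefined function template whose explicit specializations carry
the per-type checks, so an unsupported type fails at link time. A hedged
sketch of the pattern outside MLIR (checkWidth is a made-up name):

  #include <cstdint>
  #include "llvm/Support/Error.h"

  template <typename T>
  llvm::Error checkWidth(unsigned width); // defined only via specializations

  template <>
  llvm::Error checkWidth<int32_t>(unsigned width) {
    if (width != 32)
      return llvm::make_error<llvm::StringError>(
          "expected a 32-bit result", llvm::inconvertibleErrorCode());
    return llvm::Error::success();
  }

  template <>
  llvm::Error checkWidth<int64_t>(unsigned width) {
    if (width != 64)
      return llvm::make_error<llvm::StringError>(
          "expected a 64-bit result", llvm::inconvertibleErrorCode());
    return llvm::Error::success();
  }
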
diff  --git a/mlir/lib/ExecutionEngine/RunnerUtils.cpp b/mlir/lib/ExecutionEngine/RunnerUtils.cpp
index 3a5f4713c673f..3abd00d0f06d6 100644
--- a/mlir/lib/ExecutionEngine/RunnerUtils.cpp
+++ b/mlir/lib/ExecutionEngine/RunnerUtils.cpp
@@ -16,6 +16,8 @@
 #include "mlir/ExecutionEngine/RunnerUtils.h"
 #include <chrono>
 
+// NOLINTBEGIN(*-identifier-naming)
+
 extern "C" void
 _mlir_ciface_print_memref_shape_i8(UnrankedMemRefType<int8_t> *M) {
   std::cout << "Unranked Memref ";
@@ -163,3 +165,5 @@ extern "C" int64_t verifyMemRefF64(int64_t rank, void *actualPtr,
   UnrankedMemRefType<double> expectedDesc = {rank, expectedPtr};
   return _mlir_ciface_verifyMemRefF64(&actualDesc, &expectedDesc);
 }
+
+// NOLINTEND(*-identifier-naming)

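The NOLINTBEGIN/NOLINTEND pair added above is the block form of clang-tidy
suppression (available in recent clang-tidy releases): every check matching
the glob is silenced between the two markers. That is the right tool here,
since the _mlir_ciface_* symbols are ABI names that the identifier-naming
check would otherwise flag on every line. A minimal illustration (the
function name below is hypothetical):

  #include <cstdint>

  // NOLINTBEGIN(*-identifier-naming)
  extern "C" void _mlir_ciface_print_i32(int32_t value);
  // NOLINTEND(*-identifier-naming)
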
diff  --git a/mlir/lib/IR/AffineMap.cpp b/mlir/lib/IR/AffineMap.cpp
index 686dbe8dcc08b..b15d49fd41057 100644
--- a/mlir/lib/IR/AffineMap.cpp
+++ b/mlir/lib/IR/AffineMap.cpp
@@ -209,7 +209,7 @@ AffineMap AffineMap::getPermutationMap(ArrayRef<unsigned> permutation,
   SmallVector<AffineExpr, 4> affExprs;
   for (auto index : permutation)
     affExprs.push_back(getAffineDimExpr(index, context));
-  auto m = std::max_element(permutation.begin(), permutation.end());
+  const auto *m = std::max_element(permutation.begin(), permutation.end());
   auto permutationMap = AffineMap::get(*m + 1, 0, affExprs, context);
   assert(permutationMap.isPermutation() && "Invalid permutation vector");
   return permutationMap;

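This hunk is typical of the llvm-qualified-auto fixes that make up much of
the commit: iterators over ArrayRef (and the result of std::max_element over
them) are raw pointers, and the check wants the pointer spelled out rather
than hidden behind a bare auto. A tiny self-contained sketch of the same
deduction:

  #include <algorithm>
  #include <iterator>

  void demo() {
    const unsigned perm[] = {2, 0, 1};
    // Deduces const unsigned *, so write the pointer explicitly.
    const auto *m = std::max_element(std::begin(perm), std::end(perm));
    (void)m;
  }
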
diff  --git a/mlir/lib/IR/AsmPrinter.cpp b/mlir/lib/IR/AsmPrinter.cpp
index fe6893d2147fe..0b281069823c6 100644
--- a/mlir/lib/IR/AsmPrinter.cpp
+++ b/mlir/lib/IR/AsmPrinter.cpp
@@ -1105,7 +1105,7 @@ void SSANameState::getResultIDAndNumber(OpResult result, Value &lookupValue,
 
   // Find the correct index using a binary search, as the groups are ordered.
   ArrayRef<int> resultGroups = resultGroupIt->second;
-  auto it = llvm::upper_bound(resultGroups, resultNo);
+  const auto *it = llvm::upper_bound(resultGroups, resultNo);
   int groupResultNo = 0, groupSize = 0;
 
   // If there are no smaller elements, the last result group is the lookup.
@@ -1240,8 +1240,8 @@ class AsmPrinter::Impl {
   raw_ostream &getStream() { return os; }
 
   template <typename Container, typename UnaryFunctor>
-  inline void interleaveComma(const Container &c, UnaryFunctor each_fn) const {
-    llvm::interleaveComma(c, os, each_fn);
+  inline void interleaveComma(const Container &c, UnaryFunctor eachFn) const {
+    llvm::interleaveComma(c, os, eachFn);
   }
 
   /// This enum describes the different kinds of elision for the type of an

diff  --git a/mlir/lib/IR/Block.cpp b/mlir/lib/IR/Block.cpp
index 15c1d2096eed3..a138ec5d9cef0 100644
--- a/mlir/lib/IR/Block.cpp
+++ b/mlir/lib/IR/Block.cpp
@@ -316,7 +316,7 @@ Block *Block::getUniquePredecessor() {
 Block *Block::splitBlock(iterator splitBefore) {
   // Start by creating a new basic block, and insert it immediately after this
   // one in the containing region.
-  auto newBB = new Block();
+  auto *newBB = new Block();
   getParent()->getBlocks().insert(std::next(Region::iterator(this)), newBB);
 
   // Move all of the operations from the split point to the end of the region

diff  --git a/mlir/lib/IR/BuiltinAttributes.cpp b/mlir/lib/IR/BuiltinAttributes.cpp
index 0b2970b120ee5..802df2dada9d0 100644
--- a/mlir/lib/IR/BuiltinAttributes.cpp
+++ b/mlir/lib/IR/BuiltinAttributes.cpp
@@ -121,10 +121,10 @@ findDuplicateElement(ArrayRef<NamedAttribute> value) {
   if (value.size() == 2)
     return value[0].getName() == value[1].getName() ? value[0] : none;
 
-  auto it = std::adjacent_find(value.begin(), value.end(),
-                               [](NamedAttribute l, NamedAttribute r) {
-                                 return l.getName() == r.getName();
-                               });
+  const auto *it = std::adjacent_find(value.begin(), value.end(),
+                                      [](NamedAttribute l, NamedAttribute r) {
+                                        return l.getName() == r.getName();
+                                      });
   return it != value.end() ? *it : none;
 }
 

diff  --git a/mlir/lib/IR/MLIRContext.cpp b/mlir/lib/IR/MLIRContext.cpp
index a670d9e42618b..666ce3013f984 100644
--- a/mlir/lib/IR/MLIRContext.cpp
+++ b/mlir/lib/IR/MLIRContext.cpp
@@ -44,7 +44,6 @@ using namespace mlir;
 using namespace mlir::detail;
 
 using llvm::hash_combine;
-using llvm::hash_combine_range;
 
 //===----------------------------------------------------------------------===//
 // MLIRContext CommandLine Options

diff  --git a/mlir/lib/IR/Operation.cpp b/mlir/lib/IR/Operation.cpp
index dc224f21d8348..888cab9eb5683 100644
--- a/mlir/lib/IR/Operation.cpp
+++ b/mlir/lib/IR/Operation.cpp
@@ -349,28 +349,28 @@ void Operation::updateOrderIfNecessary() {
 
 auto llvm::ilist_detail::SpecificNodeAccess<
     typename llvm::ilist_detail::compute_node_options<
-        ::mlir::Operation>::type>::getNodePtr(pointer N) -> node_type * {
-  return NodeAccess::getNodePtr<OptionsT>(N);
+        ::mlir::Operation>::type>::getNodePtr(pointer n) -> node_type * {
+  return NodeAccess::getNodePtr<OptionsT>(n);
 }
 
 auto llvm::ilist_detail::SpecificNodeAccess<
     typename llvm::ilist_detail::compute_node_options<
-        ::mlir::Operation>::type>::getNodePtr(const_pointer N)
+        ::mlir::Operation>::type>::getNodePtr(const_pointer n)
     -> const node_type * {
-  return NodeAccess::getNodePtr<OptionsT>(N);
+  return NodeAccess::getNodePtr<OptionsT>(n);
 }
 
 auto llvm::ilist_detail::SpecificNodeAccess<
     typename llvm::ilist_detail::compute_node_options<
-        ::mlir::Operation>::type>::getValuePtr(node_type *N) -> pointer {
-  return NodeAccess::getValuePtr<OptionsT>(N);
+        ::mlir::Operation>::type>::getValuePtr(node_type *n) -> pointer {
+  return NodeAccess::getValuePtr<OptionsT>(n);
 }
 
 auto llvm::ilist_detail::SpecificNodeAccess<
     typename llvm::ilist_detail::compute_node_options<
-        ::mlir::Operation>::type>::getValuePtr(const node_type *N)
+        ::mlir::Operation>::type>::getValuePtr(const node_type *n)
     -> const_pointer {
-  return NodeAccess::getValuePtr<OptionsT>(N);
+  return NodeAccess::getValuePtr<OptionsT>(n);
 }
 
 void llvm::ilist_traits<::mlir::Operation>::deleteNode(Operation *op) {
@@ -378,9 +378,9 @@ void llvm::ilist_traits<::mlir::Operation>::deleteNode(Operation *op) {
 }
 
 Block *llvm::ilist_traits<::mlir::Operation>::getContainingBlock() {
-  size_t Offset(size_t(&((Block *)nullptr->*Block::getSublistAccess(nullptr))));
-  iplist<Operation> *Anchor(static_cast<iplist<Operation> *>(this));
-  return reinterpret_cast<Block *>(reinterpret_cast<char *>(Anchor) - Offset);
+  size_t offset(size_t(&((Block *)nullptr->*Block::getSublistAccess(nullptr))));
+  iplist<Operation> *anchor(static_cast<iplist<Operation> *>(this));
+  return reinterpret_cast<Block *>(reinterpret_cast<char *>(anchor) - offset);
 }
 
 /// This is a trait method invoked when an operation is added to a block.  We
@@ -1024,8 +1024,7 @@ LogicalResult OpTrait::impl::verifyNoRegionArguments(Operation *op) {
       if (op->getNumRegions() > 1)
         return op->emitOpError("region #")
                << region.getRegionNumber() << " should have no arguments";
-      else
-        return op->emitOpError("region should have no arguments");
+      return op->emitOpError("region should have no arguments");
     }
   }
   return success();

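The getContainingBlock change above only renames locals, but the underlying
trick deserves a note: the function recovers the enclosing Block from the
intrusive-list anchor embedded in it by subtracting the member's byte offset.
The same idea in a hedged, MLIR-free sketch (the in-tree version derives the
offset from Block::getSublistAccess rather than offsetof):

  #include <cstddef>

  struct List { int head; };
  struct Owner {
    int tag;
    List anchor; // intrusive list node embedded in its owner
  };

  // Recover the owner from a pointer to the embedded anchor.
  Owner *ownerOf(List *node) {
    return reinterpret_cast<Owner *>(
        reinterpret_cast<char *>(node) - offsetof(Owner, anchor));
  }
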
diff  --git a/mlir/lib/IR/OperationSupport.cpp b/mlir/lib/IR/OperationSupport.cpp
index dd76cf6dd6ae3..747d27ad1b8d0 100644
--- a/mlir/lib/IR/OperationSupport.cpp
+++ b/mlir/lib/IR/OperationSupport.cpp
@@ -34,8 +34,8 @@ NamedAttrList::NamedAttrList(DictionaryAttr attributes)
   dictionarySorted.setPointerAndInt(attributes, true);
 }
 
-NamedAttrList::NamedAttrList(const_iterator in_start, const_iterator in_end) {
-  assign(in_start, in_end);
+NamedAttrList::NamedAttrList(const_iterator inStart, const_iterator inEnd) {
+  assign(inStart, inEnd);
 }
 
 ArrayRef<NamedAttribute> NamedAttrList::getAttrs() const { return attrs; }
@@ -66,8 +66,8 @@ void NamedAttrList::append(StringRef name, Attribute attr) {
 }
 
 /// Replaces the attributes with new list of attributes.
-void NamedAttrList::assign(const_iterator in_start, const_iterator in_end) {
-  DictionaryAttr::sort(ArrayRef<NamedAttribute>{in_start, in_end}, attrs);
+void NamedAttrList::assign(const_iterator inStart, const_iterator inEnd) {
+  DictionaryAttr::sort(ArrayRef<NamedAttribute>{inStart, inEnd}, attrs);
   dictionarySorted.setPointerAndInt(nullptr, true);
 }
 

diff  --git a/mlir/lib/IR/Region.cpp b/mlir/lib/IR/Region.cpp
index 698e40582c92e..161470de8554a 100644
--- a/mlir/lib/IR/Region.cpp
+++ b/mlir/lib/IR/Region.cpp
@@ -152,10 +152,10 @@ void Region::dropAllReferences() {
 }
 
 Region *llvm::ilist_traits<::mlir::Block>::getParentRegion() {
-  size_t Offset(
+  size_t offset(
       size_t(&((Region *)nullptr->*Region::getSublistAccess(nullptr))));
-  iplist<Block> *Anchor(static_cast<iplist<Block> *>(this));
-  return reinterpret_cast<Region *>(reinterpret_cast<char *>(Anchor) - Offset);
+  iplist<Block> *anchor(static_cast<iplist<Block> *>(this));
+  return reinterpret_cast<Region *>(reinterpret_cast<char *>(anchor) - offset);
 }
 
 /// This is a trait method invoked when a basic block is added to a region.

diff  --git a/mlir/lib/Interfaces/SideEffectInterfaces.cpp b/mlir/lib/Interfaces/SideEffectInterfaces.cpp
index c02bc0c9f5889..e469dde68e7f8 100644
--- a/mlir/lib/Interfaces/SideEffectInterfaces.cpp
+++ b/mlir/lib/Interfaces/SideEffectInterfaces.cpp
@@ -76,9 +76,9 @@ static bool wouldOpBeTriviallyDeadImpl(Operation *rootOp) {
 
       // Otherwise, if the op has recursive side effects we can treat the
       // operation itself as having no effects.
-    } else if (hasRecursiveEffects) {
-      continue;
     }
+    if (hasRecursiveEffects)
+      continue;
 
     // If there were no effect interfaces, we treat this op as conservatively
     // having effects.

diff  --git a/mlir/lib/Parser/AffineParser.cpp b/mlir/lib/Parser/AffineParser.cpp
index e8c4a02c21ce9..1ba2ad5c7a2da 100644
--- a/mlir/lib/Parser/AffineParser.cpp
+++ b/mlir/lib/Parser/AffineParser.cpp
@@ -525,13 +525,14 @@ ParseResult AffineParser::parseAffineMapOrIntegerSetInline(AffineMap &map,
   bool isColon = getToken().is(Token::colon);
   if (!isArrow && !isColon) {
     return emitError("expected '->' or ':'");
-  } else if (isArrow) {
+  }
+  if (isArrow) {
     parseToken(Token::arrow, "expected '->' or '['");
     map = parseAffineMapRange(numDims, numSymbols);
     return map ? success() : failure();
-  } else if (parseToken(Token::colon, "expected ':' or '['")) {
-    return failure();
   }
+  if (parseToken(Token::colon, "expected ':' or '['"))
+    return failure();
 
   if ((set = parseIntegerSetConstraints(numDims, numSymbols)))
     return success();

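This AffineParser hunk, like several others in the commit, applies
readability-else-after-return: once a branch ends in return or continue, the
trailing else only adds nesting, and each follow-on test can stand alone.
Reduced to a toy example:

  // Instead of: if (a) return 1; else if (b) return 2; else return 3;
  int classify(bool a, bool b) {
    if (a)
      return 1;
    if (b)
      return 2;
    return 3;
  }
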
diff  --git a/mlir/lib/Pass/Pass.cpp b/mlir/lib/Pass/Pass.cpp
index c7bbf4a9b2a78..911753650d5eb 100644
--- a/mlir/lib/Pass/Pass.cpp
+++ b/mlir/lib/Pass/Pass.cpp
@@ -358,8 +358,8 @@ LogicalResult OpToOpPassAdaptor::run(Pass *pass, Operation *op,
   PassInstrumentor *pi = am.getPassInstrumentor();
   PassInstrumentation::PipelineParentInfo parentInfo = {llvm::get_threadid(),
                                                         pass};
-  auto dynamic_pipeline_callback = [&](OpPassManager &pipeline,
-                                       Operation *root) -> LogicalResult {
+  auto dynamicPipelineCallback = [&](OpPassManager &pipeline,
+                                     Operation *root) -> LogicalResult {
     if (!op->isAncestor(root))
       return root->emitOpError()
              << "Trying to schedule a dynamic pipeline on an "
@@ -379,7 +379,7 @@ LogicalResult OpToOpPassAdaptor::run(Pass *pass, Operation *op,
                                           verifyPasses, parentInitGeneration,
                                           pi, &parentInfo);
   };
-  pass->passState.emplace(op, am, dynamic_pipeline_callback);
+  pass->passState.emplace(op, am, dynamicPipelineCallback);
 
   // Instrument before the pass has run.
   if (pi)
@@ -437,7 +437,7 @@ LogicalResult OpToOpPassAdaptor::runPipeline(
     const PassInstrumentation::PipelineParentInfo *parentInfo) {
   assert((!instrumentor || parentInfo) &&
          "expected parent info if instrumentor is provided");
-  auto scope_exit = llvm::make_scope_exit([&] {
+  auto scopeExit = llvm::make_scope_exit([&] {
     // Clear out any computed operation analyses. These analyses won't be used
     // any more in this pipeline, and this helps reduce the current working set
     // of memory. If preserving these analyses becomes important in the future
@@ -460,7 +460,7 @@ LogicalResult OpToOpPassAdaptor::runPipeline(
 /// type, or nullptr if one does not exist.
 static OpPassManager *findPassManagerFor(MutableArrayRef<OpPassManager> mgrs,
                                          StringRef name) {
-  auto it = llvm::find_if(
+  auto *it = llvm::find_if(
       mgrs, [&](OpPassManager &mgr) { return mgr.getOpName() == name; });
   return it == mgrs.end() ? nullptr : &*it;
 }
@@ -470,7 +470,7 @@ static OpPassManager *findPassManagerFor(MutableArrayRef<OpPassManager> mgrs,
 static OpPassManager *findPassManagerFor(MutableArrayRef<OpPassManager> mgrs,
                                          StringAttr name,
                                          MLIRContext &context) {
-  auto it = llvm::find_if(
+  auto *it = llvm::find_if(
       mgrs, [&](OpPassManager &mgr) { return mgr.getOpName(context) == name; });
   return it == mgrs.end() ? nullptr : &*it;
 }

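The scopeExit guard renamed above comes from llvm::make_scope_exit, which
runs its callback when the guard leaves scope on any exit path, so the
analysis cleanup happens whether the pipeline succeeds or bails early. A
minimal sketch of the utility (process and its message are invented):

  #include "llvm/ADT/ScopeExit.h"
  #include "llvm/Support/raw_ostream.h"

  void process(bool failEarly) {
    auto cleanup = llvm::make_scope_exit(
        [] { llvm::errs() << "analyses cleared\n"; });
    if (failEarly)
      return; // cleanup runs here...
  }           // ...and here on the normal path.
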
diff  --git a/mlir/lib/TableGen/Attribute.cpp b/mlir/lib/TableGen/Attribute.cpp
index 5bc618c5e2094..630a9205fc6c4 100644
--- a/mlir/lib/TableGen/Attribute.cpp
+++ b/mlir/lib/TableGen/Attribute.cpp
@@ -253,7 +253,7 @@ StringRef StructFieldAttr::getName() const {
 }
 
 Attribute StructFieldAttr::getType() const {
-  auto init = def->getValueInit("type");
+  auto *init = def->getValueInit("type");
   return Attribute(cast<llvm::DefInit>(init));
 }
 

diff  --git a/mlir/lib/TableGen/Dialect.cpp b/mlir/lib/TableGen/Dialect.cpp
index 6970a7f8276a8..57586dd64d07e 100644
--- a/mlir/lib/TableGen/Dialect.cpp
+++ b/mlir/lib/TableGen/Dialect.cpp
@@ -38,7 +38,7 @@ std::string Dialect::getCppClassName() const {
 
 static StringRef getAsStringOrEmpty(const llvm::Record &record,
                                     StringRef fieldName) {
-  if (auto valueInit = record.getValueInit(fieldName)) {
+  if (auto *valueInit = record.getValueInit(fieldName)) {
     if (llvm::isa<llvm::StringInit>(valueInit))
       return record.getValueAsString(fieldName);
   }

diff  --git a/mlir/lib/TableGen/Operator.cpp b/mlir/lib/TableGen/Operator.cpp
index fb2c0c79f4069..f1c1fe5346661 100644
--- a/mlir/lib/TableGen/Operator.cpp
+++ b/mlir/lib/TableGen/Operator.cpp
@@ -346,10 +346,9 @@ void Operator::populateTypeInferenceInfo(
       if (getArg(*mi).is<NamedAttribute *>()) {
         // TODO: Handle attributes.
         continue;
-      } else {
-        resultTypeMapping[i].emplace_back(*mi);
-        found = true;
       }
+      resultTypeMapping[i].emplace_back(*mi);
+      found = true;
     }
     return found;
   };

diff  --git a/mlir/lib/TableGen/Pattern.cpp b/mlir/lib/TableGen/Pattern.cpp
index 148ca49e65e07..b459af368fc53 100644
--- a/mlir/lib/TableGen/Pattern.cpp
+++ b/mlir/lib/TableGen/Pattern.cpp
@@ -649,7 +649,7 @@ std::vector<AppliedConstraint> Pattern::getConstraints() const {
   std::vector<AppliedConstraint> ret;
   ret.reserve(listInit->size());
 
-  for (auto it : *listInit) {
+  for (auto *it : *listInit) {
     auto *dagInit = dyn_cast<llvm::DagInit>(it);
     if (!dagInit)
       PrintFatalError(&def, "all elements in Pattern multi-entity "

diff  --git a/mlir/lib/TableGen/Predicate.cpp b/mlir/lib/TableGen/Predicate.cpp
index 554b6627247fd..7238a866a4461 100644
--- a/mlir/lib/TableGen/Predicate.cpp
+++ b/mlir/lib/TableGen/Predicate.cpp
@@ -188,7 +188,7 @@ buildPredicateTree(const Pred &root,
   // Build child subtrees.
   auto combined = static_cast<const CombinedPred &>(root);
   for (const auto *record : combined.getChildren()) {
-    auto childTree =
+    auto *childTree =
         buildPredicateTree(Pred(record), allocator, allSubstitutions);
     rootNode->children.push_back(childTree);
   }
@@ -241,7 +241,7 @@ propagateGroundTruth(PredNode *node,
 
   for (auto &child : children) {
     // First, simplify the child.  This maintains the predicate as it was.
-    auto simplifiedChild =
+    auto *simplifiedChild =
         propagateGroundTruth(child, knownTruePreds, knownFalsePreds);
 
     // Just add the child if we don't know how to simplify the current node.
@@ -273,8 +273,9 @@ propagateGroundTruth(PredNode *node,
       node->kind = collapseKind;
       node->children.clear();
       return node;
-    } else if (simplifiedChild->kind == eraseKind ||
-               eraseList.count(simplifiedChild->predicate) != 0) {
+    }
+    if (simplifiedChild->kind == eraseKind ||
+        eraseList.count(simplifiedChild->predicate) != 0) {
       continue;
     }
     node->children.push_back(simplifiedChild);
@@ -350,7 +351,7 @@ static std::string getCombinedCondition(const PredNode &root) {
 
 std::string CombinedPred::getConditionImpl() const {
   llvm::SpecificBumpPtrAllocator<PredNode> allocator;
-  auto predicateTree = buildPredicateTree(*this, allocator, {});
+  auto *predicateTree = buildPredicateTree(*this, allocator, {});
   predicateTree =
       propagateGroundTruth(predicateTree,
                            /*knownTruePreds=*/llvm::SmallPtrSet<Pred *, 2>(),

diff  --git a/mlir/lib/TableGen/Trait.cpp b/mlir/lib/TableGen/Trait.cpp
index 02bb4d4de64af..4e28e9987e752 100644
--- a/mlir/lib/TableGen/Trait.cpp
+++ b/mlir/lib/TableGen/Trait.cpp
@@ -26,7 +26,7 @@ using namespace mlir::tblgen;
 //===----------------------------------------------------------------------===//
 
 Trait Trait::create(const llvm::Init *init) {
-  auto def = cast<llvm::DefInit>(init)->getDef();
+  auto *def = cast<llvm::DefInit>(init)->getDef();
   if (def->isSubClassOf("PredTrait"))
     return Trait(Kind::Pred, def);
   if (def->isSubClassOf("GenInternalTrait"))

diff  --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
index e8adf0d013496..65f0fc9b38499 100644
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -61,7 +61,7 @@ class Importer {
   LogicalResult processFunction(llvm::Function *f);
 
   /// Imports GV as a GlobalOp, creating it if it doesn't exist.
-  GlobalOp processGlobal(llvm::GlobalVariable *GV);
+  GlobalOp processGlobal(llvm::GlobalVariable *gv);
 
 private:
   /// Returns personality of `f` as a FlatSymbolRefAttr.
@@ -145,7 +145,8 @@ Location Importer::processDebugLoc(const llvm::DebugLoc &loc,
     os << "llvm-imported-inst-%";
     inst->printAsOperand(os, /*PrintType=*/false);
     return FileLineColLoc::get(context, os.str(), 0, 0);
-  } else if (!loc) {
+  }
+  if (!loc) {
     return unknownLoc;
   }
   // FIXME: Obtain the filename from DILocationInfo.
@@ -304,47 +305,47 @@ Attribute Importer::getConstantAsAttr(llvm::Constant *value) {
   return nullptr;
 }
 
-GlobalOp Importer::processGlobal(llvm::GlobalVariable *GV) {
-  auto it = globals.find(GV);
+GlobalOp Importer::processGlobal(llvm::GlobalVariable *gv) {
+  auto it = globals.find(gv);
   if (it != globals.end())
     return it->second;
 
   OpBuilder b(module.getBody(), getGlobalInsertPt());
   Attribute valueAttr;
-  if (GV->hasInitializer())
-    valueAttr = getConstantAsAttr(GV->getInitializer());
-  Type type = processType(GV->getValueType());
+  if (gv->hasInitializer())
+    valueAttr = getConstantAsAttr(gv->getInitializer());
+  Type type = processType(gv->getValueType());
   if (!type)
     return nullptr;
 
   uint64_t alignment = 0;
-  llvm::MaybeAlign maybeAlign = GV->getAlign();
+  llvm::MaybeAlign maybeAlign = gv->getAlign();
   if (maybeAlign.hasValue()) {
     llvm::Align align = maybeAlign.getValue();
     alignment = align.value();
   }
 
   GlobalOp op =
-      b.create<GlobalOp>(UnknownLoc::get(context), type, GV->isConstant(),
-                         convertLinkageFromLLVM(GV->getLinkage()),
-                         GV->getName(), valueAttr, alignment);
+      b.create<GlobalOp>(UnknownLoc::get(context), type, gv->isConstant(),
+                         convertLinkageFromLLVM(gv->getLinkage()),
+                         gv->getName(), valueAttr, alignment);
 
-  if (GV->hasInitializer() && !valueAttr) {
+  if (gv->hasInitializer() && !valueAttr) {
     Region &r = op.getInitializerRegion();
     currentEntryBlock = b.createBlock(&r);
     b.setInsertionPoint(currentEntryBlock, currentEntryBlock->begin());
-    Value v = processConstant(GV->getInitializer());
+    Value v = processConstant(gv->getInitializer());
     if (!v)
       return nullptr;
     b.create<ReturnOp>(op.getLoc(), ArrayRef<Value>({v}));
   }
-  if (GV->hasAtLeastLocalUnnamedAddr())
+  if (gv->hasAtLeastLocalUnnamedAddr())
     op.setUnnamedAddrAttr(UnnamedAddrAttr::get(
-        context, convertUnnamedAddrFromLLVM(GV->getUnnamedAddr())));
-  if (GV->hasSection())
-    op.setSectionAttr(b.getStringAttr(GV->getSection()));
+        context, convertUnnamedAddrFromLLVM(gv->getUnnamedAddr())));
+  if (gv->hasSection())
+    op.setSectionAttr(b.getStringAttr(gv->getSection()));
 
-  return globals[GV] = op;
+  return globals[gv] = op;
 }
 
 Value Importer::processConstant(llvm::Constant *c) {
@@ -366,9 +367,9 @@ Value Importer::processConstant(llvm::Constant *c) {
       return nullptr;
     return instMap[c] = bEntry.create<NullOp>(unknownLoc, type);
   }
-  if (auto *GV = dyn_cast<llvm::GlobalVariable>(c))
+  if (auto *gv = dyn_cast<llvm::GlobalVariable>(c))
     return bEntry.create<AddressOfOp>(UnknownLoc::get(context),
-                                      processGlobal(GV));
+                                      processGlobal(gv));
 
   if (auto *ce = dyn_cast<llvm::ConstantExpr>(c)) {
     llvm::Instruction *i = ce->getAsInstruction();
@@ -526,8 +527,8 @@ LogicalResult
 Importer::processBranchArgs(llvm::Instruction *br, llvm::BasicBlock *target,
                             SmallVectorImpl<Value> &blockArguments) {
   for (auto inst = target->begin(); isa<llvm::PHINode>(inst); ++inst) {
-    auto *PN = cast<llvm::PHINode>(&*inst);
-    Value value = processValue(PN->getIncomingValueForBlock(br->getParent()));
+    auto *pn = cast<llvm::PHINode>(&*inst);
+    Value value = processValue(pn->getIncomingValueForBlock(br->getParent()));
     if (!value)
       return failure();
     blockArguments.push_back(value);
@@ -777,10 +778,10 @@ FlatSymbolRefAttr Importer::getPersonalityAsAttr(llvm::Function *f) {
 
   // If it doesn't have a name, only function pointers that are bitcast to
   // i8* are currently parsed.
-  if (auto ce = dyn_cast<llvm::ConstantExpr>(pf)) {
+  if (auto *ce = dyn_cast<llvm::ConstantExpr>(pf)) {
     if (ce->getOpcode() == llvm::Instruction::BitCast &&
         ce->getType() == llvm::Type::getInt8PtrTy(f->getContext())) {
-      if (auto func = dyn_cast<llvm::Function>(ce->getOperand(0)))
+      if (auto *func = dyn_cast<llvm::Function>(ce->getOperand(0)))
         return SymbolRefAttr::get(b.getContext(), func->getName());
     }
   }

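The Importer hunks above combine two tidy checks: results of LLVM-style RTTI
casts are pointers, so dyn_cast results get auto *, and the pointer doubles
as the branch condition since dyn_cast yields nullptr on a type mismatch. In
isolation (hasInit is a hypothetical helper):

  #include "llvm/IR/Constant.h"
  #include "llvm/IR/GlobalVariable.h"
  #include "llvm/Support/Casting.h"

  bool hasInit(llvm::Constant *c) {
    // The declaration is the test: null means "not a GlobalVariable".
    if (auto *gv = llvm::dyn_cast<llvm::GlobalVariable>(c))
      return gv->hasInitializer();
    return false;
  }
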
diff  --git a/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
index c8dd7fe5c7569..e4196aeca50a9 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
@@ -55,12 +55,11 @@ static llvm::Constant *createSourceLocStrFromLocation(Location loc,
     unsigned lineNo = fileLoc.getLine();
     unsigned colNo = fileLoc.getColumn();
     return builder.getOrCreateSrcLocStr(name, fileName, lineNo, colNo);
-  } else {
-    std::string locStr;
-    llvm::raw_string_ostream locOS(locStr);
-    locOS << loc;
-    return builder.getOrCreateSrcLocStr(locOS.str());
   }
+  std::string locStr;
+  llvm::raw_string_ostream locOS(locStr);
+  locOS << loc;
+  return builder.getOrCreateSrcLocStr(locOS.str());
 }
 
 /// Create the location struct from the operation location information.
@@ -81,9 +80,8 @@ static llvm::Constant *createMappingInformation(Location loc,
   if (auto nameLoc = loc.dyn_cast<NameLoc>()) {
     StringRef name = nameLoc.getName();
     return createSourceLocStrFromLocation(nameLoc.getChildLoc(), builder, name);
-  } else {
-    return createSourceLocStrFromLocation(loc, builder, "unknown");
   }
+  return createSourceLocStrFromLocation(loc, builder, "unknown");
 }
 
 /// Return the runtime function used to lower the given operation.

diff  --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index fdd3656069306..581065f7cd0a7 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -861,11 +861,11 @@ convertOmpWsLoop(Operation &opInst, llvm::IRBuilderBase &builder,
 }
 
 // Convert an Atomic Ordering attribute to llvm::AtomicOrdering.
-llvm::AtomicOrdering convertAtomicOrdering(Optional<StringRef> AOAttr) {
-  if (!AOAttr.hasValue())
+llvm::AtomicOrdering convertAtomicOrdering(Optional<StringRef> aoAttr) {
+  if (!aoAttr.hasValue())
     return llvm::AtomicOrdering::Monotonic; // Default Memory Ordering
 
-  return StringSwitch<llvm::AtomicOrdering>(AOAttr.getValue())
+  return StringSwitch<llvm::AtomicOrdering>(aoAttr.getValue())
       .Case("seq_cst", llvm::AtomicOrdering::SequentiallyConsistent)
       .Case("acq_rel", llvm::AtomicOrdering::AcquireRelease)
       .Case("acquire", llvm::AtomicOrdering::Acquire)
@@ -889,7 +889,7 @@ convertOmpAtomicRead(Operation &opInst, llvm::IRBuilderBase &builder,
       moduleTranslation.translateLoc(opInst.getLoc(), subprogram);
   llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder.saveIP(),
                                                     llvm::DebugLoc(diLoc));
-  llvm::AtomicOrdering AO = convertAtomicOrdering(readOp.memory_order());
+  llvm::AtomicOrdering ao = convertAtomicOrdering(readOp.memory_order());
   llvm::Value *address = moduleTranslation.lookupValue(readOp.address());
   llvm::OpenMPIRBuilder::InsertPointTy currentIP = builder.saveIP();
 
@@ -903,9 +903,9 @@ convertOmpAtomicRead(Operation &opInst, llvm::IRBuilderBase &builder,
 
   // Restore the IP and insert Atomic Read.
   builder.restoreIP(currentIP);
-  llvm::OpenMPIRBuilder::AtomicOpValue V = {v, false, false};
-  llvm::OpenMPIRBuilder::AtomicOpValue X = {address, false, false};
-  builder.restoreIP(ompBuilder->createAtomicRead(ompLoc, X, V, AO));
+  llvm::OpenMPIRBuilder::AtomicOpValue atomicV = {v, false, false};
+  llvm::OpenMPIRBuilder::AtomicOpValue x = {address, false, false};
+  builder.restoreIP(ompBuilder->createAtomicRead(ompLoc, x, atomicV, ao));
   return success();
 }
 

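convertAtomicOrdering above maps attribute strings onto llvm::AtomicOrdering
through llvm::StringSwitch, a chainable alternative to a string-compare
ladder. The utility in isolation, with invented names:

  #include "llvm/ADT/StringSwitch.h"

  enum class Level { Low, Mid, High, Unknown };

  Level parseLevel(llvm::StringRef s) {
    return llvm::StringSwitch<Level>(s)
        .Case("low", Level::Low)
        .Case("mid", Level::Mid)
        .Case("high", Level::High)
        .Default(Level::Unknown);
  }
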
diff  --git a/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp
index 09519aebfbfac..5bc02dc552709 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp
@@ -29,17 +29,17 @@ using mlir::LLVM::detail::createIntrinsicCall;
 // take a single int32 argument. It is likely that the interface of this
 // function will change to make it more generic.
 static llvm::Value *createDeviceFunctionCall(llvm::IRBuilderBase &builder,
-                                             StringRef fn_name, int parameter) {
+                                             StringRef fnName, int parameter) {
   llvm::Module *module = builder.GetInsertBlock()->getModule();
-  llvm::FunctionType *function_type = llvm::FunctionType::get(
+  llvm::FunctionType *functionType = llvm::FunctionType::get(
       llvm::Type::getInt64Ty(module->getContext()), // return type.
       llvm::Type::getInt32Ty(module->getContext()), // parameter type.
       false);                                       // no variadic arguments.
   llvm::Function *fn = dyn_cast<llvm::Function>(
-      module->getOrInsertFunction(fn_name, function_type).getCallee());
-  llvm::Value *fn_op0 = llvm::ConstantInt::get(
+      module->getOrInsertFunction(fnName, functionType).getCallee());
+  llvm::Value *fnOp0 = llvm::ConstantInt::get(
       llvm::Type::getInt32Ty(module->getContext()), parameter);
-  return builder.CreateCall(fn, ArrayRef<llvm::Value *>(fn_op0));
+  return builder.CreateCall(fn, ArrayRef<llvm::Value *>(fnOp0));
 }
 
 namespace {

diff  --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 0759340f4824e..7f238afd2c927 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -242,10 +242,10 @@ llvm::Constant *mlir::LLVM::detail::getLLVMConstant(
     if (auto *arrayTy = dyn_cast<llvm::ArrayType>(llvmType)) {
       elementType = arrayTy->getElementType();
       numElements = arrayTy->getNumElements();
-    } else if (auto fVectorTy = dyn_cast<llvm::FixedVectorType>(llvmType)) {
+    } else if (auto *fVectorTy = dyn_cast<llvm::FixedVectorType>(llvmType)) {
       elementType = fVectorTy->getElementType();
       numElements = fVectorTy->getNumElements();
-    } else if (auto sVectorTy = dyn_cast<llvm::ScalableVectorType>(llvmType)) {
+    } else if (auto *sVectorTy = dyn_cast<llvm::ScalableVectorType>(llvmType)) {
       elementType = sVectorTy->getElementType();
       numElements = sVectorTy->getMinNumElements();
     } else {

diff  --git a/mlir/lib/Tools/PDLL/Parser/Parser.cpp b/mlir/lib/Tools/PDLL/Parser/Parser.cpp
index 58060075cb1fd..4f98bfd08580f 100644
--- a/mlir/lib/Tools/PDLL/Parser/Parser.cpp
+++ b/mlir/lib/Tools/PDLL/Parser/Parser.cpp
@@ -1525,7 +1525,7 @@ FailureOr<ast::Type> Parser::validateMemberAccess(ast::Expr *parentExpr,
 
     // Handle named results.
     auto elementNames = tupleType.getElementNames();
-    auto it = llvm::find(elementNames, name);
+    const auto *it = llvm::find(elementNames, name);
     if (it != elementNames.end())
       return tupleType.getElementTypes()[it - elementNames.begin()];
   }

diff  --git a/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp b/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
index 83c00f05c45dd..88aa4626b42f1 100644
--- a/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
+++ b/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
@@ -133,7 +133,7 @@ static bool isDefOrUse(const AsmParserState::SMDefinition &def, llvm::SMLoc loc,
   }
 
   // Check the uses.
-  auto useIt = llvm::find_if(def.uses, [&](const llvm::SMRange &range) {
+  const auto *useIt = llvm::find_if(def.uses, [&](const llvm::SMRange &range) {
     return contains(range, loc);
   });
   if (useIt != def.uses.end()) {

diff  --git a/mlir/lib/Tools/mlir-reduce/MlirReduceMain.cpp b/mlir/lib/Tools/mlir-reduce/MlirReduceMain.cpp
index d895432fa156a..53e29f5284022 100644
--- a/mlir/lib/Tools/mlir-reduce/MlirReduceMain.cpp
+++ b/mlir/lib/Tools/mlir-reduce/MlirReduceMain.cpp
@@ -42,20 +42,20 @@ LogicalResult mlir::mlirReduceMain(int argc, char **argv,
                                    MLIRContext &context) {
   // Override the default '-h' and use the default PrintHelpMessage() which
   // won't print options in categories.
-  static llvm::cl::opt<bool> Help("h", llvm::cl::desc("Alias for -help"),
+  static llvm::cl::opt<bool> help("h", llvm::cl::desc("Alias for -help"),
                                   llvm::cl::Hidden);
 
-  static llvm::cl::OptionCategory MLIRReduceCategory("mlir-reduce options");
+  static llvm::cl::OptionCategory mlirReduceCategory("mlir-reduce options");
 
   static llvm::cl::opt<std::string> inputFilename(
       llvm::cl::Positional, llvm::cl::desc("<input file>"),
-      llvm::cl::cat(MLIRReduceCategory));
+      llvm::cl::cat(mlirReduceCategory));
 
   static llvm::cl::opt<std::string> outputFilename(
       "o", llvm::cl::desc("Output filename for the reduced test case"),
-      llvm::cl::init("-"), llvm::cl::cat(MLIRReduceCategory));
+      llvm::cl::init("-"), llvm::cl::cat(mlirReduceCategory));
 
-  llvm::cl::HideUnrelatedOptions(MLIRReduceCategory);
+  llvm::cl::HideUnrelatedOptions(mlirReduceCategory);
 
   llvm::InitLLVM y(argc, argv);
 
@@ -65,7 +65,7 @@ LogicalResult mlir::mlirReduceMain(int argc, char **argv,
   llvm::cl::ParseCommandLineOptions(argc, argv,
                                     "MLIR test case reduction tool.\n");
 
-  if (Help) {
+  if (help) {
     llvm::cl::PrintHelpMessage();
     return success();
   }

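The mlir-reduce hunk renames its cl::opt statics; the underlying LLVM
CommandLine pattern is an OptionCategory plus HideUnrelatedOptions, so that
-help lists only this tool's options. A hedged standalone sketch:

  #include <string>
  #include "llvm/Support/CommandLine.h"

  static llvm::cl::OptionCategory toolCategory("my-tool options");

  static llvm::cl::opt<std::string> inputFilename(
      llvm::cl::Positional, llvm::cl::desc("<input file>"),
      llvm::cl::cat(toolCategory));

  int main(int argc, char **argv) {
    llvm::cl::HideUnrelatedOptions(toolCategory);
    llvm::cl::ParseCommandLineOptions(argc, argv, "demo tool\n");
    return 0;
  }
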
diff  --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index 1a3de13177d7e..801268efdddbe 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -301,14 +301,15 @@ struct MemRefDependenceGraph {
       memrefEdgeCount[value]--;
     }
     // Remove 'srcId' from 'inEdges[dstId]'.
-    for (auto it = inEdges[dstId].begin(); it != inEdges[dstId].end(); ++it) {
+    for (auto *it = inEdges[dstId].begin(); it != inEdges[dstId].end(); ++it) {
       if ((*it).id == srcId && (*it).value == value) {
         inEdges[dstId].erase(it);
         break;
       }
     }
     // Remove 'dstId' from 'outEdges[srcId]'.
-    for (auto it = outEdges[srcId].begin(); it != outEdges[srcId].end(); ++it) {
+    for (auto *it = outEdges[srcId].begin(); it != outEdges[srcId].end();
+         ++it) {
       if ((*it).id == dstId && (*it).value == value) {
         outEdges[srcId].erase(it);
         break;

diff  --git a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
index 71a24104c71ab..15462c95357d8 100644
--- a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
@@ -85,7 +85,7 @@ LogicalResult mlir::moveLoopInvariantCode(LoopLikeOpInterface looplike) {
 
   // Helper to check whether an operation is loop invariant wrt. SSA properties.
   auto isDefinedOutsideOfBody = [&](Value value) {
-    auto definingOp = value.getDefiningOp();
+    auto *definingOp = value.getDefiningOp();
     return (definingOp && !!willBeMovedSet.count(definingOp)) ||
            looplike.isDefinedOutsideOfLoop(value);
   };

diff  --git a/mlir/lib/Transforms/NormalizeMemRefs.cpp b/mlir/lib/Transforms/NormalizeMemRefs.cpp
index b890bc70e7025..c33d1b6175e9a 100644
--- a/mlir/lib/Transforms/NormalizeMemRefs.cpp
+++ b/mlir/lib/Transforms/NormalizeMemRefs.cpp
@@ -517,6 +517,6 @@ Operation *NormalizeMemRefs::createOpResultsNormalized(FuncOp funcOp,
       newRegion->takeBody(oldRegion);
     }
     return bb.createOperation(result);
-  } else
-    return oldOp;
+  }
+  return oldOp;
 }

diff  --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index 1613ecc2f727d..6ac860592dda9 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -191,7 +191,7 @@ static void findMatchingStartFinishInsts(
     // Check for dependence with outgoing DMAs. Doing this conservatively.
     // TODO: use the dependence analysis to check for
     // dependences between an incoming and outgoing DMA in the same iteration.
-    auto it = outgoingDmaOps.begin();
+    auto *it = outgoingDmaOps.begin();
     for (; it != outgoingDmaOps.end(); ++it) {
       if (it->getDstMemRef() == dmaStartOp.getSrcMemRef())
         break;

diff  --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp
index 1994012d8a04c..36ebdbd4b5858 100644
--- a/mlir/lib/Transforms/Utils/FoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp
@@ -168,7 +168,7 @@ LogicalResult OperationFolder::tryToFold(
   if (op->getNumOperands() >= 2 && op->hasTrait<OpTrait::IsCommutative>()) {
     std::stable_partition(
         op->getOpOperands().begin(), op->getOpOperands().end(),
-        [&](OpOperand &O) { return !matchPattern(O.get(), m_Constant()); });
+        [&](OpOperand &o) { return !matchPattern(o.get(), m_Constant()); });
   }
 
   // Check to see if any operands to the operation is constant and whether

diff  --git a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
index 6b7f9369cbc29..dd97de1eebf1e 100644
--- a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
@@ -56,7 +56,8 @@ static bool isDependentLoadOrStoreOp(Operation *op,
   if (auto loadOp = dyn_cast<AffineReadOpInterface>(op)) {
     return values.count(loadOp.getMemRef()) > 0 &&
            values[loadOp.getMemRef()] == true;
-  } else if (auto storeOp = dyn_cast<AffineWriteOpInterface>(op)) {
+  }
+  if (auto storeOp = dyn_cast<AffineWriteOpInterface>(op)) {
     return values.count(storeOp.getMemRef()) > 0;
   }
   return false;

diff  --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 719a2ff73b83e..c0eb9cdfaf842 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -3034,7 +3034,7 @@ uint64_t mlir::affineDataCopyGenerate(Block::iterator begin,
     auto updateRegion =
         [&](const SmallMapVector<Value, std::unique_ptr<MemRefRegion>, 4>
                 &targetRegions) {
-          const auto it = targetRegions.find(region->memref);
+          const auto *const it = targetRegions.find(region->memref);
           if (it == targetRegions.end())
             return false;
 

diff  --git a/mlir/test/lib/Analysis/TestAliasAnalysis.cpp b/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
index 2b4899e451bfc..081bc89e87478 100644
--- a/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
+++ b/mlir/test/lib/Analysis/TestAliasAnalysis.cpp
@@ -67,7 +67,7 @@ struct TestAliasAnalysisPass
 
     // Check for aliasing behavior between each of the values.
     for (auto it = valsToCheck.begin(), e = valsToCheck.end(); it != e; ++it)
-      for (auto innerIt = valsToCheck.begin(); innerIt != it; ++innerIt)
+      for (auto *innerIt = valsToCheck.begin(); innerIt != it; ++innerIt)
         printAliasResult(aliasAnalysis.alias(*innerIt, *it), *innerIt, *it);
   }
 

diff  --git a/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp b/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp
index 6f34215ca0307..7cce0ef907e7a 100644
--- a/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp
+++ b/mlir/test/lib/Dialect/Math/TestPolynomialApproximation.cpp
@@ -52,9 +52,9 @@ struct TestMathPolynomialApproximationPass
 
 void TestMathPolynomialApproximationPass::runOnFunction() {
   RewritePatternSet patterns(&getContext());
-  MathPolynomialApproximationOptions approx_options;
-  approx_options.enableAvx2 = enableAvx2;
-  populateMathPolynomialApproximationPatterns(patterns, approx_options);
+  MathPolynomialApproximationOptions approxOptions;
+  approxOptions.enableAvx2 = enableAvx2;
+  populateMathPolynomialApproximationPatterns(patterns, approxOptions);
   (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
 }
 

diff  --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp
index b46aaf1069979..aee0bdb139703 100644
--- a/mlir/test/lib/Dialect/Test/TestDialect.cpp
+++ b/mlir/test/lib/Dialect/Test/TestDialect.cpp
@@ -689,24 +689,24 @@ static ParseResult parseWrappingRegionOp(OpAsmParser &parser,
   Region &body = *result.addRegion();
   body.push_back(new Block);
   Block &block = body.back();
-  Operation *wrapped_op = parser.parseGenericOperation(&block, block.begin());
-  if (!wrapped_op)
+  Operation *wrappedOp = parser.parseGenericOperation(&block, block.begin());
+  if (!wrappedOp)
     return failure();
 
   // Create a return terminator in the inner region, pass as operand to the
   // terminator the returned values from the wrapped operation.
-  SmallVector<Value, 8> return_operands(wrapped_op->getResults());
+  SmallVector<Value, 8> returnOperands(wrappedOp->getResults());
   OpBuilder builder(parser.getContext());
   builder.setInsertionPointToEnd(&block);
-  builder.create<TestReturnOp>(wrapped_op->getLoc(), return_operands);
+  builder.create<TestReturnOp>(wrappedOp->getLoc(), returnOperands);
 
   // Get the results type for the wrapping op from the terminator operands.
-  Operation &return_op = body.back().back();
-  result.types.append(return_op.operand_type_begin(),
-                      return_op.operand_type_end());
+  Operation &returnOp = body.back().back();
+  result.types.append(returnOp.operand_type_begin(),
+                      returnOp.operand_type_end());
 
   // Use the location of the wrapped op for the "test.wrapping_region" op.
-  result.location = wrapped_op->getLoc();
+  result.location = wrappedOp->getLoc();
 
   return success();
 }

diff  --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index 80b568c743b01..39f0b0b7da56d 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -808,7 +808,7 @@ def OpFuncRef : TEST_Op<"op_funcref"> {
 // That way, we will know whether the operation is called once or twice.
 def OpMGetNullAttr : NativeCodeCall<"Attribute()">;
 def OpMAttributeIsNull : Constraint<CPred<"! ($_self)">, "Attribute is null">;
-def OpMVal : NativeCodeCall<"OpMTest($_builder, $0)">;
+def OpMVal : NativeCodeCall<"opMTest($_builder, $0)">;
 def : Pat<(OpM $attr, $optAttr), (OpM $attr, (OpMVal $attr) ),
     [(OpMAttributeIsNull:$optAttr)]>;
 

diff  --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
index e2e054c1af67b..2f06d82bd521b 100644
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -56,7 +56,7 @@ static SmallVector<Value, 2> bindMultipleNativeCodeCallResult(Value input1,
 // This lets us check the number of times OpM_Test was called by inspecting
 // the returned value in the MLIR output.
 static int64_t opMIncreasingValue = 314159265;
-static Attribute OpMTest(PatternRewriter &rewriter, Value val) {
+static Attribute opMTest(PatternRewriter &rewriter, Value val) {
   int64_t i = opMIncreasingValue++;
   return rewriter.getIntegerAttr(rewriter.getIntegerType(32), i);
 }

diff  --git a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
index 8a30446545d8b..afff0faf0c2f8 100644
--- a/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
+++ b/mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
@@ -71,7 +71,7 @@ ConvertTosaNegateOp::matchAndRewrite(Operation *op,
   double typeRangeMax = double(outputElementType.getStorageTypeMax() -
                                outputElementType.getZeroPoint()) *
                         outputElementType.getScale();
-  bool narrow_range = outputElementType.getStorageTypeMin() == 1 ? true : false;
+  bool narrowRange = outputElementType.getStorageTypeMin() == 1 ? true : false;
 
   auto dstQConstType = RankedTensorType::get(
       outputType.getShape(),
@@ -81,7 +81,7 @@ ConvertTosaNegateOp::matchAndRewrite(Operation *op,
                            rewriter.getI32IntegerAttr(
                                outputElementType.getStorageTypeIntegralWidth()),
                            0, true /* signed */,
-                           rewriter.getBoolAttr(narrow_range)));
+                           rewriter.getBoolAttr(narrowRange)));
 
   ElementsAttr inputElems;
   if (!matchPattern(tosaNegateOp.input1(), m_Constant(&inputElems)))

diff  --git a/mlir/test/lib/IR/TestMatchers.cpp b/mlir/test/lib/IR/TestMatchers.cpp
index a4007cd99ab65..66b9ad81f07ee 100644
--- a/mlir/test/lib/IR/TestMatchers.cpp
+++ b/mlir/test/lib/IR/TestMatchers.cpp
@@ -76,19 +76,19 @@ static void test1(FuncOp f) {
   llvm::outs() << "Pattern mul(mul(*), mul(*)) matched " << countMatches(f, p7)
                << " times\n";
 
-  auto mul_of_mulmul =
+  auto mulOfMulmul =
       m_Op<arith::MulFOp>(m_Op<arith::MulFOp>(), m_Op<arith::MulFOp>());
-  auto p8 = m_Op<arith::MulFOp>(mul_of_mulmul, mul_of_mulmul);
+  auto p8 = m_Op<arith::MulFOp>(mulOfMulmul, mulOfMulmul);
   llvm::outs()
       << "Pattern mul(mul(mul(*), mul(*)), mul(mul(*), mul(*))) matched "
       << countMatches(f, p8) << " times\n";
 
   // clang-format off
-  auto mul_of_muladd = m_Op<arith::MulFOp>(m_Op<arith::MulFOp>(), m_Op<arith::AddFOp>());
-  auto mul_of_anyadd = m_Op<arith::MulFOp>(m_Any(), m_Op<arith::AddFOp>());
+  auto mulOfMuladd = m_Op<arith::MulFOp>(m_Op<arith::MulFOp>(), m_Op<arith::AddFOp>());
+  auto mulOfAnyadd = m_Op<arith::MulFOp>(m_Any(), m_Op<arith::AddFOp>());
   auto p9 = m_Op<arith::MulFOp>(m_Op<arith::MulFOp>(
-                     mul_of_muladd, m_Op<arith::MulFOp>()),
-                   m_Op<arith::MulFOp>(mul_of_anyadd, mul_of_anyadd));
+                     mulOfMuladd, m_Op<arith::MulFOp>()),
+                   m_Op<arith::MulFOp>(mulOfAnyadd, mulOfAnyadd));
   // clang-format on
   llvm::outs() << "Pattern mul(mul(mul(mul(*), add(*)), mul(*)), mul(mul(*, "
                   "add(*)), mul(*, add(*)))) matched "
@@ -118,12 +118,12 @@ static void test1(FuncOp f) {
   llvm::outs() << "Pattern mul(a, add(b, c)) matched " << countMatches(f, p15)
                << " times\n";
 
-  auto mul_of_aany = m_Op<arith::MulFOp>(a, m_Any());
-  auto p16 = m_Op<arith::MulFOp>(mul_of_aany, m_Op<arith::AddFOp>(a, c));
+  auto mulOfAany = m_Op<arith::MulFOp>(a, m_Any());
+  auto p16 = m_Op<arith::MulFOp>(mulOfAany, m_Op<arith::AddFOp>(a, c));
   llvm::outs() << "Pattern mul(mul(a, *), add(a, c)) matched "
                << countMatches(f, p16) << " times\n";
 
-  auto p17 = m_Op<arith::MulFOp>(mul_of_aany, m_Op<arith::AddFOp>(c, b));
+  auto p17 = m_Op<arith::MulFOp>(mulOfAany, m_Op<arith::AddFOp>(c, b));
   llvm::outs() << "Pattern mul(mul(a, *), add(c, b)) matched "
                << countMatches(f, p17) << " times\n";
 }

diff  --git a/mlir/test/lib/IR/TestOpaqueLoc.cpp b/mlir/test/lib/IR/TestOpaqueLoc.cpp
index ea268bf91af2d..92a5c5a41b2cc 100644
--- a/mlir/test/lib/IR/TestOpaqueLoc.cpp
+++ b/mlir/test/lib/IR/TestOpaqueLoc.cpp
@@ -35,10 +35,10 @@ struct TestOpaqueLoc
 
   void runOnOperation() override {
     std::vector<std::unique_ptr<MyLocation>> myLocs;
-    int last_it = 0;
+    int lastIt = 0;
 
     getOperation().getBody()->walk([&](Operation *op) {
-      myLocs.push_back(std::make_unique<MyLocation>(last_it++));
+      myLocs.push_back(std::make_unique<MyLocation>(lastIt++));
 
       Location loc = op->getLoc();
 
@@ -54,14 +54,13 @@ struct TestOpaqueLoc
 
       /// Add the same operation but with fallback location to test the
       /// corresponding get method and serialization.
-      Operation *op_cloned_1 = builder.clone(*op);
-      op_cloned_1->setLoc(
-          OpaqueLoc::get<MyLocation *>(myLocs.back().get(), loc));
+      Operation *opCloned1 = builder.clone(*op);
+      opCloned1->setLoc(OpaqueLoc::get<MyLocation *>(myLocs.back().get(), loc));
 
       /// Add the same operation but with void* instead of MyLocation* to test
       /// getUnderlyingLocationOrNull method.
-      Operation *op_cloned_2 = builder.clone(*op);
-      op_cloned_2->setLoc(OpaqueLoc::get<void *>(nullptr, loc));
+      Operation *opCloned2 = builder.clone(*op);
+      opCloned2->setLoc(OpaqueLoc::get<void *>(nullptr, loc));
     });
 
     ScopedDiagnosticHandler diagHandler(&getContext(), [](Diagnostic &diag) {

diff  --git a/mlir/test/lib/Transforms/TestLoopFusion.cpp b/mlir/test/lib/Transforms/TestLoopFusion.cpp
index 380d4e68a7ce6..30de8ebe05e1c 100644
--- a/mlir/test/lib/Transforms/TestLoopFusion.cpp
+++ b/mlir/test/lib/Transforms/TestLoopFusion.cpp
@@ -156,7 +156,7 @@ using LoopFunc = function_ref<bool(AffineForOp, AffineForOp, unsigned, unsigned,
 // If 'returnOnChange' is true, returns on the first invocation of 'fn' that
 // returns true.
 static bool iterateLoops(ArrayRef<SmallVector<AffineForOp, 2>> depthToLoops,
-                         LoopFunc fn, bool return_on_change = false) {
+                         LoopFunc fn, bool returnOnChange = false) {
   bool changed = false;
   for (unsigned loopDepth = 0, end = depthToLoops.size(); loopDepth < end;
        ++loopDepth) {
@@ -167,7 +167,7 @@ static bool iterateLoops(ArrayRef<SmallVector<AffineForOp, 2>> depthToLoops,
         if (j != k)
           changed |=
               fn(loops[j], loops[k], j, k, loopDepth, depthToLoops.size());
-        if (changed && return_on_change)
+        if (changed && returnOnChange)
           return true;
       }
     }

diff  --git a/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp b/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp
index 82179a6dc7708..70f33bc5bbe1f 100644
--- a/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp
+++ b/mlir/test/mlir-spirv-cpu-runner/mlir_test_spirv_cpu_runner_c_wrappers.cpp
@@ -12,6 +12,8 @@
 
 #include "mlir/ExecutionEngine/RunnerUtils.h"
 
+// NOLINTBEGIN(*-identifier-naming)
+
 extern "C" void
 _mlir_ciface_fillI32Buffer(StridedMemRefType<int32_t, 1> *mem_ref,
                            int32_t value) {
@@ -36,3 +38,5 @@ _mlir_ciface_fillF32Buffer3D(StridedMemRefType<float, 3> *mem_ref,
   std::fill_n(mem_ref->basePtr,
               mem_ref->sizes[0] * mem_ref->sizes[1] * mem_ref->sizes[2], value);
 }
+
+// NOLINTEND(*-identifier-naming)

diff  --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
index 63ce33a3ed5bd..7651e19b43c2e 100644
--- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
+++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp
@@ -1009,9 +1009,8 @@ static LogicalResult generateOp(LinalgOpConfig &opConfig,
     return success(
         succeeded(generateNamedGenericOpOds(opConfig, genContext)) &&
         succeeded(generateNamedGenericOpDefns(opConfig, genContext)));
-  } else {
-    return emitError(genContext.getLoc()) << "unsupported operation type";
   }
+  return emitError(genContext.getLoc()) << "unsupported operation type";
 }
 
 //===----------------------------------------------------------------------===//

diff  --git a/mlir/tools/mlir-tblgen/DialectGen.cpp b/mlir/tools/mlir-tblgen/DialectGen.cpp
index de596b408de34..73b41ebde7ec3 100644
--- a/mlir/tools/mlir-tblgen/DialectGen.cpp
+++ b/mlir/tools/mlir-tblgen/DialectGen.cpp
@@ -68,9 +68,10 @@ findSelectedDialect(ArrayRef<const llvm::Record *> dialectDefs) {
     return llvm::None;
   }
 
-  auto dialectIt = llvm::find_if(dialectDefs, [](const llvm::Record *def) {
-    return Dialect(def).getName() == selectedDialect;
-  });
+  const auto *dialectIt =
+      llvm::find_if(dialectDefs, [](const llvm::Record *def) {
+        return Dialect(def).getName() == selectedDialect;
+      });
   if (dialectIt == dialectDefs.end()) {
     llvm::errs() << "selected dialect with '-dialect' does not exist\n";
     return llvm::None;

diff  --git a/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp b/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
index 522832aea414c..18315ec366a0a 100644
--- a/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
+++ b/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
@@ -24,31 +24,31 @@
 #include "llvm/TableGen/Record.h"
 #include "llvm/TableGen/TableGenBackend.h"
 
-static llvm::cl::OptionCategory IntrinsicGenCat("Intrinsics Generator Options");
+static llvm::cl::OptionCategory intrinsicGenCat("Intrinsics Generator Options");
 
 static llvm::cl::opt<std::string>
     nameFilter("llvmir-intrinsics-filter",
                llvm::cl::desc("Only keep the intrinsics with the specified "
                               "substring in their record name"),
-               llvm::cl::cat(IntrinsicGenCat));
+               llvm::cl::cat(intrinsicGenCat));
 
 static llvm::cl::opt<std::string>
     opBaseClass("dialect-opclass-base",
                 llvm::cl::desc("The base class for the ops in the dialect we "
                                "are planning to emit"),
-                llvm::cl::init("LLVM_IntrOp"), llvm::cl::cat(IntrinsicGenCat));
+                llvm::cl::init("LLVM_IntrOp"), llvm::cl::cat(intrinsicGenCat));
 
 static llvm::cl::opt<std::string> accessGroupRegexp(
     "llvmir-intrinsics-access-group-regexp",
     llvm::cl::desc("Mark intrinsics that match the specified "
                    "regexp as taking an access group metadata"),
-    llvm::cl::cat(IntrinsicGenCat));
+    llvm::cl::cat(intrinsicGenCat));
 
 static llvm::cl::opt<std::string> aliasScopesRegexp(
     "llvmir-intrinsics-alias-scopes-regexp",
     llvm::cl::desc("Mark intrinsics that match the specified "
                    "regexp as taking alias.scopes and noalias metadata"),
-    llvm::cl::cat(IntrinsicGenCat));
+    llvm::cl::cat(intrinsicGenCat));
 
 // Used to represent the indices of overloadable operands/results.
 using IndicesTy = llvm::SmallBitVector;
@@ -104,7 +104,7 @@ class LLVMIntrinsic {
     llvm::SmallVector<llvm::StringRef, 8> chunks;
     llvm::StringRef targetPrefix = record.getValueAsString("TargetPrefix");
     name.split(chunks, '_');
-    auto chunksBegin = chunks.begin();
+    auto *chunksBegin = chunks.begin();
     // Remove the target prefix from target specific intrinsics.
     if (!targetPrefix.empty()) {
       assert(targetPrefix == *chunksBegin &&

diff  --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
index e331397c8423a..2f9f079da8c95 100644
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -527,7 +527,7 @@ static void genAttributeVerifier(
                                emitHelper.isEmittingForOp());
 
     // Prefix with `tblgen_` to avoid hiding the attribute accessor.
-    Twine varName = tblgenNamePrefix + attrName;
+    std::string varName = (tblgenNamePrefix + attrName).str();
 
     // If the attribute is not required and we cannot emit the condition, then
     // there is nothing to be done.

diff  --git a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
index 59b472615c005..2a290bb86b270 100644
--- a/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
+++ b/mlir/tools/mlir-tblgen/SPIRVUtilsGen.cpp
@@ -891,7 +891,7 @@ static void emitOperandDeserialization(const Operator &op, ArrayRef<SMLoc> loc,
   unsigned operandNum = 0;
   for (unsigned i = 0, e = op.getNumArgs(); i < e; ++i) {
     auto argument = op.getArg(i);
-    if (auto valueArg = argument.dyn_cast<NamedTypeConstraint *>()) {
+    if (auto *valueArg = argument.dyn_cast<NamedTypeConstraint *>()) {
       if (valueArg->isVariableLength()) {
         if (i != e - 1) {
           PrintFatalError(loc, "SPIR-V ops can have Variadic<..> or "
@@ -921,7 +921,7 @@ static void emitOperandDeserialization(const Operator &op, ArrayRef<SMLoc> loc,
       os << tabs << "}\n";
     } else {
       os << tabs << formatv("if ({0} < {1}.size()) {{\n", wordIndex, words);
-      auto attr = argument.get<NamedAttribute *>();
+      auto *attr = argument.get<NamedAttribute *>();
       auto newtabs = tabs.str() + "  ";
       emitAttributeDeserialization(
           (attr->attr.isOptional() ? attr->attr.getBaseAttr() : attr->attr),

diff --git a/mlir/tools/mlir-tblgen/mlir-tblgen.cpp b/mlir/tools/mlir-tblgen/mlir-tblgen.cpp
index 4b2dcbea1f321..0f14b190d8917 100644
--- a/mlir/tools/mlir-tblgen/mlir-tblgen.cpp
+++ b/mlir/tools/mlir-tblgen/mlir-tblgen.cpp
@@ -40,16 +40,16 @@ GenNameParser::GenNameParser(llvm::cl::Option &opt)
   }
 }
 
-void GenNameParser::printOptionInfo(const llvm::cl::Option &O,
-                                    size_t GlobalWidth) const {
-  GenNameParser *TP = const_cast<GenNameParser *>(this);
-  llvm::array_pod_sort(TP->Values.begin(), TP->Values.end(),
-                       [](const GenNameParser::OptionInfo *VT1,
-                          const GenNameParser::OptionInfo *VT2) {
-                         return VT1->Name.compare(VT2->Name);
+void GenNameParser::printOptionInfo(const llvm::cl::Option &o,
+                                    size_t globalWidth) const {
+  GenNameParser *tp = const_cast<GenNameParser *>(this);
+  llvm::array_pod_sort(tp->Values.begin(), tp->Values.end(),
+                       [](const GenNameParser::OptionInfo *vT1,
+                          const GenNameParser::OptionInfo *vT2) {
+                         return vT1->Name.compare(vT2->Name);
                        });
   using llvm::cl::parser;
-  parser<const GenInfo *>::printOptionInfo(O, GlobalWidth);
+  parser<const GenInfo *>::printOptionInfo(o, globalWidth);
 }
 
 // Generator that prints records.
@@ -64,7 +64,7 @@ const mlir::GenInfo *generator;
 
 // TableGenMain requires a function pointer so this function is passed in which
 // simply wraps the call to the generator.
-static bool MlirTableGenMain(raw_ostream &os, RecordKeeper &records) {
+static bool mlirTableGenMain(raw_ostream &os, RecordKeeper &records) {
   if (!generator) {
     os << records;
     return false;
@@ -79,5 +79,5 @@ int main(int argc, char **argv) {
   cl::ParseCommandLineOptions(argc, argv);
   ::generator = generator.getValue();
 
-  return TableGenMain(argv[0], &MlirTableGenMain);
+  return TableGenMain(argv[0], &mlirTableGenMain);
 }

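In the mlir-tblgen.cpp hunk the renames follow the same identifier-naming convention, while the untouched call to llvm::array_pod_sort shows the qsort-style comparator contract that utility expects: pointers to elements in, a negative/zero/positive int out, here delegated to StringRef::compare. A minimal sketch under that contract, with a hypothetical Entry type:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/ADT/StringRef.h"

  struct Entry {
    llvm::StringRef name;
  };

  void sortByName(llvm::SmallVectorImpl<Entry> &entries) {
    // array_pod_sort takes a captureless comparator convertible to a C
    // function pointer and sorts trivially-copyable elements via qsort.
    llvm::array_pod_sort(entries.begin(), entries.end(),
                         [](const Entry *lhs, const Entry *rhs) {
                           return lhs->name.compare(rhs->name);
                         });
  }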
diff --git a/mlir/unittests/ExecutionEngine/Invoke.cpp b/mlir/unittests/ExecutionEngine/Invoke.cpp
index 23545e9f78ffc..b9565665be17d 100644
--- a/mlir/unittests/ExecutionEngine/Invoke.cpp
+++ b/mlir/unittests/ExecutionEngine/Invoke.cpp
@@ -103,10 +103,10 @@ TEST(MLIRExecutionEngine, SubtractFloat) {
 }
 
 TEST(NativeMemRefJit, ZeroRankMemref) {
-  OwningMemRef<float, 0> A({});
-  A[{}] = 42.;
-  ASSERT_EQ(*A->data, 42);
-  A[{}] = 0;
+  OwningMemRef<float, 0> a({});
+  a[{}] = 42.;
+  ASSERT_EQ(*a->data, 42);
+  a[{}] = 0;
   std::string moduleStr = R"mlir(
   func @zero_ranked(%arg0 : memref<f32>) attributes { llvm.emit_c_interface } {
     %cst42 = arith.constant 42.0 : f32
@@ -125,19 +125,19 @@ TEST(NativeMemRefJit, ZeroRankMemref) {
   ASSERT_TRUE(!!jitOrError);
   auto jit = std::move(jitOrError.get());
 
-  llvm::Error error = jit->invoke("zero_ranked", &*A);
+  llvm::Error error = jit->invoke("zero_ranked", &*a);
   ASSERT_TRUE(!error);
-  EXPECT_EQ((A[{}]), 42.);
-  for (float &elt : *A)
-    EXPECT_EQ(&elt, &(A[{}]));
+  EXPECT_EQ((a[{}]), 42.);
+  for (float &elt : *a)
+    EXPECT_EQ(&elt, &(a[{}]));
 }
 
 TEST(NativeMemRefJit, RankOneMemref) {
   int64_t shape[] = {9};
-  OwningMemRef<float, 1> A(shape);
+  OwningMemRef<float, 1> a(shape);
   int count = 1;
-  for (float &elt : *A) {
-    EXPECT_EQ(&elt, &(A[{count - 1}]));
+  for (float &elt : *a) {
+    EXPECT_EQ(&elt, &(a[{count - 1}]));
     elt = count++;
   }
 
@@ -160,10 +160,10 @@ TEST(NativeMemRefJit, RankOneMemref) {
   ASSERT_TRUE(!!jitOrError);
   auto jit = std::move(jitOrError.get());
 
-  llvm::Error error = jit->invoke("one_ranked", &*A);
+  llvm::Error error = jit->invoke("one_ranked", &*a);
   ASSERT_TRUE(!error);
   count = 1;
-  for (float &elt : *A) {
+  for (float &elt : *a) {
     if (count == 6)
       EXPECT_EQ(elt, 42.);
     else
@@ -173,24 +173,24 @@ TEST(NativeMemRefJit, RankOneMemref) {
 }
 
 TEST(NativeMemRefJit, BasicMemref) {
-  constexpr int K = 3;
-  constexpr int M = 7;
+  constexpr int k = 3;
+  constexpr int m = 7;
   // Prepare arguments beforehand.
   auto init = [=](float &elt, ArrayRef<int64_t> indices) {
     assert(indices.size() == 2);
-    elt = M * indices[0] + indices[1];
+    elt = m * indices[0] + indices[1];
   };
-  int64_t shape[] = {K, M};
-  int64_t shapeAlloc[] = {K + 1, M + 1};
-  OwningMemRef<float, 2> A(shape, shapeAlloc, init);
-  ASSERT_EQ(A->sizes[0], K);
-  ASSERT_EQ(A->sizes[1], M);
-  ASSERT_EQ(A->strides[0], M + 1);
-  ASSERT_EQ(A->strides[1], 1);
-  for (int i = 0; i < K; ++i) {
-    for (int j = 0; j < M; ++j) {
-      EXPECT_EQ((A[{i, j}]), i * M + j);
-      EXPECT_EQ(&(A[{i, j}]), &((*A)[i][j]));
+  int64_t shape[] = {k, m};
+  int64_t shapeAlloc[] = {k + 1, m + 1};
+  OwningMemRef<float, 2> a(shape, shapeAlloc, init);
+  ASSERT_EQ(a->sizes[0], k);
+  ASSERT_EQ(a->sizes[1], m);
+  ASSERT_EQ(a->strides[0], m + 1);
+  ASSERT_EQ(a->strides[1], 1);
+  for (int i = 0; i < k; ++i) {
+    for (int j = 0; j < m; ++j) {
+      EXPECT_EQ((a[{i, j}]), i * m + j);
+      EXPECT_EQ(&(a[{i, j}]), &((*a)[i][j]));
     }
   }
   std::string moduleStr = R"mlir(
@@ -214,27 +214,27 @@ TEST(NativeMemRefJit, BasicMemref) {
   ASSERT_TRUE(!!jitOrError);
   std::unique_ptr<ExecutionEngine> jit = std::move(jitOrError.get());
 
-  llvm::Error error = jit->invoke("rank2_memref", &*A, &*A);
+  llvm::Error error = jit->invoke("rank2_memref", &*a, &*a);
   ASSERT_TRUE(!error);
-  EXPECT_EQ(((*A)[1][2]), 42.);
-  EXPECT_EQ((A[{2, 1}]), 42.);
+  EXPECT_EQ(((*a)[1][2]), 42.);
+  EXPECT_EQ((a[{2, 1}]), 42.);
 }
 
 // A helper function that will be called from the JIT
-static void memref_multiply(::StridedMemRefType<float, 2> *memref,
-                            int32_t coefficient) {
+static void memrefMultiply(::StridedMemRefType<float, 2> *memref,
+                           int32_t coefficient) {
   for (float &elt : *memref)
     elt *= coefficient;
 }
 
 TEST(NativeMemRefJit, JITCallback) {
-  constexpr int K = 2;
-  constexpr int M = 2;
-  int64_t shape[] = {K, M};
-  int64_t shapeAlloc[] = {K + 1, M + 1};
-  OwningMemRef<float, 2> A(shape, shapeAlloc);
+  constexpr int k = 2;
+  constexpr int m = 2;
+  int64_t shape[] = {k, m};
+  int64_t shapeAlloc[] = {k + 1, m + 1};
+  OwningMemRef<float, 2> a(shape, shapeAlloc);
   int count = 1;
-  for (float &elt : *A)
+  for (float &elt : *a)
     elt = count++;
 
   std::string moduleStr = R"mlir(
@@ -259,15 +259,15 @@ TEST(NativeMemRefJit, JITCallback) {
   jit->registerSymbols([&](llvm::orc::MangleAndInterner interner) {
     llvm::orc::SymbolMap symbolMap;
     symbolMap[interner("_mlir_ciface_callback")] =
-        llvm::JITEvaluatedSymbol::fromPointer(memref_multiply);
+        llvm::JITEvaluatedSymbol::fromPointer(memrefMultiply);
     return symbolMap;
   });
 
   int32_t coefficient = 3.;
-  llvm::Error error = jit->invoke("caller_for_callback", &*A, coefficient);
+  llvm::Error error = jit->invoke("caller_for_callback", &*a, coefficient);
   ASSERT_TRUE(!error);
   count = 1;
-  for (float elt : *A)
+  for (float elt : *a)
     ASSERT_EQ(elt, coefficient * count++);
 }
 

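The BasicMemref and JITCallback tests allocate a padded {k + 1, m + 1} buffer but expose a {k, m} view, which is why the assertions expect strides[0] == m + 1 and strides[1] == 1: advancing one row must skip the padding element. A minimal sketch of the strided addressing rule these tests rely on, assuming the StridedMemRefType layout from mlir/ExecutionEngine/CRunnerUtils.h:

  #include "mlir/ExecutionEngine/CRunnerUtils.h"
  #include <cstdint>

  // Element (i, j) of a rank-2 strided memref lives at
  //   data[offset + i * strides[0] + j * strides[1]]
  float &elementAt(StridedMemRefType<float, 2> &m, int64_t i, int64_t j) {
    return m.data[m.offset + i * m.strides[0] + j * m.strides[1]];
  }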
diff --git a/mlir/unittests/IR/OperationSupportTest.cpp b/mlir/unittests/IR/OperationSupportTest.cpp
index 759c00ae152c5..b5184a1f772cc 100644
--- a/mlir/unittests/IR/OperationSupportTest.cpp
+++ b/mlir/unittests/IR/OperationSupportTest.cpp
@@ -236,7 +236,7 @@ TEST(NamedAttrListTest, TestAppendAssign) {
   attrs.append("baz", b.getStringAttr("boo"));
 
   {
-    auto it = attrs.begin();
+    auto *it = attrs.begin();
     EXPECT_EQ(it->getName(), b.getStringAttr("foo"));
     EXPECT_EQ(it->getValue(), b.getStringAttr("bar"));
     ++it;
@@ -260,7 +260,7 @@ TEST(NamedAttrListTest, TestAppendAssign) {
   ASSERT_FALSE(dup.hasValue());
 
   {
-    auto it = attrs.begin();
+    auto *it = attrs.begin();
     EXPECT_EQ(it->getName(), b.getStringAttr("foo"));
     EXPECT_EQ(it->getValue(), b.getStringAttr("f"));
     ++it;

diff --git a/mlir/unittests/TableGen/StructsGenTest.cpp b/mlir/unittests/TableGen/StructsGenTest.cpp
index ead1156a9424b..3d46ebab9f9e5 100644
--- a/mlir/unittests/TableGen/StructsGenTest.cpp
+++ b/mlir/unittests/TableGen/StructsGenTest.cpp
@@ -18,7 +18,7 @@
 namespace mlir {
 
 /// Pull in generated enum utility declarations and definitions.
-#include "StructAttrGenTest.h.inc"
+#include "StructAttrGenTest.h.inc" // NOLINT
 #include "StructAttrGenTest.cpp.inc"
 
 /// Helper that returns an example test::TestStruct for testing its

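The lone StructsGenTest.cpp change shows the standard escape hatch: a trailing // NOLINT suppresses all clang-tidy diagnostics on that single line, the pragmatic choice when the complaint originates in generated code that cannot be fixed at the inclusion site. A sketch of both forms, with a hypothetical header and a placeholder check name:

  // Suppress every clang-tidy diagnostic on this line:
  #include "Generated.h.inc" // NOLINT
  // Or name a specific check and leave all others active:
  // #include "Generated.h.inc" // NOLINT(some-check-name)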