[flang-commits] [flang] 4c48f01 - [mlir][Affine][NFC] Wrap dialect in "affine" namespace

Matthias Springer via flang-commits flang-commits at lists.llvm.org
Wed Apr 19 19:19:53 PDT 2023


Author: Matthias Springer
Date: 2023-04-20T11:19:21+09:00
New Revision: 4c48f016effde67d500fc95290096aec9f3bdb70

URL: https://github.com/llvm/llvm-project/commit/4c48f016effde67d500fc95290096aec9f3bdb70
DIFF: https://github.com/llvm/llvm-project/commit/4c48f016effde67d500fc95290096aec9f3bdb70.diff

LOG: [mlir][Affine][NFC] Wrap dialect in "affine" namespace

This cleanup aligns the affine dialect with all the other dialects.

Differential Revision: https://reviews.llvm.org/D148687

Added: 
    

Modified: 
    flang/include/flang/Optimizer/Support/InitFIR.h
    flang/include/flang/Optimizer/Transforms/Passes.td
    flang/lib/Optimizer/Transforms/AffineDemotion.cpp
    flang/lib/Optimizer/Transforms/AffinePromotion.cpp
    flang/lib/Optimizer/Transforms/CharacterConversion.cpp
    flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
    flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp
    mlir/docs/DialectConversion.md
    mlir/docs/Tutorials/Toy/Ch-5.md
    mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
    mlir/examples/toy/Ch5/toyc.cpp
    mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
    mlir/examples/toy/Ch6/toyc.cpp
    mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
    mlir/examples/toy/Ch7/toyc.cpp
    mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
    mlir/include/mlir/Conversion/Passes.td
    mlir/include/mlir/Conversion/SCFToGPU/SCFToGPU.h
    mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h
    mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
    mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
    mlir/include/mlir/Dialect/Affine/Analysis/NestedMatcher.h
    mlir/include/mlir/Dialect/Affine/Analysis/Utils.h
    mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h
    mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td
    mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
    mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
    mlir/include/mlir/Dialect/Affine/IR/AffineValueMap.h
    mlir/include/mlir/Dialect/Affine/LoopFusionUtils.h
    mlir/include/mlir/Dialect/Affine/LoopUtils.h
    mlir/include/mlir/Dialect/Affine/Passes.h
    mlir/include/mlir/Dialect/Affine/Passes.td
    mlir/include/mlir/Dialect/Affine/TransformOps/AffineTransformOps.h
    mlir/include/mlir/Dialect/Affine/Transforms/Transforms.h
    mlir/include/mlir/Dialect/Affine/Utils.h
    mlir/include/mlir/Dialect/Affine/ViewLikeInterfaceUtils.h
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
    mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
    mlir/include/mlir/Dialect/Linalg/Passes.td
    mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
    mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td
    mlir/include/mlir/Dialect/SCF/Transforms/Passes.td
    mlir/include/mlir/Dialect/SCF/Utils/AffineCanonicalizationUtils.h
    mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
    mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td
    mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
    mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
    mlir/include/mlir/InitAllDialects.h
    mlir/include/mlir/InitAllPasses.h
    mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
    mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
    mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
    mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp
    mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
    mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
    mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
    mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
    mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
    mlir/lib/Dialect/Affine/Analysis/NestedMatcher.cpp
    mlir/lib/Dialect/Affine/Analysis/Utils.cpp
    mlir/lib/Dialect/Affine/IR/AffineMemoryOpInterfaces.cpp
    mlir/lib/Dialect/Affine/IR/AffineOps.cpp
    mlir/lib/Dialect/Affine/IR/AffineValueMap.cpp
    mlir/lib/Dialect/Affine/IR/ValueBoundsOpInterfaceImpl.cpp
    mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp
    mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
    mlir/lib/Dialect/Affine/Transforms/AffineExpandIndexOps.cpp
    mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
    mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
    mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
    mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
    mlir/lib/Dialect/Affine/Transforms/DecomposeAffineOps.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
    mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
    mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
    mlir/lib/Dialect/Affine/Transforms/ReifyValueBounds.cpp
    mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
    mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
    mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
    mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
    mlir/lib/Dialect/Affine/Utils/Utils.cpp
    mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp
    mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp
    mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
    mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
    mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
    mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
    mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp
    mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
    mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
    mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
    mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
    mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
    mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
    mlir/lib/Dialect/Linalg/Transforms/Split.cpp
    mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
    mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
    mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
    mlir/lib/Dialect/Linalg/Utils/Utils.cpp
    mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp
    mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp
    mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
    mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp
    mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
    mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
    mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
    mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
    mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
    mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
    mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
    mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
    mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
    mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
    mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
    mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
    mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
    mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
    mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
    mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp
    mlir/lib/Dialect/Tensor/Utils/Utils.cpp
    mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
    mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
    mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
    mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
    mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
    mlir/test/lib/Analysis/TestMemRefBoundCheck.cpp
    mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp
    mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
    mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp
    mlir/test/lib/Dialect/Affine/TestAffineLoopUnswitching.cpp
    mlir/test/lib/Dialect/Affine/TestDecomposeAffineOps.cpp
    mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
    mlir/test/lib/Dialect/Affine/TestLoopMapping.cpp
    mlir/test/lib/Dialect/Affine/TestLoopPermutation.cpp
    mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
    mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
    mlir/test/lib/Dialect/GPU/TestGpuMemoryPromotion.cpp
    mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp
    mlir/test/lib/Dialect/Linalg/TestLinalgDecomposeOps.cpp
    mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
    mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
    mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
    mlir/test/lib/Dialect/Linalg/TestPadFusion.cpp
    mlir/test/lib/Dialect/MemRef/TestComposeSubView.cpp
    mlir/test/lib/Dialect/MemRef/TestMultiBuffer.cpp
    mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
    mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp
    mlir/unittests/Analysis/Presburger/Parser.h

Removed: 
    


################################################################################
diff --git a/flang/include/flang/Optimizer/Support/InitFIR.h b/flang/include/flang/Optimizer/Support/InitFIR.h
index 12bd80e7abd4f..948cba656bed8 100644
--- a/flang/include/flang/Optimizer/Support/InitFIR.h
+++ b/flang/include/flang/Optimizer/Support/InitFIR.h
@@ -27,7 +27,7 @@
 namespace fir::support {
 
 #define FLANG_NONCODEGEN_DIALECT_LIST                                          \
-  mlir::AffineDialect, FIROpsDialect, hlfir::hlfirDialect,                     \
+  mlir::affine::AffineDialect, FIROpsDialect, hlfir::hlfirDialect,             \
       mlir::acc::OpenACCDialect, mlir::omp::OpenMPDialect,                     \
       mlir::scf::SCFDialect, mlir::arith::ArithDialect,                        \
       mlir::cf::ControlFlowDialect, mlir::func::FuncDialect,                   \
@@ -63,25 +63,25 @@ inline void loadDialects(mlir::MLIRContext &context) {
 inline void registerMLIRPassesForFortranTools() {
   mlir::registerCanonicalizerPass();
   mlir::registerCSEPass();
-  mlir::registerAffineLoopFusionPass();
+  mlir::affine::registerAffineLoopFusionPass();
   mlir::registerLoopInvariantCodeMotionPass();
-  mlir::registerLoopCoalescingPass();
+  mlir::affine::registerLoopCoalescingPass();
   mlir::registerStripDebugInfoPass();
   mlir::registerPrintOpStatsPass();
   mlir::registerInlinerPass();
   mlir::registerSCCPPass();
-  mlir::registerAffineScalarReplacementPass();
+  mlir::affine::registerAffineScalarReplacementPass();
   mlir::registerSymbolDCEPass();
   mlir::registerLocationSnapshotPass();
-  mlir::registerAffinePipelineDataTransferPass();
+  mlir::affine::registerAffinePipelineDataTransferPass();
 
-  mlir::registerAffineVectorizePass();
-  mlir::registerAffineLoopUnrollPass();
-  mlir::registerAffineLoopUnrollAndJamPass();
-  mlir::registerSimplifyAffineStructuresPass();
-  mlir::registerAffineLoopInvariantCodeMotionPass();
-  mlir::registerAffineLoopTilingPass();
-  mlir::registerAffineDataCopyGenerationPass();
+  mlir::affine::registerAffineVectorizePass();
+  mlir::affine::registerAffineLoopUnrollPass();
+  mlir::affine::registerAffineLoopUnrollAndJamPass();
+  mlir::affine::registerSimplifyAffineStructuresPass();
+  mlir::affine::registerAffineLoopInvariantCodeMotionPass();
+  mlir::affine::registerAffineLoopTilingPass();
+  mlir::affine::registerAffineDataCopyGenerationPass();
 
   mlir::registerConvertAffineToStandardPass();
 }

diff --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td
index 1da69b5d57a88..e7a7c61b47972 100644
--- a/flang/include/flang/Optimizer/Transforms/Passes.td
+++ b/flang/include/flang/Optimizer/Transforms/Passes.td
@@ -64,7 +64,8 @@ def AffineDialectPromotion : Pass<"promote-to-affine", "::mlir::func::FuncOp"> {
   }];
   let constructor = "::fir::createPromoteToAffinePass()";
   let dependentDialects = [
-    "fir::FIROpsDialect", "mlir::func::FuncDialect", "mlir::AffineDialect"
+    "fir::FIROpsDialect", "mlir::func::FuncDialect",
+    "mlir::affine::AffineDialect"
   ];
 }
 
@@ -78,7 +79,8 @@ def AffineDialectDemotion : Pass<"demote-affine", "::mlir::func::FuncOp"> {
   }];
   let constructor = "::fir::createAffineDemotionPass()";
   let dependentDialects = [
-    "fir::FIROpsDialect", "mlir::func::FuncDialect", "mlir::AffineDialect"
+    "fir::FIROpsDialect", "mlir::func::FuncDialect",
+    "mlir::affine::AffineDialect"
   ];
 }
 

diff --git a/flang/lib/Optimizer/Transforms/AffineDemotion.cpp b/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
index 51b19ef780967..0c256deeca416 100644
--- a/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffineDemotion.cpp
@@ -46,16 +46,17 @@ using namespace mlir;
 
 namespace {
 
-class AffineLoadConversion : public OpConversionPattern<mlir::AffineLoadOp> {
+class AffineLoadConversion
+    : public OpConversionPattern<mlir::affine::AffineLoadOp> {
 public:
-  using OpConversionPattern<mlir::AffineLoadOp>::OpConversionPattern;
+  using OpConversionPattern<mlir::affine::AffineLoadOp>::OpConversionPattern;
 
   LogicalResult
-  matchAndRewrite(mlir::AffineLoadOp op, OpAdaptor adaptor,
+  matchAndRewrite(mlir::affine::AffineLoadOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     SmallVector<Value> indices(adaptor.getIndices());
-    auto maybeExpandedMap =
-        expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices);
+    auto maybeExpandedMap = affine::expandAffineMap(rewriter, op.getLoc(),
+                                                    op.getAffineMap(), indices);
     if (!maybeExpandedMap)
       return failure();
 
@@ -68,16 +69,17 @@ class AffineLoadConversion : public OpConversionPattern<mlir::AffineLoadOp> {
   }
 };
 
-class AffineStoreConversion : public OpConversionPattern<mlir::AffineStoreOp> {
+class AffineStoreConversion
+    : public OpConversionPattern<mlir::affine::AffineStoreOp> {
 public:
-  using OpConversionPattern<mlir::AffineStoreOp>::OpConversionPattern;
+  using OpConversionPattern<mlir::affine::AffineStoreOp>::OpConversionPattern;
 
   LogicalResult
-  matchAndRewrite(mlir::AffineStoreOp op, OpAdaptor adaptor,
+  matchAndRewrite(mlir::affine::AffineStoreOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     SmallVector<Value> indices(op.getIndices());
-    auto maybeExpandedMap =
-        expandAffineMap(rewriter, op.getLoc(), op.getAffineMap(), indices);
+    auto maybeExpandedMap = affine::expandAffineMap(rewriter, op.getLoc(),
+                                                    op.getAffineMap(), indices);
     if (!maybeExpandedMap)
       return failure();
 

diff --git a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
index 321a07892b0cd..af2200f6a7b02 100644
--- a/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
+++ b/flang/lib/Optimizer/Transforms/AffinePromotion.cpp
@@ -227,7 +227,7 @@ struct AffineIfCondition {
     if (auto blockArg = value.dyn_cast<mlir::BlockArgument>()) {
       affineArgs.push_back(value);
       if (isa<fir::DoLoopOp>(blockArg.getOwner()->getParentOp()) ||
-          isa<mlir::AffineForOp>(blockArg.getOwner()->getParentOp()))
+          isa<mlir::affine::AffineForOp>(blockArg.getOwner()->getParentOp()))
         return {mlir::getAffineDimExpr(dimCount++, value.getContext())};
       return {mlir::getAffineSymbolExpr(symCount++, value.getContext())};
     }
@@ -397,7 +397,7 @@ static void populateIndexArgs(fir::ArrayCoorOp acoOp,
 }
 
 /// Returns affine.apply and fir.convert from array_coor and gendims
-static std::pair<mlir::AffineApplyOp, fir::ConvertOp>
+static std::pair<affine::AffineApplyOp, fir::ConvertOp>
 createAffineOps(mlir::Value arrayRef, mlir::PatternRewriter &rewriter) {
   auto acoOp = arrayRef.getDefiningOp<ArrayCoorOp>();
   auto affineMap =
@@ -407,8 +407,8 @@ createAffineOps(mlir::Value arrayRef, mlir::PatternRewriter &rewriter) {
 
   populateIndexArgs(acoOp, indexArgs, rewriter);
 
-  auto affineApply = rewriter.create<mlir::AffineApplyOp>(acoOp.getLoc(),
-                                                          affineMap, indexArgs);
+  auto affineApply = rewriter.create<affine::AffineApplyOp>(
+      acoOp.getLoc(), affineMap, indexArgs);
   auto arrayElementType = coordinateArrayElement(acoOp);
   auto newType =
       mlir::MemRefType::get({mlir::ShapedType::kDynamic}, arrayElementType);
@@ -420,7 +420,7 @@ createAffineOps(mlir::Value arrayRef, mlir::PatternRewriter &rewriter) {
 static void rewriteLoad(fir::LoadOp loadOp, mlir::PatternRewriter &rewriter) {
   rewriter.setInsertionPoint(loadOp);
   auto affineOps = createAffineOps(loadOp.getMemref(), rewriter);
-  rewriter.replaceOpWithNewOp<mlir::AffineLoadOp>(
+  rewriter.replaceOpWithNewOp<affine::AffineLoadOp>(
       loadOp, affineOps.second.getResult(), affineOps.first.getResult());
 }
 
@@ -428,9 +428,9 @@ static void rewriteStore(fir::StoreOp storeOp,
                          mlir::PatternRewriter &rewriter) {
   rewriter.setInsertionPoint(storeOp);
   auto affineOps = createAffineOps(storeOp.getMemref(), rewriter);
-  rewriter.replaceOpWithNewOp<mlir::AffineStoreOp>(storeOp, storeOp.getValue(),
-                                                   affineOps.second.getResult(),
-                                                   affineOps.first.getResult());
+  rewriter.replaceOpWithNewOp<affine::AffineStoreOp>(
+      storeOp, storeOp.getValue(), affineOps.second.getResult(),
+      affineOps.first.getResult());
 }
 
 static void rewriteMemoryOps(Block *block, mlir::PatternRewriter &rewriter) {
@@ -483,7 +483,7 @@ class AffineLoopConversion : public mlir::OpRewritePattern<fir::DoLoopOp> {
   }
 
 private:
-  std::pair<mlir::AffineForOp, mlir::Value>
+  std::pair<affine::AffineForOp, mlir::Value>
   createAffineFor(fir::DoLoopOp op, mlir::PatternRewriter &rewriter) const {
     if (auto constantStep = constantIntegerLike(op.getStep()))
       if (*constantStep > 0)
@@ -492,10 +492,10 @@ class AffineLoopConversion : public mlir::OpRewritePattern<fir::DoLoopOp> {
   }
 
   // when step for the loop is positive compile time constant
-  std::pair<mlir::AffineForOp, mlir::Value>
+  std::pair<affine::AffineForOp, mlir::Value>
   positiveConstantStep(fir::DoLoopOp op, int64_t step,
                        mlir::PatternRewriter &rewriter) const {
-    auto affineFor = rewriter.create<mlir::AffineForOp>(
+    auto affineFor = rewriter.create<affine::AffineForOp>(
         op.getLoc(), ValueRange(op.getLowerBound()),
         mlir::AffineMap::get(0, 1,
                              mlir::getAffineSymbolExpr(0, op.getContext())),
@@ -506,14 +506,14 @@ class AffineLoopConversion : public mlir::OpRewritePattern<fir::DoLoopOp> {
     return std::make_pair(affineFor, affineFor.getInductionVar());
   }
 
-  std::pair<mlir::AffineForOp, mlir::Value>
+  std::pair<affine::AffineForOp, mlir::Value>
   genericBounds(fir::DoLoopOp op, mlir::PatternRewriter &rewriter) const {
     auto lowerBound = mlir::getAffineSymbolExpr(0, op.getContext());
     auto upperBound = mlir::getAffineSymbolExpr(1, op.getContext());
     auto step = mlir::getAffineSymbolExpr(2, op.getContext());
     mlir::AffineMap upperBoundMap = mlir::AffineMap::get(
         0, 3, (upperBound - lowerBound + step).floorDiv(step));
-    auto genericUpperBound = rewriter.create<mlir::AffineApplyOp>(
+    auto genericUpperBound = rewriter.create<affine::AffineApplyOp>(
         op.getLoc(), upperBoundMap,
         ValueRange({op.getLowerBound(), op.getUpperBound(), op.getStep()}));
     auto actualIndexMap = mlir::AffineMap::get(
@@ -521,7 +521,7 @@ class AffineLoopConversion : public mlir::OpRewritePattern<fir::DoLoopOp> {
         (lowerBound + mlir::getAffineDimExpr(0, op.getContext())) *
             mlir::getAffineSymbolExpr(1, op.getContext()));
 
-    auto affineFor = rewriter.create<mlir::AffineForOp>(
+    auto affineFor = rewriter.create<affine::AffineForOp>(
         op.getLoc(), ValueRange(),
         AffineMap::getConstantMap(0, op.getContext()),
         genericUpperBound.getResult(),
@@ -529,7 +529,7 @@ class AffineLoopConversion : public mlir::OpRewritePattern<fir::DoLoopOp> {
                              1 + mlir::getAffineSymbolExpr(0, op.getContext())),
         1);
     rewriter.setInsertionPointToStart(affineFor.getBody());
-    auto actualIndex = rewriter.create<mlir::AffineApplyOp>(
+    auto actualIndex = rewriter.create<affine::AffineApplyOp>(
         op.getLoc(), actualIndexMap,
         ValueRange(
             {affineFor.getInductionVar(), op.getLowerBound(), op.getStep()}));
@@ -558,7 +558,7 @@ class AffineIfConversion : public mlir::OpRewritePattern<fir::IfOp> {
               << "AffineIfConversion: couldn't calculate affine condition\n";);
       return failure();
     }
-    auto affineIf = rewriter.create<mlir::AffineIfOp>(
+    auto affineIf = rewriter.create<affine::AffineIfOp>(
         op.getLoc(), affineCondition.getIntegerSet(),
         affineCondition.getAffineArgs(), !op.getElseRegion().empty());
     rewriter.startRootUpdate(affineIf);
@@ -596,7 +596,7 @@ class AffineDialectPromotion
     patterns.insert<AffineIfConversion>(context, functionAnalysis);
     patterns.insert<AffineLoopConversion>(context, functionAnalysis);
     mlir::ConversionTarget target = *context;
-    target.addLegalDialect<mlir::AffineDialect, FIROpsDialect,
+    target.addLegalDialect<mlir::affine::AffineDialect, FIROpsDialect,
                            mlir::scf::SCFDialect, mlir::arith::ArithDialect,
                            mlir::func::FuncDialect>();
     target.addDynamicallyLegalOp<IfOp>([&functionAnalysis](fir::IfOp op) {

diff --git a/flang/lib/Optimizer/Transforms/CharacterConversion.cpp b/flang/lib/Optimizer/Transforms/CharacterConversion.cpp
index 4940092871cbb..2e8fc42487a5f 100644
--- a/flang/lib/Optimizer/Transforms/CharacterConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/CharacterConversion.cpp
@@ -110,7 +110,7 @@ class CharacterConversion
       mlir::RewritePatternSet patterns(context);
       patterns.insert<CharacterConvertConversion>(context);
       mlir::ConversionTarget target(*context);
-      target.addLegalDialect<mlir::AffineDialect, fir::FIROpsDialect,
+      target.addLegalDialect<mlir::affine::AffineDialect, fir::FIROpsDialect,
                              mlir::arith::ArithDialect,
                              mlir::func::FuncDialect>();
 

diff --git a/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp b/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
index 631989a05b57c..0944b184ca0d4 100644
--- a/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
+++ b/flang/lib/Optimizer/Transforms/ControlFlowConverter.cpp
@@ -316,8 +316,9 @@ class CfgConversion : public fir::impl::CFGConversionBase<CfgConversion> {
     patterns.insert<CfgLoopConv, CfgIfConv, CfgIterWhileConv>(
         context, forceLoopToExecuteOnce);
     mlir::ConversionTarget target(*context);
-    target.addLegalDialect<mlir::AffineDialect, mlir::cf::ControlFlowDialect,
-                           FIROpsDialect, mlir::func::FuncDialect>();
+    target.addLegalDialect<mlir::affine::AffineDialect,
+                           mlir::cf::ControlFlowDialect, FIROpsDialect,
+                           mlir::func::FuncDialect>();
 
     // apply the patterns
     target.addIllegalOp<ResultOp, DoLoopOp, IfOp, IterWhileOp>();

diff --git a/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp b/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp
index c562139d835a3..ab9f1d768b039 100644
--- a/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp
+++ b/flang/lib/Optimizer/Transforms/PolymorphicOpConversion.cpp
@@ -238,8 +238,9 @@ class PolymorphicOpConversion
     patterns.insert<SelectTypeConv>(context, moduleMutex);
     patterns.insert<DispatchOpConv>(context, bindingTables);
     mlir::ConversionTarget target(*context);
-    target.addLegalDialect<mlir::AffineDialect, mlir::cf::ControlFlowDialect,
-                           FIROpsDialect, mlir::func::FuncDialect>();
+    target.addLegalDialect<mlir::affine::AffineDialect,
+                           mlir::cf::ControlFlowDialect, FIROpsDialect,
+                           mlir::func::FuncDialect>();
 
     // apply the patterns
     target.addIllegalOp<SelectTypeOp>();

diff --git a/mlir/docs/DialectConversion.md b/mlir/docs/DialectConversion.md
index 9485064375b5b..a355d5a90e4d1 100644
--- a/mlir/docs/DialectConversion.md
+++ b/mlir/docs/DialectConversion.md
@@ -98,7 +98,8 @@ struct MyTarget : public ConversionTarget {
 
     /// Mark all operations within Affine dialect have dynamic legality
     /// constraints.
-    addDynamicallyLegalDialect<AffineDialect>([](Operation *op) { ... });
+    addDynamicallyLegalDialect<affine::AffineDialect>(
+        [](Operation *op) { ... });
 
     /// Mark `func.return` as dynamically legal, but provide a specific legality
     /// callback.

diff --git a/mlir/docs/Tutorials/Toy/Ch-5.md b/mlir/docs/Tutorials/Toy/Ch-5.md
index 70ffa32fef453..6fe0080d77294 100644
--- a/mlir/docs/Tutorials/Toy/Ch-5.md
+++ b/mlir/docs/Tutorials/Toy/Ch-5.md
@@ -64,7 +64,7 @@ void ToyToAffineLoweringPass::runOnOperation() {
   // We define the specific operations, or dialects, that are legal targets for
   // this lowering. In our case, we are lowering to a combination of the
   // `Affine`, `Arith`, `Func`, and `MemRef` dialects.
-  target.addLegalDialect<AffineDialect, arith::ArithDialect,
+  target.addLegalDialect<affine::AffineDialect, arith::ArithDialect,
                          func::FuncDialect, memref::MemRefDialect>();
 
   // We also define the Toy dialect as Illegal so that the conversion will fail

diff --git a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
index a40353e3fd8bb..988175508ca1b 100644
--- a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
@@ -75,14 +75,15 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
   // loop induction variables.
   SmallVector<int64_t, 4> lowerBounds(tensorType.getRank(), /*Value=*/0);
   SmallVector<int64_t, 4> steps(tensorType.getRank(), /*Value=*/1);
-  buildAffineLoopNest(
+  affine::buildAffineLoopNest(
       rewriter, loc, lowerBounds, tensorType.getShape(), steps,
       [&](OpBuilder &nestedBuilder, Location loc, ValueRange ivs) {
         // Call the processing function with the rewriter, the memref operands,
         // and the loop induction variables. This function will return the value
         // to store at the current index.
         Value valueToStore = processIteration(nestedBuilder, operands, ivs);
-        nestedBuilder.create<AffineStoreOp>(loc, valueToStore, alloc, ivs);
+        nestedBuilder.create<affine::AffineStoreOp>(loc, valueToStore, alloc,
+                                                    ivs);
       });
 
   // Replace this operation with the generated alloc.
@@ -113,9 +114,9 @@ struct BinaryOpLowering : public ConversionPattern {
 
                      // Generate loads for the element of 'lhs' and 'rhs' at the
                      // inner loop.
-                     auto loadedLhs = builder.create<AffineLoadOp>(
+                     auto loadedLhs = builder.create<affine::AffineLoadOp>(
                          loc, binaryAdaptor.getLhs(), loopIvs);
-                     auto loadedRhs = builder.create<AffineLoadOp>(
+                     auto loadedRhs = builder.create<affine::AffineLoadOp>(
                          loc, binaryAdaptor.getRhs(), loopIvs);
 
                      // Create the binary operation performed on the loaded
@@ -174,7 +175,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
       // The last dimension is the base case of the recursion, at this point
       // we store the element at the given index.
       if (dimension == valueShape.size()) {
-        rewriter.create<AffineStoreOp>(
+        rewriter.create<affine::AffineStoreOp>(
             loc, rewriter.create<arith::ConstantOp>(loc, *valueIt++), alloc,
             llvm::ArrayRef(indices));
         return;
@@ -291,8 +292,8 @@ struct TransposeOpLowering : public ConversionPattern {
                      // Transpose the elements by generating a load from the
                      // reverse indices.
                      SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
-                     return builder.create<AffineLoadOp>(loc, input,
-                                                         reverseIvs);
+                     return builder.create<affine::AffineLoadOp>(loc, input,
+                                                                 reverseIvs);
                    });
     return success();
   }
@@ -313,7 +314,8 @@ struct ToyToAffineLoweringPass
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ToyToAffineLoweringPass)
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, func::FuncDialect, memref::MemRefDialect>();
+    registry.insert<affine::AffineDialect, func::FuncDialect,
+                    memref::MemRefDialect>();
   }
   void runOnOperation() final;
 };
@@ -327,8 +329,9 @@ void ToyToAffineLoweringPass::runOnOperation() {
   // We define the specific operations, or dialects, that are legal targets for
   // this lowering. In our case, we are lowering to a combination of the
   // `Affine`, `Arith`, `Func`, and `MemRef` dialects.
-  target.addLegalDialect<AffineDialect, BuiltinDialect, arith::ArithDialect,
-                         func::FuncDialect, memref::MemRefDialect>();
+  target.addLegalDialect<affine::AffineDialect, BuiltinDialect,
+                         arith::ArithDialect, func::FuncDialect,
+                         memref::MemRefDialect>();
 
   // We also define the Toy dialect as Illegal so that the conversion will fail
   // if any of these operations are *not* converted. Given that we actually want

diff --git a/mlir/examples/toy/Ch5/toyc.cpp b/mlir/examples/toy/Ch5/toyc.cpp
index abe052ab5ebed..b0d4595ca40e7 100644
--- a/mlir/examples/toy/Ch5/toyc.cpp
+++ b/mlir/examples/toy/Ch5/toyc.cpp
@@ -148,8 +148,8 @@ int dumpMLIR() {
 
     // Add optimizations if enabled.
     if (enableOpt) {
-      optPM.addPass(mlir::createLoopFusionPass());
-      optPM.addPass(mlir::createAffineScalarReplacementPass());
+      optPM.addPass(mlir::affine::createLoopFusionPass());
+      optPM.addPass(mlir::affine::createAffineScalarReplacementPass());
     }
   }
 

diff --git a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
index a40353e3fd8bb..988175508ca1b 100644
--- a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
@@ -75,14 +75,15 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
   // loop induction variables.
   SmallVector<int64_t, 4> lowerBounds(tensorType.getRank(), /*Value=*/0);
   SmallVector<int64_t, 4> steps(tensorType.getRank(), /*Value=*/1);
-  buildAffineLoopNest(
+  affine::buildAffineLoopNest(
       rewriter, loc, lowerBounds, tensorType.getShape(), steps,
       [&](OpBuilder &nestedBuilder, Location loc, ValueRange ivs) {
         // Call the processing function with the rewriter, the memref operands,
         // and the loop induction variables. This function will return the value
         // to store at the current index.
         Value valueToStore = processIteration(nestedBuilder, operands, ivs);
-        nestedBuilder.create<AffineStoreOp>(loc, valueToStore, alloc, ivs);
+        nestedBuilder.create<affine::AffineStoreOp>(loc, valueToStore, alloc,
+                                                    ivs);
       });
 
   // Replace this operation with the generated alloc.
@@ -113,9 +114,9 @@ struct BinaryOpLowering : public ConversionPattern {
 
                      // Generate loads for the element of 'lhs' and 'rhs' at the
                      // inner loop.
-                     auto loadedLhs = builder.create<AffineLoadOp>(
+                     auto loadedLhs = builder.create<affine::AffineLoadOp>(
                          loc, binaryAdaptor.getLhs(), loopIvs);
-                     auto loadedRhs = builder.create<AffineLoadOp>(
+                     auto loadedRhs = builder.create<affine::AffineLoadOp>(
                          loc, binaryAdaptor.getRhs(), loopIvs);
 
                      // Create the binary operation performed on the loaded
@@ -174,7 +175,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
       // The last dimension is the base case of the recursion, at this point
       // we store the element at the given index.
       if (dimension == valueShape.size()) {
-        rewriter.create<AffineStoreOp>(
+        rewriter.create<affine::AffineStoreOp>(
             loc, rewriter.create<arith::ConstantOp>(loc, *valueIt++), alloc,
             llvm::ArrayRef(indices));
         return;
@@ -291,8 +292,8 @@ struct TransposeOpLowering : public ConversionPattern {
                      // Transpose the elements by generating a load from the
                      // reverse indices.
                      SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
-                     return builder.create<AffineLoadOp>(loc, input,
-                                                         reverseIvs);
+                     return builder.create<affine::AffineLoadOp>(loc, input,
+                                                                 reverseIvs);
                    });
     return success();
   }
@@ -313,7 +314,8 @@ struct ToyToAffineLoweringPass
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ToyToAffineLoweringPass)
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, func::FuncDialect, memref::MemRefDialect>();
+    registry.insert<affine::AffineDialect, func::FuncDialect,
+                    memref::MemRefDialect>();
   }
   void runOnOperation() final;
 };
@@ -327,8 +329,9 @@ void ToyToAffineLoweringPass::runOnOperation() {
   // We define the specific operations, or dialects, that are legal targets for
   // this lowering. In our case, we are lowering to a combination of the
   // `Affine`, `Arith`, `Func`, and `MemRef` dialects.
-  target.addLegalDialect<AffineDialect, BuiltinDialect, arith::ArithDialect,
-                         func::FuncDialect, memref::MemRefDialect>();
+  target.addLegalDialect<affine::AffineDialect, BuiltinDialect,
+                         arith::ArithDialect, func::FuncDialect,
+                         memref::MemRefDialect>();
 
   // We also define the Toy dialect as Illegal so that the conversion will fail
   // if any of these operations are *not* converted. Given that we actually want

diff --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
index f19744b36e642..c58779c7c8a57 100644
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -166,8 +166,8 @@ int loadAndProcessMLIR(mlir::MLIRContext &context,
 
     // Add optimizations if enabled.
     if (enableOpt) {
-      optPM.addPass(mlir::createLoopFusionPass());
-      optPM.addPass(mlir::createAffineScalarReplacementPass());
+      optPM.addPass(mlir::affine::createLoopFusionPass());
+      optPM.addPass(mlir::affine::createAffineScalarReplacementPass());
     }
   }
 

diff --git a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
index a40353e3fd8bb..988175508ca1b 100644
--- a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
@@ -75,14 +75,15 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
   // loop induction variables.
   SmallVector<int64_t, 4> lowerBounds(tensorType.getRank(), /*Value=*/0);
   SmallVector<int64_t, 4> steps(tensorType.getRank(), /*Value=*/1);
-  buildAffineLoopNest(
+  affine::buildAffineLoopNest(
       rewriter, loc, lowerBounds, tensorType.getShape(), steps,
       [&](OpBuilder &nestedBuilder, Location loc, ValueRange ivs) {
         // Call the processing function with the rewriter, the memref operands,
         // and the loop induction variables. This function will return the value
         // to store at the current index.
         Value valueToStore = processIteration(nestedBuilder, operands, ivs);
-        nestedBuilder.create<AffineStoreOp>(loc, valueToStore, alloc, ivs);
+        nestedBuilder.create<affine::AffineStoreOp>(loc, valueToStore, alloc,
+                                                    ivs);
       });
 
   // Replace this operation with the generated alloc.
@@ -113,9 +114,9 @@ struct BinaryOpLowering : public ConversionPattern {
 
                      // Generate loads for the element of 'lhs' and 'rhs' at the
                      // inner loop.
-                     auto loadedLhs = builder.create<AffineLoadOp>(
+                     auto loadedLhs = builder.create<affine::AffineLoadOp>(
                          loc, binaryAdaptor.getLhs(), loopIvs);
-                     auto loadedRhs = builder.create<AffineLoadOp>(
+                     auto loadedRhs = builder.create<affine::AffineLoadOp>(
                          loc, binaryAdaptor.getRhs(), loopIvs);
 
                      // Create the binary operation performed on the loaded
@@ -174,7 +175,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
       // The last dimension is the base case of the recursion, at this point
       // we store the element at the given index.
       if (dimension == valueShape.size()) {
-        rewriter.create<AffineStoreOp>(
+        rewriter.create<affine::AffineStoreOp>(
             loc, rewriter.create<arith::ConstantOp>(loc, *valueIt++), alloc,
             llvm::ArrayRef(indices));
         return;
@@ -291,8 +292,8 @@ struct TransposeOpLowering : public ConversionPattern {
                      // Transpose the elements by generating a load from the
                      // reverse indices.
                      SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
-                     return builder.create<AffineLoadOp>(loc, input,
-                                                         reverseIvs);
+                     return builder.create<affine::AffineLoadOp>(loc, input,
+                                                                 reverseIvs);
                    });
     return success();
   }
@@ -313,7 +314,8 @@ struct ToyToAffineLoweringPass
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ToyToAffineLoweringPass)
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, func::FuncDialect, memref::MemRefDialect>();
+    registry.insert<affine::AffineDialect, func::FuncDialect,
+                    memref::MemRefDialect>();
   }
   void runOnOperation() final;
 };
@@ -327,8 +329,9 @@ void ToyToAffineLoweringPass::runOnOperation() {
   // We define the specific operations, or dialects, that are legal targets for
   // this lowering. In our case, we are lowering to a combination of the
   // `Affine`, `Arith`, `Func`, and `MemRef` dialects.
-  target.addLegalDialect<AffineDialect, BuiltinDialect, arith::ArithDialect,
-                         func::FuncDialect, memref::MemRefDialect>();
+  target.addLegalDialect<affine::AffineDialect, BuiltinDialect,
+                         arith::ArithDialect, func::FuncDialect,
+                         memref::MemRefDialect>();
 
   // We also define the Toy dialect as Illegal so that the conversion will fail
   // if any of these operations are *not* converted. Given that we actually want

diff --git a/mlir/examples/toy/Ch7/toyc.cpp b/mlir/examples/toy/Ch7/toyc.cpp
index a4990dcd65a1a..7dec734ac4ec1 100644
--- a/mlir/examples/toy/Ch7/toyc.cpp
+++ b/mlir/examples/toy/Ch7/toyc.cpp
@@ -167,8 +167,8 @@ int loadAndProcessMLIR(mlir::MLIRContext &context,
 
     // Add optimizations if enabled.
     if (enableOpt) {
-      optPM.addPass(mlir::createLoopFusionPass());
-      optPM.addPass(mlir::createAffineScalarReplacementPass());
+      optPM.addPass(mlir::affine::createLoopFusionPass());
+      optPM.addPass(mlir::affine::createAffineScalarReplacementPass());
     }
   }
 

diff --git a/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h b/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
index 8a0d33cbf7a00..3850a0088575e 100644
--- a/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
+++ b/mlir/include/mlir/Conversion/AffineToStandard/AffineToStandard.h
@@ -12,16 +12,18 @@
 #include "mlir/Support/LLVM.h"
 
 namespace mlir {
-class AffineForOp;
 class Location;
 struct LogicalResult;
 class OpBuilder;
 class Pass;
 class RewritePattern;
+class RewritePatternSet;
 class Value;
 class ValueRange;
 
-class RewritePatternSet;
+namespace affine {
+class AffineForOp;
+} // namespace affine
 
 #define GEN_PASS_DECL_CONVERTAFFINETOSTANDARD
 #include "mlir/Conversion/Passes.h.inc"
@@ -37,11 +39,11 @@ void populateAffineToVectorConversionPatterns(RewritePatternSet &patterns);
 
 /// Emit code that computes the lower bound of the given affine loop using
 /// standard arithmetic operations.
-Value lowerAffineLowerBound(AffineForOp op, OpBuilder &builder);
+Value lowerAffineLowerBound(affine::AffineForOp op, OpBuilder &builder);
 
 /// Emit code that computes the upper bound of the given affine loop using
 /// standard arithmetic operations.
-Value lowerAffineUpperBound(AffineForOp op, OpBuilder &builder);
+Value lowerAffineUpperBound(affine::AffineForOp op, OpBuilder &builder);
 
 /// Lowers affine control flow operations (ForStmt, IfStmt and AffineApplyOp)
 /// to equivalent lower-level constructs (flow of basic blocks and arithmetic

diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index 31489a2245eea..e14b25a78e569 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -856,7 +856,7 @@ def ConvertAffineForToGPU
 def ConvertParallelLoopToGpu : Pass<"convert-parallel-loops-to-gpu"> {
   let summary = "Convert mapped scf.parallel ops to gpu launch operations";
   let constructor = "mlir::createParallelLoopToGpuPass()";
-  let dependentDialects = ["AffineDialect", "gpu::GPUDialect"];
+  let dependentDialects = ["affine::AffineDialect", "gpu::GPUDialect"];
 }
 
 //===----------------------------------------------------------------------===//
@@ -1033,7 +1033,7 @@ def ConvertVectorToGPU : Pass<"convert-vector-to-gpu"> {
                 "dialect";
   let constructor = "mlir::createConvertVectorToGPUPass()";
   let dependentDialects = [
-    "memref::MemRefDialect", "gpu::GPUDialect", "AffineDialect",
+    "memref::MemRefDialect", "gpu::GPUDialect", "affine::AffineDialect",
     "vector::VectorDialect", "nvgpu::NVGPUDialect"
   ];
 
@@ -1052,7 +1052,7 @@ def ConvertVectorToSCF : Pass<"convert-vector-to-scf"> {
                 "dialect";
   let constructor = "mlir::createConvertVectorToSCFPass()";
   let dependentDialects = [
-    "AffineDialect",
+    "affine::AffineDialect",
     "memref::MemRefDialect",
     "scf::SCFDialect",
     "tensor::TensorDialect"

diff --git a/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPU.h b/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPU.h
index 83af4389af8cf..a180a12cad231 100644
--- a/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPU.h
+++ b/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPU.h
@@ -11,7 +11,6 @@
 #include "mlir/Support/LLVM.h"
 
 namespace mlir {
-class AffineForOp;
 class ConversionTarget;
 struct LogicalResult;
 class MLIRContext;
@@ -19,6 +18,10 @@ class Value;
 class Operation;
 class RewritePatternSet;
 
+namespace affine {
+class AffineForOp;
+} // namespace affine
+
 namespace scf {
 class ForOp;
 } // namespace scf
@@ -37,7 +40,7 @@ class ForOp;
 // TODO: Consider removing this in favor of affine.for -> affine.parallel
 // detection followed by an affine.parallel -> scf.parallel -> gpu.launch
 // conversion
-LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
+LogicalResult convertAffineLoopNestToGPULaunch(affine::AffineForOp forOp,
                                                unsigned numBlockDims,
                                                unsigned numThreadDims);
 

diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h b/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h
index 826c740c90cff..a27583877b603 100644
--- a/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h
@@ -21,13 +21,14 @@
 #include <optional>
 
 namespace mlir {
+class Operation;
 
+namespace affine {
 class AffineApplyOp;
 class AffineForOp;
 class AffineValueMap;
 class FlatAffineRelation;
 class FlatAffineValueConstraints;
-class Operation;
 
 /// A description of a (parallelizable) reduction in an affine loop.
 struct LoopReduction {
@@ -191,6 +192,7 @@ void getDependenceComponents(
     AffineForOp forOp, unsigned maxLoopDepth,
     std::vector<SmallVector<DependenceComponent, 2>> *depCompsVec);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_ANALYSIS_AFFINEANALYSIS_H

diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h b/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
index e59836444cc19..7c500f13895af 100644
--- a/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/AffineStructures.h
@@ -22,23 +22,24 @@
 #include <optional>
 
 namespace mlir {
-
-class AffineCondition;
-class AffineForOp;
-class AffineIfOp;
-class AffineParallelOp;
 class AffineMap;
-class AffineValueMap;
 class IntegerSet;
-class MLIRContext;
-class Value;
 class MemRefType;
+class MLIRContext;
 struct MutableAffineMap;
+class Value;
 
 namespace presburger {
 class MultiAffineFunction;
 } // namespace presburger
 
+namespace affine {
+class AffineCondition;
+class AffineForOp;
+class AffineIfOp;
+class AffineParallelOp;
+class AffineValueMap;
+
 /// FlatAffineValueConstraints is an extension of FlatLinearValueConstraints
 /// with helper functions for Affine dialect ops.
 class FlatAffineValueConstraints : public FlatLinearValueConstraints {
@@ -254,6 +255,7 @@ LogicalResult getRelationFromMap(AffineMap &map, FlatAffineRelation &rel);
 LogicalResult getRelationFromMap(const AffineValueMap &map,
                                  FlatAffineRelation &rel);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_ANALYSIS_AFFINESTRUCTURES_H

diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h b/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
index 07c01579930c3..0f39f03decdfa 100644
--- a/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h
@@ -18,16 +18,17 @@
 #include <optional>
 
 namespace mlir {
-
 class AffineExpr;
-class AffineForOp;
 class AffineMap;
 class BlockArgument;
 class MemRefType;
-class NestedPattern;
 class Operation;
 class Value;
 
+namespace affine {
+class AffineForOp;
+class NestedPattern;
+
 /// Returns the trip count of the loop as an affine map with its corresponding
 /// operands if the latter is expressible as an affine expression, and nullptr
 /// otherwise. This method always succeeds as long as the lower bound is not a
@@ -83,6 +84,7 @@ bool isVectorizableLoopBody(AffineForOp loop, int *memRefDim,
 // the support.
 bool isOpwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_ANALYSIS_LOOPANALYSIS_H

diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/NestedMatcher.h b/mlir/include/mlir/Dialect/Affine/Analysis/NestedMatcher.h
index 48453ef7f6cad..108f86226646a 100644
--- a/mlir/include/mlir/Dialect/Affine/Analysis/NestedMatcher.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/NestedMatcher.h
@@ -14,9 +14,10 @@
 #include "llvm/Support/Allocator.h"
 
 namespace mlir {
+class Operation;
 
+namespace affine {
 class NestedPattern;
-class Operation;
 
 /// An NestedPattern captures nested patterns in the IR.
 /// It is used in conjunction with a scoped NestedPatternContext which is an
@@ -191,6 +192,7 @@ bool isReductionLoop(Operation &op);
 bool isLoadOrStore(Operation &op);
 
 } // namespace matcher
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_ANALYSIS_NESTEDMATCHER_H

diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h b/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h
index c16fce39484a6..f86ae7a14de67 100644
--- a/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/Utils.h
@@ -22,15 +22,16 @@
 #include <optional>
 
 namespace mlir {
-
-class AffineForOp;
-class AffineValueMap;
 class Block;
 class Location;
-struct MemRefAccess;
 class Operation;
 class Value;
 
+namespace affine {
+class AffineForOp;
+class AffineValueMap;
+struct MemRefAccess;
+
 // LoopNestStateCollector walks loop nests and collects load and store
 // operations, and whether or not a region holding op other than ForOp and IfOp
 // was encountered in the loop nest.
@@ -576,6 +577,7 @@ FailureOr<AffineValueMap>
 simplifyConstrainedMinMaxOp(Operation *op,
                             FlatAffineValueConstraints constraints);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_ANALYSIS_UTILS_H

diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h b/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h
index 9fd70bb814c4d..b9208ef937cf0 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h
@@ -17,8 +17,6 @@
 #include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/OpDefinition.h"
 
-namespace mlir {
 #include "mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h.inc"
-} // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_IR_AFFINEMEMORYOPINTERFACES_H

diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td b/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td
index 497da9b2d0fe1..528421d14e8d9 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td
@@ -20,62 +20,59 @@ def AffineReadOpInterface : OpInterface<"AffineReadOpInterface"> {
       Interface to query characteristics of read-like ops with affine
       restrictions.
   }];
+  let cppNamespace = "::mlir::affine";
 
   let methods = [
     InterfaceMethod<
       /*desc=*/"Returns the memref operand to read from.",
-      /*retTy=*/"Value",
+      /*retTy=*/"::mlir::Value",
       /*methodName=*/"getMemRef",
       /*args=*/(ins),
       /*methodBody*/[{}],
       /*defaultImplementation=*/ [{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getOperand(op.getMemRefOperandIndex());
+        return $_op.getOperand($_op.getMemRefOperandIndex());
       }]
     >,
     InterfaceMethod<
       /*desc=*/"Returns the type of the memref operand.",
-      /*retTy=*/"MemRefType",
+      /*retTy=*/"::mlir::MemRefType",
       /*methodName=*/"getMemRefType",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getMemRef().getType().template cast<MemRefType>();
+        return $_op.getMemRef().getType().template cast<::mlir::MemRefType>();
       }]
     >,
     InterfaceMethod<
       /*desc=*/"Returns affine map operands.",
-      /*retTy=*/"Operation::operand_range",
+      /*retTy=*/"::mlir::Operation::operand_range",
       /*methodName=*/"getMapOperands",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return llvm::drop_begin(op.getOperands(), 1);
+        return llvm::drop_begin($_op.getOperands(), 1);
       }]
     >,
     InterfaceMethod<
       /*desc=*/[{
         Returns the affine map used to index the memref for this operation.
       }],
-      /*retTy=*/"AffineMap",
+      /*retTy=*/"::mlir::AffineMap",
       /*methodName=*/"getAffineMap",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getAffineMapAttr().getValue();
+        return $_op.getAffineMapAttr().getValue();
       }]
     >,
     InterfaceMethod<
       /*desc=*/"Returns the value read by this operation.",
-      /*retTy=*/"Value",
+      /*retTy=*/"::mlir::Value",
       /*methodName=*/"getValue",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        return cast<ConcreteOp>(this->getOperation());
+        return $_op;
       }]
     >,
   ];
@@ -86,63 +83,59 @@ def AffineWriteOpInterface : OpInterface<"AffineWriteOpInterface"> {
       Interface to query characteristics of write-like ops with affine
       restrictions.
   }];
+  let cppNamespace = "::mlir::affine";
 
   let methods = [
     InterfaceMethod<
       /*desc=*/"Returns the memref operand to write to.",
-      /*retTy=*/"Value",
+      /*retTy=*/"::mlir::Value",
       /*methodName=*/"getMemRef",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getOperand(op.getMemRefOperandIndex());
+        return $_op.getOperand($_op.getMemRefOperandIndex());
       }]
     >,
     InterfaceMethod<
       /*desc=*/"Returns the type of the memref operand.",
-      /*retTy=*/"MemRefType",
+      /*retTy=*/"::mlir::MemRefType",
       /*methodName=*/"getMemRefType",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getMemRef().getType().template cast<MemRefType>();
+        return $_op.getMemRef().getType().template cast<::mlir::MemRefType>();
       }]
     >,
     InterfaceMethod<
       /*desc=*/"Returns affine map operands.",
-      /*retTy=*/"Operation::operand_range",
+      /*retTy=*/"::mlir::Operation::operand_range",
       /*methodName=*/"getMapOperands",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return llvm::drop_begin(op.getOperands(), 2);
+        return llvm::drop_begin($_op.getOperands(), 2);
       }]
     >,
     InterfaceMethod<
       /*desc=*/[{
         Returns the affine map used to index the memref for this operation.
       }],
-      /*retTy=*/"AffineMap",
+      /*retTy=*/"::mlir::AffineMap",
       /*methodName=*/"getAffineMap",
       /*args=*/(ins),
       /*methodName=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getAffineMapAttr().getValue();
+        return $_op.getAffineMapAttr().getValue();
       }]
     >,
     InterfaceMethod<
       /*desc=*/"Returns the value to store.",
-      /*retTy=*/"Value",
+      /*retTy=*/"::mlir::Value",
       /*methodName=*/"getValueToStore",
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        return op.getOperand(op.getStoredValOperandIndex());
+        return $_op.getOperand($_op.getStoredValOperandIndex());
       }]
     >,
   ];
@@ -155,20 +148,21 @@ def AffineMapAccessInterface : OpInterface<"AffineMapAccessInterface"> {
       memref operand.  The memref argument given to this interface much match
       one of those memref operands.
   }];
+  let cppNamespace = "::mlir::affine";
 
   let methods = [
     InterfaceMethod<
       /*desc=*/"Returns the AffineMapAttr associated with 'memref'.",
-      /*retTy=*/"NamedAttribute",
+      /*retTy=*/"::mlir::NamedAttribute",
       /*methodName=*/"getAffineMapAttrForMemRef",
-      /*args=*/(ins "Value":$memref),
+      /*args=*/(ins "::mlir::Value":$memref),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
-        assert(memref == op.getMemRef() &&
+        assert(memref == $_op.getMemRef() &&
                "Expected memref argument to match memref operand");
-        return {StringAttr::get(op.getContext(), op.getMapAttrStrName()),
-                op.getAffineMapAttr()};
+        return {::mlir::StringAttr::get(
+                    $_op.getContext(), $_op.getMapAttrStrName()),
+                    $_op.getAffineMapAttr()};
       }]
     >,
   ];

diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
index 3d46bfd016a8c..008d398a080d6 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
@@ -22,13 +22,12 @@
 #include "mlir/Interfaces/LoopLikeInterface.h"
 
 namespace mlir {
+namespace affine {
+
 class AffineApplyOp;
 class AffineBound;
 class AffineValueMap;
 
-/// TODO: These should be renamed if they are on the mlir namespace.
-///       Ideally, they should go in a mlir::affine:: namespace.
-
 /// A utility function to check if a value is defined at the top level of an
 /// op with trait `AffineScope` or is a region argument for such an op. A value
 /// of index type defined at the top level is always a valid symbol for all its
@@ -438,13 +437,18 @@ SmallVector<Value, 4> applyMapToValues(OpBuilder &b, Location loc,
 /// argument.
 void fullyComposeAffineMapAndOperands(AffineMap *map,
                                       SmallVectorImpl<Value> *operands);
+
+} // namespace affine
 } // namespace mlir
+
 #include "mlir/Dialect/Affine/IR/AffineOpsDialect.h.inc"
 
 #define GET_OP_CLASSES
 #include "mlir/Dialect/Affine/IR/AffineOps.h.inc"
 
 namespace mlir {
+namespace affine {
+
 /// Returns true if the provided value is the induction variable of an
 /// AffineForOp.
 bool isAffineForInductionVar(Value val);
@@ -537,6 +541,7 @@ class AffineBound {
   friend class AffineForOp;
 };
 
+} // namespace affine
 } // namespace mlir
 
 #endif

diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
index d622d1e0a13b7..1ad10cc976dd5 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
@@ -21,7 +21,7 @@ include "mlir/Interfaces/SideEffectInterfaces.td"
 
 def Affine_Dialect : Dialect {
   let name = "affine";
-  let cppNamespace = "mlir";
+  let cppNamespace = "::mlir::affine";
   let hasConstantMaterializer = 1;
   let dependentDialects = ["arith::ArithDialect"];
 }

diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineValueMap.h b/mlir/include/mlir/Dialect/Affine/IR/AffineValueMap.h
index 05c810ec674a1..8439930a87467 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineValueMap.h
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineValueMap.h
@@ -18,6 +18,7 @@
 #include "mlir/IR/Value.h"
 
 namespace mlir {
+namespace affine {
 
 /// An AffineValueMap is an affine map plus its ML value operands and
 /// results for analysis purposes. The structure is still a tree form that is
@@ -89,6 +90,7 @@ class AffineValueMap {
   SmallVector<Value, 4> results;
 };
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_IR_AFFINEVALUEMAP_H

diff --git a/mlir/include/mlir/Dialect/Affine/LoopFusionUtils.h b/mlir/include/mlir/Dialect/Affine/LoopFusionUtils.h
index 486d142e0a20e..284d394ddceff 100644
--- a/mlir/include/mlir/Dialect/Affine/LoopFusionUtils.h
+++ b/mlir/include/mlir/Dialect/Affine/LoopFusionUtils.h
@@ -21,9 +21,11 @@
 #include "llvm/ADT/SmallVector.h"
 
 namespace mlir {
+class Operation;
+
+namespace affine {
 class AffineForOp;
 struct ComputationSliceState;
-class Operation;
 
 // TODO: Extend this module to include utility functions for querying fusion
 // cost/storage reduction, and for performing the loop fusion transformation.
@@ -165,6 +167,8 @@ bool getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats,
 void gatherProducerConsumerMemrefs(ArrayRef<Operation *> srcOps,
                                    ArrayRef<Operation *> dstOps,
                                    DenseSet<Value> &producerConsumerMemrefs);
+
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_LOOPFUSIONUTILS_H

diff --git a/mlir/include/mlir/Dialect/Affine/LoopUtils.h b/mlir/include/mlir/Dialect/Affine/LoopUtils.h
index 523accfd821ab..723a262f24acc 100644
--- a/mlir/include/mlir/Dialect/Affine/LoopUtils.h
+++ b/mlir/include/mlir/Dialect/Affine/LoopUtils.h
@@ -22,10 +22,8 @@
 #include <optional>
 
 namespace mlir {
-class AffineForOp;
 class AffineMap;
 class LoopLikeOpInterface;
-struct MemRefRegion;
 class OpBuilder;
 class Value;
 class ValueRange;
@@ -39,6 +37,10 @@ class ForOp;
 class ParallelOp;
 } // namespace scf
 
+namespace affine {
+class AffineForOp;
+struct MemRefRegion;
+
 /// Unrolls this for operation completely if the trip count is known to be
 /// constant. Returns failure otherwise.
 LogicalResult loopUnrollFull(AffineForOp forOp);
@@ -345,6 +347,7 @@ LogicalResult coalescePerfectlyNestedLoops(LoopOpTy op) {
   return result;
 }
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_LOOPUTILS_H

diff --git a/mlir/include/mlir/Dialect/Affine/Passes.h b/mlir/include/mlir/Dialect/Affine/Passes.h
index 9d253fb7660d0..61f24255f305f 100644
--- a/mlir/include/mlir/Dialect/Affine/Passes.h
+++ b/mlir/include/mlir/Dialect/Affine/Passes.h
@@ -23,6 +23,7 @@ namespace func {
 class FuncOp;
 } // namespace func
 
+namespace affine {
 class AffineForOp;
 
 /// Fusion mode to attempt. The default mode `Greedy` does both
@@ -123,6 +124,7 @@ std::unique_ptr<Pass> createAffineExpandIndexOpsPass();
 #define GEN_PASS_REGISTRATION
 #include "mlir/Dialect/Affine/Passes.h.inc"
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_PASSES_H

diff --git a/mlir/include/mlir/Dialect/Affine/Passes.td b/mlir/include/mlir/Dialect/Affine/Passes.td
index b3efdc5872b3d..891a6661ca87b 100644
--- a/mlir/include/mlir/Dialect/Affine/Passes.td
+++ b/mlir/include/mlir/Dialect/Affine/Passes.td
@@ -17,7 +17,7 @@ include "mlir/Pass/PassBase.td"
 
 def AffineDataCopyGeneration : Pass<"affine-data-copy-generate", "func::FuncOp"> {
   let summary = "Generate explicit copying for affine memory operations";
-  let constructor = "mlir::createAffineDataCopyGenerationPass()";
+  let constructor = "mlir::affine::createAffineDataCopyGenerationPass()";
   let dependentDialects = ["memref::MemRefDialect"];
   let options = [
     Option<"fastMemoryCapacity", "fast-mem-capacity", "uint64_t",
@@ -152,7 +152,7 @@ def AffineLoopFusion : Pass<"affine-loop-fusion"> {
     }
     ```
   }];
-  let constructor = "mlir::createLoopFusionPass()";
+  let constructor = "mlir::affine::createLoopFusionPass()";
   let options = [
     Option<"computeToleranceThreshold", "fusion-compute-tolerance", "double",
            /*default=*/"0.30f", "Fractional increase in additional computation "
@@ -166,12 +166,12 @@ def AffineLoopFusion : Pass<"affine-loop-fusion"> {
     Option<"maximalFusion", "fusion-maximal", "bool", /*default=*/"false",
            "Enables maximal loop fusion">,
     Option<"affineFusionMode", "mode", "enum FusionMode",
-           "mlir::FusionMode::Greedy", "fusion mode to attempt",
-           "llvm::cl::values(clEnumValN(mlir::FusionMode::Greedy,"
+           "mlir::affine::FusionMode::Greedy", "fusion mode to attempt",
+           "llvm::cl::values(clEnumValN(mlir::affine::FusionMode::Greedy,"
            " \"greedy\", \"Perform greedy (both producer-consumer and sibling)  fusion\"), "
-           "clEnumValN( mlir::FusionMode::ProducerConsumer, "
+           "clEnumValN( mlir::affine::FusionMode::ProducerConsumer, "
            "\"producer\", \"Perform only producer-consumer fusion\"), "
-           "clEnumValN( mlir::FusionMode::Sibling, "
+           "clEnumValN( mlir::affine::FusionMode::Sibling, "
            "\"sibling\", \"Perform only sibling fusion\"))">,
     ];
   let dependentDialects = ["memref::MemRefDialect"];
@@ -180,12 +180,12 @@ def AffineLoopFusion : Pass<"affine-loop-fusion"> {
 def AffineLoopInvariantCodeMotion
     : Pass<"affine-loop-invariant-code-motion", "func::FuncOp"> {
   let summary = "Hoist loop invariant instructions outside of affine loops";
-  let constructor = "mlir::createAffineLoopInvariantCodeMotionPass()";
+  let constructor = "mlir::affine::createAffineLoopInvariantCodeMotionPass()";
 }
 
 def AffineLoopTiling : Pass<"affine-loop-tile", "func::FuncOp"> {
   let summary = "Tile affine loop nests";
-  let constructor = "mlir::createLoopTilingPass()";
+  let constructor = "mlir::affine::createLoopTilingPass()";
   let options = [
     Option<"cacheSizeInKiB", "cache-size", "uint64_t", /*default=*/"512",
            "Set size of cache to tile for in KiB (default: 512)">,
@@ -201,7 +201,7 @@ def AffineLoopTiling : Pass<"affine-loop-tile", "func::FuncOp"> {
 
 def AffineLoopUnroll : Pass<"affine-loop-unroll", "func::FuncOp"> {
   let summary = "Unroll affine loops";
-  let constructor = "mlir::createLoopUnrollPass()";
+  let constructor = "mlir::affine::createLoopUnrollPass()";
   let options = [
     Option<"unrollFactor", "unroll-factor", "unsigned", /*default=*/"4",
            "Use this unroll factor for all loops being unrolled">,
@@ -221,7 +221,7 @@ def AffineLoopUnroll : Pass<"affine-loop-unroll", "func::FuncOp"> {
 
 def AffineLoopUnrollAndJam : Pass<"affine-loop-unroll-jam", "func::FuncOp"> {
   let summary = "Unroll and jam affine loops";
-  let constructor = "mlir::createLoopUnrollAndJamPass()";
+  let constructor = "mlir::affine::createLoopUnrollAndJamPass()";
   let options = [
     Option<"unrollJamFactor", "unroll-jam-factor", "unsigned",
            /*default=*/"4",
@@ -295,7 +295,7 @@ def AffinePipelineDataTransfer
     }
     ```
   }];
-  let constructor = "mlir::createPipelineDataTransferPass()";
+  let constructor = "mlir::affine::createPipelineDataTransferPass()";
 }
 
 def AffineScalarReplacement : Pass<"affine-scalrep", "func::FuncOp"> {
@@ -341,7 +341,7 @@ def AffineScalarReplacement : Pass<"affine-scalrep", "func::FuncOp"> {
     }
     ```
   }];
-  let constructor = "mlir::createAffineScalarReplacementPass()";
+  let constructor = "mlir::affine::createAffineScalarReplacementPass()";
 }
 
 def AffineVectorize : Pass<"affine-super-vectorize", "func::FuncOp"> {
@@ -369,7 +369,7 @@ def AffineVectorize : Pass<"affine-super-vectorize", "func::FuncOp"> {
 
 def AffineParallelize : Pass<"affine-parallelize", "func::FuncOp"> {
   let summary = "Convert affine.for ops into 1-D affine.parallel";
-  let constructor = "mlir::createAffineParallelizePass()";
+  let constructor = "mlir::affine::createAffineParallelizePass()";
   let options = [
     Option<"maxNested", "max-nested", "unsigned", /*default=*/"-1u",
            "Maximum number of nested parallel loops to produce. "
@@ -382,7 +382,7 @@ def AffineParallelize : Pass<"affine-parallelize", "func::FuncOp"> {
 
 def AffineLoopNormalize : Pass<"affine-loop-normalize", "func::FuncOp"> {
   let summary = "Apply normalization transformations to affine loop-like ops";
-  let constructor = "mlir::createAffineLoopNormalizePass()";
+  let constructor = "mlir::affine::createAffineLoopNormalizePass()";
   let options = [
     Option<"promoteSingleIter", "promote-single-iter", "bool",
            /*default=*/"true", "Promote single iteration loops">,
@@ -392,19 +392,19 @@ def AffineLoopNormalize : Pass<"affine-loop-normalize", "func::FuncOp"> {
 def LoopCoalescing : Pass<"affine-loop-coalescing", "func::FuncOp"> {
   let summary = "Coalesce nested loops with independent bounds into a single "
                 "loop";
-  let constructor = "mlir::createLoopCoalescingPass()";
+  let constructor = "mlir::affine::createLoopCoalescingPass()";
   let dependentDialects = ["arith::ArithDialect"];
 }
 
 def SimplifyAffineStructures : Pass<"affine-simplify-structures", "func::FuncOp"> {
   let summary = "Simplify affine expressions in maps/sets and normalize "
                 "memrefs";
-  let constructor = "mlir::createSimplifyAffineStructuresPass()";
+  let constructor = "mlir::affine::createSimplifyAffineStructuresPass()";
 }
 
 def AffineExpandIndexOps : Pass<"affine-expand-index-ops"> {
   let summary = "Lower affine operations operating on indices into more fundamental operations";
-  let constructor = "mlir::createAffineExpandIndexOpsPass()";
+  let constructor = "mlir::affine::createAffineExpandIndexOpsPass()";
 }
 
 #endif // MLIR_DIALECT_AFFINE_PASSES

diff  --git a/mlir/include/mlir/Dialect/Affine/TransformOps/AffineTransformOps.h b/mlir/include/mlir/Dialect/Affine/TransformOps/AffineTransformOps.h
index fbb3868ccbe92..4a27f3035e6be 100644
--- a/mlir/include/mlir/Dialect/Affine/TransformOps/AffineTransformOps.h
+++ b/mlir/include/mlir/Dialect/Affine/TransformOps/AffineTransformOps.h
@@ -15,12 +15,11 @@
 #include "mlir/IR/OpImplementation.h"
 
 namespace mlir {
-class AffineForOp;
 namespace func {
 class FuncOp;
 } // namespace func
 namespace affine {
-class ForOp;
+class AffineForOp;
 } // namespace affine
 } // namespace mlir
 

diff  --git a/mlir/include/mlir/Dialect/Affine/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Affine/Transforms/Transforms.h
index 70928813bab87..02938d1f7e6c4 100644
--- a/mlir/include/mlir/Dialect/Affine/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Affine/Transforms/Transforms.h
@@ -18,7 +18,6 @@
 #include "mlir/Support/LogicalResult.h"
 
 namespace mlir {
-class AffineApplyOp;
 class Location;
 class OpBuilder;
 class OpFoldResult;
@@ -30,6 +29,9 @@ namespace presburger {
 enum class BoundType;
 } // namespace presburger
 
+namespace affine {
+class AffineApplyOp;
+
 /// Populate patterns that expand affine index operations into more fundamental
 /// operations (not necessarily restricted to Affine dialect).
 void populateAffineExpandIndexOpsPatterns(RewritePatternSet &patterns);
@@ -83,6 +85,7 @@ FailureOr<OpFoldResult> reifyShapedValueDimBound(
     ValueBoundsConstraintSet::StopConditionFn stopCondition = nullptr,
     bool closedUB = false);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_TRANSFORMS_TRANSFORMS_H

diff  --git a/mlir/include/mlir/Dialect/Affine/Utils.h b/mlir/include/mlir/Dialect/Affine/Utils.h
index b6e60b064cca4..8e54a02a89105 100644
--- a/mlir/include/mlir/Dialect/Affine/Utils.h
+++ b/mlir/include/mlir/Dialect/Affine/Utils.h
@@ -18,10 +18,6 @@
 #include <optional>
 
 namespace mlir {
-
-class AffineForOp;
-class AffineIfOp;
-class AffineParallelOp;
 class DominanceInfo;
 class Operation;
 class PostDominanceInfo;
@@ -36,6 +32,11 @@ class AllocOp;
 
 struct LogicalResult;
 
+namespace affine {
+class AffineForOp;
+class AffineIfOp;
+class AffineParallelOp;
+
 using ReductionLoopMap = DenseMap<Operation *, SmallVector<LoopReduction, 2>>;
 
 /// Replaces a parallel affine.for op with a 1-d affine.parallel op. `forOp`'s
@@ -384,6 +385,7 @@ struct AffineBuilder {
   Location loc;
 };
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_UTILS_H

diff  --git a/mlir/include/mlir/Dialect/Affine/ViewLikeInterfaceUtils.h b/mlir/include/mlir/Dialect/Affine/ViewLikeInterfaceUtils.h
index 7a55fe97c064e..a4ec4cec08873 100644
--- a/mlir/include/mlir/Dialect/Affine/ViewLikeInterfaceUtils.h
+++ b/mlir/include/mlir/Dialect/Affine/ViewLikeInterfaceUtils.h
@@ -16,6 +16,8 @@
 namespace mlir {
 class RewriterBase;
 
+namespace affine {
+
 /// Fills the `combinedOffsets`, `combinedSizes` and `combinedStrides` to use
 /// when combining a producer slice **into** a consumer slice.
 ///
@@ -99,6 +101,7 @@ void resolveSizesIntoOpWithSizes(
     const llvm::SmallBitVector &rankReducedSourceDims,
     SmallVectorImpl<OpFoldResult> &resolvedSizes);
 
+} // namespace affine
 } // namespace mlir
 
 #endif // MLIR_DIALECT_AFFINE_VIEWLIKEINTERFACEUTILS_H

diff  --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
index 280bfdb380177..764992dfa9b5d 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
@@ -26,7 +26,7 @@ def Bufferization_Dialect : Dialect {
     deallocation](/docs/BufferDeallocationInternals/).
   }];
   let dependentDialects = [
-    "AffineDialect", "memref::MemRefDialect", "tensor::TensorDialect"
+    "affine::AffineDialect", "memref::MemRefDialect", "tensor::TensorDialect"
   ];
 
   let extraClassDeclaration = [{

diff  --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
index 1ee9d2ac241f6..c2d567f631666 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td
@@ -37,7 +37,7 @@ def Linalg_Dialect : Dialect {
   let cppNamespace = "::mlir::linalg";
   let dependentDialects = [
     "arith::ArithDialect",
-    "AffineDialect",
+    "affine::AffineDialect",
     "math::MathDialect",
     "memref::MemRefDialect",
     "tensor::TensorDialect",

diff  --git a/mlir/include/mlir/Dialect/Linalg/Passes.td b/mlir/include/mlir/Dialect/Linalg/Passes.td
index efef04f6d44f2..be4c870d4129c 100644
--- a/mlir/include/mlir/Dialect/Linalg/Passes.td
+++ b/mlir/include/mlir/Dialect/Linalg/Passes.td
@@ -37,7 +37,7 @@ def LinalgFoldUnitExtentDims : Pass<"linalg-fold-unit-extent-dims", ""> {
            "Generate rank-reducing slices instead of reassociative reshapes">
   ];
   let dependentDialects = [
-    "linalg::LinalgDialect", "AffineDialect", "memref::MemRefDialect"
+    "linalg::LinalgDialect", "affine::AffineDialect", "memref::MemRefDialect"
   ];
 }
 
@@ -45,7 +45,7 @@ def LinalgElementwiseOpFusion : Pass<"linalg-fuse-elementwise-ops"> {
   let summary = "Fuse elementwise operations on tensors";
   let constructor = "mlir::createLinalgElementwiseOpFusionPass()";
   let dependentDialects = [
-    "AffineDialect", "linalg::LinalgDialect", "memref::MemRefDialect"
+    "affine::AffineDialect", "linalg::LinalgDialect", "memref::MemRefDialect"
   ];
 }
 
@@ -68,7 +68,7 @@ def LinalgLowerToAffineLoops : Pass<"convert-linalg-to-affine-loops", "func::Fun
                 "loops";
   let constructor = "mlir::createConvertLinalgToAffineLoopsPass()";
   let dependentDialects = [
-    "AffineDialect", "linalg::LinalgDialect", "memref::MemRefDialect"];
+    "affine::AffineDialect", "linalg::LinalgDialect", "memref::MemRefDialect"];
 }
 
 def LinalgLowerToLoops : Pass<"convert-linalg-to-loops", "func::FuncOp"> {
@@ -77,7 +77,7 @@ def LinalgLowerToLoops : Pass<"convert-linalg-to-loops", "func::FuncOp"> {
   let dependentDialects = [
     "linalg::LinalgDialect",
     "scf::SCFDialect",
-    "AffineDialect"
+    "affine::AffineDialect"
   ];
 }
 
@@ -87,7 +87,7 @@ def LinalgLowerToParallelLoops
                 "loops";
   let constructor = "mlir::createConvertLinalgToParallelLoopsPass()";
   let dependentDialects = [
-    "AffineDialect",
+    "affine::AffineDialect",
     "linalg::LinalgDialect",
     "memref::MemRefDialect",
     "scf::SCFDialect"
@@ -98,7 +98,7 @@ def LinalgBufferize : Pass<"linalg-bufferize", "func::FuncOp"> {
   let summary = "Bufferize the linalg dialect";
   let constructor = "mlir::createLinalgBufferizePass()";
   let dependentDialects = [
-    "AffineDialect",
+    "affine::AffineDialect",
     "bufferization::BufferizationDialect",
     "linalg::LinalgDialect",
     "memref::MemRefDialect",

diff  --git a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
index 828a0625e594c..0d8b89fdf9539 100644
--- a/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/Linalg/Utils/Utils.h
@@ -18,10 +18,13 @@
 
 namespace mlir {
 class AffineExpr;
-class AffineForOp;
 class AffineMap;
 class PatternRewriter;
 
+namespace affine {
+class AffineForOp;
+} // namespace affine
+
 namespace tensor {
 class ExtractSliceOp;
 } // namespace tensor

diff  --git a/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td b/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td
index f339c7c346742..89f3db5357370 100644
--- a/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/MemRef/Transforms/Passes.td
@@ -24,7 +24,7 @@ def FoldMemRefAliasOps : Pass<"fold-memref-alias-ops"> {
   }];
   let constructor = "mlir::memref::createFoldMemRefAliasOpsPass()";
   let dependentDialects = [
-      "AffineDialect", "memref::MemRefDialect", "vector::VectorDialect"
+      "affine::AffineDialect", "memref::MemRefDialect", "vector::VectorDialect"
   ];
 }
 
@@ -156,7 +156,7 @@ def NormalizeMemRefs : Pass<"normalize-memrefs", "ModuleOp"> {
   ```
   }];
   let constructor = "mlir::memref::createNormalizeMemRefsPass()";
-  let dependentDialects = ["AffineDialect"];
+  let dependentDialects = ["affine::AffineDialect"];
 }
 
 def ResolveRankedShapeTypeResultDims :
@@ -184,7 +184,7 @@ def ResolveShapedTypeResultDims : Pass<"resolve-shaped-type-result-dims"> {
   }];
   let constructor = "mlir::memref::createResolveShapedTypeResultDimsPass()";
   let dependentDialects = [
-    "AffineDialect", "memref::MemRefDialect", "tensor::TensorDialect"
+    "affine::AffineDialect", "memref::MemRefDialect", "tensor::TensorDialect"
   ];
 }
 
@@ -199,7 +199,7 @@ def ExpandStridedMetadata : Pass<"expand-strided-metadata"> {
   }];
   let constructor = "mlir::memref::createExpandStridedMetadataPass()";
   let dependentDialects = [
-      "AffineDialect", "memref::MemRefDialect"
+      "affine::AffineDialect", "memref::MemRefDialect"
   ];
 }
 #endif // MLIR_DIALECT_MEMREF_TRANSFORMS_PASSES

diff  --git a/mlir/include/mlir/Dialect/SCF/Transforms/Passes.td b/mlir/include/mlir/Dialect/SCF/Transforms/Passes.td
index 16eb415c8cb61..bbc673f44977a 100644
--- a/mlir/include/mlir/Dialect/SCF/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/SCF/Transforms/Passes.td
@@ -24,7 +24,7 @@ def SCFForLoopCanonicalization
     : Pass<"scf-for-loop-canonicalization"> {
   let summary = "Canonicalize operations within scf.for loop bodies";
   let constructor = "mlir::createSCFForLoopCanonicalizationPass()";
-  let dependentDialects = ["AffineDialect", "tensor::TensorDialect",
+  let dependentDialects = ["affine::AffineDialect", "tensor::TensorDialect",
                            "memref::MemRefDialect"];
 }
 
@@ -37,7 +37,7 @@ def SCFForLoopPeeling : Pass<"scf-for-loop-peeling"> {
            "Do not peel loops inside of the last, partial iteration of another "
            "already peeled loop.">
   ];
-  let dependentDialects = ["AffineDialect"];
+  let dependentDialects = ["affine::AffineDialect"];
 }
 
 def SCFForLoopSpecialization : Pass<"scf-for-loop-specialization"> {
@@ -109,7 +109,7 @@ def SCFParallelLoopTiling : Pass<"scf-parallel-loop-tiling"> {
            "Perform tiling with fixed upper bound with inbound check "
            "inside the internal loops">
   ];
-  let dependentDialects = ["AffineDialect"];
+  let dependentDialects = ["affine::AffineDialect"];
 }
 
 def SCFForLoopRangeFolding : Pass<"scf-for-loop-range-folding"> {

diff  --git a/mlir/include/mlir/Dialect/SCF/Utils/AffineCanonicalizationUtils.h b/mlir/include/mlir/Dialect/SCF/Utils/AffineCanonicalizationUtils.h
index 88c93dbfd7f18..5022cbf898ec1 100644
--- a/mlir/include/mlir/Dialect/SCF/Utils/AffineCanonicalizationUtils.h
+++ b/mlir/include/mlir/Dialect/SCF/Utils/AffineCanonicalizationUtils.h
@@ -18,9 +18,7 @@
 #include "mlir/Support/LogicalResult.h"
 
 namespace mlir {
-class AffineApplyOp;
 class AffineMap;
-class FlatAffineValueConstraints;
 struct LogicalResult;
 class Operation;
 class OpFoldResult;
@@ -28,6 +26,10 @@ class RewriterBase;
 class Value;
 class ValueRange;
 
+namespace affine {
+class FlatAffineValueConstraints;
+} // namespace affine
+
 namespace scf {
 class IfOp;
 
@@ -45,7 +47,7 @@ LogicalResult matchForLikeLoop(Value iv, OpFoldResult &lb, OpFoldResult &ub,
 
 /// Populate the given constraint set with induction variable constraints of a
 /// "for" loop with the given range and step.
-LogicalResult addLoopRangeConstraints(FlatAffineValueConstraints &cstr,
+LogicalResult addLoopRangeConstraints(affine::FlatAffineValueConstraints &cstr,
                                       Value iv, OpFoldResult lb,
                                       OpFoldResult ub, OpFoldResult step);
 

diff  --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
index 91126f497b423..3ea68f9da6700 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
@@ -70,7 +70,7 @@ def SparsificationPass : Pass<"sparsification", "ModuleOp"> {
   }];
   let constructor = "mlir::createSparsificationPass()";
   let dependentDialects = [
-    "AffineDialect",
+    "affine::AffineDialect",
     "arith::ArithDialect",
     "bufferization::BufferizationDialect",
     "LLVM::LLVMDialect",

diff  --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td
index fe49f8db9810d..1231c0a67bc30 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td
+++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorBase.td
@@ -48,7 +48,7 @@ def Tensor_Dialect : Dialect {
   let hasCanonicalizer = 1;
   let hasConstantMaterializer = 1;
   let dependentDialects = [
-    "AffineDialect",
+    "affine::AffineDialect",
     "arith::ArithDialect",
     "complex::ComplexDialect",
   ];

diff  --git a/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td b/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
index b4673599a5def..4cc3844f29120 100644
--- a/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Tensor/Transforms/Passes.td
@@ -23,7 +23,7 @@ def FoldTensorSubsetOps : Pass<"fold-tensor-subset-ops"> {
   }];
   let constructor = "mlir::tensor::createFoldTensorSubsetOpsPass()";
   let dependentDialects = [
-      "AffineDialect", "tensor::TensorDialect", "vector::VectorDialect"
+      "affine::AffineDialect", "tensor::TensorDialect", "vector::VectorDialect"
   ];
 }
 

diff  --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index 3c9f1f41419ad..66f8cbbf4bdb6 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -18,8 +18,6 @@
 namespace mlir {
 
 // Forward declarations.
-class AffineApplyOp;
-class AffineForOp;
 class AffineMap;
 class Block;
 class Location;
@@ -30,6 +28,11 @@ class Value;
 class VectorType;
 class VectorTransferOpInterface;
 
+namespace affine {
+class AffineApplyOp;
+class AffineForOp;
+} // namespace affine
+
 namespace vector {
 /// Helper function that creates a memref::DimOp or tensor::DimOp depending on
 /// the type of `source`.

diff  --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h
index 4df45931d3a84..578594eee9dff 100644
--- a/mlir/include/mlir/InitAllDialects.h
+++ b/mlir/include/mlir/InitAllDialects.h
@@ -86,7 +86,7 @@ namespace mlir {
 inline void registerAllDialects(DialectRegistry &registry) {
   // clang-format off
   registry.insert<acc::OpenACCDialect,
-                  AffineDialect,
+                  affine::AffineDialect,
                   arith::ArithDialect,
                   amdgpu::AMDGPUDialect,
                   amx::AMXDialect,

diff  --git a/mlir/include/mlir/InitAllPasses.h b/mlir/include/mlir/InitAllPasses.h
index e517c17129430..216fc12323718 100644
--- a/mlir/include/mlir/InitAllPasses.h
+++ b/mlir/include/mlir/InitAllPasses.h
@@ -55,7 +55,7 @@ inline void registerAllPasses() {
   registerConversionPasses();
 
   // Dialect passes
-  registerAffinePasses();
+  affine::registerAffinePasses();
   registerAsyncPasses();
   arith::registerArithPasses();
   bufferization::registerBufferizationPasses();

diff  --git a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
index ebff3cc9a0c6f..debb7e804b652 100644
--- a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
+++ b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
@@ -30,6 +30,7 @@ namespace mlir {
 } // namespace mlir
 
 using namespace mlir;
+using namespace mlir::affine;
 using namespace mlir::vector;
 
 /// Given a range of values, emit the code that reduces them with "min" or "max"

diff  --git a/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp b/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
index 57419b7f1073a..f94da68e87b34 100644
--- a/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
+++ b/mlir/lib/Conversion/LinalgToStandard/LinalgToStandard.cpp
@@ -141,8 +141,9 @@ struct ConvertLinalgToStandardPass
 void ConvertLinalgToStandardPass::runOnOperation() {
   auto module = getOperation();
   ConversionTarget target(getContext());
-  target.addLegalDialect<AffineDialect, arith::ArithDialect, func::FuncDialect,
-                         memref::MemRefDialect, scf::SCFDialect>();
+  target.addLegalDialect<affine::AffineDialect, arith::ArithDialect,
+                         func::FuncDialect, memref::MemRefDialect,
+                         scf::SCFDialect>();
   target.addLegalOp<ModuleOp, func::FuncOp, func::ReturnOp>();
   RewritePatternSet patterns(&getContext());
   populateLinalgToStandardConversionPatterns(patterns);

diff  --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
index 54ae158983fa7..c1a57d37a20e0 100644
--- a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
+++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
@@ -36,6 +36,7 @@
 #define DEBUG_TYPE "loops-to-gpu"
 
 using namespace mlir;
+using namespace mlir::affine;
 using namespace mlir::scf;
 
 // Name of internal attribute to mark visited operations during conversion.

diff  --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp
index 48957dfa377cb..572753986b42e 100644
--- a/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp
+++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp
@@ -42,7 +42,7 @@ struct ForLoopMapper : public impl::ConvertAffineForToGPUBase<ForLoopMapper> {
   void runOnOperation() override {
     for (Operation &op : llvm::make_early_inc_range(
              getOperation().getFunctionBody().getOps())) {
-      if (auto forOp = dyn_cast<AffineForOp>(&op)) {
+      if (auto forOp = dyn_cast<affine::AffineForOp>(&op)) {
         if (failed(convertAffineLoopNestToGPULaunch(forOp, numBlockDims,
                                                     numThreadDims)))
           signalPassFailure();

diff  --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
index 7d643fff3cad1..5de3bef3af9d6 100644
--- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
+++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
@@ -66,7 +66,7 @@ static void getXferIndices(RewriterBase &rewriter, TransferOpType xferOp,
       SmallVector<Value, 3> dims(dimValues.begin(), dimValues.end());
       dims.push_back(prevIdx);
       AffineExpr d0 = rewriter.getAffineDimExpr(offsetMap.getNumDims());
-      indices[dim.getPosition()] = makeComposedAffineApply(
+      indices[dim.getPosition()] = affine::makeComposedAffineApply(
           rewriter, loc, d0 + offsetMap.getResult(offsetsIdx++), dims);
       continue;
     }

diff  --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
index ec2e2aa4c0624..1a47dd1610bec 100644
--- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
+++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
@@ -102,7 +102,8 @@ static void getXferIndices(OpBuilder &b, OpTy xferOp, Value iv,
     AffineExpr d0, d1;
     bindDims(xferOp.getContext(), d0, d1);
     Value offset = adaptor.getIndices()[*dim];
-    indices[*dim] = makeComposedAffineApply(b, loc, d0 + d1, {offset, iv});
+    indices[*dim] =
+        affine::makeComposedAffineApply(b, loc, d0 + d1, {offset, iv});
   }
 }
 
@@ -178,7 +179,8 @@ static Value generateInBoundsCheck(
     AffineExpr d0, d1;
     bindDims(xferOp.getContext(), d0, d1);
     Value base = xferOp.getIndices()[*dim];
-    Value memrefIdx = makeComposedAffineApply(b, loc, d0 + d1, {base, iv});
+    Value memrefIdx =
+        affine::makeComposedAffineApply(b, loc, d0 + d1, {base, iv});
     cond = lb.create<arith::CmpIOp>(arith::CmpIPredicate::sgt, memrefDim,
                                     memrefIdx);
   }
@@ -1111,7 +1113,8 @@ get1dMemrefIndices(OpBuilder &b, OpTy xferOp, Value iv,
     AffineExpr d0, d1;
     bindDims(xferOp.getContext(), d0, d1);
     Value offset = memrefIndices[dim];
-    memrefIndices[dim] = makeComposedAffineApply(b, loc, d0 + d1, {offset, iv});
+    memrefIndices[dim] =
+        affine::makeComposedAffineApply(b, loc, d0 + d1, {offset, iv});
     return dim;
   }
 

diff  --git a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
index da8f0883d7d5d..9b0ad3c119d04 100644
--- a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
@@ -31,6 +31,7 @@
 #define DEBUG_TYPE "affine-analysis"
 
 using namespace mlir;
+using namespace affine;
 using namespace presburger;
 
 /// Get the value that is being reduced by `pos`-th reduction in the loop if
@@ -78,7 +79,7 @@ static Value getSupportedReduction(AffineForOp forOp, unsigned pos,
 }
 
 /// Populate `supportedReductions` with descriptors of the supported reductions.
-void mlir::getSupportedReductions(
+void mlir::affine::getSupportedReductions(
     AffineForOp forOp, SmallVectorImpl<LoopReduction> &supportedReductions) {
   unsigned numIterArgs = forOp.getNumIterOperands();
   if (numIterArgs == 0)
@@ -94,8 +95,8 @@ void mlir::getSupportedReductions(
 /// Returns true if `forOp' is a parallel loop. If `parallelReductions` is
 /// provided, populates it with descriptors of the parallelizable reductions and
 /// treats them as not preventing parallelization.
-bool mlir::isLoopParallel(AffineForOp forOp,
-                          SmallVectorImpl<LoopReduction> *parallelReductions) {
+bool mlir::affine::isLoopParallel(
+    AffineForOp forOp, SmallVectorImpl<LoopReduction> *parallelReductions) {
   unsigned numIterArgs = forOp.getNumIterOperands();
 
   // Loop is not parallel if it has SSA loop-carried dependences and reduction
@@ -132,7 +133,7 @@ static bool isLocallyDefined(Value v, Operation *enclosingOp) {
   return viewOp && isLocallyDefined(viewOp.getViewSource(), enclosingOp);
 }
 
-bool mlir::isLoopMemoryParallel(AffineForOp forOp) {
+bool mlir::affine::isLoopMemoryParallel(AffineForOp forOp) {
   // Any memref-typed iteration arguments are treated as serializing.
   if (llvm::any_of(forOp.getResultTypes(),
                    [](Type type) { return type.isa<BaseMemRefType>(); }))
@@ -186,7 +187,7 @@ bool mlir::isLoopMemoryParallel(AffineForOp forOp) {
 /// and ending at operands which are not defined by AffineApplyOps.
 // TODO: Add a method to AffineApplyOp which forward substitutes the
 // AffineApplyOp into any user AffineApplyOps.
-void mlir::getReachableAffineApplyOps(
+void mlir::affine::getReachableAffineApplyOps(
     ArrayRef<Value> operands, SmallVectorImpl<Operation *> &affineApplyOps) {
   struct State {
     // The ssa value for this node in the DFS traversal.
@@ -236,8 +237,8 @@ void mlir::getReachableAffineApplyOps(
 // FlatAffineValueConstraints. (For eg., by using iv - lb % step = 0 and/or by
 // introducing a method in FlatAffineValueConstraints
 // setExprStride(ArrayRef<int64_t> expr, int64_t stride)
-LogicalResult mlir::getIndexSet(MutableArrayRef<Operation *> ops,
-                                FlatAffineValueConstraints *domain) {
+LogicalResult mlir::affine::getIndexSet(MutableArrayRef<Operation *> ops,
+                                        FlatAffineValueConstraints *domain) {
   SmallVector<Value, 4> indices;
   SmallVector<Operation *, 8> loopOps;
   size_t numDims = 0;
@@ -594,7 +595,7 @@ void MemRefAccess::getAccessMap(AffineValueMap *accessMap) const {
 //
 //
 // TODO: Support AffineExprs mod/floordiv/ceildiv.
-DependenceResult mlir::checkMemrefAccessDependence(
+DependenceResult mlir::affine::checkMemrefAccessDependence(
     const MemRefAccess &srcAccess, const MemRefAccess &dstAccess,
     unsigned loopDepth, FlatAffineValueConstraints *dependenceConstraints,
     SmallVector<DependenceComponent, 2> *dependenceComponents, bool allowRAR) {
@@ -671,7 +672,7 @@ DependenceResult mlir::checkMemrefAccessDependence(
 
 /// Gathers dependence components for dependences between all ops in loop nest
 /// rooted at 'forOp' at loop depths in range [1, maxLoopDepth].
-void mlir::getDependenceComponents(
+void mlir::affine::getDependenceComponents(
     AffineForOp forOp, unsigned maxLoopDepth,
     std::vector<SmallVector<DependenceComponent, 2>> *depCompsVec) {
   // Collect all load and store ops in loop nest rooted at 'forOp'.

diff  --git a/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp b/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
index f087dca20f34c..fc567b9bc645c 100644
--- a/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
@@ -31,6 +31,7 @@
 #define DEBUG_TYPE "affine-structures"
 
 using namespace mlir;
+using namespace affine;
 using namespace presburger;
 
 
@@ -489,8 +490,8 @@ void FlatAffineRelation::removeVarRange(VarKind kind, unsigned varStart,
     numRangeDims -= intersectRangeLHS - intersectRangeRHS;
 }
 
-LogicalResult mlir::getRelationFromMap(AffineMap &map,
-                                       FlatAffineRelation &rel) {
+LogicalResult mlir::affine::getRelationFromMap(AffineMap &map,
+                                               FlatAffineRelation &rel) {
   // Get flattened affine expressions.
   std::vector<SmallVector<int64_t, 8>> flatExprs;
   FlatAffineValueConstraints localVarCst;
@@ -525,8 +526,8 @@ LogicalResult mlir::getRelationFromMap(AffineMap &map,
   return success();
 }
 
-LogicalResult mlir::getRelationFromMap(const AffineValueMap &map,
-                                       FlatAffineRelation &rel) {
+LogicalResult mlir::affine::getRelationFromMap(const AffineValueMap &map,
+                                               FlatAffineRelation &rel) {
 
   AffineMap affineMap = map.getAffineMap();
   if (failed(getRelationFromMap(affineMap, rel)))

diff  --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
index 6b028b7691ddd..9db1e998bb165 100644
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -28,13 +28,14 @@
 #include <type_traits>
 
 using namespace mlir;
+using namespace mlir::affine;
 
 /// Returns the trip count of the loop as an affine expression if the latter is
 /// expressible as an affine expression, and nullptr otherwise. The trip count
 /// expression is simplified before returning. This method only utilizes map
 /// composition to construct lower and upper bounds before computing the trip
 /// count expressions.
-void mlir::getTripCountMapAndOperands(
+void mlir::affine::getTripCountMapAndOperands(
     AffineForOp forOp, AffineMap *tripCountMap,
     SmallVectorImpl<Value> *tripCountOperands) {
   MLIRContext *context = forOp.getContext();
@@ -83,7 +84,7 @@ void mlir::getTripCountMapAndOperands(
 /// otherwise. This method uses affine expression analysis (in turn using
 /// getTripCount) and is able to determine constant trip count in non-trivial
 /// cases.
-std::optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
+std::optional<uint64_t> mlir::affine::getConstantTripCount(AffineForOp forOp) {
   SmallVector<Value, 4> operands;
   AffineMap map;
   getTripCountMapAndOperands(forOp, &map, &operands);
@@ -109,7 +110,7 @@ std::optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
 /// Returns the greatest known integral divisor of the trip count. Affine
 /// expression analysis is used (indirectly through getTripCount), and
 /// this method is thus able to determine non-trivial divisors.
-uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) {
+uint64_t mlir::affine::getLargestDivisorOfTripCount(AffineForOp forOp) {
   SmallVector<Value, 4> operands;
   AffineMap map;
   getTripCountMapAndOperands(forOp, &map, &operands);
@@ -183,7 +184,8 @@ static bool isAccessIndexInvariant(Value iv, Value index) {
   return !composeOp.getAffineValueMap().isFunctionOf(0, iv);
 }
 
-DenseSet<Value> mlir::getInvariantAccesses(Value iv, ArrayRef<Value> indices) {
+DenseSet<Value> mlir::affine::getInvariantAccesses(Value iv,
+                                                   ArrayRef<Value> indices) {
   DenseSet<Value> res;
   for (auto val : indices) {
     if (isAccessIndexInvariant(iv, val)) {
@@ -335,8 +337,8 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
   return true;
 }
 
-bool mlir::isVectorizableLoopBody(AffineForOp loop, int *memRefDim,
-                                  NestedPattern &vectorTransferMatcher) {
+bool mlir::affine::isVectorizableLoopBody(
+    AffineForOp loop, int *memRefDim, NestedPattern &vectorTransferMatcher) {
   *memRefDim = -1;
   VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
     auto load = dyn_cast<AffineLoadOp>(op);
@@ -358,8 +360,8 @@ bool mlir::isVectorizableLoopBody(AffineForOp loop, int *memRefDim,
   return isVectorizableLoopBodyWithOpCond(loop, fun, vectorTransferMatcher);
 }
 
-bool mlir::isVectorizableLoopBody(AffineForOp loop,
-                                  NestedPattern &vectorTransferMatcher) {
+bool mlir::affine::isVectorizableLoopBody(
+    AffineForOp loop, NestedPattern &vectorTransferMatcher) {
   return isVectorizableLoopBodyWithOpCond(loop, nullptr, vectorTransferMatcher);
 }
 
@@ -368,7 +370,8 @@ bool mlir::isVectorizableLoopBody(AffineForOp loop,
 /// 'def' and all its uses have the same shift factor.
 // TODO: extend this to check for memory-based dependence violation when we have
 // the support.
-bool mlir::isOpwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts) {
+bool mlir::affine::isOpwiseShiftValid(AffineForOp forOp,
+                                      ArrayRef<uint64_t> shifts) {
   auto *forBody = forOp.getBody();
   assert(shifts.size() == forBody->getOperations().size());
 

diff  --git a/mlir/lib/Dialect/Affine/Analysis/NestedMatcher.cpp b/mlir/lib/Dialect/Affine/Analysis/NestedMatcher.cpp
index 0ba6f59d02508..be13a89c7ab4f 100644
--- a/mlir/lib/Dialect/Affine/Analysis/NestedMatcher.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/NestedMatcher.cpp
@@ -17,6 +17,7 @@
 #include "llvm/Support/raw_ostream.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 llvm::BumpPtrAllocator *&NestedMatch::allocator() {
   thread_local llvm::BumpPtrAllocator *allocator = nullptr;
@@ -130,6 +131,7 @@ static bool isAffineForOp(Operation &op) { return isa<AffineForOp>(op); }
 static bool isAffineIfOp(Operation &op) { return isa<AffineIfOp>(op); }
 
 namespace mlir {
+namespace affine {
 namespace matcher {
 
 NestedPattern Op(FilterFunctionType filter) {
@@ -176,4 +178,5 @@ bool isLoadOrStore(Operation &op) {
 }
 
 } // namespace matcher
+} // namespace affine
 } // namespace mlir

diff  --git a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
index 247b3786031ce..438296f25096f 100644
--- a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
@@ -28,6 +28,7 @@
 #define DEBUG_TYPE "analysis-utils"
 
 using namespace mlir;
+using namespace affine;
 using namespace presburger;
 
 using llvm::SmallDenseMap;
@@ -501,7 +502,8 @@ void MemRefDependenceGraph::print(raw_ostream &os) const {
   }
 }
 
-void mlir::getAffineForIVs(Operation &op, SmallVectorImpl<AffineForOp> *loops) {
+void mlir::affine::getAffineForIVs(Operation &op,
+                                   SmallVectorImpl<AffineForOp> *loops) {
   auto *currOp = op.getParentOp();
   AffineForOp currAffineForOp;
   // Traverse up the hierarchy collecting all 'affine.for' operation while
@@ -514,8 +516,8 @@ void mlir::getAffineForIVs(Operation &op, SmallVectorImpl<AffineForOp> *loops) {
   std::reverse(loops->begin(), loops->end());
 }
 
-void mlir::getEnclosingAffineOps(Operation &op,
-                                 SmallVectorImpl<Operation *> *ops) {
+void mlir::affine::getEnclosingAffineOps(Operation &op,
+                                         SmallVectorImpl<Operation *> *ops) {
   ops->clear();
   Operation *currOp = op.getParentOp();
 
@@ -1063,7 +1065,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
 }
 
 std::optional<int64_t>
-mlir::getMemRefIntOrFloatEltSizeInBytes(MemRefType memRefType) {
+mlir::affine::getMemRefIntOrFloatEltSizeInBytes(MemRefType memRefType) {
   auto elementType = memRefType.getElementType();
 
   unsigned sizeInBits;
@@ -1113,7 +1115,7 @@ std::optional<int64_t> MemRefRegion::getRegionSize() {
 /// into account size of the vector as well.
 //  TODO: improve/complete this when we have target data.
 std::optional<uint64_t>
-mlir::getIntOrFloatMemRefSizeInBytes(MemRefType memRefType) {
+mlir::affine::getIntOrFloatMemRefSizeInBytes(MemRefType memRefType) {
   if (!memRefType.hasStaticShape())
     return std::nullopt;
   auto elementType = memRefType.getElementType();
@@ -1130,8 +1132,8 @@ mlir::getIntOrFloatMemRefSizeInBytes(MemRefType memRefType) {
 }
 
 template <typename LoadOrStoreOp>
-LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOrStoreOp loadOrStoreOp,
-                                            bool emitError) {
+LogicalResult mlir::affine::boundCheckLoadOrStoreOp(LoadOrStoreOp loadOrStoreOp,
+                                                    bool emitError) {
   static_assert(llvm::is_one_of<LoadOrStoreOp, AffineReadOpInterface,
                                 AffineWriteOpInterface>::value,
                 "argument should be either a AffineReadOpInterface or a "
@@ -1186,9 +1188,11 @@ LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOrStoreOp loadOrStoreOp,
 
 // Explicitly instantiate the template so that the compiler knows we need them!
 template LogicalResult
-mlir::boundCheckLoadOrStoreOp(AffineReadOpInterface loadOp, bool emitError);
+mlir::affine::boundCheckLoadOrStoreOp(AffineReadOpInterface loadOp,
+                                      bool emitError);
 template LogicalResult
-mlir::boundCheckLoadOrStoreOp(AffineWriteOpInterface storeOp, bool emitError);
+mlir::affine::boundCheckLoadOrStoreOp(AffineWriteOpInterface storeOp,
+                                      bool emitError);
 
 // Returns in 'positions' the Block positions of 'op' in each ancestor
 // Block from the Block containing operation, stopping at 'limitBlock'.
@@ -1250,7 +1254,7 @@ static LogicalResult addMissingLoopIVBounds(SmallPtrSet<Value, 8> &ivs,
 
 /// Returns the innermost common loop depth for the set of operations in 'ops'.
 // TODO: Move this to LoopUtils.
-unsigned mlir::getInnermostCommonLoopDepth(
+unsigned mlir::affine::getInnermostCommonLoopDepth(
     ArrayRef<Operation *> ops, SmallVectorImpl<AffineForOp> *surroundingLoops) {
   unsigned numOps = ops.size();
   assert(numOps > 0 && "Expected at least one operation");
@@ -1282,10 +1286,10 @@ unsigned mlir::getInnermostCommonLoopDepth(
 /// then verifies if it is valid. Returns 'SliceComputationResult::Success' if
 /// union was computed correctly, an appropriate failure otherwise.
 SliceComputationResult
-mlir::computeSliceUnion(ArrayRef<Operation *> opsA, ArrayRef<Operation *> opsB,
-                        unsigned loopDepth, unsigned numCommonLoops,
-                        bool isBackwardSlice,
-                        ComputationSliceState *sliceUnion) {
+mlir::affine::computeSliceUnion(ArrayRef<Operation *> opsA,
+                                ArrayRef<Operation *> opsB, unsigned loopDepth,
+                                unsigned numCommonLoops, bool isBackwardSlice,
+                                ComputationSliceState *sliceUnion) {
   // Compute the union of slice bounds between all pairs in 'opsA' and
   // 'opsB' in 'sliceUnionCst'.
   FlatAffineValueConstraints sliceUnionCst;
@@ -1322,8 +1326,9 @@ mlir::computeSliceUnion(ArrayRef<Operation *> opsA, ArrayRef<Operation *> opsB,
 
       // Compute slice bounds for 'srcAccess' and 'dstAccess'.
       ComputationSliceState tmpSliceState;
-      mlir::getComputationSliceState(i, j, &dependenceConstraints, loopDepth,
-                                     isBackwardSlice, &tmpSliceState);
+      mlir::affine::getComputationSliceState(i, j, &dependenceConstraints,
+                                             loopDepth, isBackwardSlice,
+                                             &tmpSliceState);
 
       if (sliceUnionCst.getNumDimAndSymbolVars() == 0) {
         // Initialize 'sliceUnionCst' with the bounds computed in previous step.
@@ -1465,7 +1470,7 @@ static std::optional<uint64_t> getConstDifference(AffineMap lbMap,
 // nest surrounding represented by slice loop bounds in 'slice'. Returns true
 // on success, false otherwise (if a non-constant trip count was encountered).
 // TODO: Make this work with non-unit step loops.
-bool mlir::buildSliceTripCountMap(
+bool mlir::affine::buildSliceTripCountMap(
     const ComputationSliceState &slice,
     llvm::SmallDenseMap<Operation *, uint64_t, 8> *tripCountMap) {
   unsigned numSrcLoopIVs = slice.ivs.size();
@@ -1503,7 +1508,7 @@ bool mlir::buildSliceTripCountMap(
 }
 
 // Return the number of iterations in the given slice.
-uint64_t mlir::getSliceIterationCount(
+uint64_t mlir::affine::getSliceIterationCount(
     const llvm::SmallDenseMap<Operation *, uint64_t, 8> &sliceTripCountMap) {
   uint64_t iterCount = 1;
   for (const auto &count : sliceTripCountMap) {
@@ -1517,7 +1522,7 @@ const char *const kSliceFusionBarrierAttrName = "slice_fusion_barrier";
 // 'dependenceConstraints' at depth greater than 'loopDepth', and computes slice
 // bounds in 'sliceState' which represent the one loop nest's IVs in terms of
 // the other loop nest's IVs, symbols and constants (using 'isBackwardsSlice').
-void mlir::getComputationSliceState(
+void mlir::affine::getComputationSliceState(
     Operation *depSourceOp, Operation *depSinkOp,
     FlatAffineValueConstraints *dependenceConstraints, unsigned loopDepth,
     bool isBackwardSlice, ComputationSliceState *sliceState) {
@@ -1631,10 +1636,9 @@ void mlir::getComputationSliceState(
 // entire destination index set. Subtract out the dependent destination
 // iterations from destination index set and check for emptiness --- this is one
 // solution.
-AffineForOp
-mlir::insertBackwardComputationSlice(Operation *srcOpInst, Operation *dstOpInst,
-                                     unsigned dstLoopDepth,
-                                     ComputationSliceState *sliceState) {
+AffineForOp mlir::affine::insertBackwardComputationSlice(
+    Operation *srcOpInst, Operation *dstOpInst, unsigned dstLoopDepth,
+    ComputationSliceState *sliceState) {
   // Get loop nest surrounding src operation.
   SmallVector<AffineForOp, 4> srcLoopIVs;
   getAffineForIVs(*srcOpInst, &srcLoopIVs);
@@ -1713,7 +1717,7 @@ bool MemRefAccess::isStore() const {
 
 /// Returns the nesting depth of this statement, i.e., the number of loops
 /// surrounding this statement.
-unsigned mlir::getNestingDepth(Operation *op) {
+unsigned mlir::affine::getNestingDepth(Operation *op) {
   Operation *currOp = op;
   unsigned depth = 0;
   while ((currOp = currOp->getParentOp())) {
@@ -1741,7 +1745,7 @@ bool MemRefAccess::operator==(const MemRefAccess &rhs) const {
                       [](AffineExpr e) { return e == 0; });
 }
 
-void mlir::getAffineIVs(Operation &op, SmallVectorImpl<Value> &ivs) {
+void mlir::affine::getAffineIVs(Operation &op, SmallVectorImpl<Value> &ivs) {
   auto *currOp = op.getParentOp();
   AffineForOp currAffineForOp;
   // Traverse up the hierarchy collecting all 'affine.for' and affine.parallel
@@ -1758,7 +1762,8 @@ void mlir::getAffineIVs(Operation &op, SmallVectorImpl<Value> &ivs) {
 
 /// Returns the number of surrounding loops common to 'loopsA' and 'loopsB',
 /// where each lists loops from outer-most to inner-most in loop nest.
-unsigned mlir::getNumCommonSurroundingLoops(Operation &a, Operation &b) {
+unsigned mlir::affine::getNumCommonSurroundingLoops(Operation &a,
+                                                    Operation &b) {
   SmallVector<Value, 4> loopsA, loopsB;
   getAffineIVs(a, loopsA);
   getAffineIVs(b, loopsB);
@@ -1817,8 +1822,8 @@ static std::optional<int64_t> getMemoryFootprintBytes(Block &block,
   return totalSizeInBytes;
 }
 
-std::optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
-                                                     int memorySpace) {
+std::optional<int64_t> mlir::affine::getMemoryFootprintBytes(AffineForOp forOp,
+                                                             int memorySpace) {
   auto *forInst = forOp.getOperation();
   return ::getMemoryFootprintBytes(
       *forInst->getBlock(), Block::iterator(forInst),
@@ -1826,7 +1831,7 @@ std::optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
 }
 
 /// Returns whether a loop is parallel and contains a reduction loop.
-bool mlir::isLoopParallelAndContainsReduction(AffineForOp forOp) {
+bool mlir::affine::isLoopParallelAndContainsReduction(AffineForOp forOp) {
   SmallVector<LoopReduction> reductions;
   if (!isLoopParallel(forOp, &reductions))
     return false;
@@ -1835,8 +1840,8 @@ bool mlir::isLoopParallelAndContainsReduction(AffineForOp forOp) {
 
 /// Returns in 'sequentialLoops' all sequential loops in loop nest rooted
 /// at 'forOp'.
-void mlir::getSequentialLoops(AffineForOp forOp,
-                              llvm::SmallDenseSet<Value, 8> *sequentialLoops) {
+void mlir::affine::getSequentialLoops(
+    AffineForOp forOp, llvm::SmallDenseSet<Value, 8> *sequentialLoops) {
   forOp->walk([&](Operation *op) {
     if (auto innerFor = dyn_cast<AffineForOp>(op))
       if (!isLoopParallel(innerFor))
@@ -1844,7 +1849,7 @@ void mlir::getSequentialLoops(AffineForOp forOp,
   });
 }
 
-IntegerSet mlir::simplifyIntegerSet(IntegerSet set) {
+IntegerSet mlir::affine::simplifyIntegerSet(IntegerSet set) {
   FlatAffineValueConstraints fac(set);
   if (fac.isEmpty())
     return IntegerSet::getEmptySet(set.getNumDims(), set.getNumSymbols(),
@@ -1930,9 +1935,8 @@ static AffineMap addConstToResults(AffineMap map, int64_t val) {
 //    ... |     0 |       0 |  -1 |           ... |   ... |               = 0
 //      0 |     0 |       1 |  -1 |             0 |    -1 |              >= 0
 //
-FailureOr<AffineValueMap>
-mlir::simplifyConstrainedMinMaxOp(Operation *op,
-                                  FlatAffineValueConstraints constraints) {
+FailureOr<AffineValueMap> mlir::affine::simplifyConstrainedMinMaxOp(
+    Operation *op, FlatAffineValueConstraints constraints) {
   bool isMin = isa<AffineMinOp>(op);
   assert((isMin || isa<AffineMaxOp>(op)) && "expect AffineMin/MaxOp");
   MLIRContext *ctx = op->getContext();
@@ -2032,6 +2036,6 @@ mlir::simplifyConstrainedMinMaxOp(Operation *op,
                               newMap.getNumDims(), newMap.getNumSymbols());
     }
   }
-  mlir::canonicalizeMapAndOperands(&newMap, &newOperands);
+  affine::canonicalizeMapAndOperands(&newMap, &newOperands);
   return AffineValueMap(newMap, newOperands);
 }

diff  --git a/mlir/lib/Dialect/Affine/IR/AffineMemoryOpInterfaces.cpp b/mlir/lib/Dialect/Affine/IR/AffineMemoryOpInterfaces.cpp
index 33c1b3376d0a4..4890d4f298c2e 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineMemoryOpInterfaces.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineMemoryOpInterfaces.cpp
@@ -9,6 +9,7 @@
 #include "mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 //===----------------------------------------------------------------------===//
 // Affine Memory Op Interfaces

diff  --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index 90a42eced78f5..0ffb258f3f488 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -26,6 +26,7 @@
 #include <optional>
 
 using namespace mlir;
+using namespace mlir::affine;
 
 #define DEBUG_TYPE "affine-ops"
 
@@ -35,7 +36,7 @@ using namespace mlir;
 /// `region` or is an argument of `region`. A value of index type defined at the
 /// top level of a `AffineScope` region is always a valid symbol for all
 /// uses in that region.
-bool mlir::isTopLevelValue(Value value, Region *region) {
+bool mlir::affine::isTopLevelValue(Value value, Region *region) {
   if (auto arg = value.dyn_cast<BlockArgument>())
     return arg.getParentRegion() == region;
   return value.getDefiningOp()->getParentRegion() == region;
@@ -231,7 +232,7 @@ Operation *AffineDialect::materializeConstant(OpBuilder &builder,
 /// op with trait `AffineScope`. If the value is defined in an unlinked region,
 /// conservatively assume it is not top-level. A value of index type defined at
 /// the top level is always a valid symbol.
-bool mlir::isTopLevelValue(Value value) {
+bool mlir::affine::isTopLevelValue(Value value) {
   if (auto arg = value.dyn_cast<BlockArgument>()) {
     // The block owning the argument may be unlinked, e.g. when the surrounding
     // region has not yet been attached to an Op, at which point the parent Op
@@ -246,7 +247,7 @@ bool mlir::isTopLevelValue(Value value) {
 
 /// Returns the closest region enclosing `op` that is held by an operation with
 /// trait `AffineScope`; `nullptr` if there is no such region.
-Region *mlir::getAffineScope(Operation *op) {
+Region *mlir::affine::getAffineScope(Operation *op) {
   auto *curOp = op;
   while (auto *parentOp = curOp->getParentOp()) {
     if (parentOp->hasTrait<OpTrait::AffineScope>())
@@ -261,7 +262,7 @@ Region *mlir::getAffineScope(Operation *op) {
 // *) It is valid as a symbol.
 // *) It is an induction variable.
 // *) It is the result of affine apply operation with dimension id arguments.
-bool mlir::isValidDim(Value value) {
+bool mlir::affine::isValidDim(Value value) {
   // The value must be an index type.
   if (!value.getType().isIndex())
     return false;
@@ -281,7 +282,7 @@ bool mlir::isValidDim(Value value) {
 // *) It is valid as a symbol.
 // *) It is an induction variable.
 // *) It is the result of an affine apply operation with dimension id operands.
-bool mlir::isValidDim(Value value, Region *region) {
+bool mlir::affine::isValidDim(Value value, Region *region) {
   // The value must be an index type.
   if (!value.getType().isIndex())
     return false;
@@ -358,7 +359,7 @@ static bool isDimOpValidSymbol(ShapedDimOpInterface dimOp, Region *region) {
 // *) It is the result of an affine.apply operation with symbol operands.
 // *) It is a result of the dim op on a memref whose corresponding size is a
 //    valid symbol.
-bool mlir::isValidSymbol(Value value) {
+bool mlir::affine::isValidSymbol(Value value) {
   if (!value)
     return false;
 
@@ -387,7 +388,7 @@ bool mlir::isValidSymbol(Value value) {
 /// If `region` is null, conservatively assume the symbol definition scope does
 /// not exist and only accept the values that would be symbols regardless of
 /// the surrounding region structure, i.e. the first three cases above.
-bool mlir::isValidSymbol(Value value, Region *region) {
+bool mlir::affine::isValidSymbol(Value value, Region *region) {
   // The value must be an index type.
   if (!value.getType().isIndex())
     return false;
@@ -447,9 +448,8 @@ static void printDimAndSymbolList(Operation::operand_iterator begin,
 }
 
 /// Parses dimension and symbol list and returns true if parsing failed.
-ParseResult mlir::parseDimAndSymbolList(OpAsmParser &parser,
-                                        SmallVectorImpl<Value> &operands,
-                                        unsigned &numDims) {
+ParseResult mlir::affine::parseDimAndSymbolList(
+    OpAsmParser &parser, SmallVectorImpl<Value> &operands, unsigned &numDims) {
   SmallVector<OpAsmParser::UnresolvedOperand, 8> opInfos;
   if (parser.parseOperandList(opInfos, OpAsmParser::Delimiter::Paren))
     return failure();
@@ -541,7 +541,7 @@ LogicalResult AffineApplyOp::verify() {
 // its operands are valid dimension ids.
 bool AffineApplyOp::isValidDim() {
   return llvm::all_of(getOperands(),
-                      [](Value op) { return mlir::isValidDim(op); });
+                      [](Value op) { return affine::isValidDim(op); });
 }
 
 // The result of the affine apply operation can be used as a dimension id if all
@@ -556,14 +556,14 @@ bool AffineApplyOp::isValidDim(Region *region) {
 // operands are symbols.
 bool AffineApplyOp::isValidSymbol() {
   return llvm::all_of(getOperands(),
-                      [](Value op) { return mlir::isValidSymbol(op); });
+                      [](Value op) { return affine::isValidSymbol(op); });
 }
 
 // The result of the affine apply operation can be used as a symbol in `region`
 // if all its operands are symbols in `region`.
 bool AffineApplyOp::isValidSymbol(Region *region) {
   return llvm::all_of(getOperands(), [&](Value operand) {
-    return mlir::isValidSymbol(operand, region);
+    return affine::isValidSymbol(operand, region);
   });
 }
 
@@ -1071,8 +1071,8 @@ static void composeAffineMapAndOperands(AffineMap *map,
   *map = simplifyAffineMap(*map);
 }
 
-void mlir::fullyComposeAffineMapAndOperands(AffineMap *map,
-                                            SmallVectorImpl<Value> *operands) {
+void mlir::affine::fullyComposeAffineMapAndOperands(
+    AffineMap *map, SmallVectorImpl<Value> *operands) {
   while (llvm::any_of(*operands, [](Value v) {
     return isa_and_nonnull<AffineApplyOp>(v.getDefiningOp());
   })) {
@@ -1168,9 +1168,9 @@ createOrFold(OpBuilder &b, Location loc, ValueRange operands,
   return op->getResult(0);
 }
 
-AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc,
-                                            AffineMap map,
-                                            ValueRange operands) {
+AffineApplyOp mlir::affine::makeComposedAffineApply(OpBuilder &b, Location loc,
+                                                    AffineMap map,
+                                                    ValueRange operands) {
   AffineMap normalizedMap = map;
   SmallVector<Value, 8> normalizedOperands(operands.begin(), operands.end());
   composeAffineMapAndOperands(&normalizedMap, &normalizedOperands);
@@ -1178,8 +1178,9 @@ AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc,
   return b.create<AffineApplyOp>(loc, normalizedMap, normalizedOperands);
 }
 
-AffineApplyOp mlir::makeComposedAffineApply(OpBuilder &b, Location loc,
-                                            AffineExpr e, ValueRange values) {
+AffineApplyOp mlir::affine::makeComposedAffineApply(OpBuilder &b, Location loc,
+                                                    AffineExpr e,
+                                                    ValueRange values) {
   return makeComposedAffineApply(
       b, loc, AffineMap::inferFromExprList(ArrayRef<AffineExpr>{e}).front(),
       values);
@@ -1216,8 +1217,9 @@ static void composeMultiResultAffineMap(AffineMap &map,
 }
 
 OpFoldResult
-mlir::makeComposedFoldedAffineApply(OpBuilder &b, Location loc, AffineMap map,
-                                    ArrayRef<OpFoldResult> operands) {
+mlir::affine::makeComposedFoldedAffineApply(OpBuilder &b, Location loc,
+                                            AffineMap map,
+                                            ArrayRef<OpFoldResult> operands) {
   assert(map.getNumResults() == 1 && "building affine.apply with !=1 result");
 
   SmallVector<Operation *> constants;
@@ -1234,14 +1236,16 @@ mlir::makeComposedFoldedAffineApply(OpBuilder &b, Location loc, AffineMap map,
 }
 
 OpFoldResult
-mlir::makeComposedFoldedAffineApply(OpBuilder &b, Location loc, AffineExpr expr,
-                                    ArrayRef<OpFoldResult> operands) {
+mlir::affine::makeComposedFoldedAffineApply(OpBuilder &b, Location loc,
+                                            AffineExpr expr,
+                                            ArrayRef<OpFoldResult> operands) {
   return makeComposedFoldedAffineApply(
       b, loc, AffineMap::inferFromExprList(ArrayRef<AffineExpr>{expr}).front(),
       operands);
 }
 
-SmallVector<OpFoldResult> mlir::makeComposedFoldedMultiResultAffineApply(
+SmallVector<OpFoldResult>
+mlir::affine::makeComposedFoldedMultiResultAffineApply(
     OpBuilder &b, Location loc, AffineMap map,
     ArrayRef<OpFoldResult> operands) {
   return llvm::to_vector(llvm::map_range(
@@ -1251,8 +1255,8 @@ SmallVector<OpFoldResult> mlir::makeComposedFoldedMultiResultAffineApply(
       }));
 }
 
-Value mlir::makeComposedAffineMin(OpBuilder &b, Location loc, AffineMap map,
-                                  ValueRange operands) {
+Value mlir::affine::makeComposedAffineMin(OpBuilder &b, Location loc,
+                                          AffineMap map, ValueRange operands) {
   SmallVector<Value> allOperands = llvm::to_vector(operands);
   composeMultiResultAffineMap(map, allOperands);
   return b.createOrFold<AffineMinOp>(loc, b.getIndexType(), map, allOperands);
@@ -1277,14 +1281,16 @@ static OpFoldResult makeComposedFoldedMinMax(OpBuilder &b, Location loc,
 }
 
 OpFoldResult
-mlir::makeComposedFoldedAffineMin(OpBuilder &b, Location loc, AffineMap map,
-                                  ArrayRef<OpFoldResult> operands) {
+mlir::affine::makeComposedFoldedAffineMin(OpBuilder &b, Location loc,
+                                          AffineMap map,
+                                          ArrayRef<OpFoldResult> operands) {
   return makeComposedFoldedMinMax<AffineMinOp>(b, loc, map, operands);
 }
 
 OpFoldResult
-mlir::makeComposedFoldedAffineMax(OpBuilder &b, Location loc, AffineMap map,
-                                  ArrayRef<OpFoldResult> operands) {
+mlir::affine::makeComposedFoldedAffineMax(OpBuilder &b, Location loc,
+                                          AffineMap map,
+                                          ArrayRef<OpFoldResult> operands) {
   return makeComposedFoldedMinMax<AffineMaxOp>(b, loc, map, operands);
 }
 
@@ -1299,8 +1305,9 @@ static Value createFoldedComposedAffineApply(OpBuilder &b, Location loc,
   return b.createOrFold<AffineApplyOp>(loc, map, operands);
 }
 
-SmallVector<Value, 4> mlir::applyMapToValues(OpBuilder &b, Location loc,
-                                             AffineMap map, ValueRange values) {
+SmallVector<Value, 4> mlir::affine::applyMapToValues(OpBuilder &b, Location loc,
+                                                     AffineMap map,
+                                                     ValueRange values) {
   SmallVector<Value, 4> res;
   res.reserve(map.getNumResults());
   unsigned numDims = map.getNumDims(), numSym = map.getNumSymbols();
@@ -1436,13 +1443,13 @@ static void canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet,
   *operands = resultOperands;
 }
 
-void mlir::canonicalizeMapAndOperands(AffineMap *map,
-                                      SmallVectorImpl<Value> *operands) {
+void mlir::affine::canonicalizeMapAndOperands(
+    AffineMap *map, SmallVectorImpl<Value> *operands) {
   canonicalizeMapOrSetAndOperands<AffineMap>(map, operands);
 }
 
-void mlir::canonicalizeSetAndOperands(IntegerSet *set,
-                                      SmallVectorImpl<Value> *operands) {
+void mlir::affine::canonicalizeSetAndOperands(
+    IntegerSet *set, SmallVectorImpl<Value> *operands) {
   canonicalizeMapOrSetAndOperands<IntegerSet>(set, operands);
 }
 
@@ -2518,19 +2525,19 @@ Speculation::Speculatability AffineForOp::getSpeculatability() {
 
 /// Returns true if the provided value is the induction variable of a
 /// AffineForOp.
-bool mlir::isAffineForInductionVar(Value val) {
+bool mlir::affine::isAffineForInductionVar(Value val) {
   return getForInductionVarOwner(val) != AffineForOp();
 }
 
-bool mlir::isAffineParallelInductionVar(Value val) {
+bool mlir::affine::isAffineParallelInductionVar(Value val) {
   return getAffineParallelInductionVarOwner(val) != nullptr;
 }
 
-bool mlir::isAffineInductionVar(Value val) {
+bool mlir::affine::isAffineInductionVar(Value val) {
   return isAffineForInductionVar(val) || isAffineParallelInductionVar(val);
 }
 
-AffineForOp mlir::getForInductionVarOwner(Value val) {
+AffineForOp mlir::affine::getForInductionVarOwner(Value val) {
   auto ivArg = val.dyn_cast<BlockArgument>();
   if (!ivArg || !ivArg.getOwner())
     return AffineForOp();
@@ -2541,7 +2548,7 @@ AffineForOp mlir::getForInductionVarOwner(Value val) {
   return AffineForOp();
 }
 
-AffineParallelOp mlir::getAffineParallelInductionVarOwner(Value val) {
+AffineParallelOp mlir::affine::getAffineParallelInductionVarOwner(Value val) {
   auto ivArg = val.dyn_cast<BlockArgument>();
   if (!ivArg || !ivArg.getOwner())
     return nullptr;
@@ -2554,15 +2561,15 @@ AffineParallelOp mlir::getAffineParallelInductionVarOwner(Value val) {
 
 /// Extracts the induction variables from a list of AffineForOps and returns
 /// them.
-void mlir::extractForInductionVars(ArrayRef<AffineForOp> forInsts,
-                                   SmallVectorImpl<Value> *ivs) {
+void mlir::affine::extractForInductionVars(ArrayRef<AffineForOp> forInsts,
+                                           SmallVectorImpl<Value> *ivs) {
   ivs->reserve(forInsts.size());
   for (auto forInst : forInsts)
     ivs->push_back(forInst.getInductionVar());
 }
 
-void mlir::extractInductionVars(ArrayRef<mlir::Operation *> affineOps,
-                                SmallVectorImpl<mlir::Value> &ivs) {
+void mlir::affine::extractInductionVars(ArrayRef<mlir::Operation *> affineOps,
+                                        SmallVectorImpl<mlir::Value> &ivs) {
   ivs.reserve(affineOps.size());
   for (Operation *op : affineOps) {
     // Add constraints from forOp's bounds.
@@ -2640,7 +2647,7 @@ buildAffineLoopFromValues(OpBuilder &builder, Location loc, Value lb, Value ub,
                                      /*iterArgs=*/std::nullopt, bodyBuilderFn);
 }
 
-void mlir::buildAffineLoopNest(
+void mlir::affine::buildAffineLoopNest(
     OpBuilder &builder, Location loc, ArrayRef<int64_t> lbs,
     ArrayRef<int64_t> ubs, ArrayRef<int64_t> steps,
     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilderFn) {
@@ -2648,7 +2655,7 @@ void mlir::buildAffineLoopNest(
                           buildAffineLoopFromConstants);
 }
 
-void mlir::buildAffineLoopNest(
+void mlir::affine::buildAffineLoopNest(
     OpBuilder &builder, Location loc, ValueRange lbs, ValueRange ubs,
     ArrayRef<int64_t> steps,
     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilderFn) {
@@ -2656,11 +2663,12 @@ void mlir::buildAffineLoopNest(
                           buildAffineLoopFromValues);
 }
 
-AffineForOp mlir::replaceForOpWithNewYields(OpBuilder &b, AffineForOp loop,
-                                            ValueRange newIterOperands,
-                                            ValueRange newYieldedValues,
-                                            ValueRange newIterArgs,
-                                            bool replaceLoopResults) {
+AffineForOp mlir::affine::replaceForOpWithNewYields(OpBuilder &b,
+                                                    AffineForOp loop,
+                                                    ValueRange newIterOperands,
+                                                    ValueRange newYieldedValues,
+                                                    ValueRange newIterArgs,
+                                                    bool replaceLoopResults) {
   assert(newIterOperands.size() == newYieldedValues.size() &&
          "newIterOperands must be of the same size as newYieldedValues");
   // Create a new loop before the existing one, with the extra operands.

diff --git a/mlir/lib/Dialect/Affine/IR/AffineValueMap.cpp b/mlir/lib/Dialect/Affine/IR/AffineValueMap.cpp
index 7e2dfaf4db21c..2800237fd05ac 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineValueMap.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineValueMap.cpp
@@ -10,6 +10,7 @@
 #include "mlir/Dialect/Affine/IR/AffineOps.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 AffineValueMap::AffineValueMap(AffineMap map, ValueRange operands,
                                ValueRange results)

diff --git a/mlir/lib/Dialect/Affine/IR/ValueBoundsOpInterfaceImpl.cpp b/mlir/lib/Dialect/Affine/IR/ValueBoundsOpInterfaceImpl.cpp
index 0036023a8a015..97dd70e4f1d2b 100644
--- a/mlir/lib/Dialect/Affine/IR/ValueBoundsOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Affine/IR/ValueBoundsOpInterfaceImpl.cpp
@@ -12,6 +12,7 @@
 #include "mlir/Interfaces/ValueBoundsOpInterface.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace mlir {
 namespace {

diff --git a/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp b/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp
index 999adfad2ab5b..728da70b85731 100644
--- a/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp
+++ b/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp
@@ -19,6 +19,7 @@
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 using namespace mlir::transform;
 
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
index 716f95314eac9..529c58cb43e43 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
@@ -35,13 +35,16 @@
 #include <optional>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINEDATACOPYGENERATION
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-data-copy-generate"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 
@@ -57,7 +60,8 @@ namespace {
 // TODO: We currently can't generate copies correctly when stores
 // are strided. Check for strided stores.
 struct AffineDataCopyGeneration
-    : public impl::AffineDataCopyGenerationBase<AffineDataCopyGeneration> {
+    : public affine::impl::AffineDataCopyGenerationBase<
+          AffineDataCopyGeneration> {
   AffineDataCopyGeneration() = default;
   explicit AffineDataCopyGeneration(unsigned slowMemorySpace,
                                     unsigned fastMemorySpace,
@@ -85,17 +89,15 @@ struct AffineDataCopyGeneration
 /// by the latter. Only load op's handled for now.
 /// TODO: extend this to store op's.
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineDataCopyGenerationPass(unsigned slowMemorySpace,
-                                         unsigned fastMemorySpace,
-                                         unsigned tagMemorySpace,
-                                         int minDmaTransferSize,
-                                         uint64_t fastMemCapacityBytes) {
+mlir::affine::createAffineDataCopyGenerationPass(
+    unsigned slowMemorySpace, unsigned fastMemorySpace, unsigned tagMemorySpace,
+    int minDmaTransferSize, uint64_t fastMemCapacityBytes) {
   return std::make_unique<AffineDataCopyGeneration>(
       slowMemorySpace, fastMemorySpace, tagMemorySpace, minDmaTransferSize,
       fastMemCapacityBytes);
 }
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineDataCopyGenerationPass() {
+mlir::affine::createAffineDataCopyGenerationPass() {
   return std::make_unique<AffineDataCopyGeneration>();
 }
 

diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineExpandIndexOps.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineExpandIndexOps.cpp
index dfb33d79f8bc6..c6bc3862256a7 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineExpandIndexOps.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineExpandIndexOps.cpp
@@ -18,11 +18,14 @@
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINEEXPANDINDEXOPS
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 /// Lowers `affine.delinearize_index` into a sequence of division and remainder
@@ -43,7 +46,7 @@ struct LowerDelinearizeIndexOps
 };
 
 class ExpandAffineIndexOpsPass
-    : public impl::AffineExpandIndexOpsBase<ExpandAffineIndexOpsPass> {
+    : public affine::impl::AffineExpandIndexOpsBase<ExpandAffineIndexOpsPass> {
 public:
   ExpandAffineIndexOpsPass() = default;
 
@@ -59,10 +62,11 @@ class ExpandAffineIndexOpsPass
 
 } // namespace
 
-void mlir::populateAffineExpandIndexOpsPatterns(RewritePatternSet &patterns) {
+void mlir::affine::populateAffineExpandIndexOpsPatterns(
+    RewritePatternSet &patterns) {
   patterns.insert<LowerDelinearizeIndexOps>(patterns.getContext());
 }
 
-std::unique_ptr<Pass> mlir::createAffineExpandIndexOpsPass() {
+std::unique_ptr<Pass> mlir::affine::createAffineExpandIndexOpsPass() {
   return std::make_unique<ExpandAffineIndexOpsPass>();
 }

diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
index 3d8ea2e5d607d..3f599b060f365 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineLoopInvariantCodeMotion.cpp
@@ -34,13 +34,16 @@
 #include "llvm/Support/raw_ostream.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPINVARIANTCODEMOTION
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "licm"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 
@@ -50,7 +53,8 @@ namespace {
 /// TODO: This code should be removed once the new LICM pass can handle its
 ///       uses.
 struct LoopInvariantCodeMotion
-    : public impl::AffineLoopInvariantCodeMotionBase<LoopInvariantCodeMotion> {
+    : public affine::impl::AffineLoopInvariantCodeMotionBase<
+          LoopInvariantCodeMotion> {
   void runOnOperation() override;
   void runOnAffineForOp(AffineForOp forOp);
 };
@@ -71,9 +75,9 @@ areAllOpsInTheBlockListInvariant(Region &blockList, Value indVar,
                                  SmallPtrSetImpl<Operation *> &opsToHoist);
 
 // Returns true if the individual op is loop invariant.
-bool isOpLoopInvariant(Operation &op, Value indVar, ValueRange iterArgs,
-                       SmallPtrSetImpl<Operation *> &opsWithUsers,
-                       SmallPtrSetImpl<Operation *> &opsToHoist) {
+static bool isOpLoopInvariant(Operation &op, Value indVar, ValueRange iterArgs,
+                              SmallPtrSetImpl<Operation *> &opsWithUsers,
+                              SmallPtrSetImpl<Operation *> &opsToHoist) {
   LLVM_DEBUG(llvm::dbgs() << "iterating on op: " << op;);
 
   if (auto ifOp = dyn_cast<AffineIfOp>(op)) {
@@ -167,10 +171,11 @@ bool isOpLoopInvariant(Operation &op, Value indVar, ValueRange iterArgs,
 }
 
 // Checks if all ops in a region (i.e. list of blocks) are loop invariant.
-bool areAllOpsInTheBlockListInvariant(
-    Region &blockList, Value indVar, ValueRange iterArgs,
-    SmallPtrSetImpl<Operation *> &opsWithUsers,
-    SmallPtrSetImpl<Operation *> &opsToHoist) {
+static bool
+areAllOpsInTheBlockListInvariant(Region &blockList, Value indVar,
+                                 ValueRange iterArgs,
+                                 SmallPtrSetImpl<Operation *> &opsWithUsers,
+                                 SmallPtrSetImpl<Operation *> &opsToHoist) {
 
   for (auto &b : blockList) {
     for (auto &op : b) {
@@ -183,10 +188,10 @@ bool areAllOpsInTheBlockListInvariant(
 }
 
 // Returns true if the affine.if op can be hoisted.
-bool checkInvarianceOfNestedIfOps(AffineIfOp ifOp, Value indVar,
-                                  ValueRange iterArgs,
-                                  SmallPtrSetImpl<Operation *> &opsWithUsers,
-                                  SmallPtrSetImpl<Operation *> &opsToHoist) {
+static bool
+checkInvarianceOfNestedIfOps(AffineIfOp ifOp, Value indVar, ValueRange iterArgs,
+                             SmallPtrSetImpl<Operation *> &opsWithUsers,
+                             SmallPtrSetImpl<Operation *> &opsToHoist) {
   if (!areAllOpsInTheBlockListInvariant(ifOp.getThenRegion(), indVar, iterArgs,
                                         opsWithUsers, opsToHoist))
     return false;
@@ -243,6 +248,6 @@ void LoopInvariantCodeMotion::runOnOperation() {
 }
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineLoopInvariantCodeMotionPass() {
+mlir::affine::createAffineLoopInvariantCodeMotionPass() {
   return std::make_unique<LoopInvariantCodeMotion>();
 }

diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
index f760866b96f27..5cc38f7051726 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
@@ -17,11 +17,14 @@
 #include "mlir/Dialect/Func/IR/FuncOps.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPNORMALIZE
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 
@@ -29,7 +32,7 @@ namespace {
 /// As currently implemented, this pass cannot fail, but it might skip over ops
 /// that are already in a normalized form.
 struct AffineLoopNormalizePass
-    : public impl::AffineLoopNormalizeBase<AffineLoopNormalizePass> {
+    : public affine::impl::AffineLoopNormalizeBase<AffineLoopNormalizePass> {
   explicit AffineLoopNormalizePass(bool promoteSingleIter) {
     this->promoteSingleIter = promoteSingleIter;
   }
@@ -47,6 +50,6 @@ struct AffineLoopNormalizePass
 } // namespace
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineLoopNormalizePass(bool promoteSingleIter) {
+mlir::affine::createAffineLoopNormalizePass(bool promoteSingleIter) {
   return std::make_unique<AffineLoopNormalizePass>(promoteSingleIter);
 }

diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
index 0e0a1a21d7f07..64f2bc6e745c3 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
@@ -27,18 +27,21 @@
 #include <deque>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINEPARALLELIZE
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-parallel"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 /// Convert all parallel affine.for op into 1-D affine.parallel op.
 struct AffineParallelize
-    : public impl::AffineParallelizeBase<AffineParallelize> {
+    : public affine::impl::AffineParallelizeBase<AffineParallelize> {
   void runOnOperation() override;
 };
 
@@ -89,6 +92,6 @@ void AffineParallelize::runOnOperation() {
 }
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineParallelizePass() {
+mlir::affine::createAffineParallelizePass() {
   return std::make_unique<AffineParallelize>();
 }

diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
index cef2948828e42..b4c6cfecbdded 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineScalarReplacement.cpp
@@ -23,24 +23,28 @@
 #include <algorithm>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINESCALARREPLACEMENT
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-scalrep"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 struct AffineScalarReplacement
-    : public impl::AffineScalarReplacementBase<AffineScalarReplacement> {
+    : public affine::impl::AffineScalarReplacementBase<
+          AffineScalarReplacement> {
   void runOnOperation() override;
 };
 
 } // namespace
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createAffineScalarReplacementPass() {
+mlir::affine::createAffineScalarReplacementPass() {
   return std::make_unique<AffineScalarReplacement>();
 }
 

diff --git a/mlir/lib/Dialect/Affine/Transforms/DecomposeAffineOps.cpp b/mlir/lib/Dialect/Affine/Transforms/DecomposeAffineOps.cpp
index daa58b5d8986b..e87c5c030c5b9 100644
--- a/mlir/lib/Dialect/Affine/Transforms/DecomposeAffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/DecomposeAffineOps.cpp
@@ -18,6 +18,7 @@
 #include "llvm/Support/Debug.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 #define DEBUG_TYPE "decompose-affine-ops"
 #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ")
@@ -38,8 +39,8 @@ static int64_t numEnclosingInvariantLoops(OpOperand &operand) {
   return count;
 }
 
-void mlir::reorderOperandsByHoistability(RewriterBase &rewriter,
-                                         AffineApplyOp op) {
+void mlir::affine::reorderOperandsByHoistability(RewriterBase &rewriter,
+                                                 AffineApplyOp op) {
   SmallVector<int64_t> numInvariant = llvm::to_vector(
       llvm::map_range(op->getOpOperands(), [&](OpOperand &operand) {
         return numEnclosingInvariantLoops(operand);
@@ -92,8 +93,8 @@ static AffineApplyOp createSubApply(RewriterBase &rewriter,
                                         rhsOperands);
 }
 
-FailureOr<AffineApplyOp> mlir::decompose(RewriterBase &rewriter,
-                                         AffineApplyOp op) {
+FailureOr<AffineApplyOp> mlir::affine::decompose(RewriterBase &rewriter,
+                                                 AffineApplyOp op) {
   // 1. Preconditions: only handle dimensionless AffineApplyOp maps with a
   // top-level binary expression that we can reassociate (i.e. add or mul).
   AffineMap m = op.getAffineMap();

diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
index 13092701f376e..1dc69ab493d47 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
@@ -19,18 +19,21 @@
 #include "llvm/Support/Debug.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_LOOPCOALESCING
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define PASS_NAME "loop-coalescing"
 #define DEBUG_TYPE PASS_NAME
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 struct LoopCoalescingPass
-    : public impl::LoopCoalescingBase<LoopCoalescingPass> {
+    : public affine::impl::LoopCoalescingBase<LoopCoalescingPass> {
 
   void runOnOperation() override {
     func::FuncOp func = getOperation();
@@ -45,6 +48,7 @@ struct LoopCoalescingPass
 
 } // namespace
 
-std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopCoalescingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::affine::createLoopCoalescingPass() {
   return std::make_unique<LoopCoalescingPass>();
 }

diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
index 3017cb0ed0a4a..89f0a9e92279e 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
@@ -36,13 +36,16 @@
 #include <sstream>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPFUSION
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-loop-fusion"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 /// Loop fusion pass. This pass currently supports a greedy fusion policy,
@@ -54,7 +57,7 @@ namespace {
 // TODO: Extend this pass to check for fusion preventing dependences,
 // and add support for more general loop fusion algorithms.
 
-struct LoopFusion : public impl::AffineLoopFusionBase<LoopFusion> {
+struct LoopFusion : public affine::impl::AffineLoopFusionBase<LoopFusion> {
   LoopFusion() = default;
   LoopFusion(unsigned fastMemorySpace, uint64_t localBufSizeThresholdBytes,
              bool maximalFusion, enum FusionMode affineFusionMode) {
@@ -1039,7 +1042,7 @@ struct GreedyFusion {
         depthSliceUnions.resize(dstLoopDepthTest);
         FusionStrategy strategy(FusionStrategy::ProducerConsumer);
         for (unsigned i = 1; i <= dstLoopDepthTest; ++i) {
-          FusionResult result = mlir::canFuseLoops(
+          FusionResult result = affine::canFuseLoops(
               srcAffineForOp, dstAffineForOp,
               /*dstLoopDepth=*/i, &depthSliceUnions[i - 1], strategy);
 
@@ -1259,7 +1262,7 @@ struct GreedyFusion {
       unsigned maxLegalFusionDepth = 0;
       FusionStrategy strategy(memref);
       for (unsigned i = 1; i <= dstLoopDepthTest; ++i) {
-        FusionResult result = mlir::canFuseLoops(
+        FusionResult result = affine::canFuseLoops(
             sibAffineForOp, dstAffineForOp,
             /*dstLoopDepth=*/i, &depthSliceUnions[i - 1], strategy);
 
@@ -1291,9 +1294,9 @@ struct GreedyFusion {
       // further inside `fuseLoops`.
       bool isInnermostInsertion = (bestDstLoopDepth == dstLoopDepthTest);
       // Fuse computation slice of 'sibLoopNest' into 'dstLoopNest'.
-      mlir::fuseLoops(sibAffineForOp, dstAffineForOp,
-                      depthSliceUnions[bestDstLoopDepth - 1],
-                      isInnermostInsertion);
+      affine::fuseLoops(sibAffineForOp, dstAffineForOp,
+                        depthSliceUnions[bestDstLoopDepth - 1],
+                        isInnermostInsertion);
 
       auto dstForInst = cast<AffineForOp>(dstNode->op);
       // Update operation position of fused loop nest (if needed).
@@ -1501,10 +1504,9 @@ void LoopFusion::runOnOperation() {
       runOnBlock(&block);
 }
 
-std::unique_ptr<Pass>
-mlir::createLoopFusionPass(unsigned fastMemorySpace,
-                           uint64_t localBufSizeThreshold, bool maximalFusion,
-                           enum FusionMode affineFusionMode) {
+std::unique_ptr<Pass> mlir::affine::createLoopFusionPass(
+    unsigned fastMemorySpace, uint64_t localBufSizeThreshold,
+    bool maximalFusion, enum FusionMode affineFusionMode) {
   return std::make_unique<LoopFusion>(fastMemorySpace, localBufSizeThreshold,
                                       maximalFusion, affineFusionMode);
 }

diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
index 5d3a948226a18..8764228d974ab 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
@@ -28,18 +28,21 @@
 #include <optional>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPTILING
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 using namespace mlir;
+using namespace mlir::affine;
 
 #define DEBUG_TYPE "affine-loop-tile"
 
 namespace {
 
 /// A pass to perform loop tiling on all suitable loop nests of a Function.
-struct LoopTiling : public impl::AffineLoopTilingBase<LoopTiling> {
+struct LoopTiling : public affine::impl::AffineLoopTilingBase<LoopTiling> {
   LoopTiling() = default;
   explicit LoopTiling(uint64_t cacheSizeBytes, bool avoidMaxMinBounds = true)
       : avoidMaxMinBounds(avoidMaxMinBounds) {
@@ -62,10 +65,11 @@ struct LoopTiling : public impl::AffineLoopTilingBase<LoopTiling> {
 /// Creates a pass to perform loop tiling on all suitable loop nests of a
 /// Function.
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createLoopTilingPass(uint64_t cacheSizeBytes) {
+mlir::affine::createLoopTilingPass(uint64_t cacheSizeBytes) {
   return std::make_unique<LoopTiling>(cacheSizeBytes);
 }
-std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopTilingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+mlir::affine::createLoopTilingPass() {
   return std::make_unique<LoopTiling>();
 }
 
@@ -97,7 +101,7 @@ static void adjustToDivisorsOfTripCounts(ArrayRef<AffineForOp> band,
 /// hyper-rectangles, which are scheduled in the lexicographically increasing
 /// order on the vector of loop indices. This function will return failure when
 /// any dependence component is negative along any of `origLoops`.
-static bool checkTilingLegality(MutableArrayRef<mlir::AffineForOp> origLoops) {
+static bool checkTilingLegality(MutableArrayRef<AffineForOp> origLoops) {
   assert(!origLoops.empty() && "no original loops provided");
 
   // We first find out all dependences we intend to check.

diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
index 825a36840bbf1..57df7ada91654 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
@@ -25,13 +25,16 @@
 #include <optional>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPUNROLL
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-loop-unroll"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 
@@ -42,7 +45,7 @@ namespace {
 /// full unroll threshold was specified, in which case, fully unrolls all loops
 /// with trip count less than the specified threshold. The latter is for testing
 /// purposes, especially for testing outer loop unrolling.
-struct LoopUnroll : public impl::AffineLoopUnrollBase<LoopUnroll> {
+struct LoopUnroll : public affine::impl::AffineLoopUnrollBase<LoopUnroll> {
   // Callback to obtain unroll factors; if this has a callable target, takes
   // precedence over command-line argument or passed argument.
   const std::function<unsigned(AffineForOp)> getUnrollFactor;
@@ -142,7 +145,7 @@ LogicalResult LoopUnroll::runOnAffineForOp(AffineForOp forOp) {
                             cleanUpUnroll);
 }
 
-std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopUnrollPass(
+std::unique_ptr<OperationPass<func::FuncOp>> mlir::affine::createLoopUnrollPass(
     int unrollFactor, bool unrollUpToFactor, bool unrollFull,
     const std::function<unsigned(AffineForOp)> &getUnrollFactor) {
   return std::make_unique<LoopUnroll>(

diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
index a0fad4dc5b4f5..a79160df7575a 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
@@ -49,19 +49,22 @@
 #include <optional>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINELOOPUNROLLANDJAM
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-loop-unroll-jam"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 /// Loop unroll jam pass. Currently, this just unroll jams the first
 /// outer loop in a Function.
 struct LoopUnrollAndJam
-    : public impl::AffineLoopUnrollAndJamBase<LoopUnrollAndJam> {
+    : public affine::impl::AffineLoopUnrollAndJamBase<LoopUnrollAndJam> {
   explicit LoopUnrollAndJam(
       std::optional<unsigned> unrollJamFactor = std::nullopt) {
     if (unrollJamFactor)
@@ -73,7 +76,7 @@ struct LoopUnrollAndJam
 } // namespace
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createLoopUnrollAndJamPass(int unrollJamFactor) {
+mlir::affine::createLoopUnrollAndJamPass(int unrollJamFactor) {
   return std::make_unique<LoopUnrollAndJam>(
       unrollJamFactor == -1 ? std::nullopt
                             : std::optional<unsigned>(unrollJamFactor));

diff --git a/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
index 9b2bd6b7c2a1e..7d815f742541c 100644
--- a/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
@@ -27,17 +27,21 @@
 #include "llvm/Support/Debug.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINEPIPELINEDATATRANSFER
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "affine-pipeline-data-transfer"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 struct PipelineDataTransfer
-    : public impl::AffinePipelineDataTransferBase<PipelineDataTransfer> {
+    : public affine::impl::AffinePipelineDataTransferBase<
+          PipelineDataTransfer> {
   void runOnOperation() override;
   void runOnAffineForOp(AffineForOp forOp);
 
@@ -49,7 +53,7 @@ struct PipelineDataTransfer
 /// Creates a pass to pipeline explicit movement of data across levels of the
 /// memory hierarchy.
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createPipelineDataTransferPass() {
+mlir::affine::createPipelineDataTransferPass() {
   return std::make_unique<PipelineDataTransfer>();
 }
 
@@ -328,7 +332,7 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
     instShiftMap[dmaStartOp] = 0;
     // Set shifts for DMA start op's affine operand computation slices to 0.
     SmallVector<AffineApplyOp, 4> sliceOps;
-    mlir::createAffineComputationSlice(dmaStartOp, &sliceOps);
+    affine::createAffineComputationSlice(dmaStartOp, &sliceOps);
     if (!sliceOps.empty()) {
       for (auto sliceOp : sliceOps) {
         instShiftMap[sliceOp.getOperation()] = 0;

diff --git a/mlir/lib/Dialect/Affine/Transforms/ReifyValueBounds.cpp b/mlir/lib/Dialect/Affine/Transforms/ReifyValueBounds.cpp
index 3fc0fedd89bbf..0efe31c6cb289 100644
--- a/mlir/lib/Dialect/Affine/Transforms/ReifyValueBounds.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/ReifyValueBounds.cpp
@@ -14,6 +14,7 @@
 #include "mlir/Interfaces/ValueBoundsOpInterface.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 static FailureOr<OpFoldResult>
 reifyValueBound(OpBuilder &b, Location loc, presburger::BoundType type,
@@ -54,7 +55,7 @@ reifyValueBound(OpBuilder &b, Location loc, presburger::BoundType type,
   }
 
   // Simplify and return bound.
-  mlir::canonicalizeMapAndOperands(&boundMap, &operands);
+  affine::canonicalizeMapAndOperands(&boundMap, &operands);
   // Check for special cases where no affine.apply op is needed.
   if (boundMap.isSingleConstant()) {
     // Bound is a constant: return an IntegerAttr.
@@ -69,10 +70,10 @@ reifyValueBound(OpBuilder &b, Location loc, presburger::BoundType type,
         operands[expr.getPosition() + boundMap.getNumDims()]);
   // General case: build affine.apply op.
   return static_cast<OpFoldResult>(
-      b.create<AffineApplyOp>(loc, boundMap, operands).getResult());
+      b.create<affine::AffineApplyOp>(loc, boundMap, operands).getResult());
 }
 
-FailureOr<OpFoldResult> mlir::reifyShapedValueDimBound(
+FailureOr<OpFoldResult> mlir::affine::reifyShapedValueDimBound(
     OpBuilder &b, Location loc, presburger::BoundType type, Value value,
     int64_t dim, ValueBoundsConstraintSet::StopConditionFn stopCondition,
     bool closedUB) {
@@ -89,7 +90,7 @@ FailureOr<OpFoldResult> mlir::reifyShapedValueDimBound(
                          closedUB);
 }
 
-FailureOr<OpFoldResult> mlir::reifyIndexValueBound(
+FailureOr<OpFoldResult> mlir::affine::reifyIndexValueBound(
     OpBuilder &b, Location loc, presburger::BoundType type, Value value,
     ValueBoundsConstraintSet::StopConditionFn stopCondition, bool closedUB) {
   auto reifyToOperands = [&](Value v, std::optional<int64_t> d) {

diff --git a/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp b/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
index 8039484ce8d62..8987a82b7206c 100644
--- a/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineStructures.cpp
@@ -20,13 +20,16 @@
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_SIMPLIFYAFFINESTRUCTURES
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 #define DEBUG_TYPE "simplify-affine-structure"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 
@@ -35,7 +38,8 @@ namespace {
 /// all memrefs with non-trivial layout maps are converted to ones with trivial
 /// identity layout ones.
 struct SimplifyAffineStructures
-    : public impl::SimplifyAffineStructuresBase<SimplifyAffineStructures> {
+    : public affine::impl::SimplifyAffineStructuresBase<
+          SimplifyAffineStructures> {
   void runOnOperation() override;
 
   /// Utility to simplify an affine attribute and update its entry in the parent
@@ -78,7 +82,7 @@ struct SimplifyAffineStructures
 } // namespace
 
 std::unique_ptr<OperationPass<func::FuncOp>>
-mlir::createSimplifyAffineStructuresPass() {
+mlir::affine::createSimplifyAffineStructuresPass() {
   return std::make_unique<SimplifyAffineStructures>();
 }
 

diff --git a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
index 30affbb8442d3..1d347329c0005 100644
--- a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
@@ -31,11 +31,14 @@
 #include <optional>
 
 namespace mlir {
+namespace affine {
 #define GEN_PASS_DEF_AFFINEVECTORIZE
 #include "mlir/Dialect/Affine/Passes.h.inc"
+} // namespace affine
 } // namespace mlir
 
 using namespace mlir;
+using namespace affine;
 using namespace vector;
 
 ///
@@ -585,7 +588,7 @@ isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
 static std::optional<NestedPattern>
 makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
             ArrayRef<int64_t> fastestVaryingPattern) {
-  using matcher::For;
+  using affine::matcher::For;
   int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
   int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
   int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
@@ -606,7 +609,7 @@ makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
 }
 
 static NestedPattern &vectorTransferPattern() {
-  static auto pattern = matcher::Op([](Operation &op) {
+  static auto pattern = affine::matcher::Op([](Operation &op) {
     return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
   });
   return pattern;
@@ -616,7 +619,7 @@ namespace {
 
 /// Base state for the vectorize pass.
 /// Command line arguments are preempted by non-empty pass arguments.
-struct Vectorize : public impl::AffineVectorizeBase<Vectorize> {
+struct Vectorize : public affine::impl::AffineVectorizeBase<Vectorize> {
   using Base::Base;
 
   void runOnOperation() override;
@@ -1796,7 +1799,6 @@ verifyLoopNesting(const std::vector<SmallVector<AffineForOp, 2>> &loops) {
   return success();
 }
 
-namespace mlir {
 
 /// External utility to vectorize affine loops in 'loops' using the n-D
 /// vectorization factors in 'vectorSizes'. By default, each vectorization
@@ -1806,10 +1808,10 @@ namespace mlir {
 /// If `reductionLoops` is not empty, the given reduction loops may be
 /// vectorized along the reduction dimension.
 /// TODO: Vectorizing reductions is supported only for 1-D vectorization.
-void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
-                          ArrayRef<int64_t> vectorSizes,
-                          ArrayRef<int64_t> fastestVaryingPattern,
-                          const ReductionLoopMap &reductionLoops) {
+void mlir::affine::vectorizeAffineLoops(
+    Operation *parentOp, DenseSet<Operation *> &loops,
+    ArrayRef<int64_t> vectorSizes, ArrayRef<int64_t> fastestVaryingPattern,
+    const ReductionLoopMap &reductionLoops) {
   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
   NestedPatternContext mlContext;
   vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern,
@@ -1851,14 +1853,12 @@ void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
 /// loops = {{%i2}}, to vectorize only the first innermost loop;
 /// loops = {{%i3}}, to vectorize only the second innermost loop;
 /// loops = {{%i1}}, to vectorize only the middle loop.
-LogicalResult
-vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
-                        const VectorizationStrategy &strategy) {
+LogicalResult mlir::affine::vectorizeAffineLoopNest(
+    std::vector<SmallVector<AffineForOp, 2>> &loops,
+    const VectorizationStrategy &strategy) {
   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
   NestedPatternContext mlContext;
   if (failed(verifyLoopNesting(loops)))
     return failure();
   return vectorizeLoopNest(loops, strategy);
 }
-
-} // namespace mlir

diff  --git a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
index 624b79f65d429..4a4b6eee24a58 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
@@ -26,6 +26,7 @@
 #define DEBUG_TYPE "loop-fusion-utils"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 // Gathers all load and store memref accesses in 'opA' into 'values', where
 // 'values[memref] == true' for each store operation.
@@ -245,10 +246,11 @@ static unsigned getMaxLoopDepth(ArrayRef<Operation *> srcOps,
 // TODO: This pass performs some computation that is the same for all the depths
 // (e.g., getMaxLoopDepth). Implement a version of this utility that processes
 // all the depths at once or only the legal maximal depth for maximal fusion.
-FusionResult mlir::canFuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
-                                unsigned dstLoopDepth,
-                                ComputationSliceState *srcSlice,
-                                FusionStrategy fusionStrategy) {
+FusionResult mlir::affine::canFuseLoops(AffineForOp srcForOp,
+                                        AffineForOp dstForOp,
+                                        unsigned dstLoopDepth,
+                                        ComputationSliceState *srcSlice,
+                                        FusionStrategy fusionStrategy) {
   // Return 'failure' if 'dstLoopDepth == 0'.
   if (dstLoopDepth == 0) {
     LLVM_DEBUG(llvm::dbgs() << "Cannot fuse loop nests at depth 0\n");
@@ -303,7 +305,7 @@ FusionResult mlir::canFuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
 
   // Calculate the number of common loops surrounding 'srcForOp' and 'dstForOp'.
   unsigned numCommonLoops =
-      mlir::getNumCommonSurroundingLoops(*srcForOp, *dstForOp);
+      affine::getNumCommonSurroundingLoops(*srcForOp, *dstForOp);
 
   // Filter out ops in 'opsA' to compute the slice union based on the
   // assumptions made by the fusion strategy.
@@ -335,9 +337,9 @@ FusionResult mlir::canFuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
 
   // Compute union of computation slices computed between all pairs of ops
   // from 'forOpA' and 'forOpB'.
-  SliceComputationResult sliceComputationResult =
-      mlir::computeSliceUnion(strategyOpsA, opsB, dstLoopDepth, numCommonLoops,
-                              isSrcForOpBeforeDstForOp, srcSlice);
+  SliceComputationResult sliceComputationResult = affine::computeSliceUnion(
+      strategyOpsA, opsB, dstLoopDepth, numCommonLoops,
+      isSrcForOpBeforeDstForOp, srcSlice);
   if (sliceComputationResult.value == SliceComputationResult::GenericFailure) {
     LLVM_DEBUG(llvm::dbgs() << "computeSliceUnion failed\n");
     return FusionResult::FailPrecondition;
@@ -353,8 +355,8 @@ FusionResult mlir::canFuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
 
 /// Patch the loop body of a forOp that is a single iteration reduction loop
 /// into its containing block.
-LogicalResult promoteSingleIterReductionLoop(AffineForOp forOp,
-                                             bool siblingFusionUser) {
+static LogicalResult promoteSingleIterReductionLoop(AffineForOp forOp,
+                                                    bool siblingFusionUser) {
   // Check if the reduction loop is a single iteration loop.
   std::optional<uint64_t> tripCount = getConstantTripCount(forOp);
   if (!tripCount || *tripCount != 1)
@@ -416,9 +418,9 @@ LogicalResult promoteSingleIterReductionLoop(AffineForOp forOp,
 
 /// Fuses 'srcForOp' into 'dstForOp' with destination loop block insertion point
 /// and source slice loop bounds specified in 'srcSlice'.
-void mlir::fuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
-                     const ComputationSliceState &srcSlice,
-                     bool isInnermostSiblingInsertion) {
+void mlir::affine::fuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
+                             const ComputationSliceState &srcSlice,
+                             bool isInnermostSiblingInsertion) {
   // Clone 'srcForOp' into 'dstForOp' at 'srcSlice->insertPoint'.
   OpBuilder b(srcSlice.insertPoint->getBlock(), srcSlice.insertPoint);
   IRMapping mapper;
@@ -465,7 +467,8 @@ void mlir::fuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
 /// Collect loop nest statistics (eg. loop trip count and operation count)
 /// in 'stats' for loop nest rooted at 'forOp'. Returns true on success,
 /// returns false otherwise.
-bool mlir::getLoopNestStats(AffineForOp forOpRoot, LoopNestStats *stats) {
+bool mlir::affine::getLoopNestStats(AffineForOp forOpRoot,
+                                    LoopNestStats *stats) {
   auto walkResult = forOpRoot.walk([&](AffineForOp forOp) {
     auto *childForOp = forOp.getOperation();
     auto *parentForOp = forOp->getParentOp();
@@ -553,7 +556,7 @@ static int64_t getComputeCostHelper(
 /// Currently, the total cost is computed by counting the total operation
 /// instance count (i.e. total number of operations in the loop body * loop
 /// trip count) for the entire loop nest.
-int64_t mlir::getComputeCost(AffineForOp forOp, LoopNestStats &stats) {
+int64_t mlir::affine::getComputeCost(AffineForOp forOp, LoopNestStats &stats) {
   return getComputeCostHelper(forOp, stats,
                               /*tripCountOverrideMap=*/nullptr,
                               /*computeCostMap=*/nullptr);
@@ -564,10 +567,12 @@ int64_t mlir::getComputeCost(AffineForOp forOp, LoopNestStats &stats) {
 /// the total cost is computed by counting the total operation instance count
 /// (i.e. total number of operations in the loop body * loop trip count) for
 /// the entire loop nest.
-bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats,
-                                AffineForOp dstForOp, LoopNestStats &dstStats,
-                                const ComputationSliceState &slice,
-                                int64_t *computeCost) {
+bool mlir::affine::getFusionComputeCost(AffineForOp srcForOp,
+                                        LoopNestStats &srcStats,
+                                        AffineForOp dstForOp,
+                                        LoopNestStats &dstStats,
+                                        const ComputationSliceState &slice,
+                                        int64_t *computeCost) {
   llvm::SmallDenseMap<Operation *, uint64_t, 8> sliceTripCountMap;
   DenseMap<Operation *, int64_t> computeCostMap;
 
@@ -634,7 +639,7 @@ bool mlir::getFusionComputeCost(AffineForOp srcForOp, LoopNestStats &srcStats,
 /// Returns in 'producerConsumerMemrefs' the memrefs involved in a
 /// producer-consumer dependence between write ops in 'srcOps' and read ops in
 /// 'dstOps'.
-void mlir::gatherProducerConsumerMemrefs(
+void mlir::affine::gatherProducerConsumerMemrefs(
     ArrayRef<Operation *> srcOps, ArrayRef<Operation *> dstOps,
     DenseSet<Value> &producerConsumerMemrefs) {
   // Gather memrefs from stores in 'srcOps'.

diff  --git a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
index 1513102955b5e..94203ec942749 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
@@ -34,6 +34,7 @@
 #define DEBUG_TYPE "loop-utils"
 
 using namespace mlir;
+using namespace affine;
 using namespace presburger;
 using llvm::SmallMapVector;
 
@@ -128,7 +129,7 @@ static void replaceIterArgsAndYieldResults(AffineForOp forOp) {
 /// Promotes the loop body of a forOp to its containing block if the forOp
 /// was known to have a single iteration.
 // TODO: extend this for arbitrary affine bounds.
-LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
+LogicalResult mlir::affine::promoteIfSingleIteration(AffineForOp forOp) {
   std::optional<uint64_t> tripCount = getConstantTripCount(forOp);
   if (!tripCount || *tripCount != 1)
     return failure();
@@ -232,9 +233,9 @@ static AffineForOp generateShiftedLoop(
 // asserts preservation of SSA dominance. A check for that as well as that for
 // memory-based dependence preservation check rests with the users of this
 // method.
-LogicalResult mlir::affineForOpBodySkew(AffineForOp forOp,
-                                        ArrayRef<uint64_t> shifts,
-                                        bool unrollPrologueEpilogue) {
+LogicalResult mlir::affine::affineForOpBodySkew(AffineForOp forOp,
+                                                ArrayRef<uint64_t> shifts,
+                                                bool unrollPrologueEpilogue) {
   assert(forOp.getBody()->getOperations().size() == shifts.size() &&
          "too few/many shifts");
   if (forOp.getBody()->begin() == std::prev(forOp.getBody()->end()))
@@ -363,7 +364,8 @@ LogicalResult mlir::affineForOpBodySkew(AffineForOp forOp,
 }
 
 /// Checks whether a loop nest is hyper-rectangular or not.
-LogicalResult checkIfHyperRectangular(MutableArrayRef<AffineForOp> input) {
+static LogicalResult
+checkIfHyperRectangular(MutableArrayRef<AffineForOp> input) {
   FlatAffineValueConstraints cst;
   SmallVector<Operation *, 8> ops(input.begin(), input.end());
   // 0-d or 1-d is trivially hyper-rectangular.
@@ -384,8 +386,8 @@ LogicalResult checkIfHyperRectangular(MutableArrayRef<AffineForOp> input) {
 /// Check if the input nest is supported for tiling and whether tiling would be
 /// legal or not.
 template <typename t>
-LogicalResult performPreTilingChecks(MutableArrayRef<AffineForOp> input,
-                                     ArrayRef<t> tileSizes) {
+static LogicalResult performPreTilingChecks(MutableArrayRef<AffineForOp> input,
+                                            ArrayRef<t> tileSizes) {
   assert(input.size() == tileSizes.size() && "Too few/many tile sizes");
 
   if (llvm::any_of(input,
@@ -418,15 +420,15 @@ static void moveLoopBodyImpl(AffineForOp src, AffineForOp dest,
 
 /// Move the loop body of AffineForOp 'src' from 'src' to the start of dest
 /// body.
-void moveLoopBody(AffineForOp src, AffineForOp dest) {
+static void moveLoopBody(AffineForOp src, AffineForOp dest) {
   moveLoopBodyImpl(src, dest, dest.getBody()->begin());
 }
 
 /// Constructs tiled loop nest, without setting the loop bounds and move the
 /// body of the original loop nest to the tiled loop nest.
-void constructTiledLoopNest(MutableArrayRef<AffineForOp> origLoops,
-                            AffineForOp rootAffineForOp, unsigned width,
-                            MutableArrayRef<AffineForOp> tiledLoops) {
+static void constructTiledLoopNest(MutableArrayRef<AffineForOp> origLoops,
+                                   AffineForOp rootAffineForOp, unsigned width,
+                                   MutableArrayRef<AffineForOp> tiledLoops) {
   Location loc = rootAffineForOp.getLoc();
 
   // The outermost among the loops as we add more..
@@ -773,9 +775,9 @@ constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
 }
 
 LogicalResult
-mlir::tilePerfectlyNested(MutableArrayRef<AffineForOp> input,
-                          ArrayRef<unsigned> tileSizes,
-                          SmallVectorImpl<AffineForOp> *tiledNest) {
+mlir::affine::tilePerfectlyNested(MutableArrayRef<AffineForOp> input,
+                                  ArrayRef<unsigned> tileSizes,
+                                  SmallVectorImpl<AffineForOp> *tiledNest) {
   if (input.empty())
     return success();
 
@@ -816,10 +818,9 @@ mlir::tilePerfectlyNested(MutableArrayRef<AffineForOp> input,
 /// loops and intra-tile loops, using SSA values as tiling parameters. A band
 /// is a contiguous set of loops.
 //  TODO: handle non hyper-rectangular spaces.
-LogicalResult
-mlir::tilePerfectlyNestedParametric(MutableArrayRef<AffineForOp> input,
-                                    ArrayRef<Value> tileSizes,
-                                    SmallVectorImpl<AffineForOp> *tiledNest) {
+LogicalResult mlir::affine::tilePerfectlyNestedParametric(
+    MutableArrayRef<AffineForOp> input, ArrayRef<Value> tileSizes,
+    SmallVectorImpl<AffineForOp> *tiledNest) {
   if (input.empty())
     return success();
 
@@ -859,8 +860,8 @@ mlir::tilePerfectlyNestedParametric(MutableArrayRef<AffineForOp> input,
 /// (the first op being another AffineFor, and the second op - a terminator).
 /// A loop is perfectly nested iff: the first op in the loop's body is another
 /// AffineForOp, and the second op is a terminator).
-void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
-                                   AffineForOp root) {
+void mlir::affine::getPerfectlyNestedLoops(
+    SmallVectorImpl<AffineForOp> &nestedLoops, AffineForOp root) {
   for (unsigned i = 0; i < std::numeric_limits<unsigned>::max(); ++i) {
     nestedLoops.push_back(root);
     Block &body = root.getRegion().front();
@@ -876,8 +877,8 @@ void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
 /// Identify valid and profitable bands of loops to tile. This is currently just
 /// a temporary placeholder to test the mechanics of tiled code generation.
 /// Returns all maximal outermost perfect loop nests to tile.
-void mlir::getTileableBands(func::FuncOp f,
-                            std::vector<SmallVector<AffineForOp, 6>> *bands) {
+void mlir::affine::getTileableBands(
+    func::FuncOp f, std::vector<SmallVector<AffineForOp, 6>> *bands) {
   // Get maximal perfect nest of 'affine.for' insts starting from root
   // (inclusive).
   for (AffineForOp forOp : f.getOps<AffineForOp>()) {
@@ -888,7 +889,7 @@ void mlir::getTileableBands(func::FuncOp f,
 }
 
 /// Unrolls this loop completely.
-LogicalResult mlir::loopUnrollFull(AffineForOp forOp) {
+LogicalResult mlir::affine::loopUnrollFull(AffineForOp forOp) {
   std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
   if (mayBeConstantTripCount.has_value()) {
     uint64_t tripCount = *mayBeConstantTripCount;
@@ -903,8 +904,8 @@ LogicalResult mlir::loopUnrollFull(AffineForOp forOp) {
 
 /// Unrolls this loop by the specified factor or by the trip count (if constant)
 /// whichever is lower.
-LogicalResult mlir::loopUnrollUpToFactor(AffineForOp forOp,
-                                         uint64_t unrollFactor) {
+LogicalResult mlir::affine::loopUnrollUpToFactor(AffineForOp forOp,
+                                                 uint64_t unrollFactor) {
   std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
   if (mayBeConstantTripCount.has_value() &&
       *mayBeConstantTripCount < unrollFactor)
@@ -1011,7 +1012,7 @@ static LogicalResult generateCleanupLoopForUnroll(AffineForOp forOp,
 
 /// Unrolls this loop by the specified factor. Returns success if the loop
 /// is successfully unrolled.
-LogicalResult mlir::loopUnrollByFactor(
+LogicalResult mlir::affine::loopUnrollByFactor(
     AffineForOp forOp, uint64_t unrollFactor,
     function_ref<void(unsigned, Operation *, OpBuilder)> annotateFn,
     bool cleanUpUnroll) {
@@ -1078,8 +1079,8 @@ LogicalResult mlir::loopUnrollByFactor(
   return success();
 }
 
-LogicalResult mlir::loopUnrollJamUpToFactor(AffineForOp forOp,
-                                            uint64_t unrollJamFactor) {
+LogicalResult mlir::affine::loopUnrollJamUpToFactor(AffineForOp forOp,
+                                                    uint64_t unrollJamFactor) {
   std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
   if (mayBeConstantTripCount.has_value() &&
       *mayBeConstantTripCount < unrollJamFactor)
@@ -1129,8 +1130,8 @@ struct JamBlockGatherer {
 };
 
 /// Unrolls and jams this loop by the specified factor.
-LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
-                                          uint64_t unrollJamFactor) {
+LogicalResult mlir::affine::loopUnrollJamByFactor(AffineForOp forOp,
+                                                  uint64_t unrollJamFactor) {
   assert(unrollJamFactor > 0 && "unroll jam factor should be positive");
 
   std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
@@ -1212,7 +1213,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
     }
     // Create a new loop with additional iterOperands, iter_args and yield
     // operands. This new loop will take the loop body of the original loop.
-    AffineForOp newForOp = mlir::replaceForOpWithNewYields(
+    AffineForOp newForOp = affine::replaceForOpWithNewYields(
         builder, oldForOp, dupIterOperands, dupYieldOperands, dupIterArgs);
     newLoopsWithIterArgs.push_back(newForOp);
     // `forOp` has been replaced with a new loop.
@@ -1323,7 +1324,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
 
 /// Performs loop interchange on 'forOpA' and 'forOpB', where 'forOpB' is
 /// nested within 'forOpA' as the only non-terminator operation in its block.
-void mlir::interchangeLoops(AffineForOp forOpA, AffineForOp forOpB) {
+void mlir::affine::interchangeLoops(AffineForOp forOpA, AffineForOp forOpB) {
   assert(&*forOpA.getBody()->begin() == forOpB.getOperation());
   auto &forOpABody = forOpA.getBody()->getOperations();
   auto &forOpBBody = forOpB.getBody()->getOperations();
@@ -1380,8 +1381,8 @@ static bool checkLoopInterchangeDependences(
 
 /// Checks if the loop interchange permutation 'loopPermMap' of the perfectly
 /// nested sequence of loops in 'loops' would violate dependences.
-bool mlir::isValidLoopInterchangePermutation(ArrayRef<AffineForOp> loops,
-                                             ArrayRef<unsigned> loopPermMap) {
+bool mlir::affine::isValidLoopInterchangePermutation(
+    ArrayRef<AffineForOp> loops, ArrayRef<unsigned> loopPermMap) {
   // Gather dependence components for dependences between all ops in loop nest
   // rooted at 'loops[0]', at loop depths in range [1, maxLoopDepth].
   assert(loopPermMap.size() == loops.size());
@@ -1394,7 +1395,7 @@ bool mlir::isValidLoopInterchangePermutation(ArrayRef<AffineForOp> loops,
 /// Returns true if `loops` is a perfectly nested loop nest, where loops appear
 /// in it from outermost to innermost.
 bool LLVM_ATTRIBUTE_UNUSED
-mlir::isPerfectlyNested(ArrayRef<AffineForOp> loops) {
+mlir::affine::isPerfectlyNested(ArrayRef<AffineForOp> loops) {
   assert(!loops.empty() && "no loops provided");
 
   // We already know that the block can't be empty.
@@ -1416,8 +1417,8 @@ mlir::isPerfectlyNested(ArrayRef<AffineForOp> loops) {
 
 // input[i] should move from position i -> permMap[i]. Returns the position in
 // `input` that becomes the new outermost loop.
-unsigned mlir::permuteLoops(MutableArrayRef<AffineForOp> input,
-                            ArrayRef<unsigned> permMap) {
+unsigned mlir::affine::permuteLoops(MutableArrayRef<AffineForOp> input,
+                                    ArrayRef<unsigned> permMap) {
   assert(input.size() == permMap.size() && "invalid permutation map size");
   // Check whether the permutation spec is valid. This is a small vector - we'll
   // just sort and check if it's iota.
@@ -1486,7 +1487,7 @@ unsigned mlir::permuteLoops(MutableArrayRef<AffineForOp> input,
 // Sinks all sequential loops to the innermost levels (while preserving
 // relative order among them) and moves all parallel loops to the
 // outermost (while again preserving relative order among them).
-AffineForOp mlir::sinkSequentialLoops(AffineForOp forOp) {
+AffineForOp mlir::affine::sinkSequentialLoops(AffineForOp forOp) {
   SmallVector<AffineForOp, 4> loops;
   getPerfectlyNestedLoops(loops, forOp);
   if (loops.size() < 2)
@@ -1621,8 +1622,8 @@ static AffineForOp stripmineSink(AffineForOp forOp, SizeType factor,
 }
 
 SmallVector<SmallVector<AffineForOp, 8>, 8>
-mlir::tile(ArrayRef<AffineForOp> forOps, ArrayRef<uint64_t> sizes,
-           ArrayRef<AffineForOp> targets) {
+mlir::affine::tile(ArrayRef<AffineForOp> forOps, ArrayRef<uint64_t> sizes,
+                   ArrayRef<AffineForOp> targets) {
   SmallVector<SmallVector<AffineForOp, 8>, 8> res;
   SmallVector<AffineForOp, 8> currentTargets(targets.begin(), targets.end());
   for (auto it : llvm::zip(forOps, sizes)) {
@@ -1633,9 +1634,9 @@ mlir::tile(ArrayRef<AffineForOp> forOps, ArrayRef<uint64_t> sizes,
   return res;
 }
 
-SmallVector<AffineForOp, 8> mlir::tile(ArrayRef<AffineForOp> forOps,
-                                       ArrayRef<uint64_t> sizes,
-                                       AffineForOp target) {
+SmallVector<AffineForOp, 8> mlir::affine::tile(ArrayRef<AffineForOp> forOps,
+                                               ArrayRef<uint64_t> sizes,
+                                               AffineForOp target) {
   SmallVector<AffineForOp, 8> res;
   for (auto loops : tile(forOps, sizes, ArrayRef<AffineForOp>(target))) {
     assert(loops.size() == 1);
@@ -1644,7 +1645,7 @@ SmallVector<AffineForOp, 8> mlir::tile(ArrayRef<AffineForOp> forOps,
   return res;
 }
 
-LogicalResult mlir::coalesceLoops(MutableArrayRef<AffineForOp> loops) {
+LogicalResult mlir::affine::coalesceLoops(MutableArrayRef<AffineForOp> loops) {
   if (loops.size() < 2)
     return success();
 
@@ -1758,8 +1759,9 @@ LogicalResult mlir::coalesceLoops(MutableArrayRef<AffineForOp> loops) {
   return success();
 }
 
-void mlir::mapLoopToProcessorIds(scf::ForOp forOp, ArrayRef<Value> processorId,
-                                 ArrayRef<Value> numProcessors) {
+void mlir::affine::mapLoopToProcessorIds(scf::ForOp forOp,
+                                         ArrayRef<Value> processorId,
+                                         ArrayRef<Value> numProcessors) {
   assert(processorId.size() == numProcessors.size());
   if (processorId.empty())
     return;
@@ -2300,11 +2302,11 @@ static bool getFullMemRefAsRegion(Operation *op, unsigned numParamLoopIVs,
   return true;
 }
 
-LogicalResult mlir::affineDataCopyGenerate(Block::iterator begin,
-                                           Block::iterator end,
-                                           const AffineCopyOptions &copyOptions,
-                                           std::optional<Value> filterMemRef,
-                                           DenseSet<Operation *> &copyNests) {
+LogicalResult
+mlir::affine::affineDataCopyGenerate(Block::iterator begin, Block::iterator end,
+                                     const AffineCopyOptions &copyOptions,
+                                     std::optional<Value> filterMemRef,
+                                     DenseSet<Operation *> &copyNests) {
   if (begin == end)
     return success();
 
@@ -2490,16 +2492,15 @@ LogicalResult mlir::affineDataCopyGenerate(Block::iterator begin,
 
 // A convenience version of affineDataCopyGenerate for all ops in the body of
 // an AffineForOp.
-LogicalResult mlir::affineDataCopyGenerate(AffineForOp forOp,
-                                           const AffineCopyOptions &copyOptions,
-                                           std::optional<Value> filterMemRef,
-                                           DenseSet<Operation *> &copyNests) {
+LogicalResult mlir::affine::affineDataCopyGenerate(
+    AffineForOp forOp, const AffineCopyOptions &copyOptions,
+    std::optional<Value> filterMemRef, DenseSet<Operation *> &copyNests) {
   return affineDataCopyGenerate(forOp.getBody()->begin(),
                                 std::prev(forOp.getBody()->end()), copyOptions,
                                 filterMemRef, copyNests);
 }
 
-LogicalResult mlir::generateCopyForMemRegion(
+LogicalResult mlir::affine::generateCopyForMemRegion(
     const MemRefRegion &memrefRegion, Operation *analyzedOp,
     const AffineCopyOptions &copyOptions, CopyGenerateResult &result) {
   Block *block = analyzedOp->getBlock();
@@ -2543,8 +2544,8 @@ gatherLoopsInBlock(Block *block, unsigned currLoopDepth,
 }
 
 /// Gathers all AffineForOps in 'func.func' grouped by loop depth.
-void mlir::gatherLoops(func::FuncOp func,
-                       std::vector<SmallVector<AffineForOp, 2>> &depthToLoops) {
+void mlir::affine::gatherLoops(
+    func::FuncOp func, std::vector<SmallVector<AffineForOp, 2>> &depthToLoops) {
   for (auto &block : func)
     gatherLoopsInBlock(&block, /*currLoopDepth=*/0, depthToLoops);
 
@@ -2559,7 +2560,7 @@ void mlir::gatherLoops(func::FuncOp func,
 // affine.applys, fold to constant if all result dimensions of the map are
 // constant (canonicalizeMapAndOperands below already does this for single
 // result bound maps), and use simplifyMap to perform algebraic simplification.
-AffineForOp mlir::createCanonicalizedAffineForOp(
+AffineForOp mlir::affine::createCanonicalizedAffineForOp(
     OpBuilder b, Location loc, ValueRange lbOperands, AffineMap lbMap,
     ValueRange ubOperands, AffineMap ubMap, int64_t step) {
   SmallVector<Value, 4> lowerOperands(lbOperands);
@@ -2716,8 +2717,8 @@ createFullTiles(MutableArrayRef<AffineForOp> inputNest,
 }
 
 LogicalResult
-mlir::separateFullTiles(MutableArrayRef<AffineForOp> inputNest,
-                        SmallVectorImpl<AffineForOp> *fullTileNest) {
+mlir::affine::separateFullTiles(MutableArrayRef<AffineForOp> inputNest,
+                                SmallVectorImpl<AffineForOp> *fullTileNest) {
   if (inputNest.empty())
     return success();
 

diff  --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
index d96b688d29ed5..507b3872090eb 100644
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -30,6 +30,7 @@
 #define DEBUG_TYPE "affine-utils"
 
 using namespace mlir;
+using namespace affine;
 using namespace presburger;
 
 namespace {
@@ -209,17 +210,18 @@ class AffineApplyExpander
 
 /// Create a sequence of operations that implement the `expr` applied to the
 /// given dimension and symbol values.
-mlir::Value mlir::expandAffineExpr(OpBuilder &builder, Location loc,
-                                   AffineExpr expr, ValueRange dimValues,
-                                   ValueRange symbolValues) {
+mlir::Value mlir::affine::expandAffineExpr(OpBuilder &builder, Location loc,
+                                           AffineExpr expr,
+                                           ValueRange dimValues,
+                                           ValueRange symbolValues) {
   return AffineApplyExpander(builder, dimValues, symbolValues, loc).visit(expr);
 }
 
 /// Create a sequence of operations that implement the `affineMap` applied to
 /// the given `operands` (as it it were an AffineApplyOp).
 std::optional<SmallVector<Value, 8>>
-mlir::expandAffineMap(OpBuilder &builder, Location loc, AffineMap affineMap,
-                      ValueRange operands) {
+mlir::affine::expandAffineMap(OpBuilder &builder, Location loc,
+                              AffineMap affineMap, ValueRange operands) {
   auto numDims = affineMap.getNumDims();
   auto expanded = llvm::to_vector<8>(
       llvm::map_range(affineMap.getResults(),
@@ -341,8 +343,8 @@ static AffineIfOp hoistAffineIfOp(AffineIfOp ifOp, Operation *hoistOverOp) {
 }
 
 LogicalResult
-mlir::affineParallelize(AffineForOp forOp,
-                        ArrayRef<LoopReduction> parallelReductions) {
+mlir::affine::affineParallelize(AffineForOp forOp,
+                                ArrayRef<LoopReduction> parallelReductions) {
   // Fail early if there are iter arguments that are not reductions.
   unsigned numReductions = parallelReductions.size();
   if (numReductions != forOp.getNumIterOperands())
@@ -400,7 +402,7 @@ mlir::affineParallelize(AffineForOp forOp,
 }
 
 // Returns success if any hoisting happened.
-LogicalResult mlir::hoistAffineIfOp(AffineIfOp ifOp, bool *folded) {
+LogicalResult mlir::affine::hoistAffineIfOp(AffineIfOp ifOp, bool *folded) {
   // Bail out early if the ifOp returns a result.  TODO: Consider how to
   // properly support this case.
   if (ifOp.getNumResults() != 0)
@@ -454,8 +456,9 @@ LogicalResult mlir::hoistAffineIfOp(AffineIfOp ifOp, bool *folded) {
 }
 
 // Return the min expr after replacing the given dim.
-AffineExpr mlir::substWithMin(AffineExpr e, AffineExpr dim, AffineExpr min,
-                              AffineExpr max, bool positivePath) {
+AffineExpr mlir::affine::substWithMin(AffineExpr e, AffineExpr dim,
+                                      AffineExpr min, AffineExpr max,
+                                      bool positivePath) {
   if (e == dim)
     return positivePath ? min : max;
   if (auto bin = e.dyn_cast<AffineBinaryOpExpr>()) {
@@ -480,7 +483,7 @@ AffineExpr mlir::substWithMin(AffineExpr e, AffineExpr dim, AffineExpr min,
   return e;
 }
 
-void mlir::normalizeAffineParallel(AffineParallelOp op) {
+void mlir::affine::normalizeAffineParallel(AffineParallelOp op) {
   // Loops with min/max in bounds are not normalized at the moment.
   if (op.hasMinMaxBounds())
     return;
@@ -544,7 +547,8 @@ void mlir::normalizeAffineParallel(AffineParallelOp op) {
                                     ubExprs, op.getContext());
   op.setUpperBounds(ranges.getOperands(), newUpperMap);
 }
-LogicalResult mlir::normalizeAffineFor(AffineForOp op, bool promoteSingleIter) {
+LogicalResult mlir::affine::normalizeAffineFor(AffineForOp op,
+                                               bool promoteSingleIter) {
   if (promoteSingleIter && succeeded(promoteIfSingleIteration(op)))
     return success();
 
@@ -701,7 +705,7 @@ static bool mayHaveEffect(Operation *srcMemOp, Operation *destMemOp,
 }
 
 template <typename EffectType, typename T>
-bool mlir::hasNoInterveningEffect(Operation *start, T memOp) {
+bool mlir::affine::hasNoInterveningEffect(Operation *start, T memOp) {
   auto isLocallyAllocated = [](Value memref) {
     auto *defOp = memref.getDefiningOp();
     return defOp && hasSingleEffect<MemoryEffects::Allocate>(defOp, memref);
@@ -894,7 +898,7 @@ static LogicalResult forwardStoreToLoad(
 
     // 4. Ensure there is no intermediate operation which could replace the
     // value in memory.
-    if (!mlir::hasNoInterveningEffect<MemoryEffects::Write>(storeOp, loadOp))
+    if (!affine::hasNoInterveningEffect<MemoryEffects::Write>(storeOp, loadOp))
       continue;
 
     // We now have a candidate for forwarding.
@@ -921,9 +925,10 @@ static LogicalResult forwardStoreToLoad(
   return success();
 }
 
-template bool mlir::hasNoInterveningEffect<mlir::MemoryEffects::Read,
-                                           mlir::AffineReadOpInterface>(
-    mlir::Operation *, mlir::AffineReadOpInterface);
+template bool
+mlir::affine::hasNoInterveningEffect<mlir::MemoryEffects::Read,
+                                     affine::AffineReadOpInterface>(
+    mlir::Operation *, affine::AffineReadOpInterface);
 
 // This attempts to find stores which have no impact on the final result.
 // A writing op writeA will be eliminated if there exists an op writeB if
@@ -961,7 +966,7 @@ static void findUnusedStore(AffineWriteOpInterface writeA,
 
     // There cannot be an operation which reads from memory between
     // the two writes.
-    if (!mlir::hasNoInterveningEffect<MemoryEffects::Read>(writeA, writeB))
+    if (!affine::hasNoInterveningEffect<MemoryEffects::Read>(writeA, writeB))
       continue;
 
     opsToErase.push_back(writeA);
@@ -997,7 +1002,7 @@ static void loadCSE(AffineReadOpInterface loadA,
       continue;
 
     // 3. There is no write between loadA and loadB.
-    if (!mlir::hasNoInterveningEffect<MemoryEffects::Write>(
+    if (!affine::hasNoInterveningEffect<MemoryEffects::Write>(
             loadB.getOperation(), loadA))
       continue;
 
@@ -1055,8 +1060,8 @@ static void loadCSE(AffineReadOpInterface loadA,
 // currently only eliminates the stores only if no other loads/uses (other
 // than dealloc) remain.
 //
-void mlir::affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo,
-                               PostDominanceInfo &postDomInfo) {
+void mlir::affine::affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo,
+                                       PostDominanceInfo &postDomInfo) {
   // Load op's whose results were replaced by those forwarded from stores.
   SmallVector<Operation *, 8> opsToErase;
 
@@ -1109,13 +1114,11 @@ void mlir::affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo,
 }
 
 // Perform the replacement in `op`.
-LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
-                                             Operation *op,
-                                             ArrayRef<Value> extraIndices,
-                                             AffineMap indexRemap,
-                                             ArrayRef<Value> extraOperands,
-                                             ArrayRef<Value> symbolOperands,
-                                             bool allowNonDereferencingOps) {
+LogicalResult mlir::affine::replaceAllMemRefUsesWith(
+    Value oldMemRef, Value newMemRef, Operation *op,
+    ArrayRef<Value> extraIndices, AffineMap indexRemap,
+    ArrayRef<Value> extraOperands, ArrayRef<Value> symbolOperands,
+    bool allowNonDereferencingOps) {
   unsigned newMemRefRank = newMemRef.getType().cast<MemRefType>().getRank();
   (void)newMemRefRank; // unused in opt mode
   unsigned oldMemRefRank = oldMemRef.getType().cast<MemRefType>().getRank();
@@ -1285,7 +1288,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef,
   return success();
 }
 
-LogicalResult mlir::replaceAllMemRefUsesWith(
+LogicalResult mlir::affine::replaceAllMemRefUsesWith(
     Value oldMemRef, Value newMemRef, ArrayRef<Value> extraIndices,
     AffineMap indexRemap, ArrayRef<Value> extraOperands,
     ArrayRef<Value> symbolOperands, Operation *domOpFilter,
@@ -1401,7 +1404,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(
 /// all the affine.apply op's supplying operands to this opInst did not have any
 /// uses besides this opInst; otherwise returns the list of affine.apply
 /// operations created in output argument `sliceOps`.
-void mlir::createAffineComputationSlice(
+void mlir::affine::createAffineComputationSlice(
     Operation *opInst, SmallVectorImpl<AffineApplyOp> *sliceOps) {
   // Collect all operands that are results of affine apply ops.
   SmallVector<Value, 4> subOperands;
@@ -1709,7 +1712,7 @@ static void createNewDynamicSizes(MemRefType oldMemRefType,
 }
 
 // TODO: Currently works for static memrefs with a single layout map.
-LogicalResult mlir::normalizeMemRef(memref::AllocOp *allocOp) {
+LogicalResult mlir::affine::normalizeMemRef(memref::AllocOp *allocOp) {
   MemRefType memrefType = allocOp->getType();
   OpBuilder b(*allocOp);
 
@@ -1767,8 +1770,8 @@ LogicalResult mlir::normalizeMemRef(memref::AllocOp *allocOp) {
   return success();
 }
 
-MemRefType mlir::normalizeMemRefType(MemRefType memrefType,
-                                     unsigned numSymbolicOperands) {
+MemRefType mlir::affine::normalizeMemRefType(MemRefType memrefType,
+                                             unsigned numSymbolicOperands) {
   unsigned rank = memrefType.getRank();
   if (rank == 0)
     return memrefType;
@@ -1848,13 +1851,15 @@ MemRefType mlir::normalizeMemRefType(MemRefType memrefType,
   return newMemRefType;
 }
 
-DivModValue mlir::getDivMod(OpBuilder &b, Location loc, Value lhs, Value rhs) {
+DivModValue mlir::affine::getDivMod(OpBuilder &b, Location loc, Value lhs,
+                                    Value rhs) {
   DivModValue result;
   AffineExpr d0, d1;
   bindDims(b.getContext(), d0, d1);
   result.quotient =
-      makeComposedAffineApply(b, loc, d0.floorDiv(d1), {lhs, rhs});
-  result.remainder = makeComposedAffineApply(b, loc, d0 % d1, {lhs, rhs});
+      affine::makeComposedAffineApply(b, loc, d0.floorDiv(d1), {lhs, rhs});
+  result.remainder =
+      affine::makeComposedAffineApply(b, loc, d0 % d1, {lhs, rhs});
   return result;
 }
 
@@ -1871,9 +1876,9 @@ static FailureOr<OpFoldResult> getIndexProduct(OpBuilder &b, Location loc,
   return result;
 }
 
-FailureOr<SmallVector<Value>> mlir::delinearizeIndex(OpBuilder &b, Location loc,
-                                                     Value linearIndex,
-                                                     ArrayRef<Value> basis) {
+FailureOr<SmallVector<Value>>
+mlir::affine::delinearizeIndex(OpBuilder &b, Location loc, Value linearIndex,
+                               ArrayRef<Value> basis) {
   unsigned numDims = basis.size();
 
   SmallVector<Value> divisors;

diff  --git a/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp b/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp
index f47149d68fbbf..b74df4ff6060f 100644
--- a/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp
@@ -12,8 +12,9 @@
 #include "mlir/IR/PatternMatch.h"
 
 using namespace mlir;
+using namespace affine;
 
-LogicalResult mlir::mergeOffsetsSizesAndStrides(
+LogicalResult mlir::affine::mergeOffsetsSizesAndStrides(
     OpBuilder &builder, Location loc, ArrayRef<OpFoldResult> producerOffsets,
     ArrayRef<OpFoldResult> producerSizes,
     ArrayRef<OpFoldResult> producerStrides,
@@ -58,7 +59,7 @@ LogicalResult mlir::mergeOffsetsSizesAndStrides(
   return success();
 }
 
-LogicalResult mlir::mergeOffsetsSizesAndStrides(
+LogicalResult mlir::affine::mergeOffsetsSizesAndStrides(
     OpBuilder &builder, Location loc, OffsetSizeAndStrideOpInterface producer,
     OffsetSizeAndStrideOpInterface consumer,
     const llvm::SmallBitVector &droppedProducerDims,
@@ -77,7 +78,7 @@ LogicalResult mlir::mergeOffsetsSizesAndStrides(
       combinedOffsets, combinedSizes, combinedStrides);
 }
 
-void mlir::resolveIndicesIntoOpWithOffsetsAndStrides(
+void mlir::affine::resolveIndicesIntoOpWithOffsetsAndStrides(
     RewriterBase &rewriter, Location loc,
     ArrayRef<OpFoldResult> mixedSourceOffsets,
     ArrayRef<OpFoldResult> mixedSourceStrides,
@@ -109,7 +110,7 @@ void mlir::resolveIndicesIntoOpWithOffsetsAndStrides(
   }
 }
 
-void mlir::resolveSizesIntoOpWithSizes(
+void mlir::affine::resolveSizesIntoOpWithSizes(
     ArrayRef<OpFoldResult> sourceSizes, ArrayRef<OpFoldResult> destSizes,
     const llvm::SmallBitVector &rankReducedSourceDims,
     SmallVectorImpl<OpFoldResult> &resolvedSizes) {

diff  --git a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp
index 753e7e638740e..b07bdc91803ee 100644
--- a/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp
+++ b/mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp
@@ -61,7 +61,7 @@ static Value buildLinearThreadId(RewriterBase &rewriter, Location loc,
       rewriter.create<ThreadIdOp>(loc, indexType, Dimension::z).getResult()};
   threadsAndWorkGroups.push_back(blockDimsOfr[0]);
   threadsAndWorkGroups.push_back(blockDimsOfr[1]);
-  OpFoldResult ofr = makeComposedFoldedAffineApply(
+  OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
       rewriter, loc, tx + ty * BDX + tz * BDX * BDY, threadsAndWorkGroups);
   return getValueOrCreateConstantIndexOp(rewriter, loc, ofr);
 }
@@ -137,7 +137,7 @@ struct GpuWarpIdBuilder : public GpuIdBuilder {
       // `forallMappingSizes`.
       Value linearId = buildLinearThreadId(rewriter, loc, this->blockDimsOfr);
       AffineExpr d0 = getAffineDimExpr(0, rewriter.getContext());
-      OpFoldResult warpIdOfr = makeComposedFoldedAffineApply(
+      OpFoldResult warpIdOfr = affine::makeComposedFoldedAffineApply(
           rewriter, loc, d0.floorDiv(kWarpSize), {linearId});
       Value warpId = getValueOrCreateConstantIndexOp(rewriter, loc, warpIdOfr);
       // Sizes in [x, y, z] -> [z, y x] order to properly compute strides in
@@ -149,7 +149,8 @@ struct GpuWarpIdBuilder : public GpuIdBuilder {
       SmallVector<Value> ids;
       // Reverse back to be in [x, y, z] order.
       for (AffineExpr e : llvm::reverse(delinearizingExprs))
-        ids.push_back(makeComposedAffineApply(rewriter, loc, e, warpId));
+        ids.push_back(
+            affine::makeComposedAffineApply(rewriter, loc, e, warpId));
 
       // clang-format off
       LDBG("----linearId: " << linearId);
@@ -204,7 +205,8 @@ struct GpuLinearIdBuilder : public GpuIdBuilder {
       SmallVector<Value> ids;
       // Reverse back to be in [x, y, z] order.
       for (AffineExpr e : llvm::reverse(delinearizingExprs))
-        ids.push_back(makeComposedAffineApply(rewriter, loc, e, linearId));
+        ids.push_back(
+            affine::makeComposedAffineApply(rewriter, loc, e, linearId));
 
       // clang-format off
       LLVM_DEBUG(llvm::interleaveComma(reverseBasisSizes,

diff  --git a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
index 5672b02b0226d..ea9c3969c413f 100644
--- a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp
@@ -81,8 +81,8 @@ static void insertCopyLoops(ImplicitLocOpBuilder &b, Value from, Value to) {
            GPUDialect::getNumWorkgroupDimensions())))) {
     Value v = en.value();
     auto loop = cast<scf::ForOp>(v.getParentRegion()->getParentOp());
-    mapLoopToProcessorIds(loop, {threadIds[en.index()]},
-                          {blockDims[en.index()]});
+    affine::mapLoopToProcessorIds(loop, {threadIds[en.index()]},
+                                  {blockDims[en.index()]});
   }
 }
 

diff  --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
index 22a3c04ff3745..ed59518f64741 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
@@ -636,7 +636,7 @@ LinalgOp::reifyResultShapes(OpBuilder &b,
   Location loc = getOperation()->getLoc();
   IRRewriter rewriter(b);
   SmallVector<OpFoldResult> allResultDimValues =
-      makeComposedFoldedMultiResultAffineApply(
+      affine::makeComposedFoldedMultiResultAffineApply(
           rewriter, loc, resultShapesFromInputShapesMap,
           createFlatListOfOperandDims(b, loc));
   int64_t pos = 0;

diff  --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 53b96803290f7..07ddc02c00f47 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -580,7 +580,7 @@ struct FoldInsertPadIntoFill : public OpRewritePattern<tensor::InsertSliceOp> {
     // plus low padding sizes.
     SmallVector<OpFoldResult, 4> newOffsets;
     for (const auto &p : llvm::zip(lowPads, oldOffsets)) {
-      newOffsets.push_back(makeComposedFoldedAffineApply(
+      newOffsets.push_back(affine::makeComposedFoldedAffineApply(
           rewriter, loc, addMap, {std::get<0>(p), std::get<1>(p)}));
     }
 

diff  --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 3bd3dae4de33f..6907cf6ba115b 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -1169,8 +1169,8 @@ DiagnosedSilenceableFailure transform::MultiTileSizesOp::applyToOne(
   AffineExpr s0 = builder.getAffineSymbolExpr(0);
   AffineExpr s1 = builder.getAffineSymbolExpr(1);
   Operation *splitPoint =
-      makeComposedAffineApply(builder, target.getLoc(), s0 * s1,
-                              {spec->lowTileSize, spec->lowTripCount});
+      affine::makeComposedAffineApply(builder, target.getLoc(), s0 * s1,
+                                      {spec->lowTileSize, spec->lowTripCount});
   Operation *lowTileSize = spec->lowTileSize.getDefiningOp();
   Operation *highTileSize = spec->highTileSize.getDefiningOp();
   assert(lowTileSize && highTileSize && splitPoint &&
@@ -1420,7 +1420,7 @@ packMatmulGreedily(RewriterBase &rewriter, LinalgOp linalgOp,
     AffineExpr d0, s0;
     bindDims(rewriter.getContext(), d0);
     bindSymbols(rewriter.getContext(), s0);
-    adjustedPackedSizes.push_back(makeComposedFoldedAffineApply(
+    adjustedPackedSizes.push_back(affine::makeComposedFoldedAffineApply(
         rewriter, genericOp->getLoc(), d0.ceilDiv(s0) * s0,
         {loopRanges[adjustedPackedSizes.size()].size,
          rewriter.getIndexAttr(paddedSizesNextMultipleOf[i])}));
@@ -1983,8 +1983,8 @@ transform::ScalarizeOp::applyToOne(LinalgOp target,
     TrackingListener listener(state, *this);
     IRRewriter rewriter(getContext(), &listener);
     SmallVector<OpFoldResult> shapeSizes =
-        makeComposedFoldedMultiResultAffineApply(rewriter, loc, map,
-                                                 allShapeSizes);
+        affine::makeComposedFoldedMultiResultAffineApply(rewriter, loc, map,
+                                                         allShapeSizes);
     // If the shape size is dynamic, tile by 1.
     // Otherwise, do not tile (i.e. tile size 0).
     for (OpFoldResult shapeSize : shapeSizes) {
@@ -3351,7 +3351,7 @@ class LinalgTransformDialectExtension
   void init() {
     declareDependentDialect<pdl::PDLDialect>();
     declareDependentDialect<LinalgDialect>();
-    declareGeneratedDialect<AffineDialect>();
+    declareGeneratedDialect<affine::AffineDialect>();
     declareGeneratedDialect<arith::ArithDialect>();
     declareGeneratedDialect<scf::SCFDialect>();
     declareGeneratedDialect<vector::VectorDialect>();

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp b/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
index 33964d985a064..684d823d9f3df 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/BubbleUpExtractSlice.cpp
@@ -96,7 +96,7 @@ struct BubbleUpExtractSliceOpPattern
           linalgOp, "failed to get loops map from shape sizes");
     }
     SmallVector<OpFoldResult> sizeBounds =
-        makeComposedFoldedMultiResultAffineApply(
+        affine::makeComposedFoldedMultiResultAffineApply(
             rewriter, linalgLoc, shapeSizesToLoopsMap, allShapeSizes);
 
     // The offsets and sizes from the slice operation only give you the tile

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp
index 2e320925a74ba..5423cf8d750fc 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp
@@ -55,7 +55,7 @@ static SmallVector<Value> unrollIndex(OpBuilder &b, Location loc, Value index,
   for (int64_t f : factors)
     basis.push_back(b.create<arith::ConstantOp>(loc, b.getIndexAttr(f)));
   FailureOr<SmallVector<Value>> multiIndex =
-      delinearizeIndex(b, loc, index, basis);
+      affine::delinearizeIndex(b, loc, index, basis);
   assert(!failed(multiIndex) && "Failed to linearize img2col index");
   return *multiIndex;
 }
@@ -68,7 +68,8 @@ static Value getConvolvedIndex(OpBuilder &b, Location loc, Value oIndex,
   AffineExpr oExpr, fExpr;
   bindSymbols(b.getContext(), oExpr, fExpr);
   AffineMap convMap = AffineMap::get(0, 2, stride * oExpr + fExpr);
-  return makeComposedAffineApply(b, loc, convMap, ValueRange{oIndex, fIndex});
+  return affine::makeComposedAffineApply(b, loc, convMap,
+                                         ValueRange{oIndex, fIndex});
 }
 
 FailureOr<std::pair<Operation *, Operation *>>

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
index a7515edd5b1ed..e381b0aa011cb 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp
@@ -111,8 +111,8 @@ static SmallVector<OpFoldResult> getGenericOpLoopRange(OpBuilder &b,
       cast<LinalgOp>(op.getOperation()).createFlatListOfOperandDims(b, loc);
   AffineMap map = op.getShapesToLoopsMap();
   IRRewriter rewriter(b);
-  return makeComposedFoldedMultiResultAffineApply(rewriter, loc, map,
-                                                  allShapesSizes);
+  return affine::makeComposedFoldedMultiResultAffineApply(rewriter, loc, map,
+                                                          allShapesSizes);
 }
 
 /// Helper method to permute the list of `values` based on the `map`.

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 4df55ddd62e0f..f3c8c5c06a087 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -179,7 +179,7 @@ static void generateFusedElementwiseOpRegion(
                     });
     for (IndexOp indexOp :
          llvm::make_early_inc_range(producerBlock.getOps<IndexOp>())) {
-      Value newIndex = rewriter.create<mlir::AffineApplyOp>(
+      Value newIndex = rewriter.create<affine::AffineApplyOp>(
           producer.getLoc(),
           consumerToProducerLoopsMap.getSubMap(indexOp.getDim()), fusedIndices);
       mapper.map(indexOp.getResult(), newIndex);
@@ -719,7 +719,7 @@ static void updateExpandedGenericOpRegion(PatternRewriter &rewriter,
       assert(!ShapedType::isDynamic(std::get<0>(it)));
       AffineExpr idx, acc;
       bindDims(rewriter.getContext(), idx, acc);
-      newIndex = rewriter.create<AffineApplyOp>(
+      newIndex = rewriter.create<affine::AffineApplyOp>(
           indexOp.getLoc(), idx + acc * std::get<0>(it),
           ValueRange{std::get<1>(it), newIndex});
     }
@@ -1871,7 +1871,7 @@ struct LinalgElementwiseOpFusionPass
     populateFoldReshapeOpsByExpansionPatterns(patterns, defaultControlFn);
 
     // General canonicalization patterns.
-    AffineApplyOp::getCanonicalizationPatterns(patterns, context);
+    affine::AffineApplyOp::getCanonicalizationPatterns(patterns, context);
     GenericOp::getCanonicalizationPatterns(patterns, context);
     tensor::ExpandShapeOp::getCanonicalizationPatterns(patterns, context);
     tensor::CollapseShapeOp::getCanonicalizationPatterns(patterns, context);

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp b/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
index f61f23eb502a1..1ee594163b361 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
@@ -462,7 +462,7 @@ HoistPaddingAnalysis::getHoistedPackedTensorSizes(RewriterBase &rewriter,
   // of the enclosing loops.
   for (auto forOp : packingLoops) {
     // Compute an upper bound `ubVal` for the upper bound of `forOp`.
-    FailureOr<OpFoldResult> loopUb = reifyIndexValueBound(
+    FailureOr<OpFoldResult> loopUb = affine::reifyIndexValueBound(
         rewriter, loc, presburger::BoundType::UB, forOp.getUpperBound(),
         /*stopCondition=*/
         [&](Value v, std::optional<int64_t> d) {
@@ -472,7 +472,8 @@ HoistPaddingAnalysis::getHoistedPackedTensorSizes(RewriterBase &rewriter,
           Operation *op = v.getDefiningOp();
           if (!op)
             return true;
-          return !isa<AffineMinOp, AffineMaxOp, AffineApplyOp>(op);
+          return !isa<affine::AffineMinOp, affine::AffineMaxOp,
+                      affine::AffineApplyOp>(op);
         },
         /*closedUB=*/true);
     assert(succeeded(loopUb) && "could not get upper bound");
@@ -485,7 +486,7 @@ HoistPaddingAnalysis::getHoistedPackedTensorSizes(RewriterBase &rewriter,
     AffineExpr lb, ub, step;
     bindDims(rewriter.getContext(), lb, ub);
     bindSymbols(rewriter.getContext(), step);
-    Value res = rewriter.createOrFold<AffineApplyOp>(
+    Value res = rewriter.createOrFold<affine::AffineApplyOp>(
         loc, (ub - lb).ceilDiv(step),
         ValueRange{forOp.getLowerBound(), ubVal,
                    cast<scf::ForOp>(forOp).getStep()});
@@ -519,7 +520,7 @@ static Value buildLoopIterationCount(RewriterBase &rewriter, scf::ForOp outer,
   Value ivVal = forOp.getInductionVar(), lbVal = forOp.getLowerBound(),
         stepVal = forOp.getStep();
   auto loc = forOp->getLoc();
-  return rewriter.createOrFold<AffineApplyOp>(
+  return rewriter.createOrFold<affine::AffineApplyOp>(
       loc, (iv - lb).ceilDiv(step), ValueRange{ivVal, lbVal, stepVal});
 }
 

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
index b453ce8e20c28..13ec4d92ad26d 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
@@ -94,7 +94,7 @@ void mlir::linalg::hoistRedundantVectorTransfers(func::FuncOp func) {
       auto loop = dyn_cast<LoopLikeOpInterface>(transferRead->getParentOp());
       LLVM_DEBUG(DBGS() << "Parent op: " << *transferRead->getParentOp()
                         << "\n");
-      if (!isa_and_nonnull<scf::ForOp, AffineForOp>(loop))
+      if (!isa_and_nonnull<scf::ForOp, affine::AffineForOp>(loop))
         return WalkResult::advance();
 
       LLVM_DEBUG(DBGS() << "Candidate read: " << *transferRead.getOperation()
@@ -200,7 +200,7 @@ void mlir::linalg::hoistRedundantVectorTransfers(func::FuncOp func) {
             // the walk.
             return WalkResult::interrupt();
           })
-          .Case<AffineForOp>([&](AffineForOp affineForOp) {
+          .Case<affine::AffineForOp>([&](affine::AffineForOp affineForOp) {
             auto newForOp = replaceForOpWithNewYields(
                 b, affineForOp, transferRead.getVector(),
                 SmallVector<Value>{transferWrite.getVector()},

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp b/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
index 29e7f971e5c98..f46ba71599b3f 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Interchange.cpp
@@ -96,7 +96,7 @@ mlir::linalg::interchangeGenericOp(RewriterBase &rewriter, GenericOp genericOp,
                       std::back_inserter(allIndices), [&](uint64_t dim) {
                         return rewriter.create<IndexOp>(indexOp->getLoc(), dim);
                       });
-      rewriter.replaceOpWithNewOp<AffineApplyOp>(
+      rewriter.replaceOpWithNewOp<affine::AffineApplyOp>(
           indexOp, permutationMap.getSubMap(indexOp.getDim()), allIndices);
     }
   }

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index 583128a418125..adc1769bb6053 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -49,8 +49,8 @@ static SmallVector<Value> makeCanonicalAffineApplies(OpBuilder &b, Location loc,
   for (auto e : map.getResults()) {
     auto exprMap = AffineMap::get(dims, map.getNumSymbols(), e);
     SmallVector<Value> operands(vals.begin(), vals.end());
-    canonicalizeMapAndOperands(&exprMap, &operands);
-    res.push_back(b.create<AffineApplyOp>(loc, exprMap, operands));
+    affine::canonicalizeMapAndOperands(&exprMap, &operands);
+    res.push_back(b.create<affine::AffineApplyOp>(loc, exprMap, operands));
   }
   return res;
 }
@@ -189,7 +189,7 @@ static void replaceIndexOpsByInductionVariables(RewriterBase &rewriter,
         .Case([&](scf::ForOp forOp) {
           allIvs.push_back(forOp.getInductionVar());
         })
-        .Case([&](AffineForOp affineForOp) {
+        .Case([&](affine::AffineForOp affineForOp) {
           allIvs.push_back(affineForOp.getInductionVar());
         })
         .Default([&](Operation *op) { assert(false && "unexpected op"); });
@@ -208,10 +208,12 @@ static void replaceIndexOpsByInductionVariables(RewriterBase &rewriter,
 template <typename LoopTy>
 static FailureOr<LinalgLoops> linalgOpToLoopsImpl(RewriterBase &rewriter,
                                                   LinalgOp linalgOp) {
-  using LoadOpTy = std::conditional_t<std::is_same<LoopTy, AffineForOp>::value,
-                                      AffineLoadOp, memref::LoadOp>;
-  using StoreOpTy = std::conditional_t<std::is_same<LoopTy, AffineForOp>::value,
-                                       AffineStoreOp, memref::StoreOp>;
+  using LoadOpTy =
+      std::conditional_t<std::is_same<LoopTy, affine::AffineForOp>::value,
+                         affine::AffineLoadOp, memref::LoadOp>;
+  using StoreOpTy =
+      std::conditional_t<std::is_same<LoopTy, affine::AffineForOp>::value,
+                         affine::AffineStoreOp, memref::StoreOp>;
 
   // The flattened loopToOperandRangesMaps is expected to be an invertible
   // permutation map (which is asserted in the inverse calculation).
@@ -284,11 +286,11 @@ class LinalgRewritePattern : public RewritePattern {
 /// other cases, it is replaced by its unique operand.
 struct FoldAffineOp : public RewritePattern {
   FoldAffineOp(MLIRContext *context)
-      : RewritePattern(AffineApplyOp::getOperationName(), 0, context) {}
+      : RewritePattern(affine::AffineApplyOp::getOperationName(), 0, context) {}
 
   LogicalResult matchAndRewrite(Operation *op,
                                 PatternRewriter &rewriter) const override {
-    AffineApplyOp affineApplyOp = cast<AffineApplyOp>(op);
+    auto affineApplyOp = cast<affine::AffineApplyOp>(op);
     auto map = affineApplyOp.getAffineMap();
     if (map.getNumResults() != 1 || map.getNumInputs() > 1)
       return failure();
@@ -316,7 +318,7 @@ static void lowerLinalgToLoopsImpl(func::FuncOp funcOp) {
   patterns.add<LinalgRewritePattern<LoopType>>(context);
   memref::DimOp::getCanonicalizationPatterns(patterns, context);
   tensor::DimOp::getCanonicalizationPatterns(patterns, context);
-  AffineApplyOp::getCanonicalizationPatterns(patterns, context);
+  affine::AffineApplyOp::getCanonicalizationPatterns(patterns, context);
   patterns.add<FoldAffineOp>(context);
   // Just apply the patterns greedily.
   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
@@ -328,7 +330,7 @@ struct LowerToAffineLoops
     registry.insert<memref::MemRefDialect>();
   }
   void runOnOperation() override {
-    lowerLinalgToLoopsImpl<AffineForOp>(getOperation());
+    lowerLinalgToLoopsImpl<affine::AffineForOp>(getOperation());
   }
 };
 
@@ -368,7 +370,7 @@ mlir::createConvertLinalgToAffineLoopsPass() {
 /// Emits a loop nest of `affine.for` with the proper body for `linalgOp`.
 FailureOr<LinalgLoops>
 mlir::linalg::linalgOpToAffineLoops(RewriterBase &rewriter, LinalgOp linalgOp) {
-  return linalgOpToLoopsImpl<AffineForOp>(rewriter, linalgOp);
+  return linalgOpToLoopsImpl<affine::AffineForOp>(rewriter, linalgOp);
 }
 
 /// Emits a loop nest of `scf.for` with the proper body for `linalgOp`.

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
index e6fce56d4140b..344b2893c906e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp
@@ -81,14 +81,14 @@ linalg::splitOp(RewriterBase &rewriter, TilingInterface op, unsigned dimension,
   // Adjust the split point so that it doesn't overflow the size.
   AffineExpr d0, d1, d2;
   bindDims(rewriter.getContext(), d0, d1, d2);
-  OpFoldResult minSplitPoint = makeComposedFoldedAffineMin(
+  OpFoldResult minSplitPoint = affine::makeComposedFoldedAffineMin(
       rewriter, op.getLoc(),
       AffineMap::inferFromExprList(ArrayRef<AffineExpr>{d0, d1 + d2}).front(),
       {splitPoint, offsets[dimension], sizes[dimension]});
 
   // Compute the size of the second part. Return early if the second part would
   // have an empty iteration space.
-  OpFoldResult remainingSize = makeComposedFoldedAffineApply(
+  OpFoldResult remainingSize = affine::makeComposedFoldedAffineApply(
       rewriter, op.getLoc(), d0 + d1 - d2,
       {iterationSpace[dimension].offset, iterationSpace[dimension].size,
        minSplitPoint});
@@ -121,7 +121,7 @@ linalg::splitOp(RewriterBase &rewriter, TilingInterface op, unsigned dimension,
   });
 
   // Create the second part.
-  OpFoldResult totalOffset = makeComposedFoldedAffineApply(
+  OpFoldResult totalOffset = affine::makeComposedFoldedAffineApply(
       rewriter, op.getLoc(), d0 + d1, {offsets[dimension], minSplitPoint});
   SmallVector<Value> secondResults;
   TilingInterface secondPart =

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 18d485ae5463b..1ff11665b402c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -39,6 +39,7 @@ namespace mlir {
 } // namespace mlir
 
 using namespace mlir;
+using namespace mlir::affine;
 using namespace mlir::linalg;
 using namespace mlir::scf;
 
@@ -178,7 +179,7 @@ mlir::linalg::computeMultiTileSizes(OpBuilder &builder, LinalgOp op,
   AffineExpr s1 = b.getAffineSymbolExpr(1);
   AffineExpr s2 = b.getAffineSymbolExpr(2);
   auto apply = [&](AffineExpr expr, ValueRange values) -> Value {
-    return makeComposedAffineApply(b, b.getLoc(), expr, values);
+    return affine::makeComposedAffineApply(b, b.getLoc(), expr, values);
   };
   Value a = apply(s0.floorDiv(s1), {tripCount, divisorValue});
   Value t = apply((s0 + s1 - 1).floorDiv(s1), {targetSizeValue, divisorValue});
@@ -228,7 +229,7 @@ static bool canOmitTileOffsetInBoundsCheck(OpFoldResult tileSize,
 /// Build an `affine_max` of all the `vals`.
 static OpFoldResult buildMax(OpBuilder &b, Location loc,
                              ArrayRef<OpFoldResult> vals) {
-  return makeComposedFoldedAffineMax(
+  return affine::makeComposedFoldedAffineMax(
       b, loc, AffineMap::getMultiDimIdentityMap(vals.size(), loc.getContext()),
       vals);
 }
@@ -236,7 +237,7 @@ static OpFoldResult buildMax(OpBuilder &b, Location loc,
 /// Build an `affine_min` of all the `vals`.
 static OpFoldResult buildMin(OpBuilder &b, Location loc,
                              ArrayRef<OpFoldResult> vals) {
-  return makeComposedFoldedAffineMin(
+  return affine::makeComposedFoldedAffineMin(
       b, loc, AffineMap::getMultiDimIdentityMap(vals.size(), loc.getContext()),
       vals);
 }
@@ -968,10 +969,10 @@ mlir::linalg::getLinalgTilingCanonicalizationPatterns(MLIRContext *ctx) {
 void mlir::linalg::populateLinalgTilingCanonicalizationPatterns(
     RewritePatternSet &patterns) {
   auto *ctx = patterns.getContext();
-  AffineApplyOp::getCanonicalizationPatterns(patterns, ctx);
-  AffineForOp::getCanonicalizationPatterns(patterns, ctx);
-  AffineMinOp::getCanonicalizationPatterns(patterns, ctx);
-  AffineMaxOp::getCanonicalizationPatterns(patterns, ctx);
+  affine::AffineApplyOp::getCanonicalizationPatterns(patterns, ctx);
+  affine::AffineForOp::getCanonicalizationPatterns(patterns, ctx);
+  affine::AffineMinOp::getCanonicalizationPatterns(patterns, ctx);
+  affine::AffineMaxOp::getCanonicalizationPatterns(patterns, ctx);
   arith::ConstantIndexOp::getCanonicalizationPatterns(patterns, ctx);
 
   memref::SubViewOp::getCanonicalizationPatterns(patterns, ctx);

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
index 676d6330cde3e..c5026eed4423e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
@@ -37,7 +37,7 @@ static SmallVector<Value> getIndicesForAccess(OpBuilder &b, Location loc,
   for (auto result : indexingMap.getResults()) {
     AffineMap m = AffineMap::get(indexingMap.getNumDims(),
                                  indexingMap.getNumSymbols(), result);
-    Value v = b.create<AffineApplyOp>(loc, m, ivs);
+    Value v = b.create<affine::AffineApplyOp>(loc, m, ivs);
     indices.push_back(v);
   }
   return indices;
@@ -104,8 +104,8 @@ struct LinalgOpTilingInterface
 
     return llvm::to_vector(
         llvm::map_range(map.getResults(), [&](AffineExpr loopExpr) {
-          OpFoldResult ofr =
-              makeComposedFoldedAffineApply(b, loc, loopExpr, allShapesSizes);
+          OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
+              b, loc, loopExpr, allShapesSizes);
           return Range{b.getIndexAttr(0), ofr, b.getIndexAttr(1)};
         }));
   }
@@ -147,7 +147,7 @@ struct LinalgOpTilingInterface
     bindDims(b.getContext(), d0);
     SmallVector<OpFoldResult> subShapeSizes =
         llvm::to_vector(llvm::map_range(sizes, [&](OpFoldResult ofr) {
-          return makeComposedFoldedAffineApply(b, loc, d0 - 1, ofr);
+          return affine::makeComposedFoldedAffineApply(b, loc, d0 - 1, ofr);
         }));
 
     OpOperand *outOperand = linalgOp.getDpsInitOperand(resultNumber);

diff  --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index eff424f021eb6..1bc6c42545ed9 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1374,11 +1374,11 @@ mlir::linalg::vectorizeLinalgOpPrecondition(LinalgOp linalgOp,
 /// Converts affine.apply Ops to arithmetic operations.
 static void convertAffineApply(RewriterBase &rewriter, LinalgOp linalgOp) {
   OpBuilder::InsertionGuard g(rewriter);
-  auto toReplace = linalgOp.getBlock()->getOps<AffineApplyOp>();
+  auto toReplace = linalgOp.getBlock()->getOps<affine::AffineApplyOp>();
 
   for (auto op : make_early_inc_range(toReplace)) {
     rewriter.setInsertionPoint(op);
-    auto expanded = expandAffineExpr(
+    auto expanded = affine::expandAffineExpr(
         rewriter, op->getLoc(), op.getAffineMap().getResult(0),
         op.getOperands().take_front(op.getAffineMap().getNumDims()),
         op.getOperands().take_back(op.getAffineMap().getNumSymbols()));
@@ -1868,8 +1868,8 @@ struct PadOpVectorizationWithTransferWritePattern
 
       // Case 2: Both values are identical AffineMinOps. (Should not happen if
       // CSE is run.)
-      auto minOp1 = v1.getDefiningOp<AffineMinOp>();
-      auto minOp2 = v2.getDefiningOp<AffineMinOp>();
+      auto minOp1 = v1.getDefiningOp<affine::AffineMinOp>();
+      auto minOp2 = v2.getDefiningOp<affine::AffineMinOp>();
       if (minOp1 && minOp2 && minOp1.getAffineMap() == minOp2.getAffineMap() &&
           minOp1.getOperands() == minOp2.getOperands())
         continue;

diff  --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
index 4c8f31bd92cab..f4969baa10e39 100644
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -42,6 +42,7 @@
 
 using namespace mlir;
 using namespace presburger;
+using namespace mlir::affine;
 using namespace mlir::linalg;
 using namespace mlir::scf;
 
@@ -456,11 +457,11 @@ void GenerateLoopNest<AffineForOp>::doit(
     constantSteps.push_back(op.value());
   }
 
-  mlir::buildAffineLoopNest(b, loc, lbs, ubs, constantSteps,
-                            [&](OpBuilder &b, Location loc, ValueRange ivs) {
-                              bodyBuilderFn(b, loc, ivs,
-                                            linalgOp->getOperands());
-                            });
+  affine::buildAffineLoopNest(b, loc, lbs, ubs, constantSteps,
+                              [&](OpBuilder &b, Location loc, ValueRange ivs) {
+                                bodyBuilderFn(b, loc, ivs,
+                                              linalgOp->getOperands());
+                              });
 }
 
 /// Update the `lb`, `ub` and `step` to get per processor `lb`, `ub` and `step`.
@@ -470,8 +471,9 @@ void updateBoundsForCyclicDistribution(OpBuilder &b, Location loc, Value procId,
   AffineExpr d0, d1;
   bindDims(b.getContext(), d0, d1);
   AffineExpr s0 = getAffineSymbolExpr(0, b.getContext());
-  lb = makeComposedAffineApply(b, loc, d0 + d1 * s0, {lb, procId, step});
-  step = makeComposedAffineApply(b, loc, d0 * s0, {nprocs, step});
+  lb =
+      affine::makeComposedAffineApply(b, loc, d0 + d1 * s0, {lb, procId, step});
+  step = affine::makeComposedAffineApply(b, loc, d0 * s0, {nprocs, step});
 }
 
 /// Generates a loop nest consisting of scf.parallel and scf.for, depending

diff  --git a/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp b/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp
index 3209b1bb83411..65d7be0991fcf 100644
--- a/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp
+++ b/mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp
@@ -110,7 +110,7 @@ class MemRefTransformDialectExtension
 
   void init() {
     declareDependentDialect<pdl::PDLDialect>();
-    declareGeneratedDialect<AffineDialect>();
+    declareGeneratedDialect<affine::AffineDialect>();
     declareGeneratedDialect<arith::ArithDialect>();
     declareGeneratedDialect<memref::MemRefDialect>();
     declareGeneratedDialect<nvgpu::NVGPUDialect>();

diff  --git a/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp b/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp
index d5387c9faeffd..369f22521895d 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ComposeSubView.cpp
@@ -111,8 +111,8 @@ struct ComposeSubViewOpPattern : public OpRewritePattern<memref::SubViewOp> {
         }
 
         AffineMap map = AffineMap::get(0, affineApplyOperands.size(), expr);
-        Value result = rewriter.create<AffineApplyOp>(op.getLoc(), map,
-                                                      affineApplyOperands);
+        Value result = rewriter.create<affine::AffineApplyOp>(
+            op.getLoc(), map, affineApplyOperands);
         offsets.push_back(result);
       }
     }

diff  --git a/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp b/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
index 64b9b04b51034..fcddb1ea14120 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
@@ -32,7 +32,9 @@ namespace memref {
 #include "mlir/Dialect/MemRef/Transforms/Passes.h.inc"
 } // namespace memref
 } // namespace mlir
+
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 

diff  --git a/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp b/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp
index 5ef977f9add3d..5141b5f33cfa2 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp
@@ -195,8 +195,8 @@ getGenericOpViewSizeForEachDim(RewriterBase &rewriter,
   AffineExpr s1 = rewriter.getAffineSymbolExpr(1);
 
   for (auto [srcSize, indice] : llvm::zip(srcSizes, indices)) {
-    finalSizes.push_back(makeComposedFoldedAffineApply(rewriter, loc, s0 - s1,
-                                                       {srcSize, indice}));
+    finalSizes.push_back(affine::makeComposedFoldedAffineApply(
+        rewriter, loc, s0 - s1, {srcSize, indice}));
   }
   return finalSizes;
 }

diff  --git a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
index d99cf916794d7..72675b03abf65 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
@@ -85,7 +85,7 @@ resolveSourceIndicesExpandShape(Location loc, PatternRewriter &rewriter,
 
     // Creating maximally folded and composd affine.apply composes better with
     // other transformations without interleaving canonicalization passes.
-    OpFoldResult ofr = makeComposedFoldedAffineApply(
+    OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
         rewriter, loc,
         AffineMap::get(/*numDims=*/groupSize,
                        /*numSymbols=*/0, srcIndexExpr),
@@ -135,7 +135,7 @@ resolveSourceIndicesCollapseShape(Location loc, PatternRewriter &rewriter,
 
     // Construct the AffineApplyOp for each delinearizingExpr.
     for (int64_t i = 0; i < groupSize; i++) {
-      OpFoldResult ofr = makeComposedFoldedAffineApply(
+      OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
           rewriter, loc,
           AffineMap::get(/*numDims=*/1, /*numSymbols=*/0,
                          delinearizingExprs[i]),
@@ -150,7 +150,7 @@ resolveSourceIndicesCollapseShape(Location loc, PatternRewriter &rewriter,
     int64_t srcRank =
         collapseShapeOp.getViewSource().getType().cast<MemRefType>().getRank();
     for (int64_t i = 0; i < srcRank; i++) {
-      OpFoldResult ofr = makeComposedFoldedAffineApply(
+      OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
           rewriter, loc, zeroAffineMap, dynamicIndices);
       sourceIndices.push_back(
           getValueOrCreateConstantIndexOp(rewriter, loc, ofr));
@@ -268,13 +268,13 @@ class SubViewOfSubViewFolder : public OpRewritePattern<memref::SubViewOp> {
     // Resolve sizes according to dropped dims.
     SmallVector<OpFoldResult> resolvedSizes;
     llvm::SmallBitVector srcDroppedDims = srcSubView.getDroppedDims();
-    resolveSizesIntoOpWithSizes(srcSubView.getMixedSizes(),
-                                subView.getMixedSizes(), srcDroppedDims,
-                                resolvedSizes);
+    affine::resolveSizesIntoOpWithSizes(srcSubView.getMixedSizes(),
+                                        subView.getMixedSizes(), srcDroppedDims,
+                                        resolvedSizes);
 
     // Resolve offsets according to source offsets and strides.
     SmallVector<Value> resolvedOffsets;
-    resolveIndicesIntoOpWithOffsetsAndStrides(
+    affine::resolveIndicesIntoOpWithOffsetsAndStrides(
         rewriter, subView.getLoc(), srcSubView.getMixedOffsets(),
         srcSubView.getMixedStrides(), srcDroppedDims, subView.getMixedOffsets(),
         resolvedOffsets);
@@ -309,7 +309,7 @@ calculateExpandedAccessIndices(AffineMap affineMap,
       llvm::map_range(indices, [](Value v) -> OpFoldResult { return v; })));
   SmallVector<Value> expandedIndices;
   for (unsigned i = 0, e = affineMap.getNumResults(); i < e; i++) {
-    OpFoldResult ofr = makeComposedFoldedAffineApply(
+    OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
         rewriter, loc, affineMap.getSubMap({i}), indicesOfr);
     expandedIndices.push_back(
         getValueOrCreateConstantIndexOp(rewriter, loc, ofr));
@@ -371,22 +371,23 @@ LogicalResult LoadOpOfSubViewOpFolder<OpTy>::matchAndRewrite(
                              loadOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineLoadOp = dyn_cast<AffineLoadOp>(loadOp.getOperation())) {
+  if (auto affineLoadOp =
+          dyn_cast<affine::AffineLoadOp>(loadOp.getOperation())) {
     AffineMap affineMap = affineLoadOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, loadOp.getLoc(), rewriter);
     indices.assign(expandedIndices.begin(), expandedIndices.end());
   }
   SmallVector<Value> sourceIndices;
-  resolveIndicesIntoOpWithOffsetsAndStrides(
+  affine::resolveIndicesIntoOpWithOffsetsAndStrides(
       rewriter, loadOp.getLoc(), subViewOp.getMixedOffsets(),
       subViewOp.getMixedStrides(), subViewOp.getDroppedDims(), indices,
       sourceIndices);
 
   llvm::TypeSwitch<Operation *, void>(loadOp)
-      .Case([&](AffineLoadOp op) {
-        rewriter.replaceOpWithNewOp<AffineLoadOp>(loadOp, subViewOp.getSource(),
-                                                  sourceIndices);
+      .Case([&](affine::AffineLoadOp op) {
+        rewriter.replaceOpWithNewOp<affine::AffineLoadOp>(
+            loadOp, subViewOp.getSource(), sourceIndices);
       })
       .Case([&](memref::LoadOp op) {
         rewriter.replaceOpWithNewOp<memref::LoadOp>(
@@ -422,7 +423,8 @@ LogicalResult LoadOpOfExpandShapeOpFolder<OpTy>::matchAndRewrite(
                              loadOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineLoadOp = dyn_cast<AffineLoadOp>(loadOp.getOperation())) {
+  if (auto affineLoadOp =
+          dyn_cast<affine::AffineLoadOp>(loadOp.getOperation())) {
     AffineMap affineMap = affineLoadOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, loadOp.getLoc(), rewriter);
@@ -433,7 +435,7 @@ LogicalResult LoadOpOfExpandShapeOpFolder<OpTy>::matchAndRewrite(
           loadOp.getLoc(), rewriter, expandShapeOp, indices, sourceIndices)))
     return failure();
   llvm::TypeSwitch<Operation *, void>(loadOp)
-      .Case<AffineLoadOp, memref::LoadOp>([&](auto op) {
+      .Case<affine::AffineLoadOp, memref::LoadOp>([&](auto op) {
         rewriter.replaceOpWithNewOp<decltype(op)>(
             loadOp, expandShapeOp.getViewSource(), sourceIndices);
       })
@@ -454,7 +456,8 @@ LogicalResult LoadOpOfCollapseShapeOpFolder<OpTy>::matchAndRewrite(
                              loadOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineLoadOp = dyn_cast<AffineLoadOp>(loadOp.getOperation())) {
+  if (auto affineLoadOp =
+          dyn_cast<affine::AffineLoadOp>(loadOp.getOperation())) {
     AffineMap affineMap = affineLoadOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, loadOp.getLoc(), rewriter);
@@ -465,7 +468,7 @@ LogicalResult LoadOpOfCollapseShapeOpFolder<OpTy>::matchAndRewrite(
           loadOp.getLoc(), rewriter, collapseShapeOp, indices, sourceIndices)))
     return failure();
   llvm::TypeSwitch<Operation *, void>(loadOp)
-      .Case<AffineLoadOp, memref::LoadOp>([&](auto op) {
+      .Case<affine::AffineLoadOp, memref::LoadOp>([&](auto op) {
         rewriter.replaceOpWithNewOp<decltype(op)>(
             loadOp, collapseShapeOp.getViewSource(), sourceIndices);
       })
@@ -491,21 +494,22 @@ LogicalResult StoreOpOfSubViewOpFolder<OpTy>::matchAndRewrite(
                              storeOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineStoreOp = dyn_cast<AffineStoreOp>(storeOp.getOperation())) {
+  if (auto affineStoreOp =
+          dyn_cast<affine::AffineStoreOp>(storeOp.getOperation())) {
     AffineMap affineMap = affineStoreOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, storeOp.getLoc(), rewriter);
     indices.assign(expandedIndices.begin(), expandedIndices.end());
   }
   SmallVector<Value> sourceIndices;
-  resolveIndicesIntoOpWithOffsetsAndStrides(
+  affine::resolveIndicesIntoOpWithOffsetsAndStrides(
       rewriter, storeOp.getLoc(), subViewOp.getMixedOffsets(),
       subViewOp.getMixedStrides(), subViewOp.getDroppedDims(), indices,
       sourceIndices);
 
   llvm::TypeSwitch<Operation *, void>(storeOp)
-      .Case([&](AffineStoreOp op) {
-        rewriter.replaceOpWithNewOp<AffineStoreOp>(
+      .Case([&](affine::AffineStoreOp op) {
+        rewriter.replaceOpWithNewOp<affine::AffineStoreOp>(
             op, op.getValue(), subViewOp.getSource(), sourceIndices);
       })
       .Case([&](memref::StoreOp op) {
@@ -543,7 +547,8 @@ LogicalResult StoreOpOfExpandShapeOpFolder<OpTy>::matchAndRewrite(
                              storeOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineStoreOp = dyn_cast<AffineStoreOp>(storeOp.getOperation())) {
+  if (auto affineStoreOp =
+          dyn_cast<affine::AffineStoreOp>(storeOp.getOperation())) {
     AffineMap affineMap = affineStoreOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, storeOp.getLoc(), rewriter);
@@ -554,7 +559,7 @@ LogicalResult StoreOpOfExpandShapeOpFolder<OpTy>::matchAndRewrite(
           storeOp.getLoc(), rewriter, expandShapeOp, indices, sourceIndices)))
     return failure();
   llvm::TypeSwitch<Operation *, void>(storeOp)
-      .Case<AffineStoreOp, memref::StoreOp>([&](auto op) {
+      .Case<affine::AffineStoreOp, memref::StoreOp>([&](auto op) {
         rewriter.replaceOpWithNewOp<decltype(op)>(storeOp, storeOp.getValue(),
                                                   expandShapeOp.getViewSource(),
                                                   sourceIndices);
@@ -576,7 +581,8 @@ LogicalResult StoreOpOfCollapseShapeOpFolder<OpTy>::matchAndRewrite(
                              storeOp.getIndices().end());
   // For affine ops, we need to apply the map to get the operands to get the
   // "actual" indices.
-  if (auto affineStoreOp = dyn_cast<AffineStoreOp>(storeOp.getOperation())) {
+  if (auto affineStoreOp =
+          dyn_cast<affine::AffineStoreOp>(storeOp.getOperation())) {
     AffineMap affineMap = affineStoreOp.getAffineMap();
     auto expandedIndices = calculateExpandedAccessIndices(
         affineMap, indices, storeOp.getLoc(), rewriter);
@@ -587,7 +593,7 @@ LogicalResult StoreOpOfCollapseShapeOpFolder<OpTy>::matchAndRewrite(
           storeOp.getLoc(), rewriter, collapseShapeOp, indices, sourceIndices)))
     return failure();
   llvm::TypeSwitch<Operation *, void>(storeOp)
-      .Case<AffineStoreOp, memref::StoreOp>([&](auto op) {
+      .Case<affine::AffineStoreOp, memref::StoreOp>([&](auto op) {
         rewriter.replaceOpWithNewOp<decltype(op)>(
             storeOp, storeOp.getValue(), collapseShapeOp.getViewSource(),
             sourceIndices);
@@ -617,7 +623,7 @@ LogicalResult NvgpuAsyncCopyOpSubViewOpFolder::matchAndRewrite(
 
   if (srcSubViewOp) {
     LLVM_DEBUG(DBGS() << "srcSubViewOp : " << srcSubViewOp << "\n");
-    resolveIndicesIntoOpWithOffsetsAndStrides(
+    affine::resolveIndicesIntoOpWithOffsetsAndStrides(
         rewriter, copyOp.getLoc(), srcSubViewOp.getMixedOffsets(),
         srcSubViewOp.getMixedStrides(), srcSubViewOp.getDroppedDims(),
         srcindices, foldedSrcIndices);
@@ -630,7 +636,7 @@ LogicalResult NvgpuAsyncCopyOpSubViewOpFolder::matchAndRewrite(
 
   if (dstSubViewOp) {
     LLVM_DEBUG(DBGS() << "dstSubViewOp : " << dstSubViewOp << "\n");
-    resolveIndicesIntoOpWithOffsetsAndStrides(
+    affine::resolveIndicesIntoOpWithOffsetsAndStrides(
         rewriter, copyOp.getLoc(), dstSubViewOp.getMixedOffsets(),
         dstSubViewOp.getMixedStrides(), dstSubViewOp.getDroppedDims(),
         dstindices, foldedDstIndices);
@@ -650,21 +656,21 @@ LogicalResult NvgpuAsyncCopyOpSubViewOpFolder::matchAndRewrite(
 }
 
 void memref::populateFoldMemRefAliasOpPatterns(RewritePatternSet &patterns) {
-  patterns.add<LoadOpOfSubViewOpFolder<AffineLoadOp>,
+  patterns.add<LoadOpOfSubViewOpFolder<affine::AffineLoadOp>,
                LoadOpOfSubViewOpFolder<memref::LoadOp>,
                LoadOpOfSubViewOpFolder<vector::TransferReadOp>,
                LoadOpOfSubViewOpFolder<gpu::SubgroupMmaLoadMatrixOp>,
-               StoreOpOfSubViewOpFolder<AffineStoreOp>,
+               StoreOpOfSubViewOpFolder<affine::AffineStoreOp>,
                StoreOpOfSubViewOpFolder<memref::StoreOp>,
                StoreOpOfSubViewOpFolder<vector::TransferWriteOp>,
                StoreOpOfSubViewOpFolder<gpu::SubgroupMmaStoreMatrixOp>,
-               LoadOpOfExpandShapeOpFolder<AffineLoadOp>,
+               LoadOpOfExpandShapeOpFolder<affine::AffineLoadOp>,
                LoadOpOfExpandShapeOpFolder<memref::LoadOp>,
-               StoreOpOfExpandShapeOpFolder<AffineStoreOp>,
+               StoreOpOfExpandShapeOpFolder<affine::AffineStoreOp>,
                StoreOpOfExpandShapeOpFolder<memref::StoreOp>,
-               LoadOpOfCollapseShapeOpFolder<AffineLoadOp>,
+               LoadOpOfCollapseShapeOpFolder<affine::AffineLoadOp>,
                LoadOpOfCollapseShapeOpFolder<memref::LoadOp>,
-               StoreOpOfCollapseShapeOpFolder<AffineStoreOp>,
+               StoreOpOfCollapseShapeOpFolder<affine::AffineStoreOp>,
                StoreOpOfCollapseShapeOpFolder<memref::StoreOp>,
                SubViewOfSubViewFolder, NvgpuAsyncCopyOpSubViewOpFolder>(
       patterns.getContext());

diff  --git a/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp b/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
index 64874487c3d3a..ee1adcce80e5f 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
@@ -190,7 +190,7 @@ mlir::memref::multiBuffer(RewriterBase &rewriter, memref::AllocOp allocOp,
   Value stepVal = getValueOrCreateConstantIndexOp(rewriter, loc, *singleStep);
   AffineExpr iv, lb, step;
   bindDims(rewriter.getContext(), iv, lb, step);
-  Value bufferIndex = makeComposedAffineApply(
+  Value bufferIndex = affine::makeComposedAffineApply(
       rewriter, loc, ((iv - lb).floorDiv(step)) % multiBufferingFactor,
       {ivVal, lbVal, stepVal});
   LLVM_DEBUG(DBGS() << "--multi-buffered indexing: " << bufferIndex << "\n");

diff  --git a/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp b/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
index ad30ee0898cb0..c252433d16fa1 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
@@ -29,6 +29,7 @@ namespace memref {
 #define DEBUG_TYPE "normalize-memrefs"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 

diff  --git a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
index f1b45e1f09ace..0c7c04ddc63c2 100644
--- a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
+++ b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
@@ -20,6 +20,7 @@
 #include "mlir/Dialect/Vector/IR/VectorOps.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 //===----------------------------------------------------------------------===//
 // GetParentForOp
@@ -298,7 +299,7 @@ class SCFTransformDialectExtension
   using Base::Base;
 
   void init() {
-    declareGeneratedDialect<AffineDialect>();
+    declareGeneratedDialect<affine::AffineDialect>();
     declareGeneratedDialect<func::FuncDialect>();
 
     registerTransformOps<

diff  --git a/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
index 8cbca1b6da914..2450a0e5fb347 100644
--- a/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
@@ -179,8 +179,8 @@ void mlir::scf::populateSCFForLoopCanonicalizationPatterns(
     RewritePatternSet &patterns) {
   MLIRContext *ctx = patterns.getContext();
   patterns
-      .add<AffineOpSCFCanonicalizationPattern<AffineMinOp>,
-           AffineOpSCFCanonicalizationPattern<AffineMaxOp>,
+      .add<AffineOpSCFCanonicalizationPattern<affine::AffineMinOp>,
+           AffineOpSCFCanonicalizationPattern<affine::AffineMaxOp>,
            DimOfIterArgFolder<tensor::DimOp>, DimOfIterArgFolder<memref::DimOp>,
            DimOfLoopResultFolder<tensor::DimOp>,
            DimOfLoopResultFolder<memref::DimOp>>(ctx);

diff  --git a/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
index 4816f5bb2c625..f208e5245977d 100644
--- a/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
@@ -34,6 +34,7 @@ namespace mlir {
 } // namespace mlir
 
 using namespace mlir;
+using namespace mlir::affine;
 using scf::ForOp;
 using scf::ParallelOp;
 

diff  --git a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
index 785b068687579..1e04261e2eb36 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
@@ -130,8 +130,8 @@ mlir::scf::tileParallelLoop(ParallelOp op, ArrayRef<int64_t> tileSizes,
     // Otherwise, we dynamically compute the bound for
     // each iteration of the outer loop.
     newBounds.push_back(
-        b.create<AffineMinOp>(op.getLoc(), b.getIndexType(), minMap,
-                              ValueRange{newStep, upperBound, iv}));
+        b.create<affine::AffineMinOp>(op.getLoc(), b.getIndexType(), minMap,
+                                      ValueRange{newStep, upperBound, iv}));
   }
   auto innerLoop = b.create<ParallelOp>(
       op.getLoc(), SmallVector<Value, 2>(newBounds.size(), zero), newBounds,

diff  --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
index 544a863c8ffe4..131e8216ef5d2 100644
--- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
@@ -102,7 +102,7 @@ static OpFoldResult getBoundedTileSize(OpBuilder &b, Location loc,
   bindSymbols(b.getContext(), s0, s1);
   AffineMap minMap = AffineMap::get(1, 2, {s0, s1 - d0}, b.getContext());
   Value size = getValueOrCreateConstantIndexOp(b, loc, loopRange.size);
-  return makeComposedFoldedAffineMin(
+  return affine::makeComposedFoldedAffineMin(
       b, loc, minMap, SmallVector<OpFoldResult>{iv, tileSize, size});
 }
 

diff  --git a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
index 1c458eee44d1a..11df319fffc32 100644
--- a/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
@@ -27,6 +27,7 @@
 #define DEBUG_TYPE "mlir-scf-affine-utils"
 
 using namespace mlir;
+using namespace affine;
 using namespace presburger;
 
 LogicalResult scf::matchForLikeLoop(Value iv, OpFoldResult &lb,
@@ -68,7 +69,7 @@ canonicalizeMinMaxOp(RewriterBase &rewriter, Operation *op,
   RewriterBase::InsertionGuard guard(rewriter);
   rewriter.setInsertionPoint(op);
   FailureOr<AffineValueMap> simplified =
-      mlir::simplifyConstrainedMinMaxOp(op, std::move(constraints));
+      affine::simplifyConstrainedMinMaxOp(op, std::move(constraints));
   if (failed(simplified))
     return failure();
   return rewriter.replaceOpWithNewOp<AffineApplyOp>(

diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
index e2c6f77958f68..defef6608edb7 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp
@@ -91,8 +91,8 @@ static Value genVectorMask(PatternRewriter &rewriter, Location loc, VL vl,
       {rewriter.getAffineSymbolExpr(0),
        rewriter.getAffineDimExpr(0) - rewriter.getAffineDimExpr(1)},
       rewriter.getContext());
-  Value end =
-      rewriter.createOrFold<AffineMinOp>(loc, min, ValueRange{hi, iv, step});
+  Value end = rewriter.createOrFold<affine::AffineMinOp>(
+      loc, min, ValueRange{hi, iv, step});
   return rewriter.create<vector::CreateMaskOp>(loc, mtp, end);
 }
 

diff  --git a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
index ea087c1357aec..f30423de62b40 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
@@ -55,9 +55,9 @@ static OpFoldResult getCollapsedOutputDimFromInputShape(
     AffineExpr currExpr = builder.getAffineSymbolExpr(dim - startPos);
     expr = (expr ? expr * currExpr : currExpr);
   }
-  return applyMapToValues(builder, loc,
-                          AffineMap::get(0, endPos - startPos + 1, expr),
-                          dynamicDims)[0];
+  return affine::applyMapToValues(
+      builder, loc, AffineMap::get(0, endPos - startPos + 1, expr),
+      dynamicDims)[0];
 }
 
 /// Given the `src` of a collapsing reshape op and its reassociation maps,
@@ -103,7 +103,7 @@ static OpFoldResult getExpandedOutputDimFromInputShape(
     linearizedStaticDim *= d.value();
   }
   Value sourceDim = builder.create<tensor::DimOp>(loc, src, sourceDimPos);
-  return applyMapToValues(
+  return affine::applyMapToValues(
       builder, loc,
       AffineMap::get(
           0, 1, builder.getAffineSymbolExpr(0).floorDiv(linearizedStaticDim)),
@@ -190,7 +190,7 @@ struct ReifyPadOp
       };
       addOpFoldResult(lowPad[dim]);
       addOpFoldResult(highPad[dim]);
-      shapes.push_back(applyMapToValues(
+      shapes.push_back(affine::applyMapToValues(
           b, loc, AffineMap::get(1, numSymbols, expr), mapOperands)[0]);
     }
     reifiedReturnShapes.emplace_back(std::move(shapes));

diff  --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index f15aea61b82fc..0eca1843ea19f 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -1528,8 +1528,8 @@ struct FoldDimOfExpandShape : public OpRewritePattern<DimOp> {
         rewriter.create<DimOp>(dimOp.getLoc(), expandShapeOp.getSrc(), srcDim);
     AffineExpr expr;
     bindSymbols(dimOp.getContext(), expr);
-    rewriter.replaceOpWithNewOp<AffineApplyOp>(dimOp, expr.floorDiv(product),
-                                               srcDimSz);
+    rewriter.replaceOpWithNewOp<affine::AffineApplyOp>(
+        dimOp, expr.floorDiv(product), srcDimSz);
     return success();
   }
 };
@@ -1567,7 +1567,8 @@ struct FoldDimOfCollapseShape : public OpRewritePattern<DimOp> {
       syms.push_back(rewriter.getAffineSymbolExpr(it.index()));
       product = product ? product * syms.back() : syms.back();
     }
-    rewriter.replaceOpWithNewOp<AffineApplyOp>(dimOp, product, srcDimSizes);
+    rewriter.replaceOpWithNewOp<affine::AffineApplyOp>(dimOp, product,
+                                                       srcDimSizes);
     return success();
   }
 };
@@ -3565,7 +3566,7 @@ SmallVector<OpFoldResult> PackOp::getResultShape(
   bindSymbols(builder.getContext(), s0, s1);
   AffineExpr ceilDivExpr = s0.ceilDiv(s1);
   for (auto tiledDim : llvm::enumerate(innerDimsPos)) {
-    resultDims[tiledDim.value()] = makeComposedFoldedAffineApply(
+    resultDims[tiledDim.value()] = affine::makeComposedFoldedAffineApply(
         builder, loc, ceilDivExpr,
         {resultDims[tiledDim.value()], innerTileSizes[tiledDim.index()]});
   }
@@ -3610,7 +3611,8 @@ Value PackOp::createDestinationTensor(OpBuilder &b, Location loc, Value source,
   AffineExpr dim0, dim1;
   bindDims(b.getContext(), dim0, dim1);
   auto ceilDiv = [&](OpFoldResult v1, OpFoldResult v2) -> OpFoldResult {
-    return makeComposedFoldedAffineApply(b, loc, dim0.ceilDiv(dim1), {v1, v2});
+    return affine::makeComposedFoldedAffineApply(b, loc, dim0.ceilDiv(dim1),
+                                                 {v1, v2});
   };
 
   SmallVector<OpFoldResult> mixedSizes;
@@ -3816,7 +3818,7 @@ Value UnPackOp::createDestinationTensor(OpBuilder &b, Location loc,
   AffineExpr sym0, sym1;
   bindSymbols(b.getContext(), sym0, sym1);
   auto dimMul = [&](OpFoldResult v1, OpFoldResult v2) -> OpFoldResult {
-    return makeComposedFoldedAffineApply(b, loc, sym0 * sym1, {v1, v2});
+    return affine::makeComposedFoldedAffineApply(b, loc, sym0 * sym1, {v1, v2});
   };
 
   SmallVector<OpFoldResult> mixedSizes;

diff  --git a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
index 63f7a5af6f5e4..0ba705293ce5b 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
@@ -139,8 +139,8 @@ struct PackOpTiling
         tensor::createDimValues(b, loc, packOp.getSource());
     SmallVector<OpFoldResult> inputIndices, inputSizes;
     for (auto dim : llvm::seq<int64_t>(0, inputRank)) {
-      using AV = AffineValueExpr;
-      AffineBuilder ab(b, loc);
+      using AV = affine::AffineValueExpr;
+      affine::AffineBuilder ab(b, loc);
       AffineExpr dim0, dim1, sym;
       bindDims(b.getContext(), dim0, dim1);
       bindSymbols(b.getContext(), sym);
@@ -255,8 +255,8 @@ static UnpackTileDimInfo getUnpackTileDimInfo(OpBuilder &b, UnPackOp unpackOp,
   }
 
   Location loc = unpackOp.getLoc();
-  using AV = AffineValueExpr;
-  AffineBuilder ab(b, loc);
+  using AV = affine::AffineValueExpr;
+  affine::AffineBuilder ab(b, loc);
   AffineExpr dim0, dim1, sym0;
   bindDims(b.getContext(), dim0, dim1);
   bindSymbols(b.getContext(), sym0);
@@ -303,12 +303,12 @@ static UnpackTileDimInfo getUnpackTileDimInfo(OpBuilder &b, UnPackOp unpackOp,
     return info;
   }
 
-  DivModValue firstCoord =
-      getDivMod(b, loc, getValueOrCreateConstantIndexOp(b, loc, tileOffset),
-                getValueOrCreateConstantIndexOp(b, loc, innerTileSize));
+  affine::DivModValue firstCoord = affine::getDivMod(
+      b, loc, getValueOrCreateConstantIndexOp(b, loc, tileOffset),
+      getValueOrCreateConstantIndexOp(b, loc, innerTileSize));
   OpFoldResult tileExclusiveBound =
       ab.add(AV(dim0).bind(tileOffset), AV(dim1).bind(tileSize));
-  DivModValue lastCoord = getDivMod(
+  affine::DivModValue lastCoord = affine::getDivMod(
       b, loc,
       getValueOrCreateConstantIndexOp(
           b, loc,
@@ -468,21 +468,21 @@ FailureOr<TilingResult> tensor::bubbleUpPadSlice(OpBuilder &b,
   // Add two integers.
   auto addMap = AffineMap::get(2, 0, {dim0 + dim1});
   auto add = [&](OpFoldResult v1, OpFoldResult v2) {
-    return makeComposedFoldedAffineApply(b, loc, addMap, {v1, v2});
+    return affine::makeComposedFoldedAffineApply(b, loc, addMap, {v1, v2});
   };
   // Subtract two integers.
   auto subMap = AffineMap::get(2, 0, {dim0 - dim1});
   auto sub = [&](OpFoldResult v1, OpFoldResult v2) {
-    return makeComposedFoldedAffineApply(b, loc, subMap, {v1, v2});
+    return affine::makeComposedFoldedAffineApply(b, loc, subMap, {v1, v2});
   };
   // Take the minimum of two integers.
   auto idMap = AffineMap::getMultiDimIdentityMap(2, b.getContext());
   auto min = [&](OpFoldResult v1, OpFoldResult v2) {
-    return makeComposedFoldedAffineMin(b, loc, idMap, {v1, v2});
+    return affine::makeComposedFoldedAffineMin(b, loc, idMap, {v1, v2});
   };
   // Take the maximum of two integers.
   auto max = [&](OpFoldResult v1, OpFoldResult v2) {
-    return makeComposedFoldedAffineMax(b, loc, idMap, {v1, v2});
+    return affine::makeComposedFoldedAffineMax(b, loc, idMap, {v1, v2});
   };
   // Zero index-typed integer.
   OpFoldResult zero = b.getIndexAttr(0);

diff  --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
index cbf28073362f6..57e5df4633438 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -893,7 +893,7 @@ struct PadOpInterface
       AffineExpr s0, s1, s2;
       bindSymbols(op->getContext(), s0, s1, s2);
       AffineExpr sumExpr = s0 + s1 + s2;
-      Value sum = rewriter.create<AffineApplyOp>(
+      Value sum = rewriter.create<affine::AffineApplyOp>(
           loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
       dynamicSizes.push_back(sum);
     }

diff  --git a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
index f1ad357098c55..b5e75e081886a 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ExtractSliceFromReshapeUtils.cpp
@@ -23,6 +23,7 @@
 #include "llvm/ADT/STLExtras.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 using namespace mlir::tensor;
 
 /// Get the dimension size of a value of RankedTensor type at the
@@ -61,7 +62,7 @@ static DimAndIndex invertSliceIndexing(OpBuilder &b, Location loc,
   assert(dim < sliceParams.size() && "slice should be non rank-reducing");
   return std::make_pair(
       dim,
-      makeComposedAffineApply(
+      affine::makeComposedAffineApply(
           b, loc, s0 + d0 * s1,
           {indexValue,
            getValueOrCreateConstantIndexOp(b, loc, sliceParams[dim].offset),

diff  --git a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
index 46bff2bb55cd5..882b8db6b6c57 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp
@@ -100,7 +100,7 @@ LogicalResult TransferReadOfExtractSliceOpFolder::matchAndRewrite(
   SmallVector<Value> indices(readOp.getIndices().begin(),
                              readOp.getIndices().end());
   SmallVector<Value> sourceIndices;
-  resolveIndicesIntoOpWithOffsetsAndStrides(
+  affine::resolveIndicesIntoOpWithOffsetsAndStrides(
       rewriter, readOp.getLoc(), extractSliceOp.getMixedOffsets(),
       extractSliceOp.getMixedStrides(), extractSliceOp.getDroppedDims(),
       indices, sourceIndices);
@@ -132,7 +132,7 @@ LogicalResult InsertSliceOfTransferWriteOpFolder::matchAndRewrite(
   SmallVector<Value> indices(writeOp.getIndices().begin(),
                              writeOp.getIndices().end());
   SmallVector<Value> sourceIndices;
-  resolveIndicesIntoOpWithOffsetsAndStrides(
+  affine::resolveIndicesIntoOpWithOffsetsAndStrides(
       rewriter, writeOp.getLoc(), insertSliceOp.getMixedOffsets(),
       insertSliceOp.getMixedStrides(), insertSliceOp.getDroppedDims(), indices,
       sourceIndices);
@@ -187,9 +187,9 @@ struct InsertSliceOfInsertSliceFolder : public OpRewritePattern<OpTy> {
     // Note: the "insertSlice" case is symmetrical to the extract/subview case:
     // `insertSliceOp` is passed as the "source" and `sourceInsertSliceOp` is
     // passed as the destination to the helper function.
-    resolveSizesIntoOpWithSizes(insertSliceOp.getMixedSizes(),
-                                sourceInsertSliceOp.getMixedSizes(),
-                                droppedDims, resolvedSizes);
+    affine::resolveSizesIntoOpWithSizes(insertSliceOp.getMixedSizes(),
+                                        sourceInsertSliceOp.getMixedSizes(),
+                                        droppedDims, resolvedSizes);
 
     // If we are inside an InParallel region, temporarily set the insertion
     // point outside: only tensor.parallel_insert_slice ops are allowed in
@@ -204,7 +204,7 @@ struct InsertSliceOfInsertSliceFolder : public OpRewritePattern<OpTy> {
     // Note: the "insertSlice" case is symmetrical to the extract/subview case:
     // `insertSliceOp` is passed as the "source" and `sourceInsertSliceOp` is
     // passed as the destination to the helper function.
-    resolveIndicesIntoOpWithOffsetsAndStrides(
+    affine::resolveIndicesIntoOpWithOffsetsAndStrides(
         rewriter, insertSliceOp.getLoc(), insertSliceOp.getMixedOffsets(),
         insertSliceOp.getMixedStrides(), droppedDims,
         sourceInsertSliceOp.getMixedOffsets(), resolvedOffsets);

diff  --git a/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp
index 895d1b1f02f07..9b8853d123ea8 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp
@@ -29,9 +29,9 @@ struct MergeConsecutiveExtractSlice : public OpRewritePattern<ExtractSliceOp> {
       return failure();
 
     SmallVector<OpFoldResult> newOffsets, newSizes, newStrides;
-    if (failed(mergeOffsetsSizesAndStrides(rewriter, nextOp.getLoc(), prevOp,
-                                           nextOp, prevOp.getDroppedDims(),
-                                           newOffsets, newSizes, newStrides)))
+    if (failed(affine::mergeOffsetsSizesAndStrides(
+            rewriter, nextOp.getLoc(), prevOp, nextOp, prevOp.getDroppedDims(),
+            newOffsets, newSizes, newStrides)))
       return failure();
 
     rewriter.replaceOpWithNewOp<ExtractSliceOp>(nextOp, nextOp.getType(),

diff  --git a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
index 81d39a7609ea4..71dddd1363794 100644
--- a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
@@ -35,7 +35,8 @@ PadOp mlir::tensor::createPadHighOp(RankedTensorType type, Value source,
     bindDims(b.getContext(), d0);
     auto dimOp = b.createOrFold<tensor::DimOp>(loc, source, en.index());
     high[en.index()] =
-        makeComposedAffineApply(b, loc, en.value() - d0, {dimOp}).getResult();
+        affine::makeComposedAffineApply(b, loc, en.value() - d0, {dimOp})
+            .getResult();
   }
   return b.create<PadOp>(loc, type, source, low, high, pad, nofold);
 }

diff  --git a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
index 0b36ffedbbdb0..6592930b5f66f 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
@@ -72,8 +72,8 @@ struct DistributedLoadStoreHelper {
   Value buildDistributedOffset(RewriterBase &b, Location loc, int64_t index) {
     int64_t distributedSize = distributedVectorType.getDimSize(index);
     AffineExpr tid = getAffineSymbolExpr(0, b.getContext());
-    return b.createOrFold<AffineApplyOp>(loc, tid * distributedSize,
-                                         ArrayRef<Value>{laneId});
+    return b.createOrFold<affine::AffineApplyOp>(loc, tid * distributedSize,
+                                                 ArrayRef<Value>{laneId});
   }
 
   /// Create a store during the process of distributing the
@@ -513,9 +513,9 @@ struct WarpOpTransferWrite : public OpRewritePattern<vector::TransferWriteOp> {
       unsigned vectorPos = std::get<1>(it).cast<AffineDimExpr>().getPosition();
       auto scale =
           rewriter.getAffineConstantExpr(targetType.getDimSize(vectorPos));
-      indices[indexPos] =
-          makeComposedAffineApply(rewriter, loc, d0 + scale * d1,
-                                  {indices[indexPos], newWarpOp.getLaneid()});
+      indices[indexPos] = affine::makeComposedAffineApply(
+          rewriter, loc, d0 + scale * d1,
+          {indices[indexPos], newWarpOp.getLaneid()});
     }
     newWriteOp.getIndicesMutable().assign(indices);
 
@@ -753,9 +753,9 @@ struct WarpOpTransferRead : public OpRewritePattern<WarpExecuteOnLane0Op> {
       unsigned vectorPos = std::get<1>(it).cast<AffineDimExpr>().getPosition();
       int64_t scale =
           distributedVal.getType().cast<VectorType>().getDimSize(vectorPos);
-      indices[indexPos] =
-          makeComposedAffineApply(rewriter, read.getLoc(), d0 + scale * d1,
-                                  {indices[indexPos], warpOp.getLaneid()});
+      indices[indexPos] = affine::makeComposedAffineApply(
+          rewriter, read.getLoc(), d0 + scale * d1,
+          {indices[indexPos], warpOp.getLaneid()});
     }
     Value newRead = rewriter.create<vector::TransferReadOp>(
         read.getLoc(), distributedVal.getType(), read.getSource(), indices,
@@ -1046,15 +1046,15 @@ struct WarpOpExtractElement : public OpRewritePattern<WarpExecuteOnLane0Op> {
     int64_t elementsPerLane = distributedVecType.getShape()[0];
     AffineExpr sym0 = getAffineSymbolExpr(0, rewriter.getContext());
     // tid of extracting thread: pos / elementsPerLane
-    Value broadcastFromTid = rewriter.create<AffineApplyOp>(
+    Value broadcastFromTid = rewriter.create<affine::AffineApplyOp>(
         loc, sym0.ceilDiv(elementsPerLane), extractOp.getPosition());
     // Extract at position: pos % elementsPerLane
     Value pos =
         elementsPerLane == 1
             ? rewriter.create<arith::ConstantIndexOp>(loc, 0).getResult()
             : rewriter
-                  .create<AffineApplyOp>(loc, sym0 % elementsPerLane,
-                                         extractOp.getPosition())
+                  .create<affine::AffineApplyOp>(loc, sym0 % elementsPerLane,
+                                                 extractOp.getPosition())
                   .getResult();
     Value extracted =
         rewriter.create<vector::ExtractElementOp>(loc, distributedVec, pos);
@@ -1119,14 +1119,15 @@ struct WarpOpInsertElement : public OpRewritePattern<WarpExecuteOnLane0Op> {
     int64_t elementsPerLane = distrType.getShape()[0];
     AffineExpr sym0 = getAffineSymbolExpr(0, rewriter.getContext());
     // tid of extracting thread: pos / elementsPerLane
-    Value insertingLane = rewriter.create<AffineApplyOp>(
+    Value insertingLane = rewriter.create<affine::AffineApplyOp>(
         loc, sym0.ceilDiv(elementsPerLane), newPos);
     // Insert position: pos % elementsPerLane
     Value pos =
         elementsPerLane == 1
             ? rewriter.create<arith::ConstantIndexOp>(loc, 0).getResult()
             : rewriter
-                  .create<AffineApplyOp>(loc, sym0 % elementsPerLane, newPos)
+                  .create<affine::AffineApplyOp>(loc, sym0 % elementsPerLane,
+                                                 newPos)
                   .getResult();
     Value isInsertingLane = rewriter.create<arith::CmpIOp>(
         loc, arith::CmpIPredicate::eq, newWarpOp.getLaneid(), insertingLane);

diff  --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
index cbfea2fe62203..3a06d9bdea1f0 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
@@ -599,7 +599,7 @@ class RewriteScalarExtractElementOfTransferRead
     if (extractOp.getPosition()) {
       AffineExpr sym0, sym1;
       bindSymbols(extractOp.getContext(), sym0, sym1);
-      OpFoldResult ofr = makeComposedFoldedAffineApply(
+      OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
           rewriter, extractOp.getLoc(), sym0 + sym1,
           {newIndices[newIndices.size() - 1], extractOp.getPosition()});
       if (ofr.is<Value>()) {
@@ -663,7 +663,7 @@ class RewriteScalarExtractOfTransferRead
       int64_t offset = it.value().cast<IntegerAttr>().getInt();
       int64_t idx =
           newIndices.size() - extractOp.getPosition().size() + it.index();
-      OpFoldResult ofr = makeComposedFoldedAffineApply(
+      OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
           rewriter, extractOp.getLoc(),
           rewriter.getAffineSymbolExpr(0) + offset, {newIndices[idx]});
       if (ofr.is<Value>()) {

diff  --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
index caf5822256bc6..34a7ce16ce983 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp
@@ -41,7 +41,7 @@ using namespace mlir::vector;
 static std::optional<int64_t> extractConstantIndex(Value v) {
   if (auto cstOp = v.getDefiningOp<arith::ConstantIndexOp>())
     return cstOp.value();
-  if (auto affineApplyOp = v.getDefiningOp<AffineApplyOp>())
+  if (auto affineApplyOp = v.getDefiningOp<affine::AffineApplyOp>())
     if (affineApplyOp.getAffineMap().isSingleConstant())
       return affineApplyOp.getAffineMap().getSingleConstantResult();
   return std::nullopt;
@@ -76,8 +76,8 @@ static Value createInBoundsCond(RewriterBase &b,
     int64_t vectorSize = xferOp.getVectorType().getDimSize(resultIdx);
     auto d0 = getAffineDimExpr(0, xferOp.getContext());
     auto vs = getAffineConstantExpr(vectorSize, xferOp.getContext());
-    Value sum =
-        makeComposedAffineApply(b, loc, d0 + vs, xferOp.indices()[indicesIdx]);
+    Value sum = affine::makeComposedAffineApply(b, loc, d0 + vs,
+                                                xferOp.indices()[indicesIdx]);
     Value cond = createFoldedSLE(
         b, sum, vector::createOrFoldDimOp(b, loc, xferOp.source(), indicesIdx));
     if (!cond)
@@ -208,7 +208,7 @@ createSubViewIntersection(RewriterBase &b, VectorTransferOpInterface xferOp,
     SmallVector<AffineMap, 4> maps =
         AffineMap::inferFromExprList(MapList{{i - j, k}});
     // affine_min(%dimMemRef - %index, %dimAlloc)
-    Value affineMin = b.create<AffineMinOp>(
+    Value affineMin = b.create<affine::AffineMinOp>(
         loc, index.getType(), maps[0], ValueRange{dimMemRef, index, dimAlloc});
     sizes.push_back(affineMin);
   });
@@ -449,7 +449,7 @@ static Operation *getAutomaticAllocationScope(Operation *op) {
        parent = parent->getParentOp()) {
     if (parent->hasTrait<OpTrait::AutomaticAllocationScope>())
       scope = parent;
-    if (!isa<scf::ForOp, AffineForOp>(parent))
+    if (!isa<scf::ForOp, affine::AffineForOp>(parent))
       break;
   }
   assert(scope && "Expected op to be inside automatic allocation scope");

diff  --git a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
index 98577e36faa91..086ae5c5030b3 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
@@ -118,7 +118,8 @@ static SmallVector<Value> sliceTransferIndices(ArrayRef<int64_t> elementOffsets,
     auto expr = getAffineDimExpr(0, builder.getContext()) +
                 getAffineConstantExpr(elementOffsets[dim.index()], ctx);
     auto map = AffineMap::get(/*dimCount=*/1, /*symbolCount=*/0, expr);
-    slicedIndices[pos] = builder.create<AffineApplyOp>(loc, map, indices[pos]);
+    slicedIndices[pos] =
+        builder.create<affine::AffineApplyOp>(loc, map, indices[pos]);
   }
   return slicedIndices;
 }

diff  --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
index 8e3cd03f956cd..15895470eca4c 100644
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -77,8 +77,8 @@ static AffineMap makePermutationMap(
 
   for (auto kvp : enclosingLoopToVectorDim) {
     assert(kvp.second < perm.size());
-    auto invariants = getInvariantAccesses(
-        cast<AffineForOp>(kvp.first).getInductionVar(), indices);
+    auto invariants = affine::getInvariantAccesses(
+        cast<affine::AffineForOp>(kvp.first).getInductionVar(), indices);
     unsigned numIndices = indices.size();
     unsigned countInvariantIndices = 0;
     for (unsigned dim = 0; dim < numIndices; ++dim) {
@@ -119,7 +119,7 @@ static SetVector<Operation *> getParentsOfType(Block *block) {
 
 /// Returns the enclosing AffineForOp, from closest to farthest.
 static SetVector<Operation *> getEnclosingforOps(Block *block) {
-  return getParentsOfType<AffineForOp>(block);
+  return getParentsOfType<affine::AffineForOp>(block);
 }
 
 AffineMap mlir::makePermutationMap(

diff  --git a/mlir/test/lib/Analysis/TestMemRefBoundCheck.cpp b/mlir/test/lib/Analysis/TestMemRefBoundCheck.cpp
index 73ea6dddf2b29..a036ae836c3cb 100644
--- a/mlir/test/lib/Analysis/TestMemRefBoundCheck.cpp
+++ b/mlir/test/lib/Analysis/TestMemRefBoundCheck.cpp
@@ -23,6 +23,7 @@
 #define DEBUG_TYPE "memref-bound-check"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 

diff  --git a/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp b/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp
index 7ddcfdde06c0e..b3b9a590773b0 100644
--- a/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp
+++ b/mlir/test/lib/Analysis/TestMemRefDependenceCheck.cpp
@@ -21,6 +21,7 @@
 #define DEBUG_TYPE "test-memref-dependence-check"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 

diff  --git a/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp b/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
index c602095bbfeed..b418a457473a8 100644
--- a/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
@@ -23,6 +23,7 @@
 #define PASS_NAME "test-affine-data-copy"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 

diff  --git a/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp b/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp
index 5629a8dfad3bf..f8e76356c4321 100644
--- a/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp
@@ -17,6 +17,7 @@
 #include "mlir/Dialect/Func/IR/FuncOps.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 #define DEBUG_TYPE "test-affine-parametric-tile"
 

diff  --git a/mlir/test/lib/Dialect/Affine/TestAffineLoopUnswitching.cpp b/mlir/test/lib/Dialect/Affine/TestAffineLoopUnswitching.cpp
index 83b66f57a6e4f..7e4a3ca7b7c72 100644
--- a/mlir/test/lib/Dialect/Affine/TestAffineLoopUnswitching.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestAffineLoopUnswitching.cpp
@@ -19,6 +19,7 @@
 #define PASS_NAME "test-affine-loop-unswitch"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 

diff  --git a/mlir/test/lib/Dialect/Affine/TestDecomposeAffineOps.cpp b/mlir/test/lib/Dialect/Affine/TestDecomposeAffineOps.cpp
index bdb494d83a474..bc05ddd37b22e 100644
--- a/mlir/test/lib/Dialect/Affine/TestDecomposeAffineOps.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestDecomposeAffineOps.cpp
@@ -21,6 +21,7 @@
 #define PASS_NAME "test-decompose-affine-ops"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 

diff  --git a/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp b/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
index 7eb345408e78c..f4f1593dc53e2 100644
--- a/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
@@ -20,6 +20,7 @@
 #define DEBUG_TYPE "test-loop-fusion"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 static llvm::cl::OptionCategory clOptionsCategory(DEBUG_TYPE " options");
 
@@ -60,10 +61,10 @@ struct TestLoopFusion
 static bool testDependenceCheck(AffineForOp srcForOp, AffineForOp dstForOp,
                                 unsigned i, unsigned j, unsigned loopDepth,
                                 unsigned maxLoopDepth) {
-  mlir::ComputationSliceState sliceUnion;
+  affine::ComputationSliceState sliceUnion;
   for (unsigned d = loopDepth + 1; d <= maxLoopDepth; ++d) {
     FusionResult result =
-        mlir::canFuseLoops(srcForOp, dstForOp, d, &sliceUnion);
+        affine::canFuseLoops(srcForOp, dstForOp, d, &sliceUnion);
     if (result.value == FusionResult::FailBlockDependence) {
       srcForOp->emitRemark("block-level dependence preventing"
                            " fusion of loop nest ")
@@ -85,7 +86,8 @@ static unsigned getBlockIndex(Operation &op) {
 }
 
 // Returns a string representation of 'sliceUnion'.
-static std::string getSliceStr(const mlir::ComputationSliceState &sliceUnion) {
+static std::string
+getSliceStr(const affine::ComputationSliceState &sliceUnion) {
   std::string result;
   llvm::raw_string_ostream os(result);
   // Slice insertion point format [loop-depth, operation-block-index]
@@ -114,8 +116,8 @@ static bool testSliceComputation(AffineForOp forOpA, AffineForOp forOpB,
                                  unsigned i, unsigned j, unsigned loopDepth,
                                  unsigned maxLoopDepth) {
   for (unsigned d = loopDepth + 1; d <= maxLoopDepth; ++d) {
-    mlir::ComputationSliceState sliceUnion;
-    FusionResult result = mlir::canFuseLoops(forOpA, forOpB, d, &sliceUnion);
+    affine::ComputationSliceState sliceUnion;
+    FusionResult result = affine::canFuseLoops(forOpA, forOpB, d, &sliceUnion);
     if (result.value == FusionResult::Success) {
       forOpB->emitRemark("slice (")
           << " src loop: " << i << ", dst loop: " << j << ", depth: " << d
@@ -137,10 +139,10 @@ static bool testLoopFusionTransformation(AffineForOp forOpA, AffineForOp forOpB,
                                          unsigned loopDepth,
                                          unsigned maxLoopDepth) {
   for (unsigned d = loopDepth + 1; d <= maxLoopDepth; ++d) {
-    mlir::ComputationSliceState sliceUnion;
-    FusionResult result = mlir::canFuseLoops(forOpA, forOpB, d, &sliceUnion);
+    affine::ComputationSliceState sliceUnion;
+    FusionResult result = affine::canFuseLoops(forOpA, forOpB, d, &sliceUnion);
     if (result.value == FusionResult::Success) {
-      mlir::fuseLoops(forOpA, forOpB, sliceUnion);
+      affine::fuseLoops(forOpA, forOpB, sliceUnion);
       // Note: 'forOpA' is removed to simplify test output. A proper loop
       // fusion pass should check the data dependence graph and run memref
       // region analysis to ensure removing 'forOpA' is safe.

diff  --git a/mlir/test/lib/Dialect/Affine/TestLoopMapping.cpp b/mlir/test/lib/Dialect/Affine/TestLoopMapping.cpp
index 5b19c5c804a38..3dc7abb15af17 100644
--- a/mlir/test/lib/Dialect/Affine/TestLoopMapping.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestLoopMapping.cpp
@@ -18,6 +18,7 @@
 #include "mlir/Pass/Pass.h"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 struct TestLoopMappingPass
@@ -33,7 +34,7 @@ struct TestLoopMappingPass
   explicit TestLoopMappingPass() = default;
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, scf::SCFDialect>();
+    registry.insert<affine::AffineDialect, scf::SCFDialect>();
   }
 
   void runOnOperation() override {

diff  --git a/mlir/test/lib/Dialect/Affine/TestLoopPermutation.cpp b/mlir/test/lib/Dialect/Affine/TestLoopPermutation.cpp
index 9183b3ee3c76f..e708b7de690ec 100644
--- a/mlir/test/lib/Dialect/Affine/TestLoopPermutation.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestLoopPermutation.cpp
@@ -18,6 +18,7 @@
 #define PASS_NAME "test-loop-permutation"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 namespace {
 

diff  --git a/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp b/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
index 0a0b1c05885d4..1bf3ce4ceb329 100644
--- a/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
@@ -19,6 +19,7 @@
 #define PASS_NAME "test-affine-reify-value-bounds"
 
 using namespace mlir;
+using namespace mlir::affine;
 using mlir::presburger::BoundType;
 
 namespace {
@@ -36,8 +37,8 @@ struct TestReifyValueBounds
   TestReifyValueBounds(const TestReifyValueBounds &pass) : PassWrapper(pass){};
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry
-        .insert<AffineDialect, tensor::TensorDialect, memref::MemRefDialect>();
+    registry.insert<affine::AffineDialect, tensor::TensorDialect,
+                    memref::MemRefDialect>();
   }
 
   void runOnOperation() override;

diff  --git a/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp b/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
index 5ff4e55a1e96c..85dd0718c9f04 100644
--- a/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
@@ -33,6 +33,7 @@
 #define DEBUG_TYPE "affine-super-vectorizer-test"
 
 using namespace mlir;
+using namespace mlir::affine;
 
 static llvm::cl::OptionCategory clOptionsCategory(DEBUG_TYPE " options");
 
@@ -99,7 +100,7 @@ struct VectorizerTestPass
 
 void VectorizerTestPass::testVectorShapeRatio(llvm::raw_ostream &outs) {
   auto f = getOperation();
-  using matcher::Op;
+  using affine::matcher::Op;
   SmallVector<int64_t, 8> shape(clTestVectorShapeRatio.begin(),
                                 clTestVectorShapeRatio.end());
   auto subVectorType =
@@ -109,7 +110,7 @@ void VectorizerTestPass::testVectorShapeRatio(llvm::raw_ostream &outs) {
   auto filter = [&](Operation &op) {
     assert(subVectorType.getElementType().isF32() &&
            "Only f32 supported for now");
-    if (!matcher::operatesOnSuperVectorsOf(op, subVectorType)) {
+    if (!mlir::matcher::operatesOnSuperVectorsOf(op, subVectorType)) {
       return false;
     }
     if (op.getNumResults() != 1) {
@@ -139,7 +140,7 @@ void VectorizerTestPass::testVectorShapeRatio(llvm::raw_ostream &outs) {
 }
 
 static NestedPattern patternTestSlicingOps() {
-  using matcher::Op;
+  using affine::matcher::Op;
   // Match all operations with the kTestSlicingOpName name.
   auto filter = [](Operation &op) {
     // Just use a custom op name for this test, it makes life easier.
@@ -202,7 +203,7 @@ static bool customOpWithAffineMapAttribute(Operation &op) {
 void VectorizerTestPass::testComposeMaps(llvm::raw_ostream &outs) {
   auto f = getOperation();
 
-  using matcher::Op;
+  using affine::matcher::Op;
   auto pattern = Op(customOpWithAffineMapAttribute);
   SmallVector<NestedMatch, 8> matches;
   pattern.match(f, &matches);

diff  --git a/mlir/test/lib/Dialect/GPU/TestGpuMemoryPromotion.cpp b/mlir/test/lib/Dialect/GPU/TestGpuMemoryPromotion.cpp
index b9523bfbda114..d6c35b4674878 100644
--- a/mlir/test/lib/Dialect/GPU/TestGpuMemoryPromotion.cpp
+++ b/mlir/test/lib/Dialect/GPU/TestGpuMemoryPromotion.cpp
@@ -33,7 +33,8 @@ struct TestGpuMemoryPromotionPass
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestGpuMemoryPromotionPass)
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, memref::MemRefDialect, scf::SCFDialect>();
+    registry.insert<affine::AffineDialect, memref::MemRefDialect,
+                    scf::SCFDialect>();
   }
   StringRef getArgument() const final { return "test-gpu-memory-promotion"; }
   StringRef getDescription() const final {

diff --git a/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp b/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp
index 3f4fed0fd47a0..b5998d9c851e4 100644
--- a/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp
@@ -18,8 +18,8 @@ struct TestDataLayoutPropagationPass
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestDataLayoutPropagationPass)
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry
-        .insert<AffineDialect, linalg::LinalgDialect, tensor::TensorDialect>();
+    registry.insert<affine::AffineDialect, linalg::LinalgDialect,
+                    tensor::TensorDialect>();
   }
 
   StringRef getArgument() const final {

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgDecomposeOps.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgDecomposeOps.cpp
index a6bc828d32e40..311244aeffb90 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgDecomposeOps.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgDecomposeOps.cpp
@@ -26,7 +26,7 @@ struct TestLinalgDecomposeOps
   TestLinalgDecomposeOps(const TestLinalgDecomposeOps &pass)
       : PassWrapper(pass){};
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, linalg::LinalgDialect>();
+    registry.insert<affine::AffineDialect, linalg::LinalgDialect>();
   }
   StringRef getArgument() const final { return "test-linalg-decompose-ops"; }
   StringRef getDescription() const final {

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
index dbd054e080de9..167ed80552067 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
@@ -92,8 +92,8 @@ struct TestLinalgElementwiseFusion
   TestLinalgElementwiseFusion(const TestLinalgElementwiseFusion &pass)
       : PassWrapper(pass) {}
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, linalg::LinalgDialect, memref::MemRefDialect,
-                    tensor::TensorDialect>();
+    registry.insert<affine::AffineDialect, linalg::LinalgDialect,
+                    memref::MemRefDialect, tensor::TensorDialect>();
   }
   StringRef getArgument() const final {
     return "test-linalg-elementwise-fusion-patterns";

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
index 87ffd7ab1263a..f00febcfa9435 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
@@ -73,8 +73,8 @@ struct TestLinalgGreedyFusion
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgGreedyFusion)
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, linalg::LinalgDialect, memref::MemRefDialect,
-                    scf::SCFDialect>();
+    registry.insert<affine::AffineDialect, linalg::LinalgDialect,
+                    memref::MemRefDialect, scf::SCFDialect>();
   }
   StringRef getArgument() const final { return "test-linalg-greedy-fusion"; }
   StringRef getDescription() const final {

diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
index 61f0ab4e5233f..c1d01acf3ad02 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
@@ -39,7 +39,7 @@ struct TestLinalgTransforms
 
   void getDependentDialects(DialectRegistry &registry) const override {
     // clang-format off
-    registry.insert<AffineDialect,
+    registry.insert<affine::AffineDialect,
                     bufferization::BufferizationDialect,
                     memref::MemRefDialect,
                     scf::SCFDialect,

diff --git a/mlir/test/lib/Dialect/Linalg/TestPadFusion.cpp b/mlir/test/lib/Dialect/Linalg/TestPadFusion.cpp
index 2989cff10d386..073e0d8d4e143 100644
--- a/mlir/test/lib/Dialect/Linalg/TestPadFusion.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestPadFusion.cpp
@@ -25,8 +25,8 @@ struct TestPadFusionPass
   MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestPadFusionPass)
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry
-        .insert<AffineDialect, linalg::LinalgDialect, tensor::TensorDialect>();
+    registry.insert<affine::AffineDialect, linalg::LinalgDialect,
+                    tensor::TensorDialect>();
   }
 
   StringRef getArgument() const final { return "test-linalg-pad-fusion"; }

diff --git a/mlir/test/lib/Dialect/MemRef/TestComposeSubView.cpp b/mlir/test/lib/Dialect/MemRef/TestComposeSubView.cpp
index 917b9d2342108..02a9dbbe263f8 100644
--- a/mlir/test/lib/Dialect/MemRef/TestComposeSubView.cpp
+++ b/mlir/test/lib/Dialect/MemRef/TestComposeSubView.cpp
@@ -32,7 +32,7 @@ struct TestComposeSubViewPass
 
 void TestComposeSubViewPass::getDependentDialects(
     DialectRegistry &registry) const {
-  registry.insert<AffineDialect>();
+  registry.insert<affine::AffineDialect>();
 }
 
 void TestComposeSubViewPass::runOnOperation() {

diff --git a/mlir/test/lib/Dialect/MemRef/TestMultiBuffer.cpp b/mlir/test/lib/Dialect/MemRef/TestMultiBuffer.cpp
index 6996ef2dc3179..1ca63bbb2a0e8 100644
--- a/mlir/test/lib/Dialect/MemRef/TestMultiBuffer.cpp
+++ b/mlir/test/lib/Dialect/MemRef/TestMultiBuffer.cpp
@@ -24,7 +24,7 @@ struct TestMultiBufferingPass
   TestMultiBufferingPass(const TestMultiBufferingPass &pass)
       : PassWrapper(pass) {}
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect>();
+    registry.insert<affine::AffineDialect>();
   }
   StringRef getArgument() const final { return "test-multi-buffering"; }
   StringRef getDescription() const final {

diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
index c535cd4d14513..a03f1d5e76d68 100644
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -53,7 +53,7 @@ struct TestVectorToVectorLowering
   }
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect>();
+    registry.insert<affine::AffineDialect>();
   }
 
   Option<bool> unroll{*this, "unroll", llvm::cl::desc("Include unrolling"),
@@ -128,8 +128,8 @@ struct TestVectorContractionPrepareForMMTLowering
   TestVectorContractionPrepareForMMTLowering() = default;
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry
-        .insert<AffineDialect, arith::ArithDialect, vector::VectorDialect>();
+    registry.insert<affine::AffineDialect, arith::ArithDialect,
+                    vector::VectorDialect>();
   }
 
   void runOnOperation() override {
@@ -246,7 +246,7 @@ struct TestVectorTransferUnrollingPatterns
       : PassWrapper(pass) {}
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect>();
+    registry.insert<affine::AffineDialect>();
   }
   StringRef getArgument() const final {
     return "test-vector-transfer-unrolling-patterns";
@@ -305,8 +305,8 @@ struct TestScalarVectorTransferLoweringPatterns
   TestScalarVectorTransferLoweringPatterns() = default;
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, memref::MemRefDialect, tensor::TensorDialect,
-                    vector::VectorDialect>();
+    registry.insert<affine::AffineDialect, memref::MemRefDialect,
+                    tensor::TensorDialect, vector::VectorDialect>();
   }
 
   void runOnOperation() override {
@@ -342,7 +342,7 @@ struct TestVectorTransferCollapseInnerMostContiguousDims
       const TestVectorTransferCollapseInnerMostContiguousDims &pass) = default;
 
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<memref::MemRefDialect, AffineDialect>();
+    registry.insert<memref::MemRefDialect, affine::AffineDialect>();
   }
 
   StringRef getArgument() const final {
@@ -488,7 +488,7 @@ struct TestVectorDistribution
 
   void getDependentDialects(DialectRegistry &registry) const override {
     registry.insert<scf::SCFDialect, memref::MemRefDialect, gpu::GPUDialect,
-                    AffineDialect>();
+                    affine::AffineDialect>();
   }
 
   StringRef getArgument() const final { return "test-vector-warp-distribute"; }

diff --git a/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp b/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp
index c362bcd57722d..dc7286e3bd315 100644
--- a/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp
+++ b/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp
@@ -401,8 +401,9 @@ struct TestTilingInterfacePass
   TestTilingInterfacePass(const TestTilingInterfacePass &pass)
       : PassWrapper(pass) {}
   void getDependentDialects(DialectRegistry &registry) const override {
-    registry.insert<AffineDialect, linalg::LinalgDialect, memref::MemRefDialect,
-                    scf::SCFDialect, tensor::TensorDialect>();
+    registry.insert<affine::AffineDialect, linalg::LinalgDialect,
+                    memref::MemRefDialect, scf::SCFDialect,
+                    tensor::TensorDialect>();
     linalg::registerTilingInterfaceExternalModels(registry);
     tensor::registerTilingInterfaceExternalModels(registry);
   }

diff --git a/mlir/unittests/Analysis/Presburger/Parser.h b/mlir/unittests/Analysis/Presburger/Parser.h
index e80267c5adae2..c2c63730056e7 100644
--- a/mlir/unittests/Analysis/Presburger/Parser.h
+++ b/mlir/unittests/Analysis/Presburger/Parser.h
@@ -30,7 +30,7 @@ namespace presburger {
 /// represents a valid IntegerSet.
 inline IntegerPolyhedron parseIntegerPolyhedron(StringRef str) {
   MLIRContext context(MLIRContext::Threading::DISABLED);
-  return FlatAffineValueConstraints(parseIntegerSet(str, &context));
+  return affine::FlatAffineValueConstraints(parseIntegerSet(str, &context));
 }
 
 /// Parse a list of StringRefs to IntegerRelation and combine them into a


        


More information about the flang-commits mailing list