[Mlir-commits] [mlir] [MLIR][NFC] Stop depending on func.func in affine LoopUtils via &Region (PR #83325)
llvmlistbot at llvm.org
Thu May 30 15:03:55 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-mlir-affine
@llvm/pr-subscribers-mlir-scf
Author: Alexey Z. (last5bits)
<details>
<summary>Changes</summary>
Note: this is an experiment illustrating an alternative approach to PR https://github.com/llvm/llvm-project/pull/82079.
Instead, this patch passes a reference to the outermost region, which makes it possible to use the affine LoopUtils routines from downstream dialects that have their own function-like ops.
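For illustration only (not part of the patch), here is a minimal sketch of how a downstream pass whose function-like op is not `func.func` could drive the proposed entry points. `FunctionOpInterface`, `getFunctionBody()`, and `affine::loopUnrollByFactor` are existing MLIR APIs (the latter with the signature proposed here); the helper function and the unroll factor are assumptions:

```cpp
// Sketch under assumptions: an out-of-tree function-like op that implements
// FunctionOpInterface; the unroll factor of 4 is arbitrary.
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Interfaces/FunctionInterfaces.h"

using namespace mlir;

static void unrollAffineLoops(FunctionOpInterface funcLikeOp) {
  // Any function-like op's body can serve as the top-level region; no
  // dependence on func::FuncOp is needed.
  Region &topRegion = funcLikeOp.getFunctionBody();
  funcLikeOp->walk([&](affine::AffineForOp forOp) {
    // Uses the signature proposed in this PR (top-level region first).
    (void)affine::loopUnrollByFactor(topRegion, forOp, /*unrollFactor=*/4);
  });
}
```

In-tree passes anchored on `func.func` keep working by passing `getOperation().getBody()`, as the updated call sites in the diff below show.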
---
Patch is 42.34 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/83325.diff
16 Files Affected:
- (modified) mlir/include/mlir/Dialect/Affine/LoopUtils.h (+52-18)
- (modified) mlir/include/mlir/Dialect/Affine/Utils.h (+4-1)
- (modified) mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp (+10-6)
- (modified) mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp (+2-1)
- (modified) mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp (+1-1)
- (modified) mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp (+9-6)
- (modified) mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp (+2-1)
- (modified) mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp (+3-1)
- (modified) mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp (+6-2)
- (modified) mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp (+66-58)
- (modified) mlir/lib/Dialect/Affine/Utils/Utils.cpp (+3-2)
- (modified) mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp (+6-3)
- (modified) mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp (+13-8)
- (modified) mlir/test/lib/Dialect/Affine/TestAffineLoopParametricTiling.cpp (+1-1)
- (modified) mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp (+3-2)
- (modified) mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp (+2-1)
``````````diff
diff --git a/mlir/include/mlir/Dialect/Affine/LoopUtils.h b/mlir/include/mlir/Dialect/Affine/LoopUtils.h
index d143954b78fc1..cf87e71d1007e 100644
--- a/mlir/include/mlir/Dialect/Affine/LoopUtils.h
+++ b/mlir/include/mlir/Dialect/Affine/LoopUtils.h
@@ -43,7 +43,10 @@ struct MemRefRegion;
/// Unrolls this for operation completely if the trip count is known to be
/// constant. Returns failure otherwise.
-LogicalResult loopUnrollFull(AffineForOp forOp);
+///
+/// \param topRegion the highest-level region that contains `forOp` (e.g., the
+/// body of the `func.func`)
+LogicalResult loopUnrollFull(Region &topRegion, AffineForOp forOp);
/// Unrolls this for operation by the specified unroll factor. Returns failure
/// if the loop cannot be unrolled either due to restrictions or due to invalid
@@ -51,14 +54,21 @@ LogicalResult loopUnrollFull(AffineForOp forOp);
/// annotates the Ops in each unrolled iteration by applying `annotateFn`.
/// When `cleanUpUnroll` is true, we can ensure the cleanup loop is unrolled
/// regardless of the unroll factor.
+///
+/// \param topRegion the highest-level region that contains `forOp` (e.g., the
+/// body of the `func.func`)
LogicalResult loopUnrollByFactor(
- AffineForOp forOp, uint64_t unrollFactor,
+ Region &topRegion, AffineForOp forOp, uint64_t unrollFactor,
function_ref<void(unsigned, Operation *, OpBuilder)> annotateFn = nullptr,
bool cleanUpUnroll = false);
/// Unrolls this loop by the specified unroll factor or its trip count,
/// whichever is lower.
-LogicalResult loopUnrollUpToFactor(AffineForOp forOp, uint64_t unrollFactor);
+///
+/// \param topRegion the highest-level region that contains `forOp` (e.g., the
+/// body of the `func.func`)
+LogicalResult loopUnrollUpToFactor(Region &topRegion, AffineForOp forOp,
+ uint64_t unrollFactor);
/// Returns true if `loops` is a perfectly nested loop nest, where loops appear
/// in it from outermost to innermost.
@@ -75,34 +85,47 @@ void getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
/// with iteration arguments performing supported reductions and its inner loops
/// can have iteration arguments. Returns success if the loop is successfully
/// unroll-jammed.
-LogicalResult loopUnrollJamByFactor(AffineForOp forOp,
+///
+/// \param topRegion the highest-level region that contains `forOp` (e.g., the
+/// body of the `func.func`)
+LogicalResult loopUnrollJamByFactor(Region &topRegion, AffineForOp forOp,
uint64_t unrollJamFactor);
/// Unrolls and jams this loop by the specified factor or by the trip count (if
/// constant), whichever is lower.
-LogicalResult loopUnrollJamUpToFactor(AffineForOp forOp,
+///
+/// \param topRegion the highest-level region that contains `forOp` (e.g., the
+/// body of the `func.func`)
+LogicalResult loopUnrollJamUpToFactor(Region &topRegion, AffineForOp forOp,
uint64_t unrollJamFactor);
/// Promotes the loop body of a AffineForOp to its containing block if the loop
/// was known to have a single iteration.
-LogicalResult promoteIfSingleIteration(AffineForOp forOp);
+///
+/// \param topRegion the highest-level region that contains `forOp` (e.g., the
+/// body of the `func.func`)
+LogicalResult promoteIfSingleIteration(Region &topRegion, AffineForOp forOp);
-/// Promotes all single iteration AffineForOp's in the Function, i.e., moves
+/// Promotes all single iteration AffineForOp's in the `region`, i.e., moves
/// their body into the containing Block.
-void promoteSingleIterationLoops(func::FuncOp f);
+void promoteSingleIterationLoops(Region ®ion);
/// Skew the operations in an affine.for's body with the specified
/// operation-wise shifts. The shifts are with respect to the original execution
/// order, and are multiplied by the loop 'step' before being applied. If
/// `unrollPrologueEpilogue` is set, fully unroll the prologue and epilogue
/// loops when possible.
-LogicalResult affineForOpBodySkew(AffineForOp forOp, ArrayRef<uint64_t> shifts,
+///
+/// \param topRegion the highest-level region that contains `forOp` (e.g., the
+/// body of the `func.func`)
+LogicalResult affineForOpBodySkew(Region &topRegion, AffineForOp forOp,
+ ArrayRef<uint64_t> shifts,
bool unrollPrologueEpilogue = false);
-/// Identify valid and profitable bands of loops to tile. This is currently just
-/// a temporary placeholder to test the mechanics of tiled code generation.
-/// Returns all maximal outermost perfect loop nests to tile.
-void getTileableBands(func::FuncOp f,
+/// Identify valid and profitable bands of loops to tile inside `region`. This
+/// is currently just a temporary placeholder to test the mechanics of tiled
+/// code generation. Returns all maximal outermost perfect loop nests to tile.
+void getTileableBands(Region ®ion,
std::vector<SmallVector<AffineForOp, 6>> *bands);
/// Tiles the specified band of perfectly nested loops creating tile-space loops
@@ -190,14 +213,21 @@ struct AffineCopyOptions {
/// encountered. For memrefs for whose element types a size in bytes can't be
/// computed (`index` type), their capacity is not accounted for and the
/// `fastMemCapacityBytes` copy option would be non-functional in such cases.
-LogicalResult affineDataCopyGenerate(Block::iterator begin, Block::iterator end,
+///
+/// \param topRegion the highest-level region (e.g., the body of the
+/// `func.func`)
+LogicalResult affineDataCopyGenerate(Region &topRegion, Block::iterator begin,
+ Block::iterator end,
const AffineCopyOptions ©Options,
std::optional<Value> filterMemRef,
DenseSet<Operation *> ©Nests);
/// A convenience version of affineDataCopyGenerate for all ops in the body of
/// an AffineForOp.
-LogicalResult affineDataCopyGenerate(AffineForOp forOp,
+///
+/// \param topRegion the highest-level region that contains `forOp` (e.g., the
+/// body of the `func.func`)
+LogicalResult affineDataCopyGenerate(Region &topRegion, AffineForOp forOp,
const AffineCopyOptions ©Options,
std::optional<Value> filterMemRef,
DenseSet<Operation *> ©Nests);
@@ -225,7 +255,11 @@ struct CopyGenerateResult {
///
/// Also note that certain options in `copyOptions` aren't looked at anymore,
/// like slowMemorySpace.
-LogicalResult generateCopyForMemRegion(const MemRefRegion &memrefRegion,
+///
+/// \param topRegion the highest-level region (e.g., the body of the
+/// `func.func`)
+LogicalResult generateCopyForMemRegion(Region &topRegion,
+ const MemRefRegion &memrefRegion,
Operation *analyzedOp,
const AffineCopyOptions ©Options,
CopyGenerateResult &result);
@@ -272,8 +306,8 @@ LogicalResult coalesceLoops(MutableArrayRef<AffineForOp> loops);
void mapLoopToProcessorIds(scf::ForOp forOp, ArrayRef<Value> processorId,
ArrayRef<Value> numProcessors);
-/// Gathers all AffineForOps in 'func.func' grouped by loop depth.
-void gatherLoops(func::FuncOp func,
+/// Gathers all AffineForOps in `region` grouped by loop depth.
+void gatherLoops(Region ®ion,
std::vector<SmallVector<AffineForOp, 2>> &depthToLoops);
/// Creates an AffineForOp while ensuring that the lower and upper bounds are
diff --git a/mlir/include/mlir/Dialect/Affine/Utils.h b/mlir/include/mlir/Dialect/Affine/Utils.h
index 7f25db029781c..8058793da2f11 100644
--- a/mlir/include/mlir/Dialect/Affine/Utils.h
+++ b/mlir/include/mlir/Dialect/Affine/Utils.h
@@ -174,7 +174,10 @@ void normalizeAffineParallel(AffineParallelOp op);
/// loop has been normalized (or is already in the normal form). If
/// `promoteSingleIter` is true, the loop is simply promoted if it has a single
/// iteration.
-LogicalResult normalizeAffineFor(AffineForOp op,
+///
+/// \param topRegion the highest-level region that contains `op` (e.g., the body
+/// of the `func.func`)
+LogicalResult normalizeAffineFor(Region &topRegion, AffineForOp op,
bool promoteSingleIter = false);
/// Traverse `e` and return an AffineExpr where all occurrences of `dim` have
diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
index 331b0f1b2c2b1..a6fdc36aa5bfc 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
@@ -116,6 +116,9 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
AffineCopyOptions copyOptions = {generateDma, slowMemorySpace,
fastMemorySpace, tagMemorySpace,
fastMemCapacityBytes};
+ auto &topRegion = block->getParent()
+ ->getParentOfType<FunctionOpInterface>()
+ .getFunctionBody();
// Every affine.for op in the block starts and ends a block range for copying;
// in addition, a contiguous sequence of operations starting with a
@@ -139,8 +142,9 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
// If you hit a non-copy for loop, we will split there.
if ((forOp = dyn_cast<AffineForOp>(&*it)) && copyNests.count(forOp) == 0) {
// Perform the copying up unti this 'for' op first.
- (void)affineDataCopyGenerate(/*begin=*/curBegin, /*end=*/it, copyOptions,
- /*filterMemRef=*/std::nullopt, copyNests);
+ (void)affineDataCopyGenerate(topRegion, /*begin=*/curBegin, /*end=*/it,
+ copyOptions, /*filterMemRef=*/std::nullopt,
+ copyNests);
// Returns true if the footprint is known to exceed capacity.
auto exceedsCapacity = [&](AffineForOp forOp) {
@@ -172,8 +176,8 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
// Inner loop copies have their own scope - we don't thus update
// consumed capacity. The footprint check above guarantees this inner
// loop's footprint fits.
- (void)affineDataCopyGenerate(/*begin=*/it, /*end=*/std::next(it),
- copyOptions,
+ (void)affineDataCopyGenerate(topRegion, /*begin=*/it,
+ /*end=*/std::next(it), copyOptions,
/*filterMemRef=*/std::nullopt, copyNests);
}
// Get to the next load or store op after 'forOp'.
@@ -196,7 +200,7 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
assert(!curBegin->hasTrait<OpTrait::IsTerminator>() &&
"can't be a terminator");
// Exclude the affine.yield - hence, the std::prev.
- (void)affineDataCopyGenerate(/*begin=*/curBegin,
+ (void)affineDataCopyGenerate(topRegion, /*begin=*/curBegin,
/*end=*/std::prev(block->end()), copyOptions,
/*filterMemRef=*/std::nullopt, copyNests);
}
@@ -225,7 +229,7 @@ void AffineDataCopyGeneration::runOnOperation() {
// continuation of the walk or the collection of load/store ops.
nest->walk([&](Operation *op) {
if (auto forOp = dyn_cast<AffineForOp>(op))
- (void)promoteIfSingleIteration(forOp);
+ (void)promoteIfSingleIteration(f.getBody(), forOp);
else if (isa<AffineLoadOp, AffineStoreOp>(op))
copyOps.push_back(op);
});
diff --git a/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp b/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
index 5cc38f7051726..6773027461f19 100644
--- a/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
@@ -38,11 +38,12 @@ struct AffineLoopNormalizePass
}
void runOnOperation() override {
+ auto &topRegion = getOperation().getBody();
getOperation().walk([&](Operation *op) {
if (auto affineParallel = dyn_cast<AffineParallelOp>(op))
normalizeAffineParallel(affineParallel);
else if (auto affineFor = dyn_cast<AffineForOp>(op))
- (void)normalizeAffineFor(affineFor, promoteSingleIter);
+ (void)normalizeAffineFor(topRegion, affineFor, promoteSingleIter);
});
}
};
diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
index 2650a06d198ea..022d327b197a4 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
@@ -238,7 +238,7 @@ void LoopTiling::getTileSizes(ArrayRef<AffineForOp> band,
void LoopTiling::runOnOperation() {
// Bands of loops to tile.
std::vector<SmallVector<AffineForOp, 6>> bands;
- getTileableBands(getOperation(), &bands);
+ getTileableBands(getOperation().getBody(), &bands);
// Tile each band.
for (auto &band : bands) {
diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
index 57df7ada91654..278966641e9db 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
@@ -108,7 +108,7 @@ void LoopUnroll::runOnOperation() {
loops.push_back(forOp);
});
for (auto forOp : loops)
- (void)loopUnrollFull(forOp);
+ (void)loopUnrollFull(func.getBody(), forOp);
return;
}
@@ -131,18 +131,21 @@ void LoopUnroll::runOnOperation() {
/// Unrolls a 'affine.for' op. Returns success if the loop was unrolled,
/// failure otherwise. The default unroll factor is 4.
LogicalResult LoopUnroll::runOnAffineForOp(AffineForOp forOp) {
+ auto &topRegion =
+ forOp->getParentOfType<FunctionOpInterface>().getFunctionBody();
+
// Use the function callback if one was provided.
if (getUnrollFactor)
- return loopUnrollByFactor(forOp, getUnrollFactor(forOp),
+ return loopUnrollByFactor(topRegion, forOp, getUnrollFactor(forOp),
/*annotateFn=*/nullptr, cleanUpUnroll);
// Unroll completely if full loop unroll was specified.
if (unrollFull)
- return loopUnrollFull(forOp);
+ return loopUnrollFull(topRegion, forOp);
// Otherwise, unroll by the given unroll factor.
if (unrollUpToFactor)
- return loopUnrollUpToFactor(forOp, unrollFactor);
- return loopUnrollByFactor(forOp, unrollFactor, /*annotateFn=*/nullptr,
- cleanUpUnroll);
+ return loopUnrollUpToFactor(topRegion, forOp, unrollFactor);
+ return loopUnrollByFactor(topRegion, forOp, unrollFactor,
+ /*annotateFn=*/nullptr, cleanUpUnroll);
}
std::unique_ptr<OperationPass<func::FuncOp>> mlir::affine::createLoopUnrollPass(
diff --git a/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
index a79160df7575a..a53299807ea83 100644
--- a/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
@@ -90,6 +90,7 @@ void LoopUnrollAndJam::runOnOperation() {
// unroll-and-jammed by this pass. However, runOnAffineForOp can be called on
// any for operation.
auto &entryBlock = getOperation().front();
+ auto &topRegion = getOperation().getBody();
if (auto forOp = dyn_cast<AffineForOp>(entryBlock.front()))
- (void)loopUnrollJamByFactor(forOp, unrollJamFactor);
+ (void)loopUnrollJamByFactor(topRegion, forOp, unrollJamFactor);
}
diff --git a/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
index deb530b4cf1c9..4157c18b69700 100644
--- a/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
@@ -373,7 +373,9 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
return;
}
- if (failed(affineForOpBodySkew(forOp, shifts))) {
+ auto &topRegion =
+ forOp->getParentOfType<FunctionOpInterface>().getFunctionBody();
+ if (failed(affineForOpBodySkew(topRegion, forOp, shifts))) {
LLVM_DEBUG(llvm::dbgs() << "op body skewing failed - unexpected\n";);
return;
}
diff --git a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
index 7f3e43d0b4cd3..ba0c2aff0ca5a 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
@@ -18,6 +18,7 @@
#include "mlir/Dialect/Affine/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
@@ -463,9 +464,12 @@ void mlir::affine::fuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
// Patch reduction loop - only ones that are sibling-fused with the
// destination loop - into the parent loop.
(void)promoteSingleIterReductionLoop(forOp, true);
- else
+ else {
// Promote any single iteration slice loops.
- (void)promoteIfSingleIteration(forOp);
+ auto &topRegion =
+ forOp->getParentOfType<FunctionOpInterface>().getFunctionBody();
+ (void)promoteIfSingleIteration(topRegion, forOp);
+ }
}
}
diff --git a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
index 268050a30e002..ccac8acf3f65e 100644
--- a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
@@ -128,7 +128,8 @@ static void replaceIterArgsAndYieldResults(AffineForOp forOp) {
/// Promotes the loop body of a forOp to its containing block if the forOp
/// was known to have a single iteration.
-LogicalResult mlir::affine::promoteIfSingleIteration(AffineForOp forOp) {
+LogicalResult mlir::affine::promoteIfSingleIteration(Region &topRegion,
+ AffineForOp forOp) {
std::optional<uint64_t> tripCount = getConstantTripCount(forOp);
if (!tripCount || *tripCount != 1)
return failure();
@@ -142,7 +143,7 @@ LogicalResult mlir::affine::promoteIfSingleIteration(AffineForOp forOp) {
auto *parentBlock = forOp->getBlock();
if (!iv.use_empty()) {
if (forOp.hasConstantLowerBound()) {
- OpBuilder topBuilder(forOp->getParentOfType<func::FuncOp>().getBody());
+ OpBuilder topBuilder(topRegion);
auto constOp = topBuilder.create<arith::ConstantIndexOp>(
forOp.getLoc(), forOp.getConstantLowerBound());
iv.replaceAllUsesWith(constOp);
@@ -182,7 +183,7 @@ LogicalResult mlir::affine::promoteIfSingleIteration(AffineForOp forOp) {
static AffineForOp generateShiftedLoop(
AffineMap lbMap, AffineMap ubMap,
const std::vector<std::pair<uint64_t, ArrayRef<Operation *>>> &opGroupQueue,
- unsigned offset, AffineForOp srcForOp, OpBuilder b) {
+ unsigned offset, AffineForOp srcForOp, Region &topRegion, OpBuilder b) {
auto lbOperands = srcForOp.getLowerBoundOperands();
auto ubOperands = srcForOp.getUpperBoundOperands();
@@ -218,7 +219,7 @@ static AffineForOp generateShiftedLoop(
for (auto *op : ops)
bodyBuilder.clone(*op, operandMap);
};
- if (succeeded(promoteIfSingleIteration(loopChunk)))
+ if (succeeded(promoteIfSingleIteration(topRegion, loopChunk)))
return AffineForOp();
return loopChunk;
}
@@ -234,7 +235,8 @@ static AffineForOp generateShiftedLoop(
// asserts preservation of SSA dominance. A check for that as well as that for
// memory-based dependence preservation check rests with the users of this
// method.
-LogicalResult mlir::affine::affineForOpBodySkew(AffineForOp forOp,
+LogicalResult mlir::affine::affineForOpBodySkew(Region &topRegion,
+ AffineForOp forOp,
ArrayRef<uint64_t> shifts,
bool unrollPrologueEpilogue) {
...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/83325