[Mlir-commits] [mlir] [mlir] Remove unused local variables (NFC) (PR #138481)
Kazu Hirata
llvmlistbot at llvm.org
Sun May 4 20:44:11 PDT 2025
https://github.com/kazutakahirata created https://github.com/llvm/llvm-project/pull/138481
None
From 6d94ea851eae156d0aceb2cf4bbfca8f529b959e Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Sun, 4 May 2025 20:34:35 -0700
Subject: [PATCH] [mlir] Remove unused local variables (NFC)
---
mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp | 1 -
mlir/lib/Dialect/Affine/Analysis/Utils.cpp | 6 ------
mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp | 1 -
mlir/lib/Dialect/Affine/Utils/Utils.cpp | 1 -
mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp | 1 -
mlir/lib/Dialect/EmitC/IR/EmitC.cpp | 1 -
mlir/lib/Dialect/GPU/IR/GPUDialect.cpp | 3 ---
mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp | 1 -
mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp | 2 --
mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp | 2 +-
mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp | 1 -
mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp | 1 -
mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp | 1 -
mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp | 1 -
.../lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp | 1 -
mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp | 1 -
mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp | 1 -
.../Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp | 1 -
mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp | 4 ----
mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp | 2 --
mlir/lib/Dialect/SPIRV/IR/SPIRVParsingUtils.h | 1 -
.../Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp | 1 -
mlir/lib/Dialect/Tensor/IR/TensorOps.cpp | 2 --
23 files changed, 1 insertion(+), 36 deletions(-)
diff --git a/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp b/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp
index 9f64abb5a8860..b1527a5c3f838 100644
--- a/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp
+++ b/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp
@@ -175,7 +175,6 @@ struct TransferReadLowering final : OpRewritePattern<vector::TransferReadOp> {
// It computes the multiplied sizes of all dimensions instead of taking
// the maximum of each dimension size * stride.
SmallVector<AffineExpr> productExpressions;
- SmallVector<Value> productResults;
unsigned sourceRank = cast<ShapedType>(src.getType()).getRank();
SmallVector<AffineExpr> symbols(2 * sourceRank);
diff --git a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
index 86aba7b187535..3144ca723df90 100644
--- a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
@@ -1356,12 +1356,6 @@ std::optional<int64_t> MemRefRegion::getRegionSize() {
return false;
}
- // Indices to use for the DmaStart op.
- // Indices for the original memref being DMAed from/to.
- SmallVector<Value, 4> memIndices;
- // Indices for the faster buffer being DMAed into/from.
- SmallVector<Value, 4> bufIndices;
-
// Compute the extents of the buffer.
std::optional<int64_t> numElements = getConstantBoundingSizeAndShape();
if (!numElements) {
diff --git a/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp b/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp
index 43d37ee3332ef..c9fe4474a68fa 100644
--- a/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp
+++ b/mlir/lib/Dialect/Affine/TransformOps/AffineTransformOps.cpp
@@ -118,7 +118,6 @@ SimplifyBoundedAffineOpsOp::apply(transform::TransformRewriter &rewriter,
}
targets.push_back(target);
}
- SmallVector<Operation *> transformed;
RewritePatternSet patterns(getContext());
// Canonicalization patterns are needed so that affine.apply ops are composed
// with the remaining affine.min/max ops.
diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
index ef470c30e680e..cde8223107859 100644
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -1808,7 +1808,6 @@ mlir::affine::normalizeMemRef(memref::ReinterpretCastOp reinterpretCastOp) {
ArrayRef<int64_t> oldShape = memrefType.getShape();
ValueRange oldSizes = reinterpretCastOp.getSizes();
unsigned idx = 0;
- SmallVector<int64_t> newStaticSizes;
OpBuilder b(reinterpretCastOp);
// Collect the map operands which will be used to compute the new normalized
// memref shape.
diff --git a/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp b/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp
index b74df4ff6060f..0439c199b934e 100644
--- a/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/ViewLikeInterfaceUtils.cpp
@@ -41,7 +41,6 @@ LogicalResult mlir::affine::mergeOffsetsSizesAndStrides(
combinedStrides[i] = producerStrides[i];
continue;
}
- SmallVector<OpFoldResult> offsetSymbols, strideSymbols;
// The combined offset is computed as
// producer_offset + consumer_offset * producer_strides.
combinedOffsets[i] = makeComposedFoldedAffineApply(
diff --git a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
index 31b9f64a76cf2..1709654b90138 100644
--- a/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
+++ b/mlir/lib/Dialect/EmitC/IR/EmitC.cpp
@@ -465,7 +465,6 @@ ParseResult ForOp::parse(OpAsmParser &parser, OperationState &result) {
// Parse the optional initial iteration arguments.
SmallVector<OpAsmParser::Argument, 4> regionArgs;
- SmallVector<OpAsmParser::UnresolvedOperand, 4> operands;
regionArgs.push_back(inductionVariable);
// Parse optional type, else assume Index.
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index f20126618060a..84e3071946f59 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -936,9 +936,6 @@ ParseResult LaunchOp::parse(OpAsmParser &parser, OperationState &result) {
SmallVector<OpAsmParser::UnresolvedOperand, LaunchOp::kNumConfigOperands>
sizes(LaunchOp::kNumConfigOperands);
- // Actual (data) operands passed to the kernel.
- SmallVector<OpAsmParser::UnresolvedOperand, 4> dataOperands;
-
// Region arguments to be created.
SmallVector<OpAsmParser::UnresolvedOperand, 16> regionArgs(
LaunchOp::kNumConfigRegionAttributes);
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 089ccc6680e48..fce0751430305 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1416,7 +1416,6 @@ static void addBodyWithPayloadOp(OpAsmParser &parser, OperationState &result,
Region *body = result.addRegion();
Block &block = body->emplaceBlock();
b.setInsertionPointToStart(&block);
- SmallVector<Value> bbArgs;
for (auto &operand : operands) {
block.addArgument(
llvm::cast<ShapedType>(operand.getType()).getElementType(),
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index a9b5c2a0f3c18..f6ca109b84f9e 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -2288,7 +2288,6 @@ transform::ScalarizeOp::applyToOne(transform::TransformRewriter &rewriter,
}
return tileSizes;
});
- SmallVector<int64_t> emptyTileSizes;
rewriter.setInsertionPoint(target);
FailureOr<scf::SCFTilingResult> maybeTilingResult = tileUsingSCF(
rewriter, cast<TilingInterface>(target.getOperation()), tilingOptions);
@@ -2347,7 +2346,6 @@ transform::RewriteInDestinationPassingStyleOp::applyToOne(
transform::TransformRewriter &rewriter, Operation *target,
transform::ApplyToEachResultList &results,
transform::TransformState &state) {
- SmallVector<Operation *> res;
rewriter.setInsertionPoint(target);
FailureOr<Operation *> maybeResult =
TypeSwitch<Operation *, FailureOr<Operation *>>(target)
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 8c8f8594b81af..1f5af39e604e7 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -869,7 +869,7 @@ fuseWithReshapeByExpansion(LinalgOp linalgOp, Operation *reshapeOp,
"preconditions for fuse operation failed");
Location loc = linalgOp.getLoc();
- SmallVector<OpFoldResult> expandedShape, collapsedShape;
+ SmallVector<OpFoldResult> expandedShape;
SmallVector<AffineMap, 4> reassociationIndices;
Value src;
if (auto expandingReshapeOp = dyn_cast<tensor::ExpandShapeOp>(reshapeOp)) {
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 223d728b0b27d..fcfb499bb1332 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -85,7 +85,6 @@ getShapeDefiningLoopRange(LinalgOp op, unsigned loopDepth,
<< opOperand.getOperandNumber() << "\n");
LLVM_DEBUG(llvm::dbgs()
<< "getShapeDefiningLoopRange map: " << map << "\n");
- SmallVector<Value, 8> shapeRanges(map.getNumResults(), nullptr);
for (const auto &en : llvm::enumerate(map.getResults())) {
auto dimExpr = dyn_cast<AffineDimExpr>(en.value());
if (!dimExpr)
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
index f0215742f2cf8..d599ddd220dde 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
@@ -270,7 +270,6 @@ FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
partialSizes.push_back(
b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
}
- SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamic);
// If a callback is not specified, then use the default implementation for
// allocating the promoted buffer.
std::optional<Value> fullLocalView =
diff --git a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
index faa7bbf9d168a..0cc840403a020 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/TilingInterfaceImpl.cpp
@@ -1187,7 +1187,6 @@ struct UnPackOpTiling
loc, unPackOp.getDest(), outputOffsets, outputSizes, strides);
tiledOperands.push_back(extractDestSlice);
- SmallVector<OpFoldResult> inputOffsets, inputSizes;
strides.append(unPackOp.getSourceRank() - outputRank, oneAttr);
// Create slice of the source operand.
auto extractSourceSlice = b.create<tensor::ExtractSliceOp>(
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 6f10a31c15626..a0237c18cf2fe 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -68,7 +68,6 @@ Type mlir::memref::getTensorTypeFromMemRefType(Type type) {
OpFoldResult memref::getMixedSize(OpBuilder &builder, Location loc, Value value,
int64_t dim) {
auto memrefType = llvm::cast<MemRefType>(value.getType());
- SmallVector<OpFoldResult> result;
if (memrefType.isDynamicDim(dim))
return builder.createOrFold<memref::DimOp>(loc, value, dim);
diff --git a/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp b/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
index 2d9372ef1c609..e9a80be87a0f7 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ExpandStridedMetadata.cpp
@@ -505,7 +505,6 @@ getCollapsedStride(memref::CollapseShapeOp collapseShape, OpBuilder &builder,
auto [strides, offset] = sourceType.getStridesAndOffset();
- SmallVector<OpFoldResult> groupStrides;
ArrayRef<int64_t> srcShape = sourceType.getShape();
OpFoldResult lastValidStride = nullptr;
diff --git a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
index ec55b9e561914..e4fb3f9bb87ed 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp
@@ -168,7 +168,6 @@ resolveSourceIndicesCollapseShape(Location loc, PatternRewriter &rewriter,
ValueRange indices,
SmallVectorImpl<Value> &sourceIndices) {
int64_t cnt = 0;
- SmallVector<Value> tmp(indices.size());
SmallVector<OpFoldResult> dynamicIndices;
for (ArrayRef<int64_t> groups : collapseShapeOp.getReassociationIndices()) {
assert(!groups.empty() && "association indices groups cannot be empty");
diff --git a/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp b/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp
index 7b3107a6e6204..80f4c5cd5afca 100644
--- a/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp
+++ b/mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp
@@ -409,7 +409,6 @@ MeshSharding getSharding(OpResult result, const ShardingOption &shardingOption,
// process the split axes
for (auto it : llvm::enumerate(map.getResults())) {
- SmallVector<MeshAxis> tmp_axes;
AffineExpr expr = it.value();
// `expr` must be an `AffineDimExpr` because `map` is verified by
// isProjectedPermutation
diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
index cf62ee8bc45b5..d6a9d8f6401f1 100644
--- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -392,7 +392,6 @@ struct IndexSwitchOpInterface
int64_t resultNum = cast<OpResult>(value).getResultNumber();
// Helper function to get buffer type of a case.
- SmallVector<BaseMemRefType> yieldedTypes;
auto getYieldedBufferType = [&](Block &b) -> FailureOr<BaseMemRefType> {
auto yieldOp = cast<scf::YieldOp>(b.getTerminator());
Value yieldedValue = yieldOp->getOperand(resultNum);
diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
index 7edf19689d2e1..0cd7da5db9163 100644
--- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
@@ -483,8 +483,6 @@ static LogicalResult generateLoopNestUsingForallOp(
assert(loopRanges.size() == tileSizes.size() &&
"expected as many tile sizes as loop ranges");
OpBuilder::InsertionGuard guard(rewriter);
- SmallVector<OpFoldResult> offsets(loopRanges.size()),
- sizes(loopRanges.size());
std::optional<ArrayAttr> mappingAttr;
if (!mappingVector.empty())
@@ -865,7 +863,6 @@ FailureOr<LoopLikeOpInterface> yieldTiledValuesAndReplaceLoop(
static LogicalResult addInitOperandsToLoopNest(
RewriterBase &rewriter, MutableArrayRef<LoopLikeOpInterface> loops,
ValueRange newInitValues, YieldTiledValuesFn getNewTiledYieldsFn) {
- SmallVector<scf::ForOp> newLoops;
if (loops.empty())
return success();
OpBuilder::InsertionGuard g(rewriter);
@@ -1535,7 +1532,6 @@ mlir::scf::tileConsumerAndFuseProducersUsingSCF(
// 1. First tile the consumer.
SetVector<Operation *> fusedProducers, tiledAndFusedOps;
- llvm::SmallDenseMap<Value, size_t> origProducerToLoopResultNum;
FailureOr<scf::SCFTilingResult> tilingResult =
tileUsingSCF(rewriter, consumer, options.tilingOptions);
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
index 097a18f8a70eb..3d2cb1dd7a032 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
@@ -776,8 +776,6 @@ void spirv::EntryPointOp::build(OpBuilder &builder, OperationState &state,
ParseResult spirv::EntryPointOp::parse(OpAsmParser &parser,
OperationState &result) {
spirv::ExecutionModel execModel;
- SmallVector<OpAsmParser::UnresolvedOperand, 0> identifiers;
- SmallVector<Type, 0> idTypes;
SmallVector<Attribute, 4> interfaceVars;
FlatSymbolRefAttr fn;
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVParsingUtils.h b/mlir/lib/Dialect/SPIRV/IR/SPIRVParsingUtils.h
index 858b94f7be8b0..f28d386f8874d 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVParsingUtils.h
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVParsingUtils.h
@@ -49,7 +49,6 @@ ParseResult
parseEnumKeywordAttr(EnumClass &value, ParserType &parser,
StringRef attrName = spirv::attributeName<EnumClass>()) {
StringRef keyword;
- SmallVector<NamedAttribute, 1> attr;
auto loc = parser.getCurrentLocation();
if (parser.parseKeyword(&keyword))
return failure();
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
index a988b2f4f1f4a..01651b1f0ac9c 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
@@ -1031,7 +1031,6 @@ static LogicalResult rewrite2To4SpMM(PatternRewriter &rewriter,
.getAsyncToken();
token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnC)
.getAsyncToken();
- SmallVector<Value> newDynamicSizes;
token = genDeallocMemRef(rewriter, loc, buffer1, token);
token = genDeallocMemRef(rewriter, loc, buffer2, token);
token = genDeallocMemRef(rewriter, loc, buffer3, token);
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index b42e60d5cebd7..29da32cd1791c 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -60,7 +60,6 @@ Operation *TensorDialect::materializeConstant(OpBuilder &builder,
OpFoldResult tensor::getMixedSize(OpBuilder &builder, Location loc, Value value,
int64_t dim) {
auto tensorType = llvm::cast<RankedTensorType>(value.getType());
- SmallVector<OpFoldResult> result;
if (tensorType.isDynamicDim(dim))
return builder.createOrFold<tensor::DimOp>(loc, value, dim);
@@ -1703,7 +1702,6 @@ OpFoldResult ReshapeOp::fold(FoldAdaptor adaptor) {
if (auto dimOp = element.getDefiningOp<tensor::DimOp>()) {
dynamicNoop &= dimOp.getSource() == source;
- APSInt dim;
auto cst = getConstantIntValue(dimOp.getIndex());
dynamicNoop &=
cst.has_value() && cst.value() == static_cast<int64_t>(id);