[Mlir-commits] [mlir] a2c9d4b - [mlir][sparse] Introduce proper sparsification passes
Aart Bik
llvmlistbot at llvm.org
Tue May 4 17:10:23 PDT 2021
Author: Aart Bik
Date: 2021-05-04T17:10:09-07:00
New Revision: a2c9d4bb04a91c947d17672acbda56fae09173be
URL: https://github.com/llvm/llvm-project/commit/a2c9d4bb04a91c947d17672acbda56fae09173be
DIFF: https://github.com/llvm/llvm-project/commit/a2c9d4bb04a91c947d17672acbda56fae09173be.diff
LOG: [mlir][sparse] Introduce proper sparsification passes
This revision migrates more code from Linalg into the new permanent home of
SparseTensor. It replaces the test passes with proper compiler passes.
NOTE: the actual removal of the last glue and clutter in Linalg will follow
Reviewed By: bixia
Differential Revision: https://reviews.llvm.org/D101811
Added:
mlir/include/mlir/Dialect/SparseTensor/Transforms/CMakeLists.txt
mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
mlir/test/Dialect/SparseTensor/conversion.mlir
mlir/test/Dialect/SparseTensor/sparse_1d.mlir
mlir/test/Dialect/SparseTensor/sparse_2d.mlir
mlir/test/Dialect/SparseTensor/sparse_3d.mlir
mlir/test/Dialect/SparseTensor/sparse_invalid.mlir
mlir/test/Dialect/SparseTensor/sparse_lower.mlir
mlir/test/Dialect/SparseTensor/sparse_nd.mlir
mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
mlir/test/Dialect/SparseTensor/sparse_storage.mlir
mlir/test/Dialect/SparseTensor/sparse_vector.mlir
Modified:
mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt
mlir/include/mlir/InitAllPasses.h
mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
mlir/test/lib/Transforms/CMakeLists.txt
mlir/tools/mlir-opt/mlir-opt.cpp
Removed:
mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h
mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp
mlir/test/Dialect/Linalg/sparse_1d.mlir
mlir/test/Dialect/Linalg/sparse_2d.mlir
mlir/test/Dialect/Linalg/sparse_3d.mlir
mlir/test/Dialect/Linalg/sparse_invalid.mlir
mlir/test/Dialect/Linalg/sparse_lower.mlir
mlir/test/Dialect/Linalg/sparse_nd.mlir
mlir/test/Dialect/Linalg/sparse_parallel.mlir
mlir/test/Dialect/Linalg/sparse_storage.mlir
mlir/test/Dialect/Linalg/sparse_vector.mlir
mlir/test/Dialect/SparseTensor/lowering.mlir
mlir/test/lib/Transforms/TestSparsification.cpp
################################################################################
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index 7c7ffb2dde577..bc8775a169d9d 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -1058,80 +1058,6 @@ LogicalResult applyStagedPatterns(
const FrozenRewritePatternSet &stage2Patterns,
function_ref<LogicalResult(Operation *)> stage3Lambda = nullptr);
-//===----------------------------------------------------------------------===//
-// Support for sparse tensor code generation.
-//
-// The sparse compiler part of MLIR lowers a tensor expression formulated as a
-// Linalg operation into a sequence of loops depending on what dimensions of the
-// tensors are marked dense or sparse. The generated code distinguishes between:
-// (1) for-loops that iterate over a single dense dimension,
-// (2) for-loops that iterate over a single sparse dimension,
-// (3) while-loops that co-iterate over several sparse dimensions.
-// The for-loops may be subsequently optimized for parallel or vector execution.
-//
-// For more details, the Dialect/Linalg/Transforms/Sparsification.cpp file.
-//===----------------------------------------------------------------------===//
-
-/// Defines a parallelization strategy. Any implicit loop in the Linalg
-/// operation that is marked "parallel" (thus not "reduction") is a candidate
-/// for parallelization. The loop is made parallel if (1) allowed by the
-/// strategy (e.g., AnyStorageOuterLoop considers either a dense or sparse
-/// outermost loop only), and (2) the generated code is an actual for-loop
-/// (and not a co-iterating while-loop).
-enum class SparseParallelizationStrategy {
- kNone,
- kDenseOuterLoop,
- kAnyStorageOuterLoop,
- kDenseAnyLoop,
- kAnyStorageAnyLoop
- // TODO: support reduction parallelization too?
-};
-
-/// Defines a vectorization strategy. Any implicit inner loop in the Linalg
-/// operation is a candidate (full SIMD for "parallel" loops and horizontal
-/// SIMD for "reduction" loops). A loop is actually vectorized if (1) allowed
-/// by the strategy, and (2) the emitted code is an actual for-loop (and not
-/// a co-iterating while-loop).
-enum class SparseVectorizationStrategy {
- kNone,
- kDenseInnerLoop,
- kAnyStorageInnerLoop
-};
-
-/// Defines a type for "pointer" and "index" storage in the sparse storage
-/// scheme, with a choice between the native platform-dependent index width
-/// or any of 64-/32-/16-/8-bit integers. A narrow width obviously reduces
-/// the memory footprint of the sparse storage scheme, but the width should
-/// suffice to define the total required range (viz. the maximum number of
-/// stored entries per indirection level for the "pointers" and the maximum
-/// value of each tensor index over all dimensions for the "indices").
-enum class SparseIntType { kNative, kI64, kI32, kI16, kI8 };
-
-/// Sparsification options.
-struct SparsificationOptions {
- SparsificationOptions(SparseParallelizationStrategy p,
- SparseVectorizationStrategy v, unsigned vl,
- SparseIntType pt, SparseIntType it, bool fo)
- : parallelizationStrategy(p), vectorizationStrategy(v), vectorLength(vl),
- ptrType(pt), indType(it), fastOutput(fo) {}
- SparsificationOptions()
- : SparsificationOptions(SparseParallelizationStrategy::kNone,
- SparseVectorizationStrategy::kNone, 1u,
- SparseIntType::kNative, SparseIntType::kNative,
- false) {}
- SparseParallelizationStrategy parallelizationStrategy;
- SparseVectorizationStrategy vectorizationStrategy;
- unsigned vectorLength;
- SparseIntType ptrType;
- SparseIntType indType;
- bool fastOutput; // experimental: fast output buffers
-};
-
-/// Sets up sparsification rewriting rules with the given options.
-void populateSparsificationPatterns(
- RewritePatternSet &patterns,
- const SparsificationOptions &options = SparsificationOptions());
-
} // namespace linalg
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt b/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt
index f33061b2d87cf..9f57627c321fb 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt
@@ -1 +1,2 @@
add_subdirectory(IR)
+add_subdirectory(Transforms)
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/CMakeLists.txt b/mlir/include/mlir/Dialect/SparseTensor/Transforms/CMakeLists.txt
new file mode 100644
index 0000000000000..2d9eaa567c8f7
--- /dev/null
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(LLVM_TARGET_DEFINITIONS Passes.td)
+mlir_tablegen(Passes.h.inc -gen-pass-decls -name SparseTensor)
+add_public_tablegen_target(MLIRSparseTensorPassIncGen)
+
+add_mlir_doc(Passes SparseTensorPasses ./ -gen-pass-doc)
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
new file mode 100644
index 0000000000000..bfc1a31a98298
--- /dev/null
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
@@ -0,0 +1,95 @@
+//===- Passes.h - Sparse tensor pass entry points ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes of all sparse tensor passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES_H_
+
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Pass/Pass.h"
+
+namespace mlir {
+
+/// Defines a parallelization strategy. Any independent loop is a candidate
+/// for parallelization. The loop is made parallel if (1) allowed by the
+/// strategy (e.g., AnyStorageOuterLoop considers either a dense or sparse
+/// outermost loop only), and (2) the generated code is an actual for-loop
+/// (and not a co-iterating while-loop).
+enum class SparseParallelizationStrategy {
+ kNone,
+ kDenseOuterLoop,
+ kAnyStorageOuterLoop,
+ kDenseAnyLoop,
+ kAnyStorageAnyLoop
+ // TODO: support reduction parallelization too?
+};
+
+/// Defines a vectorization strategy. Any inner loop is a candidate (full SIMD
+/// for parallel loops and horizontal SIMD for reduction loops). A loop is
+/// actually vectorized if (1) allowed by the strategy, and (2) the emitted
+/// code is an actual for-loop (and not a co-iterating while-loop).
+enum class SparseVectorizationStrategy {
+ kNone,
+ kDenseInnerLoop,
+ kAnyStorageInnerLoop
+};
+
+/// Defines a type for "pointer" and "index" storage in the sparse storage
+/// scheme, with a choice between the native platform-dependent index width
+/// or any of 64-/32-/16-/8-bit integers. A narrow width obviously reduces
+/// the memory footprint of the sparse storage scheme, but the width should
+/// suffice to define the total required range (viz. the maximum number of
+/// stored entries per indirection level for the "pointers" and the maximum
+/// value of each tensor index over all dimensions for the "indices").
+enum class SparseIntType { kNative, kI64, kI32, kI16, kI8 };
+
+/// Sparsification options.
+struct SparsificationOptions {
+ SparsificationOptions(SparseParallelizationStrategy p,
+ SparseVectorizationStrategy v, unsigned vl,
+ SparseIntType pt, SparseIntType it, bool fo)
+ : parallelizationStrategy(p), vectorizationStrategy(v), vectorLength(vl),
+ ptrType(pt), indType(it), fastOutput(fo) {}
+ SparsificationOptions()
+ : SparsificationOptions(SparseParallelizationStrategy::kNone,
+ SparseVectorizationStrategy::kNone, 1u,
+ SparseIntType::kNative, SparseIntType::kNative,
+ false) {}
+ SparseParallelizationStrategy parallelizationStrategy;
+ SparseVectorizationStrategy vectorizationStrategy;
+ unsigned vectorLength;
+ SparseIntType ptrType;
+ SparseIntType indType;
+ bool fastOutput; // experimental: fast output buffers
+};
+
+/// Sets up sparsification rewriting rules with the given options.
+void populateSparsificationPatterns(
+ RewritePatternSet &patterns,
+ const SparsificationOptions &options = SparsificationOptions());
+
+/// Sets up sparse tensor conversion rules.
+void populateSparseTensorConversionPatterns(RewritePatternSet &patterns);
+
+std::unique_ptr<Pass> createSparsificationPass();
+std::unique_ptr<Pass> createSparseTensorConversionPass();
+
+//===----------------------------------------------------------------------===//
+// Registration.
+//===----------------------------------------------------------------------===//
+
+/// Generate the code for registering passes.
+#define GEN_PASS_REGISTRATION
+#include "mlir/Dialect/SparseTensor/Transforms/Passes.h.inc"
+
+} // namespace mlir
+
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES_H_
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
new file mode 100644
index 0000000000000..67960d286fc9d
--- /dev/null
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
@@ -0,0 +1,39 @@
+//===-- Passes.td - Sparse tensor pass definition file -----*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES
+
+include "mlir/Pass/PassBase.td"
+
+def Sparsification : Pass<"sparsification", "ModuleOp"> {
+ let summary = "Automatically generate sparse tensor code from annotations";
+ let constructor = "mlir::createSparsificationPass()";
+ let dependentDialects = [
+ "LLVM::LLVMDialect",
+ "memref::MemRefDialect",
+ "scf::SCFDialect",
+ "sparse_tensor::SparseTensorDialect",
+ "vector::VectorDialect",
+ ];
+}
+
+def SparseTensorConversion : Pass<"sparse-tensor-conversion", "ModuleOp"> {
+ let summary = "Apply conversion rules to sparse tensors";
+ let constructor = "mlir::createSparseTensorConversionPass()";
+ let dependentDialects = [
+ "LLVM::LLVMDialect",
+ "memref::MemRefDialect",
+ "scf::SCFDialect",
+ "sparse_tensor::SparseTensorDialect",
+ "vector::VectorDialect",
+ ];
+}
+
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES
+
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h
deleted file mode 100644
index 0ae4b0bf0817f..0000000000000
--- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h
+++ /dev/null
@@ -1,23 +0,0 @@
-//===- Transforms.h - Sparse tensor transformations -------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_
-
-#include "mlir/IR/PatternMatch.h"
-
-namespace mlir {
-namespace sparse_tensor {
-
-/// Sets up sparsification conversion rules with the given options.
-void populateSparsificationConversionPatterns(RewritePatternSet &patterns);
-
-} // namespace sparse_tensor
-} // namespace mlir
-
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_
diff --git a/mlir/include/mlir/InitAllPasses.h b/mlir/include/mlir/InitAllPasses.h
index cd22a96b24d05..59b35b7096f5a 100644
--- a/mlir/include/mlir/InitAllPasses.h
+++ b/mlir/include/mlir/InitAllPasses.h
@@ -25,6 +25,7 @@
#include "mlir/Dialect/SCF/Passes.h"
#include "mlir/Dialect/SPIRV/Transforms/Passes.h"
#include "mlir/Dialect/Shape/Transforms/Passes.h"
+#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
#include "mlir/Dialect/Tensor/Transforms/Passes.h"
#include "mlir/Dialect/Tosa/Transforms/Passes.h"
@@ -55,6 +56,7 @@ inline void registerAllPasses() {
registerGpuSerializeToCubinPass();
registerGpuSerializeToHsacoPass();
registerLinalgPasses();
+ registerSparseTensorPasses();
LLVM::registerLLVMPasses();
memref::registerMemRefPasses();
quant::registerQuantPasses();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
index 3f62ccefe7586..6a2f81185af9b 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
@@ -11,7 +11,6 @@ add_mlir_dialect_library(MLIRLinalgTransforms
Interchange.cpp
Loops.cpp
Promotion.cpp
- Sparsification.cpp
Tiling.cpp
Transforms.cpp
Vectorization.cpp
@@ -37,7 +36,6 @@ add_mlir_dialect_library(MLIRLinalgTransforms
MLIRSCF
MLIRSCFTransforms
MLIRPass
- MLIRSparseTensor
MLIRStandard
MLIRStandardOpsTransforms
MLIRStandardToLLVM
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt
index 63623b4607044..336e834cc109c 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt
@@ -1,14 +1,24 @@
add_mlir_dialect_library(MLIRSparseTensorTransforms
- SparseTensorLowering.cpp
+ Sparsification.cpp
+ SparseTensorConversion.cpp
+ SparseTensorPasses.cpp
ADDITIONAL_HEADER_DIRS
${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SparseTensor
+ DEPENDS
+ MLIRSparseTensorPassIncGen
+
LINK_LIBS PUBLIC
MLIRIR
MLIRLLVMIR
+ MLIRLinalg
+ MLIRLinalgTransforms
+ MLIRMemRef
MLIRPass
+ MLIRSCF
MLIRStandard
MLIRSparseTensor
MLIRTransforms
+ MLIRVector
)
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
similarity index 90%
rename from mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp
rename to mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index d87cc61e5bd82..faf1133b1996f 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -17,7 +17,7 @@
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
-#include "mlir/Dialect/SparseTensor/Transforms/Transforms.h"
+#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Transforms/DialectConversion.h"
@@ -42,7 +42,7 @@ static FlatSymbolRefAttr getFunc(Operation *op, StringRef name, Type result,
}
/// Sparse conversion rule to remove opaque pointer cast.
-class TensorFromPointerConverter
+class SparseTensorFromPointerConverter
: public OpConversionPattern<sparse_tensor::FromPointerOp> {
using OpConversionPattern::OpConversionPattern;
LogicalResult
@@ -54,7 +54,8 @@ class TensorFromPointerConverter
};
/// Sparse conversion rule for dimension accesses.
-class TensorToDimSizeConverter : public OpConversionPattern<memref::DimOp> {
+class SparseTensorToDimSizeConverter
+ : public OpConversionPattern<memref::DimOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult
@@ -71,7 +72,7 @@ class TensorToDimSizeConverter : public OpConversionPattern<memref::DimOp> {
};
/// Sparse conversion rule for pointer accesses.
-class TensorToPointersConverter
+class SparseTensorToPointersConverter
: public OpConversionPattern<sparse_tensor::ToPointersOp> {
public:
using OpConversionPattern::OpConversionPattern;
@@ -98,7 +99,7 @@ class TensorToPointersConverter
};
/// Sparse conversion rule for index accesses.
-class TensorToIndicesConverter
+class SparseTensorToIndicesConverter
: public OpConversionPattern<sparse_tensor::ToIndicesOp> {
public:
using OpConversionPattern::OpConversionPattern;
@@ -125,7 +126,7 @@ class TensorToIndicesConverter
};
/// Sparse conversion rule for value accesses.
-class TensorToValuesConverter
+class SparseTensorToValuesConverter
: public OpConversionPattern<sparse_tensor::ToValuesOp> {
public:
using OpConversionPattern::OpConversionPattern;
@@ -157,9 +158,8 @@ class TensorToValuesConverter
/// Populates the given patterns list with conversion rules required for
/// the sparsification of linear algebra operations.
-void sparse_tensor::populateSparsificationConversionPatterns(
- RewritePatternSet &patterns) {
- patterns.add<TensorFromPointerConverter, TensorToDimSizeConverter,
- TensorToPointersConverter, TensorToIndicesConverter,
- TensorToValuesConverter>(patterns.getContext());
+void mlir::populateSparseTensorConversionPatterns(RewritePatternSet &patterns) {
+ patterns.add<SparseTensorFromPointerConverter, SparseTensorToDimSizeConverter,
+ SparseTensorToPointersConverter, SparseTensorToIndicesConverter,
+ SparseTensorToValuesConverter>(patterns.getContext());
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
new file mode 100644
index 0000000000000..d54b2eff25afc
--- /dev/null
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
@@ -0,0 +1,141 @@
+//===- SparseTensorPasses.cpp - Passes for autogen sparse tensor code -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+
+using namespace mlir;
+
+namespace {
+
+//===----------------------------------------------------------------------===//
+// Passes declaration.
+//===----------------------------------------------------------------------===//
+
+#define GEN_PASS_CLASSES
+#include "mlir/Dialect/SparseTensor/Transforms/Passes.h.inc"
+
+//===----------------------------------------------------------------------===//
+// Passes implementation.
+//===----------------------------------------------------------------------===//
+
+struct SparsificationPass : public SparsificationBase<SparsificationPass> {
+
+ SparsificationPass() = default;
+ SparsificationPass(const SparsificationPass &pass) {}
+
+ Option<int32_t> parallelization{
+ *this, "parallelization-strategy",
+ llvm::cl::desc("Set the parallelization strategy"), llvm::cl::init(0)};
+
+ Option<int32_t> vectorization{
+ *this, "vectorization-strategy",
+ llvm::cl::desc("Set the vectorization strategy"), llvm::cl::init(0)};
+
+ Option<int32_t> vectorLength{
+ *this, "vl", llvm::cl::desc("Set the vector length"), llvm::cl::init(1)};
+
+ Option<int32_t> ptrType{*this, "ptr-type",
+ llvm::cl::desc("Set the pointer type"),
+ llvm::cl::init(0)};
+
+ Option<int32_t> indType{*this, "ind-type",
+ llvm::cl::desc("Set the index type"),
+ llvm::cl::init(0)};
+
+ Option<bool> fastOutput{*this, "fast-output",
+ llvm::cl::desc("Allows fast output buffers"),
+ llvm::cl::init(false)};
+
+ /// Returns parallelization strategy given on command line.
+ SparseParallelizationStrategy parallelOption() {
+ switch (parallelization) {
+ default:
+ return SparseParallelizationStrategy::kNone;
+ case 1:
+ return SparseParallelizationStrategy::kDenseOuterLoop;
+ case 2:
+ return SparseParallelizationStrategy::kAnyStorageOuterLoop;
+ case 3:
+ return SparseParallelizationStrategy::kDenseAnyLoop;
+ case 4:
+ return SparseParallelizationStrategy::kAnyStorageAnyLoop;
+ }
+ }
+
+ /// Returns vectorization strategy given on command line.
+ SparseVectorizationStrategy vectorOption() {
+ switch (vectorization) {
+ default:
+ return SparseVectorizationStrategy::kNone;
+ case 1:
+ return SparseVectorizationStrategy::kDenseInnerLoop;
+ case 2:
+ return SparseVectorizationStrategy::kAnyStorageInnerLoop;
+ }
+ }
+
+ /// Returns the requested integer type.
+ SparseIntType typeOption(int32_t option) {
+ switch (option) {
+ default:
+ return SparseIntType::kNative;
+ case 1:
+ return SparseIntType::kI64;
+ case 2:
+ return SparseIntType::kI32;
+ case 3:
+ return SparseIntType::kI16;
+ case 4:
+ return SparseIntType::kI8;
+ }
+ }
+
+ void runOnOperation() override {
+ auto *ctx = &getContext();
+ RewritePatternSet patterns(ctx);
+ // Translate strategy flags to strategy options.
+ SparsificationOptions options(parallelOption(), vectorOption(),
+ vectorLength, typeOption(ptrType),
+ typeOption(indType), fastOutput);
+ // Apply rewriting.
+ populateSparsificationPatterns(patterns, options);
+ vector::populateVectorToVectorCanonicalizationPatterns(patterns);
+ (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
+ }
+};
+
+struct SparseTensorConversionPass
+ : public SparseTensorConversionBase<SparseTensorConversionPass> {
+ void runOnOperation() override {
+ auto *ctx = &getContext();
+ RewritePatternSet conversionPatterns(ctx);
+ ConversionTarget target(*ctx);
+ target
+ .addIllegalOp<sparse_tensor::FromPointerOp, sparse_tensor::ToPointersOp,
+ sparse_tensor::ToIndicesOp, sparse_tensor::ToValuesOp>();
+ target.addLegalOp<CallOp>();
+ populateSparseTensorConversionPatterns(conversionPatterns);
+ if (failed(applyPartialConversion(getOperation(), target,
+ std::move(conversionPatterns))))
+ signalPassFailure();
+ }
+};
+
+} // end anonymous namespace
+
+std::unique_ptr<Pass> mlir::createSparsificationPass() {
+ return std::make_unique<SparsificationPass>();
+}
+
+std::unique_ptr<Pass> mlir::createSparseTensorConversionPass() {
+ return std::make_unique<SparseTensorConversionPass>();
+}
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
similarity index 98%
rename from mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp
rename to mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index 5570cbaf9e85e..d2c85841773ae 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -42,12 +42,14 @@
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
-#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
+#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/Matchers.h"
+#include "llvm/ADT/SmallBitVector.h"
using namespace mlir;
@@ -294,8 +296,7 @@ class Merger {
// Code generation.
struct CodeGen {
- CodeGen(linalg::SparsificationOptions o, unsigned numTensors,
- unsigned numLoops)
+ CodeGen(mlir::SparsificationOptions o, unsigned numTensors, unsigned numLoops)
: options(o), loops(numLoops), sizes(numLoops), buffers(numTensors),
pointers(numTensors, std::vector<Value>(numLoops)),
indices(numTensors, std::vector<Value>(numLoops)),
@@ -304,7 +305,7 @@ struct CodeGen {
idxs(numTensors, std::vector<Value>(numLoops)), redExp(-1u), redVal(),
curVecLength(1), curVecMask() {}
/// Sparsification options.
- linalg::SparsificationOptions options;
+ mlir::SparsificationOptions options;
/// Universal dense indices and upper bounds (by index). The loops array
/// is updated with the value of the universal dense index in the current
/// loop. The sizes array is set once with the inferred dimension sizes.
@@ -506,17 +507,17 @@ static unsigned buildLattices(Merger &merger, linalg::GenericOp op,
}
/// Maps sparse integer option to actual integral storage type.
-static Type genIntType(PatternRewriter &rewriter, linalg::SparseIntType tp) {
+static Type genIntType(PatternRewriter &rewriter, SparseIntType tp) {
switch (tp) {
- case linalg::SparseIntType::kNative:
+ case SparseIntType::kNative:
return rewriter.getIndexType();
- case linalg::SparseIntType::kI64:
+ case SparseIntType::kI64:
return rewriter.getIntegerType(64);
- case linalg::SparseIntType::kI32:
+ case SparseIntType::kI32:
return rewriter.getIntegerType(32);
- case linalg::SparseIntType::kI16:
+ case SparseIntType::kI16:
return rewriter.getIntegerType(16);
- case linalg::SparseIntType::kI8:
+ case SparseIntType::kI8:
return rewriter.getIntegerType(8);
}
llvm_unreachable("unexpected SparseIntType");
@@ -960,11 +961,11 @@ static bool genInit(Merger &merger, CodeGen &codegen, PatternRewriter &rewriter,
/// depends on the requested strategy.
static bool isVectorFor(CodeGen &codegen, bool isInner, bool isSparse) {
switch (codegen.options.vectorizationStrategy) {
- case linalg::SparseVectorizationStrategy::kNone:
+ case SparseVectorizationStrategy::kNone:
return false;
- case linalg::SparseVectorizationStrategy::kDenseInnerLoop:
+ case SparseVectorizationStrategy::kDenseInnerLoop:
return isInner && !isSparse;
- case linalg::SparseVectorizationStrategy::kAnyStorageInnerLoop:
+ case SparseVectorizationStrategy::kAnyStorageInnerLoop:
return isInner;
}
llvm_unreachable("unexpected vectorization strategy");
@@ -976,15 +977,15 @@ static bool isVectorFor(CodeGen &codegen, bool isInner, bool isSparse) {
static bool isParallelFor(CodeGen &codegen, bool isOuter, bool isReduction,
bool isSparse, bool isVector) {
switch (codegen.options.parallelizationStrategy) {
- case linalg::SparseParallelizationStrategy::kNone:
+ case SparseParallelizationStrategy::kNone:
return false;
- case linalg::SparseParallelizationStrategy::kDenseOuterLoop:
+ case SparseParallelizationStrategy::kDenseOuterLoop:
return isOuter && !isSparse && !isReduction && !isVector;
- case linalg::SparseParallelizationStrategy::kAnyStorageOuterLoop:
+ case SparseParallelizationStrategy::kAnyStorageOuterLoop:
return isOuter && !isReduction && !isVector;
- case linalg::SparseParallelizationStrategy::kDenseAnyLoop:
+ case SparseParallelizationStrategy::kDenseAnyLoop:
return !isSparse && !isReduction && !isVector;
- case linalg::SparseParallelizationStrategy::kAnyStorageAnyLoop:
+ case SparseParallelizationStrategy::kAnyStorageAnyLoop:
return !isReduction && !isVector;
}
llvm_unreachable("unexpected parallelization strategy");
@@ -1355,7 +1356,7 @@ namespace {
/// Sparse rewriting rule for generic Linalg operation.
struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
public:
- GenericOpSparsifier(MLIRContext *context, linalg::SparsificationOptions o)
+ GenericOpSparsifier(MLIRContext *context, SparsificationOptions o)
: OpRewritePattern<linalg::GenericOp>(context), options(o) {}
LogicalResult matchAndRewrite(linalg::GenericOp op,
@@ -1398,14 +1399,14 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
private:
/// Options to control sparse code generation.
- linalg::SparsificationOptions options;
+ SparsificationOptions options;
};
} // namespace
/// Populates the given patterns list with rewriting rules required for
/// the sparsification of linear algebra operations.
-void linalg::populateSparsificationPatterns(
+void mlir::populateSparsificationPatterns(
RewritePatternSet &patterns, const SparsificationOptions &options) {
patterns.add<GenericOpSparsifier>(patterns.getContext(), options);
}
diff --git a/mlir/test/Dialect/SparseTensor/lowering.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
similarity index 97%
rename from mlir/test/Dialect/SparseTensor/lowering.mlir
rename to mlir/test/Dialect/SparseTensor/conversion.mlir
index 6f02c10b49b29..508b29a2d157e 100644
--- a/mlir/test/Dialect/SparseTensor/lowering.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --test-sparsification="lower" %s | FileCheck %s
+// RUN: mlir-opt --sparse-tensor-conversion %s | FileCheck %s
!SparseTensor = type !llvm.ptr<i8>
diff --git a/mlir/test/Dialect/Linalg/sparse_1d.mlir b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
similarity index 99%
rename from mlir/test/Dialect/Linalg/sparse_1d.mlir
rename to mlir/test/Dialect/SparseTensor/sparse_1d.mlir
index 7dd1b6765e9ca..9ed062cf757f4 100644
--- a/mlir/test/Dialect/Linalg/sparse_1d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
@@ -1,5 +1,5 @@
// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
-// RUN: mlir-opt %s -test-sparsification | FileCheck %s
+// RUN: mlir-opt %s -sparsification | FileCheck %s
#trait_d = {
indexing_maps = [
diff --git a/mlir/test/Dialect/Linalg/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
similarity index 99%
rename from mlir/test/Dialect/Linalg/sparse_2d.mlir
rename to mlir/test/Dialect/SparseTensor/sparse_2d.mlir
index 4303c9dfb41d5..80febcad1c0b1 100644
--- a/mlir/test/Dialect/Linalg/sparse_2d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
@@ -1,5 +1,5 @@
// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
-// RUN: mlir-opt %s -test-sparsification | FileCheck %s
+// RUN: mlir-opt %s -sparsification | FileCheck %s
#trait_dd = {
indexing_maps = [
diff --git a/mlir/test/Dialect/Linalg/sparse_3d.mlir b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir
similarity index 99%
rename from mlir/test/Dialect/Linalg/sparse_3d.mlir
rename to mlir/test/Dialect/SparseTensor/sparse_3d.mlir
index 41ad5757924d4..270b11e220ed7 100644
--- a/mlir/test/Dialect/Linalg/sparse_3d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir
@@ -1,5 +1,5 @@
// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
-// RUN: mlir-opt %s -test-sparsification | FileCheck %s
+// RUN: mlir-opt %s -sparsification | FileCheck %s
#trait_ddd = {
indexing_maps = [
diff --git a/mlir/test/Dialect/Linalg/sparse_invalid.mlir b/mlir/test/Dialect/SparseTensor/sparse_invalid.mlir
similarity index 100%
rename from mlir/test/Dialect/Linalg/sparse_invalid.mlir
rename to mlir/test/Dialect/SparseTensor/sparse_invalid.mlir
diff --git a/mlir/test/Dialect/Linalg/sparse_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir
similarity index 97%
rename from mlir/test/Dialect/Linalg/sparse_lower.mlir
rename to mlir/test/Dialect/SparseTensor/sparse_lower.mlir
index cdbaf6d2ebec0..54179a3395f16 100644
--- a/mlir/test/Dialect/Linalg/sparse_lower.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir
@@ -1,15 +1,18 @@
-// RUN: mlir-opt %s -test-sparsification | \
+// RUN: mlir-opt %s -sparsification | \
// RUN: FileCheck %s --check-prefix=CHECK-HIR
//
-// RUN: mlir-opt %s -test-sparsification="lower" --convert-linalg-to-loops | \
+// RUN: mlir-opt %s -sparsification \
+// RUN: --sparse-tensor-conversion --convert-linalg-to-loops | \
// RUN: FileCheck %s --check-prefix=CHECK-MIR
//
-// RUN: mlir-opt %s -test-sparsification="lower" --convert-linalg-to-loops \
+// RUN: mlir-opt %s -sparsification \
+// RUN: --sparse-tensor-conversion --convert-linalg-to-loops \
// RUN: --func-bufferize --tensor-constant-bufferize \
// RUN: --tensor-bufferize --finalizing-bufferize | \
// RUN: FileCheck %s --check-prefix=CHECK-LIR
//
-// RUN: mlir-opt %s -test-sparsification="lower fast-output" --convert-linalg-to-loops \
+// RUN: mlir-opt %s -sparsification="fast-output" \
+// RUN: --sparse-tensor-conversion --convert-linalg-to-loops \
// RUN: --func-bufferize --tensor-constant-bufferize \
// RUN: --tensor-bufferize --finalizing-bufferize | \
// RUN: FileCheck %s --check-prefix=CHECK-FAST
diff --git a/mlir/test/Dialect/Linalg/sparse_nd.mlir b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir
similarity index 99%
rename from mlir/test/Dialect/Linalg/sparse_nd.mlir
rename to mlir/test/Dialect/SparseTensor/sparse_nd.mlir
index 646f83f570fe7..7d926d33a3863 100644
--- a/mlir/test/Dialect/Linalg/sparse_nd.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir
@@ -1,5 +1,5 @@
// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
-// RUN: mlir-opt %s -test-sparsification | FileCheck %s
+// RUN: mlir-opt %s -sparsification | FileCheck %s
// Example with cyclic iteration graph with sparse and dense constraints,
// but an acyclic iteration graph using sparse constraints only.
diff --git a/mlir/test/Dialect/Linalg/sparse_parallel.mlir b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
similarity index 92%
rename from mlir/test/Dialect/Linalg/sparse_parallel.mlir
rename to mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
index f8e9d4e922c58..31395d72b9cb8 100644
--- a/mlir/test/Dialect/Linalg/sparse_parallel.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir
@@ -1,12 +1,12 @@
-// RUN: mlir-opt %s -test-sparsification="parallelization-strategy=0" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=0" | \
// RUN: FileCheck %s --check-prefix=CHECK-PAR0
-// RUN: mlir-opt %s -test-sparsification="parallelization-strategy=1" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=1" | \
// RUN: FileCheck %s --check-prefix=CHECK-PAR1
-// RUN: mlir-opt %s -test-sparsification="parallelization-strategy=2" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=2" | \
// RUN: FileCheck %s --check-prefix=CHECK-PAR2
-// RUN: mlir-opt %s -test-sparsification="parallelization-strategy=3" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=3" | \
// RUN: FileCheck %s --check-prefix=CHECK-PAR3
-// RUN: mlir-opt %s -test-sparsification="parallelization-strategy=4" | \
+// RUN: mlir-opt %s -sparsification="parallelization-strategy=4" | \
// RUN: FileCheck %s --check-prefix=CHECK-PAR4
#trait_dd = {
diff --git a/mlir/test/Dialect/Linalg/sparse_storage.mlir b/mlir/test/Dialect/SparseTensor/sparse_storage.mlir
similarity index 94%
rename from mlir/test/Dialect/Linalg/sparse_storage.mlir
rename to mlir/test/Dialect/SparseTensor/sparse_storage.mlir
index 998b71f5a24d2..b6d4adff69131 100644
--- a/mlir/test/Dialect/Linalg/sparse_storage.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_storage.mlir
@@ -1,14 +1,14 @@
-// RUN: mlir-opt %s -test-sparsification="ptr-type=1 ind-type=1" | \
+// RUN: mlir-opt %s -sparsification="ptr-type=1 ind-type=1" | \
// RUN: FileCheck %s --check-prefix=CHECK-TYPE0
-// RUN: mlir-opt %s -test-sparsification="ptr-type=1 ind-type=2" | \
+// RUN: mlir-opt %s -sparsification="ptr-type=1 ind-type=2" | \
// RUN: FileCheck %s --check-prefix=CHECK-TYPE1
-// RUN: mlir-opt %s -test-sparsification="ptr-type=2 ind-type=1" | \
+// RUN: mlir-opt %s -sparsification="ptr-type=2 ind-type=1" | \
// RUN: FileCheck %s --check-prefix=CHECK-TYPE2
-// RUN: mlir-opt %s -test-sparsification="ptr-type=2 ind-type=2" | \
+// RUN: mlir-opt %s -sparsification="ptr-type=2 ind-type=2" | \
// RUN: FileCheck %s --check-prefix=CHECK-TYPE3
-// RUN: mlir-opt %s -test-sparsification="ptr-type=3 ind-type=3" | \
+// RUN: mlir-opt %s -sparsification="ptr-type=3 ind-type=3" | \
// RUN: FileCheck %s --check-prefix=CHECK-TYPE4
-// RUN: mlir-opt %s -test-sparsification="ptr-type=4 ind-type=4" | \
+// RUN: mlir-opt %s -sparsification="ptr-type=4 ind-type=4" | \
// RUN: FileCheck %s --check-prefix=CHECK-TYPE5
#trait_mul_1d = {
diff --git a/mlir/test/Dialect/Linalg/sparse_vector.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir
similarity index 98%
rename from mlir/test/Dialect/Linalg/sparse_vector.mlir
rename to mlir/test/Dialect/SparseTensor/sparse_vector.mlir
index 69a7cfaa359e6..310076d96df3a 100644
--- a/mlir/test/Dialect/Linalg/sparse_vector.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir
@@ -1,10 +1,10 @@
-// RUN: mlir-opt %s -test-sparsification="vectorization-strategy=0 ptr-type=2 ind-type=2 vl=16" | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=0 ptr-type=2 ind-type=2 vl=16" | \
// RUN: FileCheck %s --check-prefix=CHECK-VEC0
-// RUN: mlir-opt %s -test-sparsification="vectorization-strategy=1 ptr-type=2 ind-type=2 vl=16" | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=1 ptr-type=2 ind-type=2 vl=16" | \
// RUN: FileCheck %s --check-prefix=CHECK-VEC1
-// RUN: mlir-opt %s -test-sparsification="vectorization-strategy=2 ptr-type=2 ind-type=2 vl=16" | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 ptr-type=2 ind-type=2 vl=16" | \
// RUN: FileCheck %s --check-prefix=CHECK-VEC2
-// RUN: mlir-opt %s -test-sparsification="vectorization-strategy=2 ptr-type=0 ind-type=0 vl=16" | \
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 ptr-type=0 ind-type=0 vl=16" | \
// RUN: FileCheck %s --check-prefix=CHECK-VEC3
#trait_scale_d = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
index e78281a9da63f..5a73924c84250 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -1,5 +1,5 @@
// RUN: mlir-opt %s \
-// RUN: --test-sparsification="lower ptr-type=4 ind-type=4" \
+// RUN: --sparsification="ptr-type=4 ind-type=4" --sparse-tensor-conversion \
// RUN: --convert-linalg-to-loops --convert-vector-to-scf --convert-scf-to-std \
// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN: --std-bufferize --finalizing-bufferize \
@@ -11,7 +11,7 @@
// RUN: FileCheck %s
//
// RUN: mlir-opt %s \
-// RUN: --test-sparsification="lower vectorization-strategy=2 ptr-type=4 ind-type=4 vl=16" \
+// RUN: --sparsification="vectorization-strategy=2 ptr-type=4 ind-type=4 vl=16" --sparse-tensor-conversion \
// RUN: --convert-linalg-to-loops --convert-vector-to-scf --convert-scf-to-std \
// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN: --std-bufferize --finalizing-bufferize \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
index 509402a48b0d9..ee4d56c547cde 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -1,5 +1,5 @@
// RUN: mlir-opt %s \
-// RUN: --test-sparsification="lower ptr-type=2 ind-type=2 fast-output" \
+// RUN: --sparsification="ptr-type=2 ind-type=2 fast-output" --sparse-tensor-conversion \
// RUN: --convert-linalg-to-loops --convert-vector-to-scf --convert-scf-to-std \
// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN: --std-bufferize --finalizing-bufferize \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
index 6d3dc0ee16594..4e1d44dcc2b85 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -1,5 +1,5 @@
// RUN: mlir-opt %s \
-// RUN: --test-sparsification="lower" \
+// RUN: --sparsification --sparse-tensor-conversion \
// RUN: --convert-linalg-to-loops --convert-vector-to-scf --convert-scf-to-std \
// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN: --std-bufferize --finalizing-bufferize \
diff --git a/mlir/test/lib/Transforms/CMakeLists.txt b/mlir/test/lib/Transforms/CMakeLists.txt
index d000b950ea712..3ce4fd0e258b8 100644
--- a/mlir/test/lib/Transforms/CMakeLists.txt
+++ b/mlir/test/lib/Transforms/CMakeLists.txt
@@ -34,7 +34,6 @@ add_mlir_library(MLIRTestTransforms
TestMemRefDependenceCheck.cpp
TestMemRefStrideCalculation.cpp
TestSCFUtils.cpp
- TestSparsification.cpp
TestVectorTransforms.cpp
EXCLUDE_FROM_LIBMLIR
diff --git a/mlir/test/lib/Transforms/TestSparsification.cpp b/mlir/test/lib/Transforms/TestSparsification.cpp
deleted file mode 100644
index 5a12405789baa..0000000000000
--- a/mlir/test/lib/Transforms/TestSparsification.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-//===- TestSparsification.cpp - Test sparsification of tensors ------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
-#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
-#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
-#include "mlir/Dialect/SparseTensor/Transforms/Transforms.h"
-#include "mlir/Dialect/Vector/VectorOps.h"
-#include "mlir/Pass/Pass.h"
-#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
-
-using namespace mlir;
-
-namespace {
-
-struct TestSparsification
- : public PassWrapper<TestSparsification, OperationPass<ModuleOp>> {
-
- TestSparsification() = default;
- TestSparsification(const TestSparsification &pass) {}
-
- Option<int32_t> parallelization{
- *this, "parallelization-strategy",
- llvm::cl::desc("Set the parallelization strategy"), llvm::cl::init(0)};
-
- Option<int32_t> vectorization{
- *this, "vectorization-strategy",
- llvm::cl::desc("Set the vectorization strategy"), llvm::cl::init(0)};
-
- Option<int32_t> vectorLength{
- *this, "vl", llvm::cl::desc("Set the vector length"), llvm::cl::init(1)};
-
- Option<int32_t> ptrType{*this, "ptr-type",
- llvm::cl::desc("Set the pointer type"),
- llvm::cl::init(0)};
-
- Option<int32_t> indType{*this, "ind-type",
- llvm::cl::desc("Set the index type"),
- llvm::cl::init(0)};
-
- Option<bool> fastOutput{*this, "fast-output",
- llvm::cl::desc("Allows fast output buffers"),
- llvm::cl::init(false)};
-
- Option<bool> lower{*this, "lower", llvm::cl::desc("Lower sparse primitives"),
- llvm::cl::init(false)};
-
- /// Registers all dialects required by testing.
 - void getDependentDialects(DialectRegistry &registry) const override {
- registry.insert<memref::MemRefDialect, scf::SCFDialect,
- sparse_tensor::SparseTensorDialect, vector::VectorDialect,
- LLVM::LLVMDialect>();
- }
-
- /// Returns parallelization strategy given on command line.
- linalg::SparseParallelizationStrategy parallelOption() {
- switch (parallelization) {
- default:
- return linalg::SparseParallelizationStrategy::kNone;
- case 1:
- return linalg::SparseParallelizationStrategy::kDenseOuterLoop;
- case 2:
- return linalg::SparseParallelizationStrategy::kAnyStorageOuterLoop;
- case 3:
- return linalg::SparseParallelizationStrategy::kDenseAnyLoop;
- case 4:
- return linalg::SparseParallelizationStrategy::kAnyStorageAnyLoop;
- }
- }
-
- /// Returns vectorization strategy given on command line.
- linalg::SparseVectorizationStrategy vectorOption() {
- switch (vectorization) {
- default:
- return linalg::SparseVectorizationStrategy::kNone;
- case 1:
- return linalg::SparseVectorizationStrategy::kDenseInnerLoop;
- case 2:
- return linalg::SparseVectorizationStrategy::kAnyStorageInnerLoop;
- }
- }
-
- /// Returns the requested integer type.
- linalg::SparseIntType typeOption(int32_t option) {
- switch (option) {
- default:
- return linalg::SparseIntType::kNative;
- case 1:
- return linalg::SparseIntType::kI64;
- case 2:
- return linalg::SparseIntType::kI32;
- case 3:
- return linalg::SparseIntType::kI16;
- case 4:
- return linalg::SparseIntType::kI8;
- }
- }
-
- /// Runs the test on a function.
- void runOnOperation() override {
- auto *ctx = &getContext();
- RewritePatternSet patterns(ctx);
- // Translate strategy flags to strategy options.
- linalg::SparsificationOptions options(parallelOption(), vectorOption(),
- vectorLength, typeOption(ptrType),
- typeOption(indType), fastOutput);
- // Apply rewriting.
- linalg::populateSparsificationPatterns(patterns, options);
- vector::populateVectorToVectorCanonicalizationPatterns(patterns);
- (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
- // Lower sparse primitives to calls into runtime support library.
- if (lower) {
- RewritePatternSet conversionPatterns(ctx);
- ConversionTarget target(*ctx);
- target.addIllegalOp<
- sparse_tensor::FromPointerOp, sparse_tensor::ToPointersOp,
- sparse_tensor::ToIndicesOp, sparse_tensor::ToValuesOp>();
- target.addLegalOp<CallOp>();
- sparse_tensor::populateSparsificationConversionPatterns(
- conversionPatterns);
- if (failed(applyPartialConversion(getOperation(), target,
- std::move(conversionPatterns))))
- signalPassFailure();
- }
- }
-};
-
-} // end anonymous namespace
-
-namespace mlir {
-namespace test {
-
-void registerTestSparsification() {
- PassRegistration<TestSparsification> sparsificationPass(
- "test-sparsification", "Test automatic generation of sparse tensor code");
-}
-
-} // namespace test
-} // namespace mlir
diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp
index 009e12eb9174b..89b77ab53ca72 100644
--- a/mlir/tools/mlir-opt/mlir-opt.cpp
+++ b/mlir/tools/mlir-opt/mlir-opt.cpp
@@ -99,7 +99,6 @@ void registerTestPDLByteCodePass();
void registerTestPreparationPassWithAllowedMemrefResults();
void registerTestRecursiveTypesPass();
void registerTestSCFUtilsPass();
-void registerTestSparsification();
void registerTestVectorConversions();
} // namespace test
} // namespace mlir
@@ -177,7 +176,6 @@ void registerTestPasses() {
test::registerTestPDLByteCodePass();
test::registerTestRecursiveTypesPass();
test::registerTestSCFUtilsPass();
- test::registerTestSparsification();
test::registerTestVectorConversions();
}
#endif
More information about the Mlir-commits
mailing list