[Mlir-commits] [mlir] 515c617 - [mlir][linalg][sparse] add linalg optimization passes "upstream"
Aart Bik
llvmlistbot at llvm.org
Thu Feb 17 08:55:59 PST 2022
Author: Aart Bik
Date: 2022-02-17T08:55:50-08:00
New Revision: 515c617003bb340a72aeab148ea705c53950c44d
URL: https://github.com/llvm/llvm-project/commit/515c617003bb340a72aeab148ea705c53950c44d
DIFF: https://github.com/llvm/llvm-project/commit/515c617003bb340a72aeab148ea705c53950c44d.diff
LOG: [mlir][linalg][sparse] add linalg optimization passes "upstream"
It is time to compose Linalg-related optimizations with SparseTensor-related
optimizations. This is a careful first step: it adds some general Linalg
optimizations "upstream" of the sparse compiler in the full sparse compiler
pipeline. Some minor changes were needed to make those optimizations aware
of sparsity.
Note that a follow-up revision will add a sparse-specific fusion rule, just
to demonstrate the power of the new composition.
Reviewed By: bixia
Differential Revision: https://reviews.llvm.org/D119971
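To illustrate what the composed pipeline now accepts, here is a minimal
sketch (the #CSR encoding, the shapes, and the @spmm name are illustrative,
not part of this patch): a named linalg.matmul with a sparse-annotated
operand can be handed to --sparse-compiler directly, since generalization
and elementwise fusion now run inside the pipeline instead of as separate
mlir-opt flags.

  #CSR = #sparse_tensor.encoding<{
    dimLevelType = [ "dense", "compressed" ]
  }>

  // A named op with a sparse operand; no manual
  // --linalg-generalize-named-ops --linalg-fuse-elementwise-ops needed.
  func @spmm(%a: tensor<8x8xf64, #CSR>,
             %b: tensor<8x8xf64>,
             %c: tensor<8x8xf64>) -> tensor<8x8xf64> {
    %0 = linalg.matmul
      ins(%a, %b : tensor<8x8xf64, #CSR>, tensor<8x8xf64>)
      outs(%c : tensor<8x8xf64>) -> tensor<8x8xf64>
    return %0 : tensor<8x8xf64>
  }

Running mlir-opt --sparse-compiler on such a module should match the longer
RUN invocations removed from the integration tests below.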
Added:
Modified:
mlir/lib/Dialect/Linalg/IR/CMakeLists.txt
mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Linalg/IR/CMakeLists.txt b/mlir/lib/Dialect/Linalg/IR/CMakeLists.txt
index 2ff2cc8de338..e310f58a2e3c 100644
--- a/mlir/lib/Dialect/Linalg/IR/CMakeLists.txt
+++ b/mlir/lib/Dialect/Linalg/IR/CMakeLists.txt
@@ -20,6 +20,7 @@ add_mlir_dialect_library(MLIRLinalg
MLIRIR
MLIRParser
MLIRSideEffectInterfaces
+ MLIRSparseTensor
MLIRSCF
MLIRMath
MLIRMemRef
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 87278dcba089..c148ab9bcfa7 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -14,6 +14,7 @@
#include "mlir/Dialect/Arithmetic/Utils/Utils.h"
#include "mlir/Dialect/SCF/SCF.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineExprVisitor.h"
@@ -819,9 +820,18 @@ struct EraseIdentityGenericOp : public OpRewritePattern<GenericOp> {
Type resultType = genericOp->getResult(yieldVal.index()).getType();
// The input can have a different type than the result, e.g. a dynamic
// input dimension can be turned into a static output dimension.
- if (returnedArg.getType() != resultType)
- returnedArg = rewriter.create<tensor::CastOp>(genericOp.getLoc(),
- resultType, returnedArg);
+ Type returnType = returnedArg.getType();
+ if (returnType != resultType) {
+ // Distinguish between sparse conversion and dense tensor casting.
+ // TODO: unify the two ops?
+ if (sparse_tensor::getSparseTensorEncoding(returnType) ||
+ sparse_tensor::getSparseTensorEncoding(resultType))
+ returnedArg = rewriter.create<sparse_tensor::ConvertOp>(
+ genericOp.getLoc(), resultType, returnedArg);
+ else
+ returnedArg = rewriter.create<tensor::CastOp>(
+ genericOp.getLoc(), resultType, returnedArg);
+ }
returnedArgs.push_back(returnedArg);
}
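For context, here is a hedged sketch of the rewrite this enables (the #SV
encoding, #trait_id, and @fold are hypothetical names, not from the patch):
when an identity linalg.generic merely re-types its input and either side
carries a sparse encoding, erasing the op now materializes a
sparse_tensor.convert instead of a tensor.cast, which does not model
sparsity conversions.

  #SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>

  #trait_id = {
    indexing_maps = [ affine_map<(i) -> (i)>, affine_map<(i) -> (i)> ],
    iterator_types = [ "parallel" ]
  }

  func @fold(%t: tensor<8xf64>,
             %s: tensor<8xf64, #SV>) -> tensor<8xf64, #SV> {
    // Identity generic: the body only yields the input argument.
    %0 = linalg.generic #trait_id
      ins(%t : tensor<8xf64>)
      outs(%s : tensor<8xf64, #SV>) {
      ^bb(%a: f64, %x: f64):
        linalg.yield %a : f64
    } -> tensor<8xf64, #SV>
    return %0 : tensor<8xf64, #SV>
  }

  // Expected folding result under this patch (sketch):
  //   %0 = sparse_tensor.convert %t : tensor<8xf64> to tensor<8xf64, #SV>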
diff --git a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
index 6897cb9d00d0..57bef39d6533 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
@@ -50,6 +50,7 @@ add_mlir_dialect_library(MLIRLinalgTransforms
MLIRSCFTransforms
MLIRSCFUtils
MLIRPass
+ MLIRSparseTensor
MLIRStandard
MLIRStandardOpsTransforms
MLIRStandardToLLVM
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
index 570e844878d7..7e0e857643eb 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -17,6 +17,7 @@
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Matchers.h"
@@ -2184,6 +2185,10 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
if (!operandType)
continue;
+ // If outs is sparse, leave it to the sparse compiler.
+ if (sparse_tensor::getSparseTensorEncoding(operandVal.getType()))
+ continue;
+
// If outs is already an `init_tensor` operation, nothing to do.
auto definingOp = operandVal.getDefiningOp<InitTensorOp>();
if (definingOp)
@@ -2213,7 +2218,7 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
} // namespace
//===---------------------------------------------------------------------===//
-// Methods that add patterns descrined in this file to a pattern list.
+// Methods that add patterns described in this file to a pattern list.
//===---------------------------------------------------------------------===//
void mlir::linalg::populateFoldReshapeOpsByLinearizationPatterns(
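To make the RemoveOutsDependency change above concrete, a minimal sketch
(the #SV encoding, #trait_square, and @square are illustrative only): the
pattern would normally replace an unread dense outs operand with a fresh
linalg.init_tensor, but a sparse-annotated outs is now skipped so the
sparse compiler can materialize the output itself.

  #SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>

  #trait_square = {
    indexing_maps = [ affine_map<(i) -> (i)>, affine_map<(i) -> (i)> ],
    iterator_types = [ "parallel" ]
  }

  func @square(%a: tensor<16xf64, #SV>,
               %b: tensor<16xf64, #SV>) -> tensor<16xf64, #SV> {
    // %b is never read by the payload, yet it is kept in place here;
    // for a dense tensor it would be swapped for a linalg.init_tensor.
    %0 = linalg.generic #trait_square
      ins(%a : tensor<16xf64, #SV>)
      outs(%b : tensor<16xf64, #SV>) {
      ^bb(%x: f64, %y: f64):
        %m = arith.mulf %x, %x : f64
        linalg.yield %m : f64
    } -> tensor<16xf64, #SV>
    return %0 : tensor<16xf64, #SV>
  }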
diff --git a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
index 25487e431708..ff6577abaa01 100644
--- a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
@@ -29,6 +29,8 @@ using namespace mlir::sparse_tensor;
void mlir::sparse_tensor::buildSparseCompiler(
OpPassManager &pm, const SparseCompilerOptions &options) {
// TODO(wrengr): ensure the original `pm` is for ModuleOp
+ pm.addNestedPass<FuncOp>(createLinalgGeneralizationPass());
+ pm.addPass(createLinalgElementwiseOpFusionPass());
pm.addPass(createSparsificationPass(options.sparsificationOptions()));
pm.addPass(createSparseTensorConversionPass());
pm.addNestedPass<FuncOp>(createLinalgBufferizePass());
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
index 2d8898e9ec45..a263972587ef 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -1,6 +1,5 @@
// RUN: mlir-opt %s --sparse-compiler | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
-// RUN: TENSOR1="%mlir_integration_test_dir/data/zero.mtx" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -40,15 +39,17 @@
// library.
module {
//
- // A kernel that assigns elements from A to an initially zero X.
+ // A kernel that assigns elements from A to X.
//
- func @dense_output(%arga: tensor<?x?xf64, #SparseMatrix>,
- %argx: tensor<?x?xf64, #DenseMatrix>
- {linalg.inplaceable = true})
- -> tensor<?x?xf64, #DenseMatrix> {
+ func @dense_output(%arga: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64, #DenseMatrix> {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #SparseMatrix>
+ %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #SparseMatrix>
+ %init = sparse_tensor.init [%d0, %d1] : tensor<?x?xf64, #DenseMatrix>
%0 = linalg.generic #trait_assign
ins(%arga: tensor<?x?xf64, #SparseMatrix>)
- outs(%argx: tensor<?x?xf64, #DenseMatrix>) {
+ outs(%init: tensor<?x?xf64, #DenseMatrix>) {
^bb(%a: f64, %x: f64):
linalg.yield %a : f64
} -> tensor<?x?xf64, #DenseMatrix>
@@ -70,15 +71,9 @@ module {
%a = sparse_tensor.new %fileName
: !Filename to tensor<?x?xf64, #SparseMatrix>
- // Initialize all-dense annotated "sparse" matrix to all zeros.
- %fileZero = call @getTensorFilename(%c1) : (index) -> (!Filename)
- %x = sparse_tensor.new %fileZero
- : !Filename to tensor<?x?xf64, #DenseMatrix>
-
// Call the kernel.
- %0 = call @dense_output(%a, %x)
- : (tensor<?x?xf64, #SparseMatrix>,
- tensor<?x?xf64, #DenseMatrix>) -> tensor<?x?xf64, #DenseMatrix>
+ %0 = call @dense_output(%a)
+ : (tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64, #DenseMatrix>
//
// Print the linearized 5x5 result for verification.
@@ -92,7 +87,7 @@ module {
// Release the resources.
sparse_tensor.release %a : tensor<?x?xf64, #SparseMatrix>
- sparse_tensor.release %x : tensor<?x?xf64, #DenseMatrix>
+ sparse_tensor.release %0 : tensor<?x?xf64, #DenseMatrix>
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
index a758a891658d..02d5cc0b5188 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
@@ -1,18 +1,12 @@
-// RUN: mlir-opt %s \
-// RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparse-compiler | \
-// RUN: mlir-cpu-runner \
-// RUN: -e entry -entry-point-result=void \
+// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s \
-// RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparse-compiler="vectorization-strategy=2 vl=2" | \
-// RUN: mlir-cpu-runner \
-// RUN: -e entry -entry-point-result=void \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
index 6c207d7e59a3..f2a35efafabf 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
@@ -1,11 +1,7 @@
-// RUN: mlir-opt %s \
-// RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparse-compiler | \
-// RUN: mlir-cpu-runner \
-// RUN: -e entry -entry-point-result=void \
+// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
-//
#CSR = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
index c4cb95af4599..1db865b3c2bf 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
@@ -1,18 +1,12 @@
-// RUN: mlir-opt %s \
-// RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparse-compiler | \
-// RUN: mlir-cpu-runner \
-// RUN: -e entry -entry-point-result=void \
+// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s \
-// RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparse-compiler="vectorization-strategy=2 vl=2" | \
-// RUN: mlir-cpu-runner \
-// RUN: -e entry -entry-point-result=void \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
index 8f409fe8ecff..b0fde087d808 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
@@ -1,16 +1,12 @@
// RUN: mlir-opt %s --sparse-compiler | \
-// RUN: mlir-cpu-runner \
-// RUN: -e entry -entry-point-result=void \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s \
-// RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparse-compiler="vectorization-strategy=2 vl=8" | \
-// RUN: mlir-cpu-runner \
-// RUN: -e entry -entry-point-result=void \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=8" | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
index 87092754224a..017950391e39 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
@@ -1,18 +1,12 @@
-// RUN: mlir-opt %s \
-// RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparse-compiler | \
-// RUN: mlir-cpu-runner \
-// RUN: -e entry -entry-point-result=void \
+// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
-// RUN: mlir-opt %s \
-// RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparse-compiler="vectorization-strategy=2 vl=8" | \
-// RUN: mlir-cpu-runner \
-// RUN: -e entry -entry-point-result=void \
+// RUN: mlir-opt %s -sparse-compiler="vectorization-strategy=2 vl=8" | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
index a79c4b4cf595..1b66628ad7bd 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
@@ -113,7 +113,6 @@ class SparseCompiler:
def __init__(self, options: str):
pipeline = (
- f'builtin.func(linalg-generalize-named-ops,linalg-fuse-elementwise-ops),'
f'sparse-compiler{{{options} reassociate-fp-reductions=1 enable-index-optimizations=1}}')
self.pipeline = pipeline
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
index f03756b8b129..c29f618e2698 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
@@ -73,7 +73,6 @@ class SparseCompiler:
def __init__(self):
pipeline = (
- f'builtin.func(linalg-generalize-named-ops,linalg-fuse-elementwise-ops),'
f'sparse-compiler{{reassociate-fp-reductions=1 enable-index-optimizations=1}}')
self.pipeline = pipeline
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
index f18655ea3ba5..ccf1ffd6cd26 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
@@ -171,7 +171,6 @@ class SparseCompiler:
def __init__(self, sparsification_options: str, support_lib: str):
self._support_lib = support_lib
self._pipeline = (
- f'builtin.func(linalg-generalize-named-ops,linalg-fuse-elementwise-ops),'
f'sparse-compiler{{{sparsification_options} reassociate-fp-reductions=1 enable-index-optimizations=1}}')
# Must be in the scope of a `with ir.Context():`
self._passmanager = PassManager.parse(self._pipeline)
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 511c22132ad6..32d78f225a05 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -6997,6 +6997,7 @@ cc_library(
":Parser",
":SCFDialect",
":SideEffectInterfaces",
+ ":SparseTensor",
":StandardOps",
":Support",
":TensorDialect",
@@ -7083,6 +7084,7 @@ cc_library(
":SCFDialect",
":SCFTransforms",
":SCFUtils",
+ ":SparseTensor",
":StandardOps",
":StandardOpsTransforms",
":Support",