[Mlir-commits] [mlir] e7ee4ea - [mlir][sparse] disable nonunit stride dense vectorization
Aart Bik
llvmlistbot at llvm.org
Fri Mar 12 16:49:44 PST 2021
Author: Aart Bik
Date: 2021-03-12T16:49:32-08:00
New Revision: e7ee4eaaf7f051d74f0706491a1c16594a832b7d
URL: https://github.com/llvm/llvm-project/commit/e7ee4eaaf7f051d74f0706491a1c16594a832b7d
DIFF: https://github.com/llvm/llvm-project/commit/e7ee4eaaf7f051d74f0706491a1c16594a832b7d.diff
LOG: [mlir][sparse] disable nonunit stride dense vectorization
This is a temporary work-around to get our all-annotations-all-flags
stress testing effort running clean. In the long run, however, we want
to provide efficient implementations of strided loads and stores.
Reviewed By: bixia
Differential Revision: https://reviews.llvm.org/D98563
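
To see what is being rejected (a hypothetical C++ sketch, not part of the
commit): with row-major dense storage, the innermost loop only maps onto
contiguous vector loads when the tensor's last index is the loop variable.
Because the iteration graph always places sparse dimensions innermost, a
dense operand can end up accessed transposed, i.e. with a stride of a full
row between consecutive iterations:

  // Hypothetical sketch, assuming row-major storage; all names illustrative.
  // Unit stride: consecutive j iterations of a[i][j] touch adjacent memory,
  // so the innermost loop maps directly onto contiguous vector loads.
  void unitStride(int M, int N, const float *a, const float *b, float *x) {
    for (int i = 0; i < M; i++)
      for (int j = 0; j < N; j++)
        x[i] += a[i * N + j] * b[j]; // loads a[i*N+j .. i*N+j+vl) contiguously
  }

  // Non-unit stride: the dense operand is accessed transposed, so consecutive
  // j iterations are N elements apart; a plain vector load no longer applies,
  // and this patch simply rejects vectorizing such loops for now.
  void nonUnitStride(int M, int N, const float *a, const float *b, float *y) {
    for (int i = 0; i < M; i++)
      for (int j = 0; j < N; j++)
        y[i] += a[j * N + i] * b[j]; // stride of N elements between j and j+1
  }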
Added:
Modified:
mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp b/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp
index 30d9c7d43968..ed3b8b31f97d 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp
@@ -799,11 +799,11 @@ static void genReductionEnd(Merger &merger, CodeGen &codegen,
Value red = codegen.redVal;
if (!red)
return;
+ assert(codegen.curVecLength == 1);
codegen.redVal = merger.exp(codegen.redExp).val = Value(); // end chain
unsigned lhs = op.getNumShapedOperands() - 1;
if (red.getType().isa<VectorType>()) {
// TODO: assumes + reductions for now
- codegen.curVecLength = 1;
Value ld = genTensorLoad(merger, codegen, rewriter, op, codegen.redExp);
red = rewriter.create<vector::ReductionOp>(
op.getLoc(), ld.getType(), rewriter.getStringAttr("add"), red, ld);
@@ -947,6 +947,25 @@ static bool isParallelFor(CodeGen &codegen, bool isOuter, bool isReduction,
llvm_unreachable("unexpected parallelization strategy");
}
+/// Checks unit strides for dense tensors. The iteration graph may have ignored
+/// dense access patterns in order to avoid cycles (sparse access patterns are
+/// always placed innermost), but that means dense access has become strided.
+/// For now, we reject vectorization of such cases.
+/// TODO: implement strided load/stores on dense arrays
+static bool denseUnitStrides(Merger &merger, linalg::GenericOp op,
+ unsigned idx) {
+ unsigned numTensors = op.getNumShapedOperands();
+ for (unsigned t = 0; t < numTensors; t++) {
+ if (!merger.isSparseTensor(t) && !linkedSparse(op, t)) {
+ auto map = op.getIndexingMap(t);
+ unsigned r = map.getNumResults();
+ if (r && map.getDimPosition(r - 1) != idx)
+ return false;
+ }
+ }
+ return true;
+}
+
/// Generates a for-loop on a single index.
static Operation *genFor(Merger &merger, CodeGen &codegen,
PatternRewriter &rewriter, linalg::GenericOp op,
@@ -958,7 +977,8 @@ static Operation *genFor(Merger &merger, CodeGen &codegen,
auto iteratorTypes = op.iterator_types().getValue();
bool isReduction = linalg::isReductionIteratorType(iteratorTypes[idx]);
bool isSparse = merger.isDim(fb, Dim::kSparse);
- bool isVector = isVectorFor(codegen, isInner, isSparse);
+ bool isVector = isVectorFor(codegen, isInner, isSparse) &&
+ denseUnitStrides(merger, op, idx);
bool isParallel =
isParallelFor(codegen, isOuter, isReduction, isSparse, isVector);
@@ -1279,10 +1299,10 @@ static void genStmt(Merger &merger, CodeGen &codegen, PatternRewriter &rewriter,
}
// Wrap-up loop sequence.
+ codegen.curVecLength = 1;
genReductionEnd(merger, codegen, rewriter, op);
genInvariants(merger, codegen, rewriter, op, exp, ldx, /*hoist=*/false);
codegen.loops[idx] = Value();
- codegen.curVecLength = 1;
}
namespace {
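
For reference, the new denseUnitStrides predicate boils down to a single test
per dense operand: the last result of its indexing map must be the loop being
vectorized. A simplified standalone model (hypothetical types; the real code
queries MLIR AffineMaps and skips sparse and linked-sparse tensors via
merger.isSparseTensor and linkedSparse):

  #include <cassert>
  #include <vector>

  // Each dense tensor's indexing map is modeled as the list of loop-dimension
  // positions of its results, e.g. the map (i,j) -> (j,i) becomes {1,0}.
  using IndexingMap = std::vector<unsigned>;

  // Vectorizing loop `idx` is allowed only if every dense access is unit
  // stride, i.e. the innermost (last) result of each map is exactly `idx`.
  static bool denseUnitStridesModel(const std::vector<IndexingMap> &denseMaps,
                                    unsigned idx) {
    for (const IndexingMap &map : denseMaps)
      if (!map.empty() && map.back() != idx)
        return false; // innermost dense access is strided: reject
    return true;
  }

  int main() {
    // Loop nest (d0, d1); the candidate vector loop is the innermost d1.
    assert(denseUnitStridesModel({{0, 1}}, 1));  // a[d0][d1]: unit stride
    assert(!denseUnitStridesModel({{1, 0}}, 1)); // a[d1][d0]: stride = row
    return 0;
  }

Note that a loop failing this test is not an error: with the guard added in
genFor, isVector simply becomes false and the loop is emitted in scalar form.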