[Mlir-commits] [mlir] 96cab65 - [mlir][sparse] end-to-end sparse vector insertion codegen
Author: Aart Bik
Date: 2022-10-19T12:32:20-07:00
New Revision: 96cab659a19e9565d34647da756461060050f518
URL: https://github.com/llvm/llvm-project/commit/96cab659a19e9565d34647da756461060050f518
DIFF: https://github.com/llvm/llvm-project/commit/96cab659a19e9565d34647da756461060050f518.diff
LOG: [mlir][sparse] end-to-end sparse vector insertion codegen
Reviewed By: Peiming
Differential Revision: https://reviews.llvm.org/D136275
Added:
mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
Modified:
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
mlir/test/Dialect/SparseTensor/codegen.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 707c6c99734c..a090e607d523 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -282,13 +282,13 @@ static scf::ForOp createFor(OpBuilder &builder, Location loc, Value count,
static void createPushback(OpBuilder &builder, Location loc,
SmallVectorImpl<Value> &fields, unsigned field,
Value value) {
- assert(field < fields.size());
+ assert(2 <= field && field < fields.size());
Type etp = fields[field].getType().cast<ShapedType>().getElementType();
if (value.getType() != etp)
value = builder.create<arith::IndexCastOp>(loc, etp, value);
fields[field] =
builder.create<PushBackOp>(loc, fields[field].getType(), fields[1],
- fields[field], value, APInt(64, field));
+ fields[field], value, APInt(64, field - 2));
}
/// Generates insertion code.
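Reading the hunk above together with the updated test below, the storage layout that codegen produces evidently carries two metadata memrefs ahead of the actual data buffers: the per-dimension sizes and the per-buffer memory sizes (fields[1] is what createPushback hands to PushBackOp as the sizes array). A push_back on field f must therefore record its new size in memSizes slot f - 2, which is exactly what the new assert and the APInt(64, field - 2) encode. The following is a minimal C++ sketch of that numbering; the field names and the helper are assumptions drawn from this patch, not verbatim MLIR sources:

#include <cassert>

// Assumed storage-field numbering implied by the assert and the
// `field - 2` translation in createPushback above:
//   field 0:  dimSizes - memref with one size per tensor dimension
//   field 1:  memSizes - memref with one used-size entry per data buffer
//   field 2+: the data buffers themselves (pointers/indices/values)
constexpr unsigned kDimSizesField = 0;
constexpr unsigned kMemSizesField = 1;
constexpr unsigned kDataFieldStart = 2;

// Translates a data-buffer field index into its memSizes slot,
// mirroring the `APInt(64, field - 2)` in the patch.
inline unsigned toMemSizesIndex(unsigned field, unsigned numFields) {
  assert(kDataFieldStart <= field && field < numFields);
  return field - kDataFieldStart;
}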
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index 6a32c72daa7f..bb2eb88d2e1d 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -373,8 +373,8 @@ func.func @sparse_expansion3(%arg0: index, %arg1: index) -> memref<?xindex> {
// CHECK: %[[R:.*]]:2 = scf.for %[[I:.*]] = %[[C0]] to %[[A8]] step %[[C1]] iter_args(%[[P0:.*]] = %[[A3]], %[[P1:.*]] = %[[A4]]) -> (memref<?xindex>, memref<?xf64>) {
// CHECK: %[[T1:.*]] = memref.load %[[A7]][%[[I]]] : memref<?xindex>
// CHECK: %[[T2:.*]] = memref.load %[[A5]][%[[T1]]] : memref<?xf64>
-// CHECK: %[[T3:.*]] = sparse_tensor.push_back %[[A1]], %[[P0]], %[[T1]] {idx = 3 : index} : memref<3xindex>, memref<?xindex>, index
-// CHECK: %[[T4:.*]] = sparse_tensor.push_back %[[A1]], %[[P1]], %[[T2]] {idx = 4 : index} : memref<3xindex>, memref<?xf64>, f64
+// CHECK: %[[T3:.*]] = sparse_tensor.push_back %[[A1]], %[[P0]], %[[T1]] {idx = 1 : index} : memref<3xindex>, memref<?xindex>, index
+// CHECK: %[[T4:.*]] = sparse_tensor.push_back %[[A1]], %[[P1]], %[[T2]] {idx = 2 : index} : memref<3xindex>, memref<?xf64>, f64
// CHECK: memref.store %[[F0]], %arg5[%[[T1]]] : memref<?xf64>
// CHECK: memref.store %[[B0]], %arg6[%[[T1]]] : memref<?xi1>
// CHECK: scf.yield %[[T3]], %[[T4]] : memref<?xindex>, memref<?xf64>
@@ -383,8 +383,8 @@ func.func @sparse_expansion3(%arg0: index, %arg1: index) -> memref<?xindex> {
// CHECK: memref.dealloc %[[A6]] : memref<?xi1>
// CHECK: memref.dealloc %[[A7]] : memref<?xindex>
// CHECK: %[[LL:.*]] = memref.load %[[A1]][%[[C2]]] : memref<3xindex>
-// CHECK: %[[P1:.*]] = sparse_tensor.push_back %[[A1]], %[[A2]], %[[C0]] {idx = 2 : index} : memref<3xindex>, memref<?xindex>, index
-// CHECK: %[[P2:.*]] = sparse_tensor.push_back %[[A1]], %[[P1]], %[[LL]] {idx = 2 : index} : memref<3xindex>, memref<?xindex>, index
+// CHECK: %[[P1:.*]] = sparse_tensor.push_back %[[A1]], %[[A2]], %[[C0]] {idx = 0 : index} : memref<3xindex>, memref<?xindex>, index
+// CHECK: %[[P2:.*]] = sparse_tensor.push_back %[[A1]], %[[P1]], %[[LL]] {idx = 0 : index} : memref<3xindex>, memref<?xindex>, index
// CHECK: return %[[A0]], %[[A1]], %[[P2]], %[[R]]#0, %[[R]]#1 : memref<1xindex>, memref<3xindex>, memref<?xindex>, memref<?xindex>, memref<?xf64>
func.func @sparse_compression_1d(%tensor: tensor<100xf64, #SV>,
%values: memref<?xf64>,
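The CHECK updates in this hunk follow directly from that renumbering. For the 1-D compressed vector in @sparse_compression_1d, the fields are dimSizes (memref<1xindex>), memSizes (memref<3xindex>), then the pointers, indices, and values buffers, as the function's return types show. A quick sketch of the before/after mapping, with buffer names assumed from that layout:

#include <cstdio>

// Old push_back used the raw field index as idx; the patch subtracts
// the two leading metadata fields.
int main() {
  const char *buffers[] = {"pointers", "indices", "values"};
  for (unsigned field = 2; field <= 4; ++field)
    std::printf("%-8s field %u: old idx %u -> new idx %u\n",
                buffers[field - 2], field, field, field - 2);
  return 0;
}

This prints pointers 2 -> 0, indices 3 -> 1, values 4 -> 2, matching the CHECK-line changes above.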
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
new file mode 100644
index 000000000000..6b8576e979fd
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
@@ -0,0 +1,65 @@
+// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=false | \
+// RUN: mlir-cpu-runner \
+// RUN: -e entry -entry-point-result=void \
+// RUN: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+// Insertion example using pure codegen (no sparse runtime support lib).
+
+#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
+
+#trait_mul_s = {
+ indexing_maps = [
+ affine_map<(i) -> (i)> // x (out)
+ ],
+ iterator_types = ["parallel"],
+ doc = "x(i) = x(i) * 2.0"
+}
+
+module {
+
+ // Dumps pointers, indices, values for verification.
+ func.func @dump(%argx: tensor<1024xf32, #SparseVector>) {
+ %c0 = arith.constant 0 : index
+ %cu = arith.constant 99 : index
+ %fu = arith.constant 99.0 : f32
+ %p = sparse_tensor.pointers %argx { dimension = 0 : index }
+ : tensor<1024xf32, #SparseVector> to memref<?xindex>
+ %i = sparse_tensor.indices %argx { dimension = 0 : index }
+ : tensor<1024xf32, #SparseVector> to memref<?xindex>
+ %v = sparse_tensor.values %argx
+ : tensor<1024xf32, #SparseVector> to memref<?xf32>
+ %vp = vector.transfer_read %p[%c0], %cu: memref<?xindex>, vector<8xindex>
+ %vi = vector.transfer_read %i[%c0], %cu: memref<?xindex>, vector<8xindex>
+ %vv = vector.transfer_read %v[%c0], %fu: memref<?xf32>, vector<8xf32>
+ vector.print %vp : vector<8xindex>
+ vector.print %vi : vector<8xindex>
+ vector.print %vv : vector<8xf32>
+ return
+ }
+
+ func.func @entry() {
+ %f1 = arith.constant 1.0 : f32
+ %f2 = arith.constant 2.0 : f32
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %c3 = arith.constant 3 : index
+ %c1023 = arith.constant 1023 : index
+
+ // Build the sparse vector from code.
+ %0 = bufferization.alloc_tensor() : tensor<1024xf32, #SparseVector>
+ %1 = sparse_tensor.insert %f1 into %0[%c0] : tensor<1024xf32, #SparseVector>
+ %2 = sparse_tensor.insert %f2 into %1[%c1] : tensor<1024xf32, #SparseVector>
+ %3 = sparse_tensor.insert %f1 into %2[%c3] : tensor<1024xf32, #SparseVector>
+ %4 = sparse_tensor.insert %f2 into %3[%c1023] : tensor<1024xf32, #SparseVector>
+ %5 = sparse_tensor.load %4 hasInserts : tensor<1024xf32, #SparseVector>
+
+ // CHECK: ( 0, 4, 99, 99, 99, 99, 99, 99 )
+ // CHECK-NEXT: ( 0, 1, 3, 1023, 99, 99, 99, 99 )
+ // CHECK-NEXT: ( 1, 2, 1, 2, 99, 99, 99, 99 )
+ call @dump(%5) : (tensor<1024xf32, #SparseVector>) -> ()
+
+ bufferization.dealloc_tensor %5 : tensor<1024xf32, #SparseVector>
+ return
+ }
+}
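For readers unfamiliar with the compressed format the test verifies: a 1-D "compressed" level stores a pointers array delimiting one segment [0, nnz], the sorted indices of the stored entries, and their values. The sketch below (plain C++, not the MLIR codegen) replays the four insertions from @entry and prints the same three arrays the CHECK lines expect:

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  std::vector<std::size_t> indices;
  std::vector<float> values;
  // Same insertions as @entry, already in sorted index order.
  const std::pair<std::size_t, float> inserts[] = {
      {0, 1.0f}, {1, 2.0f}, {3, 1.0f}, {1023, 2.0f}};
  for (auto [i, v] : inserts) {
    indices.push_back(i);
    values.push_back(v);
  }
  // One segment spanning the whole vector: pointers = (0, nnz).
  std::printf("pointers: ( 0, %zu )\n", indices.size());
  std::printf("indices: (");
  for (std::size_t i : indices) std::printf(" %zu", i);
  std::printf(" )\nvalues: (");
  for (float v : values) std::printf(" %g", v);
  std::printf(" )\n");
  return 0;
}

This yields pointers (0, 4), indices (0, 1, 3, 1023), and values (1, 2, 1, 2), i.e. the non-padding prefix of each vector.print in the CHECK lines.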