[Mlir-commits] [mlir] 01b1b0c - [mlir][SVE] Add e2e for 1D depthwise WC convolution (#85225)

llvmlistbot at llvm.org
Fri Mar 22 08:53:08 PDT 2024


Author: Andrzej WarzyƄski
Date: 2024-03-22T15:53:04Z
New Revision: 01b1b0c1f728e2c2639edc654424f50830295989

URL: https://github.com/llvm/llvm-project/commit/01b1b0c1f728e2c2639edc654424f50830295989
DIFF: https://github.com/llvm/llvm-project/commit/01b1b0c1f728e2c2639edc654424f50830295989.diff

LOG: [mlir][SVE] Add e2e for 1D depthwise WC convolution (#85225)

Follow-up for https://github.com/llvm/llvm-project/pull/81625
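
For reference, with unit stride and dilation, linalg.depthwise_conv_1d_nwc_wc
computes out[n, w, c] += in[n, w + kw, c] * filter[kw, c]. Since the test fills
the input with 5, every output element for channel c is
5 * (filter[0][c] + filter[1][c]), i.e. 5 * (1 + 11) = 60 for channel 0 up to
5 * (6 + 16) = 110 for channel 5, which is what the CHECK lines in the new test
expect. A scalar loop-nest sketch of that computation (illustration only, not
part of this patch; @conv_ref is a hypothetical name):

func.func @conv_ref(%input : tensor<1x8x6xi32>, %filter : tensor<2x6xi32>,
                    %init : tensor<1x7x6xi32>) -> tensor<1x7x6xi32> {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  %c6 = arith.constant 6 : index
  %c7 = arith.constant 7 : index
  // out[0, w, c] += in[0, w + kw, c] * filter[kw, c]
  %res = scf.for %w = %c0 to %c7 step %c1 iter_args(%acc_w = %init) -> (tensor<1x7x6xi32>) {
    %res_c = scf.for %c = %c0 to %c6 step %c1 iter_args(%acc_c = %acc_w) -> (tensor<1x7x6xi32>) {
      %res_kw = scf.for %kw = %c0 to %c2 step %c1 iter_args(%acc = %acc_c) -> (tensor<1x7x6xi32>) {
        %iw = arith.addi %w, %kw : index
        %in = tensor.extract %input[%c0, %iw, %c] : tensor<1x8x6xi32>
        %flt = tensor.extract %filter[%kw, %c] : tensor<2x6xi32>
        %cur = tensor.extract %acc[%c0, %w, %c] : tensor<1x7x6xi32>
        %mul = arith.muli %in, %flt : i32
        %add = arith.addi %cur, %mul : i32
        %upd = tensor.insert %add into %acc[%c0, %w, %c] : tensor<1x7x6xi32>
        scf.yield %upd : tensor<1x7x6xi32>
      }
      scf.yield %res_kw : tensor<1x7x6xi32>
    }
    scf.yield %res_c : tensor<1x7x6xi32>
  }
  return %res : tensor<1x7x6xi32>
}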

Added: 
    mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir

Modified: 
    

Removed: 
    


################################################################################
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir
new file mode 100644
index 00000000000000..57d69383c2de65
--- /dev/null
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir
@@ -0,0 +1,60 @@
+// DEFINE: %{compile} =  mlir-opt %s \
+// DEFINE:    -transform-interpreter -test-transform-dialect-erase-schedule \
+// DEFINE:    -one-shot-bufferize="bufferize-function-boundaries" -lower-vector-mask -cse -canonicalize -convert-vector-to-scf -arm-sve-legalize-vector-storage \
+// DEFINE:    -convert-vector-to-llvm="enable-arm-sve" -test-lower-to-llvm -o %t
+// DEFINE: %{entry_point} = conv
+// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve"\
+// DEFINE:    -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils
+
+// RUN: %{compile} | %{run} | FileCheck %s
+
+func.func @conv() {
+  // Define input/output tensors
+  %input_init = tensor.empty() : tensor<1x8x6xi32>
+  %output_init = tensor.empty() : tensor<1x7x6xi32>
+
+  %five = arith.constant 5 : i32
+  %zero = arith.constant 0 : i32
+  %input = linalg.fill ins(%five : i32) outs(%input_init : tensor<1x8x6xi32>) -> tensor<1x8x6xi32>
+  %output = linalg.fill ins(%zero : i32) outs(%output_init : tensor<1x7x6xi32>) -> tensor<1x7x6xi32>
+
+  // Define the filter tensor
+  %filter = arith.constant dense<[
+    [ 1,  2, 3, 4, 5, 6],
+    [ 11, 12, 13, 14, 15, 16]
+  ]> : tensor<2x6xi32>
+
+  // Static sizes -> dynamic sizes
+  %input_dyn = tensor.cast %input : tensor<1x8x6xi32> to tensor<1x8x?xi32>
+  %output_dyn = tensor.cast %output : tensor<1x7x6xi32> to tensor<1x7x?xi32>
+  %filter_dyn = tensor.cast %filter : tensor<2x6xi32> to tensor<2x?xi32>
+
+  // Run the convolution
+  %res = linalg.depthwise_conv_1d_nwc_wc
+    ins(%input_dyn, %filter_dyn : tensor<1x8x?xi32>, tensor<2x?xi32>)
+    outs(%output_dyn : tensor<1x7x?xi32>) -> tensor<1x7x?xi32>
+
+  // Print the results
+  // CHECK: SVE: START OF TEST OUTPUT
+  vector.print str "SVE: START OF TEST OUTPUT\n"
+
+  // CHECK-NEXT: Unranked Memref base@ = {{.*}} rank = 3 offset = 0 sizes = [1, 7, 6] strides = [42, 6, 1] data =
+  // CHECK-COUNT-7: [60, 70, 80, 90, 100, 110]
+  %xf = tensor.cast %res : tensor<1x7x?xi32> to tensor<*xi32>
+  call @printMemrefI32(%xf) : (tensor<*xi32>) -> ()
+
+  // CHECK-NEXT: SVE: END OF TEST OUTPUT
+  vector.print str "SVE: END OF TEST OUTPUT\n"
+
+  return
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.depthwise_conv_1d_nwc_wc"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize %0 vector_sizes [1, 7, [8], 2] : !transform.any_op
+    transform.yield
+  }
+}
+
+func.func private @printMemrefI32(%ptr : tensor<*xi32>) attributes { llvm.emit_c_interface }
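
In the transform script above, the [8] entry in vector_sizes [1, 7, [8], 2]
marks the channel dimension as scalable, so vectorization produces
vector<[8]xi32>-based code that is masked down to the actual channel count
(6 at runtime); -lower-vector-mask and -convert-vector-to-llvm="enable-arm-sve"
in the %{compile} pipeline then lower those masked operations to SVE. A minimal
sketch of a masked read along a scalable dimension (illustration only;
@masked_channel_read and its operands are hypothetical):

func.func @masked_channel_read(%src : tensor<6xi32>, %pad : i32) -> vector<[8]xi32> {
  %c0 = arith.constant 0 : index
  %c6 = arith.constant 6 : index
  // Predicate covering the 6 valid channels out of vscale x 8 lanes.
  %mask = vector.create_mask %c6 : vector<[8]xi1>
  // Masked scalable read; %pad supplies the transfer_read padding value.
  %v = vector.mask %mask {
    vector.transfer_read %src[%c0], %pad : tensor<6xi32>, vector<[8]xi32>
  } : vector<[8]xi1> -> vector<[8]xi32>
  return %v : vector<[8]xi32>
}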
