[Mlir-commits] [mlir] 2679d8d - [mlir][vulkan-runner] Add test to time a large vector add

Lei Zhang llvmlistbot at llvm.org
Wed Mar 25 16:02:53 PDT 2020


Author: Lei Zhang
Date: 2020-03-25T19:02:29-04:00
New Revision: 2679d8dc7e2c726cb348b020ec61adffd95beaaa

URL: https://github.com/llvm/llvm-project/commit/2679d8dc7e2c726cb348b020ec61adffd95beaaa
DIFF: https://github.com/llvm/llvm-project/commit/2679d8dc7e2c726cb348b020ec61adffd95beaaa.diff

LOG: [mlir][vulkan-runner] Add test to time a large vector add

Summary:
The test performs an elementwise add on vector<16384xf32> with
number of workgroups = (128, 1, 1)
local workgroup size = (128, 1, 1)
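
That is, the launch covers 128 workgroups x 128 invocations = 16384 invocations,
one per element of the vector; each invocation computes its element index as
block_id * 128 + thread_id, as in the kernel in the diff below.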

On an NVIDIA Quadro P1000, I see the following results:

Command buffer submit time: 13us
Compute shader execution time: 19.616us

Differential Revision: https://reviews.llvm.org/D76799

Added: 
    mlir/test/mlir-vulkan-runner/time.mlir

Modified: 
    

Removed: 
    


################################################################################
diff --git a/mlir/test/mlir-vulkan-runner/time.mlir b/mlir/test/mlir-vulkan-runner/time.mlir
new file mode 100644
index 000000000000..f69b4feec37f
--- /dev/null
+++ b/mlir/test/mlir-vulkan-runner/time.mlir
@@ -0,0 +1,57 @@
+// RUN: mlir-vulkan-runner %s --shared-libs=%vulkan_wrapper_library_dir/libvulkan-runtime-wrappers%shlibext,%linalg_test_lib_dir/libmlir_runner_utils%shlibext --entry-point-result=void | FileCheck %s
+
+// CHECK: Compute shader execution time
+// CHECK: Command buffer submit time
+// CHECK: Wait idle time
+
+module attributes {
+  gpu.container_module,
+  spv.target_env = #spv.target_env<
+    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
+    {max_compute_workgroup_invocations = 128 : i32,
+     max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+} {
+  gpu.module @kernels {
+    gpu.func @kernel_add(%arg0 : memref<16384xf32>, %arg1 : memref<16384xf32>, %arg2 : memref<16384xf32>)
+      attributes {gpu.kernel, spv.entry_point_abi = {local_size = dense<[128, 1, 1]>: vector<3xi32>}} {
+      %bid = "gpu.block_id"() {dimension = "x"} : () -> index
+      %tid = "gpu.thread_id"() {dimension = "x"} : () -> index
+      %cst = constant 128 : index
+      %b = muli %bid, %cst : index
+      %0 = addi %b, %tid : index
+      %1 = load %arg0[%0] : memref<16384xf32>
+      %2 = load %arg1[%0] : memref<16384xf32>
+      %3 = addf %1, %2 : f32
+      store %3, %arg2[%0] : memref<16384xf32>
+      gpu.return
+    }
+  }
+
+  func @main() {
+    %arg0 = alloc() : memref<16384xf32>
+    %arg1 = alloc() : memref<16384xf32>
+    %arg2 = alloc() : memref<16384xf32>
+    %0 = constant 0 : i32
+    %1 = constant 1 : i32
+    %2 = constant 2 : i32
+    %value0 = constant 0.0 : f32
+    %value1 = constant 1.1 : f32
+    %value2 = constant 2.2 : f32
+    %arg3 = memref_cast %arg0 : memref<16384xf32> to memref<?xf32>
+    %arg4 = memref_cast %arg1 : memref<16384xf32> to memref<?xf32>
+    %arg5 = memref_cast %arg2 : memref<16384xf32> to memref<?xf32>
+    call @fillResource1DFloat(%arg3, %value1) : (memref<?xf32>, f32) -> ()
+    call @fillResource1DFloat(%arg4, %value2) : (memref<?xf32>, f32) -> ()
+    call @fillResource1DFloat(%arg5, %value0) : (memref<?xf32>, f32) -> ()
+
+    %cst1 = constant 1 : index
+    %cst128 = constant 128 : index
+    "gpu.launch_func"(%cst128, %cst1, %cst1, %cst128, %cst1, %cst1, %arg0, %arg1, %arg2) { kernel = "kernel_add", kernel_module = @kernels }
+        : (index, index, index, index, index, index, memref<16384xf32>, memref<16384xf32>, memref<16384xf32>) -> ()
+    %arg6 = memref_cast %arg5 : memref<?xf32> to memref<*xf32>
+    return
+  }
+  func @fillResource1DFloat(%0 : memref<?xf32>, %1 : f32)
+  func @print_memref_f32(%ptr : memref<*xf32>)
+}
+