[Mlir-commits] [mlir] e552fa2 - [MLIR][GPU] Add CUDA Tensor core WMMA test

Uday Bondhugula llvmlistbot at llvm.org
Sat May 22 03:50:14 PDT 2021


Author: Navdeep Kumar
Date: 2021-05-22T16:19:36+05:30
New Revision: e552fa28da286f20f963d51dd05bd3ec278553b7

URL: https://github.com/llvm/llvm-project/commit/e552fa28da286f20f963d51dd05bd3ec278553b7
DIFF: https://github.com/llvm/llvm-project/commit/e552fa28da286f20f963d51dd05bd3ec278553b7.diff

LOG: [MLIR][GPU] Add CUDA Tensor core WMMA test

Add test cases that exercise the complete execution of WMMA ops on an
NVIDIA GPU with tensor cores. These tests are enabled only when
MLIR_RUN_CUDA_TENSOR_CORE_TESTS is set.
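
As a reference, a build configuration along the following lines should
enable them (an illustrative sketch: only MLIR_INCLUDE_INTEGRATION_TESTS
and MLIR_RUN_CUDA_TENSOR_CORE_TESTS are touched by this commit; the rest
reflects a typical CUDA-enabled MLIR build and the sm_75-class GPU assumed
by the RUN lines below):

    # Configure MLIR with integration tests and the tensor core tests enabled.
    # A CUDA-capable build of the MLIR runners/runtime wrappers is also needed.
    cmake -G Ninja ../llvm \
      -DLLVM_ENABLE_PROJECTS=mlir \
      -DMLIR_INCLUDE_INTEGRATION_TESTS=ON \
      -DMLIR_RUN_CUDA_TENSOR_CORE_TESTS=ON
    # Run the test suite; the new tests live under
    # mlir/test/Integration/GPU/CUDA/TensorCore/.
    ninja check-mlir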

Reviewed By: bondhugula

Differential Revision: https://reviews.llvm.org/D95334

Added: 
    mlir/test/Integration/GPU/CUDA/TensorCore/lit.local.cfg
    mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir
    mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir

Modified: 
    mlir/test/CMakeLists.txt
    mlir/test/lit.site.cfg.py.in

Removed: 
    


################################################################################
diff  --git a/mlir/test/CMakeLists.txt b/mlir/test/CMakeLists.txt
index 79b8d8950b4e2..07db8fd6d09e8 100644
--- a/mlir/test/CMakeLists.txt
+++ b/mlir/test/CMakeLists.txt
@@ -32,6 +32,7 @@ if (MLIR_INCLUDE_INTEGRATION_TESTS)
       "If set, arch-specific integration tests are run with Intel SDE.")
   option(MLIR_RUN_AMX_TESTS "Run AMX tests.")
   option(MLIR_RUN_X86VECTOR_TESTS "Run X86Vector tests.")
+  option(MLIR_RUN_CUDA_TENSOR_CORE_TESTS "Run CUDA Tensor core WMMA tests.")
   # Passed to lit.site.cfg.py.in to set up the path where to find the libraries.
   set(MLIR_INTEGRATION_TEST_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
 

diff  --git a/mlir/test/Integration/GPU/CUDA/TensorCore/lit.local.cfg b/mlir/test/Integration/GPU/CUDA/TensorCore/lit.local.cfg
new file mode 100644
index 0000000000000..fc19e5af86f11
--- /dev/null
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/lit.local.cfg
@@ -0,0 +1,5 @@
+import sys
+
+# TensorCore tests must be enabled via a build flag.
+if config.mlir_run_cuda_tensor_core_tests != 'ON':
+  config.unsupported = True

diff  --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir
new file mode 100644
index 0000000000000..3ead8610691f6
--- /dev/null
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir
@@ -0,0 +1,94 @@
+// RUN: mlir-opt %s \
+// RUN: -gpu-kernel-outlining \
+// RUN: -pass-pipeline='gpu.module(strip-debuginfo,convert-gpu-to-nvvm{index-bitwidth=32},gpu-to-cubin{chip=sm_75})' \
+// RUN: --convert-scf-to-std -gpu-to-llvm \
+// RUN: | mlir-cpu-runner \
+// RUN:   --shared-libs=%linalg_test_lib_dir/libmlir_cuda_runtime%shlibext \
+// RUN:   --shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext \
+// RUN:   --entry-point-result=void \
+// RUN: | FileCheck %s
+// Test case checking that tensor core WMMA ops execute correctly on NVIDIA GPUs. The
+// kernel has already been outlined to avoid a crash caused by the empty basic block
+// that --gpu-kernel-outlining would otherwise introduce.
+module attributes {gpu.container_module}  {
+  func @main() {
+    %0 = memref.alloc() : memref<16x16xf16>
+    %22 = memref.alloc() : memref<16x16xf16>
+    %1 = memref.alloc() : memref<16x16xf32>
+
+    %f1 = constant 1.0e+00 : f16
+    %f0 = constant 0.0e+00 : f16
+    %c0 = constant 0 : index
+    %c16 = constant 16 : index
+    %c32 = constant 32 : index
+    %c1 = constant 1 : index
+
+    // Initialize the input matrix with ones.
+    scf.for %arg0 = %c0 to %c16 step %c1 {
+      scf.for %arg1 = %c0 to %c16 step %c1 {
+        memref.store %f1, %0[%arg0, %arg1] : memref<16x16xf16>
+      }
+    }
+    // Initialize the accumulator matrix with zeros.
+    scf.for %arg0 = %c0 to %c16 step %c1 {
+      scf.for %arg1 = %c0 to %c16 step %c1 {
+        memref.store %f0, %22[%arg0, %arg1] : memref<16x16xf16>
+      }
+    }
+
+    %2 = memref.cast %0 : memref<16x16xf16> to memref<*xf16>
+    %33 = memref.cast %22 : memref<16x16xf16> to memref<*xf16>
+    %3 = memref.cast %1 : memref<16x16xf32> to memref<*xf32>
+    gpu.host_register %2 : memref<*xf16>
+    gpu.host_register %33 : memref<*xf16>
+
+    gpu.launch_func  @main_kernel::@main_kernel blocks in (%c1, %c1, %c1) threads in (%c32, %c1, %c1) args(%0 : memref<16x16xf16>, %22 : memref<16x16xf16>)
+
+    // Convert the results from f16 to f32 for printing.
+    scf.for %arg0 = %c0 to %c16 step %c1 {
+      scf.for %arg1 = %c0 to %c16 step %c1 {
+        %6 = memref.load %0[%arg0, %arg1] : memref<16x16xf16>
+        %7 = fpext %6 : f16 to f32
+        memref.store %7, %1[%arg0, %arg1] : memref<16x16xf32>
+      }
+    }
+
+    // Print the memref after computation.
+    call @print_memref_f32(%3) : (memref<*xf32>) -> ()
+    // CHECK: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16]
+    return
+  }
+
+  gpu.module @main_kernel {
+    gpu.func @main_kernel(%arg0: memref<16x16xf16>, %arg22 : memref<16x16xf16>) kernel {
+      %c0 = constant 0 : index
+
+      %0 = gpu.subgroup_mma_load_matrix %arg0[%c0, %c0] {operand = "AOp", leadDimension = 16 : index} : memref<16x16xf16> -> !gpu.mma_matrix<16x16xf16, "AOp">
+      %1 = gpu.subgroup_mma_load_matrix %arg0[%c0, %c0] {operand = "BOp", leadDimension = 16 : index} : memref<16x16xf16> -> !gpu.mma_matrix<16x16xf16, "BOp">
+      %2 = gpu.subgroup_mma_load_matrix %arg22[%c0, %c0] {operand = "COp", leadDimension = 16 : index} : memref<16x16xf16> -> !gpu.mma_matrix<16x16xf16, "COp">
+
+      %3 = gpu.subgroup_mma_compute %0, %1, %2 : !gpu.mma_matrix<16x16xf16, "AOp">, !gpu.mma_matrix<16x16xf16, "BOp">, !gpu.mma_matrix<16x16xf16, "COp"> -> !gpu.mma_matrix<16x16xf16, "DOp">
+
+      gpu.subgroup_mma_store_matrix %3, %arg0[%c0, %c0] {leadDimension = 16 : index}: !gpu.mma_matrix<16x16xf16, "DOp">, memref<16x16xf16>
+
+      gpu.return
+    }
+  }
+
+  func private @print_memref_f32(memref<*xf32>)
+}

diff  --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir
new file mode 100644
index 0000000000000..154bdc90c405e
--- /dev/null
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir
@@ -0,0 +1,85 @@
+// RUN: mlir-opt %s \
+// RUN: -gpu-kernel-outlining \
+// RUN: -pass-pipeline='gpu.module(strip-debuginfo,convert-gpu-to-nvvm{index-bitwidth=32},gpu-to-cubin{chip=sm_75})' \
+// RUN: --convert-scf-to-std -gpu-to-llvm \
+// RUN: | mlir-cpu-runner \
+// RUN:   --shared-libs=%linalg_test_lib_dir/libmlir_cuda_runtime%shlibext \
+// RUN:   --shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext \
+// RUN:   --entry-point-result=void \
+// RUN: | FileCheck %s
+// Test case checking that tensor core WMMA ops execute correctly on NVIDIA GPUs. The
+// kernel has already been outlined to avoid a crash caused by the empty basic block
+// that --gpu-kernel-outlining would otherwise introduce.
+module attributes {gpu.container_module}  {
+  func @main() {
+    %0 = memref.alloc() : memref<16x16xf16>
+    %22 = memref.alloc() : memref<16x16xf32>
+    %1 = memref.alloc() : memref<16x16xf32>
+
+    %f1 = constant 1.0e+00 : f16
+    %f0 = constant 0.0e+00 : f32
+    %c0 = constant 0 : index
+    %c16 = constant 16 : index
+    %c32 = constant 32 : index
+    %c1 = constant 1 : index
+
+    // Initialize the input matrix with ones.
+    scf.for %arg0 = %c0 to %c16 step %c1 {
+      scf.for %arg1 = %c0 to %c16 step %c1 {
+        memref.store %f1, %0[%arg0, %arg1] : memref<16x16xf16>
+      }
+    }
+    // Initialize the accumulator matrix with zeros.
+    scf.for %arg0 = %c0 to %c16 step %c1 {
+      scf.for %arg1 = %c0 to %c16 step %c1 {
+        memref.store %f0, %22[%arg0, %arg1] : memref<16x16xf32>
+      }
+    }
+
+    %2 = memref.cast %0 : memref<16x16xf16> to memref<*xf16>
+    %33 = memref.cast %22 : memref<16x16xf32> to memref<*xf32>
+    %3 = memref.cast %1 : memref<16x16xf32> to memref<*xf32>
+    gpu.host_register %2 : memref<*xf16>
+    gpu.host_register %33 : memref<*xf32>
+
+    gpu.launch_func  @main_kernel::@main_kernel blocks in (%c1, %c1, %c1) threads in (%c32, %c1, %c1) args(%0 : memref<16x16xf16>, %22 : memref<16x16xf32>)
+
+    // Print the memref after computation.
+    call @print_memref_f32(%33) : (memref<*xf32>) -> ()
+    // CHECK: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16],
+    // CHECK-NEXT: [16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16,   16]
+    return
+  }
+
+  gpu.module @main_kernel {
+    gpu.func @main_kernel(%arg0: memref<16x16xf16>, %arg22 : memref<16x16xf32>) kernel {
+      %c0 = constant 0 : index
+
+      %0 = gpu.subgroup_mma_load_matrix %arg0[%c0, %c0] {operand = "AOp", leadDimension = 16 : index} : memref<16x16xf16> -> !gpu.mma_matrix<16x16xf16, "AOp">
+      %1 = gpu.subgroup_mma_load_matrix %arg0[%c0, %c0] {operand = "BOp", leadDimension = 16 : index} : memref<16x16xf16> -> !gpu.mma_matrix<16x16xf16, "BOp">
+      %2 = gpu.subgroup_mma_load_matrix %arg22[%c0, %c0] {operand = "COp", leadDimension = 16 : index} : memref<16x16xf32> -> !gpu.mma_matrix<16x16xf32, "COp">
+
+      %3 = gpu.subgroup_mma_compute %0, %1, %2 : !gpu.mma_matrix<16x16xf16, "AOp">, !gpu.mma_matrix<16x16xf16, "BOp">, !gpu.mma_matrix<16x16xf32, "COp"> -> !gpu.mma_matrix<16x16xf32, "DOp">
+
+      gpu.subgroup_mma_store_matrix %3, %arg22[%c0, %c0] {leadDimension = 16 : index}: !gpu.mma_matrix<16x16xf32, "DOp">, memref<16x16xf32>
+
+      gpu.return
+    }
+  }
+
+  func private @print_memref_f32(memref<*xf32>)
+}

diff  --git a/mlir/test/lit.site.cfg.py.in b/mlir/test/lit.site.cfg.py.in
index 844707f280b9a..1cee32b189f86 100644
--- a/mlir/test/lit.site.cfg.py.in
+++ b/mlir/test/lit.site.cfg.py.in
@@ -49,6 +49,7 @@ config.mlir_integration_test_dir = "@MLIR_INTEGRATION_TEST_DIR@"
 config.intel_sde_executable = "@INTEL_SDE_EXECUTABLE@"
 config.mlir_run_amx_tests = "@MLIR_RUN_AMX_TESTS@"
 config.mlir_run_x86vector_tests = "@MLIR_RUN_X86VECTOR_TESTS@"
+config.mlir_run_cuda_tensor_core_tests = "@MLIR_RUN_CUDA_TENSOR_CORE_TESTS@"
 config.mlir_include_integration_tests = "@MLIR_INCLUDE_INTEGRATION_TESTS@"
 
 # Support substitution of the tools_dir with user parameters. This is


        

