[Mlir-commits] [mlir] c9c2444 - [mlir][spirv] Add integration test for `vector.interleave` and `vector.shuffle` (#93595)

llvmlistbot at llvm.org
Wed May 29 09:19:37 PDT 2024


Author: Angel Zhang
Date: 2024-05-29T12:19:32-04:00
New Revision: c9c244423ffb8071bb838c3606052e12af537047

URL: https://github.com/llvm/llvm-project/commit/c9c244423ffb8071bb838c3606052e12af537047
DIFF: https://github.com/llvm/llvm-project/commit/c9c244423ffb8071bb838c3606052e12af537047.diff

LOG: [mlir][spirv] Add integration test for `vector.interleave` and `vector.shuffle` (#93595)

- Add integration tests for `vector.shuffle` and `vector.interleave`, as
mentioned in issue #91978
- Add `VectorToSPIRV` patterns to `GPUToSPIRVPass` so that these ops can be
lowered in the Vulkan runner tests (see the lowering sketch below)
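
For reference, a rough sketch of the lowering these patterns are expected to
enable for the interleave test kernel, assuming the existing VectorToSPIRV
conversion of `vector.interleave` to `spirv.VectorShuffle` (the converted
output itself is not part of this diff, and SSA names are reused from the test
for readability):

    // Before conversion (from the test kernel below):
    %result = vector.interleave %vec0, %vec1 : vector<2xi32> -> vector<4xi32>

    // After conversion: the shuffle indices alternate between the two source
    // vectors, which yields the [0, 2, 1, 3] output checked by the test.
    %result = spirv.VectorShuffle [0 : i32, 2 : i32, 1 : i32, 3 : i32]
        %vec0, %vec1 : vector<2xi32>, vector<2xi32> -> vector<4xi32>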

---------

Co-authored-by: Jakub Kuderski <kubakuderski at gmail.com>

Added: 
    mlir/test/mlir-vulkan-runner/vector-interleave.mlir
    mlir/test/mlir-vulkan-runner/vector-shuffle.mlir

Modified: 
    mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp

Removed: 
    


################################################################################
diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
index 1d1db913e3df2..53e73ec0d81bf 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
@@ -18,6 +18,7 @@
 #include "mlir/Conversion/GPUToSPIRV/GPUToSPIRV.h"
 #include "mlir/Conversion/MemRefToSPIRV/MemRefToSPIRV.h"
 #include "mlir/Conversion/SCFToSPIRV/SCFToSPIRV.h"
+#include "mlir/Conversion/VectorToSPIRV/VectorToSPIRV.h"
 #include "mlir/Dialect/Func/IR/FuncOps.h"
 #include "mlir/Dialect/GPU/IR/GPUDialect.h"
 #include "mlir/Dialect/SPIRV/IR/SPIRVDialect.h"
@@ -132,6 +133,7 @@ void GPUToSPIRVPass::runOnOperation() {
     mlir::arith::populateArithToSPIRVPatterns(typeConverter, patterns);
     populateMemRefToSPIRVPatterns(typeConverter, patterns);
     populateFuncToSPIRVPatterns(typeConverter, patterns);
+    populateVectorToSPIRVPatterns(typeConverter, patterns);
 
     if (failed(applyFullConversion(gpuModule, *target, std::move(patterns))))
       return signalPassFailure();

diff --git a/mlir/test/mlir-vulkan-runner/vector-interleave.mlir b/mlir/test/mlir-vulkan-runner/vector-interleave.mlir
new file mode 100644
index 0000000000000..2f5c319e2f5c5
--- /dev/null
+++ b/mlir/test/mlir-vulkan-runner/vector-interleave.mlir
@@ -0,0 +1,53 @@
+// RUN: mlir-vulkan-runner %s \
+// RUN:  --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \
+// RUN:  --entry-point-result=void | FileCheck %s
+
+// CHECK: [0, 2, 1, 3]
+module attributes {
+  gpu.container_module,
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
+} {
+  gpu.module @kernels {
+    gpu.func @kernel_vector_interleave(%arg0 : memref<2xi32>, %arg1 : memref<2xi32>, %arg2 : memref<4xi32>)
+      kernel attributes { spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [1, 1, 1]>} {
+      %c0 = arith.constant 0 : index
+      %vec0 = vector.load %arg0[%c0] : memref<2xi32>, vector<2xi32>
+      %vec1 = vector.load %arg1[%c0] : memref<2xi32>, vector<2xi32>
+      %result = vector.interleave %vec0, %vec1 : vector<2xi32> -> vector<4xi32>
+      vector.store %result, %arg2[%c0] : memref<4xi32>, vector<4xi32>
+      gpu.return
+    }
+  }
+
+  func.func @main() {
+    // Allocate 3 buffers.
+    %buf0 = memref.alloc() : memref<2xi32>
+    %buf1 = memref.alloc() : memref<2xi32>
+    %buf2 = memref.alloc() : memref<4xi32>
+    
+    %idx0 = arith.constant 0 : index
+    %idx1 = arith.constant 1 : index
+    %idx4 = arith.constant 4 : index
+
+    // Initialize input buffer.
+    %buf0_vals = arith.constant dense<[0, 1]> : vector<2xi32>
+    %buf1_vals = arith.constant dense<[2, 3]> : vector<2xi32>
+    vector.store %buf0_vals, %buf0[%idx0] : memref<2xi32>, vector<2xi32>
+    vector.store %buf1_vals, %buf1[%idx0] : memref<2xi32>, vector<2xi32>
+
+    // Initialize output buffer.
+    %value0 = arith.constant 0 : i32
+    %buf3 = memref.cast %buf2 : memref<4xi32> to memref<?xi32>
+    call @fillResource1DInt(%buf3, %value0) : (memref<?xi32>, i32) -> ()
+
+    gpu.launch_func @kernels::@kernel_vector_interleave
+        blocks in (%idx4, %idx1, %idx1) threads in (%idx1, %idx1, %idx1)
+        args(%buf0 : memref<2xi32>, %buf1 : memref<2xi32>, %buf2 : memref<4xi32>)
+    %buf4 = memref.cast %buf3 : memref<?xi32> to memref<*xi32>
+    call @printMemrefI32(%buf4) : (memref<*xi32>) -> ()
+    return
+  }
+  func.func private @fillResource1DInt(%0 : memref<?xi32>, %1 : i32)
+  func.func private @printMemrefI32(%ptr : memref<*xi32>)
+}

diff --git a/mlir/test/mlir-vulkan-runner/vector-shuffle.mlir b/mlir/test/mlir-vulkan-runner/vector-shuffle.mlir
new file mode 100644
index 0000000000000..e29e054ccd46b
--- /dev/null
+++ b/mlir/test/mlir-vulkan-runner/vector-shuffle.mlir
@@ -0,0 +1,53 @@
+// RUN: mlir-vulkan-runner %s \
+// RUN:  --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \
+// RUN:  --entry-point-result=void | FileCheck %s
+
+// CHECK: [2, 1, 3]
+module attributes {
+  gpu.container_module,
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
+} {
+  gpu.module @kernels {
+    gpu.func @kernel_vector_shuffle(%arg0 : memref<2xi32>, %arg1 : memref<2xi32>, %arg2 : memref<3xi32>)
+      kernel attributes { spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [1, 1, 1]>} {
+      %c0 = arith.constant 0 : index
+      %vec0 = vector.load %arg0[%c0] : memref<2xi32>, vector<2xi32>
+      %vec1 = vector.load %arg1[%c0] : memref<2xi32>, vector<2xi32>
+      %result = vector.shuffle %vec0, %vec1[2, 1, 3] : vector<2xi32>, vector<2xi32>
+      vector.store %result, %arg2[%c0] : memref<3xi32>, vector<3xi32>
+      gpu.return
+    }
+  }
+
+  func.func @main() {
+    // Allocate 3 buffers.
+    %buf0 = memref.alloc() : memref<2xi32>
+    %buf1 = memref.alloc() : memref<2xi32>
+    %buf2 = memref.alloc() : memref<3xi32>
+    
+    %idx0 = arith.constant 0 : index
+    %idx1 = arith.constant 1 : index
+    %idx4 = arith.constant 4 : index
+
+    // Initialize input buffer.
+    %buf0_vals = arith.constant dense<[0, 1]> : vector<2xi32>
+    %buf1_vals = arith.constant dense<[2, 3]> : vector<2xi32>
+    vector.store %buf0_vals, %buf0[%idx0] : memref<2xi32>, vector<2xi32>
+    vector.store %buf1_vals, %buf1[%idx0] : memref<2xi32>, vector<2xi32>
+
+    // Initialize output buffer.
+    %value0 = arith.constant 0 : i32
+    %buf3 = memref.cast %buf2 : memref<3xi32> to memref<?xi32>
+    call @fillResource1DInt(%buf3, %value0) : (memref<?xi32>, i32) -> ()
+
+    gpu.launch_func @kernels::@kernel_vector_shuffle
+        blocks in (%idx4, %idx1, %idx1) threads in (%idx1, %idx1, %idx1)
+        args(%buf0 : memref<2xi32>, %buf1 : memref<2xi32>, %buf2 : memref<3xi32>)
+    %buf4 = memref.cast %buf3 : memref<?xi32> to memref<*xi32>
+    call @printMemrefI32(%buf4) : (memref<*xi32>) -> ()
+    return
+  }
+  func.func private @fillResource1DInt(%0 : memref<?xi32>, %1 : i32)
+  func.func private @printMemrefI32(%ptr : memref<*xi32>)
+}


        

