[Mlir-commits] [mlir] b301a98 - [mlir][spirv] Add integration tests for `vector.interleave` and `vector.shuffle` (#93858)
llvmlistbot at llvm.org
Mon Jun 3 07:12:43 PDT 2024
Author: Angel Zhang
Date: 2024-06-03T10:12:39-04:00
New Revision: b301a98b33d75813d73838c1bd4c47024d044af6
URL: https://github.com/llvm/llvm-project/commit/b301a98b33d75813d73838c1bd4c47024d044af6
DIFF: https://github.com/llvm/llvm-project/commit/b301a98b33d75813d73838c1bd4c47024d044af6.diff
LOG: [mlir][spirv] Add integration tests for `vector.interleave` and `vector.shuffle` (#93858)
This PR relands #93595, which was reverted in #93732 due to the issue
described below. The original PR:
- Added integration tests for `vector.shuffle` and `vector.interleave`
- Added `VectorToSPIRV` patterns to `GPUToSPIRVPass`
Description of the issue:
- https://github.com/llvm/llvm-project/pull/93595#issuecomment-2138541700
- Using either `vector.load` or `vector.store` in the kernel function
causes the validation layer to report an error.
- This reland bypasses the issue by using `memref.load` and
`memref.store` to load/store individual elements from/to the buffers,
and by populating and reading the vectors with `vector.insertelement`
and `vector.extractelement` instead (see the sketch below).
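
As a rough illustration of the workaround, a minimal sketch (not the
exact test code; `%buf`, `%zeroes`, `%e0`, `%e1`, `%v0`, and `%v1` are
placeholder names):

    // Problematic inside the kernel: trips the Vulkan validation layer.
    // %v = vector.load %buf[%idx0] : memref<2xi32>, vector<2xi32>

    // Workaround: load scalars and assemble the vector element by element.
    %e0 = memref.load %buf[%idx0] : memref<2xi32>
    %e1 = memref.load %buf[%idx1] : memref<2xi32>
    %v0 = vector.insertelement %e0, %zeroes[%idx0 : index] : vector<2xi32>
    %v1 = vector.insertelement %e1, %v0[%idx1 : index] : vector<2xi32>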
Added:
mlir/test/mlir-vulkan-runner/vector-interleave.mlir
mlir/test/mlir-vulkan-runner/vector-shuffle.mlir
Modified:
mlir/lib/Conversion/GPUToSPIRV/CMakeLists.txt
mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
Removed:
################################################################################
diff --git a/mlir/lib/Conversion/GPUToSPIRV/CMakeLists.txt b/mlir/lib/Conversion/GPUToSPIRV/CMakeLists.txt
index 3deb219789f7f..7e97eeb2e2496 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/CMakeLists.txt
+++ b/mlir/lib/Conversion/GPUToSPIRV/CMakeLists.txt
@@ -13,6 +13,7 @@ add_mlir_conversion_library(MLIRGPUToSPIRV
MLIRIR
MLIRPass
MLIRSCFToSPIRV
+ MLIRVectorToSPIRV
MLIRSPIRVDialect
MLIRSPIRVConversion
MLIRSupport
diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
index 1d1db913e3df2..53e73ec0d81bf 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
@@ -18,6 +18,7 @@
#include "mlir/Conversion/GPUToSPIRV/GPUToSPIRV.h"
#include "mlir/Conversion/MemRefToSPIRV/MemRefToSPIRV.h"
#include "mlir/Conversion/SCFToSPIRV/SCFToSPIRV.h"
+#include "mlir/Conversion/VectorToSPIRV/VectorToSPIRV.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/SPIRV/IR/SPIRVDialect.h"
@@ -132,6 +133,7 @@ void GPUToSPIRVPass::runOnOperation() {
mlir::arith::populateArithToSPIRVPatterns(typeConverter, patterns);
populateMemRefToSPIRVPatterns(typeConverter, patterns);
populateFuncToSPIRVPatterns(typeConverter, patterns);
+ populateVectorToSPIRVPatterns(typeConverter, patterns);
if (failed(applyFullConversion(gpuModule, *target, std::move(patterns))))
return signalPassFailure();
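
For context, registering these patterns lets ops such as
`vector.interleave` and `vector.shuffle` be converted when lowering GPU
kernels to SPIR-V. A minimal sketch of the kind of lowering involved
(illustrative only, not an exact dump of the pass output; `%lhs` and
`%rhs` are placeholder names):

    // Before: vector dialect.
    %r = vector.interleave %lhs, %rhs : vector<2xi32> -> vector<4xi32>

    // After VectorToSPIRV (roughly): a SPIR-V vector shuffle with
    // interleaving indices.
    %r = spirv.VectorShuffle [0 : i32, 2 : i32, 1 : i32, 3 : i32] %lhs, %rhs
           : vector<2xi32>, vector<2xi32> -> vector<4xi32>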
diff --git a/mlir/test/mlir-vulkan-runner/vector-interleave.mlir b/mlir/test/mlir-vulkan-runner/vector-interleave.mlir
new file mode 100644
index 0000000000000..0846d52a45b11
--- /dev/null
+++ b/mlir/test/mlir-vulkan-runner/vector-interleave.mlir
@@ -0,0 +1,79 @@
+// RUN: mlir-vulkan-runner %s \
+// RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \
+// RUN: --entry-point-result=void | FileCheck %s
+
+// CHECK: [0, 2, 1, 3]
+module attributes {
+ gpu.container_module,
+ spirv.target_env = #spirv.target_env<
+ #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
+} {
+ gpu.module @kernels {
+ gpu.func @kernel_vector_interleave(%arg0 : memref<2xi32>, %arg1 : memref<2xi32>, %arg2 : memref<4xi32>)
+ kernel attributes { spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [1, 1, 1]>} {
+ %idx0 = arith.constant 0 : index
+ %idx1 = arith.constant 1 : index
+ %idx2 = arith.constant 2 : index
+ %idx3 = arith.constant 3 : index
+ %idx4 = arith.constant 4 : index
+
+ %lhs = arith.constant dense<[0, 0]> : vector<2xi32>
+ %rhs = arith.constant dense<[0, 0]> : vector<2xi32>
+
+ %val0 = memref.load %arg0[%idx0] : memref<2xi32>
+ %val1 = memref.load %arg0[%idx1] : memref<2xi32>
+ %val2 = memref.load %arg1[%idx0] : memref<2xi32>
+ %val3 = memref.load %arg1[%idx1] : memref<2xi32>
+
+ %lhs0 = vector.insertelement %val0, %lhs[%idx0 : index] : vector<2xi32>
+ %lhs1 = vector.insertelement %val1, %lhs0[%idx1 : index] : vector<2xi32>
+ %rhs0 = vector.insertelement %val2, %rhs[%idx0 : index] : vector<2xi32>
+ %rhs1 = vector.insertelement %val3, %rhs0[%idx1 : index] : vector<2xi32>
+
+ %interleave = vector.interleave %lhs1, %rhs1 : vector<2xi32> -> vector<4xi32>
+
+ %res0 = vector.extractelement %interleave[%idx0 : index] : vector<4xi32>
+ %res1 = vector.extractelement %interleave[%idx1 : index] : vector<4xi32>
+ %res2 = vector.extractelement %interleave[%idx2 : index] : vector<4xi32>
+ %res3 = vector.extractelement %interleave[%idx3 : index] : vector<4xi32>
+
+ memref.store %res0, %arg2[%idx0]: memref<4xi32>
+ memref.store %res1, %arg2[%idx1]: memref<4xi32>
+ memref.store %res2, %arg2[%idx2]: memref<4xi32>
+ memref.store %res3, %arg2[%idx3]: memref<4xi32>
+
+ gpu.return
+ }
+ }
+
+ func.func @main() {
+ // Allocate 3 buffers.
+ %buf0 = memref.alloc() : memref<2xi32>
+ %buf1 = memref.alloc() : memref<2xi32>
+ %buf2 = memref.alloc() : memref<4xi32>
+
+ %idx0 = arith.constant 0 : index
+ %idx1 = arith.constant 1 : index
+ %idx4 = arith.constant 4 : index
+
+ // Initialize input buffer.
+ %buf0_vals = arith.constant dense<[0, 1]> : vector<2xi32>
+ %buf1_vals = arith.constant dense<[2, 3]> : vector<2xi32>
+ vector.store %buf0_vals, %buf0[%idx0] : memref<2xi32>, vector<2xi32>
+ vector.store %buf1_vals, %buf1[%idx0] : memref<2xi32>, vector<2xi32>
+
+ // Initialize output buffer.
+ %value0 = arith.constant 0 : i32
+ %buf3 = memref.cast %buf2 : memref<4xi32> to memref<?xi32>
+ call @fillResource1DInt(%buf3, %value0) : (memref<?xi32>, i32) -> ()
+
+ gpu.launch_func @kernels::@kernel_vector_interleave
+ blocks in (%idx4, %idx1, %idx1) threads in (%idx1, %idx1, %idx1)
+ args(%buf0 : memref<2xi32>, %buf1 : memref<2xi32>, %buf2 : memref<4xi32>)
+ %buf4 = memref.cast %buf3 : memref<?xi32> to memref<*xi32>
+ call @printMemrefI32(%buf4) : (memref<*xi32>) -> ()
+ return
+ }
+ func.func private @fillResource1DInt(%0 : memref<?xi32>, %1 : i32)
+ func.func private @printMemrefI32(%ptr : memref<*xi32>)
+}
diff --git a/mlir/test/mlir-vulkan-runner/vector-shuffle.mlir b/mlir/test/mlir-vulkan-runner/vector-shuffle.mlir
new file mode 100644
index 0000000000000..7cf53b54590bc
--- /dev/null
+++ b/mlir/test/mlir-vulkan-runner/vector-shuffle.mlir
@@ -0,0 +1,79 @@
+// RUN: mlir-vulkan-runner %s \
+// RUN: --shared-libs=%vulkan-runtime-wrappers,%mlir_runner_utils \
+// RUN: --entry-point-result=void | FileCheck %s
+
+// CHECK: [2, 1, 3, 3]
+module attributes {
+ gpu.container_module,
+ spirv.target_env = #spirv.target_env<
+ #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
+} {
+ gpu.module @kernels {
+ gpu.func @kernel_vector_shuffle(%arg0 : memref<2xi32>, %arg1 : memref<2xi32>, %arg2 : memref<4xi32>)
+ kernel attributes { spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [1, 1, 1]>} {
+ %idx0 = arith.constant 0 : index
+ %idx1 = arith.constant 1 : index
+ %idx2 = arith.constant 2 : index
+ %idx3 = arith.constant 3 : index
+ %idx4 = arith.constant 4 : index
+
+ %lhs = arith.constant dense<[0, 0]> : vector<2xi32>
+ %rhs = arith.constant dense<[0, 0]> : vector<2xi32>
+
+ %val0 = memref.load %arg0[%idx0] : memref<2xi32>
+ %val1 = memref.load %arg0[%idx1] : memref<2xi32>
+ %val2 = memref.load %arg1[%idx0] : memref<2xi32>
+ %val3 = memref.load %arg1[%idx1] : memref<2xi32>
+
+ %lhs0 = vector.insertelement %val0, %lhs[%idx0 : index] : vector<2xi32>
+ %lhs1 = vector.insertelement %val1, %lhs0[%idx1 : index] : vector<2xi32>
+ %rhs0 = vector.insertelement %val2, %rhs[%idx0 : index] : vector<2xi32>
+ %rhs1 = vector.insertelement %val3, %rhs0[%idx1 : index] : vector<2xi32>
+
+ %shuffle = vector.shuffle %lhs1, %rhs1[2, 1, 3, 3] : vector<2xi32>, vector<2xi32>
+
+ %res0 = vector.extractelement %shuffle[%idx0 : index] : vector<4xi32>
+ %res1 = vector.extractelement %shuffle[%idx1 : index] : vector<4xi32>
+ %res2 = vector.extractelement %shuffle[%idx2 : index] : vector<4xi32>
+ %res3 = vector.extractelement %shuffle[%idx3 : index] : vector<4xi32>
+
+ memref.store %res0, %arg2[%idx0]: memref<4xi32>
+ memref.store %res1, %arg2[%idx1]: memref<4xi32>
+ memref.store %res2, %arg2[%idx2]: memref<4xi32>
+ memref.store %res3, %arg2[%idx3]: memref<4xi32>
+
+ gpu.return
+ }
+ }
+
+ func.func @main() {
+ // Allocate 3 buffers.
+ %buf0 = memref.alloc() : memref<2xi32>
+ %buf1 = memref.alloc() : memref<2xi32>
+ %buf2 = memref.alloc() : memref<4xi32>
+
+ %idx0 = arith.constant 0 : index
+ %idx1 = arith.constant 1 : index
+ %idx4 = arith.constant 4 : index
+
+ // Initialize input buffer.
+ %buf0_vals = arith.constant dense<[0, 1]> : vector<2xi32>
+ %buf1_vals = arith.constant dense<[2, 3]> : vector<2xi32>
+ vector.store %buf0_vals, %buf0[%idx0] : memref<2xi32>, vector<2xi32>
+ vector.store %buf1_vals, %buf1[%idx0] : memref<2xi32>, vector<2xi32>
+
+ // Initialize output buffer.
+ %value0 = arith.constant 0 : i32
+ %buf3 = memref.cast %buf2 : memref<4xi32> to memref<?xi32>
+ call @fillResource1DInt(%buf3, %value0) : (memref<?xi32>, i32) -> ()
+
+ gpu.launch_func @kernels::@kernel_vector_shuffle
+ blocks in (%idx4, %idx1, %idx1) threads in (%idx1, %idx1, %idx1)
+ args(%buf0 : memref<2xi32>, %buf1 : memref<2xi32>, %buf2 : memref<4xi32>)
+ %buf4 = memref.cast %buf3 : memref<?xi32> to memref<*xi32>
+ call @printMemrefI32(%buf4) : (memref<*xi32>) -> ()
+ return
+ }
+ func.func private @fillResource1DInt(%0 : memref<?xi32>, %1 : i32)
+ func.func private @printMemrefI32(%ptr : memref<*xi32>)
+}