[Mlir-commits] [mlir] 641e05d - [mlir][gpu] Support dynamic_shared_memory Op with vector dialect (#74475)

llvmlistbot at llvm.org
Wed Dec 6 01:42:00 PST 2023


Author: Guray Ozen
Date: 2023-12-06T10:41:57+01:00
New Revision: 641e05decc64c541aa5fd5ce253020178ffe094a

URL: https://github.com/llvm/llvm-project/commit/641e05decc64c541aa5fd5ce253020178ffe094a
DIFF: https://github.com/llvm/llvm-project/commit/641e05decc64c541aa5fd5ce253020178ffe094a.diff

LOG: [mlir][gpu] Support dynamic_shared_memory Op with vector dialect (#74475)

`gpu.dynamic_shared_memory` currently does not get lowered when it is
used together with the vector dialect, because the vector-to-llvm
conversion patterns are not populated in the gpu-to-nvvm pass. This PR
adds those patterns and a test.
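
For context, a minimal example of the IR this enables (essentially the
test added further down, reproduced here with explanatory comments; the
SSA value names are illustrative) is:

  gpu.module @kernel {
    gpu.func @dynamic_shmem_with_vector(%arg1: memref<1xf32>) {
      %c0 = arith.constant 0 : index
      // Handle to the dynamic shared memory block, in the workgroup
      // (shared) address space.
      %shmem = gpu.dynamic_shared_memory : memref<?xi8, #gpu.address_space<workgroup>>
      // Reinterpret the raw i8 buffer as a typed memref.
      %view = memref.view %shmem[%c0][] : memref<?xi8, #gpu.address_space<workgroup>> to memref<1xf32, #gpu.address_space<workgroup>>
      // Without the vector-to-llvm patterns, this vector.load had no
      // lowering registered and the conversion failed.
      %v = vector.load %view[%c0] : memref<1xf32, #gpu.address_space<workgroup>>, vector<1xf32>
      vector.store %v, %arg1[%c0] : memref<1xf32>, vector<1xf32>
      gpu.return
    }
  }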

Added: 
    

Modified: 
    mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
    mlir/test/Conversion/GPUCommon/lower-memory-space-attrs.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index 4855fd187eb58..0e978ca0a6424 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -21,6 +21,7 @@
 #include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
 #include "mlir/Conversion/LLVMCommon/TypeConverter.h"
 #include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
+#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
 #include "mlir/Dialect/ControlFlow/IR/ControlFlow.h"
 #include "mlir/Dialect/Func/IR/FuncOps.h"
 #include "mlir/Dialect/GPU/IR/GPUDialect.h"
@@ -282,6 +283,7 @@ struct LowerGpuOpsToNVVMOpsPass
     populateFinalizeMemRefToLLVMConversionPatterns(converter, llvmPatterns);
     populateGpuToNVVMConversionPatterns(converter, llvmPatterns);
     populateGpuWMMAToNVVMConversionPatterns(converter, llvmPatterns);
+    populateVectorToLLVMConversionPatterns(converter, llvmPatterns);
     if (this->hasRedux)
       populateGpuSubgroupReduceOpLoweringPattern(converter, llvmPatterns);
     LLVMConversionTarget target(getContext());

diff --git a/mlir/test/Conversion/GPUCommon/lower-memory-space-attrs.mlir b/mlir/test/Conversion/GPUCommon/lower-memory-space-attrs.mlir
index f8d82f533cc0b..771f3185904bb 100644
--- a/mlir/test/Conversion/GPUCommon/lower-memory-space-attrs.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-memory-space-attrs.mlir
@@ -49,6 +49,26 @@ gpu.module @kernel {
 
 // -----
 
+gpu.module @kernel {
+  gpu.func @dynamic_shmem_with_vector(%arg1: memref<1xf32>) {
+    %0 = arith.constant 0 : index
+    %1 = gpu.dynamic_shared_memory : memref<?xi8, #gpu.address_space<workgroup>>
+    %2 = memref.view %1[%0][] : memref<?xi8, #gpu.address_space<workgroup>> to memref<1xf32, #gpu.address_space<workgroup>>
+    %3 = vector.load %2[%0] : memref<1xf32, #gpu.address_space<workgroup>>, vector<1xf32>
+    vector.store %3, %arg1[%0] : memref<1xf32>, vector<1xf32>
+    gpu.return
+  }
+}
+
+// ROCDL: llvm.mlir.global internal @__dynamic_shmem__0() {addr_space = 3 : i32} : !llvm.array<0 x i8>
+// NVVM: llvm.mlir.global internal @__dynamic_shmem__0() {addr_space = 3 : i32, alignment = 16 : i64} : !llvm.array<0 x i8>
+// CHECK-LABEL:  llvm.func @dynamic_shmem_with_vector
+// CHECK: llvm.mlir.addressof @__dynamic_shmem__0 : !llvm.ptr<3>
+// CHECK: llvm.load %{{.*}} {alignment = 4 : i64} : !llvm.ptr<3> -> vector<1xf32>
+// CHECK: llvm.store
+
+// -----
+
 gpu.module @kernel {
   gpu.func @dynamic_shmem(%arg0: f32)  {
     %0 = arith.constant 0 : index
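
As a usage note (the exact RUN lines live in the test file and are not
reproduced here; the invocation below is an assumption based on the
CHECK/NVVM/ROCDL prefixes above): the updated test can be run through
lit, e.g. with llvm-lit from a build tree pointed at
mlir/test/Conversion/GPUCommon/lower-memory-space-attrs.mlir. Note that
the same type converter used for the rest of the GPU lowering maps the
#gpu.address_space<workgroup> memrefs to LLVM address space 3, which is
why the checks above match !llvm.ptr<3> and addr_space = 3.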

More information about the Mlir-commits mailing list