[Mlir-commits] [mlir] 9dad32c - [mlir][nvgpu] Improve finding module Op for `mbarrier.create`

Guray Ozen llvmlistbot at llvm.org
Fri Jul 21 01:36:50 PDT 2023


Author: Guray Ozen
Date: 2023-07-21T10:36:45+02:00
New Revision: 9dad32cb9094e3e3da5d30bfe2aa2d5cf2b066a5

URL: https://github.com/llvm/llvm-project/commit/9dad32cb9094e3e3da5d30bfe2aa2d5cf2b066a5
DIFF: https://github.com/llvm/llvm-project/commit/9dad32cb9094e3e3da5d30bfe2aa2d5cf2b066a5.diff

LOG: [mlir][nvgpu] Improve finding module Op for `mbarrier.create`

The current transformation expects the module op to sit exactly two levels above the barrier op, but that is not always the case. This change instead searches for the module op by walking up the parent chain until one is found.

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D155825
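
A minimal sketch (not the committed code) of the lookup change, using the standard MLIR C++ API; `findEnclosingModule` is a hypothetical helper name used only for illustration, and the actual patch performs the same two `getParentOfType` queries inline in the lowering pattern:

#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"

using namespace mlir;

// Walks up the parent chain from `op` and returns the closest enclosing
// gpu.module if there is one, otherwise the enclosing builtin.module, or
// nullptr if neither exists. This replaces the old pattern of calling
// getParentOp() a fixed number of times, which breaks when the op is nested
// more deeply (e.g. inside a gpu.launch region, as in the new test below).
static Operation *findEnclosingModule(Operation *op) {
  if (gpu::GPUModuleOp gpuModule = op->getParentOfType<gpu::GPUModuleOp>())
    return gpuModule.getOperation();
  if (ModuleOp builtinModule = op->getParentOfType<ModuleOp>())
    return builtinModule.getOperation();
  return nullptr;
}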

Added: 
    

Modified: 
    mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
    mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
index 512f3ce9ecb7b1..3e10ba59ddb2f2 100644
--- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
+++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -744,14 +744,13 @@ struct NVGPUMBarrierCreateLowering
   matchAndRewrite(nvgpu::MBarrierCreateOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     Operation *funcOp = op->getParentOp();
-    Operation *mOp = funcOp->getParentOp();
     MemRefType barrierType =
         createMBarrierMemrefType(rewriter, op.getBarrier().getType());
 
     memref::GlobalOp global;
-    if (auto moduleOp = dyn_cast<gpu::GPUModuleOp>(mOp))
+    if (auto moduleOp = funcOp->getParentOfType<gpu::GPUModuleOp>())
       global = generateGlobalBarrier(rewriter, funcOp, moduleOp, barrierType);
-    else if (auto moduleOp = dyn_cast<ModuleOp>(mOp))
+    else if (auto moduleOp = funcOp->getParentOfType<ModuleOp>())
       global = generateGlobalBarrier(rewriter, funcOp, moduleOp, barrierType);
 
     rewriter.setInsertionPoint(op);

diff  --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
index f22a9e7ed60aef..73e1d5d3cf0513 100644
--- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
@@ -635,3 +635,18 @@ func.func @async_tma_load(%tensorMap1d: !tensorMap1d, %tensorMap2d: !tensorMap2d
   func.return 
 }
 
+// -----
+
+!barrierType = !nvgpu.mbarrier.barrier<memorySpace = #gpu.address_space<workgroup>>
+module @find_parent{
+  func.func @main()  {
+  %c1 = arith.constant 1 : index   
+    gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1)
+          threads(%tx, %ty, %tz) in (%block_x = %c1, %block_y = %c1, %block_z = %c1) {
+          // CHECK: memref.get_global @__mbarrier : memref<1xi64, 3>
+          %barrier = nvgpu.mbarrier.create -> !barrierType
+          gpu.terminator
+    }
+    func.return 
+  }
+}

More information about the Mlir-commits mailing list