[Mlir-commits] [mlir] [SCFToGPU] Convert scf.parallel+scf.reduce to gpu.all_reduce (PR #122782)

llvmlistbot at llvm.org
Mon Jan 13 12:08:12 PST 2025


llvmbot wrote:


@llvm/pr-subscribers-mlir-gpu

@llvm/pr-subscribers-mlir

Author: Tuomas Kärnä (tkarna)

<details>
<summary>Changes</summary>

Support reductions in SCFToGPU: the `scf.parallel` + `scf.reduce` op combination is now converted to a `gpu.all_reduce` op.

Example: a 1D reduction loop, mapped to gpu `block_x` (first block only) and `thread_x`:

```mlir
%c1 = arith.constant 1 : index
%c64 = arith.constant 64 : index
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
scf.parallel (%arg1) = (%c0) to (%c1) step (%c1) {
  %0 = scf.parallel (%arg2) = (%c0) to (%c64) step (%c1) init (%cst) -> f32 {
    %1 = memref.load %alloc_0[%arg2] : memref<64xf32>
    scf.reduce(%1 : f32) {
    ^bb0(%arg3: f32, %arg4: f32):
      %2 = arith.addf %arg3, %arg4 : f32
      scf.reduce.return %2 : f32
    }
  } {mapping = [#gpu.loop_dim_map<processor = thread_x, map = (d0) -> (d0), bound = (d0) -> (d0)>]}
  memref.store %0, %alloc[] : memref<f32>
  scf.reduce
} {mapping = [#gpu.loop_dim_map<processor = block_x, map = (d0) -> (d0), bound = (d0) -> (d0)>]}
```

After the `convert-parallel-loops-to-gpu` pass, the outer `scf.parallel` is converted to a `gpu.launch` op, while the inner one is converted to a `gpu.all_reduce`:

```mlir
#map = affine_map<(d0)[s0, s1] -> ((d0 - s0) ceildiv s1)>
#map1 = affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
%c1 = arith.constant 1 : index
%c64 = arith.constant 64 : index
%c0 = arith.constant 0 : index
%cst = arith.constant 0.000000e+00 : f32
%0 = affine.apply #map(%c1)[%c0, %c1]
%1 = affine.apply #map(%c64)[%c0, %c1]
gpu.launch blocks(%arg0, %arg1, %arg2) in (%arg6 = %0, %arg7 = %c1, %arg8 = %c1)
    threads(%arg3, %arg4, %arg5) in (%arg9 = %1, %arg10 = %c1, %arg11 = %c1) {
  %2 = affine.apply #map1(%arg0)[%c1, %c0]
  %3 = affine.apply #map1(%arg3)[%c1, %c0]
  %4 = memref.load %alloc_0[%3] : memref<64xf32>
  %5 = gpu.all_reduce  %4 {
  ^bb0(%arg12: f32, %arg13: f32):
    %6 = arith.addf %arg12, %arg13 : f32
    gpu.yield %6 : f32
  } : (f32) -> f32
  memref.store %5, %alloc[] : memref<f32>
  gpu.terminator
} {SCFToGPU_visited}
```
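
For reference, the conversion can be exercised with `mlir-opt` via a lit-style RUN line such as the one below (a sketch; the exact flags used in the updated test file may differ):

```mlir
// RUN: mlir-opt %s -convert-parallel-loops-to-gpu | FileCheck %s
```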


---
Full diff: https://github.com/llvm/llvm-project/pull/122782.diff


2 Files Affected:

- (modified) mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp (+31-2) 
- (modified) mlir/test/Conversion/SCFToGPU/parallel_loop.mlir (+147) 


``````````diff
diff --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
index dece254c325fcd..ea2f1db244537f 100644
--- a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
+++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
@@ -408,8 +408,8 @@ static LogicalResult processParallelLoop(
   ArrayAttr mapping =
       parallelOp->getAttrOfType<ArrayAttr>(gpu::getMappingAttrName());
 
-  // TODO: Support reductions.
-  if (!mapping || parallelOp.getNumResults() != 0)
+  // TODO: Support multiple reductions.
+  if (!mapping || parallelOp.getNumResults() > 1)
     return failure();
 
   Location loc = parallelOp.getLoc();
@@ -556,6 +556,11 @@ static LogicalResult processParallelLoop(
 
   Block *body = parallelOp.getBody();
   worklist.reserve(worklist.size() + body->getOperations().size());
+  // Include the scf.reduce terminator if it exists and has exactly one operand.
+  if (auto terminator = body->getTerminator();
+      isa<scf::ReduceOp>(terminator) && terminator->getOperands().size() == 1) {
+    worklist.push_back(terminator);
+  }
   for (Operation &op : llvm::reverse(body->without_terminator()))
     worklist.push_back(&op);
   return success();
@@ -648,6 +653,30 @@ ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
       rewriter.setInsertionPointAfter(parent);
       leftNestingScope = true;
       seenSideeffects = false;
+    } else if (auto reduceOp = dyn_cast<scf::ReduceOp>(op)) {
+      // Convert the scf.reduce op.
+      auto parentLoop = op->getParentOfType<ParallelOp>();
+      if (!parentLoop || op->getOperands().size() != 1) {
+        return failure();
+      }
+      auto operand = op->getOperands().front();
+      auto newValue = cloningMap.lookupOrNull(operand);
+      if (!newValue) {
+        return failure();
+      }
+      // Replace by gpu.all_reduce.
+      auto gpuRedOp = rewriter.create<gpu::AllReduceOp>(loc, newValue);
+      cloningMap.map(parentLoop->getResult(0), gpuRedOp.getResult());
+      // Copy region.
+      rewriter.inlineRegionBefore(reduceOp.getRegion(0), gpuRedOp.getRegion(),
+                                  gpuRedOp.getRegion().begin());
+      // Replace scf.reduce.return with gpu.yield.
+      auto scfReturn = gpuRedOp.getRegion().front().getTerminator();
+      auto ip = rewriter.saveInsertionPoint();
+      rewriter.setInsertionPointToEnd(&gpuRedOp.getRegion().front());
+      rewriter.replaceOpWithNewOp<gpu::YieldOp>(
+          scfReturn, scfReturn->getOperands().front());
+      rewriter.restoreInsertionPoint(ip);
     } else {
       // Otherwise we copy it over.
       Operation *clone = rewriter.clone(*op, cloningMap);
diff --git a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
index 59441e5ed66290..e5cafde39df1f0 100644
--- a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
+++ b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
@@ -428,3 +428,150 @@ func.func @step_invariant() {
 // CHECK: %[[rhs:.*]] = memref.load %[[alloc_1]][%[[dim0]], %[[dim1]]] : memref<1x1xf64>
 // CHECK: %[[sum:.*]] = arith.addf %[[lhs]], %[[rhs]] : f64
 // CHECK: memref.store %[[sum]], %[[alloc_0]][%[[dim0]], %[[dim1]]] : memref<1x1xf64>
+
+// -----
+
+// 1-d parallel reduction mapped to block.x and thread.x.
+
+// CHECK-LABEL: @parallel_reduction_1d
+func.func @parallel_reduction_1d() {
+  %alloc = memref.alloc() : memref<f32>
+  %alloc_0 = memref.alloc() : memref<64xf32>
+  %c1 = arith.constant 1 : index
+  %c64 = arith.constant 64 : index
+  %c0 = arith.constant 0 : index
+  %cst = arith.constant 0.000000e+00 : f32
+  scf.parallel (%arg1) = (%c0) to (%c1) step (%c1) {
+    %0 = scf.parallel (%arg2) = (%c0) to (%c64) step (%c1) init (%cst) -> f32 {
+      %1 = memref.load %alloc_0[%arg2] : memref<64xf32>
+      scf.reduce(%1 : f32) {
+      ^bb0(%arg3: f32, %arg4: f32):
+        %2 = arith.addf %arg3, %arg4 : f32
+        scf.reduce.return %2 : f32
+      }
+    } {mapping = [#gpu.loop_dim_map<processor = thread_x, map = (d0) -> (d0), bound = (d0) -> (d0)>]}
+    memref.store %0, %alloc[] : memref<f32>
+    scf.reduce 
+  } {mapping = [#gpu.loop_dim_map<processor = block_x, map = (d0) -> (d0), bound = (d0) -> (d0)>]}
+  memref.dealloc %alloc : memref<f32>
+  memref.dealloc %alloc_0 : memref<64xf32>
+  return
+}
+
+// CHECK: %[[alloc_0:.*]] = memref.alloc() : memref<f32>
+// CHECK: %[[alloc_1:.*]] = memref.alloc() : memref<64xf32>
+// CHECK: %[[map_0:.*]] = affine.apply #map({{.*}})[{{.*}}, {{.*}}]
+// CHECK: %[[map_1:.*]] = affine.apply #map({{.*}})[{{.*}}, {{.*}}]
+// CHECK: gpu.launch
+// CHECK-SAME: blocks(%[[arg_0:.*]], %{{[^)]*}}, %{{[^)]*}}) in (%{{[^)]*}} = %[[map_0]], %{{[^)]*}} = %{{[^)]*}}, %{{[^)]*}} = %{{[^)]*}})
+// CHECK-SAME: threads(%[[arg_3:.*]], %{{[^)]*}}, %{{[^)]*}}) in (%{{[^)]*}} = %[[map_1]], %{{[^)]*}} = %{{[^)]*}}, %{{[^)]*}} = %{{[^)]*}})
+// CHECK-NEXT: %[[dim0:.*]] = affine.apply #map1(%[[arg_0]])[{{.*}}, {{.*}}]
+// CHECK-NEXT: %[[dim1:.*]] = affine.apply #map1(%[[arg_3]])[{{.*}}, {{.*}}]
+// CHECK-NEXT: %[[src:.*]] = memref.load %[[alloc_1]][%[[dim1]]] : memref<64xf32>
+// CHECK-NEXT: %[[res:.*]] = gpu.all_reduce %[[src]] {
+// CHECK-NEXT: ^bb0(%[[arg12:.*]]: f32, %[[arg13:.*]]: f32):
+// CHECK-NEXT: %[[sum:.*]] = arith.addf %[[arg12]], %[[arg13]] : f32
+// CHECK-NEXT: gpu.yield %[[sum]] : f32
+// CHECK-NEXT: } : (f32) -> f32
+// CHECK-NEXT: memref.store %[[res]], %[[alloc_0]][] : memref<f32>
+
+// -----
+
+// 2-d parallel reduction mapped to block.x, thread.x and thread.y.
+
+// CHECK-LABEL: @parallel_reduction_2d
+func.func @parallel_reduction_2d() {
+  %alloc = memref.alloc() : memref<f32>
+  %alloc_0 = memref.alloc() : memref<8x8xf32>
+  %c1 = arith.constant 1 : index
+  %c8 = arith.constant 8 : index
+  %c0 = arith.constant 0 : index
+  %cst = arith.constant 0.000000e+00 : f32
+  scf.parallel (%arg1) = (%c0) to (%c1) step (%c1) {
+    %0 = scf.parallel (%arg2, %arg3) = (%c0, %c0) to (%c8, %c8) step (%c1, %c1) init (%cst) -> f32 {
+      %1 = memref.load %alloc_0[%arg2, %arg3] : memref<8x8xf32>
+      scf.reduce(%1 : f32) {
+      ^bb0(%arg4: f32, %arg5: f32):
+        %2 = arith.addf %arg4, %arg5 : f32
+        scf.reduce.return %2 : f32
+      }
+    } {mapping = [#gpu.loop_dim_map<processor = thread_x, map = (d0) -> (d0), bound = (d0) -> (d0)>, #gpu.loop_dim_map<processor = thread_y, map = (d0) -> (d0), bound = (d0) -> (d0)>]}
+    memref.store %0, %alloc[] : memref<f32>
+    scf.reduce 
+  } {mapping = [#gpu.loop_dim_map<processor = block_x, map = (d0) -> (d0), bound = (d0) -> (d0)>]}
+  memref.dealloc %alloc : memref<f32>
+  memref.dealloc %alloc_0 : memref<8x8xf32>
+  return
+}
+
+// CHECK: %[[alloc_0:.*]] = memref.alloc() : memref<f32>
+// CHECK: %[[alloc_1:.*]] = memref.alloc() : memref<8x8xf32>
+// CHECK: %[[map_0:.*]] = affine.apply #map({{.*}})[{{.*}}, {{.*}}]
+// CHECK: %[[map_1:.*]] = affine.apply #map({{.*}})[{{.*}}, {{.*}}]
+// CHECK: %[[map_2:.*]] = affine.apply #map({{.*}})[{{.*}}, {{.*}}]
+// CHECK: gpu.launch
+// CHECK-SAME: blocks(%[[arg_0:.*]], %{{[^)]*}}, %{{[^)]*}}) in (%{{[^)]*}} = %[[map_0]], %{{[^)]*}} = %{{[^)]*}}, %{{[^)]*}} = %{{[^)]*}})
+// CHECK-SAME: threads(%[[arg_3:.*]], %[[arg_4:.*]], %{{[^)]*}}) in (%{{[^)]*}} = %[[map_1]], %{{[^)]*}} = %[[map_2]], %{{[^)]*}} = %{{[^)]*}})
+// CHECK-NEXT: %[[dim0:.*]] = affine.apply #map1(%[[arg_0]])[{{.*}}, {{.*}}]
+// CHECK-NEXT: %[[dim1:.*]] = affine.apply #map1(%[[arg_3]])[{{.*}}, {{.*}}]
+// CHECK-NEXT: %[[dim2:.*]] = affine.apply #map1(%[[arg_4]])[{{.*}}, {{.*}}]
+// CHECK-NEXT: %[[src:.*]] = memref.load %[[alloc_1]][%[[dim1]], %[[dim2]]] : memref<8x8xf32>
+// CHECK-NEXT: %[[res:.*]] = gpu.all_reduce %[[src]] {
+// CHECK-NEXT: ^bb0(%[[arg12:.*]]: f32, %[[arg13:.*]]: f32):
+// CHECK-NEXT: %[[sum:.*]] = arith.addf %[[arg12]], %[[arg13]] : f32
+// CHECK-NEXT: gpu.yield %[[sum]] : f32
+// CHECK-NEXT: } : (f32) -> f32
+// CHECK-NEXT: memref.store %[[res]], %[[alloc_0]][] : memref<f32>
+
+// -----
+
+// tiled 1-d parallel reduction mapped to block.x and thread.x.
+
+// CHECK-LABEL: @parallel_reduction_1d_tiled
+func.func @parallel_reduction_1d_tiled() {
+  %c128 = arith.constant 128 : index
+  %c1 = arith.constant 1 : index
+  %c64 = arith.constant 64 : index
+  %c0 = arith.constant 0 : index
+  %cst = arith.constant 0.000000e+00 : f32
+  %alloc_0 = memref.alloc() : memref<8192xf32>
+  %alloc_1 = memref.alloc() : memref<64xf32>
+  scf.parallel (%arg1) = (%c0) to (%c64) step (%c1) {
+    %subview = memref.subview %alloc_1[%arg1] [1] [1] : memref<64xf32> to memref<f32, strided<[], offset: ?>>
+    %0 = affine.apply affine_map<(d0) -> (d0 * 128)>(%arg1)
+    %subview_1 = memref.subview %alloc_0[%0] [128] [1] : memref<8192xf32> to memref<128xf32, strided<[1], offset: ?>>
+    %1 = scf.parallel (%arg2) = (%c0) to (%c128) step (%c1) init (%cst) -> f32 {
+      %2 = memref.load %subview_1[%arg2] : memref<128xf32, strided<[1], offset: ?>>
+      scf.reduce(%2 : f32) {
+      ^bb0(%arg3: f32, %arg4: f32):
+        %3 = arith.addf %arg3, %arg4 : f32
+        scf.reduce.return %3 : f32
+      }
+    } {mapping = [#gpu.loop_dim_map<processor = thread_x, map = (d0) -> (d0), bound = (d0) -> (d0)>]}
+    memref.store %1, %subview[] : memref<f32, strided<[], offset: ?>>
+    scf.reduce 
+  } {mapping = [#gpu.loop_dim_map<processor = block_x, map = (d0) -> (d0), bound = (d0) -> (d0)>]}
+  memref.dealloc %alloc_0 : memref<8192xf32>
+  memref.dealloc %alloc_1 : memref<64xf32>
+  return
+}
+
+// CHECK: %[[alloc_0:.*]] = memref.alloc() : memref<8192xf32>
+// CHECK: %[[alloc_1:.*]] = memref.alloc() : memref<64xf32>
+// CHECK: %[[map_0:.*]] = affine.apply #map({{.*}})[{{.*}}, {{.*}}]
+// CHECK: %[[map_1:.*]] = affine.apply #map({{.*}})[{{.*}}, {{.*}}]
+// CHECK: gpu.launch
+// CHECK-SAME: blocks(%[[arg_0:.*]], %{{[^)]*}}, %{{[^)]*}}) in (%{{[^)]*}} = %[[map_0]], %{{[^)]*}} = %{{[^)]*}}, %{{[^)]*}} = %{{[^)]*}})
+// CHECK-SAME: threads(%[[arg_3:.*]], %{{[^)]*}}, %{{[^)]*}}) in (%{{[^)]*}} = %[[map_1]], %{{[^)]*}} = %{{[^)]*}}, %{{[^)]*}} = %{{[^)]*}})
+// CHECK-NEXT: %[[dim0:.*]] = affine.apply #map1(%[[arg_0]])[{{.*}}, {{.*}}]
+// CHECK-NEXT: %[[dst:.*]] = memref.subview %[[alloc_1]][%[[dim0]]] [1] [1] : memref<64xf32>
+// CHECK-NEXT: %[[dim1:.*]] = affine.apply #map2(%[[dim0]])
+// CHECK-NEXT: %[[tile:.*]] = memref.subview %[[alloc_0]][%[[dim1]]] [128] [1] : memref<8192xf32>
+// CHECK-NEXT: %[[dim2:.*]] = affine.apply #map1(%[[arg_3]])[{{.*}}, {{.*}}]
+// CHECK-NEXT: %[[src:.*]] = memref.load %[[tile]][%[[dim2]]] : memref<128xf32, strided<[1], offset: ?>>
+// CHECK-NEXT: %[[res:.*]] = gpu.all_reduce %[[src]] {
+// CHECK-NEXT: ^bb0(%[[arg12:.*]]: f32, %[[arg13:.*]]: f32):
+// CHECK-NEXT: %[[sum:.*]] = arith.addf %[[arg12]], %[[arg13]] : f32
+// CHECK-NEXT: gpu.yield %[[sum]] : f32
+// CHECK-NEXT: } : (f32) -> f32
+// CHECK-NEXT: memref.store %[[res]], %[[dst]][] : memref<f32, strided<[], offset: ?>>

``````````

</details>


https://github.com/llvm/llvm-project/pull/122782

