[Mlir-commits] [mlir] [MLIR][XeGPU] Add blocking pass (PR #140163)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Thu May 15 16:46:16 PDT 2025


github-actions[bot] wrote:

<!--LLVM CODE FORMAT COMMENT: {clang-format}-->


:warning: The C/C++ code formatter, clang-format, found issues in your code. :warning:

<details>
<summary>
You can test this locally with the following command:
</summary>

``````````bash
git-clang-format --diff HEAD~1 HEAD --extensions h,cpp -- mlir/lib/Dialect/XeGPU/Transforms/XeGPUInstructionlize.cpp mlir/include/mlir/Dialect/XeGPU/Utils/XeGPUUtils.h mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
``````````

</details>

<details>
<summary>
View the diff from clang-format here.
</summary>

``````````diff
diff --git a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
index 023e44520..14b2b909e 100644
--- a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
+++ b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
@@ -308,8 +308,8 @@ void xegpu::doSCFStructuralTypeConversionWithTensorType(Operation *op) {
   { // perform the conversion from RankedTensorType to VectorType based on the
     // LayoutAttr
     auto computeTileShapeAndCount = [&](ArrayRef<int64_t> shape,
-                                          DenseI32ArrayAttr sgDataAttr,
-                                          DenseI32ArrayAttr sgLayoutAttr) {
+                                        DenseI32ArrayAttr sgDataAttr,
+                                        DenseI32ArrayAttr sgLayoutAttr) {
       SmallVector<int64_t> tileShape;
       auto sgLayout = llvm::to_vector_of<int64_t>(sgLayoutAttr.asArrayRef());
       if (sgDataAttr)
@@ -317,7 +317,8 @@ void xegpu::doSCFStructuralTypeConversionWithTensorType(Operation *op) {
       else
         tileShape = computeShapeRatio(shape, sgLayout).value_or(tileShape);
       assert(tileShape.size() && "failed to compute tileShape");
-      SmallVector<int64_t> distUnit = computeElementwiseMul(sgLayout, tileShape);
+      SmallVector<int64_t> distUnit =
+          computeElementwiseMul(sgLayout, tileShape);
       int count = computeProduct(shape) / computeProduct(distUnit);
       return std::make_pair(tileShape, count);
     };
@@ -341,7 +342,8 @@ void xegpu::doSCFStructuralTypeConversionWithTensorType(Operation *op) {
             if (layout.isWgLayout()) {
               // for WgToSg, the subShape is either from sgData or computed as
               // shape/sgLayout
-              std::tie(subShape, count) = computeTileShapeAndCount(shape, layout.getSgData(), layout.getSgLayout());
+              std::tie(subShape, count) = computeTileShapeAndCount(
+                  shape, layout.getSgData(), layout.getSgLayout());
             } else if (DenseI32ArrayAttr instData = layout.getInstData()) {
               // for unrolling, the subShape is determined by inst_data
               subShape = llvm::to_vector_of<int64_t>(instData.asArrayRef());
@@ -371,7 +373,8 @@ void xegpu::doSCFStructuralTypeConversionWithTensorType(Operation *op) {
             if (layout.isWgLayout()) {
               // for WgToSg, the subShape is either from sgData or computed as
               // shape/sgLayout
-              std::tie(subShape, count) = computeTileShapeAndCount(shape, layout.getSgData(), layout.getSgLayout());
+              std::tie(subShape, count) = computeTileShapeAndCount(
+                  shape, layout.getSgData(), layout.getSgLayout());
               layout = layout.dropSgLayoutAndData();
             } else if (DenseI32ArrayAttr instData = layout.getInstData()) {
               // for unrolling, the subShape is determined by inst_data
@@ -390,7 +393,7 @@ void xegpu::doSCFStructuralTypeConversionWithTensorType(Operation *op) {
 
     converter.addSourceMaterialization(materializeCast);
     converter.addTargetMaterialization([&](OpBuilder &builder, TypeRange type,
-                                        ValueRange inputs, Location loc) {
+                                           ValueRange inputs, Location loc) {
       return builder.create<UnrealizedConversionCastOp>(loc, type, inputs)
           .getResults();
     });

``````````

</details>


https://github.com/llvm/llvm-project/pull/140163


More information about the Mlir-commits mailing list