[Mlir-commits] [mlir] [mlir][xegpu] Add initial skeleton implementation for lowering ConvertLayoutOp (PR #146176)

Chao Chen llvmlistbot at llvm.org
Thu Jul 17 08:07:58 PDT 2025


================
@@ -392,6 +392,46 @@ struct WgToSgElementwiseOp : public ConversionPattern {
   }
 };
 
+struct WgToSgConvertLayoutOp
+    : public OpConversionPattern<xegpu::ConvertLayoutOp> {
+  using OpConversionPattern<xegpu::ConvertLayoutOp>::OpConversionPattern;
+  LogicalResult
+  matchAndRewrite(xegpu::ConvertLayoutOp op, OneToNOpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    xegpu::LayoutAttr input = op.getInputLayout();
+    xegpu::LayoutAttr target = op.getTargetLayout();
+
+    if (!input || !target || !input.isWgLayout() || !target.isWgLayout())
+      return rewriter.notifyMatchFailure(
+          op, "Input and target layouts must have subgroup layout");
+
+    DenseI32ArrayAttr inputSgLayout = input.getSgLayout();
+    DenseI32ArrayAttr inputSgData = input.getSgData();
+    DenseI32ArrayAttr targetSgLayout = target.getSgLayout();
+    DenseI32ArrayAttr targetSgData = target.getSgData();
+
+    // TODO: currently we only support the optimal case, where the input and
+    // output have the same sg_layout and sg_data, so SLM is not involved.
+    if (inputSgLayout != targetSgLayout || inputSgData != targetSgData)
+      return failure();
+
+    input = input.dropSgLayoutAndData();
+    target = target.dropSgLayoutAndData();
+
+    SmallVector<Value> newOps(adaptor.getSource());
+
+    if (input && target) {
+      for (auto [i, src] : llvm::enumerate(adaptor.getSource())) {
+        auto newOp = rewriter.create<xegpu::ConvertLayoutOp>(
----------------
chencha3 wrote:

No, they are not folded. ConvertLayoutOp is folded only when `InputLayout` and `TargetLayout` are the same, so here the input and target layouts could still differ.
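
To make the folding condition concrete, here is a minimal sketch of a fold rule matching the behavior described above (an illustration, not the verbatim upstream implementation):

```c++
// Sketch: ConvertLayoutOp collapses to its source only when the two layouts
// are identical; any genuine conversion survives so patterns like
// WgToSgConvertLayoutOp above can rewrite it.
OpFoldResult xegpu::ConvertLayoutOp::fold(FoldAdaptor adaptor) {
  if (getInputLayout() == getTargetLayout())
    return getSource();
  return {};
}
```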
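For context, the quoted hunk is cut off at the commented line; a plausible sketch of how the pattern continues, under the assumption that the builder takes (loc, resultType, source, inputLayout, targetLayout) — not the verbatim PR code:

```c++
    // Sketch: after dropping sg_layout/sg_data, if both layouts still carry
    // fine-grained (inst/lane) info, keep a per-fragment ConvertLayoutOp for
    // each distributed value; otherwise forward the sources unchanged.
    if (input && target) {
      for (auto [i, src] : llvm::enumerate(adaptor.getSource())) {
        auto newOp = rewriter.create<xegpu::ConvertLayoutOp>(
            op.getLoc(), src.getType(), src, input, target);
        newOps[i] = newOp;
      }
    }
    rewriter.replaceOpWithMultiple(op, {newOps});
    return success();
```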

https://github.com/llvm/llvm-project/pull/146176

