[Mlir-commits] [mlir] [MLIR][XeGPU] Add support for elementwise ops in Wg to Sg distribute pass [1/N] (PR #142797)
Nishant Patel
llvmlistbot at llvm.org
Wed Jun 11 10:01:25 PDT 2025
================
@@ -314,6 +317,90 @@ struct WgToSgPrefetchNdOp : public OpConversionPattern<xegpu::PrefetchNdOp> {
   }
 };
+// This pattern transforms elementwise ops (unary/binary) in math/arith dialect
+template <typename Op>
+struct WgToSgElementwiseOp : public OpConversionPattern<Op> {
+  using OpConversionPattern<Op>::OpConversionPattern;
+  using OneToNOpAdaptor = typename OpConversionPattern<Op>::OneToNOpAdaptor;
+
+  LogicalResult
+  matchAndRewrite(Op op, OneToNOpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    // All operands/results must be 1D or 2D vectors
+    auto resultType = dyn_cast<VectorType>(op.getResult().getType());
+    if (!resultType || (resultType.getRank() != 1 && resultType.getRank() != 2))
+      return rewriter.notifyMatchFailure(
+          op, "Result type is not a 1D or 2D vector");
+
+    ArrayRef<int64_t> shape = resultType.getShape();
+    for (Value operand : op->getOperands()) {
+      auto operandType = dyn_cast<VectorType>(operand.getType());
+      if (!operandType || operandType.getRank() != resultType.getRank() ||
+          operandType.getShape() != shape) {
+        return rewriter.notifyMatchFailure(
+            op, "Operand type is not a 1D or 2D vector with the same shape as "
+                "result type");
+      }
+    }
+
+    auto layout = dyn_cast_or_null<xegpu::LayoutAttr>(op->getAttr("layout"));
+    if (!layout || !layout.getSgLayout())
+      return rewriter.notifyMatchFailure(
+          op, "Operation does not have a valid layout attribute for subgroup "
+              "distribution");
+
+    // Extract sgShape from layout
+    SmallVector<int64_t> sgShape;
----------------
nbpatel wrote:
Yes, I will rebase once that PR goes in. I think that one will likely be merged before this one.
https://github.com/llvm/llvm-project/pull/142797
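
For context, the quoted hunk stops right where sgShape is populated. A minimal sketch of what that extraction could look like, assuming the layout attribute also exposes getSgData() (only getSgLayout() appears in the hunk above); this is illustrative only and not necessarily the exact code in the PR:

// Editor's sketch (not part of the diff): derive the per-subgroup shape from
// the workgroup-level shape and the layout. Assumes getSgData()/getSgLayout()
// return DenseI32ArrayAttr, matching the getSgLayout() use in the hunk.
SmallVector<int64_t> sgShape;
if (auto sgData = layout.getSgData()) {
  // Explicit per-subgroup data size takes precedence.
  sgShape = llvm::to_vector_of<int64_t>(sgData.asArrayRef());
} else {
  // Otherwise split the workgroup shape evenly across the subgroup grid.
  auto sgLayout = llvm::to_vector_of<int64_t>(layout.getSgLayout().asArrayRef());
  sgShape.reserve(shape.size());
  for (auto [wgDim, sgDim] : llvm::zip_equal(shape, sgLayout))
    sgShape.push_back(wgDim / sgDim);
}

With, for example, shape = [128, 256] and sg_layout = [4, 8], this would yield sgShape = [32, 32], i.e. the per-subgroup vector type the rewritten elementwise op would produce.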