[Mlir-commits] [mlir] [MLIR][AMDGPU] Adding Vector to AMDGPU conversion lowering (PR #131803)
Jakub Kuderski
llvmlistbot at llvm.org
Tue Mar 18 07:29:08 PDT 2025
================
@@ -0,0 +1,147 @@
+//===- VectorToAMDGPU.cpp - Vector to AMDGPU dialect conversion -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Conversion/VectorToAMDGPU/VectorToAMDGPU.h"
+
+#include "mlir/Dialect/AMDGPU/IR/AMDGPUDialect.h"
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/IR/TypeUtilities.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Support/LogicalResult.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+
+namespace mlir {
+#define GEN_PASS_DEF_CONVERTVECTORTOAMDGPUPASS
+#include "mlir/Conversion/Passes.h.inc"
+} // namespace mlir
+
+using namespace mlir;
+
+/// This pattern lowers `vector.transfer_read` to a combination of
+/// `vector.load`, `arith.select`, and `vector.broadcast` if all of the
+/// following hold:
+/// - The transfer op is masked.
+/// - The memref is in the buffer address space.
+/// - The stride of the most minor memref dimension is 1.
+/// - Out-of-bounds masking is not required.
+/// - If the memref's element type is a vector type, then it coincides with
+///   the result type.
+/// - The permutation map does not perform any permutation (broadcasting is
+///   allowed).
+/// Note: these conditions mostly come from TransferReadToVectorLoadLowering.
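+///
+/// A sketch of the intended rewrite (names and types are illustrative, not
+/// taken from a real test):
+///
+///   %v = vector.transfer_read %mem[%i], %pad, %mask {in_bounds = [true]}
+///     : memref<8xf32, #amdgpu.address_space<fat_raw_buffer>>, vector<4xf32>
+///
+/// becomes, roughly:
+///
+///   %load = vector.load %mem[%i]
+///     : memref<8xf32, #amdgpu.address_space<fat_raw_buffer>>, vector<4xf32>
+///   %fill = vector.broadcast %pad : f32 to vector<4xf32>
+///   %v = arith.select %mask, %load, %fill : vector<4xi1>, vector<4xf32>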
+static LogicalResult
+transferPreconditions(PatternRewriter &rewriter,
+ VectorTransferOpInterface xferOp,
+ SmallVector<unsigned> &broadcastedDims,
+ VectorType &unbroadcastedVectorType) {
+ if (!xferOp.getMask())
+ return rewriter.notifyMatchFailure(xferOp, "Only support masked transfer");
+
+ // Permutations are handled by VectorToSCF or
+ // populateVectorTransferPermutationMapLoweringPatterns.
+  // We let the 0-d corner case pass through as it is supported.
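+  // For example, (d0, d1) -> (0, d1) is a minor identity with the leading
+  // dimension broadcast, while (d0, d1) -> (d1, d0) is a true permutation
+  // and is rejected here.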
+ if (!xferOp.getPermutationMap().isMinorIdentityWithBroadcasting(
+ &broadcastedDims))
+ return rewriter.notifyMatchFailure(xferOp, "not minor identity + bcast");
+
+ auto memRefType = dyn_cast<MemRefType>(xferOp.getShapedType());
+ if (!memRefType)
+ return rewriter.notifyMatchFailure(xferOp, "not a memref source");
+
+  // Use a checked cast so that a missing or non-AMDGPU memory space
+  // attribute fails the match instead of crashing.
+  auto addrSpace = llvm::dyn_cast_if_present<amdgpu::AddressSpaceAttr>(
+      memRefType.getMemorySpace());
+  if (!addrSpace || addrSpace.getValue() != amdgpu::AddressSpace::FatRawBuffer)
+    return rewriter.notifyMatchFailure(xferOp, "not in buffer address space");
+
+ // Non-unit strides are handled by VectorToSCF.
+ if (!memRefType.isLastDimUnitStride())
+ return rewriter.notifyMatchFailure(xferOp, "!= 1 stride needs VectorToSCF");
+
+  // If broadcasting is involved, we first load the unbroadcasted vector and
+  // then broadcast it with `vector.broadcast`.
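+  // For example, a transfer producing vector<4x8xf32> with broadcastedDims =
+  // {0} first loads vector<1x8xf32> and then broadcasts it to vector<4x8xf32>.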
+ ArrayRef<int64_t> vectorShape = xferOp.getVectorType().getShape();
+ SmallVector<int64_t> unbroadcastedVectorShape(vectorShape);
+ for (unsigned i : broadcastedDims)
+ unbroadcastedVectorShape[i] = 1;
+ unbroadcastedVectorType = xferOp.getVectorType().cloneWith(
+ unbroadcastedVectorShape, xferOp.getVectorType().getElementType());
+
+ // `vector.load` supports vector types as memref's elements only when the
+ // resulting vector type is the same as the element type.
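+  // For example, a memref<8xvector<4xf32>> source can only be read as
+  // vector<4xf32> here.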
+ auto memrefElTy = memRefType.getElementType();
+ if (isa<VectorType>(memrefElTy) && memrefElTy != unbroadcastedVectorType)
+ return rewriter.notifyMatchFailure(xferOp, "incompatible element type");
+
+ // Otherwise, element types of the memref and the vector must match.
+ if (!isa<VectorType>(memrefElTy) &&
+ memrefElTy != xferOp.getVectorType().getElementType())
+ return rewriter.notifyMatchFailure(xferOp, "non-matching element type");
+
+ // Out-of-bounds dims are handled by MaterializeTransferMask.
+ if (xferOp.hasOutOfBoundsDim())
+ return rewriter.notifyMatchFailure(xferOp, "out-of-bounds needs mask");
+
+  // `vector.maskedload` operates on 1-D vectors.
+  if (xferOp.getVectorType().getRank() != 1)
+    return rewriter.notifyMatchFailure(
+        xferOp, "vector type is not rank 1, can't create masked load, needs "
+                "VectorToSCF");
+
+ return success();
+}
+
+struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {
----------------
kuhar wrote:
```suggestion
struct TransferReadLowering final : OpRewritePattern<vector::TransferReadOp> {
```
nit: `public` is already the default for struct inheritance and is redundant here. Instead, it would be worth marking this as `final` so that we know nothing derives from it later on.
https://github.com/llvm/llvm-project/pull/131803