[llvm-branch-commits] [flang] [Flang][OpenMP] Derived type explicit allocatable member mapping (PR #96266)
Sergio Afonso via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Aug 12 08:42:16 PDT 2024
================
@@ -85,67 +138,187 @@ class OMPMapInfoFinalizationPass
descriptor = alloca;
}
+ return descriptor;
+ }
+
+ /// Generates a FIR operation (BoxOffsetOp) accessing the descriptor's
+ /// base address and then creates a MapInfoOp for it. The most important
+ /// thing to note is that we normally move the bounds from the descriptor
+ /// map onto the base address map.
+ mlir::omp::MapInfoOp getBaseAddrMap(mlir::Value descriptor,
+ mlir::OperandRange bounds,
+ int64_t mapType,
+ fir::FirOpBuilder &builder) {
+ mlir::Location loc = descriptor.getLoc();
mlir::Value baseAddrAddr = builder.create<fir::BoxOffsetOp>(
loc, descriptor, fir::BoxFieldAttr::base_addr);
// Member of the descriptor pointing at the allocated data
- mlir::Value baseAddr = builder.create<mlir::omp::MapInfoOp>(
+ return builder.create<mlir::omp::MapInfoOp>(
loc, baseAddrAddr.getType(), descriptor,
mlir::TypeAttr::get(llvm::cast<mlir::omp::PointerLikeType>(
fir::unwrapRefType(baseAddrAddr.getType()))
.getElementType()),
baseAddrAddr, /*members=*/mlir::SmallVector<mlir::Value>{},
- /*member_index=*/mlir::DenseIntElementsAttr{}, op.getBounds(),
- builder.getIntegerAttr(builder.getIntegerType(64, false),
- op.getMapType().value()),
+ /*membersIndex=*/mlir::ArrayAttr{}, bounds,
+ builder.getIntegerAttr(builder.getIntegerType(64, false), mapType),
builder.getAttr<mlir::omp::VariableCaptureKindAttr>(
mlir::omp::VariableCaptureKind::ByRef),
/*name=*/builder.getStringAttr(""),
/*partial_map=*/builder.getBoolAttr(false));
+ }
- // TODO: map the addendum segment of the descriptor, similarly to the
- // above base address/data pointer member.
+ /// This function adjusts the member indices vector to include a new
+ /// base address member. We take the position of the descriptor in the
+ /// member indices list, which is the index data that the base address's
+ /// index is derived from. As the base address is a member of the
+ /// descriptor, we must also alter the indices of other members in the
+ /// list to account for this new addition. In some cases this requires
+ /// inserting into the middle of a member index vector (i.e. a subsequent
+ /// map could be accessing a member of the descriptor type, so we must be
+ /// sure to adjust any of these cases for the addition of the new base
+ /// address index value).
+ void adjustMemberIndices(
+ llvm::SmallVector<llvm::SmallVector<int64_t>> &memberIndices,
+ size_t memberIndex) {
+ llvm::SmallVector<int64_t> baseAddrIndex = memberIndices[memberIndex];
+ baseAddrIndex.push_back(0);
- if (auto mapClauseOwner =
- llvm::dyn_cast<mlir::omp::MapClauseOwningOpInterface>(target)) {
- llvm::SmallVector<mlir::Value> newMapOps;
- mlir::OperandRange mapVarsArr = mapClauseOwner.getMapVars();
+ // If we find another member that is "derived from/a member of" the
+ // descriptor and is not the descriptor itself, we must insert a 0 into its
+ // index list at the appropriate position for the new base address we have
+ // just added for the descriptor, so that the positional/index data for
+ // that member remains correct.
+ size_t insertPosition =
+ std::distance(baseAddrIndex.begin(), std::prev(baseAddrIndex.end()));
+ for (size_t i = 0; i < memberIndices.size(); ++i) {
+ if (memberIndices[i].size() > insertPosition &&
+ std::equal(baseAddrIndex.begin(), std::prev(baseAddrIndex.end()),
+ memberIndices[i].begin())) {
+ memberIndices[i].insert(
+ std::next(memberIndices[i].begin(), insertPosition), 0);
+ }
+ }
- for (size_t i = 0; i < mapVarsArr.size(); ++i) {
- if (mapVarsArr[i] == op) {
- // Push new implicit maps generated for the descriptor.
- newMapOps.push_back(baseAddr);
+ // Insert our newly created baseAddrIndex into the larger list of indices at
+ // the correct location.
+ memberIndices.insert(std::next(memberIndices.begin(), memberIndex + 1),
+ baseAddrIndex);
+ }
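
As a quick illustration of the adjustment performed by adjustMemberIndices above, here is a minimal standalone sketch (not part of the patch; it uses std::vector in place of llvm::SmallVector and hypothetical index values) showing how the index lists change when the descriptor sits at member index {1} and one of its members was previously addressed as {1, 2}:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <vector>

// Mirrors the adjustment: the descriptor's index gains a trailing 0 to form
// the base address index, and any index that addresses a member reached
// through the descriptor has a 0 spliced in at the same depth.
void adjustMemberIndices(std::vector<std::vector<int64_t>> &memberIndices,
                         size_t memberIndex) {
  std::vector<int64_t> baseAddrIndex = memberIndices[memberIndex];
  baseAddrIndex.push_back(0);
  size_t insertPosition = baseAddrIndex.size() - 1;
  for (auto &indices : memberIndices)
    if (indices.size() > insertPosition &&
        std::equal(baseAddrIndex.begin(), std::prev(baseAddrIndex.end()),
                   indices.begin()))
      indices.insert(std::next(indices.begin(), insertPosition), 0);
  memberIndices.insert(std::next(memberIndices.begin(), memberIndex + 1),
                       baseAddrIndex);
}

int main() {
  // Descriptor mapped at {1}; one of its members previously addressed {1, 2}.
  std::vector<std::vector<int64_t>> memberIndices = {{1}, {1, 2}};
  adjustMemberIndices(memberIndices, /*memberIndex=*/0);
  // Prints "1" (descriptor, unchanged), "1 0" (new base address member),
  // and "1 0 2" (the pre-existing member, renumbered).
  for (const auto &indices : memberIndices) {
    for (int64_t index : indices)
      std::cout << index << ' ';
    std::cout << '\n';
  }
  return 0;
}
```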
- // for TargetOp's which have IsolatedFromAbove we must align the
- // new additional map operand with an appropriate BlockArgument,
- // as the printing and later processing currently requires a 1:1
- // mapping of BlockArgs to MapInfoOp's at the same placement in
- // each array (BlockArgs and MapVars).
- if (auto targetOp = llvm::dyn_cast<mlir::omp::TargetOp>(target))
- targetOp.getRegion().insertArgument(i, baseAddr.getType(), loc);
- }
- newMapOps.push_back(mapVarsArr[i]);
+ /// Adjusts the descriptor's map type. The main alteration currently done
+ /// is transforming the map type to OMP_MAP_TO where possible. This is
+ /// because we will always need to map the descriptor to the device (or at
+ /// the very least it seems to be the case currently with the lowered
+ /// kernel IR): without the appropriate descriptor information on the
+ /// device, there is a risk of the kernel IR requesting data that has not
+ /// been copied when performing things like indexing, which can cause
+ /// segfaults and memory access errors. However, we do not need this data
+ /// mapped back to the host from the device, as we cannot alter the data
+ /// via resizing or deletion on the device (this is specified in the
+ /// OpenMP specification), so discarding any descriptor alterations by not
+ /// mapping back is reasonable (and required for certain segments of
+ /// descriptor data, like the type descriptor, that are global constants).
+ /// This alteration is currently inapplicable only to target exit and
+ /// target update, because target exit does not allow To mappings, and
+ /// target update does not allow both to and from simultaneously. We
+ /// currently try to maintain the implicit flag where necessary, although
+ /// it does not seem strictly required.
+ unsigned long getDescriptorMapType(unsigned long mapTypeFlag,
+ mlir::Operation *target) {
+ if (llvm::isa_and_nonnull<mlir::omp::TargetExitDataOp>(target) ||
+ llvm::isa_and_nonnull<mlir::omp::TargetUpdateOp>(target))
+ return mapTypeFlag;
+
+ bool hasImplicitMap =
+ (llvm::omp::OpenMPOffloadMappingFlags(mapTypeFlag) &
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT) ==
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT;
+
+ return llvm::to_underlying(
+ hasImplicitMap
+ ? llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO |
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT
+ : llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO);
+ }
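
To make the rule above concrete, here is a small self-contained sketch (not part of the patch; it uses illustrative stand-in flag constants rather than the real llvm::omp::OpenMPOffloadMappingFlags values) of how the descriptor's map type is rewritten:

```cpp
#include <cstdint>
#include <iostream>

// Illustrative stand-ins for the relevant offload mapping flag bits; the
// actual values live in llvm::omp::OpenMPOffloadMappingFlags and may differ.
constexpr uint64_t kMapTo = 0x1;
constexpr uint64_t kMapFrom = 0x2;
constexpr uint64_t kMapImplicit = 0x200;

// Mirrors the rule above: outside of target exit data / target update the
// descriptor is always remapped as "to", preserving only the implicit bit.
uint64_t descriptorMapType(uint64_t mapTypeFlag, bool isExitDataOrUpdate) {
  if (isExitDataOrUpdate)
    return mapTypeFlag;
  bool hasImplicit = (mapTypeFlag & kMapImplicit) == kMapImplicit;
  return hasImplicit ? (kMapTo | kMapImplicit) : kMapTo;
}

int main() {
  // An explicit tofrom map on omp target collapses to "to" for the descriptor.
  std::cout << std::hex << descriptorMapType(kMapTo | kMapFrom, false) << '\n';
  // On target update (or target exit data) the original map type is kept.
  std::cout << std::hex << descriptorMapType(kMapFrom, true) << '\n';
  return 0;
}
```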
+
+ mlir::omp::MapInfoOp genDescriptorMemberMaps(mlir::omp::MapInfoOp op,
+ fir::FirOpBuilder &builder,
+ mlir::Operation *target) {
+ llvm::SmallVector<ParentAndPlacement> mapMemberUsers;
+ getMemberUserList(op, mapMemberUsers);
+
+ // TODO: map the addendum segment of the descriptor, similarly to the
+ // base address/data pointer member.
+ mlir::Value descriptor = getDescriptorFromBoxMap(op, builder);
+ auto baseAddr = getBaseAddrMap(descriptor, op.getBounds(),
+ op.getMapType().value_or(0), builder);
+ mlir::ArrayAttr newMembersAttr;
+ mlir::SmallVector<mlir::Value> newMembers;
+ llvm::SmallVector<llvm::SmallVector<int64_t>> memberIndices;
+
+ if (!mapMemberUsers.empty() || !op.getMembers().empty())
+ getMemberIndicesAsVectors(
+ !mapMemberUsers.empty() ? mapMemberUsers[0].parent : op,
+ memberIndices);
+
+ // If the operation that we are expanding with a descriptor has a user
+ // (parent), then we have to expand the parents member indices to reflect
+ // the adjusted member indices for the base address insertion. However, if
+ // it does not then we are expanding a MapInfoOp without any pre-existing
+ // member information to now have one new member for the base address or we
----------------
skatrak wrote:
```suggestion
// member information to now have one new member for the base address, or we
```
https://github.com/llvm/llvm-project/pull/96266
More information about the llvm-branch-commits
mailing list