[flang-commits] [flang] 6c14e84 - [flang][hlfir] Add codegen for vector subscripted LHS
Jean Perier via flang-commits
flang-commits at lists.llvm.org
Tue Jun 27 04:31:22 PDT 2023
Author: Jean Perier
Date: 2023-06-27T13:30:24+02:00
New Revision: 6c14e849266e55a022d54ef4dcf09a1c93dcefda
URL: https://github.com/llvm/llvm-project/commit/6c14e849266e55a022d54ef4dcf09a1c93dcefda
DIFF: https://github.com/llvm/llvm-project/commit/6c14e849266e55a022d54ef4dcf09a1c93dcefda.diff
LOG: [flang][hlfir] Add codegen for vector subscripted LHS
This patch adds support for vector subscripted assignment left-hand
side. It does not yet add support for the cases where the LHS must be
saved because its evaluation could be impacted by the assignment.
The implementation adds an hlfir::ElementalOpInterface to share the
elemental inlining utility and some other tools between
hlfir::ElementalOp and hlfir::ElementalAddrOp.
It adds generateYieldedLHS() to allow retrieving the LHS value
in lowering, whether or not it is vector subscripted. If it is vector
subscripted, this utility creates a loop nest iterating over the
elements and returns the address of an element.
Differential Revision: https://reviews.llvm.org/D153759
Added:
flang/test/HLFIR/order_assignments/vector-subscripts-codegen.fir
Modified:
flang/include/flang/Optimizer/Builder/HLFIRTools.h
flang/include/flang/Optimizer/HLFIR/HLFIROps.td
flang/lib/Optimizer/Builder/HLFIRTools.cpp
flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp
Removed:
################################################################################
diff --git a/flang/include/flang/Optimizer/Builder/HLFIRTools.h b/flang/include/flang/Optimizer/Builder/HLFIRTools.h
index 3b776e73d2d44..7bf12896d6721 100644
--- a/flang/include/flang/Optimizer/Builder/HLFIRTools.h
+++ b/flang/include/flang/Optimizer/Builder/HLFIRTools.h
@@ -31,6 +31,7 @@ namespace hlfir {
class AssociateOp;
class ElementalOp;
+class ElementalOpInterface;
class ElementalAddrOp;
class YieldElementOp;
@@ -401,17 +402,16 @@ hlfir::YieldElementOp inlineElementalOp(mlir::Location loc,
hlfir::ElementalOp elemental,
mlir::ValueRange oneBasedIndices);
-/// Inline the body of an hlfir.elemental without cloning the resulting
-/// hlfir.yield_element, and return the cloned operand of the
-/// hlfir.yield_element. The mapper must be provided to cover complex cases
-/// where the inlined elemental is not defined in the current context and uses
-/// values that have been cloned already.
-/// A callback is provided to indicate if an hlfir.apply inside the
-/// hlfir.elemental must be immediately replaced by the inlining of the
-/// applied hlfir.elemental.
+/// Inline the body of an hlfir.elemental or hlfir.elemental_addr without
+/// cloning the resulting hlfir.yield_element/hlfir.yield, and return the cloned
+/// operand of the hlfir.yield_element/hlfir.yield. The mapper must be provided
+/// to cover complex cases where the inlined elemental is not defined in the
+/// current context and uses values that have been cloned already. A callback is
+/// provided to indicate if an hlfir.apply inside the hlfir.elemental must be
+/// immediately replaced by the inlining of the applied hlfir.elemental.
mlir::Value inlineElementalOp(
mlir::Location loc, fir::FirOpBuilder &builder,
- hlfir::ElementalOp elemental, mlir::ValueRange oneBasedIndices,
+ hlfir::ElementalOpInterface elemental, mlir::ValueRange oneBasedIndices,
mlir::IRMapping &mapper,
const std::function<bool(hlfir::ElementalOp)> &mustRecursivelyInline);
diff --git a/flang/include/flang/Optimizer/HLFIR/HLFIROps.td b/flang/include/flang/Optimizer/HLFIR/HLFIROps.td
index ee99cc77ef518..5f3f533792431 100644
--- a/flang/include/flang/Optimizer/HLFIR/HLFIROps.td
+++ b/flang/include/flang/Optimizer/HLFIR/HLFIROps.td
@@ -671,7 +671,54 @@ def hlfir_NoReassocOp : hlfir_Op<"no_reassoc", [NoMemoryEffect, SameOperandsAndR
let assemblyFormat = "$val attr-dict `:` type($val)";
}
-def hlfir_ElementalOp : hlfir_Op<"elemental", [RecursiveMemoryEffects]> {
+def hlfir_ElementalOpInterface : OpInterface<"ElementalOpInterface"> {
+ let description = [{
+ Interface for the operation holding a region with elemental computation.
+    It is used as a common interface between hlfir.elemental and hlfir.elemental_addr.
+ }];
+
+ let methods = [
+ InterfaceMethod<
+ /*desc=*/"Return the one based elemental indices.",
+ /*retTy=*/"mlir::Block::BlockArgListType",
+ /*methodName=*/"getIndices",
+ /*args=*/(ins),
+ /*methodBody=*/[{}]
+ >,
+ InterfaceMethod<
+ /*desc=*/"Return the element entity being computed",
+ /*retTy=*/"mlir::Value",
+ /*methodName=*/"getElementEntity",
+ /*args=*/(ins),
+ /*methodBody=*/[{}]
+ >,
+ InterfaceMethod<
+ /*desc=*/"Get element cleanup region, if any.",
+ /*retTy=*/"mlir::Region*",
+ /*methodName=*/"getElementCleanup",
+ /*args=*/(ins),
+ /*methodBody=*/[{}]
+ >,
+ InterfaceMethod<
+ /*desc=*/"Get elemental region.",
+ /*retTy=*/"mlir::Region&",
+ /*methodName=*/"getElementalRegion",
+ /*args=*/(ins),
+ /*methodBody=*/[{}]
+ >,
+ InterfaceMethod<
+ /*desc=*/"Must this elemental operation be evaluated in order?",
+ /*retTy=*/"bool",
+ /*methodName=*/"isOrdered",
+ /*args=*/(ins),
+ /*methodBody=*/[{}]
+ >,
+ ];
+
+ let cppNamespace = "hlfir";
+}
+
+def hlfir_ElementalOp : hlfir_Op<"elemental", [RecursiveMemoryEffects, hlfir_ElementalOpInterface]> {
let summary = "elemental expression";
let description = [{
Represent an elemental expression as a function of the indices.
@@ -726,6 +773,11 @@ def hlfir_ElementalOp : hlfir_Op<"elemental", [RecursiveMemoryEffects]> {
mlir::Block::BlockArgListType getIndices() {
return getBody()->getArguments();
}
+ /// ElementalOpInterface implementation.
+
+ mlir::Region& getElementalRegion() {return getRegion();}
+ mlir::Value getElementEntity();
+ mlir::Region* getElementCleanup() {return nullptr;}
/// Must this elemental be evaluated in order?
/// TODO: add attribute and set it in lowering.
@@ -1117,7 +1169,7 @@ def hlfir_YieldOp : hlfir_Op<"yield", [Terminator, ParentOneOf<["RegionAssignOp"
let assemblyFormat = "$entity attr-dict `:` type($entity) custom<YieldOpCleanup>($cleanup)";
}
-def hlfir_ElementalAddrOp : hlfir_Op<"elemental_addr", [Terminator, HasParent<"RegionAssignOp">, RecursiveMemoryEffects, RecursivelySpeculatable]> {
+def hlfir_ElementalAddrOp : hlfir_Op<"elemental_addr", [Terminator, HasParent<"RegionAssignOp">, RecursiveMemoryEffects, RecursivelySpeculatable, hlfir_ElementalOpInterface]> {
let summary = "Yield the address of a vector subscripted variable inside an hlfir.region_assign";
let description = [{
Special terminator node for the left-hand side region of an hlfir.region_assign
@@ -1180,6 +1232,16 @@ def hlfir_ElementalAddrOp : hlfir_Op<"elemental_addr", [Terminator, HasParent<"R
/// body. It yields the variable element address.
/// This should only be called once the ElementalAddrOp has been built.
hlfir::YieldOp getYieldOp();
+
+ /// ElementalOpInterface implementation.
+
+ mlir::Region& getElementalRegion() {return getBody();}
+ mlir::Value getElementEntity();
+ mlir::Region* getElementCleanup();
+
+ /// Must this elemental be evaluated in order?
+ /// TODO: add attribute and set it in lowering.
+ bool isOrdered() {return true;}
}];
let hasVerifier = 1;
diff --git a/flang/lib/Optimizer/Builder/HLFIRTools.cpp b/flang/lib/Optimizer/Builder/HLFIRTools.cpp
index b8010584d2c8b..30f5ba5d067a5 100644
--- a/flang/lib/Optimizer/Builder/HLFIRTools.cpp
+++ b/flang/lib/Optimizer/Builder/HLFIRTools.cpp
@@ -776,36 +776,33 @@ hlfir::inlineElementalOp(mlir::Location loc, fir::FirOpBuilder &builder,
mlir::Value hlfir::inlineElementalOp(
mlir::Location loc, fir::FirOpBuilder &builder,
- hlfir::ElementalOp elemental, mlir::ValueRange oneBasedIndices,
+ hlfir::ElementalOpInterface elemental, mlir::ValueRange oneBasedIndices,
mlir::IRMapping &mapper,
const std::function<bool(hlfir::ElementalOp)> &mustRecursivelyInline) {
- mlir::Region ®ion = elemental.getRegion();
+ mlir::Region ®ion = elemental.getElementalRegion();
// hlfir.elemental region is a SizedRegion<1>.
assert(region.hasOneBlock() && "elemental region must have one block");
mapper.map(elemental.getIndices(), oneBasedIndices);
- mlir::Block::OpListType &ops = region.back().getOperations();
- assert(!ops.empty() && "elemental block cannot be empty");
- auto end = ops.end();
- for (auto opIt = ops.begin(); std::next(opIt) != end; ++opIt) {
- if (auto apply = mlir::dyn_cast<hlfir::ApplyOp>(*opIt))
+ for (auto &op : region.front().without_terminator()) {
+ if (auto apply = mlir::dyn_cast<hlfir::ApplyOp>(op))
if (auto appliedElemental =
apply.getExpr().getDefiningOp<hlfir::ElementalOp>())
if (mustRecursivelyInline(appliedElemental)) {
llvm::SmallVector<mlir::Value> clonedApplyIndices;
for (auto indice : apply.getIndices())
clonedApplyIndices.push_back(mapper.lookupOrDefault(indice));
- mlir::Value inlined = inlineElementalOp(
- loc, builder, appliedElemental, clonedApplyIndices, mapper,
- mustRecursivelyInline);
+ hlfir::ElementalOpInterface elementalIface =
+ mlir::cast<hlfir::ElementalOpInterface>(
+ appliedElemental.getOperation());
+ mlir::Value inlined = inlineElementalOp(loc, builder, elementalIface,
+ clonedApplyIndices, mapper,
+ mustRecursivelyInline);
mapper.map(apply.getResult(), inlined);
continue;
}
- (void)builder.clone(*opIt, mapper);
+ (void)builder.clone(op, mapper);
}
- auto oldYield = mlir::dyn_cast_or_null<hlfir::YieldElementOp>(
- region.back().getOperations().back());
- assert(oldYield && "must terminate with yieldElementalOp");
- return mapper.lookupOrDefault(oldYield.getElementValue());
+ return mapper.lookupOrDefault(elemental.getElementEntity());
}
hlfir::LoopNest hlfir::genLoopNest(mlir::Location loc,
diff --git a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
index a0e06351ab81a..04687b54bff7b 100644
--- a/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
+++ b/flang/lib/Optimizer/HLFIR/IR/HLFIROps.cpp
@@ -1043,6 +1043,10 @@ void hlfir::ElementalOp::build(mlir::OpBuilder &builder,
}
}
+mlir::Value hlfir::ElementalOp::getElementEntity() {
+ return mlir::cast<hlfir::YieldElementOp>(getBody()->back()).getElementValue();
+}
+
//===----------------------------------------------------------------------===//
// ApplyOp
//===----------------------------------------------------------------------===//
@@ -1297,6 +1301,15 @@ hlfir::YieldOp hlfir::ElementalAddrOp::getYieldOp() {
return yieldOp;
}
+mlir::Value hlfir::ElementalAddrOp::getElementEntity() {
+ return getYieldOp().getEntity();
+}
+
+mlir::Region *hlfir::ElementalAddrOp::getElementCleanup() {
+ mlir::Region *cleanup = &getYieldOp().getCleanup();
+ return cleanup->empty() ? nullptr : cleanup;
+}
+
//===----------------------------------------------------------------------===//
// OrderedAssignmentTreeOpInterface
//===----------------------------------------------------------------------===//
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
index 84f91c3b1d98c..acb14e402d210 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp
@@ -144,8 +144,6 @@ class OrderedAssignmentRewriter {
/// but do not generate the elswhere mask or the new fir.if.
void enterElsewhere(hlfir::ElseWhereOp);
- /// Is this an assignment to a vector subscripted entity?
- static bool hasVectorSubscriptedLhs(hlfir::RegionAssignOp regionAssignOp);
/// Are there any leaf region in the node that must be saved in the current
/// run?
bool mustSaveRegionIn(
@@ -178,9 +176,24 @@ class OrderedAssignmentRewriter {
generateYieldedEntity(mlir::Region ®ion,
std::optional<mlir::Type> castToType = std::nullopt);
+ struct LhsValueAndCleanUp {
+ mlir::Value lhs;
+ std::optional<hlfir::YieldOp> elementalCleanup;
+ mlir::Region *nonElementalCleanup = nullptr;
+ std::optional<hlfir::LoopNest> vectorSubscriptLoopNest;
+ };
+
+ /// Generate the left-hand side. If the left-hand side is vector
+ /// subscripted (hlfir.elemental_addr), this will create a loop nest
+ /// (unless it was already created by a WHERE mask) and return the
+ /// element address.
+ LhsValueAndCleanUp generateYieldedLHS(mlir::Location loc,
+ mlir::Region &lhsRegion);
+
/// If \p maybeYield is present and has a clean-up, generate the clean-up
/// at the current insertion point (by cloning).
void generateCleanupIfAny(std::optional<hlfir::YieldOp> maybeYield);
+ void generateCleanupIfAny(mlir::Region *cleanupRegion);
/// Generate a masked entity. This can only be called when whereLoopNest was
/// set (When an hlfir.where is being visited).
@@ -405,30 +418,31 @@ convertToMoldType(mlir::Location loc, fir::FirOpBuilder &builder,
void OrderedAssignmentRewriter::pre(hlfir::RegionAssignOp regionAssignOp) {
mlir::Location loc = regionAssignOp.getLoc();
- auto [rhs, oldRhsYield] =
+ std::optional<hlfir::LoopNest> elementalLoopNest;
+ auto [rhsValue, oldRhsYield] =
generateYieldedEntity(regionAssignOp.getRhsRegion());
- if (hasVectorSubscriptedLhs(regionAssignOp))
- TODO(loc, "assignment to vector subscripted entity");
- auto [lhs, oldLhsYield] =
- generateYieldedEntity(regionAssignOp.getLhsRegion());
+ LhsValueAndCleanUp loweredLhs =
+ generateYieldedLHS(loc, regionAssignOp.getLhsRegion());
+ hlfir::Entity rhsEntity{rhsValue};
+ hlfir::Entity lhsEntity{loweredLhs.lhs};
+ if (loweredLhs.vectorSubscriptLoopNest)
+ rhsEntity = hlfir::getElementAt(
+ loc, builder, rhsEntity,
+ loweredLhs.vectorSubscriptLoopNest->oneBasedIndices);
if (!regionAssignOp.getUserDefinedAssignment().empty()) {
hlfir::Entity userAssignLhs{regionAssignOp.getUserAssignmentLhs()};
hlfir::Entity userAssignRhs{regionAssignOp.getUserAssignmentRhs()};
- hlfir::Entity lhsEntity{lhs};
- hlfir::Entity rhsEntity{rhs};
- fir::DoLoopOp outerElementalLoop = nullptr;
+ std::optional<hlfir::LoopNest> elementalLoopNest;
if (lhsEntity.isArray() && userAssignLhs.isScalar()) {
// Elemental assignment with array argument (the RHS cannot be an array
// if the LHS is not).
mlir::Value shape = hlfir::genShape(loc, builder, lhsEntity);
- hlfir::LoopNest elementalLoopNest =
- hlfir::genLoopNest(loc, builder, shape);
- outerElementalLoop = elementalLoopNest.outerLoop;
- builder.setInsertionPointToStart(elementalLoopNest.innerLoop.getBody());
+ elementalLoopNest = hlfir::genLoopNest(loc, builder, shape);
+ builder.setInsertionPointToStart(elementalLoopNest->innerLoop.getBody());
lhsEntity = hlfir::getElementAt(loc, builder, lhsEntity,
- elementalLoopNest.oneBasedIndices);
+ elementalLoopNest->oneBasedIndices);
rhsEntity = hlfir::getElementAt(loc, builder, rhsEntity,
- elementalLoopNest.oneBasedIndices);
+ elementalLoopNest->oneBasedIndices);
}
llvm::SmallVector<hlfir::CleanupFunction, 2> argConversionCleanups;
@@ -443,15 +457,19 @@ void OrderedAssignmentRewriter::pre(hlfir::RegionAssignOp regionAssignOp) {
(void)builder.clone(op, mapper);
for (auto &cleanupConversion : argConversionCleanups)
cleanupConversion();
- if (outerElementalLoop)
- builder.setInsertionPointAfter(outerElementalLoop);
+ if (elementalLoopNest)
+ builder.setInsertionPointAfter(elementalLoopNest->outerLoop);
} else {
// TODO: preserve allocatable assignment aspects for forall once
// they are conveyed in hlfir.region_assign.
- builder.create<hlfir::AssignOp>(loc, rhs, lhs);
+ builder.create<hlfir::AssignOp>(loc, rhsEntity, lhsEntity);
}
+ generateCleanupIfAny(loweredLhs.elementalCleanup);
+ if (loweredLhs.vectorSubscriptLoopNest)
+ builder.setInsertionPointAfter(
+ loweredLhs.vectorSubscriptLoopNest->outerLoop);
generateCleanupIfAny(oldRhsYield);
- generateCleanupIfAny(oldLhsYield);
+ generateCleanupIfAny(loweredLhs.nonElementalCleanup);
}
void OrderedAssignmentRewriter::generateMaskIfOp(mlir::Value cdt) {
@@ -673,6 +691,36 @@ mlir::Value OrderedAssignmentRewriter::generateYieldedScalarValue(
return value;
}
+OrderedAssignmentRewriter::LhsValueAndCleanUp
+OrderedAssignmentRewriter::generateYieldedLHS(mlir::Location loc,
+ mlir::Region &lhsRegion) {
+ LhsValueAndCleanUp loweredLhs;
+ hlfir::ElementalAddrOp elementalAddrLhs =
+ mlir::dyn_cast<hlfir::ElementalAddrOp>(lhsRegion.back().back());
+ if (elementalAddrLhs && !whereLoopNest) {
+ for (auto &op : lhsRegion.front().without_terminator())
+ (void)builder.clone(op, mapper);
+ mlir::Value newShape = mapper.lookupOrDefault(elementalAddrLhs.getShape());
+ loweredLhs.vectorSubscriptLoopNest =
+ hlfir::genLoopNest(loc, builder, newShape);
+ builder.setInsertionPointToStart(
+ loweredLhs.vectorSubscriptLoopNest->innerLoop.getBody());
+ mapper.map(elementalAddrLhs.getIndices(),
+ loweredLhs.vectorSubscriptLoopNest->oneBasedIndices);
+ for (auto &op : elementalAddrLhs.getBody().front().without_terminator())
+ (void)builder.clone(op, mapper);
+ loweredLhs.elementalCleanup = elementalAddrLhs.getYieldOp();
+ loweredLhs.lhs =
+ mapper.lookupOrDefault(loweredLhs.elementalCleanup->getEntity());
+ } else {
+ auto [lhs, yield] = generateYieldedEntity(lhsRegion);
+ loweredLhs.lhs = lhs;
+ if (yield && !yield->getCleanup().empty())
+ loweredLhs.nonElementalCleanup = &yield->getCleanup();
+ }
+ return loweredLhs;
+}
+
mlir::Value
OrderedAssignmentRewriter::generateMaskedEntity(MaskedArrayExpr &maskedExpr) {
assert(whereLoopNest.has_value() && "must be inside WHERE loop nest");
@@ -697,19 +745,15 @@ OrderedAssignmentRewriter::generateMaskedEntity(MaskedArrayExpr &maskedExpr) {
void OrderedAssignmentRewriter::generateCleanupIfAny(
std::optional<hlfir::YieldOp> maybeYield) {
if (maybeYield.has_value())
- if (!maybeYield->getCleanup().empty()) {
- assert(maybeYield->getCleanup().hasOneBlock() &&
- "region must contain one block");
- for (auto &op : maybeYield->getCleanup().back().getOperations())
- if (!mlir::isa<fir::FirEndOp>(op))
- builder.clone(op, mapper);
- }
+ generateCleanupIfAny(&maybeYield->getCleanup());
}
-
-bool OrderedAssignmentRewriter::hasVectorSubscriptedLhs(
- hlfir::RegionAssignOp regionAssignOp) {
- return mlir::isa<hlfir::ElementalAddrOp>(
- regionAssignOp.getLhsRegion().back().back());
+void OrderedAssignmentRewriter::generateCleanupIfAny(
+ mlir::Region *cleanupRegion) {
+ if (cleanupRegion && !cleanupRegion->empty()) {
+ assert(cleanupRegion->hasOneBlock() && "region must contain one block");
+ for (auto &op : cleanupRegion->back().without_terminator())
+ builder.clone(op, mapper);
+ }
}
bool OrderedAssignmentRewriter::mustSaveRegionIn(
@@ -749,22 +793,25 @@ bool OrderedAssignmentRewriter::isRequiredInCurrentRun(
}
/// Is the apply using all the elemental indices in order?
-static bool isInOrderApply(hlfir::ApplyOp apply, hlfir::ElementalOp elemental) {
- if (elemental.getIndices().size() != apply.getIndices().size())
+static bool isInOrderApply(hlfir::ApplyOp apply,
+ hlfir::ElementalOpInterface elemental) {
+ mlir::Region::BlockArgListType elementalIndices = elemental.getIndices();
+ if (elementalIndices.size() != apply.getIndices().size())
return false;
for (auto [elementalIdx, applyIdx] :
- llvm::zip(elemental.getIndices(), apply.getIndices()))
+ llvm::zip(elementalIndices, apply.getIndices()))
if (elementalIdx != applyIdx)
return false;
return true;
}
-/// Gather the chain of hlfir::ElementalOp, if any, that produced \p value.
+/// Gather the tree of hlfir::ElementalOpInterface use-def, if any, starting
+/// from \p elemental, which may be a nullptr.
static void
-gatherElementalTree(mlir::Value value,
+gatherElementalTree(hlfir::ElementalOpInterface elemental,
llvm::SmallPtrSetImpl<mlir::Operation *> &elementalOps,
bool isOutOfOrder) {
- if (auto elemental = value.getDefiningOp<hlfir::ElementalOp>()) {
+ if (elemental) {
// Only inline an applied elemental that must be executed in order if the
// applying indices are in order. An hlfir::Elemental may have been created
// for a transformational like transpose, and Fortran 2018 standard
@@ -774,11 +821,14 @@ gatherElementalTree(mlir::Value value,
if (isOutOfOrder && elemental.isOrdered())
return;
elementalOps.insert(elemental.getOperation());
- for (mlir::Operation &op : elemental.getBody()->getOperations())
+ for (mlir::Operation &op : elemental.getElementalRegion().getOps())
if (auto apply = mlir::dyn_cast<hlfir::ApplyOp>(op)) {
bool isUnorderedApply =
isOutOfOrder || !isInOrderApply(apply, elemental);
- gatherElementalTree(apply.getExpr(), elementalOps, isUnorderedApply);
+ auto maybeElemental =
+ mlir::dyn_cast_or_null<hlfir::ElementalOpInterface>(
+ apply.getExpr().getDefiningOp());
+ gatherElementalTree(maybeElemental, elementalOps, isUnorderedApply);
}
}
}
@@ -786,14 +836,17 @@ gatherElementalTree(mlir::Value value,
MaskedArrayExpr::MaskedArrayExpr(mlir::Location loc, mlir::Region ®ion)
: loc{loc}, region{region} {
mlir::Operation &terminator = region.back().back();
- // TODO: clarify if vector subscripts must be inlined or not here.
- // In case of x(elemental(A), :), this could lead to more elemental(A)
- // evaluation than needed, which is not OK if "elemental" is impure.
- // The standard is not very clear here.
- if (mlir::isa<hlfir::ElementalAddrOp>(terminator))
- TODO(loc, "vector subscripted assignments inside WHERE");
+ if (auto elementalAddr =
+ mlir::dyn_cast<hlfir::ElementalOpInterface>(terminator)) {
+ // Vector subscripted designator (hlfir.elemental_addr terminator).
+ gatherElementalTree(elementalAddr, elementalParts, /*isOutOfOrder=*/false);
+ return;
+ }
+ // Try if elemental expression.
mlir::Value entity = mlir::cast<hlfir::YieldOp>(terminator).getEntity();
- gatherElementalTree(entity, elementalParts, /*isOutOfOrder=*/false);
+ auto maybeElemental = mlir::dyn_cast_or_null<hlfir::ElementalOpInterface>(
+ entity.getDefiningOp());
+ gatherElementalTree(maybeElemental, elementalParts, /*isOutOfOrder=*/false);
}
void MaskedArrayExpr::generateNoneElementalPart(fir::FirOpBuilder &builder,
@@ -836,14 +889,21 @@ MaskedArrayExpr::generateElementalParts(fir::FirOpBuilder &builder,
assert(noneElementalPartWasGenerated &&
"non elemental part must have been generated");
mlir::Operation &terminator = region.back().back();
- if (mlir::isa<hlfir::ElementalAddrOp>(terminator))
- TODO(loc, "vector subscripted assignments inside WHERE");
- mlir::Value entity = mlir::cast<hlfir::YieldOp>(terminator).getEntity();
- auto elemental = entity.getDefiningOp<hlfir::ElementalOp>();
+ hlfir::ElementalOpInterface elemental =
+ mlir::dyn_cast<hlfir::ElementalAddrOp>(terminator);
if (!elemental) {
- hlfir::Entity clonedEntity{mapper.lookupOrDefault(entity)};
- return hlfir::getElementAt(loc, builder, clonedEntity, oneBasedIndices);
+ // If the terminator is not an hlfir.elemental_addr, try if the yielded
+ // entity was produced by an hlfir.elemental.
+ mlir::Value entity = mlir::cast<hlfir::YieldOp>(terminator).getEntity();
+ elemental = entity.getDefiningOp<hlfir::ElementalOp>();
+ if (!elemental) {
+ // The yielded entity was not produced by an elemental operation,
+ // get its clone in the non elemental part evaluation and address it.
+ hlfir::Entity clonedEntity{mapper.lookupOrDefault(entity)};
+ return hlfir::getElementAt(loc, builder, clonedEntity, oneBasedIndices);
+ }
}
+
auto mustRecursivelyInline =
[&](hlfir::ElementalOp appliedElemental) -> bool {
return elementalParts.contains(appliedElemental.getOperation());
@@ -855,17 +915,20 @@ MaskedArrayExpr::generateElementalParts(fir::FirOpBuilder &builder,
void MaskedArrayExpr::generateNoneElementalCleanupIfAny(
fir::FirOpBuilder &builder, mlir::IRMapping &mapper) {
mlir::Operation &terminator = region.back().back();
- if (mlir::isa<hlfir::ElementalAddrOp>(terminator))
- TODO(loc, "vector subscripted assignments inside WHERE");
- auto yieldOp = mlir::cast<hlfir::YieldOp>(terminator);
- if (yieldOp.getCleanup().empty())
+ mlir::Region *cleanupRegion = nullptr;
+ if (auto elementalAddr = mlir::dyn_cast<hlfir::ElementalAddrOp>(terminator)) {
+ cleanupRegion = &elementalAddr.getCleanup();
+ } else {
+ auto yieldOp = mlir::cast<hlfir::YieldOp>(terminator);
+ cleanupRegion = &yieldOp.getCleanup();
+ }
+ if (cleanupRegion->empty())
return;
- for (mlir::Operation &op : yieldOp.getCleanup().getOps()) {
+ for (mlir::Operation &op : cleanupRegion->front().without_terminator()) {
if (auto destroy = mlir::dyn_cast<hlfir::DestroyOp>(op))
if (elementalParts.contains(destroy.getExpr().getDefiningOp()))
continue;
- if (!mlir::isa<fir::FirEndOp>(op))
- (void)builder.clone(op, mapper);
+ (void)builder.clone(op, mapper);
}
}
@@ -1070,10 +1133,7 @@ class RegionAssignConversion
mlir::PatternRewriter &rewriter) const override {
auto root = mlir::cast<hlfir::OrderedAssignmentTreeOpInterface>(
regionAssignOp.getOperation());
- if (!regionAssignOp.getUserDefinedAssignment().empty())
- return ::rewrite(root, /*tryFusingAssignments=*/false, rewriter);
- TODO(regionAssignOp.getLoc(),
- "assignments to vector subscripted entity in HLFIR");
+ return ::rewrite(root, /*tryFusingAssignments=*/false, rewriter);
}
};
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp b/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp
index de0fd19eaa8a4..d53543aea1adf 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/ScheduleOrderedAssignments.cpp
@@ -225,6 +225,9 @@ static mlir::Value getYieldedEntity(mlir::Region ®ion) {
return nullptr;
if (auto yield = mlir::dyn_cast<hlfir::YieldOp>(region.back().back()))
return yield.getEntity();
+ if (auto elementalAddr =
+ mlir::dyn_cast<hlfir::ElementalAddrOp>(region.back().back()))
+ return elementalAddr.getYieldOp().getEntity();
return nullptr;
}
@@ -237,9 +240,7 @@ static void gatherAssignEffects(
bool userDefAssignmentMayOnlyWriteToAssignedVariable,
llvm::SmallVectorImpl<mlir::MemoryEffects::EffectInstance> &assignEffects) {
mlir::Value assignedVar = getYieldedEntity(regionAssign.getLhsRegion());
- if (!assignedVar)
- TODO(regionAssign.getLoc(),
- "assignment to vector subscripted entity in HLFIR");
+ assert(assignedVar && "lhs cannot be an empty region");
assignEffects.emplace_back(mlir::MemoryEffects::Write::get(), assignedVar);
if (!regionAssign.getUserDefinedAssignment().empty()) {
diff --git a/flang/test/HLFIR/order_assignments/vector-subscripts-codegen.fir b/flang/test/HLFIR/order_assignments/vector-subscripts-codegen.fir
new file mode 100644
index 0000000000000..fa510d2611f07
--- /dev/null
+++ b/flang/test/HLFIR/order_assignments/vector-subscripts-codegen.fir
@@ -0,0 +1,171 @@
+// Test code generation of hlfir.region_assign where the left-hand
+// side terminator is an hlfir.elemental_addr (Fortran assignments to
+// vector subscripted designators).
+// RUN: fir-opt %s --lower-hlfir-ordered-assignments | FileCheck %s
+
+func.func @simple(%arg0: !fir.ref<!fir.array<100xf32>> , %arg1: !fir.ref<!fir.array<10xi64>> , %arg2: !fir.ref<!fir.array<10xf32>> ) {
+ %c10 = arith.constant 10 : index
+ %c100 = arith.constant 100 : index
+ %0 = fir.shape %c100 : (index) -> !fir.shape<1>
+ %1:2 = hlfir.declare %arg0(%0) {uniq_name = "_QFsimpleEx"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
+ %2 = fir.shape %c10 : (index) -> !fir.shape<1>
+ %3:2 = hlfir.declare %arg1(%2) {uniq_name = "y"} : (!fir.ref<!fir.array<10xi64>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi64>>, !fir.ref<!fir.array<10xi64>>)
+ %4:2 = hlfir.declare %arg2(%2) {uniq_name = "z"} : (!fir.ref<!fir.array<10xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>)
+ hlfir.region_assign {
+ hlfir.yield %4#0 : !fir.ref<!fir.array<10xf32>>
+ } to {
+ hlfir.elemental_addr %2 : !fir.shape<1> {
+ ^bb0(%arg3: index):
+ %5 = hlfir.designate %3#0 (%arg3) : (!fir.ref<!fir.array<10xi64>>, index) -> !fir.ref<i64>
+ %6 = fir.load %5 : !fir.ref<i64>
+ %7 = hlfir.designate %1#0 (%6) : (!fir.ref<!fir.array<100xf32>>, i64) -> !fir.ref<f32>
+ hlfir.yield %7 : !fir.ref<f32>
+ }
+ }
+ return
+}
+// CHECK-LABEL: func.func @simple(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100xf32>>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.array<10xi64>>,
+// CHECK-SAME: %[[VAL_2:.*]]: !fir.ref<!fir.array<10xf32>>) {
+// CHECK: %[[VAL_3:.*]] = arith.constant 10 : index
+// CHECK: %[[VAL_4:.*]] = arith.constant 100 : index
+// CHECK: %[[VAL_5:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_5]]) {uniq_name = "_QFsimpleEx"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
+// CHECK: %[[VAL_7:.*]] = fir.shape %[[VAL_3]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_1]](%[[VAL_7]]) {uniq_name = "y"} : (!fir.ref<!fir.array<10xi64>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi64>>, !fir.ref<!fir.array<10xi64>>)
+// CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_2]](%[[VAL_7]]) {uniq_name = "z"} : (!fir.ref<!fir.array<10xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>)
+// CHECK: %[[VAL_10:.*]] = arith.constant 1 : index
+// CHECK: fir.do_loop %[[VAL_11:.*]] = %[[VAL_10]] to %[[VAL_3]] step %[[VAL_10]] {
+// CHECK: %[[VAL_12:.*]] = hlfir.designate %[[VAL_8]]#0 (%[[VAL_11]]) : (!fir.ref<!fir.array<10xi64>>, index) -> !fir.ref<i64>
+// CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_12]] : !fir.ref<i64>
+// CHECK: %[[VAL_14:.*]] = hlfir.designate %[[VAL_6]]#0 (%[[VAL_13]]) : (!fir.ref<!fir.array<100xf32>>, i64) -> !fir.ref<f32>
+// CHECK: %[[VAL_15:.*]] = hlfir.designate %[[VAL_9]]#0 (%[[VAL_11]]) : (!fir.ref<!fir.array<10xf32>>, index) -> !fir.ref<f32>
+// CHECK: hlfir.assign %[[VAL_15]] to %[[VAL_14]] : !fir.ref<f32>, !fir.ref<f32>
+// CHECK: }
+
+func.func @forall_vector_lhs(%arg0: !fir.ref<!fir.array<100x20xf32>> , %arg1: !fir.ref<!fir.array<10xi64>> ) {
+ %c20_i32 = arith.constant 20 : i32
+ %c1_i32 = arith.constant 1 : i32
+ %c10 = arith.constant 10 : index
+ %c100 = arith.constant 100 : index
+ %c20 = arith.constant 20 : index
+ %0 = fir.shape %c100, %c20 : (index, index) -> !fir.shape<2>
+ %1:2 = hlfir.declare %arg0(%0) {uniq_name = "x"} : (!fir.ref<!fir.array<100x20xf32>>, !fir.shape<2>) -> (!fir.ref<!fir.array<100x20xf32>>, !fir.ref<!fir.array<100x20xf32>>)
+ %2 = fir.shape %c10 : (index) -> !fir.shape<1>
+ %3:2 = hlfir.declare %arg1(%2) {uniq_name = "y"} : (!fir.ref<!fir.array<10xi64>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi64>>, !fir.ref<!fir.array<10xi64>>)
+ hlfir.forall lb {
+ hlfir.yield %c1_i32 : i32
+ } ub {
+ hlfir.yield %c20_i32 : i32
+ } (%arg2: i32) {
+ hlfir.region_assign {
+ %4 = hlfir.elemental %2 : (!fir.shape<1>) -> !hlfir.expr<10xf32> {
+ ^bb0(%arg3: index):
+ %5 = hlfir.designate %3#0 (%arg3) : (!fir.ref<!fir.array<10xi64>>, index) -> !fir.ref<i64>
+ %6 = fir.load %5 : !fir.ref<i64>
+ %7 = fir.convert %6 : (i64) -> f32
+ hlfir.yield_element %7 : f32
+ }
+ hlfir.yield %4 : !hlfir.expr<10xf32> cleanup {
+ hlfir.destroy %4 : !hlfir.expr<10xf32>
+ }
+ } to {
+ %4 = fir.convert %arg2 : (i32) -> i64
+ hlfir.elemental_addr %2 : !fir.shape<1> {
+ ^bb0(%arg3: index):
+ %5 = hlfir.designate %3#0 (%arg3) : (!fir.ref<!fir.array<10xi64>>, index) -> !fir.ref<i64>
+ %6 = fir.load %5 : !fir.ref<i64>
+ %7 = hlfir.designate %1#0 (%6, %4) : (!fir.ref<!fir.array<100x20xf32>>, i64, i64) -> !fir.ref<f32>
+ hlfir.yield %7 : !fir.ref<f32>
+ }
+ }
+ }
+ return
+}
+// CHECK-LABEL: func.func @forall_vector_lhs(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<100x20xf32>>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.array<10xi64>>) {
+// CHECK: %[[VAL_2:.*]] = arith.constant 20 : i32
+// CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32
+// CHECK: %[[VAL_4:.*]] = arith.constant 10 : index
+// CHECK: %[[VAL_5:.*]] = arith.constant 100 : index
+// CHECK: %[[VAL_6:.*]] = arith.constant 20 : index
+// CHECK: %[[VAL_7:.*]] = fir.shape %[[VAL_5]], %[[VAL_6]] : (index, index) -> !fir.shape<2>
+// CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_7]]) {uniq_name = "x"} : (!fir.ref<!fir.array<100x20xf32>>, !fir.shape<2>) -> (!fir.ref<!fir.array<100x20xf32>>, !fir.ref<!fir.array<100x20xf32>>)
+// CHECK: %[[VAL_9:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_1]](%[[VAL_9]]) {uniq_name = "y"} : (!fir.ref<!fir.array<10xi64>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi64>>, !fir.ref<!fir.array<10xi64>>)
+// CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_3]] : (i32) -> index
+// CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_2]] : (i32) -> index
+// CHECK: %[[VAL_13:.*]] = arith.constant 1 : index
+// CHECK: fir.do_loop %[[VAL_14:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_13]] {
+// CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (index) -> i32
+// CHECK: %[[VAL_16:.*]] = hlfir.elemental %[[VAL_9]] : (!fir.shape<1>) -> !hlfir.expr<10xf32> {
+// CHECK: ^bb0(%[[VAL_17:.*]]: index):
+// CHECK: %[[VAL_18:.*]] = hlfir.designate %[[VAL_10]]#0 (%[[VAL_17]]) : (!fir.ref<!fir.array<10xi64>>, index) -> !fir.ref<i64>
+// CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_18]] : !fir.ref<i64>
+// CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_19]] : (i64) -> f32
+// CHECK: hlfir.yield_element %[[VAL_20]] : f32
+// CHECK: }
+// CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_15]] : (i32) -> i64
+// CHECK: %[[VAL_22:.*]] = arith.constant 1 : index
+// CHECK: fir.do_loop %[[VAL_23:.*]] = %[[VAL_22]] to %[[VAL_4]] step %[[VAL_22]] {
+// CHECK: %[[VAL_24:.*]] = hlfir.designate %[[VAL_10]]#0 (%[[VAL_23]]) : (!fir.ref<!fir.array<10xi64>>, index) -> !fir.ref<i64>
+// CHECK: %[[VAL_25:.*]] = fir.load %[[VAL_24]] : !fir.ref<i64>
+// CHECK: %[[VAL_26:.*]] = hlfir.designate %[[VAL_8]]#0 (%[[VAL_25]], %[[VAL_21]]) : (!fir.ref<!fir.array<100x20xf32>>, i64, i64) -> !fir.ref<f32>
+// CHECK: %[[VAL_27:.*]] = hlfir.apply %[[VAL_28:.*]], %[[VAL_23]] : (!hlfir.expr<10xf32>, index) -> f32
+// CHECK: hlfir.assign %[[VAL_27]] to %[[VAL_26]] : f32, !fir.ref<f32>
+// CHECK: }
+// CHECK: hlfir.destroy %[[VAL_16]] : !hlfir.expr<10xf32>
+// CHECK: }
+
+func.func @where_vector_subscripts(%arg0: !fir.ref<!fir.array<10x!fir.logical<4>>> , %arg1: !fir.ref<!fir.array<100xf32>> , %arg2: !fir.ref<!fir.array<10xi64>> ) {
+ %cst = arith.constant 0.000000e+00 : f32
+ %c100 = arith.constant 100 : index
+ %c10 = arith.constant 10 : index
+ %0 = fir.shape %c10 : (index) -> !fir.shape<1>
+ %1:2 = hlfir.declare %arg0(%0) {uniq_name = "l"} : (!fir.ref<!fir.array<10x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10x!fir.logical<4>>>, !fir.ref<!fir.array<10x!fir.logical<4>>>)
+ %2 = fir.shape %c100 : (index) -> !fir.shape<1>
+ %3:2 = hlfir.declare %arg1(%2) {uniq_name = "x"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
+ %4:2 = hlfir.declare %arg2(%0) {uniq_name = "y"} : (!fir.ref<!fir.array<10xi64>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi64>>, !fir.ref<!fir.array<10xi64>>)
+ hlfir.where {
+ hlfir.yield %1#0 : !fir.ref<!fir.array<10x!fir.logical<4>>>
+ } do {
+ hlfir.region_assign {
+ hlfir.yield %cst : f32
+ } to {
+ hlfir.elemental_addr %0 : !fir.shape<1> {
+ ^bb0(%arg3: index):
+ %5 = hlfir.designate %4#0 (%arg3) : (!fir.ref<!fir.array<10xi64>>, index) -> !fir.ref<i64>
+ %6 = fir.load %5 : !fir.ref<i64>
+ %7 = hlfir.designate %3#0 (%6) : (!fir.ref<!fir.array<100xf32>>, i64) -> !fir.ref<f32>
+ hlfir.yield %7 : !fir.ref<f32>
+ }
+ }
+ }
+ return
+}
+// CHECK-LABEL: func.func @where_vector_subscripts(
+// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<10x!fir.logical<4>>>,
+// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.array<100xf32>>,
+// CHECK-SAME: %[[VAL_2:.*]]: !fir.ref<!fir.array<10xi64>>) {
+// CHECK: %[[VAL_3:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_4:.*]] = arith.constant 100 : index
+// CHECK: %[[VAL_5:.*]] = arith.constant 10 : index
+// CHECK: %[[VAL_6:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_6]]) {uniq_name = "l"} : (!fir.ref<!fir.array<10x!fir.logical<4>>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10x!fir.logical<4>>>, !fir.ref<!fir.array<10x!fir.logical<4>>>)
+// CHECK: %[[VAL_8:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1>
+// CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_1]](%[[VAL_8]]) {uniq_name = "x"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
+// CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_2]](%[[VAL_6]]) {uniq_name = "y"} : (!fir.ref<!fir.array<10xi64>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi64>>, !fir.ref<!fir.array<10xi64>>)
+// CHECK: %[[VAL_11:.*]] = arith.constant 1 : index
+// CHECK: fir.do_loop %[[VAL_12:.*]] = %[[VAL_11]] to %[[VAL_5]] step %[[VAL_11]] {
+// CHECK: %[[VAL_13:.*]] = hlfir.designate %[[VAL_7]]#0 (%[[VAL_12]]) : (!fir.ref<!fir.array<10x!fir.logical<4>>>, index) -> !fir.ref<!fir.logical<4>>
+// CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_13]] : !fir.ref<!fir.logical<4>>
+// CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (!fir.logical<4>) -> i1
+// CHECK: fir.if %[[VAL_15]] {
+// CHECK: %[[VAL_16:.*]] = hlfir.designate %[[VAL_10]]#0 (%[[VAL_12]]) : (!fir.ref<!fir.array<10xi64>>, index) -> !fir.ref<i64>
+// CHECK: %[[VAL_17:.*]] = fir.load %[[VAL_16]] : !fir.ref<i64>
+// CHECK: %[[VAL_18:.*]] = hlfir.designate %[[VAL_9]]#0 (%[[VAL_17]]) : (!fir.ref<!fir.array<100xf32>>, i64) -> !fir.ref<f32>
+// CHECK: hlfir.assign %[[VAL_3]] to %[[VAL_18]] : f32, !fir.ref<f32>
+// CHECK: }
+// CHECK: }
More information about the flang-commits mailing list