[flang-commits] [flang] 859d4a1 - [flang] Lower more cases of assignments on allocatable variables
Valentin Clement via flang-commits
flang-commits at lists.llvm.org
Wed Mar 2 11:05:32 PST 2022
Author: Valentin Clement
Date: 2022-03-02T20:05:23+01:00
New Revision: 859d4a18b5937a064ba3b7acfea0bea6cac2f074
URL: https://github.com/llvm/llvm-project/commit/859d4a18b5937a064ba3b7acfea0bea6cac2f074
DIFF: https://github.com/llvm/llvm-project/commit/859d4a18b5937a064ba3b7acfea0bea6cac2f074.diff
LOG: [flang] Lower more cases of assignments on allocatable variables
This patch enables the lowering of various allocatable assignments
for character and numeric types.
This patch is part of the upstreaming effort from fir-dev branch.
Depends on D120819
Reviewed By: PeteSteinfeld, schweitz
Differential Revision: https://reviews.llvm.org/D120820
Co-authored-by: Eric Schweitz <eschweitz at nvidia.com>
Co-authored-by: Jean Perier <jperier at nvidia.com>
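For illustration, the newly covered assignments look roughly like the sketch
below (a hedged example; the exact cases are in the updated
flang/test/Lower/allocatable-assignment.f90 test):

  subroutine sketch(x, c, y)
    real, allocatable :: x(:, :)
    character(:), allocatable :: c
    real :: y(:, :)
    x = y               ! numeric array RHS: x is (re)allocated to the shape of y, then copied
    c = "Hello world!"  ! deferred-length character: c is (re)allocated to LEN 12, then copied
  end subroutine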
Added:
Modified:
flang/include/flang/Lower/ConvertExpr.h
flang/include/flang/Lower/IntrinsicCall.h
flang/include/flang/Optimizer/Builder/FIRBuilder.h
flang/include/flang/Optimizer/Builder/Factory.h
flang/lib/Lower/ConvertExpr.cpp
flang/lib/Lower/ConvertVariable.cpp
flang/lib/Lower/IntrinsicCall.cpp
flang/lib/Optimizer/Builder/FIRBuilder.cpp
flang/test/Lower/allocatable-assignment.f90
Removed:
################################################################################
diff --git a/flang/include/flang/Lower/ConvertExpr.h b/flang/include/flang/Lower/ConvertExpr.h
index cb4a86945c726..f4bdeaa54ef69 100644
--- a/flang/include/flang/Lower/ConvertExpr.h
+++ b/flang/include/flang/Lower/ConvertExpr.h
@@ -149,6 +149,10 @@ inline mlir::NamedAttribute getAdaptToByRefAttr(fir::FirOpBuilder &builder) {
builder.getUnitAttr()};
}
+/// Generate max(\p value, 0) where \p value is a scalar integer.
+mlir::Value genMaxWithZero(fir::FirOpBuilder &builder, mlir::Location loc,
+ mlir::Value value);
+
} // namespace Fortran::lower
#endif // FORTRAN_LOWER_CONVERTEXPR_H
diff --git a/flang/include/flang/Lower/IntrinsicCall.h b/flang/include/flang/Lower/IntrinsicCall.h
index a85bd419a14e5..78f0fe4a486d6 100644
--- a/flang/include/flang/Lower/IntrinsicCall.h
+++ b/flang/include/flang/Lower/IntrinsicCall.h
@@ -84,6 +84,11 @@ fir::ExtendedValue getAbsentIntrinsicArgument();
// of intrinsic call lowering.
//===----------------------------------------------------------------------===//
+/// Generate maximum. There must be at least one argument and all arguments
+/// must have the same type.
+mlir::Value genMax(fir::FirOpBuilder &, mlir::Location,
+ llvm::ArrayRef<mlir::Value> args);
+
/// Generate power function x**y with the given expected
/// result type.
mlir::Value genPow(fir::FirOpBuilder &, mlir::Location, mlir::Type resultType,
diff --git a/flang/include/flang/Optimizer/Builder/FIRBuilder.h b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
index 2d0abeafaaa0a..20d657d9135d2 100644
--- a/flang/include/flang/Optimizer/Builder/FIRBuilder.h
+++ b/flang/include/flang/Optimizer/Builder/FIRBuilder.h
@@ -488,6 +488,32 @@ fir::ExtendedValue arraySectionElementToExtendedValue(
fir::FirOpBuilder &builder, mlir::Location loc,
const fir::ExtendedValue &array, mlir::Value element, mlir::Value slice);
+/// Assign \p rhs to \p lhs. Both \p rhs and \p lhs must be scalars. The
+/// assignment follows Fortran intrinsic assignment semantics (10.2.1.3).
+void genScalarAssignment(fir::FirOpBuilder &builder, mlir::Location loc,
+ const fir::ExtendedValue &lhs,
+ const fir::ExtendedValue &rhs);
+/// Assign \p rhs to \p lhs. Both \p rhs and \p lhs must be scalar derived
+/// types. The assignment follows Fortran intrinsic assignment semantics for
+/// derived types (10.2.1.3 point 13).
+void genRecordAssignment(fir::FirOpBuilder &builder, mlir::Location loc,
+ const fir::ExtendedValue &lhs,
+ const fir::ExtendedValue &rhs);
+
+/// Generate the, possibly dynamic, LEN of a CHARACTER. \p arrLoad determines
+/// the base array. After applying \p path, the result must be a reference to a
+/// `!fir.char` type object. \p substring must have 0, 1, or 2 members. The
+/// first member is the starting offset. The second is the ending offset.
+mlir::Value genLenOfCharacter(fir::FirOpBuilder &builder, mlir::Location loc,
+ fir::ArrayLoadOp arrLoad,
+ llvm::ArrayRef<mlir::Value> path,
+ llvm::ArrayRef<mlir::Value> substring);
+mlir::Value genLenOfCharacter(fir::FirOpBuilder &builder, mlir::Location loc,
+ fir::SequenceType seqTy, mlir::Value memref,
+ llvm::ArrayRef<mlir::Value> typeParams,
+ llvm::ArrayRef<mlir::Value> path,
+ llvm::ArrayRef<mlir::Value> substring);
+
} // namespace fir::factory
#endif // FORTRAN_OPTIMIZER_BUILDER_FIRBUILDER_H
diff --git a/flang/include/flang/Optimizer/Builder/Factory.h b/flang/include/flang/Optimizer/Builder/Factory.h
index d0d5510d6d9a3..f8337d2a47e59 100644
--- a/flang/include/flang/Optimizer/Builder/Factory.h
+++ b/flang/include/flang/Optimizer/Builder/Factory.h
@@ -31,6 +31,117 @@ constexpr llvm::StringRef attrFortranArrayOffsets() {
return "Fortran.offsets";
}
+/// Generate a character copy with optimized forms.
+///
+/// If the lengths are constant and equal, use load/store rather than a loop.
+/// Otherwise, if the lengths are constant and the input is longer than the
+/// output, generate a loop to move a truncated portion of the source to the
+/// destination. Finally, if the lengths are runtime values or the destination
+/// is longer than the source, move the entire source character and pad the
+/// destination with spaces as needed.
+template <typename B>
+void genCharacterCopy(mlir::Value src, mlir::Value srcLen, mlir::Value dst,
+ mlir::Value dstLen, B &builder, mlir::Location loc) {
+ auto srcTy =
+ fir::dyn_cast_ptrEleTy(src.getType()).template cast<fir::CharacterType>();
+ auto dstTy =
+ fir::dyn_cast_ptrEleTy(dst.getType()).template cast<fir::CharacterType>();
+ if (!srcLen && !dstLen && srcTy.getFKind() == dstTy.getFKind() &&
+ srcTy.getLen() == dstTy.getLen()) {
+ // same size, so just use load and store
+ auto load = builder.template create<fir::LoadOp>(loc, src);
+ builder.template create<fir::StoreOp>(loc, load, dst);
+ return;
+ }
+ auto zero = builder.template create<mlir::arith::ConstantIndexOp>(loc, 0);
+ auto one = builder.template create<mlir::arith::ConstantIndexOp>(loc, 1);
+ auto toArrayTy = [&](fir::CharacterType ty) {
+ return fir::ReferenceType::get(fir::SequenceType::get(
+ fir::SequenceType::ShapeRef{fir::SequenceType::getUnknownExtent()},
+ fir::CharacterType::getSingleton(ty.getContext(), ty.getFKind())));
+ };
+ auto toEleTy = [&](fir::ReferenceType ty) {
+ auto seqTy = ty.getEleTy().cast<fir::SequenceType>();
+ return seqTy.getEleTy().cast<fir::CharacterType>();
+ };
+ auto toCoorTy = [&](fir::ReferenceType ty) {
+ return fir::ReferenceType::get(toEleTy(ty));
+ };
+ if (!srcLen && !dstLen && srcTy.getLen() >= dstTy.getLen()) {
+ auto upper = builder.template create<mlir::arith::ConstantIndexOp>(
+ loc, dstTy.getLen() - 1);
+ auto loop = builder.template create<fir::DoLoopOp>(loc, zero, upper, one);
+ auto insPt = builder.saveInsertionPoint();
+ builder.setInsertionPointToStart(loop.getBody());
+ auto csrcTy = toArrayTy(srcTy);
+ auto csrc = builder.template create<fir::ConvertOp>(loc, csrcTy, src);
+ auto in = builder.template create<fir::CoordinateOp>(
+ loc, toCoorTy(csrcTy), csrc, loop.getInductionVar());
+ auto load = builder.template create<fir::LoadOp>(loc, in);
+ auto cdstTy = toArrayTy(dstTy);
+ auto cdst = builder.template create<fir::ConvertOp>(loc, cdstTy, dst);
+ auto out = builder.template create<fir::CoordinateOp>(
+ loc, toCoorTy(cdstTy), cdst, loop.getInductionVar());
+ mlir::Value cast =
+ srcTy.getFKind() == dstTy.getFKind()
+ ? load.getResult()
+ : builder
+ .template create<fir::ConvertOp>(loc, toEleTy(cdstTy), load)
+ .getResult();
+ builder.template create<fir::StoreOp>(loc, cast, out);
+ builder.restoreInsertionPoint(insPt);
+ return;
+ }
+ auto minusOne = [&](mlir::Value v) -> mlir::Value {
+ return builder.template create<mlir::arith::SubIOp>(
+ loc, builder.template create<fir::ConvertOp>(loc, one.getType(), v),
+ one);
+ };
+ mlir::Value len = dstLen ? minusOne(dstLen)
+ : builder
+ .template create<mlir::arith::ConstantIndexOp>(
+ loc, dstTy.getLen() - 1)
+ .getResult();
+ auto loop = builder.template create<fir::DoLoopOp>(loc, zero, len, one);
+ auto insPt = builder.saveInsertionPoint();
+ builder.setInsertionPointToStart(loop.getBody());
+ mlir::Value slen =
+ srcLen
+ ? builder.template create<fir::ConvertOp>(loc, one.getType(), srcLen)
+ .getResult()
+ : builder
+ .template create<mlir::arith::ConstantIndexOp>(loc,
+ srcTy.getLen())
+ .getResult();
+ auto cond = builder.template create<mlir::arith::CmpIOp>(
+ loc, mlir::arith::CmpIPredicate::slt, loop.getInductionVar(), slen);
+ auto ifOp = builder.template create<fir::IfOp>(loc, cond, /*withElse=*/true);
+ builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
+ auto csrcTy = toArrayTy(srcTy);
+ auto csrc = builder.template create<fir::ConvertOp>(loc, csrcTy, src);
+ auto in = builder.template create<fir::CoordinateOp>(
+ loc, toCoorTy(csrcTy), csrc, loop.getInductionVar());
+ auto load = builder.template create<fir::LoadOp>(loc, in);
+ auto cdstTy = toArrayTy(dstTy);
+ auto cdst = builder.template create<fir::ConvertOp>(loc, cdstTy, dst);
+ auto out = builder.template create<fir::CoordinateOp>(
+ loc, toCoorTy(cdstTy), cdst, loop.getInductionVar());
+ mlir::Value cast =
+ srcTy.getFKind() == dstTy.getFKind()
+ ? load.getResult()
+ : builder.template create<fir::ConvertOp>(loc, toEleTy(cdstTy), load)
+ .getResult();
+ builder.template create<fir::StoreOp>(loc, cast, out);
+ builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
+ auto space = builder.template create<fir::StringLitOp>(
+ loc, toEleTy(cdstTy), llvm::ArrayRef<char>{' '});
+ auto cdst2 = builder.template create<fir::ConvertOp>(loc, cdstTy, dst);
+ auto out2 = builder.template create<fir::CoordinateOp>(
+ loc, toCoorTy(cdstTy), cdst2, loop.getInductionVar());
+ builder.template create<fir::StoreOp>(loc, space, out2);
+ builder.restoreInsertionPoint(insPt);
+}
+
/// Get extents from fir.shape/fir.shape_shift op. Empty result if
/// \p shapeVal is empty or is a fir.shift.
inline std::vector<mlir::Value> getExtents(mlir::Value shapeVal) {
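At the Fortran source level, the three code shapes genCharacterCopy produces
correspond to ordinary intrinsic character assignment; a minimal sketch of the
truncation and padding cases:

  subroutine char_copy_sketch()
    character(4) :: short
    character(8) :: long
    short = "abcdefgh"   ! source longer than destination: only "abcd" is stored
    long  = "abcd"       ! destination longer than source: copy it all, blank-pad the rest
  end subroutine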
diff --git a/flang/lib/Lower/ConvertExpr.cpp b/flang/lib/Lower/ConvertExpr.cpp
index be32d99814d60..73889cbec6f4f 100644
--- a/flang/lib/Lower/ConvertExpr.cpp
+++ b/flang/lib/Lower/ConvertExpr.cpp
@@ -1753,6 +1753,59 @@ convertToArrayBoxValue(mlir::Location loc, fir::FirOpBuilder &builder,
return fir::ArrayBoxValue(val, extents);
}
+//===----------------------------------------------------------------------===//
+//
+// Lowering of scalar expressions in an explicit iteration space context.
+//
+//===----------------------------------------------------------------------===//
+
+// Shared code for creating a copy of a derived type element. This function is
+// called from a continuation.
+inline static fir::ArrayAmendOp
+createDerivedArrayAmend(mlir::Location loc, fir::ArrayLoadOp destLoad,
+ fir::FirOpBuilder &builder, fir::ArrayAccessOp destAcc,
+ const fir::ExtendedValue &elementExv, mlir::Type eleTy,
+ mlir::Value innerArg) {
+ if (destLoad.getTypeparams().empty()) {
+ fir::factory::genRecordAssignment(builder, loc, destAcc, elementExv);
+ } else {
+ auto boxTy = fir::BoxType::get(eleTy);
+ auto toBox = builder.create<fir::EmboxOp>(loc, boxTy, destAcc.getResult(),
+ mlir::Value{}, mlir::Value{},
+ destLoad.getTypeparams());
+ auto fromBox = builder.create<fir::EmboxOp>(
+ loc, boxTy, fir::getBase(elementExv), mlir::Value{}, mlir::Value{},
+ destLoad.getTypeparams());
+ fir::factory::genRecordAssignment(builder, loc, fir::BoxValue(toBox),
+ fir::BoxValue(fromBox));
+ }
+ return builder.create<fir::ArrayAmendOp>(loc, innerArg.getType(), innerArg,
+ destAcc);
+}
+
+inline static fir::ArrayAmendOp
+createCharArrayAmend(mlir::Location loc, fir::FirOpBuilder &builder,
+ fir::ArrayAccessOp dstOp, mlir::Value &dstLen,
+ const fir::ExtendedValue &srcExv, mlir::Value innerArg,
+ llvm::ArrayRef<mlir::Value> bounds) {
+ fir::CharBoxValue dstChar(dstOp, dstLen);
+ fir::factory::CharacterExprHelper helper{builder, loc};
+ if (!bounds.empty()) {
+ dstChar = helper.createSubstring(dstChar, bounds);
+ fir::factory::genCharacterCopy(fir::getBase(srcExv), fir::getLen(srcExv),
+ dstChar.getAddr(), dstChar.getLen(), builder,
+ loc);
+ // Update the LEN to the substring's LEN.
+ dstLen = dstChar.getLen();
+ }
+ // For a CHARACTER, we generate the element assignment loops inline.
+ helper.createAssign(fir::ExtendedValue{dstChar}, srcExv);
+ // Mark this array element as amended.
+ mlir::Type ty = innerArg.getType();
+ auto amend = builder.create<fir::ArrayAmendOp>(loc, ty, innerArg, dstOp);
+ return amend;
+}
+
//===----------------------------------------------------------------------===//
//
// Lowering of array expressions.
@@ -2435,8 +2488,37 @@ class ArrayExprLowering {
TODO(getLoc(), "genarr Component");
}
+ /// Array reference with subscripts. If this has rank > 0, this is a form
+ /// of an array section (slice).
+ ///
+ /// There are two "slicing" primitives that may be applied on a dimension by
+ /// dimension basis: (1) triple notation and (2) vector addressing. Since
+ /// dimensions can be selectively sliced, some dimensions may contain
+ /// regular scalar expressions and those dimensions do not participate in
+ /// the array expression evaluation.
CC genarr(const Fortran::evaluate::ArrayRef &x, ComponentPath &components) {
- TODO(getLoc(), "genar ArrayRef");
+ if (explicitSpaceIsActive()) {
+ TODO(getLoc(), "genarr ArrayRef explicitSpace");
+ } else {
+ if (Fortran::lower::isRankedArrayAccess(x)) {
+ components.reversePath.push_back(&x);
+ return genImplicitArrayAccess(x.base(), components);
+ }
+ }
+ bool atEnd = pathIsEmpty(components);
+ components.reversePath.push_back(&x);
+ auto result = genarr(x.base(), components);
+ if (components.applied)
+ return result;
+ mlir::Location loc = getLoc();
+ if (atEnd) {
+ if (x.Rank() == 0)
+ return genAsScalar(x);
+ fir::emitFatalError(loc, "expected scalar");
+ }
+ return [=](IterSpace) -> ExtValue {
+ fir::emitFatalError(loc, "reached arrayref with path");
+ };
}
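As a reminder, the two slicing primitives named in the comment above look like
this in Fortran (an illustrative sketch, not taken from the tests):

  subroutine slice_sketch(a, v)
    real    :: a(10, 10), b(5)
    integer :: v(5)
    b = a(1:9:2, 3)   ! triple notation on dimension 1; dimension 2 is a scalar subscript
    b = a(4, v)       ! vector addressing on dimension 2 through the index vector v
  end subroutine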
CC genarr(const Fortran::evaluate::CoarrayRef &x, ComponentPath &components) {
@@ -2454,6 +2536,10 @@ class ArrayExprLowering {
x.u);
}
+ bool pathIsEmpty(const ComponentPath &components) {
+ return components.reversePath.empty();
+ }
+
CC genarr(const Fortran::evaluate::ComplexPart &x,
ComponentPath &components) {
TODO(getLoc(), "genarr ComplexPart");
@@ -2666,7 +2752,30 @@ class ArrayExprLowering {
mlir::Type arrTy = innerArg.getType();
mlir::Type eleTy = fir::applyPathToType(arrTy, iterSpace.iterVec());
if (isAdjustedArrayElementType(eleTy)) {
- TODO(loc, "isAdjustedArrayElementType");
+      // The elemental update is in the memref domain. Under these semantics,
+ // we must always copy the computed new element from its location in
+ // memory into the destination array.
+ mlir::Type resRefTy = builder.getRefType(eleTy);
+ // Get a reference to the array element to be amended.
+ auto arrayOp = builder.create<fir::ArrayAccessOp>(
+ loc, resRefTy, innerArg, iterSpace.iterVec(),
+ destination.getTypeparams());
+ if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
+ llvm::SmallVector<mlir::Value> substringBounds;
+ populateBounds(substringBounds, substring);
+ mlir::Value dstLen = fir::factory::genLenOfCharacter(
+ builder, loc, destination, iterSpace.iterVec(), substringBounds);
+ fir::ArrayAmendOp amend = createCharArrayAmend(
+ loc, builder, arrayOp, dstLen, exv, innerArg, substringBounds);
+ return abstractArrayExtValue(amend, dstLen);
+ }
+ if (fir::isa_derived(eleTy)) {
+ fir::ArrayAmendOp amend = createDerivedArrayAmend(
+ loc, destination, builder, arrayOp, exv, eleTy, innerArg);
+ return abstractArrayExtValue(amend /*FIXME: typeparams?*/);
+ }
+ assert(eleTy.isa<fir::SequenceType>() && "must be an array");
+ TODO(loc, "array (as element) assignment");
}
// By value semantics. The element is being assigned by value.
mlir::Value ele = builder.createConvert(loc, eleTy, fir::getBase(exv));
@@ -2987,3 +3096,15 @@ void Fortran::lower::createAllocatableArrayAssignment(
ArrayExprLowering::lowerAllocatableArrayAssignment(
converter, symMap, stmtCtx, lhs, rhs, explicitSpace, implicitSpace);
}
+
+mlir::Value Fortran::lower::genMaxWithZero(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ mlir::Value value) {
+ mlir::Value zero = builder.createIntegerConstant(loc, value.getType(), 0);
+ if (mlir::Operation *definingOp = value.getDefiningOp())
+ if (auto cst = mlir::dyn_cast<mlir::arith::ConstantOp>(definingOp))
+ if (auto intAttr = cst.getValue().dyn_cast<mlir::IntegerAttr>())
+ return intAttr.getInt() < 0 ? zero : value;
+ return Fortran::lower::genMax(builder, loc,
+ llvm::SmallVector<mlir::Value>{value, zero});
+}
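genMaxWithZero is used for character length specifications, where F2018
7.4.4.2 point 5 requires a negative value to be treated as zero; a minimal
Fortran sketch of the situation it guards against:

  subroutine len_clamp_sketch(n)
    integer :: n
    character(n - 10) :: c   ! if n < 10 at run time, LEN(c) is 0, not a negative value
  end subroutine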
diff --git a/flang/lib/Lower/ConvertVariable.cpp b/flang/lib/Lower/ConvertVariable.cpp
index 3341feae7c4fb..ba2d2e692fcf5 100644
--- a/flang/lib/Lower/ConvertVariable.cpp
+++ b/flang/lib/Lower/ConvertVariable.cpp
@@ -830,8 +830,17 @@ void Fortran::lower::mapSymbolAttributes(
}
};
- // For symbols reaching this point, all properties are constant and can be
- // read/computed already into ssa values.
+  // Lower the length expression for non-deferred and non-dummy assumed-length
+  // characters.
+ auto genExplicitCharLen =
+ [&](llvm::Optional<Fortran::lower::SomeExpr> charLen) -> mlir::Value {
+ if (!charLen)
+ fir::emitFatalError(loc, "expected explicit character length");
+ mlir::Value rawLen = genValue(*charLen);
+ // If the length expression is negative, the length is zero. See
+ // F2018 7.4.4.2 point 5.
+ return genMaxWithZero(builder, loc, rawLen);
+ };
ba.match(
//===--------------------------------------------------------------===//
@@ -976,13 +985,126 @@ void Fortran::lower::mapSymbolAttributes(
//===--------------------------------------------------------------===//
[&](const Fortran::lower::details::StaticArrayStaticChar &x) {
- TODO(loc, "StaticArrayStaticChar variable lowering");
+ // if element type is a CHARACTER, determine the LEN value
+ auto charLen = x.charLen();
+ mlir::Value addr;
+ mlir::Value len;
+ if (isDummy) {
+ Fortran::lower::SymbolBox symBox = symMap.lookupSymbol(sym);
+ std::pair<mlir::Value, mlir::Value> unboxchar =
+ charHelp.createUnboxChar(symBox.getAddr());
+ addr = unboxchar.first;
+ // Set/override LEN with a constant
+ len = builder.createIntegerConstant(loc, idxTy, charLen);
+ } else {
+ // local CHARACTER variable
+ len = builder.createIntegerConstant(loc, idxTy, charLen);
+ }
+
+ // object shape is constant
+ mlir::Type castTy = builder.getRefType(converter.genType(var));
+ if (addr)
+ addr = builder.createConvert(loc, castTy, addr);
+
+ if (x.lboundAllOnes()) {
+ // if lower bounds are all ones, build simple shaped object
+ llvm::SmallVector<mlir::Value> shape;
+ for (int64_t i : x.shapes)
+ shape.push_back(genExtentValue(builder, loc, idxTy, i));
+ mlir::Value local =
+ isDummy ? addr : createNewLocal(converter, loc, var, preAlloc);
+ symMap.addCharSymbolWithShape(sym, local, len, shape, isDummy);
+ return;
+ }
+
+ // if object is an array process the lower bound and extent values
+ llvm::SmallVector<mlir::Value> extents;
+ llvm::SmallVector<mlir::Value> lbounds;
+ // construct constants and populate `bounds`
+ for (auto [fst, snd] : llvm::zip(x.lbounds, x.shapes)) {
+ lbounds.emplace_back(builder.createIntegerConstant(loc, idxTy, fst));
+ extents.emplace_back(genExtentValue(builder, loc, idxTy, snd));
+ }
+
+ if (isDummy) {
+ symMap.addCharSymbolWithBounds(sym, addr, len, extents, lbounds,
+ true);
+ return;
+ }
+ // local CHARACTER array with computed bounds
+ assert(Fortran::lower::isExplicitShape(sym));
+ mlir::Value local =
+ createNewLocal(converter, loc, var, preAlloc, extents);
+ symMap.addCharSymbolWithBounds(sym, local, len, extents, lbounds);
},
//===--------------------------------------------------------------===//
[&](const Fortran::lower::details::StaticArrayDynamicChar &x) {
- TODO(loc, "StaticArrayDynamicChar variable lowering");
+ mlir::Value addr;
+ mlir::Value len;
+ [[maybe_unused]] bool mustBeDummy = false;
+ auto charLen = x.charLen();
+ // if element type is a CHARACTER, determine the LEN value
+ if (isDummy) {
+ Fortran::lower::SymbolBox symBox = symMap.lookupSymbol(sym);
+ std::pair<mlir::Value, mlir::Value> unboxchar =
+ charHelp.createUnboxChar(symBox.getAddr());
+ addr = unboxchar.first;
+ if (charLen) {
+ // Set/override LEN with an expression
+ len = genExplicitCharLen(charLen);
+ } else {
+ // LEN is from the boxchar
+ len = unboxchar.second;
+ mustBeDummy = true;
+ }
+ } else {
+ // local CHARACTER variable
+ len = genExplicitCharLen(charLen);
+ }
+ llvm::SmallVector<mlir::Value> lengths = {len};
+
+ // cast to the known constant parts from the declaration
+ mlir::Type castTy = builder.getRefType(converter.genType(var));
+ if (addr)
+ addr = builder.createConvert(loc, castTy, addr);
+
+ if (x.lboundAllOnes()) {
+ // if lower bounds are all ones, build simple shaped object
+ llvm::SmallVector<mlir::Value> shape;
+ for (int64_t i : x.shapes)
+ shape.push_back(genExtentValue(builder, loc, idxTy, i));
+ if (isDummy) {
+ symMap.addCharSymbolWithShape(sym, addr, len, shape, true);
+ return;
+ }
+ // local CHARACTER array with constant size
+ mlir::Value local = createNewLocal(converter, loc, var, preAlloc,
+ llvm::None, lengths);
+ symMap.addCharSymbolWithShape(sym, local, len, shape);
+ return;
+ }
+
+ // if object is an array process the lower bound and extent values
+ llvm::SmallVector<mlir::Value> extents;
+ llvm::SmallVector<mlir::Value> lbounds;
+
+ // construct constants and populate `bounds`
+ for (auto [fst, snd] : llvm::zip(x.lbounds, x.shapes)) {
+ lbounds.emplace_back(builder.createIntegerConstant(loc, idxTy, fst));
+ extents.emplace_back(genExtentValue(builder, loc, idxTy, snd));
+ }
+ if (isDummy) {
+ symMap.addCharSymbolWithBounds(sym, addr, len, extents, lbounds,
+ true);
+ return;
+ }
+ // local CHARACTER array with computed bounds
+ assert((!mustBeDummy) && (Fortran::lower::isExplicitShape(sym)));
+ mlir::Value local =
+ createNewLocal(converter, loc, var, preAlloc, llvm::None, lengths);
+ symMap.addCharSymbolWithBounds(sym, local, len, extents, lbounds);
},
//===--------------------------------------------------------------===//
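For reference, the two variable categories handled above correspond to
declarations of roughly this shape (a hedged sketch; the category names come
from the lowering code):

  subroutine categories_sketch(n)
    integer :: n
    character(8) :: c1(2, 3)   ! StaticArrayStaticChar: constant shape and constant LEN
    character(n) :: c2(2, 3)   ! StaticArrayDynamicChar: constant shape, LEN from an expression
  end subroutine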
diff --git a/flang/lib/Lower/IntrinsicCall.cpp b/flang/lib/Lower/IntrinsicCall.cpp
index 09834ee42ee6a..5fe0a1149b6ab 100644
--- a/flang/lib/Lower/IntrinsicCall.cpp
+++ b/flang/lib/Lower/IntrinsicCall.cpp
@@ -15,10 +15,12 @@
#include "flang/Lower/IntrinsicCall.h"
#include "flang/Common/static-multimap-view.h"
+#include "flang/Lower/StatementContext.h"
#include "flang/Lower/SymbolMap.h"
#include "flang/Lower/Todo.h"
#include "flang/Optimizer/Builder/Complex.h"
#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Builder/MutableBox.h"
#include "flang/Optimizer/Builder/Runtime/RTBuilder.h"
#include "flang/Optimizer/Support/FatalError.h"
#include "llvm/Support/CommandLine.h"
@@ -28,6 +30,49 @@
#define PGMATH_DECLARE
#include "flang/Evaluate/pgmath.h.inc"
+/// Enums used to templatize and share lowering of MIN and MAX.
+enum class Extremum { Min, Max };
+
+// There are different ways to deal with NaNs in MIN and MAX.
+// Known existing behaviors are listed below and can be selected for
+// f18 MIN/MAX implementation.
+enum class ExtremumBehavior {
+  // Note: the signaling/quiet aspect of NaNs in the behaviors below is
+ // not described because there is no way to control/observe such aspect in
+ // MLIR/LLVM yet. The IEEE behaviors come with requirements regarding this
+ // aspect that are therefore currently not enforced. In the descriptions
+  // below, NaNs can be signaling or quiet. Returned NaNs may be signaling
+  // if one of the input NaNs was signaling, but it cannot be guaranteed either.
+ // Existing compilers using an IEEE behavior (gfortran) also do not fulfill
+ // signaling/quiet requirements.
+ IeeeMinMaximumNumber,
+ // IEEE minimumNumber/maximumNumber behavior (754-2019, section 9.6):
+  // If one of the arguments is a number and the other is NaN, return the
+  // number. If both arguments are NaN, return NaN.
+ // Compilers: gfortran.
+ IeeeMinMaximum,
+ // IEEE minimum/maximum behavior (754-2019, section 9.6):
+  // If one of the arguments is NaN, return NaN.
+ MinMaxss,
+ // x86 minss/maxss behavior:
+ // If the second argument is a number and the other is NaN, return the number.
+ // In all other cases where at least one operand is NaN, return NaN.
+ // Compilers: xlf (only for MAX), ifort, pgfortran -nollvm, and nagfor.
+ PgfortranLlvm,
+ // "Opposite of" x86 minss/maxss behavior:
+ // If the first argument is a number and the other is NaN, return the
+ // number.
+ // In all other cases where at least one operand is NaN, return NaN.
+ // Compilers: xlf (only for MIN), and pgfortran (with llvm).
+ IeeeMinMaxNum
+ // IEEE minNum/maxNum behavior (754-2008, section 5.3.1):
+ // TODO: Not implemented.
+ // It is the only behavior where the signaling/quiet aspect of a NaN argument
+ // impacts if the result should be NaN or the argument that is a number.
+ // LLVM/MLIR do not provide ways to observe this aspect, so it is not
+ // possible to implement it without some target dependent runtime.
+};
+
/// This file implements lowering of Fortran intrinsic procedures.
/// Intrinsics are lowered to a mix of FIR and MLIR operations as
/// well as call to runtime functions or LLVM intrinsics.
@@ -81,6 +126,8 @@ struct IntrinsicLibrary {
/// if the argument is an integer, into llvm intrinsics if the argument is
/// real and to the `hypot` math routine if the argument is of complex type.
mlir::Value genAbs(mlir::Type, llvm::ArrayRef<mlir::Value>);
+ template <Extremum, ExtremumBehavior>
+ mlir::Value genExtremum(mlir::Type, llvm::ArrayRef<mlir::Value>);
/// Lowering for the IAND intrinsic. The IAND intrinsic expects two arguments
/// in the llvm::ArrayRef.
mlir::Value genIand(mlir::Type, llvm::ArrayRef<mlir::Value>);
@@ -600,6 +647,81 @@ mlir::Value IntrinsicLibrary::genIand(mlir::Type resultType,
return builder.create<mlir::arith::AndIOp>(loc, args[0], args[1]);
}
+// Compare two FIR values and return boolean result as i1.
+template <Extremum extremum, ExtremumBehavior behavior>
+static mlir::Value createExtremumCompare(mlir::Location loc,
+ fir::FirOpBuilder &builder,
+ mlir::Value left, mlir::Value right) {
+ static constexpr mlir::arith::CmpIPredicate integerPredicate =
+ extremum == Extremum::Max ? mlir::arith::CmpIPredicate::sgt
+ : mlir::arith::CmpIPredicate::slt;
+ static constexpr mlir::arith::CmpFPredicate orderedCmp =
+ extremum == Extremum::Max ? mlir::arith::CmpFPredicate::OGT
+ : mlir::arith::CmpFPredicate::OLT;
+ mlir::Type type = left.getType();
+ mlir::Value result;
+ if (fir::isa_real(type)) {
+    // Note: the signaling/quiet aspect of the result required by IEEE
+    // cannot currently be obtained with LLVM without an ad-hoc runtime.
+ if constexpr (behavior == ExtremumBehavior::IeeeMinMaximumNumber) {
+ // Return the number if one of the inputs is NaN and the other is
+ // a number.
+ auto leftIsResult =
+ builder.create<mlir::arith::CmpFOp>(loc, orderedCmp, left, right);
+ auto rightIsNan = builder.create<mlir::arith::CmpFOp>(
+ loc, mlir::arith::CmpFPredicate::UNE, right, right);
+ result =
+ builder.create<mlir::arith::OrIOp>(loc, leftIsResult, rightIsNan);
+ } else if constexpr (behavior == ExtremumBehavior::IeeeMinMaximum) {
+      // Always return NaN if one of the inputs is NaN.
+ auto leftIsResult =
+ builder.create<mlir::arith::CmpFOp>(loc, orderedCmp, left, right);
+ auto leftIsNan = builder.create<mlir::arith::CmpFOp>(
+ loc, mlir::arith::CmpFPredicate::UNE, left, left);
+ result = builder.create<mlir::arith::OrIOp>(loc, leftIsResult, leftIsNan);
+ } else if constexpr (behavior == ExtremumBehavior::MinMaxss) {
+ // If the left is a NaN, return the right whatever it is.
+ result =
+ builder.create<mlir::arith::CmpFOp>(loc, orderedCmp, left, right);
+ } else if constexpr (behavior == ExtremumBehavior::PgfortranLlvm) {
+      // If one of the operands is a NaN, return left whatever it is.
+ static constexpr auto unorderedCmp =
+ extremum == Extremum::Max ? mlir::arith::CmpFPredicate::UGT
+ : mlir::arith::CmpFPredicate::ULT;
+ result =
+ builder.create<mlir::arith::CmpFOp>(loc, unorderedCmp, left, right);
+ } else {
+ // TODO: ieeeMinNum/ieeeMaxNum
+ static_assert(behavior == ExtremumBehavior::IeeeMinMaxNum,
+ "ieeeMinNum/ieeeMaxNum behavior not implemented");
+ }
+ } else if (fir::isa_integer(type)) {
+ result =
+ builder.create<mlir::arith::CmpIOp>(loc, integerPredicate, left, right);
+ } else if (fir::isa_char(type)) {
+    // TODO: CHARACTER min and max are tricky because the result
+    // length is the length of the longest argument, so a temporary
+    // may be needed.
+ TODO(loc, "CHARACTER min and max");
+ }
+ assert(result && "result must be defined");
+ return result;
+}
+
+// MIN and MAX
+template <Extremum extremum, ExtremumBehavior behavior>
+mlir::Value IntrinsicLibrary::genExtremum(mlir::Type,
+ llvm::ArrayRef<mlir::Value> args) {
+ assert(args.size() >= 1);
+ mlir::Value result = args[0];
+ for (auto arg : args.drop_front()) {
+ mlir::Value mask =
+ createExtremumCompare<extremum, behavior>(loc, builder, result, arg);
+ result = builder.create<mlir::arith::SelectOp>(loc, mask, result, arg);
+ }
+ return result;
+}
+
//===----------------------------------------------------------------------===//
// Argument lowering rules interface
//===----------------------------------------------------------------------===//
@@ -639,6 +761,15 @@ Fortran::lower::genIntrinsicCall(fir::FirOpBuilder &builder, mlir::Location loc,
args);
}
+mlir::Value Fortran::lower::genMax(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ llvm::ArrayRef<mlir::Value> args) {
+ assert(args.size() > 0 && "max requires at least one argument");
+ return IntrinsicLibrary{builder, loc}
+ .genExtremum<Extremum::Max, ExtremumBehavior::MinMaxss>(args[0].getType(),
+ args);
+}
+
mlir::Value Fortran::lower::genPow(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Type type,
mlir::Value x, mlir::Value y) {
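Since genMax above selects Extremum::Max with ExtremumBehavior::MinMaxss, a
NaN operand behaves as in the following Fortran sketch (illustrative only; the
Fortran standard leaves MAX with NaN arguments processor dependent):

  program nan_extremum_sketch
    use ieee_arithmetic, only: ieee_value, ieee_quiet_nan
    real :: nan, r
    nan = ieee_value(0.0, ieee_quiet_nan)
    r = max(1.0, nan)   ! the second operand is NaN, so the MinMaxss result is NaN
    r = max(nan, 1.0)   ! the second operand is a number, so the MinMaxss result is 1.0
  end program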
diff --git a/flang/lib/Optimizer/Builder/FIRBuilder.cpp b/flang/lib/Optimizer/Builder/FIRBuilder.cpp
index b1271e583e3e3..87f9c42f9a304 100644
--- a/flang/lib/Optimizer/Builder/FIRBuilder.cpp
+++ b/flang/lib/Optimizer/Builder/FIRBuilder.cpp
@@ -866,3 +866,210 @@ mlir::Value fir::factory::createZeroValue(fir::FirOpBuilder &builder,
fir::emitFatalError(loc, "internal: trying to generate zero value of non "
"numeric or logical type");
}
+
+void fir::factory::genScalarAssignment(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ const fir::ExtendedValue &lhs,
+ const fir::ExtendedValue &rhs) {
+ assert(lhs.rank() == 0 && rhs.rank() == 0 && "must be scalars");
+ auto type = fir::unwrapSequenceType(
+ fir::unwrapPassByRefType(fir::getBase(lhs).getType()));
+ if (type.isa<fir::CharacterType>()) {
+ const fir::CharBoxValue *toChar = lhs.getCharBox();
+ const fir::CharBoxValue *fromChar = rhs.getCharBox();
+ assert(toChar && fromChar);
+ fir::factory::CharacterExprHelper helper{builder, loc};
+ helper.createAssign(fir::ExtendedValue{*toChar},
+ fir::ExtendedValue{*fromChar});
+ } else if (type.isa<fir::RecordType>()) {
+ fir::factory::genRecordAssignment(builder, loc, lhs, rhs);
+ } else {
+ assert(!fir::hasDynamicSize(type));
+ auto rhsVal = fir::getBase(rhs);
+ if (fir::isa_ref_type(rhsVal.getType()))
+ rhsVal = builder.create<fir::LoadOp>(loc, rhsVal);
+ mlir::Value lhsAddr = fir::getBase(lhs);
+ rhsVal = builder.createConvert(loc, fir::unwrapRefType(lhsAddr.getType()),
+ rhsVal);
+ builder.create<fir::StoreOp>(loc, rhsVal, lhsAddr);
+ }
+}
+
+static void genComponentByComponentAssignment(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ const fir::ExtendedValue &lhs,
+ const fir::ExtendedValue &rhs) {
+ auto baseType = fir::unwrapPassByRefType(fir::getBase(lhs).getType());
+ auto lhsType = baseType.dyn_cast<fir::RecordType>();
+ assert(lhsType && "lhs must be a scalar record type");
+ auto fieldIndexType = fir::FieldType::get(lhsType.getContext());
+ for (auto [fieldName, fieldType] : lhsType.getTypeList()) {
+ assert(!fir::hasDynamicSize(fieldType));
+ mlir::Value field = builder.create<fir::FieldIndexOp>(
+ loc, fieldIndexType, fieldName, lhsType, fir::getTypeParams(lhs));
+ auto fieldRefType = builder.getRefType(fieldType);
+ mlir::Value fromCoor = builder.create<fir::CoordinateOp>(
+ loc, fieldRefType, fir::getBase(rhs), field);
+ mlir::Value toCoor = builder.create<fir::CoordinateOp>(
+ loc, fieldRefType, fir::getBase(lhs), field);
+ llvm::Optional<fir::DoLoopOp> outerLoop;
+ if (auto sequenceType = fieldType.dyn_cast<fir::SequenceType>()) {
+ // Create loops to assign array components elements by elements.
+ // Note that, since these are components, they either do not overlap,
+ // or are the same and exactly overlap. They also have compile time
+ // constant shapes.
+ mlir::Type idxTy = builder.getIndexType();
+ llvm::SmallVector<mlir::Value> indices;
+ mlir::Value zero = builder.createIntegerConstant(loc, idxTy, 0);
+ mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1);
+ for (auto extent : llvm::reverse(sequenceType.getShape())) {
+ // TODO: add zero size test !
+ mlir::Value ub = builder.createIntegerConstant(loc, idxTy, extent - 1);
+ auto loop = builder.create<fir::DoLoopOp>(loc, zero, ub, one);
+ if (!outerLoop)
+ outerLoop = loop;
+ indices.push_back(loop.getInductionVar());
+ builder.setInsertionPointToStart(loop.getBody());
+ }
+ // Set indices in column-major order.
+ std::reverse(indices.begin(), indices.end());
+ auto elementRefType = builder.getRefType(sequenceType.getEleTy());
+ toCoor = builder.create<fir::CoordinateOp>(loc, elementRefType, toCoor,
+ indices);
+ fromCoor = builder.create<fir::CoordinateOp>(loc, elementRefType,
+ fromCoor, indices);
+ }
+ auto fieldElementType = fir::unwrapSequenceType(fieldType);
+ if (fieldElementType.isa<fir::BoxType>()) {
+ assert(fieldElementType.cast<fir::BoxType>()
+ .getEleTy()
+ .isa<fir::PointerType>() &&
+ "allocatable require deep copy");
+ auto fromPointerValue = builder.create<fir::LoadOp>(loc, fromCoor);
+ builder.create<fir::StoreOp>(loc, fromPointerValue, toCoor);
+ } else {
+ auto from =
+ fir::factory::componentToExtendedValue(builder, loc, fromCoor);
+ auto to = fir::factory::componentToExtendedValue(builder, loc, toCoor);
+ fir::factory::genScalarAssignment(builder, loc, to, from);
+ }
+ if (outerLoop)
+ builder.setInsertionPointAfter(*outerLoop);
+ }
+}
+
+/// Can the assignment of this record type be implemented with a simple memory
+/// copy (i.e., it requires no deep copy or user defined assignment of components)?
+static bool recordTypeCanBeMemCopied(fir::RecordType recordType) {
+ if (fir::hasDynamicSize(recordType))
+ return false;
+ for (auto [_, fieldType] : recordType.getTypeList()) {
+ // Derived type component may have user assignment (so far, we cannot tell
+ // in FIR, so assume it is always the case, TODO: get the actual info).
+ if (fir::unwrapSequenceType(fieldType).isa<fir::RecordType>())
+ return false;
+ // Allocatable components need deep copy.
+ if (auto boxType = fieldType.dyn_cast<fir::BoxType>())
+ if (boxType.getEleTy().isa<fir::HeapType>())
+ return false;
+ }
+ // Constant size components without user defined assignment and pointers can
+ // be memcopied.
+ return true;
+}
+
+void fir::factory::genRecordAssignment(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ const fir::ExtendedValue &lhs,
+ const fir::ExtendedValue &rhs) {
+ assert(lhs.rank() == 0 && rhs.rank() == 0 && "assume scalar assignment");
+ auto baseTy = fir::dyn_cast_ptrOrBoxEleTy(fir::getBase(lhs).getType());
+ assert(baseTy && "must be a memory type");
+  // Box operands may be polymorphic; it is not entirely clear from 10.2.1.3
+  // whether the assignment is performed on the dynamic or declared type. Use
+  // the runtime assuming it is performed on the dynamic type.
+ bool hasBoxOperands = fir::getBase(lhs).getType().isa<fir::BoxType>() ||
+ fir::getBase(rhs).getType().isa<fir::BoxType>();
+ auto recTy = baseTy.dyn_cast<fir::RecordType>();
+ assert(recTy && "must be a record type");
+ if (hasBoxOperands || !recordTypeCanBeMemCopied(recTy)) {
+ auto to = fir::getBase(builder.createBox(loc, lhs));
+ auto from = fir::getBase(builder.createBox(loc, rhs));
+ // The runtime entry point may modify the LHS descriptor if it is
+  // an allocatable. Allocatable assignment is handled elsewhere in lowering,
+ // so just create a fir.ref<fir.box<>> from the fir.box to comply with the
+ // runtime interface, but assume the fir.box is unchanged.
+  // TODO: does this hold true with polymorphic entities?
+ auto toMutableBox = builder.createTemporary(loc, to.getType());
+ builder.create<fir::StoreOp>(loc, to, toMutableBox);
+ fir::runtime::genAssign(builder, loc, toMutableBox, from);
+ return;
+ }
+  // Otherwise, the derived type has a compile time constant size, for which
+  // the component by component assignment could be replaced by a memory copy.
+ // Since we do not know the size of the derived type in lowering, do a
+ // component by component assignment. Note that a single fir.load/fir.store
+ // could be used on "small" record types, but as the type size grows, this
+ // leads to issues in LLVM (long compile times, long IR files, and even
+ // asserts at some point). Since there is no good size boundary, just always
+ // use component by component assignment here.
+ genComponentByComponentAssignment(builder, loc, lhs, rhs);
+}
+
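A hedged Fortran sketch of the two paths genRecordAssignment distinguishes:

  subroutine record_assign_sketch()
    type t_simple               ! no allocatable or nested derived component:
      integer :: i(4)           ! assigned component by component in FIR
      real    :: x
    end type
    type t_deep                 ! allocatable component: needs a deep copy, so the
      real, allocatable :: a(:) ! fir::runtime::genAssign path is taken
    end type
    type(t_simple) :: s1, s2
    type(t_deep)   :: d1, d2
    s1 = s2
    d1 = d2
  end subroutine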
+mlir::Value fir::factory::genLenOfCharacter(
+ fir::FirOpBuilder &builder, mlir::Location loc, fir::ArrayLoadOp arrLoad,
+ llvm::ArrayRef<mlir::Value> path, llvm::ArrayRef<mlir::Value> substring) {
+ llvm::SmallVector<mlir::Value> typeParams(arrLoad.getTypeparams());
+ return genLenOfCharacter(builder, loc,
+ arrLoad.getType().cast<fir::SequenceType>(),
+ arrLoad.getMemref(), typeParams, path, substring);
+}
+
+mlir::Value fir::factory::genLenOfCharacter(
+ fir::FirOpBuilder &builder, mlir::Location loc, fir::SequenceType seqTy,
+ mlir::Value memref, llvm::ArrayRef<mlir::Value> typeParams,
+ llvm::ArrayRef<mlir::Value> path, llvm::ArrayRef<mlir::Value> substring) {
+ auto idxTy = builder.getIndexType();
+ auto zero = builder.createIntegerConstant(loc, idxTy, 0);
+ auto saturatedDiff = [&](mlir::Value lower, mlir::Value upper) {
+    auto diff = builder.create<mlir::arith::SubIOp>(loc, upper, lower);
+    auto one = builder.createIntegerConstant(loc, idxTy, 1);
+    auto size = builder.create<mlir::arith::AddIOp>(loc, diff, one);
+ auto cmp = builder.create<mlir::arith::CmpIOp>(
+ loc, mlir::arith::CmpIPredicate::sgt, size, zero);
+ return builder.create<mlir::arith::SelectOp>(loc, cmp, size, zero);
+ };
+ if (substring.size() == 2) {
+ auto upper = builder.createConvert(loc, idxTy, substring.back());
+ auto lower = builder.createConvert(loc, idxTy, substring.front());
+ return saturatedDiff(lower, upper);
+ }
+ auto lower = zero;
+ if (substring.size() == 1)
+ lower = builder.createConvert(loc, idxTy, substring.front());
+ auto eleTy = fir::applyPathToType(seqTy, path);
+ if (!fir::hasDynamicSize(eleTy)) {
+ if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
+ // Use LEN from the type.
+ return builder.createIntegerConstant(loc, idxTy, charTy.getLen());
+ }
+ // Do we need to support !fir.array<!fir.char<k,n>>?
+ fir::emitFatalError(loc,
+ "application of path did not result in a !fir.char");
+ }
+ if (fir::isa_box_type(memref.getType())) {
+ if (memref.getType().isa<fir::BoxCharType>())
+ return builder.create<fir::BoxCharLenOp>(loc, idxTy, memref);
+ if (memref.getType().isa<fir::BoxType>())
+ return CharacterExprHelper(builder, loc).readLengthFromBox(memref);
+ fir::emitFatalError(loc, "memref has wrong type");
+ }
+ if (typeParams.empty()) {
+ fir::emitFatalError(loc, "array_load must have typeparams");
+ }
+ if (fir::isa_char(seqTy.getEleTy())) {
+ assert(typeParams.size() == 1 && "too many typeparams");
+ return typeParams.front();
+ }
+ TODO(loc, "LEN of character must be computed at runtime");
+}
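genLenOfCharacter computes the possibly dynamic LEN of the designated
character, clamping substring bounds at zero; a minimal Fortran sketch:

  program len_sketch
    character(10) :: c(5)
    print *, len(c(2)(3:7))   ! 7 - 3 + 1 = 5
    print *, len(c(2)(8:3))   ! negative extent clamps to 0
  end program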
diff --git a/flang/test/Lower/allocatable-assignment.f90 b/flang/test/Lower/allocatable-assignment.f90
index e151d305f229c..945b6d0ccc9b1 100644
--- a/flang/test/Lower/allocatable-assignment.f90
+++ b/flang/test/Lower/allocatable-assignment.f90
@@ -78,4 +78,462 @@ subroutine test_simple_local_scalar()
x = 42.
end subroutine
+! -----------------------------------------------------------------------------
+! Test character scalar RHS
+! -----------------------------------------------------------------------------
+
+! CHECK-LABEL: func @_QMalloc_assignPtest_deferred_char_scalar(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>{{.*}}) {
+subroutine test_deferred_char_scalar(x)
+ character(:), allocatable :: x
+! CHECK: %[[VAL_1:.*]] = fir.address_of(@_QQ{{.*}}) : !fir.ref<!fir.char<1,12>>
+! CHECK: %[[VAL_2:.*]] = arith.constant 12 : index
+! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>
+! CHECK: %[[VAL_4:.*]] = fir.box_addr %[[VAL_3]] : (!fir.box<!fir.heap<!fir.char<1,?>>>) -> !fir.heap<!fir.char<1,?>>
+! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (!fir.heap<!fir.char<1,?>>) -> i64
+! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_7:.*]] = arith.cmpi ne, %[[VAL_5]], %[[VAL_6]] : i64
+! CHECK: %[[VAL_8:.*]]:2 = fir.if %[[VAL_7]] -> (i1, !fir.heap<!fir.char<1,?>>) {
+! CHECK: %[[VAL_9:.*]] = arith.constant false
+! CHECK: %[[VAL_10:.*]] = fir.box_elesize %[[VAL_3]] : (!fir.box<!fir.heap<!fir.char<1,?>>>) -> index
+! CHECK: %[[VAL_11:.*]] = arith.cmpi ne, %[[VAL_10]], %[[VAL_2]] : index
+! CHECK: %[[VAL_12:.*]] = arith.select %[[VAL_11]], %[[VAL_11]], %[[VAL_9]] : i1
+! CHECK: %[[VAL_13:.*]] = fir.if %[[VAL_12]] -> (!fir.heap<!fir.char<1,?>>) {
+! CHECK: %[[VAL_14:.*]] = fir.allocmem !fir.char<1,?>(%[[VAL_2]] : index) {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_14]] : !fir.heap<!fir.char<1,?>>
+! CHECK: } else {
+! CHECK: fir.result %[[VAL_4]] : !fir.heap<!fir.char<1,?>>
+! CHECK: }
+! CHECK: fir.result %[[VAL_12]], %[[VAL_15:.*]] : i1, !fir.heap<!fir.char<1,?>>
+! CHECK: } else {
+! CHECK: %[[VAL_16:.*]] = arith.constant true
+! CHECK: %[[VAL_17:.*]] = fir.allocmem !fir.char<1,?>(%[[VAL_2]] : index) {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_16]], %[[VAL_17]] : i1, !fir.heap<!fir.char<1,?>>
+! CHECK: }
+
+! character assignment ...
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_8]]#1 : (!fir.heap<!fir.char<1,?>>) -> !fir.ref<i8>
+! CHECK: fir.call @llvm.memmove.p0i8.p0i8.i64(%[[VAL_24]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
+! character assignment ...
+
+! CHECK: fir.if %[[VAL_8]]#0 {
+! CHECK: fir.if %[[VAL_7]] {
+! CHECK: fir.freemem %[[VAL_4]]
+! CHECK: }
+! CHECK: %[[VAL_36:.*]] = fir.embox %[[VAL_8]]#1 typeparams %[[VAL_2]] : (!fir.heap<!fir.char<1,?>>, index) -> !fir.box<!fir.heap<!fir.char<1,?>>>
+! CHECK: fir.store %[[VAL_36]] to %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>
+! CHECK: }
+ x = "Hello world!"
+end subroutine
+
+! CHECK-LABEL: func @_QMalloc_assignPtest_cst_char_scalar(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.char<1,10>>>>{{.*}}) {
+subroutine test_cst_char_scalar(x)
+ character(10), allocatable :: x
+! CHECK: %[[VAL_1:.*]] = arith.constant 10 : index
+! CHECK: %[[VAL_2:.*]] = fir.address_of(@_QQ{{.*}}) : !fir.ref<!fir.char<1,12>>
+! CHECK: %[[VAL_3:.*]] = arith.constant 12 : index
+! CHECK: %[[VAL_4:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,10>>>>
+! CHECK: %[[VAL_5:.*]] = fir.box_addr %[[VAL_4]] : (!fir.box<!fir.heap<!fir.char<1,10>>>) -> !fir.heap<!fir.char<1,10>>
+! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_5]] : (!fir.heap<!fir.char<1,10>>) -> i64
+! CHECK: %[[VAL_7:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_8:.*]] = arith.cmpi ne, %[[VAL_6]], %[[VAL_7]] : i64
+! CHECK: %[[VAL_9:.*]]:2 = fir.if %[[VAL_8]] -> (i1, !fir.heap<!fir.char<1,10>>) {
+! CHECK: %[[VAL_10:.*]] = arith.constant false
+! CHECK: %[[VAL_11:.*]] = fir.if %[[VAL_10]] -> (!fir.heap<!fir.char<1,10>>) {
+! CHECK: %[[VAL_12:.*]] = fir.allocmem !fir.char<1,10> {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_12]] : !fir.heap<!fir.char<1,10>>
+! CHECK: } else {
+! CHECK: fir.result %[[VAL_5]] : !fir.heap<!fir.char<1,10>>
+! CHECK: }
+! CHECK: fir.result %[[VAL_10]], %[[VAL_13:.*]] : i1, !fir.heap<!fir.char<1,10>>
+! CHECK: } else {
+! CHECK: %[[VAL_14:.*]] = arith.constant true
+! CHECK: %[[VAL_15:.*]] = fir.allocmem !fir.char<1,10> {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_14]], %[[VAL_15]] : i1, !fir.heap<!fir.char<1,10>>
+! CHECK: }
+
+! character assignment ...
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_9]]#1 : (!fir.heap<!fir.char<1,10>>) -> !fir.ref<i8>
+! CHECK: fir.call @llvm.memmove.p0i8.p0i8.i64(%[[VAL_24]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
+! character assignment ...
+
+! CHECK: fir.if %[[VAL_9]]#0 {
+! CHECK: fir.if %[[VAL_8]] {
+! CHECK: fir.freemem %[[VAL_5]]
+! CHECK: }
+! CHECK: %[[VAL_34:.*]] = fir.embox %[[VAL_9]]#1 : (!fir.heap<!fir.char<1,10>>) -> !fir.box<!fir.heap<!fir.char<1,10>>>
+! CHECK: fir.store %[[VAL_34]] to %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,10>>>>
+! CHECK: }
+ x = "Hello world!"
+end subroutine
+
+! CHECK-LABEL: func @_QMalloc_assignPtest_dyn_char_scalar(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>{{.*}},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32>{{.*}}) {
+subroutine test_dyn_char_scalar(x, n)
+ integer :: n
+ character(n), allocatable :: x
+! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+! CHECK: %[[VAL_3:.*]] = fir.address_of(@_QQcl.48656C6C6F20776F726C6421) : !fir.ref<!fir.char<1,12>>
+! CHECK: %[[VAL_4:.*]] = arith.constant 12 : index
+! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>
+! CHECK: %[[VAL_6:.*]] = fir.box_addr %[[VAL_5]] : (!fir.box<!fir.heap<!fir.char<1,?>>>) -> !fir.heap<!fir.char<1,?>>
+! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_6]] : (!fir.heap<!fir.char<1,?>>) -> i64
+! CHECK: %[[VAL_8:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_9:.*]] = arith.cmpi ne, %[[VAL_7]], %[[VAL_8]] : i64
+! CHECK: %[[VAL_10:.*]]:2 = fir.if %[[VAL_9]] -> (i1, !fir.heap<!fir.char<1,?>>) {
+! CHECK: %[[VAL_11:.*]] = arith.constant false
+! CHECK: %[[VAL_12:.*]] = fir.if %[[VAL_11]] -> (!fir.heap<!fir.char<1,?>>) {
+! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_2]] : (i32) -> index
+! CHECK: %[[VAL_14:.*]] = fir.allocmem !fir.char<1,?>(%[[VAL_13]] : index) {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_14]] : !fir.heap<!fir.char<1,?>>
+! CHECK: } else {
+! CHECK: fir.result %[[VAL_6]] : !fir.heap<!fir.char<1,?>>
+! CHECK: }
+! CHECK: fir.result %[[VAL_11]], %[[VAL_15:.*]] : i1, !fir.heap<!fir.char<1,?>>
+! CHECK: } else {
+! CHECK: %[[VAL_16:.*]] = arith.constant true
+! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_2]] : (i32) -> index
+! CHECK: %[[VAL_18:.*]] = fir.allocmem !fir.char<1,?>(%[[VAL_17]] : index) {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_16]], %[[VAL_18]] : i1, !fir.heap<!fir.char<1,?>>
+! CHECK: }
+
+! character assignment ...
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_10]]#1 : (!fir.heap<!fir.char<1,?>>) -> !fir.ref<i8>
+! CHECK: fir.call @llvm.memmove.p0i8.p0i8.i64(%[[VAL_24]], %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
+! character assignment ...
+
+! CHECK: fir.if %[[VAL_10]]#0 {
+! CHECK: %[[VAL_39:.*]] = fir.convert %[[VAL_2]] : (i32) -> index
+! CHECK: fir.if %[[VAL_9]] {
+! CHECK: fir.freemem %[[VAL_6]]
+! CHECK: }
+! CHECK: %[[VAL_40:.*]] = fir.embox %[[VAL_10]]#1 typeparams %[[VAL_39]] : (!fir.heap<!fir.char<1,?>>, index) -> !fir.box<!fir.heap<!fir.char<1,?>>>
+! CHECK: fir.store %[[VAL_40]] to %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>
+! CHECK: }
+ x = "Hello world!"
+end subroutine
+
+! -----------------------------------------------------------------------------
+! Test numeric/logical array RHS
+! -----------------------------------------------------------------------------
+
+! CHECK-LABEL: func @_QMalloc_assignPtest_from_cst_shape_array(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>{{.*}},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.array<2x3xf32>>{{.*}}) {
+subroutine test_from_cst_shape_array(x, y)
+ real, allocatable :: x(:, :)
+ real :: y(2, 3)
+! CHECK: %[[VAL_2_0:.*]] = arith.constant 2 : index
+! CHECK: %[[VAL_3_0:.*]] = arith.constant 3 : index
+! CHECK: %[[VAL_2:.*]] = arith.constant 2 : index
+! CHECK: %[[VAL_3:.*]] = arith.constant 3 : index
+! CHECK: %[[VAL_6:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
+! CHECK: %[[VAL_7:.*]] = fir.box_addr %[[VAL_6]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>) -> !fir.heap<!fir.array<?x?xf32>>
+! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_7]] : (!fir.heap<!fir.array<?x?xf32>>) -> i64
+! CHECK: %[[VAL_9:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_10:.*]] = arith.cmpi ne, %[[VAL_8]], %[[VAL_9]] : i64
+! CHECK: %[[VAL_11:.*]]:2 = fir.if %[[VAL_10]] -> (i1, !fir.heap<!fir.array<?x?xf32>>) {
+! CHECK: %[[VAL_12:.*]] = arith.constant false
+! CHECK: %[[VAL_13:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_14:.*]]:3 = fir.box_dims %[[VAL_6]], %[[VAL_13]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_15:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_16:.*]]:3 = fir.box_dims %[[VAL_6]], %[[VAL_15]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_17:.*]] = arith.cmpi ne, %[[VAL_14]]#1, %[[VAL_2]] : index
+! CHECK: %[[VAL_18:.*]] = arith.select %[[VAL_17]], %[[VAL_17]], %[[VAL_12]] : i1
+! CHECK: %[[VAL_19:.*]] = arith.cmpi ne, %[[VAL_16]]#1, %[[VAL_3]] : index
+! CHECK: %[[VAL_20:.*]] = arith.select %[[VAL_19]], %[[VAL_19]], %[[VAL_18]] : i1
+! CHECK: %[[VAL_21:.*]] = fir.if %[[VAL_20]] -> (!fir.heap<!fir.array<?x?xf32>>) {
+! CHECK: %[[VAL_22:.*]] = fir.allocmem !fir.array<?x?xf32>, %[[VAL_2]], %[[VAL_3]] {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_22]] : !fir.heap<!fir.array<?x?xf32>>
+! CHECK: } else {
+! CHECK: fir.result %[[VAL_7]] : !fir.heap<!fir.array<?x?xf32>>
+! CHECK: }
+! CHECK: fir.result %[[VAL_20]], %[[VAL_23:.*]] : i1, !fir.heap<!fir.array<?x?xf32>>
+! CHECK: } else {
+! CHECK: %[[VAL_24:.*]] = arith.constant true
+! CHECK: %[[VAL_25:.*]] = fir.allocmem !fir.array<?x?xf32>, %[[VAL_2]], %[[VAL_3]] {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_24]], %[[VAL_25]] : i1, !fir.heap<!fir.array<?x?xf32>>
+! CHECK: }
+
+! CHECK: %[[VAL_26:.*]] = fir.shape %[[VAL_2]], %[[VAL_3]] : (index, index) -> !fir.shape<2>
+! CHECK: %[[VAL_27:.*]] = fir.array_load %[[VAL_11]]#1(%[[VAL_26]]) : (!fir.heap<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.array<?x?xf32>
+! normal array assignment ....
+! CHECK: fir.array_merge_store %{{.*}}, %{{.*}} to %[[VAL_11]]#1 : !fir.array<?x?xf32>, !fir.array<?x?xf32>, !fir.heap<!fir.array<?x?xf32>>
+
+! CHECK: fir.if %[[VAL_11]]#0 {
+! CHECK: fir.if %[[VAL_10]] {
+! CHECK: fir.freemem %[[VAL_7]]
+! CHECK: }
+! CHECK: %[[VAL_43:.*]] = fir.shape %[[VAL_2]], %[[VAL_3]] : (index, index) -> !fir.shape<2>
+! CHECK: %[[VAL_44:.*]] = fir.embox %[[VAL_11]]#1(%[[VAL_43]]) : (!fir.heap<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.box<!fir.heap<!fir.array<?x?xf32>>>
+! CHECK: fir.store %[[VAL_44]] to %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
+! CHECK: }
+ x = y
+end subroutine
+
+! CHECK-LABEL: func @_QMalloc_assignPtest_from_dyn_shape_array(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>{{.*}},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.box<!fir.array<?x?xf32>>{{.*}}) {
+subroutine test_from_dyn_shape_array(x, y)
+ real, allocatable :: x(:, :)
+ real :: y(:, :)
+ x = y
+! CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_4:.*]]:3 = fir.box_dims %[[VAL_1]], %[[VAL_3]] : (!fir.box<!fir.array<?x?xf32>>, index) -> (index, index, index)
+! CHECK: %[[VAL_5:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_6:.*]]:3 = fir.box_dims %[[VAL_1]], %[[VAL_5]] : (!fir.box<!fir.array<?x?xf32>>, index) -> (index, index, index)
+! CHECK: %[[VAL_7:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
+! CHECK: %[[VAL_8:.*]] = fir.box_addr %[[VAL_7]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>) -> !fir.heap<!fir.array<?x?xf32>>
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (!fir.heap<!fir.array<?x?xf32>>) -> i64
+! CHECK: %[[VAL_10:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_11:.*]] = arith.cmpi ne, %[[VAL_9]], %[[VAL_10]] : i64
+! CHECK: %[[VAL_12:.*]]:2 = fir.if %[[VAL_11]] -> (i1, !fir.heap<!fir.array<?x?xf32>>) {
+! CHECK: %[[VAL_13:.*]] = arith.constant false
+! CHECK: %[[VAL_14:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_15:.*]]:3 = fir.box_dims %[[VAL_7]], %[[VAL_14]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_16:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_17:.*]]:3 = fir.box_dims %[[VAL_7]], %[[VAL_16]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_18:.*]] = arith.cmpi ne, %[[VAL_15]]#1, %[[VAL_4]]#1 : index
+! CHECK: %[[VAL_19:.*]] = arith.select %[[VAL_18]], %[[VAL_18]], %[[VAL_13]] : i1
+! CHECK: %[[VAL_20:.*]] = arith.cmpi ne, %[[VAL_17]]#1, %[[VAL_6]]#1 : index
+! CHECK: %[[VAL_21:.*]] = arith.select %[[VAL_20]], %[[VAL_20]], %[[VAL_19]] : i1
+! CHECK: %[[VAL_22:.*]] = fir.if %[[VAL_21]] -> (!fir.heap<!fir.array<?x?xf32>>) {
+! CHECK: %[[VAL_23:.*]] = fir.allocmem !fir.array<?x?xf32>, %[[VAL_4]]#1, %[[VAL_6]]#1 {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_23]] : !fir.heap<!fir.array<?x?xf32>>
+! CHECK: } else {
+! CHECK: fir.result %[[VAL_8]] : !fir.heap<!fir.array<?x?xf32>>
+! CHECK: }
+! CHECK: fir.result %[[VAL_21]], %[[VAL_24:.*]] : i1, !fir.heap<!fir.array<?x?xf32>>
+! CHECK: } else {
+! CHECK: %[[VAL_25:.*]] = arith.constant true
+! CHECK: %[[VAL_26:.*]] = fir.allocmem !fir.array<?x?xf32>, %[[VAL_4]]#1, %[[VAL_6]]#1 {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_25]], %[[VAL_26]] : i1, !fir.heap<!fir.array<?x?xf32>>
+! CHECK: }
+
+! CHECK: %[[VAL_27:.*]] = fir.shape %[[VAL_4]]#1, %[[VAL_6]]#1 : (index, index) -> !fir.shape<2>
+! CHECK: %[[VAL_28:.*]] = fir.array_load %[[VAL_12]]#1(%[[VAL_27]]) : (!fir.heap<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.array<?x?xf32>
+! normal array assignment ....
+! CHECK: fir.array_merge_store %{{.*}}, %{{.*}} to %[[VAL_12]]#1 : !fir.array<?x?xf32>, !fir.array<?x?xf32>, !fir.heap<!fir.array<?x?xf32>>
+
+! CHECK: fir.if %[[VAL_12]]#0 {
+! CHECK: fir.if %[[VAL_11]] {
+! CHECK: fir.freemem %[[VAL_8]]
+! CHECK: }
+! CHECK: %[[VAL_44:.*]] = fir.shape %[[VAL_4]]#1, %[[VAL_6]]#1 : (index, index) -> !fir.shape<2>
+! CHECK: %[[VAL_45:.*]] = fir.embox %[[VAL_12]]#1(%[[VAL_44]]) : (!fir.heap<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.box<!fir.heap<!fir.array<?x?xf32>>>
+! CHECK: fir.store %[[VAL_45]] to %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
+! CHECK: }
+end subroutine
+
+! CHECK-LABEL: func @_QMalloc_assignPtest_with_lbounds(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>{{.*}},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.box<!fir.array<?x?xf32>>{{.*}}) {
+subroutine test_with_lbounds(x, y)
+ real, allocatable :: x(:, :)
+ real :: y(10:, 20:)
+! CHECK: %[[VAL_2:.*]] = arith.constant 10 : i64
+! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_2]] : (i64) -> index
+! CHECK: %[[VAL_4:.*]] = arith.constant 20 : i64
+! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i64) -> index
+! CHECK: %[[VAL_8:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_9:.*]]:3 = fir.box_dims %[[VAL_1]], %[[VAL_8]] : (!fir.box<!fir.array<?x?xf32>>, index) -> (index, index, index)
+! CHECK: %[[VAL_10:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_11:.*]]:3 = fir.box_dims %[[VAL_1]], %[[VAL_10]] : (!fir.box<!fir.array<?x?xf32>>, index) -> (index, index, index)
+! CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
+! CHECK: %[[VAL_13:.*]] = fir.box_addr %[[VAL_12]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>) -> !fir.heap<!fir.array<?x?xf32>>
+! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (!fir.heap<!fir.array<?x?xf32>>) -> i64
+! CHECK: %[[VAL_15:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_16:.*]] = arith.cmpi ne, %[[VAL_14]], %[[VAL_15]] : i64
+! CHECK: %[[VAL_17:.*]]:2 = fir.if %[[VAL_16]] -> (i1, !fir.heap<!fir.array<?x?xf32>>) {
+! CHECK: %[[VAL_18:.*]] = arith.constant false
+! CHECK: %[[VAL_19:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_20:.*]]:3 = fir.box_dims %[[VAL_12]], %[[VAL_19]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_21:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_22:.*]]:3 = fir.box_dims %[[VAL_12]], %[[VAL_21]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_23:.*]] = arith.cmpi ne, %[[VAL_20]]#1, %[[VAL_9]]#1 : index
+! CHECK: %[[VAL_24:.*]] = arith.select %[[VAL_23]], %[[VAL_23]], %[[VAL_18]] : i1
+! CHECK: %[[VAL_25:.*]] = arith.cmpi ne, %[[VAL_22]]#1, %[[VAL_11]]#1 : index
+! CHECK: %[[VAL_26:.*]] = arith.select %[[VAL_25]], %[[VAL_25]], %[[VAL_24]] : i1
+! CHECK: %[[VAL_27:.*]] = fir.if %[[VAL_26]] -> (!fir.heap<!fir.array<?x?xf32>>) {
+! CHECK: %[[VAL_28:.*]] = fir.allocmem !fir.array<?x?xf32>, %[[VAL_9]]#1, %[[VAL_11]]#1 {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_28]] : !fir.heap<!fir.array<?x?xf32>>
+! CHECK: } else {
+! CHECK: fir.result %[[VAL_13]] : !fir.heap<!fir.array<?x?xf32>>
+! CHECK: }
+! CHECK: fir.result %[[VAL_26]], %[[VAL_29:.*]] : i1, !fir.heap<!fir.array<?x?xf32>>
+! CHECK: } else {
+! CHECK: %[[VAL_30:.*]] = arith.constant true
+! CHECK: %[[VAL_31:.*]] = fir.allocmem !fir.array<?x?xf32>, %[[VAL_9]]#1, %[[VAL_11]]#1 {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_30]], %[[VAL_31]] : i1, !fir.heap<!fir.array<?x?xf32>>
+! CHECK: }
+
+! CHECK: %[[VAL_32:.*]] = fir.shape %[[VAL_9]]#1, %[[VAL_11]]#1 : (index, index) -> !fir.shape<2>
+! CHECK: %[[VAL_33:.*]] = fir.array_load %[[VAL_17]]#1(%[[VAL_32]]) : (!fir.heap<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.array<?x?xf32>
+! normal array assignment ....
+! CHECK: fir.array_merge_store %{{.*}}, %{{.*}} to %[[VAL_17]]#1 : !fir.array<?x?xf32>, !fir.array<?x?xf32>, !fir.heap<!fir.array<?x?xf32>>
+
+! CHECK: fir.if %[[VAL_17]]#0 {
+! CHECK: fir.if %[[VAL_16]] {
+! CHECK: fir.freemem %[[VAL_13]]
+! CHECK: }
+! CHECK: %[[VAL_49:.*]] = fir.shape_shift %[[VAL_3]], %[[VAL_9]]#1, %[[VAL_5]], %[[VAL_11]]#1 : (index, index, index, index) -> !fir.shapeshift<2>
+! CHECK: %[[VAL_50:.*]] = fir.embox %[[VAL_17]]#1(%[[VAL_49]]) : (!fir.heap<!fir.array<?x?xf32>>, !fir.shapeshift<2>) -> !fir.box<!fir.heap<!fir.array<?x?xf32>>>
+! CHECK: fir.store %[[VAL_50]] to %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
+! CHECK: }
+ x = y
+end subroutine
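test_with_lbounds additionally checks that, when the assignment (re)allocates x, the new descriptor is built with fir.shape_shift so that x takes the lower bounds of y (10 and 20), as 10.2.1.3 requires when the right-hand side is a whole array. A minimal sketch of the observable effect, with illustrative extents:

  real, allocatable :: x(:, :)
  real :: y(10:11, 20:22)
  x = y
  print *, lbound(x)   ! 10 20 once the assignment has allocated x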
+
+! CHECK-LABEL: func @_QMalloc_assignPtest_scalar_rhs(
+subroutine test_scalar_rhs(x, y)
+ real, allocatable :: x(:)
+ real :: y
+ ! CHECK: fir.if %{{.*}} -> {{.*}} {
+ ! CHECK: fir.if %false -> {{.*}} {
+ ! CHECK: }
+ ! CHECK: } else {
+ ! CHECK: %[[error_msg_addr:.*]] = fir.address_of(@[[error_message:.*]]) : !fir.ref<!fir.char<1,76>>
+ ! CHECK: %[[msg_addr_cast:.*]] = fir.convert %[[error_msg_addr]] : (!fir.ref<!fir.char<1,76>>) -> !fir.ref<i8>
+ ! CHECK: %{{.*}} = fir.call @_FortranAReportFatalUserError(%[[msg_addr_cast]], %{{.*}}, %{{.*}}) : (!fir.ref<i8>, !fir.ref<i8>, i32) -> none
+ ! CHECK-NOT: allocmem
+ ! CHECK: }
+ x = y
+end subroutine
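A scalar right-hand side provides no shape to allocate from, so the lowering never emits an allocation here: when x is already allocated its shape is simply kept, and when it is unallocated the else branch above reports a fatal user error because such an assignment is not conforming. A minimal sketch, assuming x starts unallocated:

  real, allocatable :: x(:)
  x = 1.0        ! invalid while x is unallocated: reported as a fatal user error
  allocate(x(5))
  x = 1.0        ! valid: x keeps its shape and every element becomes 1.0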
+
+! -----------------------------------------------------------------------------
+! Test character RHS
+! -----------------------------------------------------------------------------
+
+! CHECK: func @_QMalloc_assignPtest_cst_char_rhs_scalar(
+subroutine test_cst_char_rhs_scalar(x)
+ character(10), allocatable :: x(:)
+ x = "Hello world!"
+ ! CHECK: fir.if %{{.*}} -> {{.*}} {
+ ! CHECK: fir.if %false -> {{.*}} {
+ ! CHECK: }
+ ! CHECK: } else {
+ ! CHECK: fir.call @_FortranAReportFatalUserError
+ ! CHECK-NOT: allocmem
+ ! CHECK: }
+end subroutine
+
+! CHECK: func @_QMalloc_assignPtest_dyn_char_rhs_scalar(
+subroutine test_dyn_char_rhs_scalar(x, n)
+ integer :: n
+ character(n), allocatable :: x(:)
+ x = "Hello world!"
+ ! CHECK: fir.if %{{.*}} -> {{.*}} {
+ ! CHECK: fir.if %false -> {{.*}} {
+ ! CHECK: }
+ ! CHECK: } else {
+ ! CHECK: fir.call @_FortranAReportFatalUserError
+ ! CHECK-NOT: allocmem
+ ! CHECK: }
+end subroutine
+
+! CHECK-LABEL: func @_QMalloc_assignPtest_cst_char(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>>>{{.*}},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.boxchar<1>{{.*}}) {
+subroutine test_cst_char(x, c)
+ character(10), allocatable :: x(:)
+ character(12) :: c(20)
+! CHECK: %[[VAL_2:.*]]:2 = fir.unboxchar %[[VAL_1]] : (!fir.boxchar<1>) -> (!fir.ref<!fir.char<1,?>>, index)
+! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_2]]#0 : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<!fir.array<20x!fir.char<1,12>>>
+! CHECK: %[[VAL_4_0:.*]] = arith.constant 20 : index
+! CHECK: %[[VAL_4:.*]] = arith.constant 20 : index
+! CHECK: %[[VAL_5:.*]] = fir.shape %[[VAL_4_0]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_7:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>>>
+! CHECK: %[[VAL_8:.*]] = fir.box_addr %[[VAL_7]] : (!fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>>) -> !fir.heap<!fir.array<?x!fir.char<1,10>>>
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (!fir.heap<!fir.array<?x!fir.char<1,10>>>) -> i64
+! CHECK: %[[VAL_10:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_11:.*]] = arith.cmpi ne, %[[VAL_9]], %[[VAL_10]] : i64
+! CHECK: %[[VAL_12:.*]]:2 = fir.if %[[VAL_11]] -> (i1, !fir.heap<!fir.array<?x!fir.char<1,10>>>) {
+! CHECK: %[[VAL_13:.*]] = arith.constant false
+! CHECK: %[[VAL_14:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_15:.*]]:3 = fir.box_dims %[[VAL_7]], %[[VAL_14]] : (!fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_16:.*]] = arith.cmpi ne, %[[VAL_15]]#1, %[[VAL_4]] : index
+! CHECK: %[[VAL_17:.*]] = arith.select %[[VAL_16]], %[[VAL_16]], %[[VAL_13]] : i1
+! CHECK: %[[VAL_18:.*]] = fir.if %[[VAL_17]] -> (!fir.heap<!fir.array<?x!fir.char<1,10>>>) {
+! CHECK: %[[VAL_19:.*]] = fir.allocmem !fir.array<?x!fir.char<1,10>>, %[[VAL_4]] {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_19]] : !fir.heap<!fir.array<?x!fir.char<1,10>>>
+! CHECK: } else {
+! CHECK: fir.result %[[VAL_8]] : !fir.heap<!fir.array<?x!fir.char<1,10>>>
+! CHECK: }
+! CHECK: fir.result %[[VAL_17]], %[[VAL_20:.*]] : i1, !fir.heap<!fir.array<?x!fir.char<1,10>>>
+! CHECK: } else {
+! CHECK: %[[VAL_21:.*]] = arith.constant true
+! CHECK: %[[VAL_22:.*]] = fir.allocmem !fir.array<?x!fir.char<1,10>>, %[[VAL_4]] {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_21]], %[[VAL_22]] : i1, !fir.heap<!fir.array<?x!fir.char<1,10>>>
+! CHECK: }
+
+! CHECK: %[[VAL_23:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_24:.*]] = fir.array_load %[[VAL_12]]#1(%[[VAL_23]]) : (!fir.heap<!fir.array<?x!fir.char<1,10>>>, !fir.shape<1>) -> !fir.array<?x!fir.char<1,10>>
+! CHECK: fir.array_merge_store %{{.*}}, %{{.*}} to %[[VAL_12]]#1 : !fir.array<?x!fir.char<1,10>>, !fir.array<?x!fir.char<1,10>>, !fir.heap<!fir.array<?x!fir.char<1,10>>>
+! CHECK: fir.if %[[VAL_12]]#0 {
+! CHECK: fir.if %[[VAL_11]] {
+! CHECK: fir.freemem %[[VAL_8]]
+! CHECK: }
+! CHECK: %[[VAL_36:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_37:.*]] = fir.embox %[[VAL_12]]#1(%[[VAL_36]]) : (!fir.heap<!fir.array<?x!fir.char<1,10>>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>>
+! CHECK: fir.store %[[VAL_37]] to %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>>>
+! CHECK: }
+ x = c
+end subroutine
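Because the element length of x is the compile-time constant 10, only the extent can trigger a reallocation, and the length-12 elements of c are truncated to 10 characters by the usual rules of character intrinsic assignment. A minimal sketch of the effect, with illustrative contents:

  character(10), allocatable :: x(:)
  character(12) :: c(20)
  c = "Hello world!"
  x = c
  print *, size(x), len(x)   ! 20  10
  print *, x(1)              ! "Hello worl", truncated to the declared length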
+
+! CHECK-LABEL: func @_QMalloc_assignPtest_dyn_char(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>{{.*}}, %[[VAL_1:.*]]: !fir.ref<i32>{{.*}}, %[[VAL_2:.*]]: !fir.boxchar<1>{{.*}}) {
+subroutine test_dyn_char(x, n, c)
+ integer :: n
+ character(n), allocatable :: x(:)
+ character(*) :: c(20)
+! CHECK: %[[VAL_3:.*]]:2 = fir.unboxchar %[[VAL_2]] : (!fir.boxchar<1>) -> (!fir.ref<!fir.char<1,?>>, index)
+! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]]#0 : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<!fir.array<20x!fir.char<1,?>>>
+! CHECK: %[[VAL_5_0:.*]] = arith.constant 20 : index
+! CHECK: %[[VAL_6:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+! CHECK: %[[VAL_5:.*]] = arith.constant 20 : index
+! CHECK: %[[VAL_7:.*]] = fir.shape %[[VAL_5_0]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>
+! CHECK: %[[VAL_10:.*]] = fir.box_addr %[[VAL_9]] : (!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>) -> !fir.heap<!fir.array<?x!fir.char<1,?>>>
+! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_10]] : (!fir.heap<!fir.array<?x!fir.char<1,?>>>) -> i64
+! CHECK: %[[VAL_12:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_13:.*]] = arith.cmpi ne, %[[VAL_11]], %[[VAL_12]] : i64
+! CHECK: %[[VAL_14:.*]]:2 = fir.if %[[VAL_13]] -> (i1, !fir.heap<!fir.array<?x!fir.char<1,?>>>) {
+! CHECK: %[[VAL_15:.*]] = arith.constant false
+! CHECK: %[[VAL_16:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_17:.*]]:3 = fir.box_dims %[[VAL_9]], %[[VAL_16]] : (!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_18:.*]] = arith.cmpi ne, %[[VAL_17]]#1, %[[VAL_5]] : index
+! CHECK: %[[VAL_19:.*]] = arith.select %[[VAL_18]], %[[VAL_18]], %[[VAL_15]] : i1
+! CHECK: %[[VAL_20:.*]] = fir.if %[[VAL_19]] -> (!fir.heap<!fir.array<?x!fir.char<1,?>>>) {
+! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_6]] : (i32) -> index
+! CHECK: %[[VAL_22:.*]] = fir.allocmem !fir.array<?x!fir.char<1,?>>(%[[VAL_21]] : index), %[[VAL_5]] {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_22]] : !fir.heap<!fir.array<?x!fir.char<1,?>>>
+! CHECK: } else {
+! CHECK: fir.result %[[VAL_10]] : !fir.heap<!fir.array<?x!fir.char<1,?>>>
+! CHECK: }
+! CHECK: fir.result %[[VAL_19]], %[[VAL_23:.*]] : i1, !fir.heap<!fir.array<?x!fir.char<1,?>>>
+! CHECK: } else {
+! CHECK: %[[VAL_24:.*]] = arith.constant true
+! CHECK: %[[VAL_25:.*]] = fir.convert %[[VAL_6]] : (i32) -> index
+! CHECK: %[[VAL_26:.*]] = fir.allocmem !fir.array<?x!fir.char<1,?>>(%[[VAL_25]] : index), %[[VAL_5]] {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_24]], %[[VAL_26]] : i1, !fir.heap<!fir.array<?x!fir.char<1,?>>>
+! CHECK: }
+
+! CHECK: %[[VAL_27:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_28:.*]] = fir.array_load %[[VAL_14]]#1(%[[VAL_27]]) typeparams %[[VAL_6]] : (!fir.heap<!fir.array<?x!fir.char<1,?>>>, !fir.shape<1>, i32) -> !fir.array<?x!fir.char<1,?>>
+! normal array assignment ....
+! CHECK: fir.array_merge_store %{{.*}}, %{{.*}} to %[[VAL_14]]#1 typeparams %[[VAL_6]] : !fir.array<?x!fir.char<1,?>>, !fir.array<?x!fir.char<1,?>>, !fir.heap<!fir.array<?x!fir.char<1,?>>>, i32
+
+! CHECK: fir.if %[[VAL_14]]#0 {
+! CHECK: %[[VAL_39:.*]] = fir.convert %[[VAL_6]] : (i32) -> index
+! CHECK: fir.if %[[VAL_13]] {
+! CHECK: fir.freemem %[[VAL_10]]
+! CHECK: }
+! CHECK: %[[VAL_40:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_41:.*]] = fir.embox %[[VAL_14]]#1(%[[VAL_40]]) typeparams %[[VAL_39]] : (!fir.heap<!fir.array<?x!fir.char<1,?>>>, !fir.shape<1>, index) -> !fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>
+! CHECK: fir.store %[[VAL_41]] to %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>
+! CHECK: }
+ x = c
+end subroutine
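In the dynamic-length case the element length of x comes from n, so the allocmem and the final embox both carry a typeparams operand converted from n, while only the extent is compared to decide whether to reallocate. A minimal sketch of the semantics, assuming n is 3 and every element of c holds "Hello" on entry:

  ! inside test_dyn_char, with n = 3 and c(:) = "Hello":
  x = c
  print *, size(x), len(x)   ! 20  3
  print *, x(1)              ! "Hel", each element truncated to the length n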
+
end module