[flang-commits] [flang] dc48849 - [Flang] NFC: Rearrange conversion patterns in Codegen.cpp
Kiran Chandramohan via flang-commits
flang-commits at lists.llvm.org
Wed Mar 2 03:56:10 PST 2022
Author: Kiran Chandramohan
Date: 2022-03-02T11:55:45Z
New Revision: dc48849fccce30a7f78b5a0867a622681bf39dd6
URL: https://github.com/llvm/llvm-project/commit/dc48849fccce30a7f78b5a0867a622681bf39dd6
DIFF: https://github.com/llvm/llvm-project/commit/dc48849fccce30a7f78b5a0867a622681bf39dd6.diff
LOG: [Flang] NFC: Rearrange conversion patterns in Codegen.cpp
Minor rearrangement in the order of conversion patterns to make it
easier to identify differences.
Reviewed By: clementval, schweitz
Differential Revision: https://reviews.llvm.org/D120721
Added:
Modified:
flang/lib/Optimizer/CodeGen/CodeGen.cpp
Removed:
################################################################################
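For orientation: every pattern this patch moves follows the same FIROpConversion shape. A minimal sketch of that shape, mirroring the UndefOpConversion pattern that appears later in the diff (illustrative only, not code added by this commit):

struct ExampleUndefOpConversion : public FIROpConversion<fir::UndefOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UndefOp undef, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Replace the FIR op with its LLVM dialect counterpart, converting the
    // result type through the pattern's type converter.
    rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
        undef, convertType(undef.getType()));
    return success();
  }
};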
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index 51ba5263063af..140259f4e9016 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -97,6 +97,27 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}
+ /// Perform an extension or truncation as needed on an integer value. Lowering
+ /// to the specific target may involve some sign-extending or truncation of
+ /// values, particularly to fit them from abstract box types to the
+ /// appropriate reified structures.
+ mlir::Value integerCast(mlir::Location loc,
+ mlir::ConversionPatternRewriter &rewriter,
+ mlir::Type ty, mlir::Value val) const {
+ auto valTy = val.getType();
+ // If the value was not yet lowered, lower its type so that it can
+ // be used in getPrimitiveTypeSizeInBits.
+ if (!valTy.isa<mlir::IntegerType>())
+ valTy = convertType(valTy);
+ auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
+ auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
+ if (toSize < fromSize)
+ return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
+ if (toSize > fromSize)
+ return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
+ return val;
+ }
+
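A usage sketch (not part of the patch; the names are borrowed from the EmboxCharOpConversion pattern further down): widening a lowered i32 length to the length field of a boxchar struct would look roughly like

  // integerCast emits llvm.sext when widening, llvm.trunc when narrowing,
  // and returns the value unchanged when the widths already match.
  mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);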
/// Construct code sequence to extract the specifc value from a `fir.box`.
mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
mlir::Type resultTy,
@@ -174,23 +195,6 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
}
- // Load the attribute from the \p box and perform a check against \p maskValue
- // The final comparison is implemented as `(attribute & maskValue) != 0`.
- mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
- mlir::ConversionPatternRewriter &rewriter,
- unsigned maskValue) const {
- mlir::Type attrTy = rewriter.getI32Type();
- mlir::Value attribute =
- getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
- mlir::LLVM::ConstantOp attrMask =
- genConstantOffset(loc, rewriter, maskValue);
- auto maskRes =
- rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
- mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
- return rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
- }
-
// Get the element type given an LLVM type that is of the form
// [llvm.ptr](array|struct|vector)+ and the provided indexes.
static mlir::Type getBoxEleTy(mlir::Type type,
@@ -219,6 +223,23 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
return getBoxEleTy(type, {kAddrPosInBox});
}
+ // Load the attribute from the \p box and perform a check against \p maskValue
+ // The final comparison is implemented as `(attribute & maskValue) != 0`.
+ mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
+ mlir::ConversionPatternRewriter &rewriter,
+ unsigned maskValue) const {
+ mlir::Type attrTy = rewriter.getI32Type();
+ mlir::Value attribute =
+ getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
+ mlir::LLVM::ConstantOp attrMask =
+ genConstantOffset(loc, rewriter, maskValue);
+ auto maskRes =
+ rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
+ mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
+ return rewriter.create<mlir::LLVM::ICmpOp>(
+ loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
+ }
+
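Usage sketch (illustrative; the mask constant is hypothetical, not taken from this file):

  // Produces an i1 that is true iff the chosen attribute bit is set,
  // i.e. (attribute & maskValue) != 0.
  mlir::Value hasAttr =
      genBoxAttributeCheck(loc, box, rewriter, /*maskValue=*/2);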
template <typename... ARGS>
mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
mlir::ConversionPatternRewriter &rewriter,
@@ -227,27 +248,6 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
}
- /// Perform an extension or truncation as needed on an integer value. Lowering
- /// to the specific target may involve some sign-extending or truncation of
- /// values, particularly to fit them from abstract box types to the
- /// appropriate reified structures.
- mlir::Value integerCast(mlir::Location loc,
- mlir::ConversionPatternRewriter &rewriter,
- mlir::Type ty, mlir::Value val) const {
- auto valTy = val.getType();
- // If the value was not yet lowered, lower its type so that it can
- // be used in getPrimitiveTypeSizeInBits.
- if (!valTy.isa<mlir::IntegerType>())
- valTy = convertType(valTy);
- auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
- auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
- if (toSize < fromSize)
- return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
- if (toSize > fromSize)
- return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
- return val;
- }
-
fir::LLVMTypeConverter &lowerTy() const {
return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
}
@@ -272,34 +272,6 @@ class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
mlir::ConversionPatternRewriter &rewriter) const = 0;
};
-/// Create value signaling an absent optional argument in a call, e.g.
-/// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>`
-struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::AbsentOp absent, OpAdaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- mlir::Type ty = convertType(absent.getType());
- mlir::Location loc = absent.getLoc();
-
- if (absent.getType().isa<fir::BoxCharType>()) {
- auto structTy = ty.cast<mlir::LLVM::LLVMStructType>();
- assert(!structTy.isOpaque() && !structTy.getBody().empty());
- auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
- auto nullField =
- rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]);
- mlir::MLIRContext *ctx = absent.getContext();
- auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
- rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
- absent, ty, undefStruct, nullField, c0);
- } else {
- rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty);
- }
- return success();
- }
-};
-
// Lower `fir.address_of` operation to `llvm.address_of` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
using FIROpConversion::FIROpConversion;
@@ -404,7 +376,20 @@ struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
return success();
}
};
+} // namespace
+
+/// Construct an `llvm.extractvalue` instruction. It will return value at
+/// element \p x from \p tuple.
+static mlir::LLVM::ExtractValueOp
+genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
+ mlir::ConversionPatternRewriter &rewriter,
+ mlir::MLIRContext *ctx, int x) {
+ auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
+ auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
+ return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
+}
+namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
@@ -428,6 +413,29 @@ struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
}
};
+/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
+/// boxchar.
+struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
+ using FIROpConversion::FIROpConversion;
+
+ mlir::LogicalResult
+ matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ mlir::Value boxChar = adaptor.getOperands()[0];
+ mlir::Location loc = boxChar.getLoc();
+ mlir::MLIRContext *ctx = boxChar.getContext();
+ mlir::Type returnValTy = boxCharLen.getResult().getType();
+
+ constexpr int boxcharLenIdx = 1;
+ mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
+ loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
+ mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
+ rewriter.replaceOp(boxCharLen, lenAfterCast);
+
+ return success();
+ }
+};
+
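The net effect of this pattern, sketched roughly (exact pointer and integer types depend on the character kind and the target):

  %len = fir.boxchar_len %bc : (!fir.boxchar<1>) -> i64
  // becomes
  %len = llvm.extractvalue %bc[1] : !llvm.struct<(ptr<i8>, i64)>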
/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension infomartion from the boxed value.
/// Result in a triple set of GEPs and loads.
@@ -536,39 +544,6 @@ struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
}
};
-/// Lower `fir.string_lit` to LLVM IR dialect operation.
-struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- auto ty = convertType(constop.getType());
- auto attr = constop.getValue();
- if (attr.isa<mlir::StringAttr>()) {
- rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
- return success();
- }
-
- auto arr = attr.cast<mlir::ArrayAttr>();
- auto charTy = constop.getType().cast<fir::CharacterType>();
- unsigned bits = lowerTy().characterBitsize(charTy);
- mlir::Type intTy = rewriter.getIntegerType(bits);
- auto attrs = llvm::map_range(
- arr.getValue(), [intTy, bits](mlir::Attribute attr) -> Attribute {
- return mlir::IntegerAttr::get(
- intTy,
- attr.cast<mlir::IntegerAttr>().getValue().sextOrTrunc(bits));
- });
- mlir::Type vecType = mlir::VectorType::get(arr.size(), intTy);
- auto denseAttr = mlir::DenseElementsAttr::get(
- vecType.cast<mlir::ShapedType>(), llvm::to_vector<8>(attrs));
- rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(constop, ty,
- denseAttr);
- return success();
- }
-};
-
/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
@@ -603,6 +578,39 @@ struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
}
};
+/// Lower `fir.string_lit` to LLVM IR dialect operation.
+struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
+ using FIROpConversion::FIROpConversion;
+
+ mlir::LogicalResult
+ matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ auto ty = convertType(constop.getType());
+ auto attr = constop.getValue();
+ if (attr.isa<mlir::StringAttr>()) {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
+ return success();
+ }
+
+ auto arr = attr.cast<mlir::ArrayAttr>();
+ auto charTy = constop.getType().cast<fir::CharacterType>();
+ unsigned bits = lowerTy().characterBitsize(charTy);
+ mlir::Type intTy = rewriter.getIntegerType(bits);
+ auto attrs = llvm::map_range(
+ arr.getValue(), [intTy, bits](mlir::Attribute attr) -> Attribute {
+ return mlir::IntegerAttr::get(
+ intTy,
+ attr.cast<mlir::IntegerAttr>().getValue().sextOrTrunc(bits));
+ });
+ mlir::Type vecType = mlir::VectorType::get(arr.size(), intTy);
+ auto denseAttr = mlir::DenseElementsAttr::get(
+ vecType.cast<mlir::ShapedType>(), llvm::to_vector<8>(attrs));
+ rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(constop, ty,
+ denseAttr);
+ return success();
+ }
+};
+
// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
using FIROpConversion::FIROpConversion;
@@ -879,15 +887,38 @@ struct LenParamIndexOpConversion
}
};
-/// Lower `fir.gentypedesc` to a global constant.
-struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
+/// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
+/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
+/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
+/// element is the length of the character buffer (`#n`).
+struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
- matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
+ matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
- TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
- return failure();
+ mlir::ValueRange operands = adaptor.getOperands();
+ MLIRContext *ctx = emboxChar.getContext();
+
+ mlir::Value charBuffer = operands[0];
+ mlir::Value charBufferLen = operands[1];
+
+ mlir::Location loc = emboxChar.getLoc();
+ mlir::Type llvmStructTy = convertType(emboxChar.getType());
+ auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);
+
+ mlir::Type lenTy =
+ llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
+ mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);
+
+ auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
+ auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
+ auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
+ loc, llvmStructTy, llvmStruct, charBuffer, c0);
+ rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
+ emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);
+
+ return success();
}
};
} // namespace
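Roughly, the EmboxCharOpConversion pattern above performs the following rewrite (types shown for KIND=1; illustrative only):

  %box = fir.emboxchar %addr, %len : (!fir.ref<!fir.char<1,?>>, i64) -> !fir.boxchar<1>
  // becomes
  %0   = llvm.mlir.undef : !llvm.struct<(ptr<i8>, i64)>
  %1   = llvm.insertvalue %addr, %0[0] : !llvm.struct<(ptr<i8>, i64)>
  %box = llvm.insertvalue %len, %1[1] : !llvm.struct<(ptr<i8>, i64)>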
@@ -1012,438 +1043,14 @@ struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
return success();
}
};
+} // namespace
-/// Convert `fir.end`
-struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
- using FIROpConversion::FIROpConversion;
+namespace {} // namespace
- mlir::LogicalResult
- matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- TODO(firEnd.getLoc(), "fir.end codegen");
- return failure();
- }
-};
-
-/// Lower `fir.has_value` operation to `llvm.return` operation.
-struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(op, adaptor.getOperands());
- return success();
- }
-};
-
-/// Lower `fir.global` operation to `llvm.global` operation.
-/// `fir.insert_on_range` operations are replaced with constant dense attribute
-/// if they are applied on the full range.
-struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- auto tyAttr = convertType(global.getType());
- if (global.getType().isa<fir::BoxType>())
- tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType();
- auto loc = global.getLoc();
- mlir::Attribute initAttr{};
- if (global.getInitVal())
- initAttr = global.getInitVal().getValue();
- auto linkage = convertLinkage(global.getLinkName());
- auto isConst = global.getConstant().hasValue();
- auto g = rewriter.create<mlir::LLVM::GlobalOp>(
- loc, tyAttr, isConst, linkage, global.getSymName(), initAttr);
- auto &gr = g.getInitializerRegion();
- rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end());
- if (!gr.empty()) {
- // Replace insert_on_range with a constant dense attribute if the
- // initialization is on the full range.
- auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>();
- for (auto insertOp : insertOnRangeOps) {
- if (isFullRange(insertOp.getCoor(), insertOp.getType())) {
- auto seqTyAttr = convertType(insertOp.getType());
- auto *op = insertOp.getVal().getDefiningOp();
- auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op);
- if (!constant) {
- auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op);
- if (!convertOp)
- continue;
- constant = cast<mlir::arith::ConstantOp>(
- convertOp.getValue().getDefiningOp());
- }
- mlir::Type vecType = mlir::VectorType::get(
- insertOp.getType().getShape(), constant.getType());
- auto denseAttr = mlir::DenseElementsAttr::get(
- vecType.cast<ShapedType>(), constant.getValue());
- rewriter.setInsertionPointAfter(insertOp);
- rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(
- insertOp, seqTyAttr, denseAttr);
- }
- }
- }
- rewriter.eraseOp(global);
- return success();
- }
-
- bool isFullRange(mlir::DenseIntElementsAttr indexes,
- fir::SequenceType seqTy) const {
- auto extents = seqTy.getShape();
- if (indexes.size() / 2 != static_cast<int64_t>(extents.size()))
- return false;
- auto cur_index = indexes.value_begin<int64_t>();
- for (unsigned i = 0; i < indexes.size(); i += 2) {
- if (*(cur_index++) != 0)
- return false;
- if (*(cur_index++) != extents[i / 2] - 1)
- return false;
- }
- return true;
- }
-
- // TODO: String comparaison should be avoided. Replace linkName with an
- // enumeration.
- mlir::LLVM::Linkage convertLinkage(Optional<StringRef> optLinkage) const {
- if (optLinkage.hasValue()) {
- auto name = optLinkage.getValue();
- if (name == "internal")
- return mlir::LLVM::Linkage::Internal;
- if (name == "linkonce")
- return mlir::LLVM::Linkage::Linkonce;
- if (name == "common")
- return mlir::LLVM::Linkage::Common;
- if (name == "weak")
- return mlir::LLVM::Linkage::Weak;
- }
- return mlir::LLVM::Linkage::External;
- }
-};
-} // namespace
-
-static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
- Optional<mlir::ValueRange> destOps,
- mlir::ConversionPatternRewriter &rewriter,
- mlir::Block *newBlock) {
- if (destOps.hasValue())
- rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
- newBlock, mlir::ValueRange());
- else
- rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
-}
-
-template <typename A, typename B>
-static void genBrOp(A caseOp, mlir::Block *dest, Optional<B> destOps,
- mlir::ConversionPatternRewriter &rewriter) {
- if (destOps.hasValue())
- rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
- dest);
- else
- rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
-}
-
-static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
- mlir::Block *dest,
- Optional<mlir::ValueRange> destOps,
- mlir::ConversionPatternRewriter &rewriter) {
- auto *thisBlock = rewriter.getInsertionBlock();
- auto *newBlock = createBlock(rewriter, dest);
- rewriter.setInsertionPointToEnd(thisBlock);
- genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
- rewriter.setInsertionPointToEnd(newBlock);
-}
-
-namespace {
-/// Conversion of `fir.select_case`
-///
-/// The `fir.select_case` operation is converted to a if-then-else ladder.
-/// Depending on the case condition type, one or several comparison and
-/// conditional branching can be generated.
-///
-/// A a point value case such as `case(4)`, a lower bound case such as
-/// `case(5:)` or an upper bound case such as `case(:3)` are converted to a
-/// simple comparison between the selector value and the constant value in the
-/// case. The block associated with the case condition is then executed if
-/// the comparison succeed otherwise it branch to the next block with the
-/// comparison for the the next case conditon.
-///
-/// A closed interval case condition such as `case(7:10)` is converted with a
-/// first comparison and conditional branching for the lower bound. If
-/// successful, it branch to a second block with the comparison for the
-/// upper bound in the same case condition.
-///
-/// TODO: lowering of CHARACTER type cases is not handled yet.
-struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- unsigned conds = caseOp.getNumConditions();
- llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
- // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
- auto ty = caseOp.getSelector().getType();
- if (ty.isa<fir::CharacterType>()) {
- TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
- return failure();
- }
- mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
- auto loc = caseOp.getLoc();
- for (unsigned t = 0; t != conds; ++t) {
- mlir::Block *dest = caseOp.getSuccessor(t);
- llvm::Optional<mlir::ValueRange> destOps =
- caseOp.getSuccessorOperands(adaptor.getOperands(), t);
- llvm::Optional<mlir::ValueRange> cmpOps =
- *caseOp.getCompareOperands(adaptor.getOperands(), t);
- mlir::Value caseArg = *(cmpOps.getValue().begin());
- mlir::Attribute attr = cases[t];
- if (attr.isa<fir::PointIntervalAttr>()) {
- auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
- genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
- continue;
- }
- if (attr.isa<fir::LowerBoundAttr>()) {
- auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
- genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
- continue;
- }
- if (attr.isa<fir::UpperBoundAttr>()) {
- auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
- genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
- continue;
- }
- if (attr.isa<fir::ClosedIntervalAttr>()) {
- auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
- auto *thisBlock = rewriter.getInsertionBlock();
- auto *newBlock1 = createBlock(rewriter, dest);
- auto *newBlock2 = createBlock(rewriter, dest);
- rewriter.setInsertionPointToEnd(thisBlock);
- rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
- rewriter.setInsertionPointToEnd(newBlock1);
- mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1);
- auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
- genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
- rewriter.setInsertionPointToEnd(newBlock2);
- continue;
- }
- assert(attr.isa<mlir::UnitAttr>());
- assert((t + 1 == conds) && "unit must be last");
- genBrOp(caseOp, dest, destOps, rewriter);
- }
- return success();
- }
-};
-} // namespace
-
-template <typename OP>
-static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
- typename OP::Adaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) {
- unsigned conds = select.getNumConditions();
- auto cases = select.getCases().getValue();
- mlir::Value selector = adaptor.getSelector();
- auto loc = select.getLoc();
- assert(conds > 0 && "select must have cases");
-
- llvm::SmallVector<mlir::Block *> destinations;
- llvm::SmallVector<mlir::ValueRange> destinationsOperands;
- mlir::Block *defaultDestination;
- mlir::ValueRange defaultOperands;
- llvm::SmallVector<int32_t> caseValues;
-
- for (unsigned t = 0; t != conds; ++t) {
- mlir::Block *dest = select.getSuccessor(t);
- auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
- const mlir::Attribute &attr = cases[t];
- if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
- destinations.push_back(dest);
- destinationsOperands.push_back(destOps.hasValue() ? *destOps
- : ValueRange());
- caseValues.push_back(intAttr.getInt());
- continue;
- }
- assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
- assert((t + 1 == conds) && "unit must be last");
- defaultDestination = dest;
- defaultOperands = destOps.hasValue() ? *destOps : ValueRange();
- }
-
- // LLVM::SwitchOp takes a i32 type for the selector.
- if (select.getSelector().getType() != rewriter.getI32Type())
- selector =
- rewriter.create<LLVM::TruncOp>(loc, rewriter.getI32Type(), selector);
-
- rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
- select, selector,
- /*defaultDestination=*/defaultDestination,
- /*defaultOperands=*/defaultOperands,
- /*caseValues=*/caseValues,
- /*caseDestinations=*/destinations,
- /*caseOperands=*/destinationsOperands,
- /*branchWeights=*/ArrayRef<int32_t>());
-}
-
-namespace {
-/// conversion of fir::SelectOp to an if-then-else ladder
-struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
- return success();
- }
-};
-
-/// `fir.load` --> `llvm.load`
-struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- // fir.box is a special case because it is considered as an ssa values in
- // fir, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
- // and fir.box end up being the same llvm types and loading a
- // fir.ref<fir.box> is actually a no op in LLVM.
- if (load.getType().isa<fir::BoxType>()) {
- rewriter.replaceOp(load, adaptor.getOperands()[0]);
- } else {
- mlir::Type ty = convertType(load.getType());
- ArrayRef<NamedAttribute> at = load->getAttrs();
- rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
- load, ty, adaptor.getOperands(), at);
- }
- return success();
- }
-};
-
-/// Lower `fir.no_reassoc` to LLVM IR dialect.
-/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
-/// math flags?
-struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
- return success();
- }
-};
-
-/// Lower `fir.select_type` to LLVM IR dialect.
-struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- mlir::emitError(select.getLoc(),
- "fir.select_type should have already been converted");
- return failure();
- }
-};
-
-/// conversion of fir::SelectRankOp to an if-then-else ladder
-struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
- return success();
- }
-};
-
-/// `fir.store` --> `llvm.store`
-struct StoreOpConversion : public FIROpConversion<fir::StoreOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- if (store.getValue().getType().isa<fir::BoxType>()) {
- // fir.box value is actually in memory, load it first before storing it.
- mlir::Location loc = store.getLoc();
- mlir::Type boxPtrTy = adaptor.getOperands()[0].getType();
- auto val = rewriter.create<mlir::LLVM::LoadOp>(
- loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(),
- adaptor.getOperands()[0]);
- rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
- store, val, adaptor.getOperands()[1]);
- } else {
- rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
- store, adaptor.getOperands()[0], adaptor.getOperands()[1]);
- }
- return success();
- }
-};
-
-/// convert to LLVM IR dialect `undef`
-struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::UndefOp undef, OpAdaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
- undef, convertType(undef.getType()));
- return success();
- }
-};
-
-/// `fir.unreachable` --> `llvm.unreachable`
-struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
- return success();
- }
-};
-
-struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- mlir::Type ty = convertType(zero.getType());
- if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
- rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
- } else if (ty.isa<mlir::IntegerType>()) {
- rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
- zero, ty, mlir::IntegerAttr::get(zero.getType(), 0));
- } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) {
- rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
- zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0));
- } else {
- // TODO: create ConstantAggregateZero for FIR aggregate/array types.
- return rewriter.notifyMatchFailure(
- zero,
- "conversion of fir.zero with aggregate type not implemented yet");
- }
- return success();
- }
-};
-} // namespace
-
-/// Common base class for embox to descriptor conversion.
-template <typename OP>
-struct EmboxCommonConversion : public FIROpConversion<OP> {
- using FIROpConversion<OP>::FIROpConversion;
+/// Common base class for embox to descriptor conversion.
+template <typename OP>
+struct EmboxCommonConversion : public FIROpConversion<OP> {
+ using FIROpConversion<OP>::FIROpConversion;
// Find the LLVMFuncOp in whose entry block the alloca should be inserted.
// The order to find the LLVMFuncOp is as follows:
@@ -1804,22 +1411,9 @@ struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> {
"fir.embox codegen of derived with length parameters");
return failure();
}
- auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest);
- rewriter.replaceOp(embox, result);
- return success();
- }
-};
-
-/// Lower `fir.emboxproc` operation. Creates a procedure box.
-/// TODO: Part of supporting Fortran 2003 procedure pointers.
-struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> {
- using FIROpConversion::FIROpConversion;
-
- mlir::LogicalResult
- matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- TODO(emboxproc.getLoc(), "fir.emboxproc codegen");
- return failure();
+ auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest);
+ rewriter.replaceOp(embox, result);
+ return success();
}
};
@@ -1936,919 +1530,1383 @@ struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
operands[sliceOffset + 2], zero, i64Ty);
dest = insertExtent(rewriter, loc, dest, descIdx, extent);
- // store step (scaled by shaped extent)
+ // store step (scaled by shaped extent)
+
+ mlir::Value step = hasSubcomp ? stepExpr : prevDim;
+ if (hasSlice)
+ step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
+ operands[sliceOffset + 2]);
+ dest = insertStride(rewriter, loc, dest, descIdx, step);
+ ++descIdx;
+ }
+
+ // compute the stride and offset for the next natural dimension
+ prevDim =
+ rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent);
+ if (constRows == 0)
+ prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
+ outerExtent);
+
+ // increment iterators
+ ++shapeOffset;
+ if (hasShift)
+ ++shiftOffset;
+ if (hasSlice)
+ sliceOffset += 3;
+ }
+ if (hasSlice || hasSubcomp || !xbox.substr().empty()) {
+ llvm::SmallVector<mlir::Value> args = {ptrOffset};
+ args.append(gepArgs.rbegin(), gepArgs.rend());
+ if (hasSubcomp) {
+ // For each field in the path add the offset to base via the args list.
+ // In the most general case, some offsets must be computed since
+ // they are not be known until runtime.
+ if (fir::hasDynamicSize(fir::unwrapSequenceType(
+ fir::unwrapPassByRefType(xbox.memref().getType()))))
+ TODO(loc, "fir.embox codegen dynamic size component in derived type");
+ args.append(operands.begin() + xbox.subcomponentOffset(),
+ operands.begin() + xbox.subcomponentOffset() +
+ xbox.subcomponent().size());
+ }
+ base =
+ rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args);
+ if (!xbox.substr().empty())
+ base = shiftSubstringBase(rewriter, loc, base,
+ operands[xbox.substrOffset()]);
+ }
+ dest = insertBaseAddress(rewriter, loc, dest, base);
+ if (isDerivedTypeWithLenParams(boxTy))
+ TODO(loc, "fir.embox codegen of derived with length parameters");
+
+ mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest);
+ rewriter.replaceOp(xbox, result);
+ return success();
+ }
+};
+
+/// Create a new box given a box reference.
+struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
+ using EmboxCommonConversion::EmboxCommonConversion;
+
+ mlir::LogicalResult
+ matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ mlir::Location loc = rebox.getLoc();
+ mlir::Type idxTy = lowerTy().indexType();
+ mlir::Value loweredBox = adaptor.getOperands()[0];
+ mlir::ValueRange operands = adaptor.getOperands();
+
+ // Create new descriptor and fill its non-shape related data.
+ llvm::SmallVector<mlir::Value, 2> lenParams;
+ mlir::Type inputEleTy = getInputEleTy(rebox);
+ if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) {
+ mlir::Value len =
+ loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter);
+ if (charTy.getFKind() != 1) {
+ mlir::Value width =
+ genConstantIndex(loc, idxTy, rewriter, charTy.getFKind());
+ len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width);
+ }
+ lenParams.emplace_back(len);
+ } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) {
+ if (recTy.getNumLenParams() != 0)
+ TODO(loc, "reboxing descriptor of derived type with length parameters");
+ }
+ auto [boxTy, dest, eleSize] =
+ consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams);
+
+ // Read input extents, strides, and base address
+ llvm::SmallVector<mlir::Value> inputExtents;
+ llvm::SmallVector<mlir::Value> inputStrides;
+ const unsigned inputRank = rebox.getRank();
+ for (unsigned i = 0; i < inputRank; ++i) {
+ mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i);
+ SmallVector<mlir::Value, 3> dimInfo =
+ getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter);
+ inputExtents.emplace_back(dimInfo[1]);
+ inputStrides.emplace_back(dimInfo[2]);
+ }
+
+ mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType());
+ mlir::Value baseAddr =
+ loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter);
+
+ if (!rebox.slice().empty() || !rebox.subcomponent().empty())
+ return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides,
+ operands, rewriter);
+ return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides,
+ operands, rewriter);
+ }
+
+private:
+ /// Write resulting shape and base address in descriptor, and replace rebox
+ /// op.
+ mlir::LogicalResult
+ finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
+ mlir::ValueRange lbounds, mlir::ValueRange extents,
+ mlir::ValueRange strides,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Location loc = rebox.getLoc();
+ mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1);
+ for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) {
+ unsigned dim = iter.index();
+ mlir::Value lb = lbounds.empty() ? one : lbounds[dim];
+ dest = insertLowerBound(rewriter, loc, dest, dim, lb);
+ dest = insertExtent(rewriter, loc, dest, dim, std::get<0>(iter.value()));
+ dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value()));
+ }
+ dest = insertBaseAddress(rewriter, loc, dest, base);
+ mlir::Value result =
+ placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest);
+ rewriter.replaceOp(rebox, result);
+ return success();
+ }
+
+ // Apply slice given the base address, extents and strides of the input box.
+ mlir::LogicalResult
+ sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
+ mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
+ mlir::ValueRange operands,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Location loc = rebox.getLoc();
+ mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
+ mlir::Type idxTy = lowerTy().indexType();
+ mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
+ // Apply subcomponent and substring shift on base address.
+ if (!rebox.subcomponent().empty() || !rebox.substr().empty()) {
+ // Cast to inputEleTy* so that a GEP can be used.
+ mlir::Type inputEleTy = getInputEleTy(rebox);
+ auto llvmElePtrTy =
+ mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy));
+ base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base);
+
+ if (!rebox.subcomponent().empty()) {
+ llvm::SmallVector<mlir::Value> gepOperands = {zero};
+ for (unsigned i = 0; i < rebox.subcomponent().size(); ++i)
+ gepOperands.push_back(operands[rebox.subcomponentOffset() + i]);
+ base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands);
+ }
+ if (!rebox.substr().empty())
+ base = shiftSubstringBase(rewriter, loc, base,
+ operands[rebox.substrOffset()]);
+ }
+
+ if (rebox.slice().empty())
+ // The array section is of the form array[%component][substring], keep
+ // the input array extents and strides.
+ return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
+ inputExtents, inputStrides, rewriter);
+
+ // Strides from the fir.box are in bytes.
+ base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
+
+ // The slice is of the form array(i:j:k)[%component]. Compute new extents
+ // and strides.
+ llvm::SmallVector<mlir::Value> slicedExtents;
+ llvm::SmallVector<mlir::Value> slicedStrides;
+ mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
+ const bool sliceHasOrigins = !rebox.shift().empty();
+ unsigned sliceOps = rebox.sliceOffset();
+ unsigned shiftOps = rebox.shiftOffset();
+ auto strideOps = inputStrides.begin();
+ const unsigned inputRank = inputStrides.size();
+ for (unsigned i = 0; i < inputRank;
+ ++i, ++strideOps, ++shiftOps, sliceOps += 3) {
+ mlir::Value sliceLb =
+ integerCast(loc, rewriter, idxTy, operands[sliceOps]);
+ mlir::Value inputStride = *strideOps; // already idxTy
+ // Apply origin shift: base += (lb-shift)*input_stride
+ mlir::Value sliceOrigin =
+ sliceHasOrigins
+ ? integerCast(loc, rewriter, idxTy, operands[shiftOps])
+ : one;
+ mlir::Value diff =
+ rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin);
+ mlir::Value offset =
+ rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride);
+ base = genGEP(loc, voidPtrTy, rewriter, base, offset);
+ // Apply upper bound and step if this is a triplet. Otherwise, the
+ // dimension is dropped and no extents/strides are computed.
+ mlir::Value upper = operands[sliceOps + 1];
+ const bool isTripletSlice =
+ !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp());
+ if (isTripletSlice) {
+ mlir::Value step =
+ integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]);
+ // extent = ub-lb+step/step
+ mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper);
+ mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb,
+ sliceUb, step, zero, idxTy);
+ slicedExtents.emplace_back(extent);
+ // stride = step*input_stride
+ mlir::Value stride =
+ rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride);
+ slicedStrides.emplace_back(stride);
+ }
+ }
+ return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
+ slicedExtents, slicedStrides, rewriter);
+ }
+
+ /// Apply a new shape to the data described by a box given the base address,
+ /// extents and strides of the box.
+ mlir::LogicalResult
+ reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
+ mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
+ mlir::ValueRange operands,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(),
+ operands.begin() + rebox.shiftOffset() +
+ rebox.shift().size()};
+ if (rebox.shape().empty()) {
+ // Only setting new lower bounds.
+ return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents,
+ inputStrides, rewriter);
+ }
+
+ mlir::Location loc = rebox.getLoc();
+ // Strides from the fir.box are in bytes.
+ mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
+ base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
+
+ llvm::SmallVector<mlir::Value> newStrides;
+ llvm::SmallVector<mlir::Value> newExtents;
+ mlir::Type idxTy = lowerTy().indexType();
+ // First stride from input box is kept. The rest is assumed contiguous
+ // (it is not possible to reshape otherwise). If the input is scalar,
+ // which may be OK if all new extents are ones, the stride does not
+ // matter, use one.
+ mlir::Value stride = inputStrides.empty()
+ ? genConstantIndex(loc, idxTy, rewriter, 1)
+ : inputStrides[0];
+ for (unsigned i = 0; i < rebox.shape().size(); ++i) {
+ mlir::Value rawExtent = operands[rebox.shapeOffset() + i];
+ mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent);
+ newExtents.emplace_back(extent);
+ newStrides.emplace_back(stride);
+ // nextStride = extent * stride;
+ stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride);
+ }
+ return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides,
+ rewriter);
+ }
+
+ /// Return scalar element type of the input box.
+ static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) {
+ auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType());
+ if (auto seqTy = ty.dyn_cast<fir::SequenceType>())
+ return seqTy.getEleTy();
+ return ty;
+ }
+};
- mlir::Value step = hasSubcomp ? stepExpr : prevDim;
- if (hasSlice)
- step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
- operands[sliceOffset + 2]);
- dest = insertStride(rewriter, loc, dest, descIdx, step);
- ++descIdx;
- }
+/// Lower `fir.emboxproc` operation. Creates a procedure box.
+/// TODO: Part of supporting Fortran 2003 procedure pointers.
+struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> {
+ using FIROpConversion::FIROpConversion;
- // compute the stride and offset for the next natural dimension
- prevDim =
- rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent);
- if (constRows == 0)
- prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
- outerExtent);
+ mlir::LogicalResult
+ matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ TODO(emboxproc.getLoc(), "fir.emboxproc codegen");
+ return failure();
+ }
+};
- // increment iterators
- ++shapeOffset;
- if (hasShift)
- ++shiftOffset;
- if (hasSlice)
- sliceOffset += 3;
+// Code shared between insert_value and extract_value Ops.
+struct ValueOpCommon {
+ // Translate the arguments pertaining to any multidimensional array to
+ // row-major order for LLVM-IR.
+ static void toRowMajor(SmallVectorImpl<mlir::Attribute> &attrs,
+ mlir::Type ty) {
+ assert(ty && "type is null");
+ const auto end = attrs.size();
+ for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) {
+ if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
+ const auto dim = getDimension(seq);
+ if (dim > 1) {
+ auto ub = std::min(i + dim, end);
+ std::reverse(attrs.begin() + i, attrs.begin() + ub);
+ i += dim - 1;
+ }
+ ty = getArrayElementType(seq);
+ } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) {
+ ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()];
+ } else {
+ llvm_unreachable("index into invalid type");
+ }
}
- if (hasSlice || hasSubcomp || !xbox.substr().empty()) {
- llvm::SmallVector<mlir::Value> args = {ptrOffset};
- args.append(gepArgs.rbegin(), gepArgs.rend());
- if (hasSubcomp) {
- // For each field in the path add the offset to base via the args list.
- // In the most general case, some offsets must be computed since
- // they are not be known until runtime.
- if (fir::hasDynamicSize(fir::unwrapSequenceType(
- fir::unwrapPassByRefType(xbox.memref().getType()))))
- TODO(loc, "fir.embox codegen dynamic size component in derived type");
- args.append(operands.begin() + xbox.subcomponentOffset(),
- operands.begin() + xbox.subcomponentOffset() +
- xbox.subcomponent().size());
+ }
+
+ static llvm::SmallVector<mlir::Attribute>
+ collectIndices(mlir::ConversionPatternRewriter &rewriter,
+ mlir::ArrayAttr arrAttr) {
+ llvm::SmallVector<mlir::Attribute> attrs;
+ for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) {
+ if (i->isa<mlir::IntegerAttr>()) {
+ attrs.push_back(*i);
+ } else {
+ auto fieldName = i->cast<mlir::StringAttr>().getValue();
+ ++i;
+ auto ty = i->cast<mlir::TypeAttr>().getValue();
+ auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName);
+ attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index));
}
- base =
- rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args);
- if (!xbox.substr().empty())
- base = shiftSubstringBase(rewriter, loc, base,
- operands[xbox.substrOffset()]);
}
- dest = insertBaseAddress(rewriter, loc, dest, base);
- if (isDerivedTypeWithLenParams(boxTy))
- TODO(loc, "fir.embox codegen of derived with length parameters");
+ return attrs;
+ }
- mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest);
- rewriter.replaceOp(xbox, result);
+private:
+ static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) {
+ unsigned result = 1;
+ for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>();
+ eleTy;
+ eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>())
+ ++result;
+ return result;
+ }
+
+ static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) {
+ auto eleTy = ty.getElementType();
+ while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>())
+ eleTy = arrTy.getElementType();
+ return eleTy;
+ }
+};
+
+namespace {
+/// Extract a subobject value from an ssa-value of aggregate type
+struct ExtractValueOpConversion
+ : public FIROpAndTypeConversion<fir::ExtractValueOp>,
+ public ValueOpCommon {
+ using FIROpAndTypeConversion::FIROpAndTypeConversion;
+
+ mlir::LogicalResult
+ doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ auto attrs = collectIndices(rewriter, extractVal.getCoor());
+ toRowMajor(attrs, adaptor.getOperands()[0].getType());
+ auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs);
+ rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(
+ extractVal, ty, adaptor.getOperands()[0], position);
return success();
}
};
-/// Create a new box given a box reference.
-struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
- using EmboxCommonConversion::EmboxCommonConversion;
+/// InsertValue is the generalized instruction for the composition of new
+/// aggregate type values.
+struct InsertValueOpConversion
+ : public FIROpAndTypeConversion<fir::InsertValueOp>,
+ public ValueOpCommon {
+ using FIROpAndTypeConversion::FIROpAndTypeConversion;
mlir::LogicalResult
- matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- mlir::Location loc = rebox.getLoc();
- mlir::Type idxTy = lowerTy().indexType();
- mlir::Value loweredBox = adaptor.getOperands()[0];
- mlir::ValueRange operands = adaptor.getOperands();
+ doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ auto attrs = collectIndices(rewriter, insertVal.getCoor());
+ toRowMajor(attrs, adaptor.getOperands()[0].getType());
+ auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs);
+ rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
+ insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1],
+ position);
+ return success();
+ }
+};
- // Create new descriptor and fill its non-shape related data.
- llvm::SmallVector<mlir::Value, 2> lenParams;
- mlir::Type inputEleTy = getInputEleTy(rebox);
- if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) {
- mlir::Value len =
- loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter);
- if (charTy.getFKind() != 1) {
- mlir::Value width =
- genConstantIndex(loc, idxTy, rewriter, charTy.getFKind());
- len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width);
+/// InsertOnRange inserts a value into a sequence over a range of offsets.
+struct InsertOnRangeOpConversion
+ : public FIROpAndTypeConversion<fir::InsertOnRangeOp> {
+ using FIROpAndTypeConversion::FIROpAndTypeConversion;
+
+ // Increments an array of subscripts in a row major fasion.
+ void incrementSubscripts(const SmallVector<uint64_t> &dims,
+ SmallVector<uint64_t> &subscripts) const {
+ for (size_t i = dims.size(); i > 0; --i) {
+ if (++subscripts[i - 1] < dims[i - 1]) {
+ return;
}
- lenParams.emplace_back(len);
- } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) {
- if (recTy.getNumLenParams() != 0)
- TODO(loc, "reboxing descriptor of derived type with length parameters");
+ subscripts[i - 1] = 0;
}
- auto [boxTy, dest, eleSize] =
- consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams);
+ }
- // Read input extents, strides, and base address
- llvm::SmallVector<mlir::Value> inputExtents;
- llvm::SmallVector<mlir::Value> inputStrides;
- const unsigned inputRank = rebox.getRank();
- for (unsigned i = 0; i < inputRank; ++i) {
- mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i);
- SmallVector<mlir::Value, 3> dimInfo =
- getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter);
- inputExtents.emplace_back(dimInfo[1]);
- inputStrides.emplace_back(dimInfo[2]);
+ mlir::LogicalResult
+ doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+
+ llvm::SmallVector<uint64_t> dims;
+ auto type = adaptor.getOperands()[0].getType();
+
+ // Iteratively extract the array dimensions from the type.
+ while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
+ dims.push_back(t.getNumElements());
+ type = t.getElementType();
}
- mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType());
- mlir::Value baseAddr =
- loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter);
+ SmallVector<uint64_t> lBounds;
+ SmallVector<uint64_t> uBounds;
- if (!rebox.slice().empty() || !rebox.subcomponent().empty())
- return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides,
- operands, rewriter);
- return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides,
- operands, rewriter);
- }
+ // Unzip the upper and lower bound and convert to a row major format.
+ mlir::DenseIntElementsAttr coor = range.getCoor();
+ auto reversedCoor = llvm::reverse(coor.getValues<int64_t>());
+ for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) {
+ uBounds.push_back(*i++);
+ lBounds.push_back(*i);
+ }
-private:
- /// Write resulting shape and base address in descriptor, and replace rebox
- /// op.
- mlir::LogicalResult
- finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
- mlir::ValueRange lbounds, mlir::ValueRange extents,
- mlir::ValueRange strides,
- mlir::ConversionPatternRewriter &rewriter) const {
- mlir::Location loc = rebox.getLoc();
- mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1);
- for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) {
- unsigned dim = iter.index();
- mlir::Value lb = lbounds.empty() ? one : lbounds[dim];
- dest = insertLowerBound(rewriter, loc, dest, dim, lb);
- dest = insertExtent(rewriter, loc, dest, dim, std::get<0>(iter.value()));
- dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value()));
+ auto &subscripts = lBounds;
+ auto loc = range.getLoc();
+ mlir::Value lastOp = adaptor.getOperands()[0];
+ mlir::Value insertVal = adaptor.getOperands()[1];
+
+ auto i64Ty = rewriter.getI64Type();
+ while (subscripts != uBounds) {
+ // Convert uint64_t's to Attribute's.
+ SmallVector<mlir::Attribute> subscriptAttrs;
+ for (const auto &subscript : subscripts)
+ subscriptAttrs.push_back(IntegerAttr::get(i64Ty, subscript));
+ lastOp = rewriter.create<mlir::LLVM::InsertValueOp>(
+ loc, ty, lastOp, insertVal,
+ ArrayAttr::get(range.getContext(), subscriptAttrs));
+
+ incrementSubscripts(dims, subscripts);
}
- dest = insertBaseAddress(rewriter, loc, dest, base);
- mlir::Value result =
- placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest);
- rewriter.replaceOp(rebox, result);
+
+ // Convert uint64_t's to Attribute's.
+ SmallVector<mlir::Attribute> subscriptAttrs;
+ for (const auto &subscript : subscripts)
+ subscriptAttrs.push_back(
+ IntegerAttr::get(rewriter.getI64Type(), subscript));
+ mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs);
+
+ rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
+ range, ty, lastOp, insertVal,
+ ArrayAttr::get(range.getContext(), arrayRef));
+
return success();
}
+};
+} // namespace
+
+namespace {
+/// XArrayCoor is the address arithmetic on a dynamically shaped, sliced,
+/// shifted etc. array.
+/// (See the static restriction on coordinate_of.) array_coor determines the
+/// coordinate (location) of a specific element.
+struct XArrayCoorOpConversion
+ : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> {
+ using FIROpAndTypeConversion::FIROpAndTypeConversion;
- // Apply slice given the base address, extents and strides of the input box.
mlir::LogicalResult
- sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
- mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
- mlir::ValueRange operands,
- mlir::ConversionPatternRewriter &rewriter) const {
- mlir::Location loc = rebox.getLoc();
- mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
+ doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ auto loc = coor.getLoc();
+ mlir::ValueRange operands = adaptor.getOperands();
+ unsigned rank = coor.getRank();
+ assert(coor.indices().size() == rank);
+ assert(coor.shape().empty() || coor.shape().size() == rank);
+ assert(coor.shift().empty() || coor.shift().size() == rank);
+ assert(coor.slice().empty() || coor.slice().size() == 3 * rank);
mlir::Type idxTy = lowerTy().indexType();
+ mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
+ mlir::Value prevExt = one;
mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
- // Apply subcomponent and substring shift on base address.
- if (!rebox.subcomponent().empty() || !rebox.substr().empty()) {
- // Cast to inputEleTy* so that a GEP can be used.
- mlir::Type inputEleTy = getInputEleTy(rebox);
- auto llvmElePtrTy =
- mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy));
- base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base);
+ mlir::Value offset = zero;
+ const bool isShifted = !coor.shift().empty();
+ const bool isSliced = !coor.slice().empty();
+ const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>();
- if (!rebox.subcomponent().empty()) {
- llvm::SmallVector<mlir::Value> gepOperands = {zero};
- for (unsigned i = 0; i < rebox.subcomponent().size(); ++i)
- gepOperands.push_back(operands[rebox.subcomponentOffset() + i]);
- base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands);
+ auto indexOps = coor.indices().begin();
+ auto shapeOps = coor.shape().begin();
+ auto shiftOps = coor.shift().begin();
+ auto sliceOps = coor.slice().begin();
+ // For each dimension of the array, generate the offset calculation.
+ for (unsigned i = 0; i < rank;
+ ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) {
+ mlir::Value index =
+ integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]);
+ mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy,
+ operands[coor.shiftOffset() + i])
+ : one;
+ mlir::Value step = one;
+ bool normalSlice = isSliced;
+ // Compute zero based index in dimension i of the element, applying
+ // potential triplets and lower bounds.
+ if (isSliced) {
+ mlir::Value ub = *(sliceOps + 1);
+ normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp());
+ if (normalSlice)
+ step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2));
+ }
+ auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb);
+      mlir::Value diff =
+          rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step);
+ if (normalSlice) {
+ mlir::Value sliceLb =
+ integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]);
+ auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb);
+        diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj);
+ }
+      // Update the offset given the stride and the zero based index `diff`
+      // that was just computed.
+ if (baseIsBoxed) {
+ // Use stride in bytes from the descriptor.
+ mlir::Value stride =
+ loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter);
+        auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride);
+ offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
+ } else {
+ // Use stride computed at last iteration.
+        auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt);
+ offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
+ // Compute next stride assuming contiguity of the base array
+ // (in element number).
+ auto nextExt =
+ integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]);
+ prevExt =
+ rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
}
- if (!rebox.substr().empty())
- base = shiftSubstringBase(rewriter, loc, base,
- operands[rebox.substrOffset()]);
}
- if (rebox.slice().empty())
- // The array section is of the form array[%component][substring], keep
- // the input array extents and strides.
- return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
- inputExtents, inputStrides, rewriter);
-
- // Strides from the fir.box are in bytes.
- base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
+ // Add computed offset to the base address.
+ if (baseIsBoxed) {
+      // Working with byte offsets. The base address is read from the fir.box
+      // and needs to be cast to i8* to do the pointer arithmetic.
+ mlir::Type baseTy =
+ getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType());
+ mlir::Value base =
+ loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter);
+ mlir::Type voidPtrTy = getVoidPtrType();
+ base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
+ llvm::SmallVector<mlir::Value> args{offset};
+ auto addr =
+ rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
+ if (coor.subcomponent().empty()) {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr);
+ return success();
+ }
+ auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
+ args.clear();
+ args.push_back(zero);
+ if (!coor.lenParams().empty()) {
+ // If type parameters are present, then we don't want to use a GEPOp
+ // as below, as the LLVM struct type cannot be statically defined.
+ TODO(loc, "derived type with type parameters");
+ }
+ // TODO: array offset subcomponents must be converted to LLVM's
+ // row-major layout here.
+ for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
+ args.push_back(operands[i]);
+ rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted,
+ args);
+ return success();
+ }
- // The slice is of the form array(i:j:k)[%component]. Compute new extents
- // and strides.
- llvm::SmallVector<mlir::Value> slicedExtents;
- llvm::SmallVector<mlir::Value> slicedStrides;
- mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
- const bool sliceHasOrigins = !rebox.shift().empty();
- unsigned sliceOps = rebox.sliceOffset();
- unsigned shiftOps = rebox.shiftOffset();
- auto strideOps = inputStrides.begin();
- const unsigned inputRank = inputStrides.size();
- for (unsigned i = 0; i < inputRank;
- ++i, ++strideOps, ++shiftOps, sliceOps += 3) {
- mlir::Value sliceLb =
- integerCast(loc, rewriter, idxTy, operands[sliceOps]);
- mlir::Value inputStride = *strideOps; // already idxTy
- // Apply origin shift: base += (lb-shift)*input_stride
- mlir::Value sliceOrigin =
- sliceHasOrigins
- ? integerCast(loc, rewriter, idxTy, operands[shiftOps])
- : one;
-      mlir::Value diff =
-          rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin);
-      mlir::Value offset =
-          rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride);
- base = genGEP(loc, voidPtrTy, rewriter, base, offset);
- // Apply upper bound and step if this is a triplet. Otherwise, the
- // dimension is dropped and no extents/strides are computed.
- mlir::Value upper = operands[sliceOps + 1];
- const bool isTripletSlice =
- !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp());
- if (isTripletSlice) {
- mlir::Value step =
- integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]);
- // extent = ub-lb+step/step
- mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper);
- mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb,
- sliceUb, step, zero, idxTy);
- slicedExtents.emplace_back(extent);
- // stride = step*input_stride
- mlir::Value stride =
- rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride);
- slicedStrides.emplace_back(stride);
+ // The array was not boxed, so it must be contiguous. offset is therefore an
+ // element offset and the base type is kept in the GEP unless the element
+ // type size is itself dynamic.
+ mlir::Value base;
+ if (coor.subcomponent().empty()) {
+ // No subcomponent.
+ if (!coor.lenParams().empty()) {
+ // Type parameters. Adjust element size explicitly.
+ auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
+ assert(eleTy && "result must be a reference-like type");
+ if (fir::characterWithDynamicLen(eleTy)) {
+ assert(coor.lenParams().size() == 1);
+ auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize(
+ eleTy.cast<fir::CharacterType>().getFKind());
+ auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8);
+ auto scaledBySize =
+ rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling);
+ auto length =
+ integerCast(loc, rewriter, idxTy,
+ adaptor.getOperands()[coor.lenParamsOffset()]);
+ offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize,
+ length);
+ } else {
+ TODO(loc, "compute size of derived type with type parameters");
+ }
}
+ // Cast the base address to a pointer to T.
+ base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty,
+ adaptor.getOperands()[0]);
+ } else {
+ // Operand #0 must have a pointer type. For subcomponent slicing, we
+ // want to cast away the array type and have a plain struct type.
+ mlir::Type ty0 = adaptor.getOperands()[0].getType();
+ auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>();
+ assert(ptrTy && "expected pointer type");
+ mlir::Type eleTy = ptrTy.getElementType();
+ while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>())
+ eleTy = arrTy.getElementType();
+ auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy);
+ base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy,
+ adaptor.getOperands()[0]);
}
- return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
- slicedExtents, slicedStrides, rewriter);
+ SmallVector<mlir::Value> args = {offset};
+ for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
+ args.push_back(operands[i]);
+ rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args);
+ return success();
}
+};
+} // namespace
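
For reference, the per-dimension arithmetic emitted by XArrayCoorOpConversion
reduces to a short formula. The following is a minimal sketch (not from the
patch) of the non-boxed, contiguous path, where strides are accumulated in
element units from the shape operands; the boxed path multiplies by byte
strides read from the descriptor instead. All concrete values are hypothetical.

#include <cstdint>
#include <cstdio>
#include <vector>

// One entry per array dimension; mirrors what the pattern reads from the
// indices/shift/slice/shape operands.
struct Dim {
  int64_t index;   // index operand
  int64_t lb;      // shift (lower bound), 1 when there is no shift
  int64_t sliceLb; // slice lower bound, equal to lb when there is no slice
  int64_t step;    // slice step, 1 when there is no slice triplet
  int64_t extent;  // extent from the shape, used to advance the stride
};

// offset += ((index - lb) * step + (sliceLb - lb)) * stride;
// stride  *= extent;   // contiguity assumption for the non-boxed case
static int64_t elementOffset(const std::vector<Dim> &dims) {
  int64_t offset = 0;
  int64_t prevExt = 1;
  for (const Dim &d : dims) {
    int64_t diff = (d.index - d.lb) * d.step + (d.sliceLb - d.lb);
    offset += diff * prevExt;
    prevExt *= d.extent;
  }
  return offset;
}

int main() {
  // Hypothetical 10x5 array, first dimension sliced with lower bound 2 and
  // step 2, no shift (lb = 1), addressing index (3, 2).
  std::vector<Dim> dims = {{3, 1, 2, 2, 10}, {2, 1, 1, 1, 5}};
  // Dim 0: (3-1)*2 + (2-1) = 5; dim 1: (2-1)*10 = 10; total 15.
  std::printf("element offset = %lld\n", (long long)elementOffset(dims));
}
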
+
+/// Convert a (memory) reference to a reference to a subobject.
+/// The coordinate_of op is a Swiss army knife operation that can be used on
+/// (memory) references to records, arrays, complex, etc. as well as boxes.
+/// With unboxed arrays, there is the restriction that the array have a static
+/// shape in all but the last column.
+struct CoordinateOpConversion
+ : public FIROpAndTypeConversion<fir::CoordinateOp> {
+ using FIROpAndTypeConversion::FIROpAndTypeConversion;
- /// Apply a new shape to the data described by a box given the base address,
- /// extents and strides of the box.
mlir::LogicalResult
- reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
- mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
- mlir::ValueRange operands,
- mlir::ConversionPatternRewriter &rewriter) const {
- mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(),
- operands.begin() + rebox.shiftOffset() +
- rebox.shift().size()};
- if (rebox.shape().empty()) {
- // Only setting new lower bounds.
- return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents,
- inputStrides, rewriter);
- }
+ doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ mlir::ValueRange operands = adaptor.getOperands();
- mlir::Location loc = rebox.getLoc();
- // Strides from the fir.box are in bytes.
- mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
- base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
+ mlir::Location loc = coor.getLoc();
+ mlir::Value base = operands[0];
+ mlir::Type baseObjectTy = coor.getBaseType();
+ mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
+ assert(objectTy && "fir.coordinate_of expects a reference type");
- llvm::SmallVector<mlir::Value> newStrides;
- llvm::SmallVector<mlir::Value> newExtents;
- mlir::Type idxTy = lowerTy().indexType();
- // First stride from input box is kept. The rest is assumed contiguous
- // (it is not possible to reshape otherwise). If the input is scalar,
- // which may be OK if all new extents are ones, the stride does not
- // matter, use one.
- mlir::Value stride = inputStrides.empty()
- ? genConstantIndex(loc, idxTy, rewriter, 1)
- : inputStrides[0];
- for (unsigned i = 0; i < rebox.shape().size(); ++i) {
- mlir::Value rawExtent = operands[rebox.shapeOffset() + i];
- mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent);
- newExtents.emplace_back(extent);
- newStrides.emplace_back(stride);
- // nextStride = extent * stride;
- stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride);
+ // Complex type - basically, extract the real or imaginary part
+ if (fir::isa_complex(objectTy)) {
+ mlir::LLVM::ConstantOp c0 =
+ genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
+ SmallVector<mlir::Value> offs = {c0, operands[1]};
+ mlir::Value gep = genGEP(loc, ty, rewriter, base, offs);
+ rewriter.replaceOp(coor, gep);
+ return success();
}
- return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides,
- rewriter);
+
+ // Boxed type - get the base pointer from the box
+ if (baseObjectTy.dyn_cast<fir::BoxType>())
+ return doRewriteBox(coor, ty, operands, loc, rewriter);
+
+ // Reference or pointer type
+ if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType>())
+ return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter);
+
+ return rewriter.notifyMatchFailure(
+ coor, "fir.coordinate_of base operand has unsupported type");
}
- /// Return scalar element type of the input box.
- static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) {
- auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType());
- if (auto seqTy = ty.dyn_cast<fir::SequenceType>())
- return seqTy.getEleTy();
- return ty;
+ unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) const {
+ return fir::hasDynamicSize(ty)
+ ? op.getDefiningOp()
+ ->getAttrOfType<mlir::IntegerAttr>("field")
+ .getInt()
+ : getIntValue(op);
}
-};
-// Code shared between insert_value and extract_value Ops.
-struct ValueOpCommon {
- // Translate the arguments pertaining to any multidimensional array to
- // row-major order for LLVM-IR.
- static void toRowMajor(SmallVectorImpl<mlir::Attribute> &attrs,
- mlir::Type ty) {
- assert(ty && "type is null");
- const auto end = attrs.size();
- for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) {
- if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
- const auto dim = getDimension(seq);
- if (dim > 1) {
- auto ub = std::min(i + dim, end);
- std::reverse(attrs.begin() + i, attrs.begin() + ub);
- i += dim - 1;
- }
- ty = getArrayElementType(seq);
- } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) {
- ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()];
+ int64_t getIntValue(mlir::Value val) const {
+ assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value");
+ mlir::Operation *defop = val.getDefiningOp();
+
+ if (auto constOp = dyn_cast<mlir::arith::ConstantIntOp>(defop))
+ return constOp.value();
+ if (auto llConstOp = dyn_cast<mlir::LLVM::ConstantOp>(defop))
+ if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>())
+ return attr.getValue().getSExtValue();
+ fir::emitFatalError(val.getLoc(), "must be a constant");
+ }
+
+ bool hasSubDimensions(mlir::Type type) const {
+ return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>();
+ }
+
+ /// Check whether this form of `!fir.coordinate_of` is supported. These
+ /// additional checks are required, because we are not yet able to convert
+ /// all valid forms of `!fir.coordinate_of`.
+ /// TODO: Either implement the unsupported cases or extend the verifier
+ /// in FIROps.cpp instead.
+ bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) const {
+ const std::size_t numOfCoors = coors.size();
+ std::size_t i = 0;
+ bool subEle = false;
+ bool ptrEle = false;
+ for (; i < numOfCoors; ++i) {
+ mlir::Value nxtOpnd = coors[i];
+ if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
+ subEle = true;
+ i += arrTy.getDimension() - 1;
+ type = arrTy.getEleTy();
+ } else if (auto recTy = type.dyn_cast<fir::RecordType>()) {
+ subEle = true;
+ type = recTy.getType(getFieldNumber(recTy, nxtOpnd));
+ } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) {
+ subEle = true;
+ type = tupTy.getType(getIntValue(nxtOpnd));
} else {
- llvm_unreachable("index into invalid type");
+ ptrEle = true;
}
}
+ if (ptrEle)
+ return (!subEle) && (numOfCoors == 1);
+ return subEle && (i >= numOfCoors);
}
- static llvm::SmallVector<mlir::Attribute>
- collectIndices(mlir::ConversionPatternRewriter &rewriter,
- mlir::ArrayAttr arrAttr) {
- llvm::SmallVector<mlir::Attribute> attrs;
- for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) {
- if (i->isa<mlir::IntegerAttr>()) {
- attrs.push_back(*i);
+ /// Walk the abstract memory layout and determine if the path traverses any
+ /// array types with unknown shape. Return true iff all the array types have a
+ /// constant shape along the path.
+ bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) const {
+ const std::size_t sz = coors.size();
+ std::size_t i = 0;
+ for (; i < sz; ++i) {
+ mlir::Value nxtOpnd = coors[i];
+ if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
+ if (fir::sequenceWithNonConstantShape(arrTy))
+ return false;
+ i += arrTy.getDimension() - 1;
+ type = arrTy.getEleTy();
+ } else if (auto strTy = type.dyn_cast<fir::RecordType>()) {
+ type = strTy.getType(getFieldNumber(strTy, nxtOpnd));
+ } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) {
+ type = strTy.getType(getIntValue(nxtOpnd));
} else {
- auto fieldName = i->cast<mlir::StringAttr>().getValue();
- ++i;
- auto ty = i->cast<mlir::TypeAttr>().getValue();
- auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName);
- attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index));
+ return true;
}
}
- return attrs;
+ return true;
}
private:
- static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) {
- unsigned result = 1;
- for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>();
- eleTy;
- eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>())
- ++result;
- return result;
- }
-
- static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) {
- auto eleTy = ty.getElementType();
- while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>())
- eleTy = arrTy.getElementType();
- return eleTy;
- }
-};
-
-namespace {
-/// Extract a subobject value from an ssa-value of aggregate type
-struct ExtractValueOpConversion
- : public FIROpAndTypeConversion<fir::ExtractValueOp>,
- public ValueOpCommon {
- using FIROpAndTypeConversion::FIROpAndTypeConversion;
-
mlir::LogicalResult
- doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- auto attrs = collectIndices(rewriter, extractVal.getCoor());
- toRowMajor(attrs, adaptor.getOperands()[0].getType());
- auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs);
- rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(
- extractVal, ty, adaptor.getOperands()[0], position);
- return success();
- }
-};
+ doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands,
+ mlir::Location loc,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Type boxObjTy = coor.getBaseType();
+ assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`");
-/// InsertValue is the generalized instruction for the composition of new
-/// aggregate type values.
-struct InsertValueOpConversion
- : public FIROpAndTypeConversion<fir::InsertValueOp>,
- public ValueOpCommon {
- using FIROpAndTypeConversion::FIROpAndTypeConversion;
+ mlir::Value boxBaseAddr = operands[0];
- mlir::LogicalResult
- doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- auto attrs = collectIndices(rewriter, insertVal.getCoor());
- toRowMajor(attrs, adaptor.getOperands()[0].getType());
- auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs);
- rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
- insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1],
- position);
- return success();
- }
-};
+ // 1. SPECIAL CASE (uses `fir.len_param_index`):
+ // %box = ... : !fir.box<!fir.type<derived{len1:i32}>>
+ // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
+ // %addr = coordinate_of %box, %lenp
+ if (coor.getNumOperands() == 2) {
+ mlir::Operation *coordinateDef =
+ (*coor.getCoor().begin()).getDefiningOp();
+ if (isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) {
+ TODO(loc,
+ "fir.coordinate_of - fir.len_param_index is not supported yet");
+ }
+ }
-/// InsertOnRange inserts a value into a sequence over a range of offsets.
-struct InsertOnRangeOpConversion
- : public FIROpAndTypeConversion<fir::InsertOnRangeOp> {
- using FIROpAndTypeConversion::FIROpAndTypeConversion;
+ // 2. GENERAL CASE:
+ // 2.1. (`fir.array`)
+    //      %box = ... : !fir.box<!fir.array<?xU>>
+ // %idx = ... : index
+ // %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
+ // 2.2 (`fir.derived`)
+    //      %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
+ // %idx = ... : i32
+ // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
+ // 2.3 (`fir.derived` inside `fir.array`)
+    //      %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{
+    //                     field_1:f32, field_2:f32}>>>
+    //      %idx1 = ... : index
+    //      %idx2 = ... : i32
+    //      %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
+ // 2.4. TODO: Either document or disable any other case that the following
+ // implementation might convert.
+ mlir::LLVM::ConstantOp c0 =
+ genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
+ mlir::Value resultAddr =
+ loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
+ boxBaseAddr, rewriter);
+ auto currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
+ mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext());
-  // Increments an array of subscripts in a row major fashion.
- void incrementSubscripts(const SmallVector<uint64_t> &dims,
- SmallVector<uint64_t> &subscripts) const {
- for (size_t i = dims.size(); i > 0; --i) {
- if (++subscripts[i - 1] < dims[i - 1]) {
- return;
+ for (unsigned i = 1, last = operands.size(); i < last; ++i) {
+ if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
+ if (i != 1)
+ TODO(loc, "fir.array nested inside other array and/or derived type");
+ // Applies byte strides from the box. Ignore lower bound from box
+ // since fir.coordinate_of indexes are zero based. Lowering takes care
+ // of lower bound aspects. This both accounts for dynamically sized
+ // types and non contiguous arrays.
+ auto idxTy = lowerTy().indexType();
+ mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0);
+ for (unsigned index = i, lastIndex = i + arrTy.getDimension();
+ index < lastIndex; ++index) {
+ mlir::Value stride =
+ loadStrideFromBox(loc, operands[0], index - i, rewriter);
+ auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy,
+ operands[index], stride);
+ off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off);
+ }
+ auto voidPtrBase =
+ rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr);
+ SmallVector<mlir::Value> args{off};
+ resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy,
+ voidPtrBase, args);
+ i += arrTy.getDimension() - 1;
+ currentObjTy = arrTy.getEleTy();
+ } else if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) {
+ auto recRefTy =
+ mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy));
+ mlir::Value nxtOpnd = operands[i];
+ auto memObj =
+ rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr);
+ llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd};
+ currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
+ auto llvmCurrentObjTy = lowerTy().convertType(currentObjTy);
+ auto gep = rewriter.create<mlir::LLVM::GEPOp>(
+ loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj,
+ args);
+ resultAddr =
+ rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep);
+ } else {
+ fir::emitFatalError(loc, "unexpected type in coordinate_of");
}
- subscripts[i - 1] = 0;
}
+
+ rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr);
+ return success();
}
mlir::LogicalResult
- doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
+ doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty,
+ mlir::ValueRange operands, mlir::Location loc,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Type baseObjectTy = coor.getBaseType();
- llvm::SmallVector<uint64_t> dims;
- auto type = adaptor.getOperands()[0].getType();
+ mlir::Type currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
+ bool hasSubdimension = hasSubDimensions(currentObjTy);
+ bool columnIsDeferred = !hasSubdimension;
- // Iteratively extract the array dimensions from the type.
- while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
- dims.push_back(t.getNumElements());
- type = t.getElementType();
+ if (!supportedCoordinate(currentObjTy, operands.drop_front(1))) {
+ TODO(loc, "unsupported combination of coordinate operands");
}
- SmallVector<uint64_t> lBounds;
- SmallVector<uint64_t> uBounds;
+ const bool hasKnownShape =
+ arraysHaveKnownShape(currentObjTy, operands.drop_front(1));
- // Unzip the upper and lower bound and convert to a row major format.
- mlir::DenseIntElementsAttr coor = range.getCoor();
- auto reversedCoor = llvm::reverse(coor.getValues<int64_t>());
- for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) {
- uBounds.push_back(*i++);
- lBounds.push_back(*i);
+ // If only the column is `?`, then we can simply place the column value in
+ // the 0-th GEP position.
+ if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
+ if (!hasKnownShape) {
+ const unsigned sz = arrTy.getDimension();
+ if (arraysHaveKnownShape(arrTy.getEleTy(),
+ operands.drop_front(1 + sz))) {
+ llvm::ArrayRef<int64_t> shape = arrTy.getShape();
+ bool allConst = true;
+ for (unsigned i = 0; i < sz - 1; ++i) {
+ if (shape[i] < 0) {
+ allConst = false;
+ break;
+ }
+ }
+ if (allConst)
+ columnIsDeferred = true;
+ }
+ }
}
- auto &subscripts = lBounds;
- auto loc = range.getLoc();
- mlir::Value lastOp = adaptor.getOperands()[0];
- mlir::Value insertVal = adaptor.getOperands()[1];
-
- auto i64Ty = rewriter.getI64Type();
- while (subscripts != uBounds) {
- // Convert uint64_t's to Attribute's.
- SmallVector<mlir::Attribute> subscriptAttrs;
- for (const auto &subscript : subscripts)
- subscriptAttrs.push_back(IntegerAttr::get(i64Ty, subscript));
- lastOp = rewriter.create<mlir::LLVM::InsertValueOp>(
- loc, ty, lastOp, insertVal,
- ArrayAttr::get(range.getContext(), subscriptAttrs));
-
- incrementSubscripts(dims, subscripts);
+ if (fir::hasDynamicSize(fir::unwrapSequenceType(currentObjTy))) {
+ mlir::emitError(
+ loc, "fir.coordinate_of with a dynamic element size is unsupported");
+ return failure();
}
- // Convert uint64_t's to Attribute's.
- SmallVector<mlir::Attribute> subscriptAttrs;
- for (const auto &subscript : subscripts)
- subscriptAttrs.push_back(
- IntegerAttr::get(rewriter.getI64Type(), subscript));
- mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs);
-
- rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
- range, ty, lastOp, insertVal,
- ArrayAttr::get(range.getContext(), arrayRef));
-
- return success();
- }
-};
-} // namespace
-
-/// XArrayCoor is the address arithmetic on a dynamically shaped, sliced,
-/// shifted etc. array.
-/// (See the static restriction on coordinate_of.) array_coor determines the
-/// coordinate (location) of a specific element.
-struct XArrayCoorOpConversion
- : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> {
- using FIROpAndTypeConversion::FIROpAndTypeConversion;
+ if (hasKnownShape || columnIsDeferred) {
+ SmallVector<mlir::Value> offs;
+ if (hasKnownShape && hasSubdimension) {
+ mlir::LLVM::ConstantOp c0 =
+ genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
+ offs.push_back(c0);
+ }
+ const std::size_t sz = operands.size();
+ Optional<int> dims;
+ SmallVector<mlir::Value> arrIdx;
+ for (std::size_t i = 1; i < sz; ++i) {
+ mlir::Value nxtOpnd = operands[i];
- mlir::LogicalResult
- doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- auto loc = coor.getLoc();
- mlir::ValueRange operands = adaptor.getOperands();
- unsigned rank = coor.getRank();
- assert(coor.indices().size() == rank);
- assert(coor.shape().empty() || coor.shape().size() == rank);
- assert(coor.shift().empty() || coor.shift().size() == rank);
- assert(coor.slice().empty() || coor.slice().size() == 3 * rank);
- mlir::Type idxTy = lowerTy().indexType();
- mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
- mlir::Value prevExt = one;
- mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
- mlir::Value offset = zero;
- const bool isShifted = !coor.shift().empty();
- const bool isSliced = !coor.slice().empty();
- const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>();
+ if (!currentObjTy) {
+ mlir::emitError(loc, "invalid coordinate/check failed");
+ return failure();
+ }
- auto indexOps = coor.indices().begin();
- auto shapeOps = coor.shape().begin();
- auto shiftOps = coor.shift().begin();
- auto sliceOps = coor.slice().begin();
- // For each dimension of the array, generate the offset calculation.
- for (unsigned i = 0; i < rank;
- ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) {
- mlir::Value index =
- integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]);
- mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy,
- operands[coor.shiftOffset() + i])
- : one;
- mlir::Value step = one;
- bool normalSlice = isSliced;
- // Compute zero based index in dimension i of the element, applying
- // potential triplets and lower bounds.
- if (isSliced) {
- mlir::Value ub = *(sliceOps + 1);
- normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp());
- if (normalSlice)
- step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2));
- }
- auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb);
-      mlir::Value diff =
-          rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step);
- if (normalSlice) {
- mlir::Value sliceLb =
- integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]);
- auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb);
-        diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj);
- }
-      // Update the offset given the stride and the zero based index `diff`
- // that was just computed.
- if (baseIsBoxed) {
- // Use stride in bytes from the descriptor.
- mlir::Value stride =
- loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter);
-        auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride);
- offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
- } else {
- // Use stride computed at last iteration.
-        auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt);
- offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
- // Compute next stride assuming contiguity of the base array
- // (in element number).
- auto nextExt =
- integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]);
- prevExt =
- rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
- }
- }
+ // check if the i-th coordinate relates to an array
+ if (dims.hasValue()) {
+ arrIdx.push_back(nxtOpnd);
+ int dimsLeft = *dims;
+ if (dimsLeft > 1) {
+ dims = dimsLeft - 1;
+ continue;
+ }
+ currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy();
+ // append array range in reverse (FIR arrays are column-major)
+ offs.append(arrIdx.rbegin(), arrIdx.rend());
+ arrIdx.clear();
+ dims.reset();
+ continue;
+ }
+ if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
+ int d = arrTy.getDimension() - 1;
+ if (d > 0) {
+ dims = d;
+ arrIdx.push_back(nxtOpnd);
+ continue;
+ }
+ currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy();
+ offs.push_back(nxtOpnd);
+ continue;
+ }
- // Add computed offset to the base address.
- if (baseIsBoxed) {
- // Working with byte offsets. The base address is read from the fir.box.
- // and need to be casted to i8* to do the pointer arithmetic.
- mlir::Type baseTy =
- getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType());
- mlir::Value base =
- loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter);
- mlir::Type voidPtrTy = getVoidPtrType();
- base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
- llvm::SmallVector<mlir::Value> args{offset};
- auto addr =
- rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
- if (coor.subcomponent().empty()) {
- rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr);
- return success();
- }
- auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
- args.clear();
- args.push_back(zero);
- if (!coor.lenParams().empty()) {
- // If type parameters are present, then we don't want to use a GEPOp
- // as below, as the LLVM struct type cannot be statically defined.
- TODO(loc, "derived type with type parameters");
+ // check if the i-th coordinate relates to a field
+ if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>())
+ currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
+ else if (auto tupTy = currentObjTy.dyn_cast<mlir::TupleType>())
+ currentObjTy = tupTy.getType(getIntValue(nxtOpnd));
+ else
+ currentObjTy = nullptr;
+
+ offs.push_back(nxtOpnd);
}
- // TODO: array offset subcomponents must be converted to LLVM's
- // row-major layout here.
- for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
- args.push_back(operands[i]);
- rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted,
- args);
+ if (dims.hasValue())
+ offs.append(arrIdx.rbegin(), arrIdx.rend());
+ mlir::Value base = operands[0];
+ mlir::Value retval = genGEP(loc, ty, rewriter, base, offs);
+ rewriter.replaceOp(coor, retval);
return success();
}
- // The array was not boxed, so it must be contiguous. offset is therefore an
- // element offset and the base type is kept in the GEP unless the element
- // type size is itself dynamic.
- mlir::Value base;
- if (coor.subcomponent().empty()) {
- // No subcomponent.
- if (!coor.lenParams().empty()) {
- // Type parameters. Adjust element size explicitly.
- auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
- assert(eleTy && "result must be a reference-like type");
- if (fir::characterWithDynamicLen(eleTy)) {
- assert(coor.lenParams().size() == 1);
- auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize(
- eleTy.cast<fir::CharacterType>().getFKind());
- auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8);
- auto scaledBySize =
- rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling);
- auto length =
- integerCast(loc, rewriter, idxTy,
- adaptor.getOperands()[coor.lenParamsOffset()]);
- offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize,
- length);
- } else {
- TODO(loc, "compute size of derived type with type parameters");
- }
- }
- // Cast the base address to a pointer to T.
- base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty,
- adaptor.getOperands()[0]);
- } else {
- // Operand #0 must have a pointer type. For subcomponent slicing, we
- // want to cast away the array type and have a plain struct type.
- mlir::Type ty0 = adaptor.getOperands()[0].getType();
- auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>();
- assert(ptrTy && "expected pointer type");
- mlir::Type eleTy = ptrTy.getElementType();
- while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>())
- eleTy = arrTy.getElementType();
- auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy);
- base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy,
- adaptor.getOperands()[0]);
+ mlir::emitError(loc, "fir.coordinate_of base operand has unsupported type");
+ return failure();
+ }
+};
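
When the base is boxed, doRewriteBox above forms the address by summing
index * byte-stride over the array dimensions, with the strides loaded from
the descriptor (fir.coordinate_of indexes are zero based). A minimal sketch of
that arithmetic (hypothetical strides, not the pattern itself):

#include <cstdint>
#include <cstdio>
#include <vector>

// Sum of index[d] * byteStride[d], as emitted by the loop over operands in
// doRewriteBox for a fir.array nested in a fir.box.
static int64_t byteOffset(const std::vector<int64_t> &indexes,
                          const std::vector<int64_t> &byteStrides) {
  int64_t off = 0;
  for (size_t d = 0; d < indexes.size(); ++d)
    off += indexes[d] * byteStrides[d];
  return off;
}

int main() {
  // Hypothetical contiguous 10x5 array of f32 (4-byte elements), column-major:
  // stride of dim 0 is 4 bytes, stride of dim 1 is 40 bytes.
  std::printf("%lld\n", (long long)byteOffset({3, 2}, {4, 40})); // prints 92
}
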
+
+/// Convert `fir.field_index`. The conversion depends on whether the size of
+/// the record is static or dynamic.
+struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> {
+ using FIROpConversion::FIROpConversion;
+
+ // NB: most field references should be resolved by this point
+ mlir::LogicalResult
+ matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ auto recTy = field.getOnType().cast<fir::RecordType>();
+ unsigned index = recTy.getFieldIndex(field.getFieldId());
+
+ if (!fir::hasDynamicSize(recTy)) {
+ // Derived type has compile-time constant layout. Return index of the
+ // component type in the parent type (to be used in GEP).
+ rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
+ field.getLoc(), rewriter, index)});
+ return success();
}
- SmallVector<mlir::Value> args = {offset};
- for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
- args.push_back(operands[i]);
- rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args);
+
+    // Derived type has a runtime-dependent layout. Call the compiler
+    // generated function to determine the byte offset of the field at runtime.
+    // This returns a non-constant.
+ FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
+ field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
+ NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
+ NamedAttribute fieldAttr = rewriter.getNamedAttr(
+ "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
+ rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
+ field, lowerTy().offsetType(), adaptor.getOperands(),
+ llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
return success();
}
+
+  // Reconstruct the name of the compiler-generated method that calculates the
+  // field offset.
+ inline static std::string getOffsetMethodName(fir::RecordType recTy,
+ llvm::StringRef field) {
+ return recTy.getName().str() + "P." + field.str() + ".offset";
+ }
};
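
When the record layout is only known at runtime, the pattern above emits a
call to a compiler-generated offset function whose name is derived from the
record and field names. A small sketch of how that symbol is assembled (the
record name below is made up for illustration):

#include <iostream>
#include <string>

// Same string shape as getOffsetMethodName above: "<record>P.<field>.offset".
static std::string getOffsetMethodName(const std::string &recordName,
                                       const std::string &field) {
  return recordName + "P." + field + ".offset";
}

int main() {
  // Hypothetical mangled record name and field.
  std::cout << getOffsetMethodName("_QMmTt", "len1") << "\n";
  // prints: _QMmTtP.len1.offset
}
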
-//
-// Primitive operations on Complex types
-//
+/// Convert `fir.end`
+struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
+ using FIROpConversion::FIROpConversion;
-/// Generate inline code for complex addition/subtraction
-template <typename LLVMOP, typename OPTY>
-static mlir::LLVM::InsertValueOp
-complexSum(OPTY sumop, mlir::ValueRange opnds,
- mlir::ConversionPatternRewriter &rewriter,
- fir::LLVMTypeConverter &lowering) {
- mlir::Value a = opnds[0];
- mlir::Value b = opnds[1];
- auto loc = sumop.getLoc();
- auto ctx = sumop.getContext();
- auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
- auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
- mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType()));
- mlir::Type ty = lowering.convertType(sumop.getType());
- auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
- auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
- auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
- auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
- auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1);
- auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1);
- auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
- auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0);
- return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1);
-}
+ mlir::LogicalResult
+ matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ TODO(firEnd.getLoc(), "fir.end codegen");
+ return failure();
+ }
+};
-namespace {
-struct AddcOpConversion : public FIROpConversion<fir::AddcOp> {
+/// Lower `fir.gentypedesc` to a global constant.
+struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
- matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor,
+ matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
- // given: (x + iy) + (x' + iy')
- // result: (x + x') + i(y + y')
- auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(),
- rewriter, lowerTy());
- rewriter.replaceOp(addc, r.getResult());
+ TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
+ return failure();
+ }
+};
+
+/// Lower `fir.has_value` operation to `llvm.return` operation.
+struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
+ using FIROpConversion::FIROpConversion;
+
+ mlir::LogicalResult
+ matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(op, adaptor.getOperands());
return success();
}
};
-struct SubcOpConversion : public FIROpConversion<fir::SubcOp> {
+/// Lower `fir.global` operation to `llvm.global` operation.
+/// `fir.insert_on_range` operations are replaced with a constant dense
+/// attribute if they are applied on the full range.
+struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
- matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor,
+ matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
- // given: (x + iy) - (x' + iy')
- // result: (x - x') + i(y - y')
- auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(),
- rewriter, lowerTy());
- rewriter.replaceOp(subc, r.getResult());
+ auto tyAttr = convertType(global.getType());
+ if (global.getType().isa<fir::BoxType>())
+ tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType();
+ auto loc = global.getLoc();
+ mlir::Attribute initAttr{};
+ if (global.getInitVal())
+ initAttr = global.getInitVal().getValue();
+ auto linkage = convertLinkage(global.getLinkName());
+ auto isConst = global.getConstant().hasValue();
+ auto g = rewriter.create<mlir::LLVM::GlobalOp>(
+ loc, tyAttr, isConst, linkage, global.getSymName(), initAttr);
+ auto &gr = g.getInitializerRegion();
+ rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end());
+ if (!gr.empty()) {
+ // Replace insert_on_range with a constant dense attribute if the
+ // initialization is on the full range.
+ auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>();
+ for (auto insertOp : insertOnRangeOps) {
+ if (isFullRange(insertOp.getCoor(), insertOp.getType())) {
+ auto seqTyAttr = convertType(insertOp.getType());
+ auto *op = insertOp.getVal().getDefiningOp();
+ auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op);
+ if (!constant) {
+ auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op);
+ if (!convertOp)
+ continue;
+ constant = cast<mlir::arith::ConstantOp>(
+ convertOp.getValue().getDefiningOp());
+ }
+ mlir::Type vecType = mlir::VectorType::get(
+ insertOp.getType().getShape(), constant.getType());
+ auto denseAttr = mlir::DenseElementsAttr::get(
+ vecType.cast<ShapedType>(), constant.getValue());
+ rewriter.setInsertionPointAfter(insertOp);
+ rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(
+ insertOp, seqTyAttr, denseAttr);
+ }
+ }
+ }
+ rewriter.eraseOp(global);
return success();
}
-};
-/// Inlined complex multiply
-struct MulcOpConversion : public FIROpConversion<fir::MulcOp> {
- using FIROpConversion::FIROpConversion;
+ bool isFullRange(mlir::DenseIntElementsAttr indexes,
+ fir::SequenceType seqTy) const {
+ auto extents = seqTy.getShape();
+ if (indexes.size() / 2 != static_cast<int64_t>(extents.size()))
+ return false;
+ auto cur_index = indexes.value_begin<int64_t>();
+ for (unsigned i = 0; i < indexes.size(); i += 2) {
+ if (*(cur_index++) != 0)
+ return false;
+ if (*(cur_index++) != extents[i / 2] - 1)
+ return false;
+ }
+ return true;
+ }
- mlir::LogicalResult
- matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- // TODO: Can we use a call to __muldc3 ?
- // given: (x + iy) * (x' + iy')
- // result: (xx'-yy')+i(xy'+yx')
- mlir::Value a = adaptor.getOperands()[0];
- mlir::Value b = adaptor.getOperands()[1];
- auto loc = mulc.getLoc();
- auto *ctx = mulc.getContext();
- auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
- auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
- mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType()));
- mlir::Type ty = convertType(mulc.getType());
- auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
- auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
- auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
- auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
- auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
- auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
- auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
- auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx);
- auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
- auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy);
- auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
- auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
- auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
- rewriter.replaceOp(mulc, r0.getResult());
- return success();
+  // TODO: String comparison should be avoided. Replace linkName with an
+ // enumeration.
+ mlir::LLVM::Linkage convertLinkage(Optional<StringRef> optLinkage) const {
+ if (optLinkage.hasValue()) {
+ auto name = optLinkage.getValue();
+ if (name == "internal")
+ return mlir::LLVM::Linkage::Internal;
+ if (name == "linkonce")
+ return mlir::LLVM::Linkage::Linkonce;
+ if (name == "common")
+ return mlir::LLVM::Linkage::Common;
+ if (name == "weak")
+ return mlir::LLVM::Linkage::Weak;
+ }
+ return mlir::LLVM::Linkage::External;
}
};
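
The isFullRange check above decides whether an insert_on_range initializer can
be folded into a dense constant: each (lower, upper) index pair must span the
whole extent of its dimension. A standalone sketch of the same test on plain
integers (not the pattern itself):

#include <cstdint>
#include <cstdio>
#include <vector>

// `indexes` holds [lb0, ub0, lb1, ub1, ...]; `extents` holds the array shape.
static bool isFullRange(const std::vector<int64_t> &indexes,
                        const std::vector<int64_t> &extents) {
  if (indexes.size() / 2 != extents.size())
    return false;
  for (size_t i = 0; i < indexes.size(); i += 2)
    if (indexes[i] != 0 || indexes[i + 1] != extents[i / 2] - 1)
      return false;
  return true;
}

int main() {
  std::printf("%d\n", isFullRange({0, 9, 0, 4}, {10, 5})); // 1: full range
  std::printf("%d\n", isFullRange({0, 9, 1, 4}, {10, 5})); // 0: partial range
}
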
-/// Inlined complex division
-struct DivcOpConversion : public FIROpConversion<fir::DivcOp> {
+/// `fir.load` --> `llvm.load`
+struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
- matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor,
+ matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
- // TODO: Can we use a call to __divdc3 instead?
- // Just generate inline code for now.
- // given: (x + iy) / (x' + iy')
- // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
- mlir::Value a = adaptor.getOperands()[0];
- mlir::Value b = adaptor.getOperands()[1];
- auto loc = divc.getLoc();
- auto *ctx = divc.getContext();
- auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
- auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
- mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
- mlir::Type ty = convertType(divc.getType());
- auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
- auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
- auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
- auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
- auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
- auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
- auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
- auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
- auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
- auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
- auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
- auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
- auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
- auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
- auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
- auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
- auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
- auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
- rewriter.replaceOp(divc, r0.getResult());
+    // fir.box is a special case because it is considered an SSA value in
+    // FIR, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
+    // and fir.box end up being the same LLVM type, and loading a
+    // fir.ref<fir.box> is actually a no-op in LLVM.
+ if (load.getType().isa<fir::BoxType>()) {
+ rewriter.replaceOp(load, adaptor.getOperands()[0]);
+ } else {
+ mlir::Type ty = convertType(load.getType());
+ ArrayRef<NamedAttribute> at = load->getAttrs();
+ rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
+ load, ty, adaptor.getOperands(), at);
+ }
return success();
}
};
-/// Inlined complex negation
-struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
+/// Lower `fir.no_reassoc` to LLVM IR dialect.
+/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
+/// math flags?
+struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
- matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
+ matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
- // given: -(x + iy)
- // result: -x - iy
- auto *ctxt = neg.getContext();
- auto eleTy = convertType(getComplexEleTy(neg.getType()));
- auto ty = convertType(neg.getType());
- auto loc = neg.getLoc();
- mlir::Value o0 = adaptor.getOperands()[0];
- auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
- auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
- auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
- auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
- auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
- auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
- auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
- rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
+ rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
return success();
}
};
-/// Conversion pattern for operations that must be dead. The information in
-/// these operations is used by other operations. At this point they should not
-/// have any more uses.
-/// These operations are normally dead after the pre-codegen pass.
-template <typename FromOp>
-struct MustBeDeadConversion : public FIROpConversion<FromOp> {
- explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering)
- : FIROpConversion<FromOp>(lowering) {}
- using OpAdaptor = typename FromOp::Adaptor;
+static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
+ Optional<mlir::ValueRange> destOps,
+ mlir::ConversionPatternRewriter &rewriter,
+ mlir::Block *newBlock) {
+ if (destOps.hasValue())
+ rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
+ newBlock, mlir::ValueRange());
+ else
+ rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
+}
+
+template <typename A, typename B>
+static void genBrOp(A caseOp, mlir::Block *dest, Optional<B> destOps,
+ mlir::ConversionPatternRewriter &rewriter) {
+ if (destOps.hasValue())
+ rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
+ dest);
+ else
+ rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
+}
+
+static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
+ mlir::Block *dest,
+ Optional<mlir::ValueRange> destOps,
+ mlir::ConversionPatternRewriter &rewriter) {
+ auto *thisBlock = rewriter.getInsertionBlock();
+ auto *newBlock = createBlock(rewriter, dest);
+ rewriter.setInsertionPointToEnd(thisBlock);
+ genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
+ rewriter.setInsertionPointToEnd(newBlock);
+}
+
+/// Conversion of `fir.select_case`
+///
+/// The `fir.select_case` operation is converted to an if-then-else ladder.
+/// Depending on the case condition type, one or several comparisons and
+/// conditional branches can be generated.
+///
+/// A point value case such as `case(4)`, a lower bound case such as
+/// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
+/// simple comparison between the selector value and the constant value in the
+/// case. The block associated with the case condition is then executed if
+/// the comparison succeeds; otherwise it branches to the next block with the
+/// comparison for the next case condition.
+///
+/// A closed interval case condition such as `case(7:10)` is converted with a
+/// first comparison and conditional branching for the lower bound. If
+/// successful, it branches to a second block with the comparison for the
+/// upper bound in the same case condition.
+///
+/// TODO: lowering of CHARACTER type cases is not handled yet.
+struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
+ using FIROpConversion::FIROpConversion;
mlir::LogicalResult
- matchAndRewrite(FromOp op, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const final {
- if (!op->getUses().empty())
- return rewriter.notifyMatchFailure(op, "op must be dead");
- rewriter.eraseOp(op);
+ matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ unsigned conds = caseOp.getNumConditions();
+ llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
+ // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
+ auto ty = caseOp.getSelector().getType();
+ if (ty.isa<fir::CharacterType>()) {
+ TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
+ return failure();
+ }
+ mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
+ auto loc = caseOp.getLoc();
+ for (unsigned t = 0; t != conds; ++t) {
+ mlir::Block *dest = caseOp.getSuccessor(t);
+ llvm::Optional<mlir::ValueRange> destOps =
+ caseOp.getSuccessorOperands(adaptor.getOperands(), t);
+ llvm::Optional<mlir::ValueRange> cmpOps =
+ *caseOp.getCompareOperands(adaptor.getOperands(), t);
+ mlir::Value caseArg = *(cmpOps.getValue().begin());
+ mlir::Attribute attr = cases[t];
+ if (attr.isa<fir::PointIntervalAttr>()) {
+ auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
+ loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
+ genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
+ continue;
+ }
+ if (attr.isa<fir::LowerBoundAttr>()) {
+ auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
+ loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
+ genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
+ continue;
+ }
+ if (attr.isa<fir::UpperBoundAttr>()) {
+ auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
+ loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
+ genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
+ continue;
+ }
+ if (attr.isa<fir::ClosedIntervalAttr>()) {
+ auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
+ loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
+ auto *thisBlock = rewriter.getInsertionBlock();
+ auto *newBlock1 = createBlock(rewriter, dest);
+ auto *newBlock2 = createBlock(rewriter, dest);
+ rewriter.setInsertionPointToEnd(thisBlock);
+ rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
+ rewriter.setInsertionPointToEnd(newBlock1);
+ mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1);
+ auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
+ loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
+ genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
+ rewriter.setInsertionPointToEnd(newBlock2);
+ continue;
+ }
+ assert(attr.isa<mlir::UnitAttr>());
+ assert((t + 1 == conds) && "unit must be last");
+ genBrOp(caseOp, dest, destOps, rewriter);
+ }
return success();
}
};
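
As a way to read the ladder construction above, the following sketch models
the generated comparisons on plain integers (labels are made up); each case
kind maps to the icmp predicates used by the pattern:

#include <cstdio>

enum class CaseKind { Point, LowerBound, UpperBound, ClosedInterval, Default };

struct Case {
  CaseKind kind;
  int lo; // point value / lower bound / interval lower bound
  int hi; // upper bound / interval upper bound (unused otherwise)
  const char *label;
};

static const char *selectCase(int selector, const Case *cases, int n) {
  for (int t = 0; t < n; ++t) {
    const Case &c = cases[t];
    switch (c.kind) {
    case CaseKind::Point:          // icmp eq selector, caseArg
      if (selector == c.lo) return c.label;
      break;
    case CaseKind::LowerBound:     // icmp sle caseArg, selector
      if (c.lo <= selector) return c.label;
      break;
    case CaseKind::UpperBound:     // icmp sle selector, caseArg
      if (selector <= c.hi) return c.label;
      break;
    case CaseKind::ClosedInterval: // two sle checks across two blocks
      if (c.lo <= selector && selector <= c.hi) return c.label;
      break;
    case CaseKind::Default:        // unconditional branch, must be last
      return c.label;
    }
  }
  return "unreachable";
}

int main() {
  Case cases[] = {{CaseKind::Point, 4, 0, "case(4)"},
                  {CaseKind::ClosedInterval, 7, 10, "case(7:10)"},
                  {CaseKind::Default, 0, 0, "case default"}};
  std::printf("%s\n", selectCase(8, cases, 3)); // prints: case(7:10)
}
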
-struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> {
- using MustBeDeadConversion::MustBeDeadConversion;
-};
+template <typename OP>
+static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
+ typename OP::Adaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) {
+ unsigned conds = select.getNumConditions();
+ auto cases = select.getCases().getValue();
+ mlir::Value selector = adaptor.getSelector();
+ auto loc = select.getLoc();
+ assert(conds > 0 && "select must have cases");
-struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> {
- using MustBeDeadConversion::MustBeDeadConversion;
-};
+ llvm::SmallVector<mlir::Block *> destinations;
+ llvm::SmallVector<mlir::ValueRange> destinationsOperands;
+ mlir::Block *defaultDestination;
+ mlir::ValueRange defaultOperands;
+ llvm::SmallVector<int32_t> caseValues;
-struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> {
- using MustBeDeadConversion::MustBeDeadConversion;
-};
+ for (unsigned t = 0; t != conds; ++t) {
+ mlir::Block *dest = select.getSuccessor(t);
+ auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
+ const mlir::Attribute &attr = cases[t];
+ if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
+ destinations.push_back(dest);
+ destinationsOperands.push_back(destOps.hasValue() ? *destOps
+ : ValueRange());
+ caseValues.push_back(intAttr.getInt());
+ continue;
+ }
+ assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
+ assert((t + 1 == conds) && "unit must be last");
+ defaultDestination = dest;
+ defaultOperands = destOps.hasValue() ? *destOps : ValueRange();
+ }
-struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> {
- using MustBeDeadConversion::MustBeDeadConversion;
-};
+ // LLVM::SwitchOp takes a i32 type for the selector.
+ if (select.getSelector().getType() != rewriter.getI32Type())
+ selector =
+ rewriter.create<LLVM::TruncOp>(loc, rewriter.getI32Type(), selector);
-/// `fir.is_present` -->
-/// ```
-/// %0 = llvm.mlir.constant(0 : i64)
-/// %1 = llvm.ptrtoint %0
-/// %2 = llvm.icmp "ne" %1, %0 : i64
-/// ```
-struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
+ select, selector,
+ /*defaultDestination=*/defaultDestination,
+ /*defaultOperands=*/defaultOperands,
+ /*caseValues=*/caseValues,
+ /*caseDestinations=*/destinations,
+ /*caseOperands=*/destinationsOperands,
+ /*branchWeights=*/ArrayRef<int32_t>());
+}
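
Unlike fir.select_case, fir.select and fir.select_rank carry plain integer
cases, so the helper above can target a single llvm.switch; a wider selector
is first truncated to the i32 type the switch expects. A trivial sketch of
that shape (block names invented):

#include <cstdint>
#include <cstdio>

static const char *dispatch(int64_t selector) {
  int32_t sel32 = static_cast<int32_t>(selector); // models LLVM::TruncOp
  switch (sel32) {
  case 1:  return "block ^bb1";
  case 2:  return "block ^bb2";
  default: return "default block";
  }
}

int main() { std::printf("%s\n", dispatch(2)); } // prints: block ^bb2
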
+
+/// Conversion of fir::SelectOp to an llvm.switch operation
+struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
- matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor,
+ matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
- mlir::Type idxTy = lowerTy().indexType();
- mlir::Location loc = isPresent.getLoc();
- auto ptr = adaptor.getOperands()[0];
-
- if (isPresent.getVal().getType().isa<fir::BoxCharType>()) {
- auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>();
- assert(!structTy.isOpaque() && !structTy.getBody().empty());
-
- mlir::Type ty = structTy.getBody()[0];
- mlir::MLIRContext *ctx = isPresent.getContext();
- auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
- ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0);
- }
- mlir::LLVM::ConstantOp c0 =
- genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0);
- auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr);
- rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
- isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0);
-
+ selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
return success();
}
};
-/// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
-/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
-/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
-/// element is the length of the character buffer (`#n`).
-struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
+/// conversion of fir::SelectRankOp to an llvm.switch operation
+struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
- matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
+ matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
- mlir::ValueRange operands = adaptor.getOperands();
- MLIRContext *ctx = emboxChar.getContext();
-
- mlir::Value charBuffer = operands[0];
- mlir::Value charBufferLen = operands[1];
-
- mlir::Location loc = emboxChar.getLoc();
- mlir::Type llvmStructTy = convertType(emboxChar.getType());
- auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);
-
- mlir::Type lenTy =
- llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
- mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);
-
- auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
- auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
- auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
- loc, llvmStructTy, llvmStruct, charBuffer, c0);
- rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
- emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);
-
+ selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
return success();
}
};
-} // namespace
-
-/// Construct an `llvm.extractvalue` instruction. It will return value at
-/// element \p x from \p tuple.
-static mlir::LLVM::ExtractValueOp
-genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
- mlir::ConversionPatternRewriter &rewriter,
- mlir::MLIRContext *ctx, int x) {
- auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
- auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
- return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
-}
-namespace {
-/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
-/// boxchar.
-struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
+/// Lower `fir.select_type` to LLVM IR dialect.
+struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
- matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
+ matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
- mlir::Value boxChar = adaptor.getOperands()[0];
- mlir::Location loc = boxChar.getLoc();
- mlir::MLIRContext *ctx = boxChar.getContext();
- mlir::Type returnValTy = boxCharLen.getResult().getType();
+ mlir::emitError(select.getLoc(),
+ "fir.select_type should have already been converted");
+ return failure();
+ }
+};
- constexpr int boxcharLenIdx = 1;
- mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
- loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
- mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
- rewriter.replaceOp(boxCharLen, lenAfterCast);
+/// `fir.store` --> `llvm.store`
+struct StoreOpConversion : public FIROpConversion<fir::StoreOp> {
+ using FIROpConversion::FIROpConversion;
+ mlir::LogicalResult
+ matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ if (store.getValue().getType().isa<fir::BoxType>()) {
+ // fir.box value is actually in memory, load it first before storing it.
+ mlir::Location loc = store.getLoc();
+ mlir::Type boxPtrTy = adaptor.getOperands()[0].getType();
+ auto val = rewriter.create<mlir::LLVM::LoadOp>(
+ loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(),
+ adaptor.getOperands()[0]);
+ rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
+ store, val, adaptor.getOperands()[1]);
+ } else {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
+ store, adaptor.getOperands()[0], adaptor.getOperands()[1]);
+ }
return success();
}
};
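
The box branch above copies a whole descriptor rather than a single scalar. A minimal stand-alone C++ sketch of that idea (the Descriptor type below is hypothetical, not flang's actual runtime descriptor):

  #include <cstddef>

  // Hypothetical stand-in for a lowered fir.box: a plain descriptor struct.
  struct Descriptor {
    void *baseAddr;
    std::size_t byteSize;
  };

  // Storing a "box" value copies the descriptor: load the source descriptor,
  // then store it into the destination -- the same load + store sequence the
  // pattern emits at the LLVM dialect level.
  void storeBox(Descriptor *dst, const Descriptor *src) {
    Descriptor tmp = *src; // corresponds to the llvm.load of the box
    *dst = tmp;            // corresponds to the llvm.store
  }
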
+namespace {
+
/// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for
/// the character buffer and one for the buffer length.
struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
@@ -2891,374 +2949,318 @@ struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
}
};
-/// Convert `fir.field_index`. The conversion depends on whether the size of
-/// the record is static or dynamic.
-struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> {
+/// convert to LLVM IR dialect `undef`
+struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
using FIROpConversion::FIROpConversion;
- // NB: most field references should be resolved by this point
mlir::LogicalResult
- matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor,
+ matchAndRewrite(fir::UndefOp undef, OpAdaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
- auto recTy = field.getOnType().cast<fir::RecordType>();
- unsigned index = recTy.getFieldIndex(field.getFieldId());
-
- if (!fir::hasDynamicSize(recTy)) {
- // Derived type has compile-time constant layout. Return index of the
- // component type in the parent type (to be used in GEP).
- rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
- field.getLoc(), rewriter, index)});
- return success();
- }
-
- // Derived type has compile-time constant layout. Call the compiler
- // generated function to determine the byte offset of the field at runtime.
- // This returns a non-constant.
- FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
- field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
- NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
- NamedAttribute fieldAttr = rewriter.getNamedAttr(
- "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
- rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
- field, lowerTy().offsetType(), adaptor.getOperands(),
- llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
+ rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
+ undef, convertType(undef.getType()));
return success();
}
-
- // Re-Construct the name of the compiler generated method that calculates the
- // offset
- inline static std::string getOffsetMethodName(fir::RecordType recTy,
- llvm::StringRef field) {
- return recTy.getName().str() + "P." + field.str() + ".offset";
- }
};
-/// Convert to (memory) reference to a reference to a subobject.
-/// The coordinate_of op is a Swiss army knife operation that can be used on
-/// (memory) references to records, arrays, complex, etc. as well as boxes.
-/// With unboxed arrays, there is the restriction that the array have a static
-/// shape in all but the last column.
-struct CoordinateOpConversion
- : public FIROpAndTypeConversion<fir::CoordinateOp> {
- using FIROpAndTypeConversion::FIROpAndTypeConversion;
+struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
+ using FIROpConversion::FIROpConversion;
mlir::LogicalResult
- doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const override {
- mlir::ValueRange operands = adaptor.getOperands();
-
- mlir::Location loc = coor.getLoc();
- mlir::Value base = operands[0];
- mlir::Type baseObjectTy = coor.getBaseType();
- mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
- assert(objectTy && "fir.coordinate_of expects a reference type");
-
- // Complex type - basically, extract the real or imaginary part
- if (fir::isa_complex(objectTy)) {
- mlir::LLVM::ConstantOp c0 =
- genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
- SmallVector<mlir::Value> offs = {c0, operands[1]};
- mlir::Value gep = genGEP(loc, ty, rewriter, base, offs);
- rewriter.replaceOp(coor, gep);
- return success();
+ matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ mlir::Type ty = convertType(zero.getType());
+ if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
+ } else if (ty.isa<mlir::IntegerType>()) {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
+ zero, ty, mlir::IntegerAttr::get(zero.getType(), 0));
+ } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
+ zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0));
+ } else {
+ // TODO: create ConstantAggregateZero for FIR aggregate/array types.
+ return rewriter.notifyMatchFailure(
+ zero,
+ "conversion of fir.zero with aggregate type not implemented yet");
}
-
- // Boxed type - get the base pointer from the box
- if (baseObjectTy.dyn_cast<fir::BoxType>())
- return doRewriteBox(coor, ty, operands, loc, rewriter);
-
- // Reference or pointer type
- if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType>())
- return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter);
-
- return rewriter.notifyMatchFailure(
- coor, "fir.coordinate_of base operand has unsupported type");
+ return success();
}
+};
- unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) const {
- return fir::hasDynamicSize(ty)
- ? op.getDefiningOp()
- ->getAttrOfType<mlir::IntegerAttr>("field")
- .getInt()
- : getIntValue(op);
+/// `fir.unreachable` --> `llvm.unreachable`
+struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
+ using FIROpConversion::FIROpConversion;
+
+ mlir::LogicalResult
+ matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
+ return success();
}
+};
- int64_t getIntValue(mlir::Value val) const {
- assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value");
- mlir::Operation *defop = val.getDefiningOp();
+/// `fir.is_present` -->
+/// ```
+/// %0 = llvm.mlir.constant(0 : i64)
+/// %1 = llvm.ptrtoint %0
+/// %2 = llvm.icmp "ne" %1, %0 : i64
+/// ```
+struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> {
+ using FIROpConversion::FIROpConversion;
- if (auto constOp = dyn_cast<mlir::arith::ConstantIntOp>(defop))
- return constOp.value();
- if (auto llConstOp = dyn_cast<mlir::LLVM::ConstantOp>(defop))
- if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>())
- return attr.getValue().getSExtValue();
- fir::emitFatalError(val.getLoc(), "must be a constant");
- }
+ mlir::LogicalResult
+ matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ mlir::Type idxTy = lowerTy().indexType();
+ mlir::Location loc = isPresent.getLoc();
+ auto ptr = adaptor.getOperands()[0];
- bool hasSubDimensions(mlir::Type type) const {
- return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>();
- }
+ if (isPresent.getVal().getType().isa<fir::BoxCharType>()) {
+ auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>();
+ assert(!structTy.isOpaque() && !structTy.getBody().empty());
- /// Check whether this form of `!fir.coordinate_of` is supported. These
- /// additional checks are required, because we are not yet able to convert
- /// all valid forms of `!fir.coordinate_of`.
- /// TODO: Either implement the unsupported cases or extend the verifier
- /// in FIROps.cpp instead.
- bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) const {
- const std::size_t numOfCoors = coors.size();
- std::size_t i = 0;
- bool subEle = false;
- bool ptrEle = false;
- for (; i < numOfCoors; ++i) {
- mlir::Value nxtOpnd = coors[i];
- if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
- subEle = true;
- i += arrTy.getDimension() - 1;
- type = arrTy.getEleTy();
- } else if (auto recTy = type.dyn_cast<fir::RecordType>()) {
- subEle = true;
- type = recTy.getType(getFieldNumber(recTy, nxtOpnd));
- } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) {
- subEle = true;
- type = tupTy.getType(getIntValue(nxtOpnd));
- } else {
- ptrEle = true;
- }
+ mlir::Type ty = structTy.getBody()[0];
+ mlir::MLIRContext *ctx = isPresent.getContext();
+ auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
+ ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0);
}
- if (ptrEle)
- return (!subEle) && (numOfCoors == 1);
- return subEle && (i >= numOfCoors);
- }
+ mlir::LLVM::ConstantOp c0 =
+ genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0);
+ auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr);
+ rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
+ isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0);
- /// Walk the abstract memory layout and determine if the path traverses any
- /// array types with unknown shape. Return true iff all the array types have a
- /// constant shape along the path.
- bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) const {
- const std::size_t sz = coors.size();
- std::size_t i = 0;
- for (; i < sz; ++i) {
- mlir::Value nxtOpnd = coors[i];
- if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
- if (fir::sequenceWithNonConstantShape(arrTy))
- return false;
- i += arrTy.getDimension() - 1;
- type = arrTy.getEleTy();
- } else if (auto strTy = type.dyn_cast<fir::RecordType>()) {
- type = strTy.getType(getFieldNumber(strTy, nxtOpnd));
- } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) {
- type = strTy.getType(getIntValue(nxtOpnd));
- } else {
- return true;
- }
- }
- return true;
+ return success();
}
+};
-private:
- mlir::LogicalResult
- doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands,
- mlir::Location loc,
- mlir::ConversionPatternRewriter &rewriter) const {
- mlir::Type boxObjTy = coor.getBaseType();
- assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`");
+/// Create a value signaling an absent optional argument in a call, e.g.
+/// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>`
+struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
+ using FIROpConversion::FIROpConversion;
- mlir::Value boxBaseAddr = operands[0];
+ mlir::LogicalResult
+ matchAndRewrite(fir::AbsentOp absent, OpAdaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ mlir::Type ty = convertType(absent.getType());
+ mlir::Location loc = absent.getLoc();
- // 1. SPECIAL CASE (uses `fir.len_param_index`):
- // %box = ... : !fir.box<!fir.type<derived{len1:i32}>>
- // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
- // %addr = coordinate_of %box, %lenp
- if (coor.getNumOperands() == 2) {
- mlir::Operation *coordinateDef =
- (*coor.getCoor().begin()).getDefiningOp();
- if (isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) {
- TODO(loc,
- "fir.coordinate_of - fir.len_param_index is not supported yet");
- }
+ if (absent.getType().isa<fir::BoxCharType>()) {
+ auto structTy = ty.cast<mlir::LLVM::LLVMStructType>();
+ assert(!structTy.isOpaque() && !structTy.getBody().empty());
+ auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
+ auto nullField =
+ rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]);
+ mlir::MLIRContext *ctx = absent.getContext();
+ auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
+ rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
+ absent, ty, undefStruct, nullField, c0);
+ } else {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty);
}
+ return success();
+ }
+};
- // 2. GENERAL CASE:
- // 2.1. (`fir.array`)
- // %box = ... : !fix.box<!fir.array<?xU>>
- // %idx = ... : index
- // %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
- // 2.2 (`fir.derived`)
- // %box = ... : !fix.box<!fir.type<derived_type{field_1:i32}>>
- // %idx = ... : i32
- // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
- // 2.3 (`fir.derived` inside `fir.array`)
- // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, field_2:f32}>>>
- // %idx1 = ... : index
- // %idx2 = ... : i32
- // %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
- // 2.4. TODO: Either document or disable any other case that the following
- // implementation might convert.
- mlir::LLVM::ConstantOp c0 =
- genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
- mlir::Value resultAddr =
- loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
- boxBaseAddr, rewriter);
- auto currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
- mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext());
+//
+// Primitive operations on Complex types
+//
- for (unsigned i = 1, last = operands.size(); i < last; ++i) {
- if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
- if (i != 1)
- TODO(loc, "fir.array nested inside other array and/or derived type");
- // Applies byte strides from the box. Ignore lower bound from box
- // since fir.coordinate_of indexes are zero based. Lowering takes care
- // of lower bound aspects. This both accounts for dynamically sized
- // types and non contiguous arrays.
- auto idxTy = lowerTy().indexType();
- mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0);
- for (unsigned index = i, lastIndex = i + arrTy.getDimension();
- index < lastIndex; ++index) {
- mlir::Value stride =
- loadStrideFromBox(loc, operands[0], index - i, rewriter);
- auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy,
- operands[index], stride);
- off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off);
- }
- auto voidPtrBase =
- rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr);
- SmallVector<mlir::Value> args{off};
- resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy,
- voidPtrBase, args);
- i += arrTy.getDimension() - 1;
- currentObjTy = arrTy.getEleTy();
- } else if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) {
- auto recRefTy =
- mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy));
- mlir::Value nxtOpnd = operands[i];
- auto memObj =
- rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr);
- llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd};
- currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
- auto llvmCurrentObjTy = lowerTy().convertType(currentObjTy);
- auto gep = rewriter.create<mlir::LLVM::GEPOp>(
- loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj,
- args);
- resultAddr =
- rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep);
- } else {
- fir::emitFatalError(loc, "unexpected type in coordinate_of");
- }
- }
+/// Generate inline code for complex addition/subtraction
+template <typename LLVMOP, typename OPTY>
+static mlir::LLVM::InsertValueOp
+complexSum(OPTY sumop, mlir::ValueRange opnds,
+ mlir::ConversionPatternRewriter &rewriter,
+ fir::LLVMTypeConverter &lowering) {
+ mlir::Value a = opnds[0];
+ mlir::Value b = opnds[1];
+ auto loc = sumop.getLoc();
+ auto ctx = sumop.getContext();
+ auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
+ auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
+ mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType()));
+ mlir::Type ty = lowering.convertType(sumop.getType());
+ auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
+ auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
+ auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
+ auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
+ auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1);
+ auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1);
+ auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
+ auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0);
+ return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1);
+}
+} // namespace
- rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr);
+namespace {
+struct AddcOpConversion : public FIROpConversion<fir::AddcOp> {
+ using FIROpConversion::FIROpConversion;
+
+ mlir::LogicalResult
+ matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ // given: (x + iy) + (x' + iy')
+ // result: (x + x') + i(y + y')
+ auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(),
+ rewriter, lowerTy());
+ rewriter.replaceOp(addc, r.getResult());
return success();
}
+};
+
+struct SubcOpConversion : public FIROpConversion<fir::SubcOp> {
+ using FIROpConversion::FIROpConversion;
mlir::LogicalResult
- doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty,
- mlir::ValueRange operands, mlir::Location loc,
- mlir::ConversionPatternRewriter &rewriter) const {
- mlir::Type baseObjectTy = coor.getBaseType();
+ matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ // given: (x + iy) - (x' + iy')
+ // result: (x - x') + i(y - y')
+ auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(),
+ rewriter, lowerTy());
+ rewriter.replaceOp(subc, r.getResult());
+ return success();
+ }
+};
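
Written out, the identity that the addc/subc comments abbreviate is plain componentwise arithmetic on the real and imaginary parts:

  \[ (x + iy) \pm (x' + iy') = (x \pm x') + i\,(y \pm y') \]

which is exactly what complexSum materializes: two extractvalue pairs, one FAddOp/FSubOp per component, and two insertvalue ops to rebuild the struct.
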
- mlir::Type currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
- bool hasSubdimension = hasSubDimensions(currentObjTy);
- bool columnIsDeferred = !hasSubdimension;
+/// Inlined complex multiply
+struct MulcOpConversion : public FIROpConversion<fir::MulcOp> {
+ using FIROpConversion::FIROpConversion;
- if (!supportedCoordinate(currentObjTy, operands.drop_front(1))) {
- TODO(loc, "unsupported combination of coordinate operands");
- }
+ mlir::LogicalResult
+ matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ // TODO: Can we use a call to __muldc3 ?
+ // given: (x + iy) * (x' + iy')
+ // result: (xx'-yy')+i(xy'+yx')
+ mlir::Value a = adaptor.getOperands()[0];
+ mlir::Value b = adaptor.getOperands()[1];
+ auto loc = mulc.getLoc();
+ auto *ctx = mulc.getContext();
+ auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
+ auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
+ mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType()));
+ mlir::Type ty = convertType(mulc.getType());
+ auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
+ auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
+ auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
+ auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
+ auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
+ auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
+ auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
+ auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx);
+ auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
+ auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy);
+ auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
+ auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
+ auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
+ rewriter.replaceOp(mulc, r0.getResult());
+ return success();
+ }
+};
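
For reference, the product formula in the comment follows from expanding the terms and using i^2 = -1:

  \[ (x + iy)(x' + iy') = (xx' - yy') + i\,(xy' + yx') \]

The pattern emits this literally (four multiplies, one add, one subtract); the __muldc3 call mentioned in the TODO would additionally handle the NaN/infinity corner cases.
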
- const bool hasKnownShape =
- arraysHaveKnownShape(currentObjTy, operands.drop_front(1));
+/// Inlined complex division
+struct DivcOpConversion : public FIROpConversion<fir::DivcOp> {
+ using FIROpConversion::FIROpConversion;
- // If only the column is `?`, then we can simply place the column value in
- // the 0-th GEP position.
- if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
- if (!hasKnownShape) {
- const unsigned sz = arrTy.getDimension();
- if (arraysHaveKnownShape(arrTy.getEleTy(),
- operands.drop_front(1 + sz))) {
- llvm::ArrayRef<int64_t> shape = arrTy.getShape();
- bool allConst = true;
- for (unsigned i = 0; i < sz - 1; ++i) {
- if (shape[i] < 0) {
- allConst = false;
- break;
- }
- }
- if (allConst)
- columnIsDeferred = true;
- }
- }
- }
+ mlir::LogicalResult
+ matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ // TODO: Can we use a call to __divdc3 instead?
+ // Just generate inline code for now.
+ // given: (x + iy) / (x' + iy')
+ // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
+ mlir::Value a = adaptor.getOperands()[0];
+ mlir::Value b = adaptor.getOperands()[1];
+ auto loc = divc.getLoc();
+ auto *ctx = divc.getContext();
+ auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
+ auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
+ mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
+ mlir::Type ty = convertType(divc.getType());
+ auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
+ auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
+ auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
+ auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
+ auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
+ auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
+ auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
+ auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
+ auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
+ auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
+ auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
+ auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
+ auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
+ auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
+ auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
+ auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
+ auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
+ auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
+ rewriter.replaceOp(divc, r0.getResult());
+ return success();
+ }
+};
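
The quotient formula comes from multiplying numerator and denominator by the conjugate of the divisor:

  \[ \frac{x + iy}{x' + iy'} = \frac{(x + iy)(x' - iy')}{x'^2 + y'^2}
     = \frac{xx' + yy'}{d} + i\,\frac{yx' - xy'}{d}, \qquad d = x'^2 + y'^2 \]

This direct form matches the generated code (d = x1*x1 + y1*y1), but it can overflow or underflow when d is very large or very small; a __divdc3 library call or Smith's algorithm is the usual more robust alternative, which is presumably what the TODO above alludes to.
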
- if (fir::hasDynamicSize(fir::unwrapSequenceType(currentObjTy))) {
- mlir::emitError(
- loc, "fir.coordinate_of with a dynamic element size is unsupported");
- return failure();
- }
+/// Inlined complex negation
+struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
+ using FIROpConversion::FIROpConversion;
- if (hasKnownShape || columnIsDeferred) {
- SmallVector<mlir::Value> offs;
- if (hasKnownShape && hasSubdimension) {
- mlir::LLVM::ConstantOp c0 =
- genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
- offs.push_back(c0);
- }
- const std::size_t sz = operands.size();
- Optional<int> dims;
- SmallVector<mlir::Value> arrIdx;
- for (std::size_t i = 1; i < sz; ++i) {
- mlir::Value nxtOpnd = operands[i];
+ mlir::LogicalResult
+ matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ // given: -(x + iy)
+ // result: -x - iy
+ auto *ctxt = neg.getContext();
+ auto eleTy = convertType(getComplexEleTy(neg.getType()));
+ auto ty = convertType(neg.getType());
+ auto loc = neg.getLoc();
+ mlir::Value o0 = adaptor.getOperands()[0];
+ auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
+ auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
+ auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
+ auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
+ auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
+ auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
+ auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
+ rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
+ return success();
+ }
+};
- if (!currentObjTy) {
- mlir::emitError(loc, "invalid coordinate/check failed");
- return failure();
- }
+/// Conversion pattern for operations that must be dead. The information in
+/// these operations is used by other operations. At this point they should not
+/// have any remaining uses.
+/// These operations are normally dead after the pre-codegen pass.
+template <typename FromOp>
+struct MustBeDeadConversion : public FIROpConversion<FromOp> {
+ explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering)
+ : FIROpConversion<FromOp>(lowering) {}
+ using OpAdaptor = typename FromOp::Adaptor;
- // check if the i-th coordinate relates to an array
- if (dims.hasValue()) {
- arrIdx.push_back(nxtOpnd);
- int dimsLeft = *dims;
- if (dimsLeft > 1) {
- dims = dimsLeft - 1;
- continue;
- }
- currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy();
- // append array range in reverse (FIR arrays are column-major)
- offs.append(arrIdx.rbegin(), arrIdx.rend());
- arrIdx.clear();
- dims.reset();
- continue;
- }
- if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
- int d = arrTy.getDimension() - 1;
- if (d > 0) {
- dims = d;
- arrIdx.push_back(nxtOpnd);
- continue;
- }
- currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy();
- offs.push_back(nxtOpnd);
- continue;
- }
+ mlir::LogicalResult
+ matchAndRewrite(FromOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const final {
+ if (!op->getUses().empty())
+ return rewriter.notifyMatchFailure(op, "op must be dead");
+ rewriter.eraseOp(op);
+ return success();
+ }
+};
- // check if the i-th coordinate relates to a field
- if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>())
- currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
- else if (auto tupTy = currentObjTy.dyn_cast<mlir::TupleType>())
- currentObjTy = tupTy.getType(getIntValue(nxtOpnd));
- else
- currentObjTy = nullptr;
+struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> {
+ using MustBeDeadConversion::MustBeDeadConversion;
+};
- offs.push_back(nxtOpnd);
- }
- if (dims.hasValue())
- offs.append(arrIdx.rbegin(), arrIdx.rend());
- mlir::Value base = operands[0];
- mlir::Value retval = genGEP(loc, ty, rewriter, base, offs);
- rewriter.replaceOp(coor, retval);
- return success();
- }
+struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> {
+ using MustBeDeadConversion::MustBeDeadConversion;
+};
- mlir::emitError(loc, "fir.coordinate_of base operand has unsupported type");
- return failure();
- }
+struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> {
+ using MustBeDeadConversion::MustBeDeadConversion;
+};
+
+struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> {
+ using MustBeDeadConversion::MustBeDeadConversion;
};
} // namespace
@@ -3294,11 +3296,11 @@ class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> {
DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion,
EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion,
ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion,
- FreeMemOpConversion, HasValueOpConversion, GenTypeDescOpConversion,
- GlobalLenOpConversion, GlobalOpConversion, InsertOnRangeOpConversion,
+ FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion,
+ GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion,
InsertValueOpConversion, IsPresentOpConversion,
- LenParamIndexOpConversion, LoadOpConversion, NegcOpConversion,
- NoReassocOpConversion, MulcOpConversion, SelectCaseOpConversion,
+ LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion,
+ NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion,
SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion,
ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
SliceOpConversion, StoreOpConversion, StringLitOpConversion,