[flang-commits] [flang] [openmp] [mlir] [Flang][OpenMP] Initial mapping of Fortran pointers and allocatables for target devices (PR #71766)

via flang-commits flang-commits at lists.llvm.org
Fri Dec 1 03:07:11 PST 2023


https://github.com/agozillon updated https://github.com/llvm/llvm-project/pull/71766

From 7b093d539eeb312efe31f12e15e485931415faf3 Mon Sep 17 00:00:00 2001
From: Andrew Gozillon <Andrew.Gozillon@amd.com>
Date: Mon, 27 Nov 2023 09:04:06 -0600
Subject: [PATCH] [Flang][OpenMP] Initial mapping of Fortran pointers and
 allocatables for target devices

This patch adds an initial lowering for Fortran pointers and allocatable variables captured by implicit and explicit map clauses on OpenMP target constructs in Flang.
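
For illustration, the kind of construct this enables (a minimal sketch; the
subroutine and variable names are illustrative rather than taken from the
added tests) is an allocatable mapped directly onto a target region:

  subroutine map_allocatable
    integer, allocatable :: a(:)
    allocate(a(10))
  !$omp target map(tofrom: a)
    a(1) = 20
  !$omp end target
    deallocate(a)
  end subroutine map_allocatable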
---
 .../include/flang/Optimizer/CodeGen/CodeGen.h |   7 +
 flang/lib/Lower/DirectivesCommon.h            |   5 +-
 flang/lib/Lower/OpenMP.cpp                    | 139 +++-
 flang/lib/Optimizer/CodeGen/CMakeLists.txt    |   1 +
 flang/lib/Optimizer/CodeGen/CodeGen.cpp       |   5 +
 flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp |  87 +++
 .../test/Driver/OpenMP/map-types-and-sizes.ll | 651 ++++++++++++++++++
 .../OpenMP/map-types-and-sizes.f90            |  54 +-
 flang/test/Lower/OpenACC/acc-bounds.f90       |  10 +-
 .../test/Lower/OpenACC/acc-data-operands.f90  |  23 +-
 flang/test/Lower/OpenACC/acc-enter-data.f90   | 133 ++--
 flang/test/Lower/OpenACC/acc-private.f90      |   4 +-
 flang/test/Lower/OpenACC/acc-reduction.f90    |   9 +-
 flang/test/Lower/OpenMP/FIR/array-bounds.f90  |  49 +-
 flang/test/Lower/OpenMP/FIR/target.f90        |   3 +-
 .../Lower/OpenMP/allocatable-array-bounds.f90 | 121 ++++
 flang/test/Lower/OpenMP/allocatable-map.f90   |  15 +
 flang/test/Lower/OpenMP/array-bounds.f90      |  47 +-
 flang/test/Lower/OpenMP/target.f90            |   3 +-
 mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td |  16 +-
 .../OpenMP/OpenMPToLLVMIRTranslation.cpp      | 243 ++++++-
 mlir/test/Dialect/OpenMP/ops.mlir             |  15 +
 ...target-fortran-allocatable-types-host.mlir | 349 ++++++++++
 .../allocatable-array-section-1d-bounds.f90   |  46 ++
 .../allocatable-array-section-3d-bounds.f90   |  44 ++
 .../fortran/allocatable-map-scopes.f95        |  66 ++
 .../failing/target_single_value_allocate.f90  |  27 +
 .../fortran/pointer-scopes-enter-exit-map.f90 |  83 +++
 ...pointer-target-array-section-3d-bounds.f90 |  43 ++
 .../fortran/pointer-target-map-scopes.f95     |  64 ++
 .../target_enter_exit_allocatables.f90        |  44 ++
 .../fortran/target_enter_exit_array.f90       |  41 ++
 32 files changed, 2273 insertions(+), 174 deletions(-)
 create mode 100644 flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp
 create mode 100644 flang/test/Driver/OpenMP/map-types-and-sizes.ll
 create mode 100644 flang/test/Lower/OpenMP/allocatable-array-bounds.f90
 create mode 100644 flang/test/Lower/OpenMP/allocatable-map.f90
 create mode 100644 mlir/test/Target/LLVMIR/omptarget-fortran-allocatable-types-host.mlir
 create mode 100644 openmp/libomptarget/test/offloading/fortran/allocatable-array-section-1d-bounds.f90
 create mode 100644 openmp/libomptarget/test/offloading/fortran/allocatable-array-section-3d-bounds.f90
 create mode 100644 openmp/libomptarget/test/offloading/fortran/allocatable-map-scopes.f95
 create mode 100644 openmp/libomptarget/test/offloading/fortran/failing/target_single_value_allocate.f90
 create mode 100644 openmp/libomptarget/test/offloading/fortran/pointer-scopes-enter-exit-map.f90
 create mode 100644 openmp/libomptarget/test/offloading/fortran/pointer-target-array-section-3d-bounds.f90
 create mode 100644 openmp/libomptarget/test/offloading/fortran/pointer-target-map-scopes.f95
 create mode 100644 openmp/libomptarget/test/offloading/fortran/target_enter_exit_allocatables.f90
 create mode 100644 openmp/libomptarget/test/offloading/fortran/target_enter_exit_array.f90

diff --git a/flang/include/flang/Optimizer/CodeGen/CodeGen.h b/flang/include/flang/Optimizer/CodeGen/CodeGen.h
index 7d8e548d89a18b5..7391bbadbfcf245 100644
--- a/flang/include/flang/Optimizer/CodeGen/CodeGen.h
+++ b/flang/include/flang/Optimizer/CodeGen/CodeGen.h
@@ -19,6 +19,7 @@
 namespace fir {
 
 struct NameUniquer;
+class LLVMTypeConverter;
 
 #define GEN_PASS_DECL_FIRTOLLVMLOWERING
 #define GEN_PASS_DECL_CODEGENREWRITE
@@ -79,6 +80,12 @@ std::unique_ptr<mlir::Pass> createLLVMDialectToLLVMPass(
 std::unique_ptr<mlir::Pass> createBoxedProcedurePass();
 std::unique_ptr<mlir::Pass> createBoxedProcedurePass(bool useThunks);
 
+/// Specialised conversion patterns for OpenMP operations when lowering FIR to
+/// the LLVM dialect, used where the default OpenMP dialect lowering cannot
+/// handle intermingled FIR types and operations.
+void populateOpenMPFIRToLLVMConversionPatterns(
+    LLVMTypeConverter &converter, mlir::RewritePatternSet &patterns);
+
 // declarative passes
 #define GEN_PASS_REGISTRATION
 #include "flang/Optimizer/CodeGen/CGPasses.h.inc"
diff --git a/flang/lib/Lower/DirectivesCommon.h b/flang/lib/Lower/DirectivesCommon.h
index 58b8ea23450eb9d..708d3d97128148c 100644
--- a/flang/lib/Lower/DirectivesCommon.h
+++ b/flang/lib/Lower/DirectivesCommon.h
@@ -651,7 +651,7 @@ genBoundsOpsFromBox(fir::FirOpBuilder &builder, mlir::Location loc,
     if (dim == 0) // First stride is the element size.
       byteStride = dimInfo.getByteStride();
     mlir::Value bound = builder.create<BoundsOp>(
-        loc, boundTy, lb, ub, mlir::Value(), byteStride, true, baseLb);
+        loc, boundTy, lb, ub, dimInfo.getExtent(), byteStride, true, baseLb);
     // Compute the stride for the next dimension.
     byteStride = builder.create<mlir::arith::MulIOp>(loc, byteStride,
                                                      dimInfo.getExtent());
@@ -815,9 +815,10 @@ genBoundsOps(fir::FirOpBuilder &builder, mlir::Location loc,
             }
           }
         }
+
+        extent = fir::factory::readExtent(builder, loc, dataExv, dimension);
         if (!ubound) {
           // ub = extent - 1
-          extent = fir::factory::readExtent(builder, loc, dataExv, dimension);
           ubound = builder.create<mlir::arith::SubIOp>(loc, extent, one);
         }
       }
diff --git a/flang/lib/Lower/OpenMP.cpp b/flang/lib/Lower/OpenMP.cpp
index adbc277d6b019e8..201847ec7f92205 100644
--- a/flang/lib/Lower/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP.cpp
@@ -1710,30 +1710,86 @@ bool ClauseProcessor::processLink(
 
 static mlir::omp::MapInfoOp
 createMapInfoOp(fir::FirOpBuilder &builder, mlir::Location loc,
-                mlir::Value baseAddr, std::stringstream &name,
-                mlir::SmallVector<mlir::Value> bounds, uint64_t mapType,
-                mlir::omp::VariableCaptureKind mapCaptureType,
-                mlir::Type retTy) {
-  mlir::Value varPtr, varPtrPtr;
-  mlir::TypeAttr varType;
-
+                mlir::Value baseAddr, mlir::Value varPtrPtr, std::string name,
+                mlir::SmallVector<mlir::Value> bounds,
+                mlir::SmallVector<mlir::Value> members, uint64_t mapType,
+                mlir::omp::VariableCaptureKind mapCaptureType, mlir::Type retTy,
+                bool isVal = false) {
   if (auto boxTy = baseAddr.getType().dyn_cast<fir::BaseBoxType>()) {
     baseAddr = builder.create<fir::BoxAddrOp>(loc, baseAddr);
     retTy = baseAddr.getType();
   }
 
-  varPtr = baseAddr;
-  varType = mlir::TypeAttr::get(
+  mlir::TypeAttr varType = mlir::TypeAttr::get(
       llvm::cast<mlir::omp::PointerLikeType>(retTy).getElementType());
 
   mlir::omp::MapInfoOp op = builder.create<mlir::omp::MapInfoOp>(
-      loc, retTy, varPtr, varType, varPtrPtr, bounds,
+      loc, retTy, baseAddr, varType, varPtrPtr, members, bounds,
       builder.getIntegerAttr(builder.getIntegerType(64, false), mapType),
       builder.getAttr<mlir::omp::VariableCaptureKindAttr>(mapCaptureType),
-      builder.getStringAttr(name.str()));
+      builder.getStringAttr(name));
+
   return op;
 }
 
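+/// Create the map information required for a Fortran descriptor-backed
+/// variable (pointer, allocatable or assumed-shape dummy): the underlying data
+/// pointer (and the addendum, when present) are mapped as members of a parent
+/// MapInfoOp describing the descriptor itself.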
+static mlir::omp::MapInfoOp processDescriptorTypeMappings(
+    Fortran::semantics::SemanticsContext &semanticsContext,
+    Fortran::lower::StatementContext &stmtCtx,
+    Fortran::lower::AbstractConverter &converter, mlir::Location loc,
+    mlir::Value descriptorAddr, mlir::Value descDataBaseAddr,
+    mlir::SmallVector<mlir::Value> &bounds, std::string asFortran,
+    llvm::omp::OpenMPOffloadMappingFlags mapCaptureType) {
+  llvm::SmallVector<mlir::Value> descriptorBaseAddrMembers;
+  fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
+
+  mlir::Value descriptor = descriptorAddr;
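+  // Assumed-shape dummies are represented by a descriptor value rather than a
+  // variable in memory, so copy the descriptor into a stack temporary to give
+  // it an address that can be mapped.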
+  if (fir::isAssumedShape(fir::unwrapRefType(descriptorAddr.getType()))) {
+    mlir::OpBuilder::InsertPoint insPt = firOpBuilder.saveInsertionPoint();
+    firOpBuilder.setInsertionPointToStart(firOpBuilder.getAllocaBlock());
+    descriptor =
+        firOpBuilder.create<fir::AllocaOp>(loc, descriptorAddr.getType());
+    firOpBuilder.restoreInsertionPoint(insPt);
+    firOpBuilder.create<fir::StoreOp>(loc, descriptorAddr, descriptor);
+  }
+
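+  // Get the address of the descriptor's base address field; this is passed as
+  // the varPtrPtr of the member map for the underlying data.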
+  mlir::Value baseAddrAddr = firOpBuilder.create<fir::BoxOffsetOp>(
+      loc, descriptor, fir::BoxFieldAttr::base_addr);
+
+  descriptorBaseAddrMembers.push_back(createMapInfoOp(
+      firOpBuilder, loc, descDataBaseAddr, baseAddrAddr, asFortran, bounds, {},
+      static_cast<std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+          mapCaptureType),
+      mlir::omp::VariableCaptureKind::ByRef, descDataBaseAddr.getType()));
+
+  if (fir::BaseBoxType baseBoxTy = llvm::dyn_cast<fir::BaseBoxType>(
+          fir::unwrapRefType(descriptor.getType()))) {
+    if (fir::boxHasAddendum(baseBoxTy)) {
+      mlir::Value addendumAddrAddr = firOpBuilder.create<fir::BoxOffsetOp>(
+          loc, descriptor, fir::BoxFieldAttr::derived_type);
+      // NOTE: There is no addendum address to use as the varPtr, but one must
+      // be provided alongside the varPtrPtr, so the same value is reused for
+      // both for now.
+      descriptorBaseAddrMembers.push_back(createMapInfoOp(
+          firOpBuilder, loc, addendumAddrAddr, addendumAddrAddr, asFortran, {},
+          {},
+          static_cast<
+              std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+              mapCaptureType),
+          mlir::omp::VariableCaptureKind::ByRef, addendumAddrAddr.getType()));
+    }
+  }
+
+  return createMapInfoOp(
+      firOpBuilder, loc, descriptor, mlir::Value{}, asFortran, {},
+      descriptorBaseAddrMembers,
+      static_cast<std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+          mapCaptureType),
+      mlir::omp::VariableCaptureKind::ByRef, descriptor.getType());
+}
+
 bool ClauseProcessor::processMap(
     mlir::Location currentLocation, const llvm::omp::Directive &directive,
     Fortran::semantics::SemanticsContext &semanticsContext,
@@ -1798,22 +1854,34 @@ bool ClauseProcessor::processMap(
               mlir::omp::DataBoundsOp>(
               converter, firOpBuilder, semanticsContext, stmtCtx, ompObject,
               clauseLocation, asFortran, bounds, treatIndexAsSection);
-
-          // Explicit map captures are captured ByRef by default,
-          // optimisation passes may alter this to ByCopy or other capture
-          // types to optimise
-          mlir::Value mapOp = createMapInfoOp(
-              firOpBuilder, clauseLocation, baseAddr, asFortran, bounds,
-              static_cast<
-                  std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
-                  mapTypeBits),
-              mlir::omp::VariableCaptureKind::ByRef, baseAddr.getType());
+          auto origSymbol =
+              converter.getSymbolAddress(*getOmpObjectSymbol(ompObject));
+          mlir::Value mapOp, symAddr;
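+          // Fortran pointers, allocatables and assumed-shape arrays are backed
+          // by a descriptor; map the descriptor and the data it references via
+          // processDescriptorTypeMappings rather than with a single map entry.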
+          if (fir::isPointerType(origSymbol.getType()) ||
+              fir::isAllocatableType(origSymbol.getType()) ||
+              fir::isAssumedShape(origSymbol.getType())) {
+            symAddr = origSymbol;
+            mapOp = processDescriptorTypeMappings(
+                semanticsContext, stmtCtx, converter, clauseLocation,
+                origSymbol, baseAddr, bounds, asFortran.str(), mapTypeBits);
+          } else {
+            // Explicit map captures are captured ByRef by default;
+            // optimisation passes may alter this to ByCopy or other capture
+            // types where beneficial.
+            symAddr = baseAddr;
+            mapOp = createMapInfoOp(
+                firOpBuilder, clauseLocation, baseAddr, mlir::Value{},
+                asFortran.str(), bounds, {},
+                static_cast<std::underlying_type_t<
+                    llvm::omp::OpenMPOffloadMappingFlags>>(mapTypeBits),
+                mlir::omp::VariableCaptureKind::ByRef, baseAddr.getType());
+          }
 
           mapOperands.push_back(mapOp);
           if (mapSymTypes)
-            mapSymTypes->push_back(baseAddr.getType());
+            mapSymTypes->push_back(symAddr.getType());
           if (mapSymLocs)
-            mapSymLocs->push_back(baseAddr.getLoc());
+            mapSymLocs->push_back(symAddr.getLoc());
           if (mapSymbols)
             mapSymbols->push_back(getOmpObjectSymbol(ompObject));
         }
@@ -2590,7 +2658,8 @@ static void genBodyOfTargetOp(
         std::stringstream name;
         firOpBuilder.setInsertionPoint(targetOp);
         mlir::Value mapOp = createMapInfoOp(
-            firOpBuilder, copyVal.getLoc(), copyVal, name, bounds,
+            firOpBuilder, copyVal.getLoc(), copyVal, mlir::Value{}, name.str(),
+            bounds, llvm::SmallVector<mlir::Value>{},
             static_cast<
                 std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
                 llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT),
@@ -2718,12 +2787,22 @@ genTargetOp(Fortran::lower::AbstractConverter &converter,
           }
         }
 
-        mlir::Value mapOp = createMapInfoOp(
-            converter.getFirOpBuilder(), baseOp.getLoc(), baseOp, name, bounds,
-            static_cast<
-                std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
-                mapFlag),
-            captureKind, baseOp.getType());
+        mlir::Value mapOp;
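+        // Implicitly captured descriptor-backed variables (pointers,
+        // allocatables, assumed-shape arrays) take the same descriptor mapping
+        // path as explicit map clauses.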
+        if (fir::isPointerType(baseOp.getType()) ||
+            fir::isAllocatableType(baseOp.getType()) ||
+            fir::isAssumedShape(baseOp.getType())) {
+          mapOp = processDescriptorTypeMappings(
+              semanticsContext, stmtCtx, converter, baseOp.getLoc(), baseOp,
+              baseAddr, bounds, name.str(), mapFlag);
+        } else {
+          mapOp = createMapInfoOp(
+              converter.getFirOpBuilder(), baseOp.getLoc(), baseOp,
+              mlir::Value{}, name.str(), bounds, {},
+              static_cast<
+                  std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+                  mapFlag),
+              captureKind, baseOp.getType());
+        }
 
         mapOperands.push_back(mapOp);
         mapSymTypes.push_back(baseOp.getType());
diff --git a/flang/lib/Optimizer/CodeGen/CMakeLists.txt b/flang/lib/Optimizer/CodeGen/CMakeLists.txt
index 0daa97b00dfa00c..b304b8c950b7f85 100644
--- a/flang/lib/Optimizer/CodeGen/CMakeLists.txt
+++ b/flang/lib/Optimizer/CodeGen/CMakeLists.txt
@@ -1,4 +1,5 @@
 add_flang_library(FIRCodeGen
+  CodeGenOpenMP.cpp
   BoxedProcedure.cpp
   CGOps.cpp
   CodeGen.cpp
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index 408377f1d7a1762..2164cdfd7b56de5 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -3857,6 +3857,11 @@ class FIRToLLVMLowering
     mlir::populateMathToLibmConversionPatterns(pattern);
     mlir::populateComplexToLLVMConversionPatterns(typeConverter, pattern);
     mlir::populateVectorToLLVMConversionPatterns(typeConverter, pattern);
+
+    // Flang-specific patterns for OpenMP operations, to allow special
+    // handling of FIR constructs such as box types.
+    fir::populateOpenMPFIRToLLVMConversionPatterns(typeConverter, pattern);
+
     mlir::ConversionTarget target{*context};
     target.addLegalDialect<mlir::LLVM::LLVMDialect>();
     // The OpenMP dialect is legal for Operations without regions, for those
diff --git a/flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp b/flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp
new file mode 100644
index 000000000000000..5de95ed807b3fb0
--- /dev/null
+++ b/flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp
@@ -0,0 +1,87 @@
+#include "flang/Optimizer/CodeGen/CodeGen.h"
+
+#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Builder/LowLevelIntrinsics.h"
+#include "flang/Optimizer/Dialect/FIRDialect.h"
+#include "flang/Optimizer/Dialect/FIROps.h"
+#include "flang/Optimizer/Dialect/FIRType.h"
+#include "flang/Optimizer/Dialect/Support/FIRContext.h"
+#include "flang/Optimizer/Support/FatalError.h"
+#include "flang/Optimizer/Support/InternalNames.h"
+#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
+#include "mlir/Conversion/LLVMCommon/Pattern.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Transforms/DialectConversion.h"
+
+using namespace fir;
+
+#define DEBUG_TYPE "flang-codegen-openmp"
+
+// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
+#include "flang/Optimizer/CodeGen/TypeConverter.h"
+
+namespace {
+/// Base class for conversion patterns that lower OpenMP operations intermixed
+/// with FIR to the LLVM dialect. It gives derived patterns access to the
+/// fir::LLVMTypeConverter so FIR-specific types can be converted alongside the
+/// operation.
+template <typename OpType>
+class OpenMPFIROpConversion : public mlir::ConvertOpToLLVMPattern<OpType> {
+public:
+  explicit OpenMPFIROpConversion(const fir::LLVMTypeConverter &lowering)
+      : mlir::ConvertOpToLLVMPattern<OpType>(lowering) {}
+
+  const fir::LLVMTypeConverter &lowerTy() const {
+    return *static_cast<const fir::LLVMTypeConverter *>(
+        this->getTypeConverter());
+  }
+};
+
+// FIR-specific conversion for MapInfoOp that overrides the default OpenMP
+// dialect lowering. This allows FIR-specific lowering of types, currently
+// required for the descriptors of allocatables.
+struct MapInfoOpConversion
+    : public OpenMPFIROpConversion<mlir::omp::MapInfoOp> {
+  using OpenMPFIROpConversion::OpenMPFIROpConversion;
+
+  mlir::LogicalResult
+  matchAndRewrite(mlir::omp::MapInfoOp curOp, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    const mlir::TypeConverter *converter = getTypeConverter();
+    llvm::SmallVector<mlir::Type> resTypes;
+    if (failed(converter->convertTypes(curOp->getResultTypes(), resTypes)))
+      return mlir::failure();
+
+    llvm::SmallVector<mlir::NamedAttribute> newAttrs;
+    mlir::omp::MapInfoOp newOp;
+    for (mlir::NamedAttribute attr : curOp->getAttrs()) {
+      if (auto typeAttr = mlir::dyn_cast<mlir::TypeAttr>(attr.getValue())) {
+        mlir::Type newAttr;
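+        // Descriptor-backed variables carry a FIR box type in the map's type
+        // attribute; convert it as a descriptor struct so the layout and size
+        // of the descriptor are preserved in the lowered attribute.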
+        if (fir::isPointerType(typeAttr.getValue()) ||
+            fir::isAllocatableType(typeAttr.getValue()) ||
+            fir::isAssumedShape(typeAttr.getValue())) {
+          newAttr = lowerTy().convertBoxTypeAsStruct(
+              mlir::cast<fir::BaseBoxType>(typeAttr.getValue()));
+        } else {
+          newAttr = converter->convertType(typeAttr.getValue());
+        }
+        newAttrs.emplace_back(attr.getName(), mlir::TypeAttr::get(newAttr));
+      } else {
+        newAttrs.push_back(attr);
+      }
+    }
+
+    rewriter.replaceOpWithNewOp<mlir::omp::MapInfoOp>(
+        curOp, resTypes, adaptor.getOperands(), newAttrs);
+
+    return mlir::success();
+  }
+};
+} // namespace
+
+void fir::populateOpenMPFIRToLLVMConversionPatterns(
+    LLVMTypeConverter &converter, mlir::RewritePatternSet &patterns) {
+  patterns.add<MapInfoOpConversion>(converter);
+}
diff --git a/flang/test/Driver/OpenMP/map-types-and-sizes.ll b/flang/test/Driver/OpenMP/map-types-and-sizes.ll
new file mode 100644
index 000000000000000..668839587e5ae95
--- /dev/null
+++ b/flang/test/Driver/OpenMP/map-types-and-sizes.ll
@@ -0,0 +1,651 @@
+; ModuleID = 'FIRModule'
+source_filename = "FIRModule"
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%_QM__fortran_type_infoTderivedtype = type { { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, { ptr, i64, i32, i8, i8, i8, i8 }, i64, { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, i32, i8, i8, i8, i8, [4 x i8] }
+%_QM__fortran_type_infoTbinding = type { %_QM__fortran_builtinsT__builtin_c_funptr, { ptr, i64, i32, i8, i8, i8, i8 } }
+%_QM__fortran_builtinsT__builtin_c_funptr = type { i64 }
+%_QM__fortran_type_infoTcomponent = type { { ptr, i64, i32, i8, i8, i8, i8 }, i8, i8, i8, i8, [4 x i8], i64, %_QM__fortran_type_infoTvalue, { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] }, { ptr, i64, i32, i8, i8, i8, i8, [2 x [3 x i64]], ptr, [1 x i64] }, %_QM__fortran_builtinsT__builtin_c_ptr }
+%_QM__fortran_type_infoTvalue = type { i8, [7 x i8], i64 }
+%_QM__fortran_builtinsT__builtin_c_ptr = type { i64 }
+%_QM__fortran_type_infoTprocptrcomponent = type { { ptr, i64, i32, i8, i8, i8, i8 }, i64, %_QM__fortran_builtinsT__builtin_c_funptr }
+%_QM__fortran_type_infoTspecialbinding = type { i8, i8, i8, i8, [4 x i8], %_QM__fortran_builtinsT__builtin_c_funptr }
+%struct.ident_t = type { i32, i32, i32, i32, ptr }
+%struct.__tgt_offload_entry = type { ptr, ptr, i64, i32, i32 }
+%struct.__tgt_kernel_arguments = type { i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, i64, i64, [3 x i32], [3 x i32], i32 }
+
+$_QQclX2aa5d5546e0146d849b0a10e7caea608 = comdat any
+
+$_QQclX62 = comdat any
+
+$_QM__fortran_builtinsE.dt.__builtin_c_funptr = comdat any
+
+$_QM__fortran_builtinsE.n.__builtin_c_funptr = comdat any
+
+$_QM__fortran_builtinsE.c.__builtin_c_funptr = comdat any
+
+$_QM__fortran_builtinsE.n.__address = comdat any
+
+$_QM__fortran_builtinsE.dt.__builtin_c_ptr = comdat any
+
+$_QM__fortran_builtinsE.n.__builtin_c_ptr = comdat any
+
+$_QM__fortran_builtinsE.c.__builtin_c_ptr = comdat any
+
+ at llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 0, ptr @.omp_offloading.requires_reg, ptr null }]
+ at _QQclX2aa5d5546e0146d849b0a10e7caea608 = linkonce constant [96 x i8] c"/work/agozillo/git/flang-dev/llvm-main-project/flang/test/Driver/OpenMP/map-types-and-sizes.f90\00", comdat
+ at _QQclX62 = linkonce constant [1 x i8] c"b", comdat
+ at _QM__fortran_builtinsE.dt.__builtin_c_funptr = linkonce_odr constant %_QM__fortran_type_infoTderivedtype { { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTbinding, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 42, i8 1, i8 1, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTbinding, ptr null, i32 1) to i64)]], ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8 } { ptr @_QM__fortran_builtinsE.n.__builtin_c_funptr, i64 mul (i64 ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64), i64 18), i32 20180515, i8 0, i8 40, i8 1, i8 0 }, i64 8, { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTderivedtype, ptr null, i32 1) to i64), i32 20180515, i8 0, i8 42, i8 1, i8 1, ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { ptr null, i64 ptrtoint (ptr getelementptr (i64, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 10, i8 1, i8 0, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (i64, ptr null, i32 1) to i64)]] }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { ptr null, i64 ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 7, i8 1, i8 0, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)]] }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } { ptr @_QM__fortran_builtinsE.c.__builtin_c_funptr, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTcomponent, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 42, i8 1, i8 1, [1 x [3 x i64]] [[3 x i64] [i64 0, i64 1, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTcomponent, ptr null, i32 1) to i64)]], ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTprocptrcomponent, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 42, i8 1, i8 1, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTprocptrcomponent, ptr null, i32 1) to i64)]], ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTspecialbinding, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 42, i8 1, i8 1, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTspecialbinding, ptr null, i32 1) to i64)]], ptr null, [1 x i64] undef }, i32 0, i8 0, i8 1, i8 1, i8 1, [4 x i8] undef }, comdat
+ at _QM__fortran_builtinsE.n.__builtin_c_funptr = linkonce_odr constant [18 x i8] c"__builtin_c_funptr", comdat
+ at _QM__fortran_builtinsE.c.__builtin_c_funptr = linkonce_odr constant [1 x %_QM__fortran_type_infoTcomponent] [%_QM__fortran_type_infoTcomponent { { ptr, i64, i32, i8, i8, i8, i8 } { ptr @_QM__fortran_builtinsE.n.__address, i64 mul (i64 ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64), i64 9), i32 20180515, i8 0, i8 40, i8 1, i8 0 }, i8 1, i8 0, i8 8, i8 0, [4 x i8] undef, i64 0, %_QM__fortran_type_infoTvalue { i8 1, [7 x i8] undef, i64 0 }, { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTderivedtype, ptr null, i32 1) to i64), i32 20180515, i8 0, i8 42, i8 1, i8 1, ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTvalue, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 42, i8 1, i8 1, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTvalue, ptr null, i32 1) to i64)]], ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8, [2 x [3 x i64]], ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTvalue, ptr null, i32 1) to i64), i32 20180515, i8 2, i8 42, i8 1, i8 1, [2 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTvalue, ptr null, i32 1) to i64)], [3 x i64] [i64 1, i64 0, i64 0]], ptr null, [1 x i64] undef }, %_QM__fortran_builtinsT__builtin_c_ptr zeroinitializer }], comdat
+ at _QM__fortran_builtinsE.n.__address = linkonce_odr constant [9 x i8] c"__address", comdat
+ at _QM__fortran_builtinsE.dt.__builtin_c_ptr = linkonce_odr constant %_QM__fortran_type_infoTderivedtype { { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTbinding, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 42, i8 1, i8 1, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTbinding, ptr null, i32 1) to i64)]], ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8 } { ptr @_QM__fortran_builtinsE.n.__builtin_c_ptr, i64 mul (i64 ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64), i64 15), i32 20180515, i8 0, i8 40, i8 1, i8 0 }, i64 8, { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTderivedtype, ptr null, i32 1) to i64), i32 20180515, i8 0, i8 42, i8 1, i8 1, ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { ptr null, i64 ptrtoint (ptr getelementptr (i64, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 10, i8 1, i8 0, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (i64, ptr null, i32 1) to i64)]] }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { ptr null, i64 ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 7, i8 1, i8 0, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)]] }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } { ptr @_QM__fortran_builtinsE.c.__builtin_c_ptr, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTcomponent, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 42, i8 1, i8 1, [1 x [3 x i64]] [[3 x i64] [i64 0, i64 1, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTcomponent, ptr null, i32 1) to i64)]], ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTprocptrcomponent, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 42, i8 1, i8 1, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTprocptrcomponent, ptr null, i32 1) to i64)]], ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTspecialbinding, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 42, i8 1, i8 1, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTspecialbinding, ptr null, i32 1) to i64)]], ptr null, [1 x i64] undef }, i32 0, i8 0, i8 1, i8 1, i8 1, [4 x i8] undef }, comdat
+ at _QM__fortran_builtinsE.n.__builtin_c_ptr = linkonce_odr constant [15 x i8] c"__builtin_c_ptr", comdat
+ at _QM__fortran_builtinsE.c.__builtin_c_ptr = linkonce_odr constant [1 x %_QM__fortran_type_infoTcomponent] [%_QM__fortran_type_infoTcomponent { { ptr, i64, i32, i8, i8, i8, i8 } { ptr @_QM__fortran_builtinsE.n.__address, i64 mul (i64 ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64), i64 9), i32 20180515, i8 0, i8 40, i8 1, i8 0 }, i8 1, i8 0, i8 8, i8 0, [4 x i8] undef, i64 0, %_QM__fortran_type_infoTvalue { i8 1, [7 x i8] undef, i64 0 }, { ptr, i64, i32, i8, i8, i8, i8, ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTderivedtype, ptr null, i32 1) to i64), i32 20180515, i8 0, i8 42, i8 1, i8 1, ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]], ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTvalue, ptr null, i32 1) to i64), i32 20180515, i8 1, i8 42, i8 1, i8 1, [1 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTvalue, ptr null, i32 1) to i64)]], ptr null, [1 x i64] undef }, { ptr, i64, i32, i8, i8, i8, i8, [2 x [3 x i64]], ptr, [1 x i64] } { ptr null, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTvalue, ptr null, i32 1) to i64), i32 20180515, i8 2, i8 42, i8 1, i8 1, [2 x [3 x i64]] [[3 x i64] [i64 1, i64 0, i64 ptrtoint (ptr getelementptr (%_QM__fortran_type_infoTvalue, ptr null, i32 1) to i64)], [3 x i64] [i64 1, i64 0, i64 0]], ptr null, [1 x i64] undef }, %_QM__fortran_builtinsT__builtin_c_ptr zeroinitializer }], comdat
+ at 0 = private unnamed_addr constant [113 x i8] c";/work/agozillo/git/flang-dev/llvm-main-project/flang/test/Driver/OpenMP/map-types-and-sizes.f90;unknown;10;14;;\00", align 1
+ at .__omp_offloading_fd00_48447cd_maptype_scalar__l11.region_id = weak constant i8 0
+ at .offload_sizes = private unnamed_addr constant [1 x i64] [i64 4]
+ at .offload_maptypes = private unnamed_addr constant [1 x i64] [i64 800]
+ at .offload_mapnames = private constant [1 x ptr] [ptr @0]
+ at 1 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
+ at 2 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @1 }, align 8
+ at 3 = private unnamed_addr constant [113 x i8] c";/work/agozillo/git/flang-dev/llvm-main-project/flang/test/Driver/OpenMP/map-types-and-sizes.f90;unknown;19;14;;\00", align 1
+ at .__omp_offloading_fd00_48447cd_maptype_array__l20.region_id = weak constant i8 0
+ at .offload_sizes.1 = private unnamed_addr constant [1 x i64] [i64 4096]
+ at .offload_maptypes.2 = private unnamed_addr constant [1 x i64] [i64 547]
+ at .offload_mapnames.3 = private constant [1 x ptr] [ptr @3]
+ at 4 = private unnamed_addr constant [113 x i8] c";/work/agozillo/git/flang-dev/llvm-main-project/flang/test/Driver/OpenMP/map-types-and-sizes.f90;unknown;27;23;;\00", align 1
+ at .__omp_offloading_fd00_48447cd_maptype_ptr__l28.region_id = weak constant i8 0
+ at .offload_maptypes.4 = private unnamed_addr constant [1 x i64] [i64 547]
+ at .offload_mapnames.5 = private constant [1 x ptr] [ptr @4]
+ at 5 = private unnamed_addr constant [113 x i8] c";/work/agozillo/git/flang-dev/llvm-main-project/flang/test/Driver/OpenMP/map-types-and-sizes.f90;unknown;35;27;;\00", align 1
+ at .__omp_offloading_fd00_48447cd_maptype_allocatable__l37.region_id = weak constant i8 0
+ at .offload_maptypes.6 = private unnamed_addr constant [1 x i64] [i64 547]
+ at .offload_mapnames.7 = private constant [1 x ptr] [ptr @5]
+ at 6 = private unnamed_addr constant [113 x i8] c";/work/agozillo/git/flang-dev/llvm-main-project/flang/test/Driver/OpenMP/map-types-and-sizes.f90;unknown;47;18;;\00", align 1
+ at 7 = private unnamed_addr constant [113 x i8] c";/work/agozillo/git/flang-dev/llvm-main-project/flang/test/Driver/OpenMP/map-types-and-sizes.f90;unknown;48;22;;\00", align 1
+ at .__omp_offloading_fd00_48447cd_maptype_c_ptr__l49.region_id = weak constant i8 0
+ at .offload_sizes.8 = private unnamed_addr constant [2 x i64] [i64 8, i64 4]
+ at .offload_maptypes.9 = private unnamed_addr constant [2 x i64] [i64 544, i64 800]
+ at .offload_mapnames.10 = private constant [2 x ptr] [ptr @6, ptr @7]
+ at 8 = private unnamed_addr constant [113 x i8] c";/work/agozillo/git/flang-dev/llvm-main-project/flang/test/Driver/OpenMP/map-types-and-sizes.f90;unknown;57;16;;\00", align 1
+ at .__omp_offloading_fd00_48447cd_maptype_char__l58.region_id = weak constant i8 0
+ at .offload_sizes.11 = private unnamed_addr constant [1 x i64] [i64 1]
+ at .offload_maptypes.12 = private unnamed_addr constant [1 x i64] [i64 800]
+ at .offload_mapnames.13 = private constant [1 x ptr] [ptr @8]
+ at .omp_offloading.entry_name = internal unnamed_addr constant [50 x i8] c"__omp_offloading_fd00_48447cd_maptype_scalar__l11\00"
+ at .omp_offloading.entry.__omp_offloading_fd00_48447cd_maptype_scalar__l11 = weak constant %struct.__tgt_offload_entry { ptr @.__omp_offloading_fd00_48447cd_maptype_scalar__l11.region_id, ptr @.omp_offloading.entry_name, i64 0, i32 0, i32 0 }, section "omp_offloading_entries", align 1
+ at .omp_offloading.entry_name.14 = internal unnamed_addr constant [49 x i8] c"__omp_offloading_fd00_48447cd_maptype_array__l20\00"
+ at .omp_offloading.entry.__omp_offloading_fd00_48447cd_maptype_array__l20 = weak constant %struct.__tgt_offload_entry { ptr @.__omp_offloading_fd00_48447cd_maptype_array__l20.region_id, ptr @.omp_offloading.entry_name.14, i64 0, i32 0, i32 0 }, section "omp_offloading_entries", align 1
+ at .omp_offloading.entry_name.15 = internal unnamed_addr constant [47 x i8] c"__omp_offloading_fd00_48447cd_maptype_ptr__l28\00"
+ at .omp_offloading.entry.__omp_offloading_fd00_48447cd_maptype_ptr__l28 = weak constant %struct.__tgt_offload_entry { ptr @.__omp_offloading_fd00_48447cd_maptype_ptr__l28.region_id, ptr @.omp_offloading.entry_name.15, i64 0, i32 0, i32 0 }, section "omp_offloading_entries", align 1
+ at .omp_offloading.entry_name.16 = internal unnamed_addr constant [55 x i8] c"__omp_offloading_fd00_48447cd_maptype_allocatable__l37\00"
+ at .omp_offloading.entry.__omp_offloading_fd00_48447cd_maptype_allocatable__l37 = weak constant %struct.__tgt_offload_entry { ptr @.__omp_offloading_fd00_48447cd_maptype_allocatable__l37.region_id, ptr @.omp_offloading.entry_name.16, i64 0, i32 0, i32 0 }, section "omp_offloading_entries", align 1
+ at .omp_offloading.entry_name.17 = internal unnamed_addr constant [49 x i8] c"__omp_offloading_fd00_48447cd_maptype_c_ptr__l49\00"
+ at .omp_offloading.entry.__omp_offloading_fd00_48447cd_maptype_c_ptr__l49 = weak constant %struct.__tgt_offload_entry { ptr @.__omp_offloading_fd00_48447cd_maptype_c_ptr__l49.region_id, ptr @.omp_offloading.entry_name.17, i64 0, i32 0, i32 0 }, section "omp_offloading_entries", align 1
+ at .omp_offloading.entry_name.18 = internal unnamed_addr constant [48 x i8] c"__omp_offloading_fd00_48447cd_maptype_char__l58\00"
+ at .omp_offloading.entry.__omp_offloading_fd00_48447cd_maptype_char__l58 = weak constant %struct.__tgt_offload_entry { ptr @.__omp_offloading_fd00_48447cd_maptype_char__l58.region_id, ptr @.omp_offloading.entry_name.18, i64 0, i32 0, i32 0 }, section "omp_offloading_entries", align 1
+
+declare ptr @malloc(i64)
+
+declare void @free(ptr)
+
+; Function Attrs: noinline nounwind
+define internal void @.omp_offloading.requires_reg() #0 section ".text.startup" {
+entry:
+  call void @__tgt_register_requires(i64 1)
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @__tgt_register_requires(i64) #1
+
+define void @maptype_scalar_() {
+  %.casted = alloca ptr, align 8
+  %.offload_baseptrs = alloca [1 x ptr], align 8
+  %.offload_ptrs = alloca [1 x ptr], align 8
+  %.offload_mappers = alloca [1 x ptr], align 8
+  %kernel_args = alloca %struct.__tgt_kernel_arguments, align 8
+  %1 = alloca i32, i64 1, align 4
+  br label %entry
+
+entry:                                            ; preds = %0
+  %2 = load i32, ptr %1, align 4
+  store i32 %2, ptr %.casted, align 4
+  %3 = load ptr, ptr %.casted, align 8
+  %4 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  store ptr %3, ptr %4, align 8
+  %5 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  store ptr %3, ptr %5, align 8
+  %6 = getelementptr inbounds [1 x ptr], ptr %.offload_mappers, i64 0, i64 0
+  store ptr null, ptr %6, align 8
+  %7 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  %8 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  %9 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 0
+  store i32 2, ptr %9, align 4
+  %10 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 1
+  store i32 1, ptr %10, align 4
+  %11 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 2
+  store ptr %7, ptr %11, align 8
+  %12 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 3
+  store ptr %8, ptr %12, align 8
+  %13 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 4
+  store ptr @.offload_sizes, ptr %13, align 8
+  %14 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 5
+  store ptr @.offload_maptypes, ptr %14, align 8
+  %15 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 6
+  store ptr @.offload_mapnames, ptr %15, align 8
+  %16 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 7
+  store ptr null, ptr %16, align 8
+  %17 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 8
+  store i64 0, ptr %17, align 8
+  %18 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 9
+  store i64 0, ptr %18, align 8
+  %19 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 10
+  store [3 x i32] [i32 -1, i32 0, i32 0], ptr %19, align 4
+  %20 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 11
+  store [3 x i32] zeroinitializer, ptr %20, align 4
+  %21 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 12
+  store i32 0, ptr %21, align 4
+  %22 = call i32 @__tgt_target_kernel(ptr @2, i64 -1, i32 -1, i32 0, ptr @.__omp_offloading_fd00_48447cd_maptype_scalar__l11.region_id, ptr %kernel_args)
+  %23 = icmp ne i32 %22, 0
+  br i1 %23, label %omp_offload.failed, label %omp_offload.cont
+
+omp_offload.failed:                               ; preds = %entry
+  call void @__omp_offloading_fd00_48447cd_maptype_scalar__l11(ptr %1)
+  br label %omp_offload.cont
+
+omp_offload.cont:                                 ; preds = %omp_offload.failed, %entry
+  ret void
+}
+
+define void @maptype_array_() {
+  %.offload_baseptrs = alloca [1 x ptr], align 8
+  %.offload_ptrs = alloca [1 x ptr], align 8
+  %.offload_mappers = alloca [1 x ptr], align 8
+  %kernel_args = alloca %struct.__tgt_kernel_arguments, align 8
+  %1 = alloca [1024 x i32], i64 1, align 4
+  br label %entry
+
+entry:                                            ; preds = %0
+  %2 = getelementptr inbounds [1024 x i32], ptr %1, i64 0, i64 0
+  %3 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  store ptr %1, ptr %3, align 8
+  %4 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  store ptr %2, ptr %4, align 8
+  %5 = getelementptr inbounds [1 x ptr], ptr %.offload_mappers, i64 0, i64 0
+  store ptr null, ptr %5, align 8
+  %6 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  %7 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  %8 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 0
+  store i32 2, ptr %8, align 4
+  %9 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 1
+  store i32 1, ptr %9, align 4
+  %10 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 2
+  store ptr %6, ptr %10, align 8
+  %11 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 3
+  store ptr %7, ptr %11, align 8
+  %12 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 4
+  store ptr @.offload_sizes.1, ptr %12, align 8
+  %13 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 5
+  store ptr @.offload_maptypes.2, ptr %13, align 8
+  %14 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 6
+  store ptr @.offload_mapnames.3, ptr %14, align 8
+  %15 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 7
+  store ptr null, ptr %15, align 8
+  %16 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 8
+  store i64 0, ptr %16, align 8
+  %17 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 9
+  store i64 0, ptr %17, align 8
+  %18 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 10
+  store [3 x i32] [i32 -1, i32 0, i32 0], ptr %18, align 4
+  %19 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 11
+  store [3 x i32] zeroinitializer, ptr %19, align 4
+  %20 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 12
+  store i32 0, ptr %20, align 4
+  %21 = call i32 @__tgt_target_kernel(ptr @2, i64 -1, i32 -1, i32 0, ptr @.__omp_offloading_fd00_48447cd_maptype_array__l20.region_id, ptr %kernel_args)
+  %22 = icmp ne i32 %21, 0
+  br i1 %22, label %omp_offload.failed, label %omp_offload.cont
+
+omp_offload.failed:                               ; preds = %entry
+  call void @__omp_offloading_fd00_48447cd_maptype_array__l20(ptr %1)
+  br label %omp_offload.cont
+
+omp_offload.cont:                                 ; preds = %omp_offload.failed, %entry
+  ret void
+}
+
+define void @maptype_ptr_() {
+  %.offload_baseptrs = alloca [1 x ptr], align 8
+  %.offload_ptrs = alloca [1 x ptr], align 8
+  %.offload_mappers = alloca [1 x ptr], align 8
+  %.offload_sizes = alloca [1 x i64], align 8
+  %kernel_args = alloca %struct.__tgt_kernel_arguments, align 8
+  %1 = alloca { ptr, i64, i32, i8, i8, i8, i8 }, align 8
+  %2 = alloca { ptr, i64, i32, i8, i8, i8, i8 }, i64 1, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } { ptr null, i64 ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64), i32 20180515, i8 0, i8 9, i8 1, i8 0 }, ptr %1, align 8
+  %3 = load { ptr, i64, i32, i8, i8, i8, i8 }, ptr %1, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } %3, ptr %2, align 8
+  br label %entry
+
+entry:                                            ; preds = %0
+  %element_size = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %2, i64 0, i32 1
+  %4 = load i64, ptr %element_size, align 8
+  %5 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  store ptr %2, ptr %5, align 8
+  %6 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  store ptr %2, ptr %6, align 8
+  %7 = getelementptr inbounds [1 x i64], ptr %.offload_sizes, i32 0, i32 0
+  store i64 %4, ptr %7, align 8
+  %8 = getelementptr inbounds [1 x ptr], ptr %.offload_mappers, i64 0, i64 0
+  store ptr null, ptr %8, align 8
+  %9 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  %10 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  %11 = getelementptr inbounds [1 x i64], ptr %.offload_sizes, i32 0, i32 0
+  %12 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 0
+  store i32 2, ptr %12, align 4
+  %13 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 1
+  store i32 1, ptr %13, align 4
+  %14 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 2
+  store ptr %9, ptr %14, align 8
+  %15 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 3
+  store ptr %10, ptr %15, align 8
+  %16 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 4
+  store ptr %11, ptr %16, align 8
+  %17 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 5
+  store ptr @.offload_maptypes.4, ptr %17, align 8
+  %18 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 6
+  store ptr @.offload_mapnames.5, ptr %18, align 8
+  %19 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 7
+  store ptr null, ptr %19, align 8
+  %20 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 8
+  store i64 0, ptr %20, align 8
+  %21 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 9
+  store i64 0, ptr %21, align 8
+  %22 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 10
+  store [3 x i32] [i32 -1, i32 0, i32 0], ptr %22, align 4
+  %23 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 11
+  store [3 x i32] zeroinitializer, ptr %23, align 4
+  %24 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 12
+  store i32 0, ptr %24, align 4
+  %25 = call i32 @__tgt_target_kernel(ptr @2, i64 -1, i32 -1, i32 0, ptr @.__omp_offloading_fd00_48447cd_maptype_ptr__l28.region_id, ptr %kernel_args)
+  %26 = icmp ne i32 %25, 0
+  br i1 %26, label %omp_offload.failed, label %omp_offload.cont
+
+omp_offload.failed:                               ; preds = %entry
+  call void @__omp_offloading_fd00_48447cd_maptype_ptr__l28(ptr %2)
+  br label %omp_offload.cont
+
+omp_offload.cont:                                 ; preds = %omp_offload.failed, %entry
+  ret void
+}
+
+define void @maptype_allocatable_() {
+  %.offload_baseptrs = alloca [1 x ptr], align 8
+  %.offload_ptrs = alloca [1 x ptr], align 8
+  %.offload_mappers = alloca [1 x ptr], align 8
+  %.offload_sizes = alloca [1 x i64], align 8
+  %kernel_args = alloca %struct.__tgt_kernel_arguments, align 8
+  %1 = alloca { ptr, i64, i32, i8, i8, i8, i8 }, align 8
+  %2 = alloca { ptr, i64, i32, i8, i8, i8, i8 }, align 8
+  %3 = alloca { ptr, i64, i32, i8, i8, i8, i8 }, align 8
+  %4 = alloca { ptr, i64, i32, i8, i8, i8, i8 }, align 8
+  %5 = alloca { ptr, i64, i32, i8, i8, i8, i8 }, align 8
+  %6 = alloca { ptr, i64, i32, i8, i8, i8, i8 }, i64 1, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } { ptr null, i64 ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64), i32 20180515, i8 0, i8 9, i8 2, i8 0 }, ptr %5, align 8
+  %7 = load { ptr, i64, i32, i8, i8, i8, i8 }, ptr %5, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } %7, ptr %6, align 8
+  %8 = call ptr @malloc(i64 ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64))
+  %9 = insertvalue { ptr, i64, i32, i8, i8, i8, i8 } { ptr undef, i64 ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64), i32 20180515, i8 0, i8 9, i8 2, i8 0 }, ptr %8, 0
+  store { ptr, i64, i32, i8, i8, i8, i8 } %9, ptr %4, align 8
+  %10 = load { ptr, i64, i32, i8, i8, i8, i8 }, ptr %4, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } %10, ptr %6, align 8
+  br label %entry
+
+entry:                                            ; preds = %0
+  %element_size = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %6, i64 0, i32 1
+  %11 = load i64, ptr %element_size, align 8
+  %12 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  store ptr %6, ptr %12, align 8
+  %13 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  store ptr %6, ptr %13, align 8
+  %14 = getelementptr inbounds [1 x i64], ptr %.offload_sizes, i32 0, i32 0
+  store i64 %11, ptr %14, align 8
+  %15 = getelementptr inbounds [1 x ptr], ptr %.offload_mappers, i64 0, i64 0
+  store ptr null, ptr %15, align 8
+  %16 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  %17 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  %18 = getelementptr inbounds [1 x i64], ptr %.offload_sizes, i32 0, i32 0
+  %19 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 0
+  store i32 2, ptr %19, align 4
+  %20 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 1
+  store i32 1, ptr %20, align 4
+  %21 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 2
+  store ptr %16, ptr %21, align 8
+  %22 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 3
+  store ptr %17, ptr %22, align 8
+  %23 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 4
+  store ptr %18, ptr %23, align 8
+  %24 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 5
+  store ptr @.offload_maptypes.6, ptr %24, align 8
+  %25 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 6
+  store ptr @.offload_mapnames.7, ptr %25, align 8
+  %26 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 7
+  store ptr null, ptr %26, align 8
+  %27 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 8
+  store i64 0, ptr %27, align 8
+  %28 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 9
+  store i64 0, ptr %28, align 8
+  %29 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 10
+  store [3 x i32] [i32 -1, i32 0, i32 0], ptr %29, align 4
+  %30 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 11
+  store [3 x i32] zeroinitializer, ptr %30, align 4
+  %31 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 12
+  store i32 0, ptr %31, align 4
+  %32 = call i32 @__tgt_target_kernel(ptr @2, i64 -1, i32 -1, i32 0, ptr @.__omp_offloading_fd00_48447cd_maptype_allocatable__l37.region_id, ptr %kernel_args)
+  %33 = icmp ne i32 %32, 0
+  br i1 %33, label %omp_offload.failed, label %omp_offload.cont
+
+omp_offload.failed:                               ; preds = %entry
+  call void @__omp_offloading_fd00_48447cd_maptype_allocatable__l37(ptr %6)
+  br label %omp_offload.cont
+
+34:                                               ; preds = %omp_offload.cont
+  %35 = load { ptr, i64, i32, i8, i8, i8, i8 }, ptr %6, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } %35, ptr %1, align 8
+  %36 = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %1, i32 0, i32 0
+  %37 = load ptr, ptr %36, align 8
+  call void @free(ptr %37)
+  %38 = load { ptr, i64, i32, i8, i8, i8, i8 }, ptr %5, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } %38, ptr %6, align 8
+  br label %39
+
+39:                                               ; preds = %34, %omp_offload.cont
+  ret void
+
+omp_offload.cont:                                 ; preds = %omp_offload.failed, %entry
+  %40 = load { ptr, i64, i32, i8, i8, i8, i8 }, ptr %6, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } %40, ptr %3, align 8
+  %41 = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %3, i32 0, i32 0
+  %42 = load ptr, ptr %41, align 8
+  call void @free(ptr %42)
+  %43 = load { ptr, i64, i32, i8, i8, i8, i8 }, ptr %5, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } %43, ptr %6, align 8
+  %44 = load { ptr, i64, i32, i8, i8, i8, i8 }, ptr %6, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } %44, ptr %2, align 8
+  %45 = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %2, i32 0, i32 0
+  %46 = load ptr, ptr %45, align 8
+  %47 = ptrtoint ptr %46 to i64
+  %48 = icmp ne i64 %47, 0
+  br i1 %48, label %34, label %39
+}
+
+define void @maptype_c_ptr_() {
+  %.casted = alloca ptr, align 8
+  %.offload_baseptrs = alloca [2 x ptr], align 8
+  %.offload_ptrs = alloca [2 x ptr], align 8
+  %.offload_mappers = alloca [2 x ptr], align 8
+  %kernel_args = alloca %struct.__tgt_kernel_arguments, align 8
+  %1 = alloca %_QM__fortran_builtinsT__builtin_c_ptr, i64 1, align 8
+  %2 = alloca i32, i64 1, align 4
+  br label %entry
+
+entry:                                            ; preds = %0
+  %3 = load i32, ptr %2, align 4
+  store i32 %3, ptr %.casted, align 4
+  %4 = load ptr, ptr %.casted, align 8
+  %5 = getelementptr inbounds [2 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  store ptr %1, ptr %5, align 8
+  %6 = getelementptr inbounds [2 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  store ptr %1, ptr %6, align 8
+  %7 = getelementptr inbounds [2 x ptr], ptr %.offload_mappers, i64 0, i64 0
+  store ptr null, ptr %7, align 8
+  %8 = getelementptr inbounds [2 x ptr], ptr %.offload_baseptrs, i32 0, i32 1
+  store ptr %4, ptr %8, align 8
+  %9 = getelementptr inbounds [2 x ptr], ptr %.offload_ptrs, i32 0, i32 1
+  store ptr %4, ptr %9, align 8
+  %10 = getelementptr inbounds [2 x ptr], ptr %.offload_mappers, i64 0, i64 1
+  store ptr null, ptr %10, align 8
+  %11 = getelementptr inbounds [2 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  %12 = getelementptr inbounds [2 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  %13 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 0
+  store i32 2, ptr %13, align 4
+  %14 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 1
+  store i32 2, ptr %14, align 4
+  %15 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 2
+  store ptr %11, ptr %15, align 8
+  %16 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 3
+  store ptr %12, ptr %16, align 8
+  %17 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 4
+  store ptr @.offload_sizes.8, ptr %17, align 8
+  %18 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 5
+  store ptr @.offload_maptypes.9, ptr %18, align 8
+  %19 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 6
+  store ptr @.offload_mapnames.10, ptr %19, align 8
+  %20 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 7
+  store ptr null, ptr %20, align 8
+  %21 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 8
+  store i64 0, ptr %21, align 8
+  %22 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 9
+  store i64 0, ptr %22, align 8
+  %23 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 10
+  store [3 x i32] [i32 -1, i32 0, i32 0], ptr %23, align 4
+  %24 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 11
+  store [3 x i32] zeroinitializer, ptr %24, align 4
+  %25 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 12
+  store i32 0, ptr %25, align 4
+  %26 = call i32 @__tgt_target_kernel(ptr @2, i64 -1, i32 -1, i32 0, ptr @.__omp_offloading_fd00_48447cd_maptype_c_ptr__l49.region_id, ptr %kernel_args)
+  %27 = icmp ne i32 %26, 0
+  br i1 %27, label %omp_offload.failed, label %omp_offload.cont
+
+omp_offload.failed:                               ; preds = %entry
+  call void @__omp_offloading_fd00_48447cd_maptype_c_ptr__l49(ptr %1, ptr %2)
+  br label %omp_offload.cont
+
+omp_offload.cont:                                 ; preds = %omp_offload.failed, %entry
+  ret void
+}
+
+define void @maptype_char_() {
+  %.casted = alloca ptr, align 8
+  %.offload_baseptrs = alloca [1 x ptr], align 8
+  %.offload_ptrs = alloca [1 x ptr], align 8
+  %.offload_mappers = alloca [1 x ptr], align 8
+  %kernel_args = alloca %struct.__tgt_kernel_arguments, align 8
+  %1 = alloca [1 x i8], i64 1, align 1
+  br label %entry
+
+entry:                                            ; preds = %0
+  %2 = load [1 x i8], ptr %1, align 1
+  store [1 x i8] %2, ptr %.casted, align 1
+  %3 = load ptr, ptr %.casted, align 8
+  %4 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  store ptr %3, ptr %4, align 8
+  %5 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  store ptr %3, ptr %5, align 8
+  %6 = getelementptr inbounds [1 x ptr], ptr %.offload_mappers, i64 0, i64 0
+  store ptr null, ptr %6, align 8
+  %7 = getelementptr inbounds [1 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+  %8 = getelementptr inbounds [1 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+  %9 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 0
+  store i32 2, ptr %9, align 4
+  %10 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 1
+  store i32 1, ptr %10, align 4
+  %11 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 2
+  store ptr %7, ptr %11, align 8
+  %12 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 3
+  store ptr %8, ptr %12, align 8
+  %13 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 4
+  store ptr @.offload_sizes.11, ptr %13, align 8
+  %14 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 5
+  store ptr @.offload_maptypes.12, ptr %14, align 8
+  %15 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 6
+  store ptr @.offload_mapnames.13, ptr %15, align 8
+  %16 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 7
+  store ptr null, ptr %16, align 8
+  %17 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 8
+  store i64 0, ptr %17, align 8
+  %18 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 9
+  store i64 0, ptr %18, align 8
+  %19 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 10
+  store [3 x i32] [i32 -1, i32 0, i32 0], ptr %19, align 4
+  %20 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 11
+  store [3 x i32] zeroinitializer, ptr %20, align 4
+  %21 = getelementptr inbounds %struct.__tgt_kernel_arguments, ptr %kernel_args, i32 0, i32 12
+  store i32 0, ptr %21, align 4
+  %22 = call i32 @__tgt_target_kernel(ptr @2, i64 -1, i32 -1, i32 0, ptr @.__omp_offloading_fd00_48447cd_maptype_char__l58.region_id, ptr %kernel_args)
+  %23 = icmp ne i32 %22, 0
+  br i1 %23, label %omp_offload.failed, label %omp_offload.cont
+
+omp_offload.failed:                               ; preds = %entry
+  call void @__omp_offloading_fd00_48447cd_maptype_char__l58(ptr %1)
+  br label %omp_offload.cont
+
+omp_offload.cont:                                 ; preds = %omp_offload.failed, %entry
+  ret void
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare ptr @llvm.stacksave.p0() #2
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare void @llvm.stackrestore.p0(ptr) #2
+
+define internal void @__omp_offloading_fd00_48447cd_maptype_scalar__l11(ptr %0) {
+entry:
+  br label %omp.target
+
+omp.target:                                       ; preds = %entry
+  store i32 10, ptr %0, align 4
+  br label %omp.region.cont
+
+omp.region.cont:                                  ; preds = %omp.target
+  ret void
+}
+
+; Function Attrs: nounwind
+declare i32 @__tgt_target_kernel(ptr, i64, i32, i32, ptr, ptr) #1
+
+define internal void @__omp_offloading_fd00_48447cd_maptype_array__l20(ptr %0) {
+entry:
+  br label %omp.target
+
+omp.target:                                       ; preds = %entry
+  %1 = getelementptr [1024 x i32], ptr %0, i32 0, i64 9
+  store i32 20, ptr %1, align 4
+  br label %omp.region.cont
+
+omp.region.cont:                                  ; preds = %omp.target
+  ret void
+}
+
+define internal void @__omp_offloading_fd00_48447cd_maptype_ptr__l28(ptr %0) {
+entry:
+  br label %omp.target
+
+omp.target:                                       ; preds = %entry
+  %1 = alloca { ptr, i64, i32, i8, i8, i8, i8 }, align 8
+  %2 = load { ptr, i64, i32, i8, i8, i8, i8 }, ptr %0, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } %2, ptr %1, align 8
+  %3 = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %1, i32 0, i32 0
+  %4 = load ptr, ptr %3, align 8
+  store i32 10, ptr %4, align 4
+  br label %omp.region.cont
+
+omp.region.cont:                                  ; preds = %omp.target
+  ret void
+}
+
+define internal void @__omp_offloading_fd00_48447cd_maptype_allocatable__l37(ptr %0) {
+entry:
+  br label %omp.target
+
+omp.target6:                                      ; preds = %omp.target5, %omp.target2
+  br label %omp.region.cont
+
+omp.target5:                                      ; preds = %omp.target4, %omp.target3
+  %1 = insertvalue { ptr, i64, i32, i8, i8, i8, i8 } { ptr undef, i64 ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64), i32 20180515, i8 0, i8 9, i8 2, i8 0 }, ptr %4, 0
+  store { ptr, i64, i32, i8, i8, i8, i8 } %1, ptr %6, align 8
+  %2 = load { ptr, i64, i32, i8, i8, i8, i8 }, ptr %6, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } %2, ptr %0, align 8
+  br label %omp.target6
+
+omp.target4:                                      ; preds = %omp.target3
+  call void @free(ptr %10)
+  br label %omp.target5
+
+omp.target3:                                      ; preds = %omp.target2
+  br i1 %12, label %omp.target4, label %omp.target5
+
+omp.target2:                                      ; preds = %omp.target1, %omp.target
+  %3 = phi i1 [ true, %omp.target1 ], [ false, %omp.target ]
+  %4 = phi ptr [ %5, %omp.target1 ], [ %10, %omp.target ]
+  store i32 10, ptr %4, align 4
+  br i1 %3, label %omp.target3, label %omp.target6
+
+omp.target1:                                      ; preds = %omp.target
+  %5 = call ptr @malloc(i64 ptrtoint (ptr getelementptr (i32, ptr null, i32 1) to i64))
+  br label %omp.target2
+
+omp.target:                                       ; preds = %entry
+  %6 = alloca { ptr, i64, i32, i8, i8, i8, i8 }, align 8
+  %7 = alloca { ptr, i64, i32, i8, i8, i8, i8 }, align 8
+  %8 = load { ptr, i64, i32, i8, i8, i8, i8 }, ptr %0, align 8
+  store { ptr, i64, i32, i8, i8, i8, i8 } %8, ptr %7, align 8
+  %9 = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %7, i32 0, i32 0
+  %10 = load ptr, ptr %9, align 8
+  %11 = ptrtoint ptr %10 to i64
+  %12 = icmp ne i64 %11, 0
+  br i1 %12, label %omp.target2, label %omp.target1
+
+omp.region.cont:                                  ; preds = %omp.target6
+  ret void
+}
+
+define internal void @__omp_offloading_fd00_48447cd_maptype_c_ptr__l49(ptr %0, ptr %1) {
+entry:
+  br label %omp.target
+
+omp.target:                                       ; preds = %entry
+  %2 = alloca %_QM__fortran_builtinsT__builtin_c_ptr, i64 1, align 8
+  %3 = getelementptr %_QM__fortran_builtinsT__builtin_c_ptr, ptr %2, i32 0, i32 0
+  %4 = ptrtoint ptr %1 to i64
+  store i64 %4, ptr %3, align 8
+  %5 = getelementptr %_QM__fortran_builtinsT__builtin_c_ptr, ptr %0, i32 0, i32 0
+  %6 = load i64, ptr %3, align 8
+  store i64 %6, ptr %5, align 8
+  br label %omp.region.cont
+
+omp.region.cont:                                  ; preds = %omp.target
+  ret void
+}
+
+define internal void @__omp_offloading_fd00_48447cd_maptype_char__l58(ptr %0) {
+entry:
+  br label %omp.target
+
+omp.target:                                       ; preds = %entry
+  %1 = load [1 x i8], ptr @_QQclX62, align 1
+  store [1 x i8] %1, ptr %0, align 1
+  br label %omp.region.cont
+
+omp.region.cont:                                  ; preds = %omp.target
+  ret void
+}
+
+attributes #0 = { noinline nounwind }
+attributes #1 = { nounwind }
+attributes #2 = { nocallback nofree nosync nounwind willreturn }
+
+!llvm.module.flags = !{!0, !1}
+!omp_offload.info = !{!2, !3, !4, !5, !6, !7}
+
+!0 = !{i32 2, !"Debug Info Version", i32 3}
+!1 = !{i32 7, !"openmp", i32 11}
+!2 = !{i32 0, i32 64768, i32 75777997, !"maptype_allocatable_", i32 37, i32 0, i32 3}
+!3 = !{i32 0, i32 64768, i32 75777997, !"maptype_array_", i32 20, i32 0, i32 1}
+!4 = !{i32 0, i32 64768, i32 75777997, !"maptype_c_ptr_", i32 49, i32 0, i32 4}
+!5 = !{i32 0, i32 64768, i32 75777997, !"maptype_char_", i32 58, i32 0, i32 5}
+!6 = !{i32 0, i32 64768, i32 75777997, !"maptype_ptr_", i32 28, i32 0, i32 2}
+!7 = !{i32 0, i32 64768, i32 75777997, !"maptype_scalar_", i32 11, i32 0, i32 0}
diff --git a/flang/test/Integration/OpenMP/map-types-and-sizes.f90 b/flang/test/Integration/OpenMP/map-types-and-sizes.f90
index f0a0e5e765b4151..97723aae57d57ca 100644
--- a/flang/test/Integration/OpenMP/map-types-and-sizes.f90
+++ b/flang/test/Integration/OpenMP/map-types-and-sizes.f90
@@ -30,7 +30,7 @@ subroutine mapType_array
   !$omp end target
 end subroutine mapType_array
 
-!CHECK: @.offload_sizes{{.*}} = private unnamed_addr constant [1 x i64] [i64 8]
+!CHECK: @.offload_sizes{{.*}} = private unnamed_addr constant [1 x i64] [i64 24]
 !CHECK: @.offload_maptypes{{.*}} = private unnamed_addr constant [1 x i64] [i64 547]
 subroutine mapType_ptr
   integer, pointer :: a
@@ -39,6 +39,37 @@ subroutine mapType_ptr
   !$omp end target
 end subroutine mapType_ptr
 
+!CHECK: @.offload_sizes{{.*}} = private unnamed_addr constant [1 x i64] [i64 24]
+!CHECK: @.offload_maptypes{{.*}} = private unnamed_addr constant [1 x i64] [i64 547]
+subroutine mapType_allocatable
+  integer, allocatable :: a
+  allocate(a)
+  !$omp target
+     a = 10
+  !$omp end target
+  deallocate(a)
+end subroutine mapType_allocatable
+
+!CHECK: @.offload_sizes{{.*}} = private unnamed_addr constant [3 x i64] [i64 0, i64 24, i64 4]
+!CHECK: @.offload_maptypes{{.*}} = private unnamed_addr constant [3 x i64] [i64 32, i64 281474976710657, i64 281474976710675]
+subroutine mapType_ptr_explicit
+  integer, pointer :: a
+  !$omp target map(tofrom: a)
+     a = 10
+  !$omp end target
+end subroutine mapType_ptr_explicit
+
+!CHECK: @.offload_sizes{{.*}} = private unnamed_addr constant [3 x i64] [i64 0, i64 24, i64 4]
+!CHECK: @.offload_maptypes{{.*}} = private unnamed_addr constant [3 x i64] [i64 32, i64 281474976710657, i64 281474976710675]
+subroutine mapType_allocatable_explicit
+  integer, allocatable :: a
+  allocate(a)
+  !$omp target map(tofrom: a)
+     a = 10
+  !$omp end target
+  deallocate(a)
+end subroutine mapType_allocatable_explicit
+
 !CHECK: @.offload_sizes{{.*}} = private unnamed_addr constant [2 x i64] [i64 8, i64 4]
 !CHECK: @.offload_maptypes{{.*}} = private unnamed_addr constant [2 x i64] [i64 544, i64 800]
 subroutine mapType_c_ptr
@@ -58,3 +89,24 @@ subroutine mapType_char
      a = 'b'
   !$omp end target
 end subroutine mapType_char
+
+!CHECK-LABEL: define void @maptype_ptr_explicit_() {
+!CHECK: %[[ALLOCA:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8 }, i64 1, align 8
+!CHECK: %[[ALLOCA_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %[[ALLOCA]], i32 1
+!CHECK: %[[ALLOCA_GEP_INT:.*]] = ptrtoint ptr %[[ALLOCA_GEP]] to i64
+!CHECK: %[[ALLOCA_INT:.*]] = ptrtoint ptr %[[ALLOCA]] to i64
+!CHECK: %[[SIZE_DIFF:.*]] = sub i64 %[[ALLOCA_GEP_INT]], %[[ALLOCA_INT]]
+!CHECK: %[[DIV:.*]] = sdiv exact i64 %[[SIZE_DIFF]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+!CHECK: %[[OFFLOAD_SIZE_ARR:.*]] = getelementptr inbounds [3 x i64], ptr %.offload_sizes, i32 0, i32 0
+!CHECK: store i64 %[[DIV]], ptr %[[OFFLOAD_SIZE_ARR]], align 8
+
+
+!CHECK-LABEL: define void @maptype_allocatable_explicit_() {
+!CHECK: %[[ALLOCA:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8 }, i64 1, align 8
+!CHECK: %[[ALLOCA_GEP:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %[[ALLOCA]], i32 1
+!CHECK: %[[ALLOCA_GEP_INT:.*]] = ptrtoint ptr %[[ALLOCA_GEP]] to i64
+!CHECK: %[[ALLOCA_INT:.*]] = ptrtoint ptr %[[ALLOCA]] to i64
+!CHECK: %[[SIZE_DIFF:.*]] = sub i64 %[[ALLOCA_GEP_INT]], %[[ALLOCA_INT]]
+!CHECK: %[[DIV:.*]] = sdiv exact i64 %[[SIZE_DIFF]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+!CHECK: %[[OFFLOAD_SIZE_ARR:.*]] = getelementptr inbounds [3 x i64], ptr %.offload_sizes, i32 0, i32 0
+!CHECK: store i64 %[[DIV]], ptr %[[OFFLOAD_SIZE_ARR]], align 8
\ No newline at end of file
diff --git a/flang/test/Lower/OpenACC/acc-bounds.f90 b/flang/test/Lower/OpenACC/acc-bounds.f90
index b1ef869d726ae26..8db18ab5aa9c4bb 100644
--- a/flang/test/Lower/OpenACC/acc-bounds.f90
+++ b/flang/test/Lower/OpenACC/acc-bounds.f90
@@ -31,7 +31,7 @@ subroutine acc_derived_type_component_pointer_array()
 ! CHECK: %[[C1:.*]] = arith.constant 1 : index
 ! CHECK: %[[BOX_DIMS1:.*]]:3 = fir.box_dims %[[LOAD]], %c0{{.*}} : (!fir.box<!fir.ptr<!fir.array<?xi32>>>, index) -> (index, index, index)
 ! CHECK: %[[UB:.*]] = arith.subi %[[BOX_DIMS1]]#1, %[[C1]] : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) stride(%[[BOX_DIMS1]]#2 : index) startIdx(%[[BOX_DIMS0]]#0 : index) {strideInBytes = true}
+! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[BOX_DIMS1]]#1 : index) stride(%[[BOX_DIMS1]]#2 : index) startIdx(%[[BOX_DIMS0]]#0 : index) {strideInBytes = true}
 ! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.ptr<!fir.array<?xi32>>>) -> !fir.ptr<!fir.array<?xi32>>
 ! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ptr<!fir.array<?xi32>>) bounds(%[[BOUND]]) -> !fir.ptr<!fir.array<?xi32>> {name = "d%array_comp", structured = false}
 ! CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ptr<!fir.array<?xi32>>)
@@ -72,7 +72,7 @@ subroutine acc_derived_type_component_allocatable_array()
 ! CHECK: %[[C1:.*]] = arith.constant 1 : index
 ! CHECK: %[[BOX_DIMS1:.*]]:3 = fir.box_dims %[[LOAD]], %c0{{.*}} : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
 ! CHECK: %[[UB:.*]] = arith.subi %[[BOX_DIMS1]]#1, %[[C1]] : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) stride(%[[BOX_DIMS1]]#2 : index) startIdx(%[[BOX_DIMS0]]#0 : index) {strideInBytes = true}
+! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[BOX_DIMS1]]#1 : index) stride(%[[BOX_DIMS1]]#2 : index) startIdx(%[[BOX_DIMS0]]#0 : index) {strideInBytes = true}
 ! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
 ! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xi32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xi32>> {name = "d%array_comp", structured = false}
 ! CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xi32>>)
@@ -105,13 +105,13 @@ subroutine acc_multi_strides(a)
 ! CHECK-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?x?x?xf32>> {fir.bindc_name = "a"})
 ! CHECK: %[[DECL_ARG0:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QMopenacc_boundsFacc_multi_stridesEa"} : (!fir.box<!fir.array<?x?x?xf32>>) -> (!fir.box<!fir.array<?x?x?xf32>>, !fir.box<!fir.array<?x?x?xf32>>)
 ! CHECK: %[[BOX_DIMS0:.*]]:3 = fir.box_dims %[[DECL_ARG0]]#1, %c0{{.*}} : (!fir.box<!fir.array<?x?x?xf32>>, index) -> (index, index, index)
-! CHECK: %[[BOUNDS0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%[[BOX_DIMS0]]#2 : index) startIdx(%{{.*}} : index) {strideInBytes = true}
+! CHECK: %[[BOUNDS0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[BOX_DIMS0]]#1 : index) stride(%[[BOX_DIMS0]]#2 : index) startIdx(%{{.*}} : index) {strideInBytes = true}
 ! CHECK: %[[STRIDE1:.*]] = arith.muli %[[BOX_DIMS0]]#2, %[[BOX_DIMS0]]#1 : index
 ! CHECK: %[[BOX_DIMS1:.*]]:3 = fir.box_dims %[[DECL_ARG0]]#1, %c1{{.*}} : (!fir.box<!fir.array<?x?x?xf32>>, index) -> (index, index, index)
-! CHECK: %[[BOUNDS1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%[[STRIDE1]] : index) startIdx(%{{.*}} : index) {strideInBytes = true}
+! CHECK: %[[BOUNDS1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[BOX_DIMS1]]#1 : index) stride(%[[STRIDE1]] : index) startIdx(%{{.*}} : index) {strideInBytes = true}
 ! CHECK: %[[STRIDE2:.*]] = arith.muli %[[STRIDE1]], %[[BOX_DIMS1]]#1 : index
 ! CHECK: %[[BOX_DIMS2:.*]]:3 = fir.box_dims %[[DECL_ARG0]]#1, %c2{{.*}} : (!fir.box<!fir.array<?x?x?xf32>>, index) -> (index, index, index)
-! CHECK: %[[BOUNDS2:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%[[STRIDE2]] : index) startIdx(%{{.*}} : index) {strideInBytes = true}
+! CHECK: %[[BOUNDS2:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[BOX_DIMS2]]#1 : index) stride(%[[STRIDE2]] : index) startIdx(%{{.*}} : index) {strideInBytes = true}
 ! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECL_ARG0]]#1 : (!fir.box<!fir.array<?x?x?xf32>>) -> !fir.ref<!fir.array<?x?x?xf32>>
 ! CHECK: %[[PRESENT:.*]] = acc.present varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?x?x?xf32>>) bounds(%29, %33, %37) -> !fir.ref<!fir.array<?x?x?xf32>> {name = "a"}
 ! CHECK: acc.kernels dataOperands(%[[PRESENT]] : !fir.ref<!fir.array<?x?x?xf32>>) {
diff --git a/flang/test/Lower/OpenACC/acc-data-operands.f90 b/flang/test/Lower/OpenACC/acc-data-operands.f90
index 047155425eeec5e..8177aeb888ab343 100644
--- a/flang/test/Lower/OpenACC/acc-data-operands.f90
+++ b/flang/test/Lower/OpenACC/acc-data-operands.f90
@@ -19,17 +19,18 @@ subroutine acc_operand_array_section()
 end subroutine
 
 ! CHECK-LABEL: func.func @_QMacc_data_operandPacc_operand_array_section
+! CHECK: %[[EXT:.*]] = arith.constant 100 : index
 ! CHECK: %[[ARR:.*]] = fir.alloca !fir.array<100xf32>
 ! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARR]]
 ! CHECK: %[[ONE:.*]] = arith.constant 1 : index
 ! CHECK: %[[LB:.*]] = arith.constant 0 : index
 ! CHECK: %[[UB:.*]] = arith.constant 49 : index
-! CHECK: %[[BOUND_1_50:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
+! CHECK: %[[BOUND_1_50:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
 ! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECL]]#1 : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND_1_50]]) -> !fir.ref<!fir.array<100xf32>> {name = "a(1:50)"}
 ! CHECK: %[[ONE:.*]] = arith.constant 1 : index
 ! CHECK: %[[LB:.*]] = arith.constant 50 : index
 ! CHECK: %[[UB:.*]] = arith.constant 99 : index
-! CHECK: %[[BOUND_51_100:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
+! CHECK: %[[BOUND_51_100:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
 ! CHECK: %[[COPYOUT_CREATE:.*]] = acc.create varPtr(%[[DECL]]#1 : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND_51_100]]) -> !fir.ref<!fir.array<100xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "a(51:100)"}
 ! CHECK: acc.data dataOperands(%[[COPYIN]], %[[COPYOUT_CREATE]] : !fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>) {
 ! CHECK:   acc.terminator
@@ -48,11 +49,12 @@ subroutine acc_operand_array_section_component()
 ! CHECK-LABEL: func.func @_QMacc_data_operandPacc_operand_array_section_component() {
 ! CHECK: %[[W:.*]] = fir.alloca !fir.type<_QMacc_data_operandTwrapper{data:!fir.array<100xf32>}> {bindc_name = "w", uniq_name = "_QMacc_data_operandFacc_operand_array_section_componentEw"}
 ! CHECK: %[[DECLW:.*]]:2 = hlfir.declare %[[W]]
+! CHECK: %[[EXT:.*]] = arith.constant 100 : index
 ! CHECK: %[[COORD_DATA:.*]] = hlfir.designate %[[DECLW]]#0{"data"}   shape %{{.*}} : (!fir.ref<!fir.type<_QMacc_data_operandTwrapper{data:!fir.array<100xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<100xf32>>
 ! CHECK: %[[ONE:.*]] = arith.constant 1 : index
 ! CHECK: %[[LB:.*]] = arith.constant 0 : index
 ! CHECK: %[[UB:.*]] = arith.constant 19 : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
+! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
 ! CHECK: %[[COPY_COPYIN:.*]] = acc.copyin varPtr(%[[COORD_DATA]] : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xf32>> {dataClause = #acc<data_clause acc_copy>, name = "w%data(1:20)"}
 ! CHECK: acc.data dataOperands(%[[COPY_COPYIN]] : !fir.ref<!fir.array<100xf32>>) {
 ! CHECK:   acc.terminator
@@ -133,7 +135,10 @@ subroutine acc_operand_array_section_allocatable()
 ! CHECK: %[[LB:.*]] = arith.subi %[[C1]], %[[DIMS0_0]]#0 : index
 ! CHECK: %[[C50:.*]] = arith.constant 50 : index
 ! CHECK: %[[UB:.*]] = arith.subi %[[C50]], %[[DIMS0_0]]#0 : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true}
+! CHECK: %[[LOAD_BOX_A_2:.*]] = fir.load %[[DECLA]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+! CHECK: %[[C0:.*]] = arith.constant 0 : index
+! CHECK: %[[DIMS0_2:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_2]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true}
 ! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD_BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
 ! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xf32>> {name = "a(1:50)"}
 ! CHECK: %[[LOAD_BOX_A_0:.*]] = fir.load %[[DECLA]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
@@ -146,7 +151,10 @@ subroutine acc_operand_array_section_allocatable()
 ! CHECK: %[[LB:.*]] = arith.subi %[[C51]], %[[DIMS0_0]]#0 : index
 ! CHECK: %[[C100:.*]] = arith.constant 100 : index
 ! CHECK: %[[UB:.*]] = arith.subi %[[C100]], %[[DIMS0_0]]#0 : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true}
+! CHECK: %[[LOAD_BOX_A_2:.*]] = fir.load %[[DECLA]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+! CHECK: %[[C0:.*]] = arith.constant 0 : index
+! CHECK: %[[DIMS0_2:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_2]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true}
 ! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD_BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
 ! CHECK: %[[COPYOUT_CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "a(51:100)"}
 ! CHECK: acc.data dataOperands(%[[COPYIN]], %[[COPYOUT_CREATE]] : !fir.heap<!fir.array<?xf32>>, !fir.heap<!fir.array<?xf32>>) {
@@ -179,7 +187,10 @@ subroutine acc_operand_array_section_pointer()
 ! CHECK: %[[LB:.*]] = arith.subi %[[C1]], %[[DIMS0_0]]#0 : index
 ! CHECK: %[[C50:.*]] = arith.constant 50 : index
 ! CHECK: %[[UB:.*]] = arith.subi %[[C50]], %[[DIMS0_0]]#0 : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true}
+! CHECK: %[[LOAD_BOX_P_2:.*]] = fir.load %[[DECLP]]#1 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
+! CHECK: %[[C0:.*]] = arith.constant 0 : index
+! CHECK: %[[DIMS0_2:.*]]:3 = fir.box_dims %[[LOAD_BOX_P_2]], %[[C0]] : (!fir.box<!fir.ptr<!fir.array<?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_2]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true}
 ! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD_BOX_P_0]] : (!fir.box<!fir.ptr<!fir.array<?xf32>>>) -> !fir.ptr<!fir.array<?xf32>>
 ! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[BOX_ADDR]] : !fir.ptr<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ptr<!fir.array<?xf32>> {name = "p(1:50)"}
 ! CHECK: acc.data dataOperands(%[[COPYIN]] : !fir.ptr<!fir.array<?xf32>>) {
diff --git a/flang/test/Lower/OpenACC/acc-enter-data.f90 b/flang/test/Lower/OpenACC/acc-enter-data.f90
index 619a9502f82fce4..e7c4b8b20528d11 100644
--- a/flang/test/Lower/OpenACC/acc-enter-data.f90
+++ b/flang/test/Lower/OpenACC/acc-enter-data.f90
@@ -56,35 +56,35 @@ subroutine acc_enter_data
 !CHECK: acc.enter_data if([[IF2]]) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>){{$}}
 
   !$acc enter data create(a) create(b) create(c)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "b", structured = false}
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "c", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE_A]], %[[CREATE_B]], %[[CREATE_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>){{$}}
 
   !$acc enter data create(a) create(b) create(zero: c)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "b", structured = false}
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_create_zero>, name = "c", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE_A]], %[[CREATE_B]], %[[CREATE_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>){{$}}
 
   !$acc enter data copyin(a) create(b) attach(d)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]])  -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "b", structured = false}
 !CHECK: %[[BOX_D:.*]] = fir.load %[[DECLD]]#1 : !fir.ref<!fir.box<!fir.ptr<f32>>>
 !CHECK: %[[BOX_ADDR_D:.*]] = fir.box_addr %[[BOX_D]] : (!fir.box<!fir.ptr<f32>>) -> !fir.ptr<f32>
@@ -92,56 +92,56 @@ subroutine acc_enter_data
 !CHECK: acc.enter_data dataOperands(%[[COPYIN_A]], %[[CREATE_B]], %[[ATTACH_D]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ptr<f32>){{$}}
 
   !$acc enter data create(a) async
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>) attributes {async}
 
   !$acc enter data create(a) wait
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>) attributes {wait}
 
   !$acc enter data create(a) async wait
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>) attributes {async, wait}
 
   !$acc enter data create(a) async(1)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
 !CHECK: %[[ASYNC1:.*]] = arith.constant 1 : i32
 !CHECK: acc.enter_data async(%[[ASYNC1]] : i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
 
   !$acc enter data create(a) async(async)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 
 !CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
 !CHECK: %[[ASYNC2:.*]] = fir.load %{{.*}} : !fir.ref<i32>
 !CHECK: acc.enter_data async(%[[ASYNC2]] : i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
 
   !$acc enter data create(a) wait(1)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
 !CHECK: %[[WAIT1:.*]] = arith.constant 1 : i32
 !CHECK: acc.enter_data wait(%[[WAIT1]] : i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
 
   !$acc enter data create(a) wait(queues: 1, 2)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
 !CHECK: %[[WAIT2:.*]] = arith.constant 1 : i32
 !CHECK: %[[WAIT3:.*]] = arith.constant 2 : i32
 !CHECK: acc.enter_data wait(%[[WAIT2]], %[[WAIT3]] : i32, i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
 
   !$acc enter data create(a) wait(devnum: 1: queues: 1, 2)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
 !CHECK: %[[WAIT4:.*]] = arith.constant 1 : i32
 !CHECK: %[[WAIT5:.*]] = arith.constant 2 : i32
@@ -149,8 +149,8 @@ subroutine acc_enter_data
 !CHECK: acc.enter_data wait_devnum(%[[WAIT6]] : i32) wait(%[[WAIT4]], %[[WAIT5]] : i32, i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
 
   !$acc enter data copyin(a(1:10,1:5))
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a(1:10,1:5)", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[COPYIN_A]] : !fir.ref<!fir.array<10x10xf32>>)
 
@@ -161,7 +161,7 @@ subroutine acc_enter_data
 !CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB1]] : index) upperbound(%[[UB1]] : index) extent(%c10{{.*}} : index) stride(%[[ONE]] : index) startIdx(%c1{{.*}} : index)
 !CHECK: %[[LB2:.*]] = arith.constant 0 : index
 !CHECK: %[[UB2:.*]] = arith.constant 4 : index
-!CHECK: %[[BOUND2:.*]] = acc.bounds lowerbound(%[[LB2]] : index) upperbound(%[[UB2]] : index) stride(%[[ONE]] : index) startIdx(%c1{{.*}} : index)
+!CHECK: %[[BOUND2:.*]] = acc.bounds lowerbound(%[[LB2]] : index) upperbound(%[[UB2]] : index) extent(%[[EXTENT_C10]] : index) stride(%[[ONE]] : index) startIdx(%c1{{.*}} : index)
 !CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND1]], %[[BOUND2]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a(1:,1:5)", structured = false}
 !CHECK: acc.enter_data   dataOperands(%[[COPYIN_A]] : !fir.ref<!fir.array<10x10xf32>>)
 
@@ -169,10 +169,10 @@ subroutine acc_enter_data
 !CHECK: %[[LB:.*]] = arith.constant 0 : index
 !CHECK: %[[ONE:.*]] = arith.constant 1 : index
 !CHECK: %[[UB1:.*]] = arith.constant 9 : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB1]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB1]] : index) extent(%[[C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
 !CHECK: %[[LB:.*]] = arith.constant 0 : index
 !CHECK: %[[UB2:.*]] = arith.constant 4 : index
-!CHECK: %[[BOUND2:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB2]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
+!CHECK: %[[BOUND2:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB2]] : index) extent(%[[EXTENT_C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
 !CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND1]], %[[BOUND2]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a(:10,1:5)", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[COPYIN_A]] : !fir.ref<!fir.array<10x10xf32>>)
 
@@ -212,12 +212,12 @@ subroutine acc_enter_data_dummy(a, b, n, m)
 !CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
 
   !$acc enter data create(a)
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xf32>>)
 
   !$acc enter data create(b)
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXT_B]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECLB]]#1 : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "b", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
 
@@ -225,7 +225,7 @@ subroutine acc_enter_data_dummy(a, b, n, m)
 !CHECK: %[[ONE:.*]] = arith.constant 1 : index
 !CHECK: %[[LB1:.*]] = arith.constant 4 : index
 !CHECK: %[[UB1:.*]] = arith.constant 9 : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB1]] : index) upperbound(%[[UB1]] : index) stride(%[[ONE]] : index) startIdx(%c1{{.*}} : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB1]] : index) upperbound(%[[UB1]] : index) extent(%c10{{.*}} : index) stride(%[[ONE]] : index) startIdx(%c1{{.*}} : index)
 !CHECK: %[[CREATE1:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND1]]) -> !fir.ref<!fir.array<10xf32>> {name = "a(5:10)", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE1]] : !fir.ref<!fir.array<10xf32>>)
 
@@ -237,7 +237,7 @@ subroutine acc_enter_data_dummy(a, b, n, m)
 !CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref<i32>
 !CHECK: %[[CONVERT_M:.*]] = fir.convert %[[LOAD_M]] : (i32) -> index
 !CHECK: %[[UB:.*]] = arith.subi %[[CONVERT_M]], %[[N_IDX]] : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[ONE]] : index) startIdx(%[[N_IDX]] : index)
+!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT_B]] : index) stride(%[[ONE]] : index) startIdx(%[[N_IDX]] : index)
 !CHECK: %[[CREATE1:.*]] = acc.create varPtr(%[[DECLB]]#1 : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND1]]) -> !fir.ref<!fir.array<?xf32>> {name = "b(n:m)", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE1]] : !fir.ref<!fir.array<?xf32>>)
 
@@ -280,7 +280,7 @@ subroutine acc_enter_data_non_default_lb()
 !CHECK: %[[LB:.*]] = arith.subi %[[SECTIONLB]], %[[BASELB]] : index
 !CHECK: %[[SECTIONUB:.*]] = arith.constant 9 : index
 !CHECK: %[[UB:.*]] = arith.subi %[[SECTIONUB]], %[[BASELB]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[ONE]] : index) startIdx(%[[BASELB]] : index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%c10{{.*}} : index) stride(%[[ONE]] : index) startIdx(%[[BASELB]] : index)
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xi32>> {name = "a(5:9)", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xi32>>)
 
@@ -297,7 +297,7 @@ subroutine acc_enter_data_non_default_lb()
 !CHECK: %[[ONE:.*]] = arith.constant 1 : index
 !CHECK: %[[SECTIONUB:.*]] = arith.constant 6 : index
 !CHECK: %[[UB:.*]] = arith.subi %[[SECTIONUB]], %[[BASELB]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) stride(%[[ONE]] : index) startIdx(%[[BASELB]] : index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%c10{{.*}} : index) stride(%[[ONE]] : index) startIdx(%[[BASELB]] : index)
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xi32>> {name = "a(:6)", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xi32>>)
 
@@ -341,7 +341,7 @@ subroutine acc_enter_data_assumed(a, b, n, m)
 !CHECK: %[[DIMS:.*]]:3 = fir.box_dims %[[DECLA]]#1, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
 !CHECK: %[[LB:.*]] = arith.constant 0 : index
 !CHECK: %[[UB:.*]] = arith.subi %[[DIMS]]#1, %[[C1]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS]]#2 : index) startIdx(%[[C1]] : index) {strideInBytes = true}
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS]]#1 : index) stride(%[[DIMS]]#2 : index) startIdx(%[[C1]] : index) {strideInBytes = true}
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#1 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
@@ -385,7 +385,8 @@ subroutine acc_enter_data_assumed(a, b, n, m)
 
 !CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#1, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
 !CHECK: %[[UB:.*]] = arith.constant 3 : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
+!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
 
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#1 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a(:4)", structured = false}
@@ -398,7 +399,8 @@ subroutine acc_enter_data_assumed(a, b, n, m)
 !CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#1, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
 !CHECK: %[[LB:.*]] = arith.constant 5 : index
 !CHECK: %[[UB:.*]] = arith.constant 9 : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
+!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
 
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#1 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a(6:10)", structured = false}
@@ -433,7 +435,8 @@ subroutine acc_enter_data_assumed(a, b, n, m)
 !CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref<i32>
 !CHECK: %[[CONVERT_M:.*]] = fir.convert %[[LOAD_M]] : (i32) -> index
 !CHECK: %[[UB:.*]] = arith.subi %[[CONVERT_M]], %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[BASELB]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
+!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[BASELB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
 
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#1 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a(:m)", structured = false}
@@ -452,7 +455,8 @@ subroutine acc_enter_data_assumed(a, b, n, m)
 !CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref<i32>
 !CHECK: %[[CONVERT_M:.*]] = fir.convert %[[LOAD_M]] : (i32) -> index
 !CHECK: %[[UB:.*]] = arith.subi %[[CONVERT_M]], %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
+!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
 
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#1 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a(n:m)", structured = false}
@@ -467,7 +471,8 @@ subroutine acc_enter_data_assumed(a, b, n, m)
 !CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref<i32>
 !CHECK: %[[CONVERT_M:.*]] = fir.convert %[[LOAD_M]] : (i32) -> index
 !CHECK: %[[UB:.*]] = arith.subi %[[CONVERT_M]], %[[LB_C10_IDX]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[LB_C10_IDX]] : index) {strideInBytes = true}
+!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLB]]#1, %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[LB_C10_IDX]] : index) {strideInBytes = true}
 
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLB]]#1 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "b(:m)", structured = false}
@@ -480,7 +485,7 @@ subroutine acc_enter_data_assumed(a, b, n, m)
 !CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#1, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
 !CHECK: %[[C0:.*]] = arith.constant 0 : index
 !CHECK: %[[UB:.*]] = arith.subi %[[DIMS0]]#1, %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[C0]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[LB_C10_IDX]] : index) {strideInBytes = true}
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[C0]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[LB_C10_IDX]] : index) {strideInBytes = true}
 
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLB]]#1 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "b", structured = false}
@@ -508,7 +513,7 @@ subroutine acc_enter_data_allocatable()
 !CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[BOX_A_1]], %[[C0_1]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
 !CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %[[C0_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
 !CHECK: %[[UB:.*]] = arith.subi %[[DIMS1]]#1, %c1{{.*}} : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xf32>> {name = "a", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xf32>>)
@@ -529,7 +534,7 @@ subroutine acc_enter_data_allocatable()
 !CHECK: %[[C0:.*]] = arith.constant 0 : index
 !CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
 !CHECK: %[[UB:.*]] = arith.subi %[[DIMS2]]#1, %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB:.*]] : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB:.*]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xf32>> {name = "a(:)", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xf32>>)
@@ -547,7 +552,10 @@ subroutine acc_enter_data_allocatable()
 !CHECK: %[[LB:.*]] = arith.subi %[[C2]], %[[DIMS0]]#0 : index
 !CHECK: %[[C5:.*]] = arith.constant 5 : index
 !CHECK: %[[UB:.*]] = arith.subi %[[C5]], %[[DIMS0]]#0 : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
+!CHECK: %[[BOX_A_2:.*]] = fir.load %[[DECLA]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+!CHECK: %[[C0:.*]] = arith.constant 0 : index
+!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xf32>> {name = "a(2:5)", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xf32>>)
@@ -565,9 +573,9 @@ subroutine acc_enter_data_allocatable()
 !CHECK: %[[C3:.*]] = arith.constant 3 : index
 !CHECK: %[[LB:.*]] = arith.subi %[[C3]], %[[DIMS0]]#0 : index
 
-!CHECK: %[[BOX_A_1:.*]] = fir.load %[[DECLA]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+!CHECK: %[[BOX_A_2:.*]] = fir.load %[[DECLA]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
 !CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_1]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
+!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
 !CHECK: %[[UB:.*]] = arith.subi %[[DIMS2]]#1, %[[ONE]] : index
 !CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
@@ -586,7 +594,10 @@ subroutine acc_enter_data_allocatable()
 !CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
 !CHECK: %[[C7:.*]] = arith.constant 7 : index
 !CHECK: %[[UB:.*]] = arith.subi %[[C7]], %[[DIMS0]]#0 : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
+!CHECK: %[[BOX_A_2:.*]] = fir.load %[[DECLA]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+!CHECK: %[[C0:.*]] = arith.constant 0 : index
+!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xf32>> {name = "a(:7)", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xf32>>)
@@ -678,23 +689,23 @@ subroutine acc_enter_data_derived_type()
 
   !$acc enter data create(a%array(1:5))
 
-
+!CHECK: %[[C10:.*]] = arith.constant 10 : index
 !CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"}   shape %{{.*}} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<10xf32>>
 !CHECK: %[[C1:.*]] = arith.constant 1 : index
 !CHECK: %[[C0:.*]] = arith.constant 0 : index
 !CHECK: %[[C4:.*]] = arith.constant 4 : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[C0]] : index) upperbound(%[[C4]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[C0]] : index) upperbound(%[[C4]] : index)  extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a%array(1:5)", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xf32>>)
 
   !$acc enter data create(a%array(:5))
 
-
+!CHECK: %[[C10:.*]] = arith.constant 10 : index
 !CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"}   shape %{{.*}} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<10xf32>>
 !CHECK: %[[LB:.*]] = arith.constant 0 : index
 !CHECK: %[[C1:.*]] = arith.constant 1 : index
 !CHECK: %[[C4:.*]] = arith.constant 4 : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[C4]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[C4]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a%array(:5)", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xf32>>)
 
@@ -734,7 +745,7 @@ subroutine acc_enter_data_derived_type()
 !CHECK: %[[DIM0_1:.*]] = arith.constant 0 : index
 !CHECK: %[[DIMS0_1:.*]]:3 = fir.box_dims %[[DATA_BOX]], %[[DIM0_1]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
 !CHECK: %[[UB:.*]] = arith.subi %[[DIMS0_1]]#1, %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
+!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_1]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
 !CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DATA_BOX]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
 !CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xi32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xi32>> {name = "c%data", structured = false}
 !CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xi32>>)
diff --git a/flang/test/Lower/OpenACC/acc-private.f90 b/flang/test/Lower/OpenACC/acc-private.f90
index 0caaefc82225424..042596568bd7742 100644
--- a/flang/test/Lower/OpenACC/acc-private.f90
+++ b/flang/test/Lower/OpenACC/acc-private.f90
@@ -215,7 +215,7 @@ program acc_private
 ! CHECK: %[[C1:.*]] = arith.constant 1 : index
 ! CHECK: %[[LB:.*]] = arith.constant 0 : index
 ! CHECK: %[[UB:.*]] = arith.constant 49 : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
+! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
 ! CHECK: %[[B_PRIVATE:.*]] = acc.private varPtr(%[[DECLB]]#1 : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<50xf32>> {name = "b(1:50)"}
 ! CHECK: acc.loop private(@privatization_ref_50xf32 -> %[[B_PRIVATE]] : !fir.ref<!fir.array<50xf32>>)
 
@@ -252,7 +252,7 @@ program acc_private
 ! CHECK: %[[C1:.*]] = arith.constant 1 : index
 ! CHECK: %[[LB:.*]] = arith.constant 50 : index
 ! CHECK: %[[UB:.*]] = arith.constant 99 : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
+! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
 ! CHECK: %[[FP_B:.*]] = acc.firstprivate varPtr(%[[DECLB]]#1 : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<50xf32>> {name = "b(51:100)"}
 ! CHECK: acc.parallel firstprivate(@firstprivatization_section_lb50.ub99_ref_50xf32 -> %[[FP_B]] : !fir.ref<!fir.array<50xf32>>)
 
diff --git a/flang/test/Lower/OpenACC/acc-reduction.f90 b/flang/test/Lower/OpenACC/acc-reduction.f90
index 98fef6a2d2f29a3..73f7176cd45067e 100644
--- a/flang/test/Lower/OpenACC/acc-reduction.f90
+++ b/flang/test/Lower/OpenACC/acc-reduction.f90
@@ -1142,11 +1142,12 @@ subroutine acc_reduction_add_static_slice(a)
 
 ! CHECK-LABEL: func.func @_QPacc_reduction_add_static_slice(
 ! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"})
+! CHECK: %[[C100:.*]] = arith.constant 100 : index
 ! HLFIR: %[[DECLARG0:.*]]:2 = hlfir.declare %[[ARG0]]
 ! CHECK: %[[C1:.*]] = arith.constant 1 : index
 ! CHECK: %[[LB:.*]] = arith.constant 10 : index
 ! CHECK: %[[UB:.*]] = arith.constant 19 : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
+! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C100]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
 ! FIR:   %[[RED:.*]] = acc.reduction varPtr(%[[ARG0]] : !fir.ref<!fir.array<100xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xi32>> {name = "a(11:20)"}
 ! HLFIR: %[[RED:.*]] = acc.reduction varPtr(%[[DECLARG0]]#1 : !fir.ref<!fir.array<100xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xi32>> {name = "a(11:20)"}
 ! CHECK: acc.parallel reduction(@reduction_add_section_lb10.ub19_ref_100xi32 -> %[[RED]] : !fir.ref<!fir.array<100xi32>>)
@@ -1184,7 +1185,7 @@ subroutine acc_reduction_add_dynamic_extent_add_with_section(a)
 ! CHECK-LABEL: func.func @_QPacc_reduction_add_dynamic_extent_add_with_section(
 ! CHECK-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"})
 ! HLFIR: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFacc_reduction_add_dynamic_extent_add_with_sectionEa"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-! HLFIR: %[[BOUND:.*]] = acc.bounds lowerbound(%c1{{.*}} : index) upperbound(%c3{{.*}} : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}} : index) {strideInBytes = true}
+! HLFIR: %[[BOUND:.*]] = acc.bounds lowerbound(%c1{{.*}} : index) upperbound(%c3{{.*}} : index) extent(%{{.*}}#1 : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}} : index) {strideInBytes = true}
 ! HLFIR: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECL]]#1 : (!fir.box<!fir.array<?xi32>>) -> !fir.ref<!fir.array<?xi32>>
 ! HLFIR: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xi32>> {name = "a(2:4)"}
 ! HLFIR: acc.parallel reduction(@reduction_add_section_lb1.ub3_box_Uxi32 -> %[[RED]] : !fir.ref<!fir.array<?xi32>>)
@@ -1199,7 +1200,7 @@ subroutine acc_reduction_add_allocatable(a)
 ! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>> {fir.bindc_name = "a"})
 ! HLFIR: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]] {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFacc_reduction_add_allocatableEa"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>)
 ! HLFIR: %[[BOX:.*]] = fir.load %[[DECL]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-! HLFIR: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%{{.*}} : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}}#0 : index) {strideInBytes = true}
+! HLFIR: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}}#1 : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}}#0 : index) {strideInBytes = true}
 ! HLFIR: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
 ! HLFIR: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>)   bounds(%6) -> !fir.heap<!fir.array<?xf32>> {name = "a"}
 ! HLFIR: acc.parallel reduction(@reduction_max_box_heap_Uxf32 -> %[[RED]] : !fir.heap<!fir.array<?xf32>>)
@@ -1214,7 +1215,7 @@ subroutine acc_reduction_add_pointer_array(a)
 ! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>> {fir.bindc_name = "a"})
 ! HLFIR: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]] {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFacc_reduction_add_pointer_arrayEa"} : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>) -> (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>)
 ! HLFIR: %[[BOX:.*]] = fir.load %[[DECL]]#1 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
-! HLFIR: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%{{.*}} : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}}#0 : index) {strideInBytes = true}
+! HLFIR: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}}#1 : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}}#0 : index) {strideInBytes = true}
 ! HLFIR: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box<!fir.ptr<!fir.array<?xf32>>>) -> !fir.ptr<!fir.array<?xf32>>
 ! HLFIR: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.ptr<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ptr<!fir.array<?xf32>> {name = "a"}
 ! HLFIR: acc.parallel reduction(@reduction_max_box_ptr_Uxf32 -> %[[RED]] : !fir.ptr<!fir.array<?xf32>>)
diff --git a/flang/test/Lower/OpenMP/FIR/array-bounds.f90 b/flang/test/Lower/OpenMP/FIR/array-bounds.f90
index 8acc9738821464f..63c3efa0bce71e4 100644
--- a/flang/test/Lower/OpenMP/FIR/array-bounds.f90
+++ b/flang/test/Lower/OpenMP/FIR/array-bounds.f90
@@ -4,16 +4,18 @@
 !ALL-LABEL: func.func @_QPread_write_section(
 !ALL:  %[[ITER:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFread_write_sectionEi"}
 !ALL:  %[[READ:.*]] = fir.address_of(@_QFread_write_sectionEsp_read) : !fir.ref<!fir.array<10xi32>>
+!ALL:  %[[C10:.*]] = arith.constant 10 : index
 !ALL:  %[[WRITE:.*]] = fir.address_of(@_QFread_write_sectionEsp_write) : !fir.ref<!fir.array<10xi32>>
+!ALL:  %[[C10_0:.*]] = arith.constant 10 : index
 !ALL:  %[[C1:.*]] = arith.constant 1 : index
 !ALL:  %[[C2:.*]] = arith.constant 1 : index
 !ALL:  %[[C3:.*]] = arith.constant 4 : index
-!ALL:  %[[BOUNDS0:.*]] = omp.bounds   lower_bound(%[[C2]] : index) upper_bound(%[[C3]] : index) stride(%[[C1]] : index) start_idx(%[[C1]] : index)
+!ALL:  %[[BOUNDS0:.*]] = omp.bounds   lower_bound(%[[C2]] : index) upper_bound(%[[C3]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) start_idx(%[[C1]] : index)
 !ALL:  %[[MAP0:.*]] = omp.map_info var_ptr(%[[READ]] : !fir.ref<!fir.array<10xi32>>, !fir.array<10xi32>)   map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS0]]) -> !fir.ref<!fir.array<10xi32>> {name = "sp_read(2:5)"}
 !ALL:  %[[C4:.*]] = arith.constant 1 : index
 !ALL:  %[[C5:.*]] = arith.constant 1 : index
 !ALL:  %[[C6:.*]] = arith.constant 4 : index
-!ALL:  %[[BOUNDS1:.*]] = omp.bounds   lower_bound(%[[C5]] : index) upper_bound(%[[C6]] : index) stride(%[[C4]] : index) start_idx(%[[C4]] : index)
+!ALL:  %[[BOUNDS1:.*]] = omp.bounds   lower_bound(%[[C5]] : index) upper_bound(%[[C6]] : index) extent(%[[C10_0]] : index) stride(%[[C4]] : index) start_idx(%[[C4]] : index)
 !ALL:  %[[MAP1:.*]] = omp.map_info var_ptr(%[[WRITE]] : !fir.ref<!fir.array<10xi32>>, !fir.array<10xi32>)   map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS1]]) -> !fir.ref<!fir.array<10xi32>> {name = "sp_write(2:5)"}
 !ALL:  %[[MAP2:.*]] = omp.map_info var_ptr(%[[ITER]] : !fir.ref<i32>, i32)   map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !fir.ref<i32> {name = "i"}
 !ALL: omp.target map_entries(%[[MAP0]] -> %{{.*}}, %[[MAP1]] -> %{{.*}}, %[[MAP2]] -> %{{.*}} : !fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>, !fir.ref<i32>) {
@@ -33,49 +35,53 @@ module assumed_array_routines
 contains
 !ALL-LABEL: func.func @_QMassumed_array_routinesPassumed_shape_array(
 !ALL-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "arr_read_write"})
+!ALL: %[[INTERMEDIATE_ALLOCA:.*]] = fir.alloca !fir.box<!fir.array<?xi32>>
 !ALL: %[[ALLOCA:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QMassumed_array_routinesFassumed_shape_arrayEi"}
 !ALL: %[[C0:.*]] = arith.constant 1 : index
 !ALL: %[[C1:.*]] = arith.constant 0 : index
-!ALL: %[[C2:.*]]:3 = fir.box_dims %arg0, %[[C1]] : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
+!ALL: %[[DIMS0:.*]]:3 = fir.box_dims %arg0, %[[C1]] : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
 !ALL: %[[C3:.*]] = arith.constant 1 : index
 !ALL: %[[C4:.*]] = arith.constant 4 : index
-!ALL: %[[BOUNDS:.*]] = omp.bounds   lower_bound(%[[C3]] : index) upper_bound(%[[C4]] : index) stride(%[[C2]]#2 : index) start_idx(%[[C0]] : index) {stride_in_bytes = true}
-!ALL: %[[ADDROF:.*]] = fir.box_addr %arg0 : (!fir.box<!fir.array<?xi32>>) -> !fir.ref<!fir.array<?xi32>>
-!ALL: %[[MAP:.*]] = omp.map_info var_ptr(%[[ADDROF]] : !fir.ref<!fir.array<?xi32>>, !fir.array<?xi32>)   map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS]]) -> !fir.ref<!fir.array<?xi32>> {name = "arr_read_write(2:5)"}
+!ALL: %[[C0_1:.*]] = arith.constant 0 : index
+!ALL: %[[DIMS1:.*]]:3 = fir.box_dims %arg0, %[[C0_1]] : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
+!ALL: %[[BOUNDS:.*]] = omp.bounds   lower_bound(%[[C3]] : index) upper_bound(%[[C4]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) start_idx(%[[C0]] : index) {stride_in_bytes = true}
+!ALL: %[[BOXADDRADDR:.*]] = fir.box_offset %0 base_addr : (!fir.ref<!fir.box<!fir.array<?xi32>>>) -> !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>>
+!ALL: %[[BOXADDR:.*]] = fir.box_addr %[[ARG0]] : (!fir.box<!fir.array<?xi32>>) -> !fir.ref<!fir.array<?xi32>>
+!ALL: %[[MAP_MEMBER:.*]] = omp.map_info var_ptr(%[[BOXADDR]] : !fir.ref<!fir.array<?xi32>>, !fir.array<?xi32>) var_ptr_ptr(%[[BOXADDRADDR]] : !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>>) map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS]]) -> !fir.ref<!fir.array<?xi32>> {name = "arr_read_write(2:5)"}
+!ALL: %[[MAP:.*]] = omp.map_info var_ptr(%0 : !fir.ref<!fir.box<!fir.array<?xi32>>>, !fir.box<!fir.array<?xi32>>) map_clauses(tofrom) capture(ByRef) members(%[[MAP_MEMBER]] : !fir.ref<!fir.array<?xi32>>) -> !fir.ref<!fir.box<!fir.array<?xi32>>> {name = "arr_read_write(2:5)"}
 !ALL: %[[MAP2:.*]] = omp.map_info var_ptr(%[[ALLOCA]] : !fir.ref<i32>, i32)   map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !fir.ref<i32> {name = "i"}
-!ALL: omp.target map_entries(%[[MAP]] -> %{{.*}}, %[[MAP2]] -> %{{.*}} : !fir.ref<!fir.array<?xi32>>, !fir.ref<i32>) {
-
+!ALL: omp.target map_entries(%[[MAP]] -> %{{.*}}, %[[MAP2]] -> %{{.*}} : !fir.ref<!fir.box<!fir.array<?xi32>>>, !fir.ref<i32>) {
     subroutine assumed_shape_array(arr_read_write)
-            integer, intent(inout) :: arr_read_write(:)
+        integer, intent(inout) :: arr_read_write(:)
 
         !$omp target map(tofrom:arr_read_write(2:5))
             do i = 2, 5
                 arr_read_write(i) = i
             end do
         !$omp end target
-        end subroutine assumed_shape_array
+    end subroutine assumed_shape_array
 
 !ALL-LABEL:   func.func @_QMassumed_array_routinesPassumed_size_array(
 !ALL-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<?xi32>> {fir.bindc_name = "arr_read_write"})
+!ALL: %[[UNDEF:.*]] = fir.undefined index
 !ALL: %[[ALLOCA:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QMassumed_array_routinesFassumed_size_arrayEi"}
 !ALL: %[[C0:.*]] = arith.constant 1 : index
 !ALL: %[[C1:.*]] = arith.constant 1 : index
 !ALL: %[[C2:.*]] = arith.constant 4 : index
-!ALL: %[[BOUNDS:.*]]  = omp.bounds   lower_bound(%[[C1]] : index) upper_bound(%[[C2]] : index) stride(%[[C0]] : index) start_idx(%[[C0]] : index)
+!ALL: %[[BOUNDS:.*]]  = omp.bounds   lower_bound(%[[C1]] : index) upper_bound(%[[C2]] : index) extent(%[[UNDEF]] : index) stride(%[[C0]] : index) start_idx(%[[C0]] : index)
 !ALL: %[[MAP:.*]] = omp.map_info var_ptr(%[[ARG0]] : !fir.ref<!fir.array<?xi32>>, !fir.array<?xi32>)   map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS]]) -> !fir.ref<!fir.array<?xi32>> {name = "arr_read_write(2:5)"}
 !ALL: %[[MAP2:.*]] = omp.map_info var_ptr(%[[ALLOCA]] : !fir.ref<i32>, i32)   map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !fir.ref<i32> {name = "i"}
 !ALL: omp.target map_entries(%[[MAP]] -> %{{.*}}, %[[MAP2]] -> %{{.*}} : !fir.ref<!fir.array<?xi32>>, !fir.ref<i32>) {
+    subroutine assumed_size_array(arr_read_write)
+        integer, intent(inout) :: arr_read_write(*)
 
-        subroutine assumed_size_array(arr_read_write)
-            integer, intent(inout) :: arr_read_write(*)
-
-        !$omp target map(tofrom:arr_read_write(2:5))
-            do i = 2, 5
-                arr_read_write(i) = i
-            end do
-        !$omp end target
-        end subroutine assumed_size_array
-    end module assumed_array_routines
+    !$omp target map(tofrom:arr_read_write(2:5))
+        do i = 2, 5
+            arr_read_write(i) = i
+        end do
+    !$omp end target
+    end subroutine assumed_size_array
+end module assumed_array_routines
 
 !DEVICE-NOT:func.func @_QPcall_assumed_shape_and_size_array() {
 
@@ -107,7 +113,6 @@ end module assumed_array_routines
 !HOST:fir.call @_QMassumed_array_routinesPassumed_size_array(%[[ARG1]]) fastmath<contract> : (!fir.ref<!fir.array<?xi32>>) -> ()
 !HOST:return
 !HOST:}
-
 subroutine call_assumed_shape_and_size_array
     use assumed_array_routines
     integer :: arr_read_write(20)
diff --git a/flang/test/Lower/OpenMP/FIR/target.f90 b/flang/test/Lower/OpenMP/FIR/target.f90
index 2034ac84334e546..4ee53127f402519 100644
--- a/flang/test/Lower/OpenMP/FIR/target.f90
+++ b/flang/test/Lower/OpenMP/FIR/target.f90
@@ -356,7 +356,8 @@ end subroutine omp_target_device_ptr
  subroutine omp_target_device_addr
    integer, pointer :: a
    !CHECK: %[[VAL_0:.*]] = fir.alloca !fir.box<!fir.ptr<i32>> {bindc_name = "a", uniq_name = "_QFomp_target_device_addrEa"}
-   !CHECK: %[[MAP:.*]] = omp.map_info var_ptr({{.*}})   map_clauses(tofrom) capture(ByRef) -> {{.*}} {name = "a"}
+   !CHECK: %[[MAP_MEMBERS:.*]] = omp.map_info var_ptr({{.*}} : !fir.ptr<i32>, i32) var_ptr_ptr({{.*}} : !fir.llvm_ptr<!fir.ref<i32>>) map_clauses(tofrom) capture(ByRef) -> !fir.ptr<i32> {name = "a"}
+   !CHECK: %[[MAP:.*]] = omp.map_info var_ptr({{.*}} : !fir.ref<!fir.box<!fir.ptr<i32>>>, !fir.box<!fir.ptr<i32>>) map_clauses(tofrom) capture(ByRef) members(%[[MAP_MEMBERS]] : !fir.ptr<i32>) -> !fir.ref<!fir.box<!fir.ptr<i32>>> {name = "a"}
    !CHECK: omp.target_data map_entries(%[[MAP]] : {{.*}}) use_device_addr(%[[VAL_0]] : !fir.ref<!fir.box<!fir.ptr<i32>>>) {
    !$omp target data map(tofrom: a) use_device_addr(a)
    !CHECK: ^bb0(%[[VAL_1:.*]]: !fir.ref<!fir.box<!fir.ptr<i32>>>):
diff --git a/flang/test/Lower/OpenMP/allocatable-array-bounds.f90 b/flang/test/Lower/OpenMP/allocatable-array-bounds.f90
new file mode 100644
index 000000000000000..5bf84dc3d5962a4
--- /dev/null
+++ b/flang/test/Lower/OpenMP/allocatable-array-bounds.f90
@@ -0,0 +1,121 @@
+!RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s --check-prefixes HOST
+
+!HOST-LABEL:  func.func @_QPread_write_section() {
+
+!HOST: %[[ALLOCA_1:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>> {bindc_name = "sp_read", uniq_name = "_QFread_write_sectionEsp_read"}
+!HOST: %[[DECLARE_1:.*]]:2 = hlfir.declare %[[ALLOCA_1]] {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFread_write_sectionEsp_read"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
+
+!HOST: %[[ALLOCA_2:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>> {bindc_name = "sp_write", uniq_name = "_QFread_write_sectionEsp_write"}
+!HOST: %[[DECLARE_2:.*]]:2 = hlfir.declare %[[ALLOCA_2]] {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFread_write_sectionEsp_write"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
+
+!HOST: %[[LOAD_1:.*]] = fir.load %[[DECLARE_1]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+!HOST: %[[LOAD_2:.*]] = fir.load %[[DECLARE_1]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+!HOST: %[[CONSTANT_1:.*]] = arith.constant 0 : index
+!HOST: %[[BOX_1:.*]]:3 = fir.box_dims %[[LOAD_2]], %[[CONSTANT_1]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+!HOST: %[[CONSTANT_2:.*]] = arith.constant 0 : index
+!HOST: %[[BOX_2:.*]]:3 = fir.box_dims %[[LOAD_1]], %[[CONSTANT_2]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+!HOST: %[[CONSTANT_3:.*]] = arith.constant 2 : index
+!HOST: %[[LB_1:.*]] = arith.subi %[[CONSTANT_3]], %[[BOX_1]]#0 : index
+!HOST: %[[CONSTANT_4:.*]] = arith.constant 5 : index
+!HOST: %[[UB_1:.*]] = arith.subi %[[CONSTANT_4]], %[[BOX_1]]#0 : index
+!HOST: %[[LOAD_3:.*]] = fir.load %[[DECLARE_1]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+!HOST: %[[CONSTANT_3:.*]] = arith.constant 0 : index
+!HOST: %[[BOX_3:.*]]:3 = fir.box_dims %[[LOAD_3]], %[[CONSTANT_3]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+!HOST: %[[BOUNDS_1:.*]] = omp.bounds lower_bound(%[[LB_1]] : index) upper_bound(%[[UB_1]] : index) extent(%[[BOX_3]]#1 : index) stride(%[[BOX_2]]#2 : index) start_idx(%[[BOX_1]]#0 : index) {stride_in_bytes = true}
+!HOST: %[[VAR_PTR_PTR:.*]] = fir.box_offset %[[DECLARE_1]]#1 base_addr : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>>
+!HOST: %[[VAR_PTR:.*]] = fir.box_addr %[[LOAD_1]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
+!HOST: %[[MAP_INFO_MEMBER:.*]] = omp.map_info var_ptr(%[[VAR_PTR]] : !fir.heap<!fir.array<?xi32>>, !fir.array<?xi32>) var_ptr_ptr(%[[VAR_PTR_PTR]] : !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>>) map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS_1]]) -> !fir.heap<!fir.array<?xi32>> {name = "sp_read(2:5)"}    
+!HOST: %[[MAP_INFO_1:.*]] = omp.map_info var_ptr(%[[DECLARE_1]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.box<!fir.heap<!fir.array<?xi32>>>) map_clauses(tofrom) capture(ByRef) members(%[[MAP_INFO_MEMBER]] : !fir.heap<!fir.array<?xi32>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {name = "sp_read(2:5)"}
+
+!HOST: %[[LOAD_3:.*]] = fir.load %[[DECLARE_2]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+!HOST: %[[LOAD_4:.*]] = fir.load %[[DECLARE_2]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+!HOST: %[[CONSTANT_5:.*]] = arith.constant 0 : index
+!HOST: %[[BOX_3:.*]]:3 = fir.box_dims %[[LOAD_4]], %[[CONSTANT_5]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+!HOST: %[[CONSTANT_6:.*]] = arith.constant 0 : index
+!HOST: %[[BOX_4:.*]]:3 = fir.box_dims %[[LOAD_3]], %[[CONSTANT_6]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+!HOST: %[[CONSTANT_7:.*]] = arith.constant 2 : index
+!HOST: %[[LB_2:.*]] = arith.subi %[[CONSTANT_7]], %[[BOX_3]]#0 : index
+!HOST: %[[CONSTANT_8:.*]] = arith.constant 5 : index
+!HOST: %[[UB_2:.*]] = arith.subi %[[CONSTANT_8]], %[[BOX_3]]#0 : index
+!HOST: %[[LOAD_5:.*]] = fir.load %[[DECLARE_2]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+!HOST: %[[CONSTANT_5:.*]] = arith.constant 0 : index
+!HOST: %[[BOX_5:.*]]:3 = fir.box_dims %[[LOAD_5]], %[[CONSTANT_5]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+!HOST: %[[BOUNDS_2:.*]] = omp.bounds lower_bound(%[[LB_2]] : index) upper_bound(%[[UB_2]] : index) extent(%[[BOX_5]]#1 : index) stride(%[[BOX_4]]#2 : index) start_idx(%[[BOX_3]]#0 : index) {stride_in_bytes = true}
+!HOST: %[[VAR_PTR_PTR:.*]] = fir.box_offset %[[DECLARE_2]]#1 base_addr : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>>
+!HOST: %[[VAR_PTR:.*]] = fir.box_addr %[[LOAD_3]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
+!HOST: %[[MAP_INFO_MEMBER:.*]] = omp.map_info var_ptr(%[[VAR_PTR]] : !fir.heap<!fir.array<?xi32>>, !fir.array<?xi32>) var_ptr_ptr(%[[VAR_PTR_PTR]] : !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>>) map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS_2]]) -> !fir.heap<!fir.array<?xi32>> {name = "sp_write(2:5)"}    
+!HOST: %[[MAP_INFO_2:.*]] = omp.map_info var_ptr(%[[DECLARE_2]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.box<!fir.heap<!fir.array<?xi32>>>) map_clauses(tofrom) capture(ByRef) members(%[[MAP_INFO_MEMBER]] : !fir.heap<!fir.array<?xi32>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {name = "sp_write(2:5)"}
+
+subroutine read_write_section()
+    integer, allocatable :: sp_read(:)
+    integer, allocatable :: sp_write(:)
+    allocate(sp_read(10)) 
+    allocate(sp_write(10))
+    sp_write = (/0,0,0,0,0,0,0,0,0,0/)
+    sp_read = (/1,2,3,4,5,6,7,8,9,10/)
+
+!$omp target map(tofrom:sp_read(2:5)) map(tofrom:sp_write(2:5))
+    do i = 2, 5
+        sp_write(i) = sp_read(i)
+    end do
+!$omp end target
+end subroutine read_write_section
+
+module assumed_allocatable_array_routines
+    contains
+
+!HOST-LABEL: func.func @_QMassumed_allocatable_array_routinesPassumed_shape_array(
+
+!HOST: %[[DECLARE:.*]]:2 = hlfir.declare %[[ARG:.*]] {fortran_attrs = #fir.var_attrs<allocatable, intent_inout>, uniq_name = "_QMassumed_allocatable_array_routinesFassumed_shape_arrayEarr_read_write"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
+!HOST: %[[LOAD_1:.*]] = fir.load %[[DECLARE]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+!HOST: %[[LOAD_2:.*]] = fir.load %[[DECLARE]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+!HOST: %[[CONSTANT_1:.*]] = arith.constant 0 : index
+!HOST: %[[BOX_1:.*]]:3 = fir.box_dims %[[LOAD_2]], %[[CONSTANT_1]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+!HOST: %[[CONSTANT_2:.*]] = arith.constant 0 : index
+!HOST: %[[BOX_2:.*]]:3 = fir.box_dims %[[LOAD_1]], %[[CONSTANT_2]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+!HOST: %[[CONSTANT_3:.*]] = arith.constant 2 : index
+!HOST: %[[LB:.*]] = arith.subi %[[CONSTANT_3]], %[[BOX_1]]#0 : index
+!HOST: %[[CONSTANT_4:.*]] = arith.constant 5 : index
+!HOST: %[[UB:.*]] = arith.subi %[[CONSTANT_4]], %[[BOX_1]]#0 : index
+!HOST: %[[LOAD_3:.*]] = fir.load %[[DECLARE]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+!HOST: %[[CONSTANT_3:.*]] = arith.constant 0 : index
+!HOST: %[[BOX_3:.*]]:3 = fir.box_dims %[[LOAD_3]], %[[CONSTANT_3]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+!HOST: %[[BOUNDS:.*]] = omp.bounds lower_bound(%[[LB]] : index) upper_bound(%[[UB]] : index) extent(%[[BOX_3]]#1 : index) stride(%[[BOX_2]]#2 : index) start_idx(%[[BOX_1]]#0 : index) {stride_in_bytes = true}
+!HOST: %[[VAR_PTR_PTR:.*]] = fir.box_offset %[[DECLARE]]#1 base_addr : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>>
+!HOST: %[[VAR_PTR:.*]] = fir.box_addr %[[LOAD_1]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
+!HOST: %[[MAP_INFO_MEMBER:.*]] = omp.map_info var_ptr(%[[VAR_PTR]] : !fir.heap<!fir.array<?xi32>>, !fir.array<?xi32>) var_ptr_ptr(%[[VAR_PTR_PTR]] : !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>>) map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS]]) -> !fir.heap<!fir.array<?xi32>> {name = "arr_read_write(2:5)"}    
+!HOST: %[[MAP_INFO:.*]] = omp.map_info var_ptr(%[[DECLARE]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.box<!fir.heap<!fir.array<?xi32>>>) map_clauses(tofrom) capture(ByRef) members(%[[MAP_INFO_MEMBER]] : !fir.heap<!fir.array<?xi32>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {name = "arr_read_write(2:5)"}
+    
+    subroutine assumed_shape_array(arr_read_write)
+    integer, allocatable, intent(inout) :: arr_read_write(:)
+
+!$omp target map(tofrom:arr_read_write(2:5))
+    do i = 2, 5
+        arr_read_write(i) = i
+    end do
+!$omp end target
+end subroutine assumed_shape_array
+end module assumed_allocatable_array_routines
+
+!HOST-LABEL: func.func @_QPcall_assumed_shape_and_size_array() {
+!HOST: %[[ALLOCA:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xi32>>> {bindc_name = "arr_read_write", uniq_name = "_QFcall_assumed_shape_and_size_arrayEarr_read_write"}
+!HOST: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFcall_assumed_shape_and_size_arrayEarr_read_write"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
+!HOST: %[[ALLOCA_MEM:.*]] = fir.allocmem !fir.array<?xi32>, %{{.*}} {fir.must_be_heap = true, uniq_name = "_QFcall_assumed_shape_and_size_arrayEarr_read_write.alloc"}
+!HOST: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
+!HOST: %[[EMBOX:.*]] = fir.embox %[[ALLOCA_MEM]](%[[SHAPE]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
+!HOST: fir.store %[[EMBOX]] to %[[DECLARE]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+!HOST: %[[LOAD:.*]] = fir.load %[[DECLARE]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+!HOST: %[[CONSTANT_1:.*]] = arith.constant 10 : index
+!HOST: %[[CONSTANT_2:.*]] = arith.constant 20 : index
+!HOST: %[[CONSTANT_3:.*]] = arith.constant 1 : index
+!HOST: %[[CONSTANT_4:.*]] = arith.constant 11 : index
+!HOST: %[[SHAPE:.*]] = fir.shape %[[CONSTANT_4]] : (index) -> !fir.shape<1>
+!HOST: %[[DESIGNATE:.*]] = hlfir.designate %[[LOAD]] (%[[CONSTANT_1]]:%[[CONSTANT_2]]:%[[CONSTANT_3]])  shape %[[SHAPE]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index, index, index, !fir.shape<1>) -> !fir.ref<!fir.array<11xi32>>
+!HOST: fir.call @_QPassumed_size_array(%[[DESIGNATE]]) fastmath<contract> : (!fir.ref<!fir.array<11xi32>>) -> ()
+subroutine call_assumed_shape_and_size_array
+    use assumed_allocatable_array_routines
+    integer, allocatable :: arr_read_write(:)
+    allocate(arr_read_write(20))
+    call assumed_size_array(arr_read_write(10:20))
+    deallocate(arr_read_write)
+end subroutine call_assumed_shape_and_size_array
\ No newline at end of file
diff --git a/flang/test/Lower/OpenMP/allocatable-map.f90 b/flang/test/Lower/OpenMP/allocatable-map.f90
new file mode 100644
index 000000000000000..5b8302459ca1248
--- /dev/null
+++ b/flang/test/Lower/OpenMP/allocatable-map.f90
@@ -0,0 +1,15 @@
+!RUN: %flang_fc1 -emit-hlfir -fopenmp %s -o - | FileCheck %s --check-prefixes="HLFIRDIALECT"
+
+!HLFIRDIALECT: %[[POINTER:.*]]:2 = hlfir.declare %0 {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFpointer_routineEpoint"} : (!fir.ref<!fir.box<!fir.ptr<i32>>>) -> (!fir.ref<!fir.box<!fir.ptr<i32>>>, !fir.ref<!fir.box<!fir.ptr<i32>>>)
+!HLFIRDIALECT: %[[LOAD:.*]] = fir.load %[[POINTER]]#1 : !fir.ref<!fir.box<!fir.ptr<i32>>>
+!HLFIRDIALECT: %[[BOX_OFF:.*]] = fir.box_offset %[[POINTER]]#1 base_addr : (!fir.ref<!fir.box<!fir.ptr<i32>>>) -> !fir.llvm_ptr<!fir.ref<i32>>
+!HLFIRDIALECT: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.ptr<i32>>) -> !fir.ptr<i32>
+!HLFIRDIALECT: %[[POINTER_MAP_MEMBER:.*]] = omp.map_info var_ptr(%[[BOX_ADDR]] : !fir.ptr<i32>, i32) var_ptr_ptr(%[[BOX_OFF]] : !fir.llvm_ptr<!fir.ref<i32>>) map_clauses(implicit, tofrom) capture(ByRef) -> !fir.ptr<i32> {name = "point"}
+!HLFIRDIALECT: %[[POINTER_MAP:.*]] = omp.map_info var_ptr(%[[POINTER]]#1 : !fir.ref<!fir.box<!fir.ptr<i32>>>, !fir.box<!fir.ptr<i32>>) map_clauses(implicit, tofrom) capture(ByRef) members(%[[POINTER_MAP_MEMBER]] : !fir.ptr<i32>) -> !fir.ref<!fir.box<!fir.ptr<i32>>> {name = "point"}
+!HLFIRDIALECT: omp.target map_entries({{.*}}, %[[POINTER_MAP]] -> {{.*}} : {{.*}}, !fir.ref<!fir.box<!fir.ptr<i32>>>) {
+subroutine pointer_routine()
+    integer, pointer :: point 
+!$omp target map(tofrom:pointer)
+    point = 1
+!$omp end target
+end subroutine pointer_routine
diff --git a/flang/test/Lower/OpenMP/array-bounds.f90 b/flang/test/Lower/OpenMP/array-bounds.f90
index c0e50acb8e37c42..3ed156e2ba96421 100644
--- a/flang/test/Lower/OpenMP/array-bounds.f90
+++ b/flang/test/Lower/OpenMP/array-bounds.f90
@@ -8,19 +8,18 @@
 !HOST:  %[[READ_SHAPE:.*]] = fir.shape %[[C10]] : (index) -> !fir.shape<1>
 !HOST:  %[[READ_DECL:.*]]:2 = hlfir.declare %[[READ]](%[[READ_SHAPE]]) {uniq_name = "_QFread_write_sectionEsp_read"} : (!fir.ref<!fir.array<10xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>)
 !HOST:  %[[WRITE:.*]] = fir.address_of(@_QFread_write_sectionEsp_write) : !fir.ref<!fir.array<10xi32>>
-!HOST:  %[[C10:.*]] = arith.constant 10 : index
-!HOST:  %[[WRITE_SHAPE:.*]] = fir.shape %[[C10]] : (index) -> !fir.shape<1>
+!HOST:  %[[C10_0:.*]] = arith.constant 10 : index
+!HOST:  %[[WRITE_SHAPE:.*]] = fir.shape %[[C10_0]] : (index) -> !fir.shape<1>
 !HOST:  %[[WRITE_DECL:.*]]:2 = hlfir.declare %[[WRITE]](%[[WRITE_SHAPE]]) {uniq_name = "_QFread_write_sectionEsp_write"} : (!fir.ref<!fir.array<10xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>)
-
 !HOST:  %[[C1:.*]] = arith.constant 1 : index
 !HOST:  %[[C2:.*]] = arith.constant 1 : index
 !HOST:  %[[C3:.*]] = arith.constant 4 : index
-!HOST:  %[[BOUNDS0:.*]] = omp.bounds   lower_bound(%[[C2]] : index) upper_bound(%[[C3]] : index) stride(%[[C1]] : index) start_idx(%[[C1]] : index)
+!HOST:  %[[BOUNDS0:.*]] = omp.bounds   lower_bound(%[[C2]] : index) upper_bound(%[[C3]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) start_idx(%[[C1]] : index)
 !HOST:  %[[MAP0:.*]] = omp.map_info var_ptr(%[[READ_DECL]]#1 : !fir.ref<!fir.array<10xi32>>, !fir.array<10xi32>)   map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS0]]) -> !fir.ref<!fir.array<10xi32>> {name = "sp_read(2:5)"}
 !HOST:  %[[C4:.*]] = arith.constant 1 : index
 !HOST:  %[[C5:.*]] = arith.constant 1 : index
 !HOST:  %[[C6:.*]] = arith.constant 4 : index
-!HOST:  %[[BOUNDS1:.*]] = omp.bounds   lower_bound(%[[C5]] : index) upper_bound(%[[C6]] : index) stride(%[[C4]] : index) start_idx(%[[C4]] : index)
+!HOST:  %[[BOUNDS1:.*]] = omp.bounds   lower_bound(%[[C5]] : index) upper_bound(%[[C6]] : index) extent(%[[C10_0]] : index) stride(%[[C4]] : index) start_idx(%[[C4]] : index)
 !HOST:  %[[MAP1:.*]] = omp.map_info var_ptr(%[[WRITE_DECL]]#1 : !fir.ref<!fir.array<10xi32>>, !fir.array<10xi32>)   map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS1]]) -> !fir.ref<!fir.array<10xi32>> {name = "sp_write(2:5)"}
 !HOST:  omp.target map_entries(%[[MAP0]] -> %{{.*}}, %[[MAP1]] -> %{{.*}}, {{.*}} -> {{.*}} : !fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>, !fir.ref<i32>) {
 
@@ -41,16 +40,21 @@ module assumed_array_routines
 
 !HOST-LABEL: func.func @_QMassumed_array_routinesPassumed_shape_array(
 !HOST-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "arr_read_write"}) {
+!HOST: %[[INTERMEDIATE_ALLOCA:.*]] = fir.alloca !fir.box<!fir.array<?xi32>>
 !HOST: %[[ARG0_DECL:.*]]:2 = hlfir.declare %[[ARG0]] {fortran_attrs = #fir.var_attrs<intent_inout>, uniq_name = "_QMassumed_array_routinesFassumed_shape_arrayEarr_read_write"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
 !HOST: %[[C0:.*]] = arith.constant 1 : index
 !HOST: %[[C1:.*]] = arith.constant 0 : index
-!HOST: %[[C2:.*]]:3 = fir.box_dims %[[ARG0_DECL]]#1, %[[C1]] : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
+!HOST: %[[DIMS0:.*]]:3 = fir.box_dims %[[ARG0_DECL]]#1, %[[C1]] : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
 !HOST: %[[C3:.*]] = arith.constant 1 : index
 !HOST: %[[C4:.*]] = arith.constant 4 : index
-!HOST: %[[BOUNDS:.*]] = omp.bounds   lower_bound(%[[C3]] : index) upper_bound(%[[C4]] : index) stride(%[[C2]]#2 : index) start_idx(%[[C0]] : index) {stride_in_bytes = true}
-!HOST: %[[ADDROF:.*]] = fir.box_addr %[[ARG0_DECL]]#1 : (!fir.box<!fir.array<?xi32>>) -> !fir.ref<!fir.array<?xi32>>
-!HOST: %[[MAP:.*]] = omp.map_info var_ptr(%[[ADDROF]] : !fir.ref<!fir.array<?xi32>>, !fir.array<?xi32>)   map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS]]) -> !fir.ref<!fir.array<?xi32>> {name = "arr_read_write(2:5)"}
-!HOST: omp.target   map_entries(%[[MAP]] -> %{{.*}}, {{.*}} -> {{.*}} : !fir.ref<!fir.array<?xi32>>, !fir.ref<i32>) {
+!HOST: %[[C0_1:.*]] = arith.constant 0 : index
+!HOST: %[[DIMS1:.*]]:3 = fir.box_dims %[[ARG0_DECL]]#1, %[[C0_1]] : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
+!HOST: %[[BOUNDS:.*]] = omp.bounds   lower_bound(%[[C3]] : index) upper_bound(%[[C4]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) start_idx(%[[C0]] : index) {stride_in_bytes = true}
+!HOST: %[[VAR_PTR_PTR:.*]] = fir.box_offset %0 base_addr : (!fir.ref<!fir.box<!fir.array<?xi32>>>) -> !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>>
+!HOST: %[[VAR_PTR:.*]] = fir.box_addr %[[ARG0_DECL]]#1 : (!fir.box<!fir.array<?xi32>>) -> !fir.ref<!fir.array<?xi32>>
+!HOST: %[[MAP_INFO_MEMBER:.*]] = omp.map_info var_ptr(%[[VAR_PTR]] : !fir.ref<!fir.array<?xi32>>, !fir.array<?xi32>) var_ptr_ptr(%[[VAR_PTR_PTR]] : !fir.llvm_ptr<!fir.ref<!fir.array<?xi32>>>) map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS]]) -> !fir.ref<!fir.array<?xi32>> {name = "arr_read_write(2:5)"}
+!HOST: %[[MAP:.*]] = omp.map_info var_ptr(%[[INTERMEDIATE_ALLOCA]] : !fir.ref<!fir.box<!fir.array<?xi32>>>, !fir.box<!fir.array<?xi32>>) map_clauses(tofrom) capture(ByRef) members(%[[MAP_INFO_MEMBER]] : !fir.ref<!fir.array<?xi32>>) -> !fir.ref<!fir.box<!fir.array<?xi32>>> {name = "arr_read_write(2:5)"}
+!HOST: omp.target   map_entries(%[[MAP]] -> %{{.*}}, {{.*}} -> {{.*}} : !fir.ref<!fir.box<!fir.array<?xi32>>>, !fir.ref<i32>) {
     subroutine assumed_shape_array(arr_read_write)
             integer, intent(inout) :: arr_read_write(:)
 
@@ -59,7 +63,7 @@ subroutine assumed_shape_array(arr_read_write)
                 arr_read_write(i) = i
             end do
         !$omp end target
-        end subroutine assumed_shape_array
+    end subroutine assumed_shape_array
 
 
 !HOST-LABEL: func.func @_QMassumed_array_routinesPassumed_size_array(
@@ -71,20 +75,19 @@ end subroutine assumed_shape_array
 !HOST: %[[C0:.*]] = arith.constant 1 : index
 !HOST: %[[C1:.*]] = arith.constant 1 : index
 !HOST: %[[C2:.*]] = arith.constant 4 : index
-!HOST: %[[BOUNDS:.*]]  = omp.bounds   lower_bound(%[[C1]] : index) upper_bound(%[[C2]] : index) stride(%[[C0]] : index) start_idx(%[[C0]] : index)
+!HOST: %[[BOUNDS:.*]]  = omp.bounds   lower_bound(%[[C1]] : index) upper_bound(%[[C2]] : index) extent(%[[UNDEF]] : index) stride(%[[C0]] : index) start_idx(%[[C0]] : index)
 !HOST: %[[MAP:.*]] = omp.map_info var_ptr(%[[ARG0_DECL]]#1 : !fir.ref<!fir.array<?xi32>>, !fir.array<?xi32>)   map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS]]) -> !fir.ref<!fir.array<?xi32>> {name = "arr_read_write(2:5)"}
 !HOST: omp.target map_entries(%[[MAP]] -> %{{.*}}, {{.*}} -> {{.*}} : !fir.ref<!fir.array<?xi32>>, !fir.ref<i32>) {
-        subroutine assumed_size_array(arr_read_write)
-            integer, intent(inout) :: arr_read_write(*)
-
-        !$omp target map(tofrom:arr_read_write(2:5))
-            do i = 2, 5
-                arr_read_write(i) = i
-            end do
-        !$omp end target
-        end subroutine assumed_size_array
-    end module assumed_array_routines
+    subroutine assumed_size_array(arr_read_write)
+        integer, intent(inout) :: arr_read_write(*)
 
+    !$omp target map(tofrom:arr_read_write(2:5))
+        do i = 2, 5
+            arr_read_write(i) = i
+        end do
+    !$omp end target
+    end subroutine assumed_size_array
+end module assumed_array_routines
 
 !HOST-LABEL:func.func @_QPcall_assumed_shape_and_size_array() {
 !HOST: %[[C20:.*]] = arith.constant 20 : index
diff --git a/flang/test/Lower/OpenMP/target.f90 b/flang/test/Lower/OpenMP/target.f90
index 26b4d595d52291b..4db702ab042657b 100644
--- a/flang/test/Lower/OpenMP/target.f90
+++ b/flang/test/Lower/OpenMP/target.f90
@@ -357,7 +357,8 @@ subroutine omp_target_device_addr
    integer, pointer :: a
    !CHECK: %[[VAL_0:.*]] = fir.alloca !fir.box<!fir.ptr<i32>> {bindc_name = "a", uniq_name = "_QFomp_target_device_addrEa"}
    !CHECK: %[[VAL_0_DECL:.*]]:2 = hlfir.declare %0 {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFomp_target_device_addrEa"} : (!fir.ref<!fir.box<!fir.ptr<i32>>>) -> (!fir.ref<!fir.box<!fir.ptr<i32>>>, !fir.ref<!fir.box<!fir.ptr<i32>>>)
-   !CHECK: %[[MAP:.*]] = omp.map_info var_ptr({{.*}})   map_clauses(tofrom) capture(ByRef) -> {{.*}} {name = "a"}
+   !CHECK: %[[MAP_MEMBERS:.*]] = omp.map_info var_ptr({{.*}} : !fir.ptr<i32>, i32) var_ptr_ptr({{.*}} : !fir.llvm_ptr<!fir.ref<i32>>) map_clauses(tofrom) capture(ByRef) -> !fir.ptr<i32> {name = "a"}
+   !CHECK: %[[MAP:.*]] = omp.map_info var_ptr({{.*}} : !fir.ref<!fir.box<!fir.ptr<i32>>>, !fir.box<!fir.ptr<i32>>) map_clauses(tofrom) capture(ByRef) members(%[[MAP_MEMBERS]] : !fir.ptr<i32>) -> !fir.ref<!fir.box<!fir.ptr<i32>>> {name = "a"}
    !CHECK: omp.target_data map_entries(%[[MAP]] : {{.*}}) use_device_addr(%[[VAL_0_DECL]]#1 : !fir.ref<!fir.box<!fir.ptr<i32>>>) {
    !$omp target data map(tofrom: a) use_device_addr(a)
    !CHECK: ^bb0(%[[VAL_1:.*]]: !fir.ref<!fir.box<!fir.ptr<i32>>>):
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
index 8ff5380f71ad453..3bca0f565082fa8 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -1148,6 +1148,7 @@ def MapInfoOp : OpenMP_Op<"map_info", [AttrSizedOperandSegments]> {
   let arguments = (ins OpenMP_PointerLikeType:$var_ptr,
                        TypeAttr:$var_type,
                        Optional<OpenMP_PointerLikeType>:$var_ptr_ptr,
+                       Variadic<OpenMP_PointerLikeType>:$members,
                        Variadic<DataBoundsType>:$bounds, /* rank-0 to rank-{n-1} */
                        OptionalAttr<UI64Attr>:$map_type,
                        OptionalAttr<VariableCaptureKindAttr>:$map_capture_type,
@@ -1187,13 +1188,17 @@ def MapInfoOp : OpenMP_Op<"map_info", [AttrSizedOperandSegments]> {
     - `var_type`: The type of the variable to copy.
     - `var_ptr_ptr`: Used when the variable copied is a member of a class, structure
       or derived type and refers to the originating struct.
+    - `members`: Used to indicate the mapped child members of the current
+       MapInfoOp, represented as further MapInfoOps. This is used when a parent
+       structure type and members of that structure are mapped at the same
+       time, for example: map(to: parent, parent->member, parent->member2[:10]).
+       A Fortran-level sketch of the allocatable case follows this diff.
     - `bounds`: Used when copying slices of array's, pointers or pointer members of
-      objects (e.g. derived types or classes), indicates the bounds to be copied
-      of the variable. When it's an array slice it is in rank order where rank 0
-      is the inner-most dimension.
+       objects (e.g. derived types or classes), indicates the bounds to be copied
+       of the variable. When it's an array slice it is in rank order where rank 0
+       is the inner-most dimension.
     - 'map_clauses': OpenMP map type for this map capture, for example: from, to and
-      always. It's a bitfield composed of the OpenMP runtime flags stored in
-      OpenMPOffloadMappingFlags.
+       always. It's a bitfield composed of the OpenMP runtime flags stored in
+       OpenMPOffloadMappingFlags.
     - 'map_capture_type': Capture type for the variable e.g. this, byref, byvalue, byvla
        this can affect how the variable is lowered.
     - `name`: Holds the name of variable as specified in user clause (including bounds).
@@ -1205,6 +1210,7 @@ def MapInfoOp : OpenMP_Op<"map_info", [AttrSizedOperandSegments]> {
         `var_ptr_ptr` `(` $var_ptr_ptr `:` type($var_ptr_ptr) `)`
       | `map_clauses` `(` custom<MapClause>($map_type) `)`
       | `capture` `(` custom<CaptureType>($map_capture_type) `)`
+      | `members` `(` $members `:` type($members) `)`
       | `bounds` `(` $bounds `)`
     ) `->` type($omp_ptr) attr-dict
   }];
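
To tie the new `members` and `var_ptr_ptr` operands back to the Fortran this patch targets, here is a minimal Fortran-level sketch (the subroutine and variable names are illustrative and not taken from the patch's tests): mapping a section of an allocatable is expected to lower the descriptor as the parent omp.map_info, with the heap data it points to described by a member omp.map_info that carries the section bounds.

subroutine sketch_allocatable_map
  ! `a`'s descriptor becomes the parent omp.map_info; the heap data it points
  ! to (reached through fir.box_offset / var_ptr_ptr) becomes the member
  ! omp.map_info carrying the bounds of the 2:5 section.
  integer, allocatable :: a(:)
  allocate(a(10))
  !$omp target map(tofrom: a(2:5))
  a(2) = 42
  !$omp end target
  deallocate(a)
end subroutine sketch_allocatable_map

This mirrors the descriptor-as-parent / data-as-member shape that the new allocatable-array-bounds.f90 and allocatable-map.f90 checks expect.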
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 4f6200d29a70a64..5919241c7b95773 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -1676,14 +1676,14 @@ uint64_t getArrayElementSizeInBits(LLVM::LLVMArrayType arrTy, DataLayout &dl) {
 // This function is somewhat equivalent to Clang's getExprTypeSize inside of
 // CGOpenMPRuntime.cpp.
 llvm::Value *getSizeInBytes(DataLayout &dl, const mlir::Type &type,
-                            Operation *clauseOp, llvm::IRBuilderBase &builder,
+                            Operation *clauseOp, llvm::Value *basePointer,
+                            llvm::Type *baseType, llvm::IRBuilderBase &builder,
                             LLVM::ModuleTranslation &moduleTranslation) {
   // utilising getTypeSizeInBits instead of getTypeSize as getTypeSize gives
   // the size in inconsistent byte or bit format.
   uint64_t underlyingTypeSzInBits = dl.getTypeSizeInBits(type);
-  if (auto arrTy = llvm::dyn_cast_if_present<LLVM::LLVMArrayType>(type)) {
+  if (auto arrTy = llvm::dyn_cast_if_present<LLVM::LLVMArrayType>(type))
     underlyingTypeSzInBits = getArrayElementSizeInBits(arrTy, dl);
-  }
 
   if (auto memberClause =
           mlir::dyn_cast_if_present<mlir::omp::MapInfoOp>(clauseOp)) {
@@ -1728,13 +1728,13 @@ void collectMapDataFromMapOperands(MapInfoData &mapData,
                                    LLVM::ModuleTranslation &moduleTranslation,
                                    DataLayout &dl,
                                    llvm::IRBuilderBase &builder) {
-  for (mlir::Value mapValue : mapOperands) {
+  auto mapFill = [&](mlir::Value mapValue) {
     assert(mlir::isa<mlir::omp::MapInfoOp>(mapValue.getDefiningOp()) &&
            "missing map info operation or incorrect map info operation type");
     if (auto mapOp = mlir::dyn_cast_if_present<mlir::omp::MapInfoOp>(
             mapValue.getDefiningOp())) {
-      mapData.OriginalValue.push_back(
-          moduleTranslation.lookupValue(mapOp.getVarPtr()));
+      mapData.OriginalValue.push_back(moduleTranslation.lookupValue(
+          mapOp.getVarPtrPtr() ? mapOp.getVarPtrPtr() : mapOp.getVarPtr()));
       mapData.Pointers.push_back(mapData.OriginalValue.back());
 
       if (llvm::Value *refPtr =
@@ -1747,10 +1747,11 @@ void collectMapDataFromMapOperands(MapInfoData &mapData,
         mapData.BasePointers.push_back(mapData.OriginalValue.back());
       }
 
-      mapData.Sizes.push_back(getSizeInBytes(dl, mapOp.getVarType(), mapOp,
-                                             builder, moduleTranslation));
       mapData.BaseType.push_back(
           moduleTranslation.convertType(mapOp.getVarType()));
+      mapData.Sizes.push_back(getSizeInBytes(
+          dl, mapOp.getVarType(), mapOp, mapData.BasePointers.back(),
+          mapData.BaseType.back(), builder, moduleTranslation));
       mapData.MapClause.push_back(mapOp.getOperation());
       mapData.Types.push_back(
           llvm::omp::OpenMPOffloadMappingFlags(mapOp.getMapType().value()));
@@ -1759,6 +1760,183 @@ void collectMapDataFromMapOperands(MapInfoData &mapData,
       mapData.DevicePointers.push_back(
           llvm::OpenMPIRBuilder::DeviceInfoTy::None);
     }
+  };
+
+  // In the case of Fortran descriptors, some members are added implicitly
+  // after the target region has been generated during CodeGen lowering,
+  // which prevents them from being added trivially to the target region as
+  // map arguments, so we must handle that case here by generating
+  // MapInfoData for them.
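+  // For example, a mapped allocatable yields a parent map of its descriptor
+  // whose `members` operand holds the map of the underlying heap data; that
+  // member map never appears in mapOperands itself, so it is gathered here.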
+  // TODO: Revisit this when implementing explicit member mapping for derived
+  // types; we likely want to represent these identically to simplify the
+  // overall lowering.
+  SmallVector<Value> mapMemberOperands;
+  for (size_t i = 0; i < mapOperands.size(); ++i) {
+    auto mapInfoOp =
+        mlir::dyn_cast<mlir::omp::MapInfoOp>(mapOperands[i].getDefiningOp());
+    for (auto members : mapInfoOp.getMembers()) {
+      if (!std::any_of(mapOperands.begin(), mapOperands.end(),
+                       [&](auto mapOp) {
+                         return mapOp.getDefiningOp() ==
+                                members.getDefiningOp();
+                       }) &&
+          !std::any_of(mapMemberOperands.begin(), mapMemberOperands.end(),
+                       [&](auto mapOp) {
+                         return mapOp.getDefiningOp() ==
+                                members.getDefiningOp();
+                       }))
+        mapMemberOperands.push_back(members);
+    }
+  }
+
+  for (mlir::Value mapValue : mapOperands)
+    mapFill(mapValue);
+
+  for (mlir::Value mapValue : mapMemberOperands)
+    mapFill(mapValue);
+}
+
+static void processMapWithMembersOf(
+    LLVM::ModuleTranslation &moduleTranslation, llvm::IRBuilderBase &builder,
+    llvm::OpenMPIRBuilder &ompBuilder, DataLayout &dl,
+    llvm::OpenMPIRBuilder::MapInfosTy &combinedInfo, MapInfoData &mapData,
+    uint64_t mapDataIndex, bool isTargetParams) {
+  auto parentClause =
+      mlir::dyn_cast<mlir::omp::MapInfoOp>(mapData.MapClause[mapDataIndex]);
+
+  ////////// First Parent Map Segment //////////
+  // Map the first segment of our structure
+  combinedInfo.Types.emplace_back(
+      isTargetParams
+          ? llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM
+          : llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_NONE);
+  combinedInfo.DevicePointers.emplace_back(
+      llvm::OpenMPIRBuilder::DeviceInfoTy::None);
+  combinedInfo.Names.emplace_back(LLVM::createMappingInformation(
+      mapData.MapClause[mapDataIndex]->getLoc(), ompBuilder));
+  combinedInfo.BasePointers.emplace_back(mapData.BasePointers[mapDataIndex]);
+  combinedInfo.Pointers.emplace_back(mapData.Pointers[mapDataIndex]);
+
+  // Calculate the size of the parent object being mapped based on the
+  // addresses at runtime: highAddr - lowAddr = size. This of course
+  // does not factor in dynamically allocated data such as pointers,
+  // hence the further processing of members specified by users, or, in
+  // the case of Fortran pointers and allocatables, the mapping of the
+  // pointed-to data via the descriptor (which is itself a structure
+  // containing runtime information on the dynamically allocated data).
+  llvm::Value *lowAddr = builder.CreatePointerCast(
+      mapData.Pointers[mapDataIndex], builder.getPtrTy());
+  llvm::Value *highAddr = builder.CreatePointerCast(
+      builder.CreateConstGEP1_32(mapData.BaseType[mapDataIndex],
+                                 mapData.Pointers[mapDataIndex], 1),
+      builder.getPtrTy());
+  llvm::Value *size = builder.CreateIntCast(
+      builder.CreatePtrDiff(builder.getInt8Ty(), highAddr, lowAddr),
+      builder.getInt64Ty(),
+      /*isSigned=*/false);
+  combinedInfo.Sizes.push_back(size);
+
+  ////////// Second Parent Map Segment //////////
+  // This creates the initial MEMBER_OF mapping that consists of
+  // the parent/top-level container (effectively the same as above,
+  // except with a fixed compile-time size and a separate map type that
+  // indicates the true map type (tofrom etc.), that it is part of a
+  // larger mapping, and the link between it and its members that are
+  // also explicitly mapped).
+  llvm::omp::OpenMPOffloadMappingFlags mapFlag =
+      llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
+  if (isTargetParams)
+    mapFlag &= ~llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
+
+  llvm::omp::OpenMPOffloadMappingFlags memberOfFlag =
+      ompBuilder.getMemberOfFlag(combinedInfo.BasePointers.size() - 1);
+  ompBuilder.setCorrectMemberOfFlag(mapFlag, memberOfFlag);
+
+  combinedInfo.Types.emplace_back(mapFlag);
+  combinedInfo.DevicePointers.emplace_back(
+      llvm::OpenMPIRBuilder::DeviceInfoTy::None);
+  combinedInfo.Names.emplace_back(LLVM::createMappingInformation(
+      mapData.MapClause[mapDataIndex]->getLoc(), ompBuilder));
+  combinedInfo.BasePointers.emplace_back(mapData.BasePointers[mapDataIndex]);
+  combinedInfo.Pointers.emplace_back(mapData.Pointers[mapDataIndex]);
+  combinedInfo.Sizes.emplace_back(mapData.Sizes[mapDataIndex]);
+
+  ////////// Mapping of Members Segment //////////
+  for (auto mappedMembers : parentClause.getMembers()) {
+    auto memberClause =
+        mlir::dyn_cast<mlir::omp::MapInfoOp>(mappedMembers.getDefiningOp());
+    int memberDataIdx = -1;
+    for (size_t i = 0; i < mapData.MapClause.size(); ++i) {
+      if (mapData.MapClause[i] == memberClause)
+        memberDataIdx = i;
+    }
+
+    assert(memberDataIdx >= 0 && "could not find mapped member of structure");
+
+    // Use the same MEMBER_OF flag to indicate its link with the parent and
+    // the other members, and flag that it is part of a pointer-and-object
+    // coupling.
+    auto mapFlag =
+        llvm::omp::OpenMPOffloadMappingFlags(memberClause.getMapType().value());
+    mapFlag &= ~llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
+    ompBuilder.setCorrectMemberOfFlag(mapFlag, memberOfFlag);
+    mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ;
+    combinedInfo.Types.emplace_back(mapFlag);
+    combinedInfo.DevicePointers.emplace_back(
+        llvm::OpenMPIRBuilder::DeviceInfoTy::None);
+    combinedInfo.Names.emplace_back(
+        LLVM::createMappingInformation(memberClause.getLoc(), ompBuilder));
+
+    combinedInfo.BasePointers.emplace_back(mapData.BasePointers[memberDataIdx]);
+
+    std::vector<llvm::Value *> idx{builder.getInt64(0)};
+    llvm::Value *offsetAddress = nullptr;
+    if (!memberClause.getBounds().empty()) {
+      if (mapData.BaseType[memberDataIdx]->isArrayTy()) {
+        for (int i = memberClause.getBounds().size() - 1; i >= 0; --i) {
+          if (auto boundOp = mlir::dyn_cast_if_present<mlir::omp::DataBoundsOp>(
+                  memberClause.getBounds()[i].getDefiningOp())) {
+            idx.push_back(
+                moduleTranslation.lookupValue(boundOp.getLowerBound()));
+          }
+        }
+      } else {
+        std::vector<llvm::Value *> dimensionIndexSizeOffset{
+            builder.getInt64(1)};
+        for (size_t i = 1; i < memberClause.getBounds().size(); ++i) {
+          if (auto boundOp = mlir::dyn_cast_if_present<mlir::omp::DataBoundsOp>(
+                  memberClause.getBounds()[i].getDefiningOp())) {
+            dimensionIndexSizeOffset.push_back(builder.CreateMul(
+                moduleTranslation.lookupValue(boundOp.getExtent()),
+                dimensionIndexSizeOffset[i - 1]));
+          }
+        }
+
+        for (int i = memberClause.getBounds().size() - 1; i >= 0; --i) {
+          if (auto boundOp = mlir::dyn_cast_if_present<mlir::omp::DataBoundsOp>(
+                  memberClause.getBounds()[i].getDefiningOp())) {
+            if (!offsetAddress)
+              offsetAddress = builder.CreateMul(
+                  moduleTranslation.lookupValue(boundOp.getLowerBound()),
+                  dimensionIndexSizeOffset[i]);
+            else
+              offsetAddress = builder.CreateAdd(
+                  offsetAddress,
+                  builder.CreateMul(
+                      moduleTranslation.lookupValue(boundOp.getLowerBound()),
+                      dimensionIndexSizeOffset[i]));
+          }
+        }
+      }
+    }
+
+    llvm::Value *memberIdx =
+        builder.CreateLoad(builder.getPtrTy(), mapData.Pointers[memberDataIdx]);
+    memberIdx = builder.CreateInBoundsGEP(
+        mapData.BaseType[memberDataIdx], memberIdx,
+        offsetAddress ? std::vector<llvm::Value *>{offsetAddress} : idx,
+        "member_idx");
+    combinedInfo.Pointers.emplace_back(memberIdx);
+    combinedInfo.Sizes.emplace_back(mapData.Sizes[memberDataIdx]);
   }
 }
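
For the non-array member case above, the bounds are linearised into a single element offset: the sum over dimensions of lowerBound(i) times the cumulative extent of the lower dimensions. Below is a minimal standalone C++ sketch of that arithmetic; the names Bound and computeMemberElementOffset are illustrative only and not part of the patch.

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative stand-in for omp.bounds: lower bound and extent per dimension.
struct Bound {
  int64_t lowerBound;
  int64_t extent;
};

// Mirrors the IR built in the non-array branch above:
// dimensionIndexSizeOffset[i] is the element span of one step in dimension i,
// and the final offset is sum(lowerBound[i] * dimensionIndexSizeOffset[i]).
int64_t computeMemberElementOffset(const std::vector<Bound> &bounds) {
  std::vector<int64_t> dimensionIndexSizeOffset{1};
  for (std::size_t i = 1; i < bounds.size(); ++i)
    dimensionIndexSizeOffset.push_back(bounds[i].extent *
                                       dimensionIndexSizeOffset[i - 1]);

  int64_t offset = 0;
  for (std::size_t i = 0; i < bounds.size(); ++i)
    offset += bounds[i].lowerBound * dimensionIndexSizeOffset[i];
  return offset;
}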
 
@@ -1782,15 +1960,56 @@ static void genMapInfos(llvm::IRBuilderBase &builder,
     combinedInfo.Names.clear();
   };
 
+  llvm::SmallVector<size_t, 4> primaryMapIdx;
+  for (size_t i = 0; i < mapData.MapClause.size(); ++i) {
+    primaryMapIdx.push_back(i);
+  }
+
+  // TODO: Handle nested MembersOf; currently this only handles the first
+  // level of nesting (all that is relevant for Fortran descriptors), but a
+  // slight refactoring of MapInfoData to hold nestings or membersOf may be
+  // a better approach to simplify things.
+  for (size_t i = 0; i < mapData.MapClause.size(); ++i) {
+    auto mapInfoOp = mlir::dyn_cast<mlir::omp::MapInfoOp>(mapData.MapClause[i]);
+    for (auto member : mapInfoOp.getMembers()) {
+      for (size_t j = 0; j < primaryMapIdx.size(); j++) {
+        if (member.getDefiningOp() == mapData.MapClause[primaryMapIdx[j]]) {
+          primaryMapIdx.erase(&primaryMapIdx[j]);
+          j--;
+        }
+      }
+    }
+  }
+
   // We operate under the assumption that all vectors that are
   // required in MapInfoData are of equal lengths (either filled with
   // default constructed data or appropiate information) so we can
   // utilise the size from any component of MapInfoData, if we can't
   // something is missing from the initial MapInfoData construction.
-  for (size_t i = 0; i < mapData.MapClause.size(); ++i) {
+  for (unsigned long i : primaryMapIdx) {
+    auto mapInfoOp = mlir::dyn_cast<mlir::omp::MapInfoOp>(mapData.MapClause[i]);
+
+    bool isImplicit =
+        mapInfoOp.getMapType().value() &
+        static_cast<
+            std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+            llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
+
+    // TODO: Look into handling implicit semantics more thoroughly. This
+    // mimics what Clang currently does with structures, which is to map the
+    // structure and not its internals, i.e. the descriptor without the
+    // pointer itself, so not a deep copy. This may be incorrect for
+    // allocatables/pointers, but it allows explicit mapping and initial
+    // enter/exit handling.
+    if (!isImplicit && !mapInfoOp.getMembers().empty()) {
+      processMapWithMembersOf(moduleTranslation, builder, *ompBuilder, dl,
+                              combinedInfo, mapData, i, isTargetParams);
+      continue;
+    }
+
     // Declare Target Mappings are excluded from being marked as
-    // OMP_MAP_TARGET_PARAM as they are not passed as parameters, they're marked
-    // with OMP_MAP_PTR_AND_OBJ instead.
+    // OMP_MAP_TARGET_PARAM as they are not passed as parameters; they're
+    // marked with OMP_MAP_PTR_AND_OBJ instead.
     auto mapFlag = mapData.Types[i];
     if (mapData.IsDeclareTarget[i])
       mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ;
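
The member filtering above can be read in isolation as: any map entry that appears in another entry's members list is dropped from the top-level (primary) list, so it is only emitted through its parent's MEMBER_OF mapping. A standalone sketch under that reading follows; Clause and collectPrimaryIndices are stand-in names, not the patch's API.

#include <algorithm>
#include <cstddef>
#include <vector>

// Stand-in for omp.map_info: each clause may reference other clauses as
// members (e.g. the pointed-to data of a Fortran descriptor).
struct Clause {
  std::vector<const Clause *> members;
};

// Return the indices of clauses that are not a member of any other clause;
// only these are processed at the top level, members are emitted via their
// parent's MEMBER_OF mapping instead.
std::vector<std::size_t>
collectPrimaryIndices(const std::vector<Clause> &clauses) {
  std::vector<std::size_t> primary(clauses.size());
  for (std::size_t i = 0; i < clauses.size(); ++i)
    primary[i] = i;

  for (const Clause &clause : clauses)
    for (const Clause *member : clause.members)
      primary.erase(std::remove_if(primary.begin(), primary.end(),
                                   [&](std::size_t idx) {
                                     return &clauses[idx] == member;
                                   }),
                    primary.end());
  return primary;
}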
@@ -2407,7 +2626,7 @@ convertOmpTarget(Operation &opInst, llvm::IRBuilderBase &builder,
   };
 
   llvm::SmallVector<llvm::Value *, 4> kernelInput;
-  for (size_t i = 0; i < mapData.MapClause.size(); ++i) {
+  for (size_t i = 0; i < mapOperands.size(); ++i) {
     // declare target arguments are not passed to kernels as arguments
     if (!mapData.IsDeclareTarget[i])
       kernelInput.push_back(mapData.OriginalValue[i]);
diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir
index 4d88d9ac86fe16c..548a95284f373ea 100644
--- a/mlir/test/Dialect/OpenMP/ops.mlir
+++ b/mlir/test/Dialect/OpenMP/ops.mlir
@@ -2082,3 +2082,18 @@ func.func @omp_targets_with_map_bounds(%arg0: !llvm.ptr, %arg1: !llvm.ptr) -> ()
 
     return
 }
+
+// CHECK-LABEL: omp_targets_is_allocatable
+// CHECK-SAME: (%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr)
+func.func @omp_targets_is_allocatable(%arg0: !llvm.ptr, %arg1: !llvm.ptr) -> () {
+  // CHECK: %[[MAP0:.*]] = omp.map_info var_ptr(%[[ARG0]] : !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {is_fortran_allocatable, name = ""}
+  %mapv1 = omp.map_info var_ptr(%arg0 : !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>)   map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {is_fortran_allocatable, name = ""}
+  // CHECK: %[[MAP1:.*]] = omp.map_info var_ptr(%[[ARG1]] : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(to) capture(ByCopy) -> !llvm.ptr {name = ""}
+  %mapv2 = omp.map_info var_ptr(%arg1 : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(to) capture(ByCopy) -> !llvm.ptr {name = ""}
+  // CHECK: omp.target map_entries(%[[MAP0]] -> {{.*}}, %[[MAP1]] -> {{.*}} : !llvm.ptr, !llvm.ptr)
+  omp.target map_entries(%mapv1 -> %arg2, %mapv2 -> %arg3 : !llvm.ptr, !llvm.ptr) {
+    ^bb0(%arg2: !llvm.ptr, %arg3 : !llvm.ptr):
+      omp.terminator
+  }
+  return
+}
diff --git a/mlir/test/Target/LLVMIR/omptarget-fortran-allocatable-types-host.mlir b/mlir/test/Target/LLVMIR/omptarget-fortran-allocatable-types-host.mlir
new file mode 100644
index 000000000000000..dc3b230a7a55ee1
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/omptarget-fortran-allocatable-types-host.mlir
@@ -0,0 +1,349 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+// This test checks that the offload sizes, map types, base pointers and
+// pointers provided to the OpenMP kernel argument structure are correct when
+// lowering to LLVM-IR from MLIR, when the Fortran allocatables flag is
+// switched on and a Fortran allocatable descriptor type is provided alongside
+// the omp.map_info. The test exercises mapping of array sections, full arrays
+// and individual allocated scalars.
+
+module attributes {omp.is_target_device = false} {
+  llvm.func @malloc(i64) -> !llvm.ptr
+  llvm.func @_QQmain() {
+    %0 = llvm.mlir.constant(1 : i32) : i32
+    %1 = llvm.alloca %0 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
+    %2 = llvm.alloca %0 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
+    %3 = llvm.alloca %0 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
+    %4 = llvm.alloca %0 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
+    %5 = llvm.alloca %0 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
+    %6 = llvm.alloca %0 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
+    %7 = llvm.mlir.constant(10 : index) : i64
+    %8 = llvm.mlir.constant(5 : index) : i64
+    %9 = llvm.mlir.constant(2 : index) : i64
+    %10 = llvm.mlir.constant(1 : index) : i64
+    %11 = llvm.mlir.constant(0 : index) : i64
+    %12 = llvm.mlir.addressof @_QFEfull_arr : !llvm.ptr
+    %13 = llvm.mlir.constant(1 : i64) : i64
+    %14 = llvm.alloca %13 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> {bindc_name = "scalar"} : (i64) -> !llvm.ptr
+    %15 = llvm.mlir.zero : !llvm.ptr
+    %16 = llvm.mlir.constant(27 : i32) : i32
+    %17 = llvm.getelementptr %15[1] : (!llvm.ptr) -> !llvm.ptr, f32
+    %18 = llvm.ptrtoint %17 : !llvm.ptr to i64
+    %19 = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
+    %20 = llvm.insertvalue %18, %19[1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+    %21 = llvm.mlir.constant(20180515 : i32) : i32
+    %22 = llvm.insertvalue %21, %20[2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+    %23 = llvm.mlir.constant(0 : i32) : i32
+    %24 = llvm.trunc %23 : i32 to i8
+    %25 = llvm.insertvalue %24, %22[3] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+    %26 = llvm.trunc %16 : i32 to i8
+    %27 = llvm.insertvalue %26, %25[4] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+    %28 = llvm.mlir.constant(2 : i32) : i32
+    %29 = llvm.trunc %28 : i32 to i8
+    %30 = llvm.insertvalue %29, %27[5] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+    %31 = llvm.insertvalue %24, %30[6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+    %32 = llvm.insertvalue %15, %31[0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+    llvm.store %32, %6 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>, !llvm.ptr
+    %33 = llvm.load %6 : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
+    llvm.store %33, %14 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>, !llvm.ptr
+    %34 = llvm.mlir.addressof @_QFEsect_arr : !llvm.ptr
+    %35 = llvm.mul %18, %7  : i64
+    %36 = llvm.call @malloc(%35) {operandSegmentSizes = array<i32: 0, 1>, uniq_name = "_QFEfull_arr.alloc"} : (i64) -> !llvm.ptr
+    %37 = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    %38 = llvm.insertvalue %18, %37[1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %39 = llvm.insertvalue %21, %38[2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %40 = llvm.trunc %0 : i32 to i8
+    %41 = llvm.insertvalue %40, %39[3] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %42 = llvm.insertvalue %26, %41[4] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %43 = llvm.insertvalue %29, %42[5] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %44 = llvm.insertvalue %24, %43[6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %45 = llvm.insertvalue %13, %44[7, 0, 0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %46 = llvm.insertvalue %7, %45[7, 0, 1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %47 = llvm.insertvalue %18, %46[7, 0, 2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %48 = llvm.insertvalue %36, %47[0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    llvm.store %48, %5 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
+    %49 = llvm.load %5 : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    llvm.store %49, %12 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
+    %50 = llvm.getelementptr %15[1] : (!llvm.ptr) -> !llvm.ptr, i32
+    %51 = llvm.ptrtoint %50 : !llvm.ptr to i64
+    %52 = llvm.mul %51, %7  : i64
+    %53 = llvm.call @malloc(%52) {operandSegmentSizes = array<i32: 0, 1>, uniq_name = "_QFEsect_arr.alloc"} : (i64) -> !llvm.ptr
+    %54 = llvm.mlir.constant(9 : i32) : i32
+    %55 = llvm.insertvalue %51, %37[1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %56 = llvm.insertvalue %21, %55[2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %57 = llvm.insertvalue %40, %56[3] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %58 = llvm.trunc %54 : i32 to i8
+    %59 = llvm.insertvalue %58, %57[4] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %60 = llvm.insertvalue %29, %59[5] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %61 = llvm.insertvalue %24, %60[6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %62 = llvm.insertvalue %13, %61[7, 0, 0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %63 = llvm.insertvalue %7, %62[7, 0, 1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %64 = llvm.insertvalue %51, %63[7, 0, 2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %65 = llvm.insertvalue %53, %64[0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    llvm.store %65, %4 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
+    %66 = llvm.load %4 : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    llvm.store %66, %34 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
+    %67 = llvm.call @malloc(%18) {in_type = f32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFEscalar.alloc"} : (i64) -> !llvm.ptr
+    %68 = llvm.insertvalue %67, %31[0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+    llvm.store %68, %3 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>, !llvm.ptr
+    %69 = llvm.load %3 : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
+    llvm.store %69, %14 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>, !llvm.ptr
+    %70 = llvm.load %12 : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    llvm.store %70, %2 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
+    %71 = llvm.getelementptr %2[0, 7, %11, 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    %72 = llvm.load %71 : !llvm.ptr -> i64
+    %73 = llvm.getelementptr %2[0, 7, %11, 1] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    %74 = llvm.load %73 : !llvm.ptr -> i64
+    %75 = llvm.getelementptr %2[0, 7, %11, 2] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    %76 = llvm.load %75 : !llvm.ptr -> i64
+    %77 = llvm.sub %74, %10  : i64
+    %78 = omp.bounds lower_bound(%11 : i64) upper_bound(%77 : i64) extent(%74 : i64) stride(%76 : i64) start_idx(%72 : i64) {stride_in_bytes = true}
+    %79 = llvm.getelementptr %12[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    %80 = omp.map_info var_ptr(%79 : !llvm.ptr, f32) map_clauses(tofrom) capture(ByRef) bounds(%78) -> !llvm.ptr {name = "full_arr"}
+    %81 = omp.map_info var_ptr(%12 : !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>) map_clauses(tofrom) capture(ByRef) members(%80 : !llvm.ptr) -> !llvm.ptr {name = "full_arr"}
+    %82 = llvm.load %34 : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    llvm.store %82, %1 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
+    %83 = llvm.getelementptr %1[0, 7, %11, 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    %84 = llvm.load %83 : !llvm.ptr -> i64
+    %85 = llvm.getelementptr %1[0, 7, %11, 1] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    %86 = llvm.load %85 : !llvm.ptr -> i64
+    %87 = llvm.getelementptr %1[0, 7, %11, 2] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    %88 = llvm.load %87 : !llvm.ptr -> i64
+    %89 = llvm.sub %9, %84  : i64
+    %90 = llvm.sub %8, %84  : i64
+    %91 = omp.bounds lower_bound(%89 : i64) upper_bound(%90 : i64) extent(%86 : i64) stride(%88 : i64) start_idx(%84 : i64) {stride_in_bytes = true}
+    %92 = llvm.getelementptr %34[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    %93 = omp.map_info var_ptr(%92 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) bounds(%91) -> !llvm.ptr {name = "sect_arr(2:5)"}
+    %94 = omp.map_info var_ptr(%34 : !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>) map_clauses(tofrom) capture(ByRef) members(%93 : !llvm.ptr) -> !llvm.ptr {name = "sect_arr(2:5)"}
+    %95 = llvm.getelementptr %14[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
+    %96 = omp.map_info var_ptr(%95 : !llvm.ptr, f32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = "scalar"}
+    %97 = omp.map_info var_ptr(%14 : !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>) map_clauses(tofrom) capture(ByRef) members(%96 : !llvm.ptr) -> !llvm.ptr {name = "scalar"}
+    omp.target map_entries(%81 -> %arg0, %94 -> %arg1, %97 -> %arg2 : !llvm.ptr, !llvm.ptr, !llvm.ptr) {
+    ^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr):
+      %98 = llvm.mlir.constant(1 : i32) : i32
+      %99 = llvm.alloca %98 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
+      %100 = llvm.alloca %98 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
+      %101 = llvm.alloca %98 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
+      %102 = llvm.mlir.constant(13 : i32) : i32
+      %103 = llvm.mlir.constant(0 : index) : i64
+      %104 = llvm.mlir.constant(3 : index) : i64
+      %105 = llvm.mlir.constant(1 : index) : i64
+      %106 = llvm.mlir.constant(1.000000e+00 : f32) : f32
+      %107 = llvm.load %arg0 : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+      llvm.store %107, %101 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
+      %108 = llvm.getelementptr %101[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+      %109 = llvm.load %108 : !llvm.ptr -> !llvm.ptr
+      %110 = llvm.getelementptr %101[0, 7, %103, 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+      %111 = llvm.load %110 : !llvm.ptr -> i64
+      %112 = llvm.getelementptr %101[0, 7, %103, 1] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+      %113 = llvm.load %112 : !llvm.ptr -> i64
+      %114 = llvm.getelementptr %101[0, 7, %103, 2] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+      %115 = llvm.mlir.constant(1 : i64) : i64
+      %116 = llvm.mlir.constant(0 : i64) : i64
+      %117 = llvm.sub %105, %111  : i64
+      %118 = llvm.mul %117, %115  : i64
+      %119 = llvm.mul %118, %115  : i64
+      %120 = llvm.add %119, %116  : i64
+      %121 = llvm.getelementptr %109[%120] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+      llvm.store %106, %121 : f32, !llvm.ptr
+      %122 = llvm.load %arg1 : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+      llvm.store %122, %100 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
+      %123 = llvm.getelementptr %100[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+      %124 = llvm.load %123 : !llvm.ptr -> !llvm.ptr
+      %125 = llvm.getelementptr %100[0, 7, %103, 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+      %126 = llvm.load %125 : !llvm.ptr -> i64
+      %127 = llvm.getelementptr %100[0, 7, %103, 1] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+      %128 = llvm.load %127 : !llvm.ptr -> i64
+      %129 = llvm.getelementptr %100[0, 7, %103, 2] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+      %130 = llvm.sub %104, %126  : i64
+      %131 = llvm.mul %130, %115  : i64
+      %132 = llvm.mul %131, %115  : i64
+      %133 = llvm.add %132, %116  : i64
+      %134 = llvm.getelementptr %124[%133] : (!llvm.ptr, i64) -> !llvm.ptr, i32
+      llvm.store %98, %134 : i32, !llvm.ptr
+      %135 = llvm.sitofp %98 : i32 to f32
+      %136 = llvm.alloca %115 x f32 : (i64) -> !llvm.ptr
+      llvm.store %135, %136 : f32, !llvm.ptr
+      %137 = llvm.mlir.constant(27 : i32) : i32
+      %138 = llvm.mlir.zero : !llvm.ptr
+      %139 = llvm.getelementptr %138[1] : (!llvm.ptr) -> !llvm.ptr, f32
+      %140 = llvm.ptrtoint %139 : !llvm.ptr to i64
+      %141 = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
+      %142 = llvm.insertvalue %140, %141[1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+      %143 = llvm.mlir.constant(20180515 : i32) : i32
+      %144 = llvm.insertvalue %143, %142[2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+      %145 = llvm.mlir.constant(0 : i32) : i32
+      %146 = llvm.trunc %145 : i32 to i8
+      %147 = llvm.insertvalue %146, %144[3] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+      %148 = llvm.trunc %137 : i32 to i8
+      %149 = llvm.insertvalue %148, %147[4] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+      %150 = llvm.insertvalue %146, %149[5] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+      %151 = llvm.insertvalue %146, %150[6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+      %152 = llvm.insertvalue %136, %151[0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> 
+      llvm.store %152, %99 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>, !llvm.ptr
+      %153 = llvm.mlir.addressof @_QQclX6606f061cfd55e600f1165aa97a647a5 : !llvm.ptr
+      %154 = llvm.call @_FortranAAssign(%arg2, %99, %153, %102) : (!llvm.ptr, !llvm.ptr, !llvm.ptr, i32) -> !llvm.struct<()>
+      omp.terminator
+    }
+    llvm.return
+  }
+  llvm.mlir.global internal @_QFEfull_arr() {addr_space = 0 : i32} : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {
+    %0 = llvm.mlir.constant(0 : index) : i64
+    %1 = llvm.mlir.zero : !llvm.ptr
+    %2 = llvm.mlir.constant(27 : i32) : i32
+    %3 = llvm.getelementptr %1[1] : (!llvm.ptr) -> !llvm.ptr, f32
+    %4 = llvm.ptrtoint %3 : !llvm.ptr to i64
+    %5 = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    %6 = llvm.insertvalue %4, %5[1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %7 = llvm.mlir.constant(20180515 : i32) : i32
+    %8 = llvm.insertvalue %7, %6[2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %9 = llvm.mlir.constant(1 : i32) : i32
+    %10 = llvm.trunc %9 : i32 to i8
+    %11 = llvm.insertvalue %10, %8[3] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %12 = llvm.trunc %2 : i32 to i8
+    %13 = llvm.insertvalue %12, %11[4] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %14 = llvm.mlir.constant(2 : i32) : i32
+    %15 = llvm.trunc %14 : i32 to i8
+    %16 = llvm.insertvalue %15, %13[5] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %17 = llvm.mlir.constant(0 : i32) : i32
+    %18 = llvm.trunc %17 : i32 to i8
+    %19 = llvm.insertvalue %18, %16[6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %20 = llvm.mlir.constant(1 : i64) : i64
+    %21 = llvm.insertvalue %20, %19[7, 0, 0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %22 = llvm.insertvalue %0, %21[7, 0, 1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %23 = llvm.insertvalue %4, %22[7, 0, 2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %24 = llvm.insertvalue %1, %23[0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    llvm.return %24 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+  }
+  llvm.mlir.global internal @_QFEsect_arr() {addr_space = 0 : i32} : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {
+    %0 = llvm.mlir.constant(0 : index) : i64
+    %1 = llvm.mlir.zero : !llvm.ptr
+    %2 = llvm.mlir.constant(9 : i32) : i32
+    %3 = llvm.getelementptr %1[1] : (!llvm.ptr) -> !llvm.ptr, i32
+    %4 = llvm.ptrtoint %3 : !llvm.ptr to i64
+    %5 = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+    %6 = llvm.insertvalue %4, %5[1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %7 = llvm.mlir.constant(20180515 : i32) : i32
+    %8 = llvm.insertvalue %7, %6[2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %9 = llvm.mlir.constant(1 : i32) : i32
+    %10 = llvm.trunc %9 : i32 to i8
+    %11 = llvm.insertvalue %10, %8[3] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %12 = llvm.trunc %2 : i32 to i8
+    %13 = llvm.insertvalue %12, %11[4] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %14 = llvm.mlir.constant(2 : i32) : i32
+    %15 = llvm.trunc %14 : i32 to i8
+    %16 = llvm.insertvalue %15, %13[5] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %17 = llvm.mlir.constant(0 : i32) : i32
+    %18 = llvm.trunc %17 : i32 to i8
+    %19 = llvm.insertvalue %18, %16[6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %20 = llvm.mlir.constant(1 : i64) : i64
+    %21 = llvm.insertvalue %20, %19[7, 0, 0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %22 = llvm.insertvalue %0, %21[7, 0, 1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %23 = llvm.insertvalue %4, %22[7, 0, 2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    %24 = llvm.insertvalue %1, %23[0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> 
+    llvm.return %24 : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+  }
+  llvm.mlir.global linkonce constant @_QQclX6606f061cfd55e600f1165aa97a647a5() comdat(@__llvm_comdat::@_QQclX6606f061cfd55e600f1165aa97a647a5) {addr_space = 0 : i32} : !llvm.array<97 x i8> {
+    %0 = llvm.mlir.constant("/home/agozillo/git/flang-dev/work-dir/declare-target-map/omptarget-fortran-allocatables-test.f90\00") : !llvm.array<97 x i8>
+    llvm.return %0 : !llvm.array<97 x i8>
+  }
+  llvm.comdat @__llvm_comdat {
+    llvm.comdat_selector @_QQclX6606f061cfd55e600f1165aa97a647a5 any
+  }
+  llvm.mlir.global external constant @_QQEnvironmentDefaults() {addr_space = 0 : i32} : !llvm.ptr {
+    %0 = llvm.mlir.zero : !llvm.ptr
+    llvm.return %0 : !llvm.ptr
+  }
+  llvm.func @_FortranAAssign(!llvm.ptr, !llvm.ptr, !llvm.ptr, i32) -> !llvm.struct<()> attributes {sym_visibility = "private"}
+}
+
+// CHECK: @.offload_sizes = private unnamed_addr constant [9 x i64] [i64 0, i64 48, i64 0, i64 0, i64 48, i64 0, i64 0, i64 24, i64 4]
+// CHECK: @.offload_maptypes = private unnamed_addr constant [9 x i64] [i64 32, i64 281474976710657, i64 281474976710675, i64 32, i64 1125899906842625, i64 1125899906842643, i64 32, i64 1970324836974593, i64 1970324836974611]
+// CHECK: @.offload_mapnames = private constant [9 x ptr] [ptr @{{.*}}, ptr @{{.*}}, ptr @{{.*}}, ptr @{{.*}}, ptr @{{.*}}, ptr @{{.*}}, ptr @{{.*}}, ptr @{{.*}}, ptr @{{.*}}]
+
+// CHECK: define void @_QQmain()
+
+// CHECK: %[[ARR_SECT_ALLOCA:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, align 8
+// CHECK: %[[FULL_ARR_ALLOCA:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, align 8
+// CHECK: %[[SCALAR_ALLOCA:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8 }, i64 1, align 8
+// CHECK: %[[FULL_ARR_SIZE6:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr %[[FULL_ARR_ALLOCA]], i32 0, i32 7, i64 0, i32 1
+// CHECK: %[[FULL_ARR_SIZE5:.*]] = load i64, ptr %[[FULL_ARR_SIZE6]], align 4
+// CHECK: %[[FULL_ARR_SIZE4:.*]] = sub i64 %[[FULL_ARR_SIZE5]], 1
+// CHECK: %[[ARR_SECT_OFFSET4:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr %[[ARR_SECT_ALLOCA]], i32 0, i32 7, i64 0, i32 0
+// CHECK: %[[ARR_SECT_OFFSET3:.*]] = load i64, ptr %[[ARR_SECT_OFFSET4]], align 4
+// CHECK: %[[ARR_SECT_OFFSET2:.*]] = sub i64 2, %[[ARR_SECT_OFFSET3]]
+// CHECK: %[[ARR_SECT_SIZE4:.*]] = sub i64 5, %[[ARR_SECT_OFFSET3]]
+// CHECK: %[[SCALAR_BASE:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %[[SCALAR_ALLOCA]], i32 0, i32 0
+// CHECK: %[[FULL_ARR_SIZE3:.*]] = sub i64 %[[FULL_ARR_SIZE4]], 0
+// CHECK: %[[FULL_ARR_SIZE2:.*]] = add i64 %[[FULL_ARR_SIZE3]], 1
+// CHECK: %[[FULL_ARR_SIZE1:.*]] = mul i64 1, %[[FULL_ARR_SIZE2]]
+// CHECK: %[[FULL_ARR_SIZE:.*]] = mul i64 %[[FULL_ARR_SIZE1]], 4
+// CHECK: %[[ARR_SECT_SIZE3:.*]] = sub i64 %[[ARR_SECT_SIZE4]], %[[ARR_SECT_OFFSET2]]
+// CHECK: %[[ARR_SECT_SIZE2:.*]] = add i64 %[[ARR_SECT_SIZE3]], 1
+// CHECK: %[[ARR_SECT_SIZE1:.*]] = mul i64 1, %[[ARR_SECT_SIZE2]]
+// CHECK: %[[ARR_SECT_SIZE:.*]] = mul i64 %[[ARR_SECT_SIZE1]], 4
+// CHECK: %[[FULL_ARR_DESC_SIZE:.*]] = sdiv exact i64 sub (i64 ptrtoint (ptr getelementptr inbounds ({ ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr @_QFEfull_arr, i32 1) to i64), i64 ptrtoint (ptr @_QFEfull_arr to i64)), ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK: %[[LFULL_ARR:.*]] = load ptr, ptr @_QFEfull_arr, align 8
+// CHECK: %[[FULL_ARR_PTR:.*]] = getelementptr inbounds float, ptr %[[LFULL_ARR]], i64 0
+// CHECK: %[[ARR_SECT_DESC_SIZE:.*]] = sdiv exact i64 sub (i64 ptrtoint (ptr getelementptr inbounds ({ ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr @_QFEsect_arr, i32 1) to i64), i64 ptrtoint (ptr @_QFEsect_arr to i64)), ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK: %[[ARR_SECT_OFFSET1:.*]] = mul i64 %[[ARR_SECT_OFFSET2]], 1
+// CHECK: %[[LARR_SECT:.*]] = load ptr, ptr @_QFEsect_arr, align 8
+// CHECK: %[[ARR_SECT_PTR:.*]] = getelementptr inbounds i32, ptr %[[LARR_SECT]], i64 %[[ARR_SECT_OFFSET1]]
+// CHECK: %[[SCALAR_DESC_SZ4:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %[[SCALAR_ALLOCA]], i32 1
+// CHECK: %[[SCALAR_DESC_SZ3:.*]] = ptrtoint ptr %[[SCALAR_DESC_SZ4]] to i64
+// CHECK: %[[SCALAR_DESC_SZ2:.*]] = ptrtoint ptr %[[SCALAR_ALLOCA]] to i64
+// CHECK: %[[SCALAR_DESC_SZ1:.*]] = sub i64 %[[SCALAR_DESC_SZ3]], %[[SCALAR_DESC_SZ2]]
+// CHECK: %[[SCALAR_DESC_SZ:.*]] = sdiv exact i64 %[[SCALAR_DESC_SZ1]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK: %[[SCALAR_PTR_LOAD:.*]] = load ptr, ptr %[[SCALAR_BASE]], align 8
+// CHECK: %[[SCALAR_PTR:.*]] = getelementptr inbounds float, ptr %[[SCALAR_PTR_LOAD]], i64 0
+
+// CHECK: %[[OFFLOADBASEPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
+// CHECK: store ptr @_QFEfull_arr, ptr %[[OFFLOADBASEPTRS]], align 8
+// CHECK: %[[OFFLOADPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_ptrs, i32 0, i32 0
+// CHECK: store ptr @_QFEfull_arr, ptr %[[OFFLOADPTRS]], align 8
+
+// CHECK: %[[OFFLOADSIZES:.*]] = getelementptr inbounds [9 x i64], ptr %.offload_sizes, i32 0, i32 0
+// CHECK: store i64 %[[FULL_ARR_DESC_SIZE]], ptr %[[OFFLOADSIZES]], align 8
+
+// CHECK: %[[OFFLOADBASEPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_baseptrs, i32 0, i32 1
+// CHECK: store ptr @_QFEfull_arr, ptr %[[OFFLOADBASEPTRS]], align 8
+// CHECK: %[[OFFLOADPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_ptrs, i32 0, i32 1
+// CHECK: store ptr @_QFEfull_arr, ptr %[[OFFLOADPTRS]], align 8
+// CHECK: %[[OFFLOADBASEPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_baseptrs, i32 0, i32 2
+// CHECK: store ptr @_QFEfull_arr, ptr %[[OFFLOADBASEPTRS]], align 8
+// CHECK: %[[OFFLOADPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_ptrs, i32 0, i32 2
+// CHECK: store ptr %[[FULL_ARR_PTR]], ptr %[[OFFLOADPTRS]], align 8
+// CHECK: %[[OFFLOADSIZES:.*]] = getelementptr inbounds [9 x i64], ptr %.offload_sizes, i32 0, i32 2
+// CHECK: store i64 %[[FULL_ARR_SIZE]], ptr %[[OFFLOADSIZES]], align 8
+
+// CHECK: %[[OFFLOADBASEPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_baseptrs, i32 0, i32 3
+// CHECK: store ptr @_QFEsect_arr, ptr %[[OFFLOADBASEPTRS]], align 8
+// CHECK: %[[OFFLOADPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_ptrs, i32 0, i32 3
+// CHECK: store ptr @_QFEsect_arr, ptr %[[OFFLOADPTRS]], align 8
+// CHECK: %[[OFFLOADSIZES:.*]] = getelementptr inbounds [9 x i64], ptr %.offload_sizes, i32 0, i32 3
+// CHECK: store i64 %[[ARR_SECT_DESC_SIZE]], ptr %[[OFFLOADSIZES]], align 8
+// CHECK: %[[OFFLOADBASEPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_baseptrs, i32 0, i32 4
+// CHECK: store ptr @_QFEsect_arr, ptr %[[OFFLOADBASEPTRS]], align 8
+// CHECK: %[[OFFLOADPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_ptrs, i32 0, i32 4
+// CHECK: store ptr @_QFEsect_arr, ptr %[[OFFLOADPTRS]], align 8
+// CHECK: %[[OFFLOADBASEPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_baseptrs, i32 0, i32 5
+// CHECK: store ptr @_QFEsect_arr, ptr %[[OFFLOADBASEPTRS]], align 8
+// CHECK: %[[OFFLOADPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_ptrs, i32 0, i32 5
+// CHECK: store ptr %[[ARR_SECT_PTR]], ptr %[[OFFLOADPTRS]], align 8
+// CHECK: %[[OFFLOADSIZES:.*]] = getelementptr inbounds [9 x i64], ptr %.offload_sizes, i32 0, i32 5
+// CHECK: store i64 %[[ARR_SECT_SIZE]], ptr %[[OFFLOADSIZES]], align 8
+
+// CHECK: %[[OFFLOADBASEPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_baseptrs, i32 0, i32 6
+// CHECK: store ptr %[[SCALAR_ALLOCA]], ptr %[[OFFLOADBASEPTRS]], align 8
+// CHECK: %[[OFFLOADPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_ptrs, i32 0, i32 6
+// CHECK: store ptr %[[SCALAR_ALLOCA]], ptr %[[OFFLOADPTRS]], align 8
+// CHECK: %[[OFFLOADSIZES:.*]] = getelementptr inbounds [9 x i64], ptr %.offload_sizes, i32 0, i32 6
+// CHECK: store i64 %[[SCALAR_DESC_SZ]], ptr %[[OFFLOADSIZES]], align 8
+// CHECK: %[[OFFLOADBASEPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_baseptrs, i32 0, i32 7
+// CHECK: store ptr %[[SCALAR_ALLOCA]], ptr %[[OFFLOADBASEPTRS]], align 8
+// CHECK: %[[OFFLOADPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_ptrs, i32 0, i32 7
+// CHECK: store ptr %[[SCALAR_ALLOCA]], ptr %[[OFFLOADPTRS]], align 8
+// CHECK: %[[OFFLOADBASEPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_baseptrs, i32 0, i32 8
+// CHECK: store ptr %[[SCALAR_BASE]], ptr %[[OFFLOADBASEPTRS]], align 8
+// CHECK: %[[OFFLOADPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_ptrs, i32 0, i32 8
+// CHECK: store ptr %[[SCALAR_PTR]], ptr %[[OFFLOADPTRS]], align 8
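
The .offload_maptypes values in the CHECK lines above can be decoded by hand, assuming the usual libomptarget flag encoding (TO = 0x1, FROM = 0x2, PTR_AND_OBJ = 0x10, TARGET_PARAM = 0x20, and the 1-based MEMBER_OF parent index in the top 16 bits): 32 is the TARGET_PARAM parent segment, 281474976710657 is TO | MEMBER_OF(1) for the descriptor, and 281474976710675 is TO | FROM | PTR_AND_OBJ | MEMBER_OF(1) for the pointed-to data. A small standalone decoder sketch under that assumption:

#include <cstdint>
#include <cstdio>

// Decode one .offload_maptypes entry, assuming the flag layout described
// above (low bits: map flags, top 16 bits: 1-based MEMBER_OF parent index).
void decodeMapType(uint64_t mapType) {
  std::printf("member_of=%llu to=%d from=%d ptr_and_obj=%d target_param=%d\n",
              static_cast<unsigned long long>(mapType >> 48),
              static_cast<int>(mapType & 0x1),
              static_cast<int>((mapType >> 1) & 0x1),
              static_cast<int>((mapType >> 4) & 0x1),
              static_cast<int>((mapType >> 5) & 0x1));
}

int main() {
  // The first triplet from the test: parent segment, descriptor segment,
  // pointed-to data segment.
  const uint64_t values[] = {32, 281474976710657ULL, 281474976710675ULL};
  for (uint64_t v : values)
    decodeMapType(v);
  return 0;
}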
diff --git a/openmp/libomptarget/test/offloading/fortran/allocatable-array-section-1d-bounds.f90 b/openmp/libomptarget/test/offloading/fortran/allocatable-array-section-1d-bounds.f90
new file mode 100644
index 000000000000000..99dbe99d40497c7
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/allocatable-array-section-1d-bounds.f90
@@ -0,0 +1,46 @@
+! Offloading test checking interaction of
+! two 1-D allocatable arrays with a target region
+! while providing the map upper and lower bounds
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+program main
+    integer,  allocatable :: sp_read(:), sp_write(:)
+    allocate(sp_read(10))
+    allocate(sp_write(10))
+
+    do i = 1, 10
+        sp_read(i) = i
+        sp_write(i) = 0
+    end do
+
+    !$omp target map(tofrom:sp_read(2:6)) map(tofrom:sp_write(2:6))
+        do i = 1, 10
+            sp_write(i) = sp_read(i)
+        end do
+    !$omp end target
+
+    do i = 1, 10
+        print *, sp_write(i)
+    end do
+
+    deallocate(sp_read)
+    deallocate(sp_write)
+end program
+
+! CHECK: 0
+! CHECK: 2
+! CHECK: 3
+! CHECK: 4
+! CHECK: 5
+! CHECK: 6
+! CHECK: 0
+! CHECK: 0
+! CHECK: 0
+! CHECK: 0
diff --git a/openmp/libomptarget/test/offloading/fortran/allocatable-array-section-3d-bounds.f90 b/openmp/libomptarget/test/offloading/fortran/allocatable-array-section-3d-bounds.f90
new file mode 100644
index 000000000000000..0786e0fd744e786
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/allocatable-array-section-3d-bounds.f90
@@ -0,0 +1,44 @@
+! Offloading test checking interaction of allocatables
+! with multi-dimensional bounds (3-D in this case) and
+! a target region
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+program main
+    integer, allocatable :: inArray(:,:,:)
+    integer, allocatable :: outArray(:,:,:)
+
+    allocate(inArray(3,3,3))
+    allocate(outArray(3,3,3))
+
+    do i = 1, 3
+      do j = 1, 3
+        do k = 1, 3
+            inArray(i, j, k) = 42
+            outArray(i, j, k) = 0
+        end do
+       end do
+    end do
+
+!$omp target map(tofrom:inArray(1:3, 1:3, 2:2), outArray(1:3, 1:3, 1:3))
+    do j = 1, 3
+      do k = 1, 3
+        outArray(k, j, 2) = inArray(k, j, 2)
+      end do
+    end do
+!$omp end target
+
+print *, outArray
+
+deallocate(inArray)
+deallocate(outArray)
+
+end program
+
+! CHECK: 0 0 0 0 0 0 0 0 0 42 42 42 42 42 42 42 42 42 0 0 0 0 0 0 0 0 0
diff --git a/openmp/libomptarget/test/offloading/fortran/allocatable-map-scopes.f95 b/openmp/libomptarget/test/offloading/fortran/allocatable-map-scopes.f95
new file mode 100644
index 000000000000000..bb47d3de96d2a9b
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/allocatable-map-scopes.f95
@@ -0,0 +1,66 @@
+! Offloading test checking interaction of allocatables
+! with target in different scopes
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+module test
+    contains
+  subroutine func_arg(arg_alloc) 
+    integer,  allocatable, intent (inout) :: arg_alloc(:)
+  
+  !$omp target map(tofrom: arg_alloc) 
+    do index = 1, 10
+      arg_alloc(index) = arg_alloc(index) + index
+    end do
+  !$omp end target
+    
+    print *, arg_alloc
+  end subroutine func_arg
+end module 
+  
+subroutine func 
+    integer,  allocatable :: local_alloc(:) 
+    allocate(local_alloc(10))
+  
+  !$omp target map(tofrom: local_alloc) 
+    do index = 1, 10
+      local_alloc(index) = index
+    end do
+  !$omp end target
+  
+    print *, local_alloc
+  
+    deallocate(local_alloc)
+end subroutine func 
+  
+  
+program main 
+  use test 
+  integer,  allocatable :: map_ptr(:) 
+  
+  allocate(map_ptr(10))
+  
+  !$omp target map(tofrom: map_ptr) 
+    do index = 1, 10
+      map_ptr(index) = index
+    end do
+  !$omp end target
+
+   call func 
+
+   print *, map_ptr
+
+   call func_arg(map_ptr)
+
+   deallocate(map_ptr)
+end program 
+
+! CHECK: 1 2 3 4 5 6 7 8 9 10
+! CHECK: 1 2 3 4 5 6 7 8 9 10
+! CHECK: 2 4 6 8 10 12 14 16 18 20
diff --git a/openmp/libomptarget/test/offloading/fortran/failing/target_single_value_allocate.f90 b/openmp/libomptarget/test/offloading/fortran/failing/target_single_value_allocate.f90
new file mode 100644
index 000000000000000..adb5bad4278db68
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/failing/target_single_value_allocate.f90
@@ -0,0 +1,27 @@
+! Offloading test checking interaction of a
+! non-array allocatable with a target region
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+! XFAIL: *
+program main
+    integer, allocatable :: test
+    allocate(test)
+    test = 10
+
+!$omp target map(tofrom:test)
+    test = 50
+!$omp end target
+
+    print *, test
+
+    deallocate(test)
+end program
+
+! CHECK: 
diff --git a/openmp/libomptarget/test/offloading/fortran/pointer-scopes-enter-exit-map.f90 b/openmp/libomptarget/test/offloading/fortran/pointer-scopes-enter-exit-map.f90
new file mode 100644
index 000000000000000..dee75af06927bee
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/pointer-scopes-enter-exit-map.f90
@@ -0,0 +1,83 @@
+! Offloading test checking interaction of pointers
+! with target in different scopes
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+module test
+    contains
+  subroutine func_arg(arg_alloc) 
+    integer,  pointer, intent (inout) :: arg_alloc(:)
+    
+  !$omp target enter data map(alloc: arg_alloc)
+    
+  !$omp target 
+    do index = 1, 10
+      arg_alloc(index) = arg_alloc(index) + index
+    end do
+  !$omp end target
+    
+  !$omp target exit data map(from: arg_alloc)
+  
+  !$omp target exit data map(delete: arg_alloc)
+    
+    print *, arg_alloc
+  end subroutine func_arg
+end module 
+  
+subroutine func 
+  integer,  pointer :: local_alloc(:) 
+  allocate(local_alloc(10))
+
+  !$omp target enter data map(alloc: local_alloc)
+
+  !$omp target 
+    do index = 1, 10
+      local_alloc(index) = index
+    end do
+  !$omp end target
+  
+  !$omp target exit data map(from: local_alloc)
+
+  !$omp target exit data map(delete: local_alloc)
+
+  print *, local_alloc
+    
+  deallocate(local_alloc)
+end subroutine func 
+  
+  
+program main 
+  use test 
+  integer,  pointer :: map_ptr(:) 
+  allocate(map_ptr(10))
+  
+  !$omp target enter data map(alloc: map_ptr)
+
+  !$omp target
+    do index = 1, 10
+      map_ptr(index) = index
+    end do
+  !$omp end target
+  
+  !$omp target exit data map(from: map_ptr)
+
+  !$omp target exit data map(delete: map_ptr)
+
+  call func 
+
+  print *, map_ptr
+
+  call func_arg(map_ptr)
+
+  deallocate(map_ptr)
+end program
+
+! CHECK: 1 2 3 4 5 6 7 8 9 10
+! CHECK: 1 2 3 4 5 6 7 8 9 10
+! CHECK: 2 4 6 8 10 12 14 16 18 20
diff --git a/openmp/libomptarget/test/offloading/fortran/pointer-target-array-section-3d-bounds.f90 b/openmp/libomptarget/test/offloading/fortran/pointer-target-array-section-3d-bounds.f90
new file mode 100644
index 000000000000000..ff2298cf5dbc9dc
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/pointer-target-array-section-3d-bounds.f90
@@ -0,0 +1,43 @@
+! Offloading test checking interaction of a pointer
+! to a target variable with a target region where
+! 3-D bounds have been specified
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+program main
+    integer, pointer :: inArray(:,:,:)
+    integer, pointer :: outArray(:,:,:)
+    integer, target :: in(3,3,3)
+    integer, target :: out(3,3,3)
+    
+    inArray => in
+    outArray => out
+    
+    do i = 1, 3
+      do j = 1, 3
+        do k = 1, 3
+            inArray(i, j, k) = 42
+            outArray(i, j, k) = 0
+        end do
+       end do
+    end do
+
+!$omp target map(tofrom:inArray(1:3, 1:3, 2:2), outArray(1:3, 1:3, 1:3))
+    do j = 1, 3
+      do k = 1, 3
+        outArray(k, j, 2) = inArray(k, j, 2)
+      end do
+    end do
+!$omp end target
+
+ print *, outArray
+
+end program
+
+! CHECK: 0 0 0 0 0 0 0 0 0 42 42 42 42 42 42 42 42 42 0 0 0 0 0 0 0 0 0
diff --git a/openmp/libomptarget/test/offloading/fortran/pointer-target-map-scopes.f95 b/openmp/libomptarget/test/offloading/fortran/pointer-target-map-scopes.f95
new file mode 100644
index 000000000000000..d9a7000719f0e85
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/pointer-target-map-scopes.f95
@@ -0,0 +1,64 @@
+! Offloading test checking interaction of a pointer
+! to a target variable with target regions across multiple scopes
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+module test
+    contains
+  subroutine func_arg(arg_alloc) 
+    integer,  pointer, intent (inout) :: arg_alloc(:)
+  
+  !$omp target map(tofrom: arg_alloc) 
+    do index = 1, 10
+      arg_alloc(index) = arg_alloc(index) + index
+    end do
+  !$omp end target
+    
+    print *, arg_alloc
+  end subroutine func_arg
+end module
+  
+subroutine func 
+   integer,  pointer :: local_alloc(:) 
+   integer, target :: b(10)
+   local_alloc => b
+  
+  !$omp target map(tofrom: local_alloc) 
+    do index = 1, 10
+      local_alloc(index) = index
+    end do
+  !$omp end target
+  
+    print *, local_alloc
+  end subroutine func 
+  
+  
+  program main 
+  use test 
+  integer,  pointer :: map_ptr(:) 
+  integer, target :: b(10)
+  
+  map_ptr => b
+  
+  !$omp target map(tofrom: map_ptr) 
+    do index = 1, 10
+      map_ptr(index) = index
+    end do
+  !$omp end target
+  
+   call func 
+
+   print *, map_ptr
+
+   call func_arg(map_ptr)
+end program
+
+!CHECK: 1 2 3 4 5 6 7 8 9 10
+!CHECK: 1 2 3 4 5 6 7 8 9 10
+!CHECK: 2 4 6 8 10 12 14 16 18 20
diff --git a/openmp/libomptarget/test/offloading/fortran/target_enter_exit_allocatables.f90 b/openmp/libomptarget/test/offloading/fortran/target_enter_exit_allocatables.f90
new file mode 100644
index 000000000000000..865be95ba9682d3
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/target_enter_exit_allocatables.f90
@@ -0,0 +1,44 @@
+! Offloading test checking interaction of allocatables
+! with enter, exit and target 
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+program main
+    integer, allocatable :: A(:)    
+    allocate(A(10))
+    
+   !$omp target enter data map(alloc: A)
+
+    !$omp target
+        do I = 1, 10
+            A(I) = I
+        end do
+    !$omp end target
+
+    !$omp target exit data map(from: A)
+
+    !$omp target exit data map(delete: A)
+    
+    do i = 1, 10
+        print *, A(i)
+    end do
+    
+    deallocate(A)
+end program
+
+! CHECK: 1
+! CHECK: 2
+! CHECK: 3
+! CHECK: 4
+! CHECK: 5
+! CHECK: 6
+! CHECK: 7
+! CHECK: 8
+! CHECK: 9
+! CHECK: 10
diff --git a/openmp/libomptarget/test/offloading/fortran/target_enter_exit_array.f90 b/openmp/libomptarget/test/offloading/fortran/target_enter_exit_array.f90
new file mode 100644
index 000000000000000..4a9fb6ee177f654
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/target_enter_exit_array.f90
@@ -0,0 +1,41 @@
+! Offloading test checking interaction of fixed-size
+! arrays with enter, exit and target
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+program main
+    integer :: A(10)
+    
+   !$omp target enter data map(alloc: A)
+
+    !$omp target
+        do I = 1, 10
+            A(I) = I
+        end do
+    !$omp end target
+
+    !$omp target exit data map(from: A)
+
+    !$omp target exit data map(delete: A)
+    
+    do i = 1, 10
+        print *, A(i)
+    end do
+end program
+
+! CHECK: 1
+! CHECK: 2
+! CHECK: 3
+! CHECK: 4
+! CHECK: 5
+! CHECK: 6
+! CHECK: 7
+! CHECK: 8
+! CHECK: 9
+! CHECK: 10


