[Openmp-commits] [openmp] 48ffd40 - [Clang][OpenMP] Codegen generation for has_device_addr clauses.

Jennifer Yu via Openmp-commits openmp-commits at lists.llvm.org
Tue Sep 20 21:32:13 PDT 2022


Author: Jennifer Yu
Date: 2022-09-20T21:12:30-07:00
New Revision: 48ffd40ba295f2cb194237737d9c378348c7b1c9

URL: https://github.com/llvm/llvm-project/commit/48ffd40ba295f2cb194237737d9c378348c7b1c9
DIFF: https://github.com/llvm/llvm-project/commit/48ffd40ba295f2cb194237737d9c378348c7b1c9.diff

LOG: [Clang][OpenMP] Codegen generation for has_device_addr clauses.

This patch adds codegen support for the has_device_addr clause. It uses
the same logic as is_device_ptr, but passes &var to the kernel instead
of a pointer to var.

Differential Revision: https://reviews.llvm.org/D134268

Added: 
    clang/test/OpenMP/target_has_device_addr_codegen.cpp
    clang/test/OpenMP/target_has_device_addr_codegen_01.cpp
    openmp/libomptarget/test/mapping/has_device_addr.cpp
    openmp/libomptarget/test/mapping/target_has_device_addr.c

Modified: 
    clang/lib/CodeGen/CGOpenMPRuntime.cpp
    clang/lib/Sema/SemaOpenMP.cpp

Removed: 
    


################################################################################
diff  --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index dd219cc76d402..6ff36c72e0319 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -7389,6 +7389,13 @@ class MappableExprsHandler {
       SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
       DevPointersMap;
 
+  /// Map between device addr declarations and their expression components.
+  /// The key value for declarations in 'this' is null.
+  llvm::DenseMap<
+      const ValueDecl *,
+      SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
+      HasDevAddrsMap;
+
   /// Map between lambda declarations and their map type.
   llvm::DenseMap<const ValueDecl *, const OMPMapClause *> LambdasMap;
 
@@ -8819,6 +8826,10 @@ class MappableExprsHandler {
     for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
       for (auto L : C->component_lists())
         DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
+    // Extract device addr clause information.
+    for (const auto *C : Dir.getClausesOfKind<OMPHasDeviceAddrClause>())
+      for (auto L : C->component_lists())
+        HasDevAddrsMap[std::get<0>(L)].push_back(std::get<1>(L));
     // Extract map information.
     for (const auto *C : Dir.getClausesOfKind<OMPMapClause>()) {
       if (C->getMapType() != OMPC_MAP_to)
@@ -9065,6 +9076,30 @@ class MappableExprsHandler {
       CombinedInfo.Mappers.push_back(nullptr);
       return;
     }
+    if (VD && HasDevAddrsMap.count(VD)) {
+      auto I = HasDevAddrsMap.find(VD);
+      CombinedInfo.Exprs.push_back(VD);
+      Expr *E = nullptr;
+      for (auto &MCL : I->second) {
+        E = MCL.begin()->getAssociatedExpression();
+        break;
+      }
+      llvm::Value *Ptr = nullptr;
+      if (E->isGLValue())
+        Ptr = CGF.EmitLValue(E).getPointer(CGF);
+      else
+        Ptr = CGF.EmitScalarExpr(E);
+      CombinedInfo.BasePointers.emplace_back(Ptr, VD);
+      CombinedInfo.Pointers.push_back(Ptr);
+      CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
+          CGF.getTypeSize(CGF.getContext().VoidPtrTy), CGF.Int64Ty,
+          /*isSigned=*/true));
+      CombinedInfo.Types.push_back(
+          (Cap->capturesVariable() ? OMP_MAP_TO : OMP_MAP_LITERAL) |
+          OMP_MAP_TARGET_PARAM);
+      CombinedInfo.Mappers.push_back(nullptr);
+      return;
+    }
 
     using MapData =
         std::tuple<OMPClauseMappableExprCommon::MappableExprComponentListRef,
@@ -9073,14 +9108,19 @@ class MappableExprsHandler {
     SmallVector<MapData, 4> DeclComponentLists;
     // For member fields list in is_device_ptr, store it in
     // DeclComponentLists for generating components info.
+    static const OpenMPMapModifierKind Unknown = OMPC_MAP_MODIFIER_unknown;
     auto It = DevPointersMap.find(VD);
     if (It != DevPointersMap.end())
-      for (const auto &MCL : It->second) {
-        static const OpenMPMapModifierKind Unknown = OMPC_MAP_MODIFIER_unknown;
+      for (const auto &MCL : It->second)
         DeclComponentLists.emplace_back(MCL, OMPC_MAP_to, Unknown,
                                         /*IsImpicit = */ true, nullptr,
                                         nullptr);
-      }
+    auto I = HasDevAddrsMap.find(VD);
+    if (I != HasDevAddrsMap.end())
+      for (const auto &MCL : I->second)
+        DeclComponentLists.emplace_back(MCL, OMPC_MAP_tofrom, Unknown,
+                                        /*IsImpicit = */ true, nullptr,
+                                        nullptr);
     assert(CurDir.is<const OMPExecutableDirective *>() &&
            "Expect a executable directive");
     const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();

diff  --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index d05380b30e47f..c53253243ab91 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -2093,7 +2093,7 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
     //
     // =========================================================================
     // | type |  defaultmap   | pvt | first | is_device_ptr |    map   | res.  |
-    // |      |(tofrom:scalar)|     |  pvt  |               |          |       |
+    // |      |(tofrom:scalar)|     |  pvt  |               |has_dv_adr|       |
     // =========================================================================
     // | scl  |               |     |       |       -       |          | bycopy|
     // | scl  |               |  -  |   x   |       -       |     -    | bycopy|
@@ -2154,10 +2154,11 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
          D](OMPClauseMappableExprCommon::MappableExprComponentListRef
                 MapExprComponents,
             OpenMPClauseKind WhereFoundClauseKind) {
-          // Only the map clause information influences how a variable is
-          // captured. E.g. is_device_ptr does not require changing the default
-          // behavior.
-          if (WhereFoundClauseKind != OMPC_map)
+          // Both map and has_device_addr clauses information influences how a
+          // variable is captured. E.g. is_device_ptr does not require changing
+          // the default behavior.
+          if (WhereFoundClauseKind != OMPC_map &&
+              WhereFoundClauseKind != OMPC_has_device_addr)
             return false;
 
           auto EI = MapExprComponents.rbegin();
@@ -23070,13 +23071,17 @@ OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
 
     // Store the components in the stack so that they can be used to check
     // against other clauses later on.
+    Expr *Component = SimpleRefExpr;
+    auto *VD = dyn_cast<VarDecl>(D);
+    if (VD && (isa<OMPArraySectionExpr>(RefExpr->IgnoreParenImpCasts()) ||
+               isa<ArraySubscriptExpr>(RefExpr->IgnoreParenImpCasts())))
+      Component = DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get();
     OMPClauseMappableExprCommon::MappableComponent MC(
-        SimpleRefExpr, D, /*IsNonContiguous=*/false);
+        Component, D, /*IsNonContiguous=*/false);
     DSAStack->addMappableExpressionComponents(
         D, MC, /*WhereFoundClauseKind=*/OMPC_has_device_addr);
 
     // Record the expression we've just processed.
-    auto *VD = dyn_cast<VarDecl>(D);
     if (!VD && !CurContext->isDependentContext()) {
       DeclRefExpr *Ref =
           buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);

diff  --git a/clang/test/OpenMP/target_has_device_addr_codegen.cpp b/clang/test/OpenMP/target_has_device_addr_codegen.cpp
new file mode 100644
index 0000000000000..40543655b462d
--- /dev/null
+++ b/clang/test/OpenMP/target_has_device_addr_codegen.cpp
@@ -0,0 +1,1539 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" --prefix-filecheck-ir-name _
+// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+
+// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp-simd -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
+// expected-no-diagnostics
+
+struct ST {
+  int *a;
+};
+typedef int arr[10];
+typedef ST STarr[10];
+struct SA {
+  const int da[5] = { 0 };
+  ST g[10];
+  STarr &rg = g;
+  int i;
+  int &j = i;
+  int *k = &j;
+  int *&z = k;
+  int aa[10];
+  arr &raa = aa;
+  void func(int arg) {
+#pragma omp target has_device_addr(k)
+    {k++;}
+#pragma omp target has_device_addr(z)
+    {z++;}
+#pragma omp target has_device_addr(aa)
+    {aa[0]=1;}
+#pragma omp target has_device_addr(raa)
+    {raa[0] = 10;}
+#pragma omp target has_device_addr(g)
+    {g[0].a= &i;}
+#pragma omp target has_device_addr(da)
+    {int a = da[1];}
+  return;
+ }
+};
+
+struct SB {
+  unsigned A;
+  unsigned B;
+  float Arr[100];
+  float *Ptr;
+  float *foo() {
+    return &Arr[0];
+  }
+};
+
+struct SC {
+  unsigned A : 2;
+  unsigned B : 3;
+  unsigned C;
+  unsigned D;
+  float Arr[100];
+  SB S;
+  SB ArrS[100];
+  SB *PtrS;
+  SB *&RPtrS;
+  float *Ptr;
+
+  SC(SB *&_RPtrS) : RPtrS(_RPtrS) {}
+};
+
+union SD {
+  unsigned A;
+  float B;
+};
+
+struct S1;
+extern S1 a;
+class S2 {
+  mutable int a;
+public:
+  S2():a(0) { }
+  S2(S2 &s2):a(s2.a) { }
+  static float S2s;
+  static const float S2sc;
+};
+const float S2::S2sc = 0;
+const S2 b;
+const S2 ba[5];
+class S3 {
+  int a;
+public:
+  S3():a(0) { }
+  S3(S3 &s3):a(s3.a) { }
+};
+const S3 c;
+const S3 ca[5];
+extern const int f;
+class S4 {
+  int a;
+  S4();
+  S4(const S4 &s4);
+public:
+  S4(int v):a(v) { }
+};
+class S5 {
+  int a;
+  S5():a(0) {}
+  S5(const S5 &s5):a(s5.a) { }
+public:
+  S5(int v):a(v) { }
+};
+
+S3 h;
+#pragma omp threadprivate(h)
+
+typedef struct {
+  int a;
+} S6;
+
+template <typename T>
+T tmain(T argc) {
+  const T da[5] = { 0 };
+  S6 h[10];
+  auto &rh = h;
+  T i;
+  T &j = i;
+  T *k = &j;
+  T *&z = k;
+  T aa[10];
+#pragma omp target has_device_addr(k)
+  {k++;}
+#pragma omp target has_device_addr(z)
+  {z++;}
+#pragma omp target has_device_addr(aa)
+  {T a = aa[0];}
+#pragma omp target has_device_addr(h)
+  {int a = h[0].a;}
+  return 0;
+}
+
+
+int main(int argc, char **argv) {
+  const int da[5] = { 0 };
+  S6 h[10];
+  auto &rh = h;
+  int i;
+  int &j = i;
+  int *k = &j;
+  int *&z = k;
+  int aa[10];
+  auto &raa = aa;
+#pragma omp target has_device_addr(k)
+  {k++;}
+#pragma omp target has_device_addr(z)
+  {z++;}
+#pragma omp target has_device_addr(aa)
+  {aa[0]=1;}
+#pragma omp target has_device_addr(raa)
+  {int a = raa[0];}
+#pragma omp target has_device_addr(h)
+  {int a = h[1].a;}
+#pragma omp target has_device_addr(da[1:3])
+  {int a = da[1];}
+  return tmain<int>(argc) + *tmain<int *>(&argc);
+}
+
+struct SomeKernel {
+  int targetDev;
+  float devPtr;
+  SomeKernel();
+  ~SomeKernel();
+
+  template<unsigned int nRHS>
+  void apply() {
+    #pragma omp target has_device_addr(devPtr) device(targetDev)
+    {
+      devPtr++;
+      targetDev++;
+    }
+  }
+};
+
+void use_template() {
+  SomeKernel aKern;
+  aKern.apply<32>();
+}
+// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init
+// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) @_ZL1b)
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_ZN2S2C1Ev
+// CHECK-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8
+// CHECK-NEXT:    store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    call void @_ZN2S2C2Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_ZN2S2C2Ev
+// CHECK-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8
+// CHECK-NEXT:    store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[THIS1]], i32 0, i32 0
+// CHECK-NEXT:    store i32 0, i32* [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
+// CHECK-SAME: () #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
+// CHECK:       arrayctor.loop:
+// CHECK-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %class.S2* [ getelementptr inbounds ([5 x %class.S2], [5 x %class.S2]* @_ZL2ba, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
+// CHECK-NEXT:    call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
+// CHECK-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[ARRAYCTOR_CUR]], i64 1
+// CHECK-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S2* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([5 x %class.S2], [5 x %class.S2]* @_ZL2ba, i64 1, i64 0)
+// CHECK-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
+// CHECK:       arrayctor.cont:
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
+// CHECK-SAME: () #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) @_ZL1c)
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_ZN2S3C1Ev
+// CHECK-SAME: (%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.S3*, align 8
+// CHECK-NEXT:    store %class.S3* [[THIS]], %class.S3** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[THIS1:%.*]] = load %class.S3*, %class.S3** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    call void @_ZN2S3C2Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_ZN2S3C2Ev
+// CHECK-SAME: (%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.S3*, align 8
+// CHECK-NEXT:    store %class.S3* [[THIS]], %class.S3** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[THIS1:%.*]] = load %class.S3*, %class.S3** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[CLASS_S3:%.*]], %class.S3* [[THIS1]], i32 0, i32 0
+// CHECK-NEXT:    store i32 0, i32* [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
+// CHECK-SAME: () #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
+// CHECK:       arrayctor.loop:
+// CHECK-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %class.S3* [ getelementptr inbounds ([5 x %class.S3], [5 x %class.S3]* @_ZL2ca, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
+// CHECK-NEXT:    call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
+// CHECK-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S3:%.*]], %class.S3* [[ARRAYCTOR_CUR]], i64 1
+// CHECK-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S3* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([5 x %class.S3], [5 x %class.S3]* @_ZL2ca, i64 1, i64 0)
+// CHECK-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
+// CHECK:       arrayctor.cont:
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
+// CHECK-SAME: () #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) @h)
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@main
+// CHECK-SAME: (i32 noundef signext [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR2:[0-9]+]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
+// CHECK-NEXT:    [[DA:%.*]] = alloca [5 x i32], align 4
+// CHECK-NEXT:    [[H:%.*]] = alloca [10 x %struct.S6], align 4
+// CHECK-NEXT:    [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
+// CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[J:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    [[K:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    [[Z:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    [[AA:%.*]] = alloca [10 x i32], align 4
+// CHECK-NEXT:    [[RAA:%.*]] = alloca [10 x i32]*, align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[_TMP13:%.*]] = alloca [10 x i32]*, align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS15:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS21:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS26:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS27:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS28:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    store i32 0, i32* [[RETVAL]], align 4
+// CHECK-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
+// CHECK-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast [5 x i32]* [[DA]] to i8*
+// CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 20, i1 false)
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
+// CHECK-NEXT:    store i32* [[I]], i32** [[J]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[J]], align 8
+// CHECK-NEXT:    store i32* [[TMP1]], i32** [[K]], align 8
+// CHECK-NEXT:    store i32** [[K]], i32*** [[Z]], align 8
+// CHECK-NEXT:    store [10 x i32]* [[AA]], [10 x i32]** [[RAA]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32***
+// CHECK-NEXT:    store i32** [[K]], i32*** [[TMP3]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i32***
+// CHECK-NEXT:    store i32** [[K]], i32*** [[TMP5]], align 8
+// CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP6]], align 8
+// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP9]], align 4
+// CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP10]], align 4
+// CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP7]], i8*** [[TMP11]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP8]], i8*** [[TMP12]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP13]], align 8
+// CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP14]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP15]], align 8
+// CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP16]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP17]], align 8
+// CHECK-NEXT:    [[TMP18:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l145.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
+// CHECK-NEXT:    br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK:       omp_offload.failed:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l145(i32** [[K]]) #[[ATTR5:[0-9]+]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT]]
+// CHECK:       omp_offload.cont:
+// CHECK-NEXT:    [[TMP20:%.*]] = load i32**, i32*** [[Z]], align 8
+// CHECK-NEXT:    store i32** [[TMP20]], i32*** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP21:%.*]] = load i32**, i32*** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP22:%.*]] = load i32**, i32*** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32***
+// CHECK-NEXT:    store i32** [[TMP22]], i32*** [[TMP24]], align 8
+// CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32***
+// CHECK-NEXT:    store i32** [[TMP22]], i32*** [[TMP26]], align 8
+// CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP27]], align 8
+// CHECK-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP30]], align 4
+// CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP31]], align 4
+// CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP28]], i8*** [[TMP32]], align 8
+// CHECK-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP29]], i8*** [[TMP33]], align 8
+// CHECK-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP34]], align 8
+// CHECK-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP35]], align 8
+// CHECK-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP36]], align 8
+// CHECK-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP37]], align 8
+// CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP38]], align 8
+// CHECK-NEXT:    [[TMP39:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]])
+// CHECK-NEXT:    [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
+// CHECK-NEXT:    br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
+// CHECK:       omp_offload.failed5:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32** [[TMP21]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT6]]
+// CHECK:       omp_offload.cont6:
+// CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP42:%.*]] = bitcast i8** [[TMP41]] to [10 x i32]**
+// CHECK-NEXT:    store [10 x i32]* [[AA]], [10 x i32]** [[TMP42]], align 8
+// CHECK-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to [10 x i32]**
+// CHECK-NEXT:    store [10 x i32]* [[AA]], [10 x i32]** [[TMP44]], align 8
+// CHECK-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP45]], align 8
+// CHECK-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS10:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP48]], align 4
+// CHECK-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP49]], align 4
+// CHECK-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP46]], i8*** [[TMP50]], align 8
+// CHECK-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP47]], i8*** [[TMP51]], align 8
+// CHECK-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.7, i32 0, i32 0), i64** [[TMP52]], align 8
+// CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.8, i32 0, i32 0), i64** [[TMP53]], align 8
+// CHECK-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP54]], align 8
+// CHECK-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP55]], align 8
+// CHECK-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP56]], align 8
+// CHECK-NEXT:    [[TMP57:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l149.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]])
+// CHECK-NEXT:    [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
+// CHECK-NEXT:    br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
+// CHECK:       omp_offload.failed11:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l149([10 x i32]* [[AA]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
+// CHECK:       omp_offload.cont12:
+// CHECK-NEXT:    [[TMP59:%.*]] = load [10 x i32]*, [10 x i32]** [[RAA]], align 8
+// CHECK-NEXT:    store [10 x i32]* [[TMP59]], [10 x i32]** [[_TMP13]], align 8
+// CHECK-NEXT:    [[TMP60:%.*]] = load [10 x i32]*, [10 x i32]** [[_TMP13]], align 8
+// CHECK-NEXT:    [[TMP61:%.*]] = load [10 x i32]*, [10 x i32]** [[_TMP13]], align 8
+// CHECK-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP63:%.*]] = bitcast i8** [[TMP62]] to [10 x i32]**
+// CHECK-NEXT:    store [10 x i32]* [[TMP61]], [10 x i32]** [[TMP63]], align 8
+// CHECK-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP65:%.*]] = bitcast i8** [[TMP64]] to [10 x i32]**
+// CHECK-NEXT:    store [10 x i32]* [[TMP61]], [10 x i32]** [[TMP65]], align 8
+// CHECK-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP66]], align 8
+// CHECK-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP68:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS17:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP69]], align 4
+// CHECK-NEXT:    [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP70]], align 4
+// CHECK-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP67]], i8*** [[TMP71]], align 8
+// CHECK-NEXT:    [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP68]], i8*** [[TMP72]], align 8
+// CHECK-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.9, i32 0, i32 0), i64** [[TMP73]], align 8
+// CHECK-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i64** [[TMP74]], align 8
+// CHECK-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP75]], align 8
+// CHECK-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP76]], align 8
+// CHECK-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP77]], align 8
+// CHECK-NEXT:    [[TMP78:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS17]])
+// CHECK-NEXT:    [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
+// CHECK-NEXT:    br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED18:%.*]], label [[OMP_OFFLOAD_CONT19:%.*]]
+// CHECK:       omp_offload.failed18:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151([10 x i32]* [[TMP60]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT19]]
+// CHECK:       omp_offload.cont19:
+// CHECK-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP81:%.*]] = bitcast i8** [[TMP80]] to [10 x %struct.S6]**
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP81]], align 8
+// CHECK-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP83:%.*]] = bitcast i8** [[TMP82]] to [10 x %struct.S6]**
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP83]], align 8
+// CHECK-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP84]], align 8
+// CHECK-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS23:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP87]], align 4
+// CHECK-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP88]], align 4
+// CHECK-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP85]], i8*** [[TMP89]], align 8
+// CHECK-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP86]], i8*** [[TMP90]], align 8
+// CHECK-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64** [[TMP91]], align 8
+// CHECK-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP92]], align 8
+// CHECK-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP93]], align 8
+// CHECK-NEXT:    [[TMP94:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP94]], align 8
+// CHECK-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP95]], align 8
+// CHECK-NEXT:    [[TMP96:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS23]])
+// CHECK-NEXT:    [[TMP97:%.*]] = icmp ne i32 [[TMP96]], 0
+// CHECK-NEXT:    br i1 [[TMP97]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]]
+// CHECK:       omp_offload.failed24:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153([10 x %struct.S6]* [[H]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT25]]
+// CHECK:       omp_offload.cont25:
+// CHECK-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i32], [5 x i32]* [[DA]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32**
+// CHECK-NEXT:    store i32* [[ARRAYDECAY]], i32** [[TMP99]], align 8
+// CHECK-NEXT:    [[TMP100:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32**
+// CHECK-NEXT:    store i32* [[ARRAYDECAY]], i32** [[TMP101]], align 8
+// CHECK-NEXT:    [[TMP102:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP102]], align 8
+// CHECK-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP104:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS29:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP105]], align 4
+// CHECK-NEXT:    [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP106]], align 4
+// CHECK-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP103]], i8*** [[TMP107]], align 8
+// CHECK-NEXT:    [[TMP108:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP104]], i8*** [[TMP108]], align 8
+// CHECK-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64** [[TMP109]], align 8
+// CHECK-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i64** [[TMP110]], align 8
+// CHECK-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP111]], align 8
+// CHECK-NEXT:    [[TMP112:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP112]], align 8
+// CHECK-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP113]], align 8
+// CHECK-NEXT:    [[TMP114:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS29]])
+// CHECK-NEXT:    [[TMP115:%.*]] = icmp ne i32 [[TMP114]], 0
+// CHECK-NEXT:    br i1 [[TMP115]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
+// CHECK:       omp_offload.failed30:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155([5 x i32]* [[DA]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT31]]
+// CHECK:       omp_offload.cont31:
+// CHECK-NEXT:    [[TMP116:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
+// CHECK-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_S0_(i32 noundef signext [[TMP116]])
+// CHECK-NEXT:    [[CALL32:%.*]] = call noundef i32* @_Z5tmainIPiET_S1_(i32* noundef [[ARGC_ADDR]])
+// CHECK-NEXT:    [[TMP117:%.*]] = load i32, i32* [[CALL32]], align 4
+// CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[CALL]], [[TMP117]]
+// CHECK-NEXT:    ret i32 [[ADD]]
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l145
+// CHECK-SAME: (i32** noundef nonnull align 8 dereferenceable(8) [[K:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[K_ADDR:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    store i32** [[K]], i32*** [[K_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32**, i32*** [[K_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[TMP0]], align 8
+// CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 1
+// CHECK-NEXT:    store i32* [[INCDEC_PTR]], i32** [[TMP0]], align 8
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147
+// CHECK-SAME: (i32** noundef nonnull align 8 dereferenceable(8) [[Z:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[Z_ADDR:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    store i32** [[Z]], i32*** [[Z_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32**, i32*** [[Z_ADDR]], align 8
+// CHECK-NEXT:    store i32** [[TMP0]], i32*** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
+// CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 1
+// CHECK-NEXT:    store i32* [[INCDEC_PTR]], i32** [[TMP1]], align 8
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l149
+// CHECK-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[AA:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[AA_ADDR:%.*]] = alloca [10 x i32]*, align 8
+// CHECK-NEXT:    store [10 x i32]* [[AA]], [10 x i32]** [[AA_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[AA_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 0
+// CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151
+// CHECK-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[RAA:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[RAA_ADDR:%.*]] = alloca [10 x i32]*, align 8
+// CHECK-NEXT:    [[TMP:%.*]] = alloca [10 x i32]*, align 8
+// CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store [10 x i32]* [[RAA]], [10 x i32]** [[RAA_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[RAA_ADDR]], align 8
+// CHECK-NEXT:    store [10 x i32]* [[TMP0]], [10 x i32]** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP1]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT:    store i32 [[TMP2]], i32* [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153
+// CHECK-SAME: ([10 x %struct.S6]* noundef nonnull align 4 dereferenceable(40) [[H:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[H_ADDR:%.*]] = alloca [10 x %struct.S6]*, align 8
+// CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[H_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load [10 x %struct.S6]*, [10 x %struct.S6]** [[H_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[TMP0]], i64 0, i64 1
+// CHECK-NEXT:    [[A1:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A1]], align 4
+// CHECK-NEXT:    store i32 [[TMP1]], i32* [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
+// CHECK-SAME: ([5 x i32]* noundef nonnull align 4 dereferenceable(20) [[DA:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DA_ADDR:%.*]] = alloca [5 x i32]*, align 8
+// CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store [5 x i32]* [[DA]], [5 x i32]** [[DA_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load [5 x i32]*, [5 x i32]** [[DA_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x i32], [5 x i32]* [[TMP0]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT:    store i32 [[TMP1]], i32* [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_Z5tmainIiET_S0_
+// CHECK-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR6:[0-9]+]] comdat {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[DA:%.*]] = alloca [5 x i32], align 4
+// CHECK-NEXT:    [[H:%.*]] = alloca [10 x %struct.S6], align 4
+// CHECK-NEXT:    [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
+// CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[J:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    [[K:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    [[Z:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    [[AA:%.*]] = alloca [10 x i32], align 4
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast [5 x i32]* [[DA]] to i8*
+// CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 20, i1 false)
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
+// CHECK-NEXT:    store i32* [[I]], i32** [[J]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[J]], align 8
+// CHECK-NEXT:    store i32* [[TMP1]], i32** [[K]], align 8
+// CHECK-NEXT:    store i32** [[K]], i32*** [[Z]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32***
+// CHECK-NEXT:    store i32** [[K]], i32*** [[TMP3]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i32***
+// CHECK-NEXT:    store i32** [[K]], i32*** [[TMP5]], align 8
+// CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP6]], align 8
+// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP9]], align 4
+// CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP10]], align 4
+// CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP7]], i8*** [[TMP11]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP8]], i8*** [[TMP12]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.15, i32 0, i32 0), i64** [[TMP13]], align 8
+// CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i64** [[TMP14]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP15]], align 8
+// CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP16]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP17]], align 8
+// CHECK-NEXT:    [[TMP18:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l123.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
+// CHECK-NEXT:    br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK:       omp_offload.failed:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l123(i32** [[K]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT]]
+// CHECK:       omp_offload.cont:
+// CHECK-NEXT:    [[TMP20:%.*]] = load i32**, i32*** [[Z]], align 8
+// CHECK-NEXT:    store i32** [[TMP20]], i32*** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP21:%.*]] = load i32**, i32*** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP22:%.*]] = load i32**, i32*** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32***
+// CHECK-NEXT:    store i32** [[TMP22]], i32*** [[TMP24]], align 8
+// CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32***
+// CHECK-NEXT:    store i32** [[TMP22]], i32*** [[TMP26]], align 8
+// CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP27]], align 8
+// CHECK-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP30]], align 4
+// CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP31]], align 4
+// CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP28]], i8*** [[TMP32]], align 8
+// CHECK-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP29]], i8*** [[TMP33]], align 8
+// CHECK-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64** [[TMP34]], align 8
+// CHECK-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i64** [[TMP35]], align 8
+// CHECK-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP36]], align 8
+// CHECK-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP37]], align 8
+// CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP38]], align 8
+// CHECK-NEXT:    [[TMP39:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l125.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]])
+// CHECK-NEXT:    [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
+// CHECK-NEXT:    br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
+// CHECK:       omp_offload.failed5:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l125(i32** [[TMP21]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT6]]
+// CHECK:       omp_offload.cont6:
+// CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP42:%.*]] = bitcast i8** [[TMP41]] to [10 x i32]**
+// CHECK-NEXT:    store [10 x i32]* [[AA]], [10 x i32]** [[TMP42]], align 8
+// CHECK-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to [10 x i32]**
+// CHECK-NEXT:    store [10 x i32]* [[AA]], [10 x i32]** [[TMP44]], align 8
+// CHECK-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP45]], align 8
+// CHECK-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS10:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP48]], align 4
+// CHECK-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP49]], align 4
+// CHECK-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP46]], i8*** [[TMP50]], align 8
+// CHECK-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP47]], i8*** [[TMP51]], align 8
+// CHECK-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64** [[TMP52]], align 8
+// CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i64** [[TMP53]], align 8
+// CHECK-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP54]], align 8
+// CHECK-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP55]], align 8
+// CHECK-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP56]], align 8
+// CHECK-NEXT:    [[TMP57:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l127.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]])
+// CHECK-NEXT:    [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
+// CHECK-NEXT:    br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
+// CHECK:       omp_offload.failed11:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l127([10 x i32]* [[AA]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
+// CHECK:       omp_offload.cont12:
+// CHECK-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP60:%.*]] = bitcast i8** [[TMP59]] to [10 x %struct.S6]**
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP60]], align 8
+// CHECK-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP62:%.*]] = bitcast i8** [[TMP61]] to [10 x %struct.S6]**
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP62]], align 8
+// CHECK-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP63]], align 8
+// CHECK-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS16:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP66]], align 4
+// CHECK-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP67]], align 4
+// CHECK-NEXT:    [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP64]], i8*** [[TMP68]], align 8
+// CHECK-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP65]], i8*** [[TMP69]], align 8
+// CHECK-NEXT:    [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.21, i32 0, i32 0), i64** [[TMP70]], align 8
+// CHECK-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.22, i32 0, i32 0), i64** [[TMP71]], align 8
+// CHECK-NEXT:    [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP72]], align 8
+// CHECK-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP73]], align 8
+// CHECK-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP74]], align 8
+// CHECK-NEXT:    [[TMP75:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l129.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]])
+// CHECK-NEXT:    [[TMP76:%.*]] = icmp ne i32 [[TMP75]], 0
+// CHECK-NEXT:    br i1 [[TMP76]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
+// CHECK:       omp_offload.failed17:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l129([10 x %struct.S6]* [[H]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
+// CHECK:       omp_offload.cont18:
+// CHECK-NEXT:    ret i32 0
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_Z5tmainIPiET_S1_
+// CHECK-SAME: (i32* noundef [[ARGC:%.*]]) #[[ATTR6]] comdat {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    [[DA:%.*]] = alloca [5 x i32*], align 8
+// CHECK-NEXT:    [[H:%.*]] = alloca [10 x %struct.S6], align 4
+// CHECK-NEXT:    [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
+// CHECK-NEXT:    [[I:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    [[J:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    [[K:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    [[Z:%.*]] = alloca i32***, align 8
+// CHECK-NEXT:    [[AA:%.*]] = alloca [10 x i32*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i32***, align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS14:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [1 x i8*], align 8
+// CHECK-NEXT:    store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast [5 x i32*]* [[DA]] to i8*
+// CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP0]], i8 0, i64 40, i1 false)
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
+// CHECK-NEXT:    store i32** [[I]], i32*** [[J]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[J]], align 8
+// CHECK-NEXT:    store i32** [[TMP1]], i32*** [[K]], align 8
+// CHECK-NEXT:    store i32*** [[K]], i32**** [[Z]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32****
+// CHECK-NEXT:    store i32*** [[K]], i32**** [[TMP3]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i32****
+// CHECK-NEXT:    store i32*** [[K]], i32**** [[TMP5]], align 8
+// CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP6]], align 8
+// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP9]], align 4
+// CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP10]], align 4
+// CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP7]], i8*** [[TMP11]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP8]], i8*** [[TMP12]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.23, i32 0, i32 0), i64** [[TMP13]], align 8
+// CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.24, i32 0, i32 0), i64** [[TMP14]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP15]], align 8
+// CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP16]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP17]], align 8
+// CHECK-NEXT:    [[TMP18:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l123.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
+// CHECK-NEXT:    br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK:       omp_offload.failed:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l123(i32*** [[K]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT]]
+// CHECK:       omp_offload.cont:
+// CHECK-NEXT:    [[TMP20:%.*]] = load i32***, i32**** [[Z]], align 8
+// CHECK-NEXT:    store i32*** [[TMP20]], i32**** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP21:%.*]] = load i32***, i32**** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP22:%.*]] = load i32***, i32**** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32****
+// CHECK-NEXT:    store i32*** [[TMP22]], i32**** [[TMP24]], align 8
+// CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32****
+// CHECK-NEXT:    store i32*** [[TMP22]], i32**** [[TMP26]], align 8
+// CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP27]], align 8
+// CHECK-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP30]], align 4
+// CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP31]], align 4
+// CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP28]], i8*** [[TMP32]], align 8
+// CHECK-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP29]], i8*** [[TMP33]], align 8
+// CHECK-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.25, i32 0, i32 0), i64** [[TMP34]], align 8
+// CHECK-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.26, i32 0, i32 0), i64** [[TMP35]], align 8
+// CHECK-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP36]], align 8
+// CHECK-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP37]], align 8
+// CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP38]], align 8
+// CHECK-NEXT:    [[TMP39:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l125.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]])
+// CHECK-NEXT:    [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
+// CHECK-NEXT:    br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
+// CHECK:       omp_offload.failed5:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l125(i32*** [[TMP21]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT6]]
+// CHECK:       omp_offload.cont6:
+// CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP42:%.*]] = bitcast i8** [[TMP41]] to [10 x i32*]**
+// CHECK-NEXT:    store [10 x i32*]* [[AA]], [10 x i32*]** [[TMP42]], align 8
+// CHECK-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to [10 x i32*]**
+// CHECK-NEXT:    store [10 x i32*]* [[AA]], [10 x i32*]** [[TMP44]], align 8
+// CHECK-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP45]], align 8
+// CHECK-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS10:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP48]], align 4
+// CHECK-NEXT:    [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP49]], align 4
+// CHECK-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP46]], i8*** [[TMP50]], align 8
+// CHECK-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP47]], i8*** [[TMP51]], align 8
+// CHECK-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64** [[TMP52]], align 8
+// CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i64** [[TMP53]], align 8
+// CHECK-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP54]], align 8
+// CHECK-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP55]], align 8
+// CHECK-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP56]], align 8
+// CHECK-NEXT:    [[TMP57:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l127.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS10]])
+// CHECK-NEXT:    [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0
+// CHECK-NEXT:    br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]]
+// CHECK:       omp_offload.failed11:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l127([10 x i32*]* [[AA]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT12]]
+// CHECK:       omp_offload.cont12:
+// CHECK-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP60:%.*]] = bitcast i8** [[TMP59]] to [10 x %struct.S6]**
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP60]], align 8
+// CHECK-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP62:%.*]] = bitcast i8** [[TMP61]] to [10 x %struct.S6]**
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[TMP62]], align 8
+// CHECK-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP63]], align 8
+// CHECK-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS16:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
+// CHECK-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP66]], align 4
+// CHECK-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 1
+// CHECK-NEXT:    store i32 1, i32* [[TMP67]], align 4
+// CHECK-NEXT:    [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP64]], i8*** [[TMP68]], align 8
+// CHECK-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP65]], i8*** [[TMP69]], align 8
+// CHECK-NEXT:    [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.29, i32 0, i32 0), i64** [[TMP70]], align 8
+// CHECK-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.30, i32 0, i32 0), i64** [[TMP71]], align 8
+// CHECK-NEXT:    [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP72]], align 8
+// CHECK-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP73]], align 8
+// CHECK-NEXT:    [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP74]], align 8
+// CHECK-NEXT:    [[TMP75:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l129.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS16]])
+// CHECK-NEXT:    [[TMP76:%.*]] = icmp ne i32 [[TMP75]], 0
+// CHECK-NEXT:    br i1 [[TMP76]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
+// CHECK:       omp_offload.failed17:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l129([10 x %struct.S6]* [[H]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
+// CHECK:       omp_offload.cont18:
+// CHECK-NEXT:    ret i32* null
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l123
+// CHECK-SAME: (i32** noundef nonnull align 8 dereferenceable(8) [[K:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[K_ADDR:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    store i32** [[K]], i32*** [[K_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32**, i32*** [[K_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[TMP0]], align 8
+// CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 1
+// CHECK-NEXT:    store i32* [[INCDEC_PTR]], i32** [[TMP0]], align 8
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l125
+// CHECK-SAME: (i32** noundef nonnull align 8 dereferenceable(8) [[Z:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[Z_ADDR:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i32**, align 8
+// CHECK-NEXT:    store i32** [[Z]], i32*** [[Z_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32**, i32*** [[Z_ADDR]], align 8
+// CHECK-NEXT:    store i32** [[TMP0]], i32*** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
+// CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 1
+// CHECK-NEXT:    store i32* [[INCDEC_PTR]], i32** [[TMP1]], align 8
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l127
+// CHECK-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[AA:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[AA_ADDR:%.*]] = alloca [10 x i32]*, align 8
+// CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store [10 x i32]* [[AA]], [10 x i32]** [[AA_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[AA_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT:    store i32 [[TMP1]], i32* [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_S0__l129
+// CHECK-SAME: ([10 x %struct.S6]* noundef nonnull align 4 dereferenceable(40) [[H:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[H_ADDR:%.*]] = alloca [10 x %struct.S6]*, align 8
+// CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[H_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load [10 x %struct.S6]*, [10 x %struct.S6]** [[H_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[TMP0]], i64 0, i64 0
+// CHECK-NEXT:    [[A1:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A1]], align 4
+// CHECK-NEXT:    store i32 [[TMP1]], i32* [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l123
+// CHECK-SAME: (i32*** noundef nonnull align 8 dereferenceable(8) [[K:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[K_ADDR:%.*]] = alloca i32***, align 8
+// CHECK-NEXT:    store i32*** [[K]], i32**** [[K_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32***, i32**** [[K_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[TMP0]], align 8
+// CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32*, i32** [[TMP1]], i32 1
+// CHECK-NEXT:    store i32** [[INCDEC_PTR]], i32*** [[TMP0]], align 8
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l125
+// CHECK-SAME: (i32*** noundef nonnull align 8 dereferenceable(8) [[Z:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[Z_ADDR:%.*]] = alloca i32***, align 8
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i32***, align 8
+// CHECK-NEXT:    store i32*** [[Z]], i32**** [[Z_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32***, i32**** [[Z_ADDR]], align 8
+// CHECK-NEXT:    store i32*** [[TMP0]], i32**** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32***, i32**** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[TMP1]], align 8
+// CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32*, i32** [[TMP2]], i32 1
+// CHECK-NEXT:    store i32** [[INCDEC_PTR]], i32*** [[TMP1]], align 8
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l127
+// CHECK-SAME: ([10 x i32*]* noundef nonnull align 8 dereferenceable(80) [[AA:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[AA_ADDR:%.*]] = alloca [10 x i32*]*, align 8
+// CHECK-NEXT:    [[A:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    store [10 x i32*]* [[AA]], [10 x i32*]** [[AA_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load [10 x i32*]*, [10 x i32*]** [[AA_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32*], [10 x i32*]* [[TMP0]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[ARRAYIDX]], align 8
+// CHECK-NEXT:    store i32* [[TMP1]], i32** [[A]], align 8
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIPiET_S1__l129
+// CHECK-SAME: ([10 x %struct.S6]* noundef nonnull align 4 dereferenceable(40) [[H:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[H_ADDR:%.*]] = alloca [10 x %struct.S6]*, align 8
+// CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[H_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load [10 x %struct.S6]*, [10 x %struct.S6]** [[H_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[TMP0]], i64 0, i64 0
+// CHECK-NEXT:    [[A1:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A1]], align 4
+// CHECK-NEXT:    store i32 [[TMP1]], i32* [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_Z12use_templatev
+// CHECK-SAME: () #[[ATTR6]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[AKERN:%.*]] = alloca [[STRUCT_SOMEKERNEL:%.*]], align 4
+// CHECK-NEXT:    call void @_ZN10SomeKernelC1Ev(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]])
+// CHECK-NEXT:    call void @_ZN10SomeKernel5applyILj32EEEvv(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]])
+// CHECK-NEXT:    call void @_ZN10SomeKernelD1Ev(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]]) #[[ATTR5]]
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_ZN10SomeKernel5applyILj32EEEvv
+// CHECK-SAME: (%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR6]] comdat align 2 {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SomeKernel*, align 8
+// CHECK-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
+// CHECK-NEXT:    store %struct.SomeKernel* [[THIS]], %struct.SomeKernel** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[THIS1:%.*]] = load %struct.SomeKernel*, %struct.SomeKernel** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[TARGETDEV:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL:%.*]], %struct.SomeKernel* [[THIS1]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[TARGETDEV]], align 4
+// CHECK-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
+// CHECK-NEXT:    [[DEVPTR:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL]], %struct.SomeKernel* [[THIS1]], i32 0, i32 1
+// CHECK-NEXT:    [[TARGETDEV2:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL]], %struct.SomeKernel* [[THIS1]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr float, float* [[DEVPTR]], i32 1
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[TARGETDEV2]] to i8*
+// CHECK-NEXT:    [[TMP3:%.*]] = bitcast float* [[TMP1]] to i8*
+// CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint i8* [[TMP3]] to i64
+// CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
+// CHECK-NEXT:    [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
+// CHECK-NEXT:    [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
+// CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP8]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes.31 to i8*), i64 24, i1 false)
+// CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SomeKernel**
+// CHECK-NEXT:    store %struct.SomeKernel* [[THIS1]], %struct.SomeKernel** [[TMP10]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32**
+// CHECK-NEXT:    store i32* [[TARGETDEV2]], i32** [[TMP12]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NEXT:    store i64 [[TMP7]], i64* [[TMP13]], align 8
+// CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP14]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to %struct.SomeKernel**
+// CHECK-NEXT:    store %struct.SomeKernel* [[THIS1]], %struct.SomeKernel** [[TMP16]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to float**
+// CHECK-NEXT:    store float* [[DEVPTR]], float** [[TMP18]], align 8
+// CHECK-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK-NEXT:    store i8* null, i8** [[TMP19]], align 8
+// CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.SomeKernel**
+// CHECK-NEXT:    store %struct.SomeKernel* [[THIS1]], %struct.SomeKernel** [[TMP21]], align 8
+// CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
+// CHECK-NEXT:    store i32* [[TARGETDEV2]], i32** [[TMP23]], align 8
+// CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
+// CHECK-NEXT:    store i8* null, i8** [[TMP24]], align 8
+// CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
+// CHECK-NEXT:    [[TMP29:%.*]] = sext i32 [[TMP28]] to i64
+// CHECK-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP30]], align 4
+// CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-NEXT:    store i32 3, i32* [[TMP31]], align 4
+// CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP25]], i8*** [[TMP32]], align 8
+// CHECK-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP26]], i8*** [[TMP33]], align 8
+// CHECK-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-NEXT:    store i64* [[TMP27]], i64** [[TMP34]], align 8
+// CHECK-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.32, i32 0, i32 0), i64** [[TMP35]], align 8
+// CHECK-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP36]], align 8
+// CHECK-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP37]], align 8
+// CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP38]], align 8
+// CHECK-NEXT:    [[TMP39:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 [[TMP29]], i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN10SomeKernel5applyILj32EEEvv_l168.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-NEXT:    [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0
+// CHECK-NEXT:    br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK:       omp_offload.failed:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN10SomeKernel5applyILj32EEEvv_l168(%struct.SomeKernel* [[THIS1]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT]]
+// CHECK:       omp_offload.cont:
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN10SomeKernel5applyILj32EEEvv_l168
+// CHECK-SAME: (%struct.SomeKernel* noundef [[THIS:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SomeKernel*, align 8
+// CHECK-NEXT:    store %struct.SomeKernel* [[THIS]], %struct.SomeKernel** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load %struct.SomeKernel*, %struct.SomeKernel** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[DEVPTR:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL:%.*]], %struct.SomeKernel* [[TMP0]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[DEVPTR]], align 4
+// CHECK-NEXT:    [[INC:%.*]] = fadd float [[TMP1]], 1.000000e+00
+// CHECK-NEXT:    store float [[INC]], float* [[DEVPTR]], align 4
+// CHECK-NEXT:    [[TARGETDEV:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL]], %struct.SomeKernel* [[TMP0]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TARGETDEV]], align 4
+// CHECK-NEXT:    [[INC1:%.*]] = add nsw i32 [[TMP2]], 1
+// CHECK-NEXT:    store i32 [[INC1]], i32* [[TARGETDEV]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_has_device_addr_codegen.cpp
+// CHECK-SAME: () #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @__cxx_global_var_init()
+// CHECK-NEXT:    call void @__cxx_global_var_init.1()
+// CHECK-NEXT:    call void @__cxx_global_var_init.2()
+// CHECK-NEXT:    call void @__cxx_global_var_init.3()
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@__tls_init
+// CHECK-SAME: () #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i8, i8* @__tls_guard, align 1
+// CHECK-NEXT:    [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
+// CHECK-NEXT:    br i1 [[GUARD_UNINITIALIZED]], label [[INIT:%.*]], label [[EXIT:%.*]], !prof [[PROF18:![0-9]+]]
+// CHECK:       init:
+// CHECK-NEXT:    store i8 1, i8* @__tls_guard, align 1
+// CHECK-NEXT:    call void @__cxx_global_var_init.4()
+// CHECK-NEXT:    br label [[EXIT]]
+// CHECK:       exit:
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_ZTW1h
+// CHECK-SAME: () #[[ATTR10:[0-9]+]] comdat {
+// CHECK-NEXT:    call void @_ZTH1h()
+// CHECK-NEXT:    [[TMP1:%.*]] = call align 4 %class.S3* @llvm.threadlocal.address.p0s_class.S3s(%class.S3* align 4 @h)
+// CHECK-NEXT:    ret %class.S3* [[TMP1]]
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
+// CHECK-SAME: () #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @__tgt_register_requires(i64 1)
+// CHECK-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init
+// SIMD-ONLY0-SAME: () #[[ATTR0:[0-9]+]] {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) @_ZL1b)
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN2S2C1Ev
+// SIMD-ONLY0-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8
+// SIMD-ONLY0-NEXT:    store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    call void @_ZN2S2C2Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
+// SIMD-ONLY0-SAME: () #[[ATTR0]] {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
+// SIMD-ONLY0:       arrayctor.loop:
+// SIMD-ONLY0-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %class.S2* [ getelementptr inbounds ([5 x %class.S2], [5 x %class.S2]* @_ZL2ba, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
+// SIMD-ONLY0-NEXT:    call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
+// SIMD-ONLY0-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[ARRAYCTOR_CUR]], i64 1
+// SIMD-ONLY0-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S2* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([5 x %class.S2], [5 x %class.S2]* @_ZL2ba, i64 1, i64 0)
+// SIMD-ONLY0-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
+// SIMD-ONLY0:       arrayctor.cont:
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
+// SIMD-ONLY0-SAME: () #[[ATTR0]] {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) @_ZL1c)
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN2S3C1Ev
+// SIMD-ONLY0-SAME: (%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.S3*, align 8
+// SIMD-ONLY0-NEXT:    store %class.S3* [[THIS]], %class.S3** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %class.S3*, %class.S3** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    call void @_ZN2S3C2Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
+// SIMD-ONLY0-SAME: () #[[ATTR0]] {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
+// SIMD-ONLY0:       arrayctor.loop:
+// SIMD-ONLY0-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %class.S3* [ getelementptr inbounds ([5 x %class.S3], [5 x %class.S3]* @_ZL2ca, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
+// SIMD-ONLY0-NEXT:    call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
+// SIMD-ONLY0-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S3:%.*]], %class.S3* [[ARRAYCTOR_CUR]], i64 1
+// SIMD-ONLY0-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S3* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([5 x %class.S3], [5 x %class.S3]* @_ZL2ca, i64 1, i64 0)
+// SIMD-ONLY0-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
+// SIMD-ONLY0:       arrayctor.cont:
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
+// SIMD-ONLY0-SAME: () #[[ATTR0]] {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    call void @_ZN2S3C1Ev(%class.S3* noundef nonnull align 4 dereferenceable(4) @h)
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@main
+// SIMD-ONLY0-SAME: (i32 noundef signext [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR2:[0-9]+]] {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
+// SIMD-ONLY0-NEXT:    [[DA:%.*]] = alloca [5 x i32], align 4
+// SIMD-ONLY0-NEXT:    [[H:%.*]] = alloca [10 x %struct.S6], align 4
+// SIMD-ONLY0-NEXT:    [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
+// SIMD-ONLY0-NEXT:    [[I:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    [[J:%.*]] = alloca i32*, align 8
+// SIMD-ONLY0-NEXT:    [[K:%.*]] = alloca i32*, align 8
+// SIMD-ONLY0-NEXT:    [[Z:%.*]] = alloca i32**, align 8
+// SIMD-ONLY0-NEXT:    [[AA:%.*]] = alloca [10 x i32], align 4
+// SIMD-ONLY0-NEXT:    [[RAA:%.*]] = alloca [10 x i32]*, align 8
+// SIMD-ONLY0-NEXT:    [[TMP:%.*]] = alloca i32**, align 8
+// SIMD-ONLY0-NEXT:    [[_TMP2:%.*]] = alloca [10 x i32]*, align 8
+// SIMD-ONLY0-NEXT:    [[A:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    [[A4:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    [[A7:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    store i32 0, i32* [[RETVAL]], align 4
+// SIMD-ONLY0-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
+// SIMD-ONLY0-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = bitcast [5 x i32]* [[DA]] to i8*
+// SIMD-ONLY0-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 20, i1 false)
+// SIMD-ONLY0-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
+// SIMD-ONLY0-NEXT:    store i32* [[I]], i32** [[J]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[J]], align 8
+// SIMD-ONLY0-NEXT:    store i32* [[TMP1]], i32** [[K]], align 8
+// SIMD-ONLY0-NEXT:    store i32** [[K]], i32*** [[Z]], align 8
+// SIMD-ONLY0-NEXT:    store [10 x i32]* [[AA]], [10 x i32]** [[RAA]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[K]], align 8
+// SIMD-ONLY0-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 1
+// SIMD-ONLY0-NEXT:    store i32* [[INCDEC_PTR]], i32** [[K]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[Z]], align 8
+// SIMD-ONLY0-NEXT:    store i32** [[TMP3]], i32*** [[TMP]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[Z]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP5:%.*]] = load i32**, i32*** [[TMP]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8
+// SIMD-ONLY0-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 1
+// SIMD-ONLY0-NEXT:    store i32* [[INCDEC_PTR1]], i32** [[TMP5]], align 8
+// SIMD-ONLY0-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[AA]], i64 0, i64 0
+// SIMD-ONLY0-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
+// SIMD-ONLY0-NEXT:    [[TMP7:%.*]] = load [10 x i32]*, [10 x i32]** [[RAA]], align 8
+// SIMD-ONLY0-NEXT:    store [10 x i32]* [[TMP7]], [10 x i32]** [[_TMP2]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP8:%.*]] = load [10 x i32]*, [10 x i32]** [[RAA]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP9:%.*]] = load [10 x i32]*, [10 x i32]** [[_TMP2]], align 8
+// SIMD-ONLY0-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP9]], i64 0, i64 0
+// SIMD-ONLY0-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX3]], align 4
+// SIMD-ONLY0-NEXT:    store i32 [[TMP10]], i32* [[A]], align 4
+// SIMD-ONLY0-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[H]], i64 0, i64 1
+// SIMD-ONLY0-NEXT:    [[A6:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX5]], i32 0, i32 0
+// SIMD-ONLY0-NEXT:    [[TMP11:%.*]] = load i32, i32* [[A6]], align 4
+// SIMD-ONLY0-NEXT:    store i32 [[TMP11]], i32* [[A4]], align 4
+// SIMD-ONLY0-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [5 x i32], [5 x i32]* [[DA]], i64 0, i64 1
+// SIMD-ONLY0-NEXT:    [[TMP12:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4
+// SIMD-ONLY0-NEXT:    store i32 [[TMP12]], i32* [[A7]], align 4
+// SIMD-ONLY0-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
+// SIMD-ONLY0-NEXT:    [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_S0_(i32 noundef signext [[TMP13]])
+// SIMD-ONLY0-NEXT:    [[CALL9:%.*]] = call noundef i32* @_Z5tmainIPiET_S1_(i32* noundef [[ARGC_ADDR]])
+// SIMD-ONLY0-NEXT:    [[TMP14:%.*]] = load i32, i32* [[CALL9]], align 4
+// SIMD-ONLY0-NEXT:    [[ADD:%.*]] = add nsw i32 [[CALL]], [[TMP14]]
+// SIMD-ONLY0-NEXT:    ret i32 [[ADD]]
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_Z5tmainIiET_S0_
+// SIMD-ONLY0-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    [[DA:%.*]] = alloca [5 x i32], align 4
+// SIMD-ONLY0-NEXT:    [[H:%.*]] = alloca [10 x %struct.S6], align 4
+// SIMD-ONLY0-NEXT:    [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
+// SIMD-ONLY0-NEXT:    [[I:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    [[J:%.*]] = alloca i32*, align 8
+// SIMD-ONLY0-NEXT:    [[K:%.*]] = alloca i32*, align 8
+// SIMD-ONLY0-NEXT:    [[Z:%.*]] = alloca i32**, align 8
+// SIMD-ONLY0-NEXT:    [[AA:%.*]] = alloca [10 x i32], align 4
+// SIMD-ONLY0-NEXT:    [[TMP:%.*]] = alloca i32**, align 8
+// SIMD-ONLY0-NEXT:    [[A:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    [[A2:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
+// SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = bitcast [5 x i32]* [[DA]] to i8*
+// SIMD-ONLY0-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 20, i1 false)
+// SIMD-ONLY0-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
+// SIMD-ONLY0-NEXT:    store i32* [[I]], i32** [[J]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[J]], align 8
+// SIMD-ONLY0-NEXT:    store i32* [[TMP1]], i32** [[K]], align 8
+// SIMD-ONLY0-NEXT:    store i32** [[K]], i32*** [[Z]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[K]], align 8
+// SIMD-ONLY0-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 1
+// SIMD-ONLY0-NEXT:    store i32* [[INCDEC_PTR]], i32** [[K]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP3:%.*]] = load i32**, i32*** [[Z]], align 8
+// SIMD-ONLY0-NEXT:    store i32** [[TMP3]], i32*** [[TMP]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP4:%.*]] = load i32**, i32*** [[Z]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP5:%.*]] = load i32**, i32*** [[TMP]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8
+// SIMD-ONLY0-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 1
+// SIMD-ONLY0-NEXT:    store i32* [[INCDEC_PTR1]], i32** [[TMP5]], align 8
+// SIMD-ONLY0-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[AA]], i64 0, i64 0
+// SIMD-ONLY0-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// SIMD-ONLY0-NEXT:    store i32 [[TMP7]], i32* [[A]], align 4
+// SIMD-ONLY0-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[H]], i64 0, i64 0
+// SIMD-ONLY0-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX3]], i32 0, i32 0
+// SIMD-ONLY0-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A4]], align 4
+// SIMD-ONLY0-NEXT:    store i32 [[TMP8]], i32* [[A2]], align 4
+// SIMD-ONLY0-NEXT:    ret i32 0
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_Z5tmainIPiET_S1_
+// SIMD-ONLY0-SAME: (i32* noundef [[ARGC:%.*]]) #[[ATTR4]] comdat {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32*, align 8
+// SIMD-ONLY0-NEXT:    [[DA:%.*]] = alloca [5 x i32*], align 8
+// SIMD-ONLY0-NEXT:    [[H:%.*]] = alloca [10 x %struct.S6], align 4
+// SIMD-ONLY0-NEXT:    [[RH:%.*]] = alloca [10 x %struct.S6]*, align 8
+// SIMD-ONLY0-NEXT:    [[I:%.*]] = alloca i32*, align 8
+// SIMD-ONLY0-NEXT:    [[J:%.*]] = alloca i32**, align 8
+// SIMD-ONLY0-NEXT:    [[K:%.*]] = alloca i32**, align 8
+// SIMD-ONLY0-NEXT:    [[Z:%.*]] = alloca i32***, align 8
+// SIMD-ONLY0-NEXT:    [[AA:%.*]] = alloca [10 x i32*], align 8
+// SIMD-ONLY0-NEXT:    [[TMP:%.*]] = alloca i32***, align 8
+// SIMD-ONLY0-NEXT:    [[A:%.*]] = alloca i32*, align 8
+// SIMD-ONLY0-NEXT:    [[A2:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    store i32* [[ARGC]], i32** [[ARGC_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = bitcast [5 x i32*]* [[DA]] to i8*
+// SIMD-ONLY0-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP0]], i8 0, i64 40, i1 false)
+// SIMD-ONLY0-NEXT:    store [10 x %struct.S6]* [[H]], [10 x %struct.S6]** [[RH]], align 8
+// SIMD-ONLY0-NEXT:    store i32** [[I]], i32*** [[J]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[J]], align 8
+// SIMD-ONLY0-NEXT:    store i32** [[TMP1]], i32*** [[K]], align 8
+// SIMD-ONLY0-NEXT:    store i32*** [[K]], i32**** [[Z]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[K]], align 8
+// SIMD-ONLY0-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32*, i32** [[TMP2]], i32 1
+// SIMD-ONLY0-NEXT:    store i32** [[INCDEC_PTR]], i32*** [[K]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP3:%.*]] = load i32***, i32**** [[Z]], align 8
+// SIMD-ONLY0-NEXT:    store i32*** [[TMP3]], i32**** [[TMP]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP4:%.*]] = load i32***, i32**** [[Z]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP5:%.*]] = load i32***, i32**** [[TMP]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP6:%.*]] = load i32**, i32*** [[TMP5]], align 8
+// SIMD-ONLY0-NEXT:    [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32*, i32** [[TMP6]], i32 1
+// SIMD-ONLY0-NEXT:    store i32** [[INCDEC_PTR1]], i32*** [[TMP5]], align 8
+// SIMD-ONLY0-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32*], [10 x i32*]* [[AA]], i64 0, i64 0
+// SIMD-ONLY0-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[ARRAYIDX]], align 8
+// SIMD-ONLY0-NEXT:    store i32* [[TMP7]], i32** [[A]], align 8
+// SIMD-ONLY0-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [10 x %struct.S6], [10 x %struct.S6]* [[H]], i64 0, i64 0
+// SIMD-ONLY0-NEXT:    [[A4:%.*]] = getelementptr inbounds [[STRUCT_S6:%.*]], %struct.S6* [[ARRAYIDX3]], i32 0, i32 0
+// SIMD-ONLY0-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A4]], align 4
+// SIMD-ONLY0-NEXT:    store i32 [[TMP8]], i32* [[A2]], align 4
+// SIMD-ONLY0-NEXT:    ret i32* null
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_Z12use_templatev
+// SIMD-ONLY0-SAME: () #[[ATTR4]] {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[AKERN:%.*]] = alloca [[STRUCT_SOMEKERNEL:%.*]], align 4
+// SIMD-ONLY0-NEXT:    call void @_ZN10SomeKernelC1Ev(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]])
+// SIMD-ONLY0-NEXT:    call void @_ZN10SomeKernel5applyILj32EEEvv(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]])
+// SIMD-ONLY0-NEXT:    call void @_ZN10SomeKernelD1Ev(%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[AKERN]]) #[[ATTR7:[0-9]+]]
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN10SomeKernel5applyILj32EEEvv
+// SIMD-ONLY0-SAME: (%struct.SomeKernel* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR4]] comdat align 2 {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SomeKernel*, align 8
+// SIMD-ONLY0-NEXT:    [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    store %struct.SomeKernel* [[THIS]], %struct.SomeKernel** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.SomeKernel*, %struct.SomeKernel** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[TARGETDEV:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL:%.*]], %struct.SomeKernel* [[THIS1]], i32 0, i32 0
+// SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load i32, i32* [[TARGETDEV]], align 4
+// SIMD-ONLY0-NEXT:    store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
+// SIMD-ONLY0-NEXT:    [[DEVPTR:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL]], %struct.SomeKernel* [[THIS1]], i32 0, i32 1
+// SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load float, float* [[DEVPTR]], align 4
+// SIMD-ONLY0-NEXT:    [[INC:%.*]] = fadd float [[TMP1]], 1.000000e+00
+// SIMD-ONLY0-NEXT:    store float [[INC]], float* [[DEVPTR]], align 4
+// SIMD-ONLY0-NEXT:    [[TARGETDEV2:%.*]] = getelementptr inbounds [[STRUCT_SOMEKERNEL]], %struct.SomeKernel* [[THIS1]], i32 0, i32 0
+// SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TARGETDEV2]], align 4
+// SIMD-ONLY0-NEXT:    [[INC3:%.*]] = add nsw i32 [[TMP2]], 1
+// SIMD-ONLY0-NEXT:    store i32 [[INC3]], i32* [[TARGETDEV2]], align 4
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN2S2C2Ev
+// SIMD-ONLY0-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8
+// SIMD-ONLY0-NEXT:    store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[A:%.*]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[THIS1]], i32 0, i32 0
+// SIMD-ONLY0-NEXT:    store i32 0, i32* [[A]], align 4
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN2S3C2Ev
+// SIMD-ONLY0-SAME: (%class.S3* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.S3*, align 8
+// SIMD-ONLY0-NEXT:    store %class.S3* [[THIS]], %class.S3** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %class.S3*, %class.S3** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[A:%.*]] = getelementptr inbounds [[CLASS_S3:%.*]], %class.S3* [[THIS1]], i32 0, i32 0
+// SIMD-ONLY0-NEXT:    store i32 0, i32* [[A]], align 4
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_target_has_device_addr_codegen.cpp
+// SIMD-ONLY0-SAME: () #[[ATTR0]] {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    call void @__cxx_global_var_init()
+// SIMD-ONLY0-NEXT:    call void @__cxx_global_var_init.1()
+// SIMD-ONLY0-NEXT:    call void @__cxx_global_var_init.2()
+// SIMD-ONLY0-NEXT:    call void @__cxx_global_var_init.3()
+// SIMD-ONLY0-NEXT:    call void @__cxx_global_var_init.4()
+// SIMD-ONLY0-NEXT:    ret void
+//

diff  --git a/clang/test/OpenMP/target_has_device_addr_codegen_01.cpp b/clang/test/OpenMP/target_has_device_addr_codegen_01.cpp
new file mode 100644
index 0000000000000..2e84648585fe0
--- /dev/null
+++ b/clang/test/OpenMP/target_has_device_addr_codegen_01.cpp
@@ -0,0 +1,466 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" --prefix-filecheck-ir-name _
+// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+
+// RUN: %clang_cc1 -no-opaque-pointers -DCK1 -verify -fopenmp-simd -fopenmp-version=51 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
+// expected-no-diagnostics
+
+struct S {
+  int a = 0;
+  int *ptr = &a;
+  int &ref = a;
+  int arr[4];
+  S() {}
+  void foo() {
+#pragma omp target has_device_addr(a, ref, ptr[0:4], arr[:a])
+    ++a, ++*ptr, ++ref, ++arr[0];
+  }
+};
+
+int main() {
+  float a = 0;
+  float *ptr = &a;
+  float &ref = a;
+  float arr[4];
+  float vla[(int)a];
+  S s;
+  s.foo();
+#pragma omp target has_device_addr(a, ref, ptr[0:4], arr[:(int)a], vla[0])
+  ++a, ++*ptr, ++ref, ++arr[0], ++vla[0];
+  return a;
+}
+
+
+// CHECK-LABEL: define {{[^@]+}}@main
+// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[A:%.*]] = alloca float, align 4
+// CHECK-NEXT:    [[PTR:%.*]] = alloca float*, align 8
+// CHECK-NEXT:    [[REF:%.*]] = alloca float*, align 8
+// CHECK-NEXT:    [[ARR:%.*]] = alloca [4 x float], align 4
+// CHECK-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
+// CHECK-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
+// CHECK-NEXT:    [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// CHECK-NEXT:    [[TMP:%.*]] = alloca float*, align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8
+// CHECK-NEXT:    store i32 0, i32* [[RETVAL]], align 4
+// CHECK-NEXT:    store float 0.000000e+00, float* [[A]], align 4
+// CHECK-NEXT:    store float* [[A]], float** [[PTR]], align 8
+// CHECK-NEXT:    store float* [[A]], float** [[REF]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[A]], align 4
+// CHECK-NEXT:    [[CONV:%.*]] = fptosi float [[TMP0]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[CONV]] to i64
+// CHECK-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
+// CHECK-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
+// CHECK-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
+// CHECK-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
+// CHECK-NEXT:    call void @_ZN1SC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
+// CHECK-NEXT:    call void @_ZN1S3fooEv(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
+// CHECK-NEXT:    [[TMP3:%.*]] = load float*, float** [[REF]], align 8
+// CHECK-NEXT:    store float* [[TMP3]], float** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = load float*, float** [[PTR]], align 8
+// CHECK-NEXT:    [[TMP5:%.*]] = load float*, float** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP6:%.*]] = load float*, float** [[PTR]], align 8
+// CHECK-NEXT:    [[TMP7:%.*]] = load float*, float** [[TMP]], align 8
+// CHECK-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[ARR]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to float**
+// CHECK-NEXT:    store float* [[A]], float** [[TMP9]], align 8
+// CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to float**
+// CHECK-NEXT:    store float* [[A]], float** [[TMP11]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP12]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i8** [[TMP13]] to float**
+// CHECK-NEXT:    store float* [[TMP6]], float** [[TMP14]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float**
+// CHECK-NEXT:    store float* [[TMP6]], float** [[TMP16]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK-NEXT:    store i8* null, i8** [[TMP17]], align 8
+// CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to float**
+// CHECK-NEXT:    store float* [[TMP7]], float** [[TMP19]], align 8
+// CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to float**
+// CHECK-NEXT:    store float* [[TMP7]], float** [[TMP21]], align 8
+// CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
+// CHECK-NEXT:    store i8* null, i8** [[TMP22]], align 8
+// CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast i8** [[TMP23]] to float**
+// CHECK-NEXT:    store float* [[ARRAYDECAY]], float** [[TMP24]], align 8
+// CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to float**
+// CHECK-NEXT:    store float* [[ARRAYDECAY]], float** [[TMP26]], align 8
+// CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
+// CHECK-NEXT:    store i8* null, i8** [[TMP27]], align 8
+// CHECK-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
+// CHECK-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
+// CHECK-NEXT:    store i64 [[TMP1]], i64* [[TMP29]], align 8
+// CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
+// CHECK-NEXT:    [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
+// CHECK-NEXT:    store i64 [[TMP1]], i64* [[TMP31]], align 8
+// CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
+// CHECK-NEXT:    store i8* null, i8** [[TMP32]], align 8
+// CHECK-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
+// CHECK-NEXT:    [[TMP34:%.*]] = bitcast i8** [[TMP33]] to float**
+// CHECK-NEXT:    store float* [[VLA]], float** [[TMP34]], align 8
+// CHECK-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
+// CHECK-NEXT:    [[TMP36:%.*]] = bitcast i8** [[TMP35]] to float**
+// CHECK-NEXT:    store float* [[VLA]], float** [[TMP36]], align 8
+// CHECK-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
+// CHECK-NEXT:    store i8* null, i8** [[TMP37]], align 8
+// CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP40]], align 4
+// CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-NEXT:    store i32 6, i32* [[TMP41]], align 4
+// CHECK-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP38]], i8*** [[TMP42]], align 8
+// CHECK-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP39]], i8*** [[TMP43]], align 8
+// CHECK-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-NEXT:    store i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP44]], align 8
+// CHECK-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP45]], align 8
+// CHECK-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP46]], align 8
+// CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP47]], align 8
+// CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP48]], align 8
+// CHECK-NEXT:    [[TMP49:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-NEXT:    [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0
+// CHECK-NEXT:    br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK:       omp_offload.failed:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27(float* [[A]], float* [[TMP4]], float* [[TMP5]], [4 x float]* [[ARR]], i64 [[TMP1]], float* [[VLA]]) #[[ATTR5:[0-9]+]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT]]
+// CHECK:       omp_offload.cont:
+// CHECK-NEXT:    [[TMP51:%.*]] = load float, float* [[A]], align 4
+// CHECK-NEXT:    [[CONV1:%.*]] = fptosi float [[TMP51]] to i32
+// CHECK-NEXT:    store i32 [[CONV1]], i32* [[RETVAL]], align 4
+// CHECK-NEXT:    [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
+// CHECK-NEXT:    call void @llvm.stackrestore(i8* [[TMP52]])
+// CHECK-NEXT:    [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4
+// CHECK-NEXT:    ret i32 [[TMP53]]
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_ZN1SC1Ev
+// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
+// CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    call void @_ZN1SC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS1]])
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_ZN1S3fooEv
+// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) #[[ATTR3:[0-9]+]] comdat align 2 {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
+// CHECK-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
+// CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
+// CHECK-NEXT:    [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[REF]], align 8
+// CHECK-NEXT:    [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
+// CHECK-NEXT:    [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 3
+// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [4 x i32], [4 x i32]* [[ARR]], i32 1
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[A]] to i8*
+// CHECK-NEXT:    [[TMP3:%.*]] = bitcast [4 x i32]* [[TMP1]] to i8*
+// CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint i8* [[TMP3]] to i64
+// CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
+// CHECK-NEXT:    [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
+// CHECK-NEXT:    [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
+// CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP8]], i8* align 8 bitcast ([5 x i64]* @.offload_sizes.1 to i8*), i64 40, i1 false)
+// CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.S**
+// CHECK-NEXT:    store %struct.S* [[THIS1]], %struct.S** [[TMP10]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32**
+// CHECK-NEXT:    store i32* [[A]], i32** [[TMP12]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NEXT:    store i64 [[TMP7]], i64* [[TMP13]], align 8
+// CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK-NEXT:    store i8* null, i8** [[TMP14]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to %struct.S**
+// CHECK-NEXT:    store %struct.S* [[THIS1]], %struct.S** [[TMP16]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
+// CHECK-NEXT:    store i32* [[A]], i32** [[TMP18]], align 8
+// CHECK-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK-NEXT:    store i8* null, i8** [[TMP19]], align 8
+// CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.S**
+// CHECK-NEXT:    store %struct.S* [[THIS1]], %struct.S** [[TMP21]], align 8
+// CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
+// CHECK-NEXT:    store i32* [[TMP0]], i32** [[TMP23]], align 8
+// CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
+// CHECK-NEXT:    store i8* null, i8** [[TMP24]], align 8
+// CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to %struct.S**
+// CHECK-NEXT:    store %struct.S* [[THIS1]], %struct.S** [[TMP26]], align 8
+// CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32***
+// CHECK-NEXT:    store i32** [[PTR]], i32*** [[TMP28]], align 8
+// CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
+// CHECK-NEXT:    store i8* null, i8** [[TMP29]], align 8
+// CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
+// CHECK-NEXT:    [[TMP31:%.*]] = bitcast i8** [[TMP30]] to %struct.S**
+// CHECK-NEXT:    store %struct.S* [[THIS1]], %struct.S** [[TMP31]], align 8
+// CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
+// CHECK-NEXT:    [[TMP33:%.*]] = bitcast i8** [[TMP32]] to [4 x i32]**
+// CHECK-NEXT:    store [4 x i32]* [[ARR]], [4 x i32]** [[TMP33]], align 8
+// CHECK-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
+// CHECK-NEXT:    store i8* null, i8** [[TMP34]], align 8
+// CHECK-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
+// CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-NEXT:    store i32 1, i32* [[TMP38]], align 4
+// CHECK-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-NEXT:    store i32 5, i32* [[TMP39]], align 4
+// CHECK-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-NEXT:    store i8** [[TMP35]], i8*** [[TMP40]], align 8
+// CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-NEXT:    store i8** [[TMP36]], i8*** [[TMP41]], align 8
+// CHECK-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-NEXT:    store i64* [[TMP37]], i64** [[TMP42]], align 8
+// CHECK-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-NEXT:    store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.2, i32 0, i32 0), i64** [[TMP43]], align 8
+// CHECK-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-NEXT:    store i8** null, i8*** [[TMP44]], align 8
+// CHECK-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-NEXT:    store i8** null, i8*** [[TMP45]], align 8
+// CHECK-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-NEXT:    store i64 0, i64* [[TMP46]], align 8
+// CHECK-NEXT:    [[TMP47:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-NEXT:    [[TMP48:%.*]] = icmp ne i32 [[TMP47]], 0
+// CHECK-NEXT:    br i1 [[TMP48]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK:       omp_offload.failed:
+// CHECK-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14(%struct.S* [[THIS1]]) #[[ATTR5]]
+// CHECK-NEXT:    br label [[OMP_OFFLOAD_CONT]]
+// CHECK:       omp_offload.cont:
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27
+// CHECK-SAME: (float* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], float* noundef [[PTR:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[REF:%.*]], [4 x float]* noundef nonnull align 4 dereferenceable(16) [[ARR:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[VLA1:%.*]]) #[[ATTR4:[0-9]+]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca float*, align 8
+// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca float*, align 8
+// CHECK-NEXT:    [[REF_ADDR:%.*]] = alloca float*, align 8
+// CHECK-NEXT:    [[ARR_ADDR:%.*]] = alloca [4 x float]*, align 8
+// CHECK-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
+// CHECK-NEXT:    [[VLA_ADDR2:%.*]] = alloca float*, align 8
+// CHECK-NEXT:    [[TMP:%.*]] = alloca float*, align 8
+// CHECK-NEXT:    store float* [[A]], float** [[A_ADDR]], align 8
+// CHECK-NEXT:    store float* [[PTR]], float** [[PTR_ADDR]], align 8
+// CHECK-NEXT:    store float* [[REF]], float** [[REF_ADDR]], align 8
+// CHECK-NEXT:    store [4 x float]* [[ARR]], [4 x float]** [[ARR_ADDR]], align 8
+// CHECK-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
+// CHECK-NEXT:    store float* [[VLA1]], float** [[VLA_ADDR2]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load float*, float** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load float*, float** [[REF_ADDR]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = load [4 x float]*, [4 x float]** [[ARR_ADDR]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = load float*, float** [[VLA_ADDR2]], align 8
+// CHECK-NEXT:    store float* [[TMP1]], float** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP5:%.*]] = load float, float* [[TMP0]], align 4
+// CHECK-NEXT:    [[INC:%.*]] = fadd float [[TMP5]], 1.000000e+00
+// CHECK-NEXT:    store float [[INC]], float* [[TMP0]], align 4
+// CHECK-NEXT:    [[TMP6:%.*]] = load float*, float** [[PTR_ADDR]], align 8
+// CHECK-NEXT:    [[TMP7:%.*]] = load float, float* [[TMP6]], align 4
+// CHECK-NEXT:    [[INC3:%.*]] = fadd float [[TMP7]], 1.000000e+00
+// CHECK-NEXT:    store float [[INC3]], float* [[TMP6]], align 4
+// CHECK-NEXT:    [[TMP8:%.*]] = load float*, float** [[TMP]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = load float, float* [[TMP8]], align 4
+// CHECK-NEXT:    [[INC4:%.*]] = fadd float [[TMP9]], 1.000000e+00
+// CHECK-NEXT:    store float [[INC4]], float* [[TMP8]], align 4
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[TMP2]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4
+// CHECK-NEXT:    [[INC5:%.*]] = fadd float [[TMP10]], 1.000000e+00
+// CHECK-NEXT:    store float [[INC5]], float* [[ARRAYIDX]], align 4
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds float, float* [[TMP4]], i64 0
+// CHECK-NEXT:    [[TMP11:%.*]] = load float, float* [[ARRAYIDX6]], align 4
+// CHECK-NEXT:    [[INC7:%.*]] = fadd float [[TMP11]], 1.000000e+00
+// CHECK-NEXT:    store float [[INC7]], float* [[ARRAYIDX6]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@_ZN1SC2Ev
+// CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
+// CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
+// CHECK-NEXT:    store i32 0, i32* [[A]], align 8
+// CHECK-NEXT:    [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
+// CHECK-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
+// CHECK-NEXT:    store i32* [[A2]], i32** [[PTR]], align 8
+// CHECK-NEXT:    [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
+// CHECK-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
+// CHECK-NEXT:    store i32* [[A3]], i32** [[REF]], align 8
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN1S3fooEv_l14
+// CHECK-SAME: (%struct.S* noundef [[THIS:%.*]]) #[[ATTR4]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
+// CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
+// CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[TMP0]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A]], align 8
+// CHECK-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
+// CHECK-NEXT:    store i32 [[INC]], i32* [[A]], align 8
+// CHECK-NEXT:    [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[PTR]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
+// CHECK-NEXT:    [[INC1:%.*]] = add nsw i32 [[TMP3]], 1
+// CHECK-NEXT:    store i32 [[INC1]], i32* [[TMP2]], align 4
+// CHECK-NEXT:    [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 2
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[REF]], align 8
+// CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
+// CHECK-NEXT:    [[INC2:%.*]] = add nsw i32 [[TMP5]], 1
+// CHECK-NEXT:    store i32 [[INC2]], i32* [[TMP4]], align 4
+// CHECK-NEXT:    [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 3
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[INC3:%.*]] = add nsw i32 [[TMP6]], 1
+// CHECK-NEXT:    store i32 [[INC3]], i32* [[ARRAYIDX]], align 8
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
+// CHECK-SAME: () #[[ATTR7:[0-9]+]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @__tgt_register_requires(i64 1)
+// CHECK-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@main
+// SIMD-ONLY0-SAME: () #[[ATTR0:[0-9]+]] {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
+// SIMD-ONLY0-NEXT:    [[A:%.*]] = alloca float, align 4
+// SIMD-ONLY0-NEXT:    [[PTR:%.*]] = alloca float*, align 8
+// SIMD-ONLY0-NEXT:    [[REF:%.*]] = alloca float*, align 8
+// SIMD-ONLY0-NEXT:    [[ARR:%.*]] = alloca [4 x float], align 4
+// SIMD-ONLY0-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
+// SIMD-ONLY0-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
+// SIMD-ONLY0-NEXT:    [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP:%.*]] = alloca float*, align 8
+// SIMD-ONLY0-NEXT:    store i32 0, i32* [[RETVAL]], align 4
+// SIMD-ONLY0-NEXT:    store float 0.000000e+00, float* [[A]], align 4
+// SIMD-ONLY0-NEXT:    store float* [[A]], float** [[PTR]], align 8
+// SIMD-ONLY0-NEXT:    store float* [[A]], float** [[REF]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load float, float* [[A]], align 4
+// SIMD-ONLY0-NEXT:    [[CONV:%.*]] = fptosi float [[TMP0]] to i32
+// SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = zext i32 [[CONV]] to i64
+// SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
+// SIMD-ONLY0-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
+// SIMD-ONLY0-NEXT:    [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
+// SIMD-ONLY0-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
+// SIMD-ONLY0-NEXT:    call void @_ZN1SC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
+// SIMD-ONLY0-NEXT:    call void @_ZN1S3fooEv(%struct.S* noundef nonnull align 8 dereferenceable(40) [[S]])
+// SIMD-ONLY0-NEXT:    [[TMP3:%.*]] = load float*, float** [[REF]], align 8
+// SIMD-ONLY0-NEXT:    store float* [[TMP3]], float** [[TMP]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP4:%.*]] = load float*, float** [[REF]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP5:%.*]] = load float, float* [[A]], align 4
+// SIMD-ONLY0-NEXT:    [[INC:%.*]] = fadd float [[TMP5]], 1.000000e+00
+// SIMD-ONLY0-NEXT:    store float [[INC]], float* [[A]], align 4
+// SIMD-ONLY0-NEXT:    [[TMP6:%.*]] = load float*, float** [[PTR]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP7:%.*]] = load float, float* [[TMP6]], align 4
+// SIMD-ONLY0-NEXT:    [[INC1:%.*]] = fadd float [[TMP7]], 1.000000e+00
+// SIMD-ONLY0-NEXT:    store float [[INC1]], float* [[TMP6]], align 4
+// SIMD-ONLY0-NEXT:    [[TMP8:%.*]] = load float*, float** [[TMP]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP9:%.*]] = load float, float* [[TMP8]], align 4
+// SIMD-ONLY0-NEXT:    [[INC2:%.*]] = fadd float [[TMP9]], 1.000000e+00
+// SIMD-ONLY0-NEXT:    store float [[INC2]], float* [[TMP8]], align 4
+// SIMD-ONLY0-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x float], [4 x float]* [[ARR]], i64 0, i64 0
+// SIMD-ONLY0-NEXT:    [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4
+// SIMD-ONLY0-NEXT:    [[INC3:%.*]] = fadd float [[TMP10]], 1.000000e+00
+// SIMD-ONLY0-NEXT:    store float [[INC3]], float* [[ARRAYIDX]], align 4
+// SIMD-ONLY0-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 0
+// SIMD-ONLY0-NEXT:    [[TMP11:%.*]] = load float, float* [[ARRAYIDX4]], align 4
+// SIMD-ONLY0-NEXT:    [[INC5:%.*]] = fadd float [[TMP11]], 1.000000e+00
+// SIMD-ONLY0-NEXT:    store float [[INC5]], float* [[ARRAYIDX4]], align 4
+// SIMD-ONLY0-NEXT:    [[TMP12:%.*]] = load float, float* [[A]], align 4
+// SIMD-ONLY0-NEXT:    [[CONV6:%.*]] = fptosi float [[TMP12]] to i32
+// SIMD-ONLY0-NEXT:    store i32 [[CONV6]], i32* [[RETVAL]], align 4
+// SIMD-ONLY0-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
+// SIMD-ONLY0-NEXT:    call void @llvm.stackrestore(i8* [[TMP13]])
+// SIMD-ONLY0-NEXT:    [[TMP14:%.*]] = load i32, i32* [[RETVAL]], align 4
+// SIMD-ONLY0-NEXT:    ret i32 [[TMP14]]
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SC1Ev
+// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
+// SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    call void @_ZN1SC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS1]])
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1S3fooEv
+// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) #[[ATTR3:[0-9]+]] comdat align 2 {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
+// SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
+// SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 8
+// SIMD-ONLY0-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
+// SIMD-ONLY0-NEXT:    store i32 [[INC]], i32* [[A]], align 8
+// SIMD-ONLY0-NEXT:    [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
+// SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[PTR]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
+// SIMD-ONLY0-NEXT:    [[INC2:%.*]] = add nsw i32 [[TMP2]], 1
+// SIMD-ONLY0-NEXT:    store i32 [[INC2]], i32* [[TMP1]], align 4
+// SIMD-ONLY0-NEXT:    [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
+// SIMD-ONLY0-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[REF]], align 8
+// SIMD-ONLY0-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
+// SIMD-ONLY0-NEXT:    [[INC3:%.*]] = add nsw i32 [[TMP4]], 1
+// SIMD-ONLY0-NEXT:    store i32 [[INC3]], i32* [[TMP3]], align 4
+// SIMD-ONLY0-NEXT:    [[ARR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 3
+// SIMD-ONLY0-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
+// SIMD-ONLY0-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 8
+// SIMD-ONLY0-NEXT:    [[INC4:%.*]] = add nsw i32 [[TMP5]], 1
+// SIMD-ONLY0-NEXT:    store i32 [[INC4]], i32* [[ARRAYIDX]], align 8
+// SIMD-ONLY0-NEXT:    ret void
+//
+//
+// SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SC2Ev
+// SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(40) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
+// SIMD-ONLY0-NEXT:  entry:
+// SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
+// SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
+// SIMD-ONLY0-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
+// SIMD-ONLY0-NEXT:    store i32 0, i32* [[A]], align 8
+// SIMD-ONLY0-NEXT:    [[PTR:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 1
+// SIMD-ONLY0-NEXT:    [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
+// SIMD-ONLY0-NEXT:    store i32* [[A2]], i32** [[PTR]], align 8
+// SIMD-ONLY0-NEXT:    [[REF:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 2
+// SIMD-ONLY0-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[THIS1]], i32 0, i32 0
+// SIMD-ONLY0-NEXT:    store i32* [[A3]], i32** [[REF]], align 8
+// SIMD-ONLY0-NEXT:    ret void
+//

diff  --git a/openmp/libomptarget/test/mapping/has_device_addr.cpp b/openmp/libomptarget/test/mapping/has_device_addr.cpp
new file mode 100644
index 0000000000000..63fa313cf2780
--- /dev/null
+++ b/openmp/libomptarget/test/mapping/has_device_addr.cpp
@@ -0,0 +1,33 @@
+// RUN: %libomptarget-compilexx-generic -fopenmp-version=51
+// RUN: %libomptarget-run-generic 2>&1 \
+// RUN: | %fcheck-generic
+
+#include <assert.h>
+#include <iostream>
+#include <omp.h>
+
+// Regression test for the OpenMP 5.1 has_device_addr clause on a combined
+// construct: data_device already holds a device address (from
+// omp_target_alloc), so has_device_addr tells the runtime to use it as-is
+// inside the target region instead of mapping it.
+struct view {
+  const int size = 10;
+  int *data_host;   // host buffer, filled back via omp_target_memcpy
+  int *data_device; // device buffer allocated with omp_target_alloc
+  void foo() {
+    std::size_t bytes = size * sizeof(int);
+    const int host_id = omp_get_initial_device();
+    const int device_id = omp_get_default_device();
+    data_host = (int *)malloc(bytes);
+    data_device = (int *)omp_target_alloc(bytes, device_id);
+// data_device[0] names the device storage directly; no implicit map occurs.
+#pragma omp target teams distribute parallel for has_device_addr(data_device[0])
+    for (int i = 0; i < size; ++i)
+      data_device[i] = i;
+    // Copy device results back to the host buffer, then verify each element.
+    omp_target_memcpy(data_host, data_device, bytes, 0, 0, host_id, device_id);
+    for (int i = 0; i < size; ++i)
+      assert(data_host[i] == i);
+  }
+};
+
+// Driver: runs the device-address test; reaching the final printf (i.e. no
+// assert fired inside foo) is what the FileCheck pattern verifies.
+int main() {
+  view a;
+  a.foo();
+  // CHECK: PASSED
+  printf("PASSED\n");
+}

diff  --git a/openmp/libomptarget/test/mapping/target_has_device_addr.c b/openmp/libomptarget/test/mapping/target_has_device_addr.c
new file mode 100644
index 0000000000000..897b9e4837ef8
--- /dev/null
+++ b/openmp/libomptarget/test/mapping/target_has_device_addr.c
@@ -0,0 +1,102 @@
+// RUN: %libomptarget-compile-generic -fopenmp-version=51
+// RUN: %libomptarget-run-generic 2>&1 \
+// RUN: | %fcheck-generic
+
+// UNSUPPORTED: amdgcn-amd-amdhsa
+// UNSUPPORTED: amdgcn-amd-amdhsa-LTO
+
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define N 1024
+#define FROM 64
+#define LENGTH 128
+
+// Checks that a device pointer obtained from omp_target_alloc and passed via
+// has_device_addr(A[FROM:LENGTH]) is seen unchanged inside the target region:
+// the address captured on the device must equal the host-side value of A.
+void foo() {
+  const int device_id = omp_get_default_device();
+  float *A;
+  A = (float *)omp_target_alloc((FROM + LENGTH) * sizeof(float), device_id);
+
+  float *A_dev = NULL;
+#pragma omp target has_device_addr(A [FROM:LENGTH]) map(A_dev)
+  { A_dev = A; }
+  // CHECK: Success
+  // The NULL test also guards against omp_target_alloc having failed, in
+  // which case A == A_dev == NULL would otherwise report a false Success.
+  if (A_dev == NULL || A_dev != A)
+    fprintf(stderr, "Failure %p %p \n", A_dev, A);
+  else
+    fprintf(stderr, "Success\n");
+}
+
+// Pairs use_device_addr on an enclosing target-data region with
+// has_device_addr on the inner target: xp is rewritten to the device address
+// by use_device_addr, so has_device_addr must accept it unmapped. The
+// tofrom mapping copies the device write (222) back, so both the device-side
+// and the host-side printf are expected to show 222.
+void bar() {
+  short x[10];
+  short *xp = &x[0];
+
+  x[1] = 111;
+#pragma omp target data map(tofrom : xp [0:2]) use_device_addr(xp [0:2])
+#pragma omp target has_device_addr(xp [0:2])
+  {
+    xp[1] = 222;
+    // CHECK: 222
+    printf("%d %p\n", xp[1], &xp[1]);
+  }
+  // CHECK: 222
+  printf("%d %p\n", xp[1], &xp[1]);
+}
+
+// Same use_device_addr / has_device_addr pairing as bar(), but with a heap
+// pointer deliberately shifted down by one element so that b[1] is the only
+// valid element — exercises non-zero-based section lower bounds.
+void moo() {
+  short *b = malloc(sizeof(short));
+  b = b - 1; // b[1] now aliases the single allocated short
+
+  b[1] = 111;
+#pragma omp target data map(tofrom : b[1]) use_device_addr(b[1])
+#pragma omp target has_device_addr(b[1])
+  {
+    b[1] = 222;
+    // CHECK: 222
+    printf("%hd %p %p %p\n", b[1], b, &b[1], &b);
+  }
+  // CHECK: 222
+  printf("%hd %p %p %p\n", b[1], b, &b[1], &b);
+}
+
+// Pointer-to-pointer variant: the list item xpp[1][1] requires two levels of
+// indirection, checking that has_device_addr handles a doubly-subscripted
+// base expression previously rewritten by use_device_addr.
+void zoo() {
+  short x[10];
+  short *(xp[10]);
+  xp[1] = &x[0];
+  short **xpp = &xp[0];
+
+  x[1] = 111;
+#pragma omp target data map(tofrom : xpp[1][1]) use_device_addr(xpp[1][1])
+#pragma omp target has_device_addr(xpp[1][1])
+  {
+    xpp[1][1] = 222;
+    // CHECK: 222
+    printf("%d %p %p\n", xpp[1][1], xpp[1], &xpp[1][1]);
+  }
+  // CHECK: 222
+  printf("%d %p %p\n", xpp[1][1], xpp[1], &xpp[1][1]);
+}
+// Two has_device_addr clauses on one construct (whole array `a`, element
+// b[0]). The arrays are mapped `to` only, so device writes of 222 are NOT
+// copied back: the region prints 222 222, while the host printf afterwards
+// still sees the original 111 values.
+void xoo() {
+  short a[10], b[10];
+  a[1] = 111;
+  b[1] = 111;
+#pragma omp target data map(to : a [0:2], b [0:2]) use_device_addr(a, b)
+#pragma omp target has_device_addr(a) has_device_addr(b[0])
+  {
+    a[1] = 222;
+    b[1] = 222;
+    // CHECK: 222 222
+    printf("%hd %hd %p %p %p\n", a[1], b[1], &a, b, &b);
+  }
+  // CHECK:111
+  printf("%hd %hd %p %p %p\n", a[1], b[1], &a, b, &b); // 111 111 p1d p2d p3d
+}
+// Driver: runs every has_device_addr scenario in the order the FileCheck
+// patterns above expect their output.
+int main() {
+  foo();
+  bar();
+  moo();
+  zoo();
+  xoo();
+  return 0;
+}


        


More information about the Openmp-commits mailing list