[clang] 8421307 - [Clang] Convert some tests to opaque pointers (NFC)

Nikita Popov via cfe-commits cfe-commits@lists.llvm.org
Thu Feb 16 06:48:20 PST 2023


Author: Nikita Popov
Date: 2023-02-16T15:48:10+01:00
New Revision: 8421307b6b165778260b9814ca4d2256bc3711df

URL: https://github.com/llvm/llvm-project/commit/8421307b6b165778260b9814ca4d2256bc3711df
DIFF: https://github.com/llvm/llvm-project/commit/8421307b6b165778260b9814ca4d2256bc3711df.diff

LOG: [Clang] Convert some tests to opaque pointers (NFC)

Added: 
    

Modified: 
    clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp
    clang/test/CodeGen/VE/ve-velintrin.c
    clang/test/CodeGen/alias.c
    clang/test/CodeGen/attributes.c
    clang/test/CodeGen/builtin-cpu-is.c
    clang/test/CodeGen/builtins-nvptx-ptx50.cu
    clang/test/CodeGen/global-blocks-win32.c
    clang/test/CodeGen/pr4349.c
    clang/test/CodeGen/volatile-2.c
    clang/test/CodeGen/volatile-complex.c
    clang/test/CodeGenCXX/builtin-bit-cast.cpp
    clang/test/CodeGenCXX/call-conv-thru-alias.cpp
    clang/test/CodeGenCXX/delete-two-arg.cpp
    clang/test/CodeGenCXX/microsoft-abi-structors-alias.cpp
    clang/test/CodeGenCoroutines/coro-always-inline-exp-namespace.cpp
    clang/test/CodeGenCoroutines/coro-always-inline.cpp
    clang/test/CodeGenCoroutines/coro-builtins.c
    clang/test/CodeGenCoroutines/coro-symmetric-transfer-02-exp-namespace.cpp
    clang/test/CodeGenCoroutines/coro-symmetric-transfer-02.cpp
    clang/test/CodeGenObjC/arc-property.m
    clang/test/CodeGenObjC/blocks-2.m
    clang/test/CodeGenObjC/builtin-constant-p.m
    clang/test/CodeGenObjC/class-stubs.m
    clang/test/Index/pipe-size.cl
    clang/test/OpenMP/target_map_codegen_hold.cpp
    clang/test/SemaOpenCL/block-array-capturing.cl

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp b/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp
index 252ae36d9d8e5..4c91fefad749d 100644
--- a/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp
+++ b/clang/test/CodeGen/RISCV/riscv-v-lifetime.cpp
@@ -1,5 +1,5 @@
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -no-opaque-pointers -std=c++11 -triple riscv64 -target-feature +v \
+// RUN: %clang_cc1 -std=c++11 -triple riscv64 -target-feature +v \
 // RUN:   -O1 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s
 
 #include <riscv_vector.h>
@@ -8,16 +8,12 @@ vint32m1_t Baz();
 
 // CHECK-LABEL: @_Z4Testv(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[A:%.*]] = alloca <vscale x 2 x i32>*, align 8
+// CHECK-NEXT:    [[A:%.*]] = alloca ptr, align 8
 // CHECK-NEXT:    [[REF_TMP:%.*]] = alloca <vscale x 2 x i32>, align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32>** [[A]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 8, i8* [[TMP0]]) #[[ATTR3:[0-9]+]]
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <vscale x 2 x i32>* [[REF_TMP]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[TMP1]]) #[[ATTR3]]
-// CHECK:         [[TMP4:%.*]] = bitcast <vscale x 2 x i32>* [[REF_TMP]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[TMP4]]) #[[ATTR3]]
-// CHECK-NEXT:    [[TMP5:%.*]] = bitcast <vscale x 2 x i32>** [[A]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 8, i8* [[TMP5]]) #[[ATTR3]]
+// CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr [[A]]) #[[ATTR3:[0-9]+]]
+// CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[REF_TMP]]) #[[ATTR3]]
+// CHECK:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[REF_TMP]]) #[[ATTR3]]
+// CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 8, ptr [[A]]) #[[ATTR3]]
 //
 vint32m1_t Test() {
   const vint32m1_t &a = Baz();

diff --git a/clang/test/CodeGen/VE/ve-velintrin.c b/clang/test/CodeGen/VE/ve-velintrin.c
index f9942751fff9c..9f68b79e54760 100644
--- a/clang/test/CodeGen/VE/ve-velintrin.c
+++ b/clang/test/CodeGen/VE/ve-velintrin.c
@@ -1,7 +1,7 @@
 // REQUIRES: ve-registered-target
 
 // RUN: %clang_cc1 -S -emit-llvm -triple ve-unknown-linux-gnu \
-// RUN:   -no-opaque-pointers -ffreestanding %s -o - | FileCheck %s
+// RUN:   -ffreestanding %s -o - | FileCheck %s
 
 #include <velintrin.h>
 
@@ -15,574 +15,574 @@ __vm512 vm1_512, vm2_512, vm3_512;
 void __attribute__((noinline))
 test_vld_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vld_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vld.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vld.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vld_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vld_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vld_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vld.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vld.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vld_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldnc_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldnc_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldnc_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldnc_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldnc_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldu_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldu_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldu.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldu.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldu_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldu_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldu_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldu.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldu.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldu_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldunc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldunc_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldunc_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldunc_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldunc_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldunc_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldlsx_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldlsx_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldlsx_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldlsx_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldlsx_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldlsx_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldlsxnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldlsxnc_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldlsxnc_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldlsxnc_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldlsxnc_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldlsxnc_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldlzx_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldlzx_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldlzx_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldlzx_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldlzx_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldlzx_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldlzxnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldlzxnc_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldlzxnc_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldlzxnc_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldlzxnc_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldlzxnc_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vld2d_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vld2d_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vld2d_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vld2d_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vld2d_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vld2d_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vld2dnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vld2dnc_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vld2dnc_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vld2dnc_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vld2dnc_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vld2dnc_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldu2d_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldu2d_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldu2d_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldu2d_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldu2d_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldu2d_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldu2dnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldu2dnc_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldu2dnc_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldu2dnc_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldu2dnc_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldu2dnc_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldl2dsx_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldl2dsx_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldl2dsx_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldl2dsx_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldl2dsx_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldl2dsx_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldl2dsxnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldl2dsxnc_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldl2dsxnc_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldl2dsxnc_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldl2dsxnc_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldl2dsxnc_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldl2dzx_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldl2dzx_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldl2dzx_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldl2dzx_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldl2dzx_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldl2dzx_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vldl2dzxnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vldl2dzxnc_vssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   vr1 = _vel_vldl2dzxnc_vssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vldl2dzxnc_vssvl(char* p, long idx) {
   // CHECK-LABEL: @test_vldl2dzxnc_vssvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  // CHECK: call <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 %{{.*}}, ptr %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldl2dzxnc_vssvl(idx, p, vr1, 256);
 }
 
 void __attribute__((noinline))
 test_vst_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vst_vssl
-  // CHECK: call void @llvm.ve.vl.vst.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vst.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vst_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vst_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vst_vssml
-  // CHECK: call void @llvm.ve.vl.vst.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vst.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vst_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstnc_vssl
-  // CHECK: call void @llvm.ve.vl.vstnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstnc_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstnc_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstnc_vssml
-  // CHECK: call void @llvm.ve.vl.vstnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstnc_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstot_vssl
-  // CHECK: call void @llvm.ve.vl.vstot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstot_vssml
-  // CHECK: call void @llvm.ve.vl.vstot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstncot_vssl
-  // CHECK: call void @llvm.ve.vl.vstncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstncot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstncot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstncot_vssml
-  // CHECK: call void @llvm.ve.vl.vstncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstncot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstu_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstu_vssl
-  // CHECK: call void @llvm.ve.vl.vstu.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstu.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstu_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstu_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstu_vssml
-  // CHECK: call void @llvm.ve.vl.vstu.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstu.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstu_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstunc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstunc_vssl
-  // CHECK: call void @llvm.ve.vl.vstunc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstunc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstunc_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstunc_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstunc_vssml
-  // CHECK: call void @llvm.ve.vl.vstunc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstunc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstunc_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstuot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstuot_vssl
-  // CHECK: call void @llvm.ve.vl.vstuot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstuot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstuot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstuot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstuot_vssml
-  // CHECK: call void @llvm.ve.vl.vstuot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstuot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstuot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstuncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstuncot_vssl
-  // CHECK: call void @llvm.ve.vl.vstuncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstuncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstuncot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstuncot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstuncot_vssml
-  // CHECK: call void @llvm.ve.vl.vstuncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstuncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstuncot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstl_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstl_vssl
-  // CHECK: call void @llvm.ve.vl.vstl.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstl.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstl_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstl_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstl_vssml
-  // CHECK: call void @llvm.ve.vl.vstl.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstl.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstl_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstlnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstlnc_vssl
-  // CHECK: call void @llvm.ve.vl.vstlnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstlnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstlnc_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstlnc_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstlnc_vssml
-  // CHECK: call void @llvm.ve.vl.vstlnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstlnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstlnc_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstlot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstlot_vssl
-  // CHECK: call void @llvm.ve.vl.vstlot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstlot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstlot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstlot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstlot_vssml
-  // CHECK: call void @llvm.ve.vl.vstlot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstlot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstlot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstlncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstlncot_vssl
-  // CHECK: call void @llvm.ve.vl.vstlncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstlncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstlncot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstlncot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstlncot_vssml
-  // CHECK: call void @llvm.ve.vl.vstlncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstlncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstlncot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vst2d_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vst2d_vssl
-  // CHECK: call void @llvm.ve.vl.vst2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vst2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vst2d_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vst2d_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vst2d_vssml
-  // CHECK: call void @llvm.ve.vl.vst2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vst2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vst2d_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vst2dnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vst2dnc_vssl
-  // CHECK: call void @llvm.ve.vl.vst2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vst2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vst2dnc_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vst2dnc_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vst2dnc_vssml
-  // CHECK: call void @llvm.ve.vl.vst2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vst2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vst2dnc_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vst2dot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vst2dot_vssl
-  // CHECK: call void @llvm.ve.vl.vst2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vst2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vst2dot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vst2dot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vst2dot_vssml
-  // CHECK: call void @llvm.ve.vl.vst2dot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vst2dot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vst2dot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vst2dncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vst2dncot_vssl
-  // CHECK: call void @llvm.ve.vl.vst2dncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vst2dncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vst2dncot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vst2dncot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vst2dncot_vssml
-  // CHECK: call void @llvm.ve.vl.vst2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vst2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vst2dncot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstu2d_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2d_vssl
-  // CHECK: call void @llvm.ve.vl.vstu2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstu2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstu2d_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstu2d_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2d_vssml
-  // CHECK: call void @llvm.ve.vl.vstu2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstu2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstu2d_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstu2dnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2dnc_vssl
-  // CHECK: call void @llvm.ve.vl.vstu2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstu2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstu2dnc_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstu2dnc_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2dnc_vssml
-  // CHECK: call void @llvm.ve.vl.vstu2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstu2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstu2dnc_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstu2dot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2dot_vssl
-  // CHECK: call void @llvm.ve.vl.vstu2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstu2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstu2dot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstu2dot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2dot_vssml
-  // CHECK: call void @llvm.ve.vl.vstu2dot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstu2dot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstu2dot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstu2dncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2dncot_vssl
-  // CHECK: call void @llvm.ve.vl.vstu2dncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstu2dncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstu2dncot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstu2dncot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2dncot_vssml
-  // CHECK: call void @llvm.ve.vl.vstu2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstu2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstu2dncot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstl2d_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2d_vssl
-  // CHECK: call void @llvm.ve.vl.vstl2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstl2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstl2d_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstl2d_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2d_vssml
-  // CHECK: call void @llvm.ve.vl.vstl2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstl2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstl2d_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstl2dnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2dnc_vssl
-  // CHECK: call void @llvm.ve.vl.vstl2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstl2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstl2dnc_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstl2dnc_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2dnc_vssml
-  // CHECK: call void @llvm.ve.vl.vstl2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstl2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstl2dnc_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstl2dot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2dot_vssl
-  // CHECK: call void @llvm.ve.vl.vstl2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstl2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstl2dot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstl2dot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2dot_vssml
-  // CHECK: call void @llvm.ve.vl.vstl2dot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstl2dot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstl2dot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_vstl2dncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2dncot_vssl
-  // CHECK: call void @llvm.ve.vl.vstl2dncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstl2dncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_vstl2dncot_vssl(vr1, idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_vstl2dncot_vssml(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2dncot_vssml
-  // CHECK: call void @llvm.ve.vl.vstl2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.vstl2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, ptr %{{.*}}, <256 x i1> %{{.*}}, i32 256)
   _vel_vstl2dncot_vssml(vr1, idx, p, vm1, 256);
 }
 
 void __attribute__((noinline))
 test_pfchv_ssl(char* p, long idx) {
   // CHECK-LABEL: @test_pfchv_ssl
-  // CHECK: call void @llvm.ve.vl.pfchv.ssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.pfchv.ssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_pfchv_ssl(idx, p, 256);
 }
 
 void __attribute__((noinline))
 test_pfchvnc_ssl(char* p, long idx) {
   // CHECK-LABEL: @test_pfchvnc_ssl
-  // CHECK: call void @llvm.ve.vl.pfchvnc.ssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  // CHECK: call void @llvm.ve.vl.pfchvnc.ssl(i64 %{{.*}}, ptr %{{.*}}, i32 256)
   _vel_pfchvnc_ssl(idx, p, 256);
 }
 
@@ -8815,17 +8815,14 @@ test_svob() {
 void __attribute__((noinline))
 test_pack_f32p(float* p1, float* p2) {
   // CHECK-LABEL: @test_pack_f32p
-  // CHECK: %[[VAR1:[A-Za-z0-9.]+]] = bitcast float* %{{.*}} to i8*
-  // CHECK: %[[VAR2:[A-Za-z0-9.]+]] = bitcast float* %{{.*}} to i8*
-  // CHECK-NEXT: call i64 @llvm.ve.vl.pack.f32p(i8* %[[VAR1]], i8* %[[VAR2]])
+  // CHECK: call i64 @llvm.ve.vl.pack.f32p(ptr %{{.*}}, ptr %{{.*}})
   v1 = _vel_pack_f32p(p1, p2);
 }
 
 void __attribute__((noinline))
 test_pack_f32a(float* p) {
   // CHECK-LABEL: @test_pack_f32a
-  // CHECK: %[[VAR3:[A-Za-z0-9.]+]] = bitcast float* %{{.*}} to i8*
-  // CHECK-NEXT: call i64 @llvm.ve.vl.pack.f32a(i8* %[[VAR3]])
+  // CHECK: call i64 @llvm.ve.vl.pack.f32a(ptr %{{.*}})
   v1 = _vel_pack_f32a(p);
 }
 

diff --git a/clang/test/CodeGen/alias.c b/clang/test/CodeGen/alias.c
index 0897f354fd8f9..bc4167adf53f6 100644
--- a/clang/test/CodeGen/alias.c
+++ b/clang/test/CodeGen/alias.c
@@ -1,8 +1,8 @@
 // REQUIRES: arm-registered-target
-// RUN: %clang_cc1 -no-opaque-pointers -triple i386-pc-linux-gnu -emit-llvm -o - %s | FileCheck -check-prefix=CHECKBASIC %s
-// RUN: %clang_cc1 -no-opaque-pointers -triple armv7a-eabi -mfloat-abi hard -emit-llvm -o - %s | FileCheck -check-prefix=CHECKCC %s
-// RUN: %clang_cc1 -no-opaque-pointers -triple armv7a-eabi -mfloat-abi hard -S -o - %s | FileCheck -check-prefix=CHECKASM %s
-// RUN: %clang_cc1 -no-opaque-pointers -triple aarch64-linux-gnu -emit-llvm -o - %s | FileCheck -check-prefix=CHECKGLOBALS %s
+// RUN: %clang_cc1 -triple i386-pc-linux-gnu -emit-llvm -o - %s | FileCheck -check-prefix=CHECKBASIC %s
+// RUN: %clang_cc1 -triple armv7a-eabi -mfloat-abi hard -emit-llvm -o - %s | FileCheck -check-prefix=CHECKCC %s
+// RUN: %clang_cc1 -triple armv7a-eabi -mfloat-abi hard -S -o - %s | FileCheck -check-prefix=CHECKASM %s
+// RUN: %clang_cc1 -triple aarch64-linux-gnu -emit-llvm -o - %s | FileCheck -check-prefix=CHECKGLOBALS %s
 
 int g0;
 // CHECKBASIC-DAG: @g0 ={{.*}} global i32 0
@@ -27,20 +27,20 @@ const int wacom_usb_ids[] = {1, 1, 2, 3, 5, 8, 13, 0};
 // CHECKASM-DAG: .globl wacom_usb_ids
 // CHECKASM-DAG: .size wacom_usb_ids, 32
 extern const int __mod_usb_device_table __attribute__ ((alias("wacom_usb_ids")));
-// CHECKBASIC-DAG: @__mod_usb_device_table ={{.*}} alias i32, getelementptr inbounds ([8 x i32], [8 x i32]* @wacom_usb_ids, i32 0, i32 0)
+// CHECKBASIC-DAG: @__mod_usb_device_table ={{.*}} alias i32, ptr @wacom_usb_ids
 // CHECKASM-DAG: .globl __mod_usb_device_table
 // CHECKASM-DAG: .set __mod_usb_device_table, wacom_usb_ids
 // CHECKASM-NOT: .size __mod_usb_device_table
 
 extern int g1;
 extern int g1 __attribute((alias("g0")));
-// CHECKBASIC-DAG: @g1 ={{.*}} alias i32, i32* @g0
+// CHECKBASIC-DAG: @g1 ={{.*}} alias i32, ptr @g0
 // CHECKASM-DAG: .globl g1
 // CHECKASM-DAG: .set g1, g0
 // CHECKASM-NOT: .size g1
 
 extern __thread int __libc_errno __attribute__ ((alias ("TL_WITH_ALIAS")));
-// CHECKBASIC-DAG: @__libc_errno ={{.*}} thread_local alias i32, i32* @TL_WITH_ALIAS
+// CHECKBASIC-DAG: @__libc_errno ={{.*}} thread_local alias i32, ptr @TL_WITH_ALIAS
 // CHECKASM-DAG: .globl __libc_errno
 // CHECKASM-DAG: .set __libc_errno, TL_WITH_ALIAS
 // CHECKASM-NOT: .size __libc_errno
@@ -48,10 +48,10 @@ extern __thread int __libc_errno __attribute__ ((alias ("TL_WITH_ALIAS")));
 void f0(void) { }
 extern void f1(void);
 extern void f1(void) __attribute((alias("f0")));
-// CHECKBASIC-DAG: @f1 ={{.*}} alias void (), void ()* @f0
-// CHECKBASIC-DAG: @test8_foo = weak{{.*}} alias void (...), bitcast (void ()* @test8_bar to void (...)*)
-// CHECKBASIC-DAG: @test8_zed ={{.*}} alias void (...), bitcast (void ()* @test8_bar to void (...)*)
-// CHECKBASIC-DAG: @test9_zed ={{.*}} alias void (), void ()* @test9_bar
+// CHECKBASIC-DAG: @f1 ={{.*}} alias void (), ptr @f0
+// CHECKBASIC-DAG: @test8_foo = weak{{.*}} alias void (...), ptr @test8_bar
+// CHECKBASIC-DAG: @test8_zed ={{.*}} alias void (...), ptr @test8_bar
+// CHECKBASIC-DAG: @test9_zed ={{.*}} alias void (), ptr @test9_bar
 // CHECKBASIC: define{{.*}} void @f0() [[NUW:#[0-9]+]] {
 
 // Make sure that aliases cause referenced values to be emitted.
@@ -71,7 +71,7 @@ static int inner(int a) { return 0; }
 static int inner_weak(int a) { return 0; }
 extern __typeof(inner) inner_a __attribute__((alias("inner")));
 static __typeof(inner_weak) inner_weak_a __attribute__((weakref, alias("inner_weak")));
-// CHECKCC: @inner_a ={{.*}} alias i32 (i32), i32 (i32)* @inner
+// CHECKCC: @inner_a ={{.*}} alias i32 (i32), ptr @inner
 // CHECKCC: define internal arm_aapcs_vfpcc i32 @inner(i32 noundef %a) [[NUW:#[0-9]+]] {
 
 int outer(int a) { return inner(a); }
@@ -106,12 +106,12 @@ void test11(void) {}
 static void test11_foo(void) __attribute__((alias("test11")));
 
 // Test that gnu_inline+alias work.
-// CHECKGLOBALS: @test12_alias ={{.*}} alias void (), void ()* @test12
+// CHECKGLOBALS: @test12_alias ={{.*}} alias void (), ptr @test12
 void test12(void) {}
 inline void test12_alias(void) __attribute__((gnu_inline, alias("test12")));
 
 // Test that a non visible (-Wvisibility) type doesn't assert.
-// CHECKGLOBALS: @test13_alias ={{.*}} alias {}, bitcast (void (i32)* @test13 to {}*)
+// CHECKGLOBALS: @test13_alias ={{.*}} alias {}, ptr @test13
 enum a_type { test13_a };
 void test13(enum a_type y) {}
 void test13_alias(enum undeclared_type y) __attribute__((alias ("test13")));

diff --git a/clang/test/CodeGen/attributes.c b/clang/test/CodeGen/attributes.c
index 530f80975a524..5afef72b747af 100644
--- a/clang/test/CodeGen/attributes.c
+++ b/clang/test/CodeGen/attributes.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -emit-llvm -Wno-strict-prototypes -Wno-incompatible-function-pointer-types -fcf-protection=branch -triple i386-linux-gnu %s -o - | FileCheck %s
+// RUN: %clang_cc1 -emit-llvm -Wno-strict-prototypes -Wno-incompatible-function-pointer-types -fcf-protection=branch -triple i386-linux-gnu %s -o - | FileCheck %s
 
 // CHECK: @t5 = weak{{.*}} global i32 2
 int t5 __attribute__((weak)) = 2;
@@ -25,7 +25,7 @@ int t6 __attribute__((visibility("protected")));
 // CHECK: @t12 ={{.*}} global i32 0, section "SECT"
 int t12 __attribute__((section("SECT")));
 
-// CHECK: @t9 = weak{{.*}} alias void (...), bitcast (void ()* @__t8 to void (...)*)
+// CHECK: @t9 = weak{{.*}} alias void (...), ptr @__t8
 void __t8() {}
 void t9() __attribute__((weak, alias("__t8")));
 
@@ -92,7 +92,7 @@ void (__attribute__((fastcall)) *fptr)(int);
 void t21(void) {
   fptr(10);
 }
-// CHECK: [[FPTRVAR:%[a-z0-9]+]] = load void (i32)*, void (i32)** @fptr
+// CHECK: [[FPTRVAR:%[a-z0-9]+]] = load ptr, ptr @fptr
 // CHECK-NEXT: call x86_fastcallcc void [[FPTRVAR]](i32 inreg noundef 10)
 
 

diff --git a/clang/test/CodeGen/builtin-cpu-is.c b/clang/test/CodeGen/builtin-cpu-is.c
index 88e8dee302802..dab6d2f9d11ae 100644
--- a/clang/test/CodeGen/builtin-cpu-is.c
+++ b/clang/test/CodeGen/builtin-cpu-is.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-pc-linux-gnu -emit-llvm < %s| FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm < %s| FileCheck %s
 
 // Test that we have the structure definition, the gep offsets, the name of the
 // global, the bit grab, and the icmp correct.
@@ -10,7 +10,7 @@ void intel(void) {
   if (__builtin_cpu_is("intel"))
     a("intel");
 
-  // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 0)
+  // CHECK: [[LOAD:%[^ ]+]] = load i32, ptr @__cpu_model
   // CHECK: = icmp eq i32 [[LOAD]], 1
 }
 
@@ -18,7 +18,7 @@ void amd(void) {
   if (__builtin_cpu_is("amd"))
     a("amd");
 
-  // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 0)
+  // CHECK: [[LOAD:%[^ ]+]] = load i32, ptr @__cpu_model
   // CHECK: = icmp eq i32 [[LOAD]], 2
 }
 
@@ -26,7 +26,7 @@ void atom(void) {
   if (__builtin_cpu_is("atom"))
     a("atom");
 
-  // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 1)
+  // CHECK: [[LOAD:%[^ ]+]] = load i32, ptr getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, ptr @__cpu_model, i32 0, i32 1)
   // CHECK: = icmp eq i32 [[LOAD]], 1
 }
 
@@ -34,7 +34,7 @@ void amdfam10h(void) {
   if (__builtin_cpu_is("amdfam10h"))
     a("amdfam10h");
 
-  // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 1)
+  // CHECK: [[LOAD:%[^ ]+]] = load i32, ptr getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, ptr @__cpu_model, i32 0, i32 1)
   // CHECK: = icmp eq i32 [[LOAD]], 4
 }
 
@@ -42,7 +42,7 @@ void barcelona(void) {
   if (__builtin_cpu_is("barcelona"))
     a("barcelona");
 
-  // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 2)
+  // CHECK: [[LOAD:%[^ ]+]] = load i32, ptr getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, ptr @__cpu_model, i32 0, i32 2)
   // CHECK: = icmp eq i32 [[LOAD]], 4
 }
 
@@ -50,6 +50,6 @@ void nehalem(void) {
   if (__builtin_cpu_is("nehalem"))
     a("nehalem");
 
-  // CHECK: [[LOAD:%[^ ]+]] = load i32, i32* getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, { i32, i32, i32, [1 x i32] }* @__cpu_model, i32 0, i32 2)
+  // CHECK: [[LOAD:%[^ ]+]] = load i32, ptr getelementptr inbounds ({ i32, i32, i32, [1 x i32] }, ptr @__cpu_model, i32 0, i32 2)
   // CHECK: = icmp eq i32 [[LOAD]], 1
 }

diff --git a/clang/test/CodeGen/builtins-nvptx-ptx50.cu b/clang/test/CodeGen/builtins-nvptx-ptx50.cu
index 569e3fdf07e5d..c297c0a58abf9 100644
--- a/clang/test/CodeGen/builtins-nvptx-ptx50.cu
+++ b/clang/test/CodeGen/builtins-nvptx-ptx50.cu
@@ -1,8 +1,8 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple nvptx64-unknown-unknown -target-cpu sm_60 \
+// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_60 \
 // RUN:            -fcuda-is-device -S -emit-llvm -o - -x cuda %s \
 // RUN:   | FileCheck -check-prefix=CHECK %s
 //
-// RUN: %clang_cc1 -no-opaque-pointers -triple nvptx-unknown-unknown -target-cpu sm_50 \
+// RUN: %clang_cc1 -triple nvptx-unknown-unknown -target-cpu sm_50 \
 // RUN:   -fcuda-is-device -S -o /dev/null -x cuda -verify %s
 
 #define __device__ __attribute__((device))
@@ -17,7 +17,7 @@
 
 // CHECK-LABEL: test_fn
 __device__ void test_fn(double d, double* double_ptr) {
-  // CHECK: atomicrmw fadd double* {{.*}} seq_cst, align 8
+  // CHECK: atomicrmw fadd ptr {{.*}} seq_cst, align 8
   // expected-error@+1 {{'__nvvm_atom_add_gen_d' needs target feature sm_60}}
   __nvvm_atom_add_gen_d(double_ptr, d);
 }

diff --git a/clang/test/CodeGen/global-blocks-win32.c b/clang/test/CodeGen/global-blocks-win32.c
index 2c4e2a6284481..acae63f7b98bd 100644
--- a/clang/test/CodeGen/global-blocks-win32.c
+++ b/clang/test/CodeGen/global-blocks-win32.c
@@ -1,18 +1,18 @@
-// RUN: %clang_cc1 -no-opaque-pointers -fblocks -triple i386-pc-windows-msvc %s -emit-llvm -o - -fblocks | FileCheck %s
+// RUN: %clang_cc1 -fblocks -triple i386-pc-windows-msvc %s -emit-llvm -o - -fblocks | FileCheck %s
 
 
 int (^x)(void) = ^() { return 21; };
 
 
 // Check that the block literal is emitted with a null isa pointer
-// CHECK: @__block_literal_global = internal global { i8**, i32, i32, i8*, %struct.__block_descriptor* } { i8** null, 
+// CHECK: @__block_literal_global = internal global { ptr, i32, i32, ptr, ptr } { ptr null, 
 
 // Check that _NSConcreteGlobalBlock has the correct dllimport specifier.
-// CHECK: @_NSConcreteGlobalBlock = external dllimport global i8*
+// CHECK: @_NSConcreteGlobalBlock = external dllimport global ptr
 // Check that we create an initialiser pointer in the correct section (early library initialisation).
-// CHECK: @.block_isa_init_ptr = internal constant void ()* @.block_isa_init, section ".CRT$XCLa"
+// CHECK: @.block_isa_init_ptr = internal constant ptr @.block_isa_init, section ".CRT$XCLa"
 
 // Check that we emit an initialiser for it.
 // CHECK: define internal void @.block_isa_init() {
-// CHECK: store i8** @_NSConcreteGlobalBlock, i8*** getelementptr inbounds ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }, { i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global, i32 0, i32 0), align 4
+// CHECK: store ptr @_NSConcreteGlobalBlock, ptr @__block_literal_global, align 4
 

diff --git a/clang/test/CodeGen/pr4349.c b/clang/test/CodeGen/pr4349.c
index 88a1415691ffa..3bec499e0b3f5 100644
--- a/clang/test/CodeGen/pr4349.c
+++ b/clang/test/CodeGen/pr4349.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s
 // PR 4349
 
 union reg
@@ -16,22 +16,22 @@ struct svar
 {
     void *ptr;
 };
-// CHECK: @svars1 = {{(dso_local )?}}global [1 x %struct.svar] [%struct.svar { i8* bitcast (%struct.cpu* @cpu to i8*) }]
+// CHECK: @svars1 = {{(dso_local )?}}global [1 x %struct.svar] [%struct.svar { ptr @cpu }]
 struct svar svars1[] =
 {
     { &((cpu.pc).w[0]) }
 };
-// CHECK: @svars2 = {{(dso_local )?}}global [1 x %struct.svar] [%struct.svar { i8* getelementptr (i8, i8* bitcast (%struct.cpu* @cpu to i8*), i64 1) }]
+// CHECK: @svars2 = {{(dso_local )?}}global [1 x %struct.svar] [%struct.svar { ptr getelementptr (i8, ptr @cpu, i64 1) }]
 struct svar svars2[] =
 {
     { &((cpu.pc).b[0][1]) }
 };
-// CHECK: @svars3 = {{(dso_local )?}}global [1 x %struct.svar] [%struct.svar { i8* getelementptr (i8, i8* bitcast (%struct.cpu* @cpu to i8*), i64 2) }]
+// CHECK: @svars3 = {{(dso_local )?}}global [1 x %struct.svar] [%struct.svar { ptr getelementptr (i8, ptr @cpu, i64 2) }]
 struct svar svars3[] =
 {
     { &((cpu.pc).w[1]) }
 };
-// CHECK: @svars4 = {{(dso_local )?}}global [1 x %struct.svar] [%struct.svar { i8* getelementptr (i8, i8* bitcast (%struct.cpu* @cpu to i8*), i64 3) }]
+// CHECK: @svars4 = {{(dso_local )?}}global [1 x %struct.svar] [%struct.svar { ptr getelementptr (i8, ptr @cpu, i64 3) }]
 struct svar svars4[] =
 {
     { &((cpu.pc).b[1][1]) }

diff --git a/clang/test/CodeGen/volatile-2.c b/clang/test/CodeGen/volatile-2.c
index cb8899a737d8a..36f0907fb38d5 100644
--- a/clang/test/CodeGen/volatile-2.c
+++ b/clang/test/CodeGen/volatile-2.c
@@ -1,11 +1,11 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin10 -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -o - %s | FileCheck %s
 
 void test0(void) {
   // CHECK-LABEL: define{{.*}} void @test0()
   // CHECK:      [[F:%.*]] = alloca float
-  // CHECK-NEXT: [[REAL:%.*]] = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @test0_v, i32 0, i32 0), align 4
-  // CHECK-NEXT: load volatile float, float* getelementptr inbounds ({{.*}} @test0_v, i32 0, i32 1), align 4
-  // CHECK-NEXT: store float [[REAL]], float* [[F]], align 4
+  // CHECK-NEXT: [[REAL:%.*]] = load volatile float, ptr @test0_v, align 4
+  // CHECK-NEXT: load volatile float, ptr getelementptr inbounds ({{.*}} @test0_v, i32 0, i32 1), align 4
+  // CHECK-NEXT: store float [[REAL]], ptr [[F]], align 4
   // CHECK-NEXT: ret void
   extern volatile _Complex float test0_v;
   float f = (float) test0_v;
@@ -13,10 +13,10 @@ void test0(void) {
 
 void test1(void) {
   // CHECK-LABEL: define{{.*}} void @test1()
-  // CHECK:      [[REAL:%.*]] = load volatile float, float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 0), align 4
-  // CHECK-NEXT: [[IMAG:%.*]] = load volatile float, float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1), align 4
-  // CHECK-NEXT: store volatile float [[REAL]], float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 0), align 4
-  // CHECK-NEXT: store volatile float [[IMAG]], float* getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1), align 4
+  // CHECK:      [[REAL:%.*]] = load volatile float, ptr @test1_v, align 4
+  // CHECK-NEXT: [[IMAG:%.*]] = load volatile float, ptr getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1), align 4
+  // CHECK-NEXT: store volatile float [[REAL]], ptr @test1_v, align 4
+  // CHECK-NEXT: store volatile float [[IMAG]], ptr getelementptr inbounds ({{.*}} @test1_v, i32 0, i32 1), align 4
   // CHECK-NEXT: ret void
   extern volatile _Complex float test1_v;
   test1_v = test1_v;

diff --git a/clang/test/CodeGen/volatile-complex.c b/clang/test/CodeGen/volatile-complex.c
index 8bc4cacc522d0..ca272910153b8 100644
--- a/clang/test/CodeGen/volatile-complex.c
+++ b/clang/test/CodeGen/volatile-complex.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
 
 // Validate that volatile _Complex loads and stores are generated
 // properly, including their alignment (even when overaligned).
@@ -14,52 +14,52 @@ volatile _Complex double cd32 __attribute__((aligned(32)));
 
 // CHECK-LABEL: define{{.*}} void @test_cf()
 void test_cf(void) {
-  // CHECK:      load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
-  // CHECK-NEXT: load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
+  // CHECK:      load volatile float, ptr @cf, align 4
+  // CHECK-NEXT: load volatile float, ptr getelementptr inbounds ({ float, float }, ptr @cf, i32 0, i32 1), align 4
   (void)(cf);
-  // CHECK-NEXT: [[R:%.*]] = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
-  // CHECK-NEXT: [[I:%.*]] = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
-  // CHECK-NEXT: store volatile float [[R]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 0), align 4
-  // CHECK-NEXT: store volatile float [[I]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf, i32 0, i32 1), align 4
+  // CHECK-NEXT: [[R:%.*]] = load volatile float, ptr @cf, align 4
+  // CHECK-NEXT: [[I:%.*]] = load volatile float, ptr getelementptr inbounds ({ float, float }, ptr @cf, i32 0, i32 1), align 4
+  // CHECK-NEXT: store volatile float [[R]], ptr @cf, align 4
+  // CHECK-NEXT: store volatile float [[I]], ptr getelementptr inbounds ({ float, float }, ptr @cf, i32 0, i32 1), align 4
   (void)(cf=cf);
   // CHECK-NEXT: ret void
 }
 
 // CHECK-LABEL: define{{.*}} void @test_cd()
 void test_cd(void) {
-  // CHECK:      load volatile double, double* getelementptr inbounds ({ double, double }, { double, double }* @cd, i32 0, i32 0), align 8
-  // CHECK-NEXT: load volatile double, double* getelementptr inbounds ({ double, double }, { double, double }* @cd, i32 0, i32 1), align 8
+  // CHECK:      load volatile double, ptr @cd, align 8
+  // CHECK-NEXT: load volatile double, ptr getelementptr inbounds ({ double, double }, ptr @cd, i32 0, i32 1), align 8
   (void)(cd);
-  // CHECK-NEXT: [[R:%.*]] = load volatile double, double* getelementptr inbounds ({ double, double }, { double, double }* @cd, i32 0, i32 0), align 8
-  // CHECK-NEXT: [[I:%.*]] = load volatile double, double* getelementptr inbounds ({ double, double }, { double, double }* @cd, i32 0, i32 1), align 8
-  // CHECK-NEXT: store volatile double [[R]], double* getelementptr inbounds ({ double, double }, { double, double }* @cd, i32 0, i32 0), align 8
-  // CHECK-NEXT: store volatile double [[I]], double* getelementptr inbounds ({ double, double }, { double, double }* @cd, i32 0, i32 1), align 8
+  // CHECK-NEXT: [[R:%.*]] = load volatile double, ptr @cd, align 8
+  // CHECK-NEXT: [[I:%.*]] = load volatile double, ptr getelementptr inbounds ({ double, double }, ptr @cd, i32 0, i32 1), align 8
+  // CHECK-NEXT: store volatile double [[R]], ptr @cd, align 8
+  // CHECK-NEXT: store volatile double [[I]], ptr getelementptr inbounds ({ double, double }, ptr @cd, i32 0, i32 1), align 8
   (void)(cd=cd);
   // CHECK-NEXT: ret void
 }
 
 // CHECK-LABEL: define{{.*}} void @test_cf32()
 void test_cf32(void) {
-  // CHECK:      load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf32, i32 0, i32 0), align 32
-  // CHECK-NEXT: load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf32, i32 0, i32 1), align 4
+  // CHECK:      load volatile float, ptr @cf32, align 32
+  // CHECK-NEXT: load volatile float, ptr getelementptr inbounds ({ float, float }, ptr @cf32, i32 0, i32 1), align 4
   (void)(cf32);
-  // CHECK-NEXT: [[R:%.*]] = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf32, i32 0, i32 0), align 32
-  // CHECK-NEXT: [[I:%.*]] = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @cf32, i32 0, i32 1), align 4
-  // CHECK-NEXT: store volatile float [[R]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf32, i32 0, i32 0), align 32
-  // CHECK-NEXT: store volatile float [[I]], float* getelementptr inbounds ({ float, float }, { float, float }* @cf32, i32 0, i32 1), align 4
+  // CHECK-NEXT: [[R:%.*]] = load volatile float, ptr @cf32, align 32
+  // CHECK-NEXT: [[I:%.*]] = load volatile float, ptr getelementptr inbounds ({ float, float }, ptr @cf32, i32 0, i32 1), align 4
+  // CHECK-NEXT: store volatile float [[R]], ptr @cf32, align 32
+  // CHECK-NEXT: store volatile float [[I]], ptr getelementptr inbounds ({ float, float }, ptr @cf32, i32 0, i32 1), align 4
   (void)(cf32=cf32);
   // CHECK-NEXT: ret void
 }
 
 // CHECK-LABEL: define{{.*}} void @test_cd32()
 void test_cd32(void) {
-  // CHECK:      load volatile double, double* getelementptr inbounds ({ double, double }, { double, double }* @cd32, i32 0, i32 0), align 32
-  // CHECK-NEXT: load volatile double, double* getelementptr inbounds ({ double, double }, { double, double }* @cd32, i32 0, i32 1), align 8
+  // CHECK:      load volatile double, ptr @cd32, align 32
+  // CHECK-NEXT: load volatile double, ptr getelementptr inbounds ({ double, double }, ptr @cd32, i32 0, i32 1), align 8
   (void)(cd32);
-  // CHECK-NEXT: [[R:%.*]] = load volatile double, double* getelementptr inbounds ({ double, double }, { double, double }* @cd32, i32 0, i32 0), align 32
-  // CHECK-NEXT: [[I:%.*]] = load volatile double, double* getelementptr inbounds ({ double, double }, { double, double }* @cd32, i32 0, i32 1), align 8
-  // CHECK-NEXT: store volatile double [[R]], double* getelementptr inbounds ({ double, double }, { double, double }* @cd32, i32 0, i32 0), align 32
-  // CHECK-NEXT: store volatile double [[I]], double* getelementptr inbounds ({ double, double }, { double, double }* @cd32, i32 0, i32 1), align 8
+  // CHECK-NEXT: [[R:%.*]] = load volatile double, ptr @cd32, align 32
+  // CHECK-NEXT: [[I:%.*]] = load volatile double, ptr getelementptr inbounds ({ double, double }, ptr @cd32, i32 0, i32 1), align 8
+  // CHECK-NEXT: store volatile double [[R]], ptr @cd32, align 32
+  // CHECK-NEXT: store volatile double [[I]], ptr getelementptr inbounds ({ double, double }, ptr @cd32, i32 0, i32 1), align 8
   (void)(cd32=cd32);
   // CHECK-NEXT: ret void
 }

diff --git a/clang/test/CodeGenCXX/builtin-bit-cast.cpp b/clang/test/CodeGenCXX/builtin-bit-cast.cpp
index eb7abf2f273c0..5f2f48b8fff62 100644
--- a/clang/test/CodeGenCXX/builtin-bit-cast.cpp
+++ b/clang/test/CodeGenCXX/builtin-bit-cast.cpp
@@ -1,13 +1,12 @@
-// RUN: %clang_cc1 -no-opaque-pointers -std=c++2a -S -emit-llvm -o - -disable-llvm-passes -triple x86_64-apple-macos10.14 %s | FileCheck %s
+// RUN: %clang_cc1 -std=c++2a -S -emit-llvm -o - -disable-llvm-passes -triple x86_64-apple-macos10.14 %s | FileCheck %s
 
 void test_scalar(int &oper) {
   // CHECK-LABEL: define{{.*}} void @_Z11test_scalarRi
   __builtin_bit_cast(float, oper);
 
-  // CHECK: [[OPER:%.*]] = alloca i32*
-  // CHECK: [[REF:%.*]] = load i32*, i32**
-  // CHECK-NEXT: [[CASTED:%.*]] = bitcast i32* [[REF]] to float*
-  // CHECK-NEXT: load float, float* [[CASTED]]
+  // CHECK: [[OPER:%.*]] = alloca ptr
+  // CHECK: [[REF:%.*]] = load ptr, ptr
+  // CHECK-NEXT: load float, ptr [[REF]]
 }
 
 struct two_ints {
@@ -19,10 +18,9 @@ unsigned long test_aggregate_to_scalar(two_ints &ti) {
   // CHECK-LABEL: define{{.*}} i64 @_Z24test_aggregate_to_scalarR8two_ints
   return __builtin_bit_cast(unsigned long, ti);
 
-  // CHECK: [[TI_ADDR:%.*]] = alloca %struct.two_ints*, align 8
-  // CHECK: [[TI_LOAD:%.*]] = load %struct.two_ints*, %struct.two_ints** [[TI_ADDR]]
-  // CHECK-NEXT: [[CASTED:%.*]] = bitcast %struct.two_ints* [[TI_LOAD]] to i64*
-  // CHECK-NEXT: load i64, i64* [[CASTED]]
+  // CHECK: [[TI_ADDR:%.*]] = alloca ptr, align 8
+  // CHECK: [[TI_LOAD:%.*]] = load ptr, ptr [[TI_ADDR]]
+  // CHECK-NEXT: load i64, ptr [[TI_LOAD]]
 }
 
 struct two_floats {
@@ -35,12 +33,10 @@ two_floats test_aggregate_record(two_ints& ti) {
    return __builtin_bit_cast(two_floats, ti);
 
   // CHECK: [[RETVAL:%.*]] = alloca %struct.two_floats, align 4
-  // CHECK: [[TI:%.*]] = alloca %struct.two_ints*, align 8
+  // CHECK: [[TI:%.*]] = alloca ptr, align 8
 
-  // CHECK: [[LOAD_TI:%.*]] = load %struct.two_ints*, %struct.two_ints** [[TI]]
-  // CHECK: [[MEMCPY_SRC:%.*]] = bitcast %struct.two_ints* [[LOAD_TI]] to i8*
-  // CHECK: [[MEMCPY_DST:%.*]] = bitcast %struct.two_floats* [[RETVAL]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[MEMCPY_DST]], i8* align 4 [[MEMCPY_SRC]]
+  // CHECK: [[LOAD_TI:%.*]] = load ptr, ptr [[TI]]
+  // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[RETVAL]], ptr align 4 [[LOAD_TI]]
 }
 
 two_floats test_aggregate_array(int (&ary)[2]) {
@@ -48,12 +44,10 @@ two_floats test_aggregate_array(int (&ary)[2]) {
   return __builtin_bit_cast(two_floats, ary);
 
   // CHECK: [[RETVAL:%.*]] = alloca %struct.two_floats, align 4
-  // CHECK: [[ARY:%.*]] = alloca [2 x i32]*, align 8
+  // CHECK: [[ARY:%.*]] = alloca ptr, align 8
 
-  // CHECK: [[LOAD_ARY:%.*]] = load [2 x i32]*, [2 x i32]** [[ARY]]
-  // CHECK: [[MEMCPY_SRC:%.*]] = bitcast [2 x i32]* [[LOAD_ARY]] to i8*
-  // CHECK: [[MEMCPY_DST:%.*]] = bitcast %struct.two_floats* [[RETVAL]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[MEMCPY_DST]], i8* align 4 [[MEMCPY_SRC]]
+  // CHECK: [[LOAD_ARY:%.*]] = load ptr, ptr [[ARY]]
+  // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[RETVAL]], ptr align 4 [[LOAD_ARY]]
 }
 
 two_ints test_scalar_to_aggregate(unsigned long ul) {
@@ -61,19 +55,17 @@ two_ints test_scalar_to_aggregate(unsigned long ul) {
   return __builtin_bit_cast(two_ints, ul);
 
   // CHECK: [[TI:%.*]] = alloca %struct.two_ints, align 4
-  // CHECK: [[TITMP:%.*]] = bitcast %struct.two_ints* [[TI]] to i8*
-  // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TITMP]]
+  // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TI]]
 }
 
 unsigned long test_complex(_Complex unsigned &cu) {
   // CHECK-LABEL: define{{.*}} i64 @_Z12test_complexRCj
   return __builtin_bit_cast(unsigned long, cu);
 
-  // CHECK: [[REF_ALLOCA:%.*]] = alloca { i32, i32 }*, align 8
-  // CHECK-NEXT: store { i32, i32 }* {{.*}}, { i32, i32 }** [[REF_ALLOCA]]
-  // CHECK-NEXT: [[REF:%.*]] = load { i32, i32 }*, { i32, i32 }** [[REF_ALLOCA]]
-  // CHECK-NEXT: [[CASTED:%.*]] = bitcast { i32, i32 }* [[REF]] to i64*
-  // CHECK-NEXT: load i64, i64* [[CASTED]], align 4
+  // CHECK: [[REF_ALLOCA:%.*]] = alloca ptr, align 8
+  // CHECK-NEXT: store ptr {{.*}}, ptr [[REF_ALLOCA]]
+  // CHECK-NEXT: [[REF:%.*]] = load ptr, ptr [[REF_ALLOCA]]
+  // CHECK-NEXT: load i64, ptr [[REF]], align 4
 }
 
 _Complex unsigned test_to_complex(unsigned long &ul) {
@@ -81,19 +73,17 @@ _Complex unsigned test_to_complex(unsigned long &ul) {
 
   return __builtin_bit_cast(_Complex unsigned, ul);
 
-  // CHECK: [[REF:%.*]] = alloca i64*
-  // CHECK: [[LOAD_REF:%.*]] = load i64*, i64** [[REF]]
-  // CHECK: [[CASTED:%.*]] = bitcast i64* [[LOAD_REF]] to { i32, i32 }*
+  // CHECK: [[REF:%.*]] = alloca ptr
+  // CHECK: [[LOAD_REF:%.*]] = load ptr, ptr [[REF]]
 }
 
 unsigned long test_array(int (&ary)[2]) {
   // CHECK-LABEL: define{{.*}} i64 @_Z10test_arrayRA2_i
   return __builtin_bit_cast(unsigned long, ary);
 
-  // CHECK: [[REF_ALLOCA:%.*]] = alloca [2 x i32]*
-  // CHECK: [[LOAD_REF:%.*]] = load [2 x i32]*, [2 x i32]** [[REF_ALLOCA]]
-  // CHECK: [[CASTED:%.*]] = bitcast [2 x i32]* [[LOAD_REF]] to i64*
-  // CHECK: load i64, i64* [[CASTED]], align 4
+  // CHECK: [[REF_ALLOCA:%.*]] = alloca ptr
+  // CHECK: [[LOAD_REF:%.*]] = load ptr, ptr [[REF_ALLOCA]]
+  // CHECK: load i64, ptr [[LOAD_REF]], align 4
 }
 
 two_ints test_rvalue_aggregate() {
@@ -101,6 +91,5 @@ two_ints test_rvalue_aggregate() {
   return __builtin_bit_cast(two_ints, 42ul);
 
   // CHECK: [[TI:%.*]] = alloca %struct.two_ints, align 4
-  // CHECK: [[CASTED:%.*]] = bitcast %struct.two_ints* [[TI]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[CASTED]]
+  // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TI]]
 }

diff --git a/clang/test/CodeGenCXX/call-conv-thru-alias.cpp b/clang/test/CodeGenCXX/call-conv-thru-alias.cpp
index caa222e4ee8f5..fd0d4f2bbbee7 100644
--- a/clang/test/CodeGenCXX/call-conv-thru-alias.cpp
+++ b/clang/test/CodeGenCXX/call-conv-thru-alias.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple i686-windows-pc -emit-llvm -o - -mconstructor-aliases -O1 -disable-llvm-passes %s | FileCheck %s
+// RUN: %clang_cc1 -triple i686-windows-pc -emit-llvm -o - -mconstructor-aliases -O1 -disable-llvm-passes %s | FileCheck %s
 
 struct Base { virtual ~Base(); };
 struct Derived : Base {
@@ -10,11 +10,11 @@ Base::~Base(){}
 Derived::~Derived(){}
 Derived Derived::inst;
 
-// CHECK: @"??1Derived@@UAE at XZ" = dso_local unnamed_addr alias void (%struct.Derived*), bitcast (void (%struct.Base*)* @"??1Base@@UAE at XZ" to void (%struct.Derived*)*)
+// CHECK: @"??1Derived@@UAE at XZ" = dso_local unnamed_addr alias void (ptr), ptr @"??1Base@@UAE at XZ"
 
 // CHECK: define dso_local x86_thiscallcc void @"??1Base@@UAE@XZ"
 // CHECK: define internal void @"??__E?inst@Derived@@2U1@A@@YAXXZ"
-// CHECK: call i32 @atexit(void ()* @"??__F?inst@Derived@@2U1@A@@YAXXZ"
+// CHECK: call i32 @atexit(ptr @"??__F?inst@Derived@@2U1@A@@YAXXZ"
 //
 // CHECK: define internal void @"??__F?inst at Derived@@2U1 at A@@YAXXZ"
 // CHECK-NEXT: entry:

diff --git a/clang/test/CodeGenCXX/delete-two-arg.cpp b/clang/test/CodeGenCXX/delete-two-arg.cpp
index f9d504c24a64c..9eb07e27f5716 100644
--- a/clang/test/CodeGenCXX/delete-two-arg.cpp
+++ b/clang/test/CodeGenCXX/delete-two-arg.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple i686-pc-linux-gnu %s -o - -emit-llvm -verify | FileCheck %s
+// RUN: %clang_cc1 -triple i686-pc-linux-gnu %s -o - -emit-llvm -verify | FileCheck %s
 // expected-no-diagnostics
 
 typedef __typeof(sizeof(int)) size_t;
@@ -11,7 +11,7 @@ namespace test1 {
     // CHECK:      load
     // CHECK-NEXT: icmp eq {{.*}}, null
     // CHECK-NEXT: br i1
-    // CHECK:      call void @_ZN5test11AdlEPvj(i8* noundef %{{.*}}, i32 noundef 4)
+    // CHECK:      call void @_ZN5test11AdlEPvj(ptr noundef %{{.*}}, i32 noundef 4)
     delete x;
   }
 }
@@ -25,29 +25,25 @@ namespace test2 {
     void operator delete[](void *, size_t);
   };
 
-  // CHECK: define{{.*}} [[A:%.*]]* @_ZN5test24testEv()
+  // CHECK: define{{.*}} ptr @_ZN5test24testEv()
   A *test() {
-    // CHECK:      [[NEW:%.*]] = call noalias noundef nonnull i8* @_Znaj(i32 noundef 44)
-    // CHECK-NEXT: [[T0:%.*]] = bitcast i8* [[NEW]] to i32*
-    // CHECK-NEXT: store i32 10, i32* [[T0]]
-    // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8, i8* [[NEW]], i32 4
-    // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[A]]*
-    // CHECK-NEXT: ret [[A]]* [[T2]]
+    // CHECK:      [[NEW:%.*]] = call noalias noundef nonnull ptr @_Znaj(i32 noundef 44)
+    // CHECK-NEXT: store i32 10, ptr [[NEW]]
+    // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8, ptr [[NEW]], i32 4
+    // CHECK-NEXT: ret ptr [[T1]]
     return ::new A[10];
   }
 
   // CHECK-LABEL: define{{.*}} void @_ZN5test24testEPNS_1AE(
   void test(A *p) {
-    // CHECK:      [[P:%.*]] = alloca [[A]]*, align 4
-    // CHECK-NEXT: store [[A]]* {{%.*}}, [[A]]** [[P]], align 4
-    // CHECK-NEXT: [[T0:%.*]] = load [[A]]*, [[A]]** [[P]], align 4
-    // CHECK-NEXT: [[T1:%.*]] = icmp eq [[A]]* [[T0]], null
+    // CHECK:      [[P:%.*]] = alloca ptr, align 4
+    // CHECK-NEXT: store ptr {{%.*}}, ptr [[P]], align 4
+    // CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[P]], align 4
+    // CHECK-NEXT: [[T1:%.*]] = icmp eq ptr [[T0]], null
     // CHECK-NEXT: br i1 [[T1]],
-    // CHECK:      [[T2:%.*]] = bitcast [[A]]* [[T0]] to i8*
-    // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i32 -4
-    // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i32*
-    // CHECK-NEXT: [[T5:%.*]] = load i32, i32* [[T4]]
-    // CHECK-NEXT: call void @_ZdaPv(i8* noundef [[T3]])
+    // CHECK: [[T3:%.*]] = getelementptr inbounds i8, ptr [[T0]], i32 -4
+    // CHECK-NEXT: [[T5:%.*]] = load i32, ptr [[T3]]
+    // CHECK-NEXT: call void @_ZdaPv(ptr noundef [[T3]])
     // CHECK-NEXT: br label
     ::delete[] p;
   }
@@ -63,8 +59,7 @@ namespace test3 {
 
   // CHECK-LABEL: define{{.*}} void @_ZN5test34testEv()
   void test() {
-    // CHECK:      [[CALL:%.*]] = call noalias noundef nonnull i8* @_Znaj(i32 noundef 24)
-    // CHECK-NEXT: bitcast i8* [[CALL]] to i32*
+    // CHECK:      [[CALL:%.*]] = call noalias noundef nonnull ptr @_Znaj(i32 noundef 24)
     // CHECK-NEXT: store i32 5
     (void) new B[5];
   }

diff --git a/clang/test/CodeGenCXX/microsoft-abi-structors-alias.cpp b/clang/test/CodeGenCXX/microsoft-abi-structors-alias.cpp
index b5a044199ec7d..923ae3829c6b6 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-structors-alias.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-structors-alias.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -emit-llvm %s -o - -triple=i386-pc-win32 -fno-rtti -mconstructor-aliases -O1 -disable-llvm-passes | FileCheck %s
+// RUN: %clang_cc1 -emit-llvm %s -o - -triple=i386-pc-win32 -fno-rtti -mconstructor-aliases -O1 -disable-llvm-passes | FileCheck %s
 
 namespace test1 {
 template <typename T> class A {
@@ -22,7 +22,7 @@ B::~B() {}
 void foo() {
   B b;
 }
-// CHECK-DAG: @"??1B at test2@@UAE at XZ" = dso_local unnamed_addr alias void (%"struct.test2::B"*), bitcast (void (%"struct.test2::A"*)* @"??1A at test2@@UAE at XZ" to void (%"struct.test2::B"*)*)
+// CHECK-DAG: @"??1B at test2@@UAE at XZ" = dso_local unnamed_addr alias void (ptr), ptr @"??1A at test2@@UAE at XZ"
 }
 
 namespace test3 {

diff --git a/clang/test/CodeGenCoroutines/coro-always-inline-exp-namespace.cpp b/clang/test/CodeGenCoroutines/coro-always-inline-exp-namespace.cpp
index e3705cc0d0878..2db4764a934ec 100644
--- a/clang/test/CodeGenCoroutines/coro-always-inline-exp-namespace.cpp
+++ b/clang/test/CodeGenCoroutines/coro-always-inline-exp-namespace.cpp
@@ -1,6 +1,6 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-linux-gnu -emit-llvm -fcoroutines-ts \
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -fcoroutines-ts \
 // RUN:   -O0 %s -o - | FileCheck %s
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-linux-gnu -emit-llvm -fcoroutines-ts \
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -fcoroutines-ts \
 // RUN:   -fno-inline -O0 %s -o - | FileCheck %s
 
 namespace std {
@@ -35,13 +35,9 @@ struct coroutine_traits {
 
 // CHECK-LABEL: @_Z3foov
 // CHECK-LABEL: entry:
-// CHECK: [[CAST0:%[0-9]+]] = bitcast %"struct.std::experimental::awaitable"* %ref.tmp{{.*}} to i8*
-// CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* [[CAST0]])
-// CHECK: [[CAST1:%[0-9]+]] = bitcast %"struct.std::experimental::awaitable"* %ref.tmp{{.*}} to i8*
-// CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* [[CAST1]])
+// CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %ref.tmp{{.*}})
+// CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %ref.tmp{{.*}})
 
-// CHECK: [[CAST2:%[0-9]+]] = bitcast %"struct.std::experimental::awaitable"* %ref.tmp{{.*}} to i8*
-// CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* [[CAST2]])
-// CHECK: [[CAST3:%[0-9]+]] = bitcast %"struct.std::experimental::awaitable"* %ref.tmp{{.*}} to i8*
-// CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* [[CAST3]])
+// CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %ref.tmp{{.*}})
+// CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %ref.tmp{{.*}})
 void foo() { co_return; }

diff --git a/clang/test/CodeGenCoroutines/coro-always-inline.cpp b/clang/test/CodeGenCoroutines/coro-always-inline.cpp
index b3685b9b93484..9ec3cb57d7a6e 100644
--- a/clang/test/CodeGenCoroutines/coro-always-inline.cpp
+++ b/clang/test/CodeGenCoroutines/coro-always-inline.cpp
@@ -1,6 +1,6 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-linux-gnu -emit-llvm -std=c++20 \
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -std=c++20 \
 // RUN:   -O0 %s -o - | FileCheck %s
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-linux-gnu -emit-llvm -std=c++20 \
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -std=c++20 \
 // RUN:   -fno-inline -O0 %s -o - | FileCheck %s
 
 namespace std {
@@ -33,15 +33,11 @@ struct coroutine_traits {
 
 // CHECK-LABEL: @_Z3foov
 // CHECK-LABEL: entry:
-// CHECK: [[CAST0:%[0-9]+]] = bitcast %"struct.std::awaitable"* %ref.tmp{{.*}} to i8*
-// CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* [[CAST0]])
-// CHECK: [[CAST1:%[0-9]+]] = bitcast %"struct.std::awaitable"* %ref.tmp{{.*}} to i8*
-// CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* [[CAST1]])
-
-// CHECK: [[CAST2:%[0-9]+]] = bitcast %"struct.std::awaitable"* %ref.tmp{{.*}} to i8*
-// CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* [[CAST2]])
-// CHECK: [[CAST3:%[0-9]+]] = bitcast %"struct.std::awaitable"* %ref.tmp{{.*}} to i8*
-// CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* [[CAST3]])
+// CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %ref.tmp{{.*}})
+// CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %ref.tmp{{.*}})
+
+// CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %ref.tmp{{.*}})
+// CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %ref.tmp{{.*}})
 void foo() { co_return; }
 
 // Check that bar is not inlined even it's marked as always_inline.

diff --git a/clang/test/CodeGenCoroutines/coro-builtins.c b/clang/test/CodeGenCoroutines/coro-builtins.c
index 8cd52fcb5ab7e..316ca4250a7ed 100644
--- a/clang/test/CodeGenCoroutines/coro-builtins.c
+++ b/clang/test/CodeGenCoroutines/coro-builtins.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-pc-windows-msvc18.0.0 -fcoroutines-ts -emit-llvm %s -o - -disable-llvm-passes | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc18.0.0 -fcoroutines-ts -emit-llvm %s -o - -disable-llvm-passes | FileCheck %s
 
 void *myAlloc(long long);
 
@@ -8,37 +8,36 @@ void f(int n) {
   // CHECK: %promise = alloca i32
   int promise;
 
-  // CHECK: %[[PROM_ADDR:.+]] = bitcast i32* %promise to i8*
-  // CHECK-NEXT: %[[COROID:.+]] = call token @llvm.coro.id(i32 32, i8* %[[PROM_ADDR]], i8* null, i8* null)
+  // CHECK: %[[COROID:.+]] = call token @llvm.coro.id(i32 32, ptr %promise, ptr null, ptr null)
   __builtin_coro_id(32, &promise, 0, 0);
 
   // CHECK-NEXT: call i1 @llvm.coro.alloc(token %[[COROID]])
   __builtin_coro_alloc();
 
-  // CHECK-NEXT: call i8* @llvm.coro.noop()
+  // CHECK-NEXT: call ptr @llvm.coro.noop()
   __builtin_coro_noop();
 
   // CHECK-NEXT: %[[SIZE:.+]] = call i64 @llvm.coro.size.i64()
-  // CHECK-NEXT: %[[MEM:.+]] = call i8* @myAlloc(i64 noundef %[[SIZE]])
-  // CHECK-NEXT: %[[FRAME:.+]] = call i8* @llvm.coro.begin(token %[[COROID]], i8* %[[MEM]])
+  // CHECK-NEXT: %[[MEM:.+]] = call ptr @myAlloc(i64 noundef %[[SIZE]])
+  // CHECK-NEXT: %[[FRAME:.+]] = call ptr @llvm.coro.begin(token %[[COROID]], ptr %[[MEM]])
   __builtin_coro_begin(myAlloc(__builtin_coro_size()));
 
-  // CHECK-NEXT: call void @llvm.coro.resume(i8* %[[FRAME]])
+  // CHECK-NEXT: call void @llvm.coro.resume(ptr %[[FRAME]])
   __builtin_coro_resume(__builtin_coro_frame());
 
-  // CHECK-NEXT: call void @llvm.coro.destroy(i8* %[[FRAME]])
+  // CHECK-NEXT: call void @llvm.coro.destroy(ptr %[[FRAME]])
   __builtin_coro_destroy(__builtin_coro_frame());
 
-  // CHECK-NEXT: call i1 @llvm.coro.done(i8* %[[FRAME]])
+  // CHECK-NEXT: call i1 @llvm.coro.done(ptr %[[FRAME]])
   __builtin_coro_done(__builtin_coro_frame());
 
-  // CHECK-NEXT: call i8* @llvm.coro.promise(i8* %[[FRAME]], i32 48, i1 false)
+  // CHECK-NEXT: call ptr @llvm.coro.promise(ptr %[[FRAME]], i32 48, i1 false)
   __builtin_coro_promise(__builtin_coro_frame(), 48, 0);
 
-  // CHECK-NEXT: call i8* @llvm.coro.free(token %[[COROID]], i8* %[[FRAME]])
+  // CHECK-NEXT: call ptr @llvm.coro.free(token %[[COROID]], ptr %[[FRAME]])
   __builtin_coro_free(__builtin_coro_frame());
 
-  // CHECK-NEXT: call i1 @llvm.coro.end(i8* %[[FRAME]], i1 false)
+  // CHECK-NEXT: call i1 @llvm.coro.end(ptr %[[FRAME]], i1 false)
   __builtin_coro_end(__builtin_coro_frame(), 0);
 
   // CHECK-NEXT: call i8 @llvm.coro.suspend(token none, i1 true)

diff --git a/clang/test/CodeGenCoroutines/coro-symmetric-transfer-02-exp-namespace.cpp b/clang/test/CodeGenCoroutines/coro-symmetric-transfer-02-exp-namespace.cpp
index 16030b104e63b..c9a496a1b747a 100644
--- a/clang/test/CodeGenCoroutines/coro-symmetric-transfer-02-exp-namespace.cpp
+++ b/clang/test/CodeGenCoroutines/coro-symmetric-transfer-02-exp-namespace.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-linux-gnu -fcoroutines-ts -std=c++14 -O1 -emit-llvm %s -o - -disable-llvm-passes | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fcoroutines-ts -std=c++14 -O1 -emit-llvm %s -o - -disable-llvm-passes | FileCheck %s
 
 #include "Inputs/coroutine-exp-namespace.h"
 
@@ -81,7 +81,7 @@ Task bar() {
 }
 
 // CHECK-LABEL: define{{.*}} void @_Z3barv
-// CHECK:         %[[MODE:.+]] = load i32, i32* %mode
+// CHECK:         %[[MODE:.+]] = load i32, ptr %mode
 // CHECK-NEXT:    switch i32 %[[MODE]], label %{{.+}} [
 // CHECK-NEXT:      i32 1, label %[[CASE1:.+]]
 // CHECK-NEXT:      i32 2, label %[[CASE2:.+]]
@@ -90,12 +90,10 @@ Task bar() {
 // CHECK:       [[CASE1]]:
 // CHECK:         br i1 %{{.+}}, label %[[CASE1_AWAIT_READY:.+]], label %[[CASE1_AWAIT_SUSPEND:.+]]
 // CHECK:       [[CASE1_AWAIT_SUSPEND]]:
-// CHECK-NEXT:    %{{.+}} = call token @llvm.coro.save(i8* null)
-// CHECK-NEXT:    %[[HANDLE11:.+]] = bitcast %"struct.std::experimental::coroutines_v1::coroutine_handle"* %[[TMP1:.+]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 8, i8* %[[HANDLE11]])
+// CHECK-NEXT:    %{{.+}} = call token @llvm.coro.save(ptr null)
+// CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr %[[TMP1:.+]])
 
-// CHECK:         %[[HANDLE12:.+]] = bitcast %"struct.std::experimental::coroutines_v1::coroutine_handle"* %[[TMP1]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 8, i8* %[[HANDLE12]])
+// CHECK:    call void @llvm.lifetime.end.p0(i64 8, ptr %[[TMP1]])
 // CHECK-NEXT:    call void @llvm.coro.resume
 // CHECK-NEXT:    %{{.+}} = call i8 @llvm.coro.suspend
 // CHECK-NEXT:    switch i8 %{{.+}}, label %coro.ret [
@@ -109,12 +107,10 @@ Task bar() {
 // CHECK:       [[CASE2]]:
 // CHECK:         br i1 %{{.+}}, label %[[CASE2_AWAIT_READY:.+]], label %[[CASE2_AWAIT_SUSPEND:.+]]
 // CHECK:       [[CASE2_AWAIT_SUSPEND]]:
-// CHECK-NEXT:    %{{.+}} = call token @llvm.coro.save(i8* null)
-// CHECK-NEXT:    %[[HANDLE21:.+]] = bitcast %"struct.std::experimental::coroutines_v1::coroutine_handle"* %[[TMP2:.+]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 8, i8* %[[HANDLE21]])
+// CHECK-NEXT:    %{{.+}} = call token @llvm.coro.save(ptr null)
+// CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr %[[TMP2:.+]])
 
-// CHECK:         %[[HANDLE22:.+]] = bitcast %"struct.std::experimental::coroutines_v1::coroutine_handle"* %[[TMP2]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 8, i8* %[[HANDLE22]])
+// CHECK:    call void @llvm.lifetime.end.p0(i64 8, ptr %[[TMP2]])
 // CHECK-NEXT:    call void @llvm.coro.resume
 // CHECK-NEXT:    %{{.+}} = call i8 @llvm.coro.suspend
 // CHECK-NEXT:    switch i8 %{{.+}}, label %coro.ret [

diff --git a/clang/test/CodeGenCoroutines/coro-symmetric-transfer-02.cpp b/clang/test/CodeGenCoroutines/coro-symmetric-transfer-02.cpp
index 2cee1dab58572..890d55e41de95 100644
--- a/clang/test/CodeGenCoroutines/coro-symmetric-transfer-02.cpp
+++ b/clang/test/CodeGenCoroutines/coro-symmetric-transfer-02.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-linux-gnu -std=c++20 -O1 -emit-llvm %s -o - -disable-llvm-passes | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -O1 -emit-llvm %s -o - -disable-llvm-passes | FileCheck %s
 
 #include "Inputs/coroutine.h"
 
@@ -79,7 +79,7 @@ Task bar() {
 }
 
 // CHECK-LABEL: define{{.*}} void @_Z3barv
-// CHECK:         %[[MODE:.+]] = load i32, i32* %mode
+// CHECK:         %[[MODE:.+]] = load i32, ptr %mode
 // CHECK-NEXT:    switch i32 %[[MODE]], label %{{.+}} [
 // CHECK-NEXT:      i32 1, label %[[CASE1:.+]]
 // CHECK-NEXT:      i32 2, label %[[CASE2:.+]]
@@ -88,12 +88,10 @@ Task bar() {
 // CHECK:       [[CASE1]]:
 // CHECK:         br i1 %{{.+}}, label %[[CASE1_AWAIT_READY:.+]], label %[[CASE1_AWAIT_SUSPEND:.+]]
 // CHECK:       [[CASE1_AWAIT_SUSPEND]]:
-// CHECK-NEXT:    %{{.+}} = call token @llvm.coro.save(i8* null)
-// CHECK-NEXT:    %[[HANDLE11:.+]] = bitcast %"struct.std::coroutine_handle"* %[[TMP1:.+]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 8, i8* %[[HANDLE11]])
+// CHECK-NEXT:    %{{.+}} = call token @llvm.coro.save(ptr null)
+// CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr %[[TMP1:.+]])
 
-// CHECK:         %[[HANDLE12:.+]] = bitcast %"struct.std::coroutine_handle"* %[[TMP1]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 8, i8* %[[HANDLE12]])
+// CHECK:    call void @llvm.lifetime.end.p0(i64 8, ptr %[[TMP1]])
 // CHECK-NEXT:    call void @llvm.coro.resume
 // CHECK-NEXT:    %{{.+}} = call i8 @llvm.coro.suspend
 // CHECK-NEXT:    switch i8 %{{.+}}, label %coro.ret [
@@ -107,12 +105,10 @@ Task bar() {
 // CHECK:       [[CASE2]]:
 // CHECK:         br i1 %{{.+}}, label %[[CASE2_AWAIT_READY:.+]], label %[[CASE2_AWAIT_SUSPEND:.+]]
 // CHECK:       [[CASE2_AWAIT_SUSPEND]]:
-// CHECK-NEXT:    %{{.+}} = call token @llvm.coro.save(i8* null)
-// CHECK-NEXT:    %[[HANDLE21:.+]] = bitcast %"struct.std::coroutine_handle"* %[[TMP2:.+]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 8, i8* %[[HANDLE21]])
+// CHECK-NEXT:    %{{.+}} = call token @llvm.coro.save(ptr null)
+// CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 8, ptr %[[TMP2:.+]])
 
-// CHECK:         %[[HANDLE22:.+]] = bitcast %"struct.std::coroutine_handle"* %[[TMP2]] to i8*
-// CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 8, i8* %[[HANDLE22]])
+// CHECK:    call void @llvm.lifetime.end.p0(i64 8, ptr %[[TMP2]])
 // CHECK-NEXT:    call void @llvm.coro.resume
 // CHECK-NEXT:    %{{.+}} = call i8 @llvm.coro.suspend
 // CHECK-NEXT:    switch i8 %{{.+}}, label %coro.ret [

diff --git a/clang/test/CodeGenObjC/arc-property.m b/clang/test/CodeGenObjC/arc-property.m
index 0857170992b8a..a3cfd9ff55d03 100644
--- a/clang/test/CodeGenObjC/arc-property.m
+++ b/clang/test/CodeGenObjC/arc-property.m
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin10 -fobjc-arc -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fobjc-arc -emit-llvm %s -o - | FileCheck %s
 
 // rdar://problem/10290317
 @interface Test0
@@ -22,21 +22,17 @@ @implementation Test1
 @synthesize pointer;
 @end
 //   The getter should be a simple load.
-// CHECK:    define internal [[S1:%.*]]* @"\01-[Test1 pointer]"(
-// CHECK:      [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test1.pointer"
-// CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST1:%.*]]* {{%.*}} to i8*
-// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
-// CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[S1]]**
-// CHECK-NEXT: [[T3:%.*]] = load [[S1]]*, [[S1]]** [[T2]], align 8
-// CHECK-NEXT: ret [[S1]]* [[T3]]
+// CHECK:    define internal ptr @"\01-[Test1 pointer]"(
+// CHECK:      [[OFFSET:%.*]] = load i64, ptr @"OBJC_IVAR_$_Test1.pointer"
+// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8, ptr {{%.*}}, i64 [[OFFSET]]
+// CHECK-NEXT: [[T3:%.*]] = load ptr, ptr [[T1]], align 8
+// CHECK-NEXT: ret ptr [[T3]]
 
 //   The setter should be using objc_setProperty.
 // CHECK:    define internal void @"\01-[Test1 setPointer:]"(
-// CHECK:      [[T0:%.*]] = bitcast [[TEST1]]* {{%.*}} to i8*
-// CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test1.pointer"
-// CHECK-NEXT: [[T1:%.*]] = load [[S1]]*, [[S1]]** {{%.*}}
-// CHECK-NEXT: [[T2:%.*]] = bitcast [[S1]]* [[T1]] to i8*
-// CHECK-NEXT: call void @objc_setProperty(i8* noundef [[T0]], i8* noundef {{%.*}}, i64 noundef [[OFFSET]], i8* noundef [[T2]], i1 noundef zeroext false, i1 noundef zeroext false)
+// CHECK: [[OFFSET:%.*]] = load i64, ptr @"OBJC_IVAR_$_Test1.pointer"
+// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr {{%.*}}
+// CHECK-NEXT: call void @objc_setProperty(ptr noundef {{%.*}}, ptr noundef {{%.*}}, i64 noundef [[OFFSET]], ptr noundef [[T1]], i1 noundef zeroext false, i1 noundef zeroext false)
 // CHECK-NEXT: ret void
 
 
@@ -56,34 +52,29 @@ - (void) test {
 }
 @end
 // CHECK:    define internal void @"\01-[Test2 test]"(
-// CHECK:      [[T0:%.*]] = load i8*, i8** @theGlobalClass, align 8
-// CHECK-NEXT: [[T1:%.*]] = load [[TEST2:%.*]]*, [[TEST2:%.*]]**
-// CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test2._theClass"
-// CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST2]]* [[T1]] to i8*
-// CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i64 [[OFFSET]]
-// CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i8**
-// CHECK-NEXT: call void @llvm.objc.storeStrong(i8** [[T4]], i8* [[T0]]) [[NUW:#[0-9]+]]
+// CHECK:      [[T0:%.*]] = load ptr, ptr @theGlobalClass, align 8
+// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr
+// CHECK-NEXT: [[OFFSET:%.*]] = load i64, ptr @"OBJC_IVAR_$_Test2._theClass"
+// CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, ptr [[T1]], i64 [[OFFSET]]
+// CHECK-NEXT: call void @llvm.objc.storeStrong(ptr [[T3]], ptr [[T0]]) [[NUW:#[0-9]+]]
 // CHECK-NEXT: ret void
 
-// CHECK:    define internal i8* @"\01-[Test2 theClass]"(
-// CHECK:      [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test2._theClass"
-// CHECK-NEXT: [[T0:%.*]] = tail call i8* @objc_getProperty(i8* noundef {{.*}}, i8* noundef {{.*}}, i64 noundef [[OFFSET]], i1 noundef zeroext true)
-// CHECK-NEXT: ret i8* [[T0]]
+// CHECK:    define internal ptr @"\01-[Test2 theClass]"(
+// CHECK:      [[OFFSET:%.*]] = load i64, ptr @"OBJC_IVAR_$_Test2._theClass"
+// CHECK-NEXT: [[T0:%.*]] = tail call ptr @objc_getProperty(ptr noundef {{.*}}, ptr noundef {{.*}}, i64 noundef [[OFFSET]], i1 noundef zeroext true)
+// CHECK-NEXT: ret ptr [[T0]]
 
 // CHECK:    define internal void @"\01-[Test2 setTheClass:]"(
-// CHECK:      [[T0:%.*]] = bitcast [[TEST2]]* {{%.*}} to i8*
-// CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test2._theClass"
-// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** {{%.*}}
-// CHECK-NEXT: call void @objc_setProperty(i8* noundef [[T0]], i8* noundef {{%.*}}, i64 noundef [[OFFSET]], i8* noundef [[T1]], i1 noundef zeroext true, i1 noundef zeroext true)
+// CHECK: [[OFFSET:%.*]] = load i64, ptr @"OBJC_IVAR_$_Test2._theClass"
+// CHECK-NEXT: [[T1:%.*]] = load ptr, ptr {{%.*}}
+// CHECK-NEXT: call void @objc_setProperty(ptr noundef {{%.*}}, ptr noundef {{%.*}}, i64 noundef [[OFFSET]], ptr noundef [[T1]], i1 noundef zeroext true, i1 noundef zeroext true)
 // CHECK-NEXT: ret void
 
 // CHECK:    define internal void @"\01-[Test2 .cxx_destruct]"(
-// CHECK:      [[T0:%.*]] = load [[TEST2]]*, [[TEST2]]**
-// CHECK-NEXT: [[OFFSET:%.*]] = load i64, i64* @"OBJC_IVAR_$_Test2._theClass"
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST2]]* [[T0]] to i8*
-// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i8, i8* [[T1]], i64 [[OFFSET]]
-// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8**
-// CHECK-NEXT: call void @llvm.objc.storeStrong(i8** [[T3]], i8* null) [[NUW]]
+// CHECK:      [[T0:%.*]] = load ptr, ptr
+// CHECK-NEXT: [[OFFSET:%.*]] = load i64, ptr @"OBJC_IVAR_$_Test2._theClass"
+// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i8, ptr [[T0]], i64 [[OFFSET]]
+// CHECK-NEXT: call void @llvm.objc.storeStrong(ptr [[T2]], ptr null) [[NUW]]
 // CHECK-NEXT: ret void
 
 // rdar://13115896
@@ -95,28 +86,25 @@ void test3(Test3 *t) {
   id x = t.copyMachine;
   x = [t copyMachine];
 }
-// CHECK:    define{{.*}} void @test3([[TEST3:%.*]]*
+// CHECK:    define{{.*}} void @test3(ptr
 //   Prologue.
-// CHECK:      [[T:%.*]] = alloca [[TEST3]]*,
-// CHECK-NEXT: [[X:%.*]] = alloca i8*,
+// CHECK:      [[T:%.*]] = alloca ptr,
+// CHECK-NEXT: [[X:%.*]] = alloca ptr,
 //   Property access.
-// CHECK:      [[T0:%.*]] = load [[TEST3]]*, [[TEST3]]** [[T]],
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST3]]* [[T0]] to i8*
-// CHECK-NEXT: [[SEL:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES
-// CHECK-NEXT: [[T2:%.*]] = call i8* bitcast ({{.*}} @objc_msgSend to {{.*}})(i8* noundef [[T1]], i8* noundef [[SEL]])
-// CHECK-NEXT: store i8* [[T2]], i8** [[X]],
+// CHECK:      [[T0:%.*]] = load ptr, ptr [[T]],
+// CHECK-NEXT: [[SEL:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES
+// CHECK-NEXT: [[T2:%.*]] = call ptr @objc_msgSend(ptr noundef [[T0]], ptr noundef [[SEL]])
+// CHECK-NEXT: store ptr [[T2]], ptr [[X]],
 //   Message send.
-// CHECK-NEXT: [[T0:%.*]] = load [[TEST3]]*, [[TEST3]]** [[T]],
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST3]]* [[T0]] to i8*
-// CHECK-NEXT: [[SEL:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES
-// CHECK-NEXT: [[T2:%.*]] = call i8* bitcast ({{.*}} @objc_msgSend to {{.*}})(i8* noundef [[T1]], i8* noundef [[SEL]])
-// CHECK-NEXT: [[T3:%.*]] = load i8*, i8** [[X]],
-// CHECK-NEXT: store i8* [[T2]], i8** [[X]],
-// CHECK-NEXT: call void @llvm.objc.release(i8* [[T3]])
+// CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[T]],
+// CHECK-NEXT: [[SEL:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES
+// CHECK-NEXT: [[T2:%.*]] = call ptr @objc_msgSend(ptr noundef [[T0]], ptr noundef [[SEL]])
+// CHECK-NEXT: [[T3:%.*]] = load ptr, ptr [[X]],
+// CHECK-NEXT: store ptr [[T2]], ptr [[X]],
+// CHECK-NEXT: call void @llvm.objc.release(ptr [[T3]])
 //   Epilogue.
-// CHECK-NEXT: call void @llvm.objc.storeStrong(i8** [[X]], i8* null)
-// CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST3]]** [[T]] to i8**
-// CHECK-NEXT: call void @llvm.objc.storeStrong(i8** [[T0]], i8* null)
+// CHECK-NEXT: call void @llvm.objc.storeStrong(ptr [[X]], ptr null)
+// CHECK-NEXT: call void @llvm.objc.storeStrong(ptr [[T]], ptr null)
 // CHECK-NEXT: ret void
 
 @implementation Test3
@@ -124,10 +112,10 @@ - (id) copyMachine {
   extern id test3_helper(void);
   return test3_helper();
 }
-// CHECK:    define internal i8* @"\01-[Test3 copyMachine]"(
-// CHECK:      [[T0:%.*]] = call i8* @test3_helper()
-// CHECK-NEXT: [[T1:%.*]] = notail call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* [[T0]])
-// CHECK-NEXT: ret i8* [[T1]]
+// CHECK:    define internal ptr @"\01-[Test3 copyMachine]"(
+// CHECK:      [[T0:%.*]] = call ptr @test3_helper()
+// CHECK-NEXT: [[T1:%.*]] = notail call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr [[T0]])
+// CHECK-NEXT: ret ptr [[T1]]
 - (void) setCopyMachine: (id) x {}
 @end
 
@@ -146,7 +134,7 @@ @interface ABC_Class <ABC, ABC__Mutable>
 
 @implementation ABC_Class
 @synthesize someId = _someId;
-// CHECK:  define internal %{{.*}}* @"\01-[ABC_Class someId]"
+// CHECK:  define internal ptr @"\01-[ABC_Class someId]"
 // CHECK:  define internal void @"\01-[ABC_Class setSomeId:]"(
 @end
 

diff --git a/clang/test/CodeGenObjC/blocks-2.m b/clang/test/CodeGenObjC/blocks-2.m
index e8faca3f04bff..1744e2e4e4852 100644
--- a/clang/test/CodeGenObjC/blocks-2.m
+++ b/clang/test/CodeGenObjC/blocks-2.m
@@ -1,11 +1,11 @@
 // We run this twice, once as Objective-C and once as Objective-C++.
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -fobjc-gc -fblocks -fexceptions -triple i386-apple-darwin10 -fobjc-runtime=macosx-fragile-10.5 | FileCheck %s
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -fobjc-gc -fblocks -fexceptions -triple i386-apple-darwin10 -fobjc-runtime=macosx-fragile-10.5 -x objective-c++ | FileCheck %s
+// RUN: %clang_cc1 %s -emit-llvm -o - -fobjc-gc -fblocks -fexceptions -triple i386-apple-darwin10 -fobjc-runtime=macosx-fragile-10.5 | FileCheck %s
+// RUN: %clang_cc1 %s -emit-llvm -o - -fobjc-gc -fblocks -fexceptions -triple i386-apple-darwin10 -fobjc-runtime=macosx-fragile-10.5 -x objective-c++ | FileCheck %s
 
 
-// CHECK: define{{.*}} i8* @{{.*}}test0
+// CHECK: define{{.*}} ptr @{{.*}}test0
 // CHECK: define internal void @{{.*}}_block_invoke(
-// CHECK:      call i8* @objc_assign_strongCast(
+// CHECK:      call ptr @objc_assign_strongCast(
 // CHECK-NEXT: ret void
 id test0(id x) {
   __block id result;
@@ -19,20 +19,18 @@ void test1(void) {
   extern void test1_help(void (^x)(void));
 
   // CHECK:      [[N:%.*]] = alloca [[N_T:%.*]], align 8
-  // CHECK:      [[T0:%.*]] = getelementptr inbounds [[N_T]], [[N_T]]* [[N]], i32 0, i32 4
-  // CHECK-NEXT: store double 1.000000e+{{0?}}01, double* [[T0]], align 8
+  // CHECK:      [[T0:%.*]] = getelementptr inbounds [[N_T]], ptr [[N]], i32 0, i32 4
+  // CHECK-NEXT: store double 1.000000e+{{0?}}01, ptr [[T0]], align 8
   __block double n = 10;
 
   // CHECK:      invoke void @{{.*}}test1_help
   test1_help(^{ n = 20; });
 
-  // CHECK:      [[T1:%.*]] = bitcast [[N_T]]* [[N]] to i8*
-  // CHECK-NEXT: call void @_Block_object_dispose(i8* [[T1]], i32 8)
+  // CHECK: call void @_Block_object_dispose(ptr [[N]], i32 8)
   // CHECK-NEXT: ret void
 
-  // CHECK:      landingpad { i8*, i32 }
+  // CHECK:      landingpad { ptr, i32 }
   // CHECK-NEXT:   cleanup
-  // CHECK:      [[T1:%.*]] = bitcast [[N_T]]* [[N]] to i8*
-  // CHECK-NEXT: call void @_Block_object_dispose(i8* [[T1]], i32 8)
-  // CHECK:      resume { i8*, i32 }
+  // CHECK: call void @_Block_object_dispose(ptr [[N]], i32 8)
+  // CHECK:      resume { ptr, i32 }
 }

diff --git a/clang/test/CodeGenObjC/builtin-constant-p.m b/clang/test/CodeGenObjC/builtin-constant-p.m
index 97e7509d7f8a2..a85722d368034 100644
--- a/clang/test/CodeGenObjC/builtin-constant-p.m
+++ b/clang/test/CodeGenObjC/builtin-constant-p.m
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin10 -emit-llvm -O3 -disable-llvm-passes -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -O3 -disable-llvm-passes -o - %s | FileCheck %s
 
 // Test that can call `__builtin_constant_p` with instances of different
 // Objective-C classes.
@@ -8,21 +8,19 @@
 
 extern void callee(void);
 
-// CHECK-LABEL: define{{.*}} void @test(%0* noundef %foo, %1* noundef %bar)
+// CHECK-LABEL: define{{.*}} void @test(ptr noundef %foo, ptr noundef %bar)
 void test(Foo *foo, Bar *bar) {
-  // CHECK: [[ADDR_FOO:%.*]] = bitcast %0* %{{.*}} to i8*
-  // CHECK-NEXT: call i1 @llvm.is.constant.p0i8(i8* [[ADDR_FOO]])
-  // CHECK: [[ADDR_BAR:%.*]] = bitcast %1* %{{.*}} to i8*
-  // CHECK-NEXT: call i1 @llvm.is.constant.p0i8(i8* [[ADDR_BAR]])
+  // CHECK: call i1 @llvm.is.constant.p0(ptr %{{.*}})
+  // CHECK: call i1 @llvm.is.constant.p0(ptr %{{.*}})
   if (__builtin_constant_p(foo) && __builtin_constant_p(bar))
     callee();
 }
 
 // Test other Objective-C types.
-// CHECK-LABEL: define{{.*}} void @test_more(i8* noundef %object, i8* noundef %klass)
+// CHECK-LABEL: define{{.*}} void @test_more(ptr noundef %object, ptr noundef %klass)
 void test_more(id object, Class klass) {
-  // CHECK: call i1 @llvm.is.constant.p0i8(i8* %{{.*}})
-  // CHECK: call i1 @llvm.is.constant.p0i8(i8* %{{.*}})
+  // CHECK: call i1 @llvm.is.constant.p0(ptr %{{.*}})
+  // CHECK: call i1 @llvm.is.constant.p0(ptr %{{.*}})
   if (__builtin_constant_p(object) && __builtin_constant_p(klass))
     callee();
 }

diff --git a/clang/test/CodeGenObjC/class-stubs.m b/clang/test/CodeGenObjC/class-stubs.m
index d73b541dd97ba..39a636fb98156 100644
--- a/clang/test/CodeGenObjC/class-stubs.m
+++ b/clang/test/CodeGenObjC/class-stubs.m
@@ -1,28 +1,28 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin10 -Wno-objc-root-class -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -Wno-objc-root-class -emit-llvm -o - %s | FileCheck %s
 
 // -- classref for the message send in main()
 //
 // The class is declared with objc_class_stub, so LSB of the class pointer
 // must be set to 1.
 //
-// CHECK-LABEL: @"OBJC_CLASSLIST_REFERENCES_$_" = internal global i8* getelementptr (i8, i8* bitcast (%struct._class_t* @"OBJC_CLASS_$_Base" to i8*), i32 1), align 8
+// CHECK-LABEL: @"OBJC_CLASSLIST_REFERENCES_$_" = internal global ptr getelementptr (i8, ptr @"OBJC_CLASS_$_Base", i32 1), align 8
 
 // -- classref for the super message send in anotherClassMethod()
 //
 // Metaclasses do not use the "stub" mechanism and are referenced statically.
 //
-// CHECK-LABEL: @"OBJC_CLASSLIST_SUP_REFS_$_" = private global %struct._class_t* @"OBJC_METACLASS_$_Derived", section "__DATA,__objc_superrefs,regular,no_dead_strip", align 8
+// CHECK-LABEL: @"OBJC_CLASSLIST_SUP_REFS_$_" = private global ptr @"OBJC_METACLASS_$_Derived", section "__DATA,__objc_superrefs,regular,no_dead_strip", align 8
 
 // -- classref for the super message send in anotherInstanceMethod()
 //
 // The class is declared with objc_class_stub, so LSB of the class pointer
 // must be set to 1.
 //
-// CHECK-LABEL: @"OBJC_CLASSLIST_SUP_REFS_$_.1" = private global i8* getelementptr (i8, i8* bitcast (%struct._class_t* @"OBJC_CLASS_$_Derived" to i8*), i32 1), section "__DATA,__objc_superrefs,regular,no_dead_strip", align 8
+// CHECK-LABEL: @"OBJC_CLASSLIST_SUP_REFS_$_.1" = private global ptr getelementptr (i8, ptr @"OBJC_CLASS_$_Derived", i32 1), section "__DATA,__objc_superrefs,regular,no_dead_strip", align 8
 
 // -- category list for class stubs goes in __objc_catlist2.
 //
-// CHECK-LABEL: @"OBJC_LABEL_STUB_CATEGORY_$" = private global [1 x i8*] [i8* bitcast (%struct._category_t* @"_OBJC_$_CATEGORY_Derived_$_MyCategory" to i8*)], section "__DATA,__objc_catlist2,regular,no_dead_strip", align 8
+// CHECK-LABEL: @"OBJC_LABEL_STUB_CATEGORY_$" = private global [1 x ptr] [ptr @"_OBJC_$_CATEGORY_Derived_$_MyCategory"], section "__DATA,__objc_catlist2,regular,no_dead_strip", align 8
 
 __attribute__((objc_class_stub))
 __attribute__((objc_subclassing_restricted))
@@ -41,13 +41,12 @@ int main(void) {
 }
 // CHECK-LABEL: define{{.*}} i32 @main()
 // CHECK-NEXT: entry:
-// CHECK-NEXT:   [[CLASS:%.*]] = call %struct._class_t* @objc_loadClassref(i8** @"OBJC_CLASSLIST_REFERENCES_$_")
-// CHECK-NEXT:   [[RECEIVER:%.*]] = bitcast %struct._class_t* [[CLASS]] to i8*
-// CHECK-NEXT:   [[SELECTOR:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_
-// CHECK-NEXT:   call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*)*)(i8* noundef [[RECEIVER]], i8* noundef [[SELECTOR]])
+// CHECK-NEXT:   [[CLASS:%.*]] = call ptr @objc_loadClassref(ptr @"OBJC_CLASSLIST_REFERENCES_$_")
+// CHECK-NEXT:   [[SELECTOR:%.*]] = load ptr, ptr @OBJC_SELECTOR_REFERENCES_
+// CHECK-NEXT:   call void @objc_msgSend(ptr noundef [[CLASS]], ptr noundef [[SELECTOR]])
 // CHECK-NEXT:   ret i32 0
 
-// CHECK-LABEL: declare extern_weak %struct._class_t* @objc_loadClassref(i8**)
+// CHECK-LABEL: declare extern_weak ptr @objc_loadClassref(ptr)
 // CHECK-SAME: [[ATTRLIST:#.*]]
 
 @implementation Derived (MyCategory)
@@ -55,27 +54,25 @@ @implementation Derived (MyCategory)
 + (void) anotherClassMethod {
   [super classMethod];
 }
-// CHECK-LABEL: define internal void @"\01+[Derived(MyCategory) anotherClassMethod]"(i8* noundef %self, i8* noundef %_cmd) #0 {
+// CHECK-LABEL: define internal void @"\01+[Derived(MyCategory) anotherClassMethod]"(ptr noundef %self, ptr noundef %_cmd) #0 {
 // CHECK-NEXT: entry:
 // CHECK:        [[SUPER:%.*]] = alloca %struct._objc_super, align 8
-// CHECK:        [[METACLASS_REF:%.*]] = load %struct._class_t*, %struct._class_t** @"OBJC_CLASSLIST_SUP_REFS_$_", align 8
-// CHECK:        [[CAST_METACLASS_REF:%.*]] = bitcast %struct._class_t* [[METACLASS_REF]] to i8*
-// CHECK:        [[DEST:%.*]] = getelementptr inbounds %struct._objc_super, %struct._objc_super* [[SUPER]], i32 0, i32 1
-// CHECK:        store i8* [[CAST_METACLASS_REF]], i8** [[DEST]], align 8
-// CHECK:        call void bitcast (i8* (%struct._objc_super*, i8*, ...)* @objc_msgSendSuper2 to void (%struct._objc_super*, i8*)*)(%struct._objc_super* noundef [[SUPER]], i8* noundef {{%.*}})
+// CHECK:        [[METACLASS_REF:%.*]] = load ptr, ptr @"OBJC_CLASSLIST_SUP_REFS_$_", align 8
+// CHECK:        [[DEST:%.*]] = getelementptr inbounds %struct._objc_super, ptr [[SUPER]], i32 0, i32 1
+// CHECK:        store ptr [[METACLASS_REF]], ptr [[DEST]], align 8
+// CHECK:        call void @objc_msgSendSuper2(ptr noundef [[SUPER]], ptr noundef {{%.*}})
 // CHECK:        ret void
 
 - (void) anotherInstanceMethod {
   [super instanceMethod];
 }
-// CHECK-LABEL: define internal void @"\01-[Derived(MyCategory) anotherInstanceMethod]"(%0* noundef %self, i8* noundef %_cmd) #0 {
+// CHECK-LABEL: define internal void @"\01-[Derived(MyCategory) anotherInstanceMethod]"(ptr noundef %self, ptr noundef %_cmd) #0 {
 // CHECK-NEXT: entry:
 // CHECK:        [[SUPER:%.*]] = alloca %struct._objc_super, align 8
-// CHECK:        [[CLASS_REF:%.*]] = call %struct._class_t* @objc_loadClassref(i8** @"OBJC_CLASSLIST_SUP_REFS_$_.1")
-// CHECK:        [[CAST_CLASS_REF:%.*]] = bitcast %struct._class_t* [[CLASS_REF]] to i8*
-// CHECK:        [[DEST:%.*]] = getelementptr inbounds %struct._objc_super, %struct._objc_super* [[SUPER]], i32 0, i32 1
-// CHECK:        store i8* [[CAST_CLASS_REF]], i8** [[DEST]], align 8
-// CHECK:        call void bitcast (i8* (%struct._objc_super*, i8*, ...)* @objc_msgSendSuper2 to void (%struct._objc_super*, i8*)*)(%struct._objc_super* noundef [[SUPER]], i8* noundef {{%.*}})
+// CHECK:        [[CLASS_REF:%.*]] = call ptr @objc_loadClassref(ptr @"OBJC_CLASSLIST_SUP_REFS_$_.1")
+// CHECK:        [[DEST:%.*]] = getelementptr inbounds %struct._objc_super, ptr [[SUPER]], i32 0, i32 1
+// CHECK:        store ptr [[CLASS_REF]], ptr [[DEST]], align 8
+// CHECK:        call void @objc_msgSendSuper2(ptr noundef [[SUPER]], ptr noundef {{%.*}})
 // CHECK:        ret void
 
 @end

diff --git a/clang/test/Index/pipe-size.cl b/clang/test/Index/pipe-size.cl
index 6b4e8b7bc2551..0a384db00ba66 100644
--- a/clang/test/Index/pipe-size.cl
+++ b/clang/test/Index/pipe-size.cl
@@ -1,16 +1,16 @@
-// RUN: %clang_cc1 -no-opaque-pointers -x cl -O0 -cl-std=CL2.0 -emit-llvm -triple x86_64-unknown-linux-gnu %s -o - | FileCheck %s --check-prefix=X86
-// RUN: %clang_cc1 -no-opaque-pointers -x cl -O0 -cl-std=CL2.0 -emit-llvm -triple spir-unknown-unknown %s -o - | FileCheck %s --check-prefix=SPIR
-// RUN: %clang_cc1 -no-opaque-pointers -x cl -O0 -cl-std=CL2.0 -emit-llvm -triple spir64-unknown-unknown %s -o - | FileCheck %s --check-prefix=SPIR64
-// RUN: %clang_cc1 -no-opaque-pointers -x cl -O0 -cl-std=CL2.0 -emit-llvm -triple amdgcn-amd-amdhsa %s -o - | FileCheck %s --check-prefix=AMDGCN
+// RUN: %clang_cc1 -x cl -O0 -cl-std=CL2.0 -emit-llvm -triple x86_64-unknown-linux-gnu %s -o - | FileCheck %s --check-prefix=X86
+// RUN: %clang_cc1 -x cl -O0 -cl-std=CL2.0 -emit-llvm -triple spir-unknown-unknown %s -o - | FileCheck %s --check-prefix=SPIR
+// RUN: %clang_cc1 -x cl -O0 -cl-std=CL2.0 -emit-llvm -triple spir64-unknown-unknown %s -o - | FileCheck %s --check-prefix=SPIR64
+// RUN: %clang_cc1 -x cl -O0 -cl-std=CL2.0 -emit-llvm -triple amdgcn-amd-amdhsa %s -o - | FileCheck %s --check-prefix=AMDGCN
 __kernel void testPipe( pipe int test )
 {
     int s = sizeof(test);
-    // X86: store %opencl.pipe_ro_t* %test, %opencl.pipe_ro_t** %test.addr, align 8
-    // X86: store i32 8, i32* %s, align 4
-    // SPIR: store %opencl.pipe_ro_t addrspace(1)* %test, %opencl.pipe_ro_t addrspace(1)** %test.addr, align 4
-    // SPIR: store i32 4, i32* %s, align 4
-    // SPIR64: store %opencl.pipe_ro_t addrspace(1)* %test, %opencl.pipe_ro_t addrspace(1)** %test.addr, align 8
-    // SPIR64: store i32 8, i32* %s, align 4
-    // AMDGCN: store %opencl.pipe_ro_t addrspace(1)* %test, %opencl.pipe_ro_t addrspace(1)* addrspace(5)* %test.addr, align 8
-    // AMDGCN: store i32 8, i32 addrspace(5)* %s, align 4
+    // X86: store ptr %test, ptr %test.addr, align 8
+    // X86: store i32 8, ptr %s, align 4
+    // SPIR: store ptr addrspace(1) %test, ptr %test.addr, align 4
+    // SPIR: store i32 4, ptr %s, align 4
+    // SPIR64: store ptr addrspace(1) %test, ptr %test.addr, align 8
+    // SPIR64: store i32 8, ptr %s, align 4
+    // AMDGCN: store ptr addrspace(1) %test, ptr addrspace(5) %test.addr, align 8
+    // AMDGCN: store i32 8, ptr addrspace(5) %s, align 4
 }

diff --git a/clang/test/OpenMP/target_map_codegen_hold.cpp b/clang/test/OpenMP/target_map_codegen_hold.cpp
index a5bd05536357f..7008d32d76cad 100644
--- a/clang/test/OpenMP/target_map_codegen_hold.cpp
+++ b/clang/test/OpenMP/target_map_codegen_hold.cpp
@@ -9,14 +9,14 @@
 
 // powerpc64le-ibm-linux-gnu
 
-// RUN: %clang_cc1 -no-opaque-pointers -DUSE -verify -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -DUSE -verify -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ \
 // RUN:     -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | \
 // RUN:   FileCheck %s --check-prefixes=CHECK-USE-PPC64LE
-// RUN: %clang_cc1 -no-opaque-pointers -DUSE -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -DUSE -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 \
 // RUN:     -triple powerpc64le-unknown-unknown -emit-pch -o %t %s
-// RUN: %clang_cc1 -no-opaque-pointers -DUSE -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -DUSE -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ \
 // RUN:     -triple powerpc64le-unknown-unknown -std=c++11 -include-pch %t \
 // RUN:     -verify %s -emit-llvm -o - | \
@@ -24,14 +24,14 @@
 
 // i386-pc-linux-gnu
 
-// RUN: %clang_cc1 -no-opaque-pointers -DUSE -verify -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -DUSE -verify -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=i386-pc-linux-gnu -x c++ \
 // RUN:     -triple i386-unknown-unknown -emit-llvm %s -o - | \
 // RUN:   FileCheck %s --check-prefixes=CHECK-USE-I386
-// RUN: %clang_cc1 -no-opaque-pointers -DUSE -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -DUSE -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=i386-pc-linux-gnu -x c++ -std=c++11 \
 // RUN:     -triple i386-unknown-unknown -emit-pch -o %t %s
-// RUN: %clang_cc1 -no-opaque-pointers -DUSE -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -DUSE -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=i386-pc-linux-gnu -x c++ \
 // RUN:     -triple i386-unknown-unknown -std=c++11 -include-pch %t -verify %s \
 // RUN:     -emit-llvm -o - | \
@@ -43,14 +43,14 @@
 
 // powerpc64le-ibm-linux-gnu
 
-// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ \
 // RUN:     -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | \
 // RUN:   FileCheck %s --check-prefixes=CHECK-NOUSE-PPC64LE
-// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 \
 // RUN:     -triple powerpc64le-unknown-unknown -emit-pch -o %t %s
-// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ \
 // RUN:     -triple powerpc64le-unknown-unknown -std=c++11 -include-pch %t \
 // RUN:     -verify %s -emit-llvm -o - | \
@@ -58,14 +58,14 @@
 
 // i386-pc-linux-gnu
 
-// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=i386-pc-linux-gnu -x c++ \
 // RUN:     -triple i386-unknown-unknown -emit-llvm %s -o - | \
 // RUN:   FileCheck %s --check-prefixes=CHECK-NOUSE-I386
-// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=i386-pc-linux-gnu -x c++ -std=c++11 \
 // RUN:     -triple i386-unknown-unknown -emit-pch -o %t %s
-// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-extensions \
+// RUN: %clang_cc1 -fopenmp -fopenmp-extensions \
 // RUN:     -fopenmp-targets=i386-pc-linux-gnu -x c++ \
 // RUN:     -triple i386-unknown-unknown -std=c++11 -include-pch %t -verify %s \
 // RUN:     -emit-llvm -o - | \
@@ -169,327 +169,297 @@ void ST::test_present_members() {
 // CHECK-USE-PPC64LE-NEXT:    [[A:%.*]] = alloca i32, align 4
 // CHECK-USE-PPC64LE-NEXT:    [[ST1:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
 // CHECK-USE-PPC64LE-NEXT:    [[ST2:%.*]] = alloca [[STRUCT_ST]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x i8*], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x i8*], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x i8*], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x ptr], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x ptr], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x ptr], align 8
 // CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [7 x i64], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8
-// CHECK-USE-PPC64LE-NEXT:    store i32 [[II]], i32* [[II_ADDR]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP0:%.*]] = load i32, i32* [[II_ADDR]], align 4
-// CHECK-USE-PPC64LE-NEXT:    store i32 [[TMP0]], i32* [[A]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST1]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST1]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP1:%.*]] = getelementptr i32, i32* [[J]], i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP2:%.*]] = bitcast i32* [[I]] to i8*
-// CHECK-USE-PPC64LE-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP1]] to i8*
-// CHECK-USE-PPC64LE-NEXT:    [[TMP4:%.*]] = ptrtoint i8* [[TMP3]] to i64
-// CHECK-USE-PPC64LE-NEXT:    [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
+// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 8
+// CHECK-USE-PPC64LE-NEXT:    store i32 [[II]], ptr [[II_ADDR]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP0:%.*]] = load i32, ptr [[II_ADDR]], align 4
+// CHECK-USE-PPC64LE-NEXT:    store i32 [[TMP0]], ptr [[A]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST1]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST1]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    [[TMP1:%.*]] = getelementptr i32, ptr [[J]], i32 1
+// CHECK-USE-PPC64LE-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[TMP1]] to i64
+// CHECK-USE-PPC64LE-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[I]] to i64
 // CHECK-USE-PPC64LE-NEXT:    [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
-// CHECK-USE-PPC64LE-NEXT:    [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-USE-PPC64LE-NEXT:    [[I1:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST2]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[J2:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST2]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[J2]], i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP9:%.*]] = bitcast i32* [[I1]] to i8*
-// CHECK-USE-PPC64LE-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP8]] to i8*
-// CHECK-USE-PPC64LE-NEXT:    [[TMP11:%.*]] = ptrtoint i8* [[TMP10]] to i64
-// CHECK-USE-PPC64LE-NEXT:    [[TMP12:%.*]] = ptrtoint i8* [[TMP9]] to i64
+// CHECK-USE-PPC64LE-NEXT:    [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-USE-PPC64LE-NEXT:    [[I1:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST2]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[J2:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST2]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    [[TMP8:%.*]] = getelementptr i32, ptr [[J2]], i32 1
+// CHECK-USE-PPC64LE-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[TMP8]] to i64
+// CHECK-USE-PPC64LE-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[I1]] to i64
 // CHECK-USE-PPC64LE-NEXT:    [[TMP13:%.*]] = sub i64 [[TMP11]], [[TMP12]]
-// CHECK-USE-PPC64LE-NEXT:    [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-USE-PPC64LE-NEXT:    [[TMP15:%.*]] = bitcast [7 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
-// CHECK-USE-PPC64LE-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP15]], i8* align 8 bitcast ([7 x i64]* @.offload_sizes to i8*), i64 56, i1 false)
-// CHECK-USE-PPC64LE-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to %struct.ST**
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP17]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[I]], i32** [[TMP19]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    store i64 [[TMP7]], i64* [[TMP20]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
-// CHECK-USE-PPC64LE-NEXT:    store i8* null, i8** [[TMP21]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.ST**
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP23]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[I]], i32** [[TMP25]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
-// CHECK-USE-PPC64LE-NEXT:    store i8* null, i8** [[TMP26]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
-// CHECK-USE-PPC64LE-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.ST**
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP28]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
-// CHECK-USE-PPC64LE-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[J]], i32** [[TMP30]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
-// CHECK-USE-PPC64LE-NEXT:    store i8* null, i8** [[TMP31]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
-// CHECK-USE-PPC64LE-NEXT:    [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[A]], i32** [[TMP33]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
-// CHECK-USE-PPC64LE-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[A]], i32** [[TMP35]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
-// CHECK-USE-PPC64LE-NEXT:    store i8* null, i8** [[TMP36]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to %struct.ST**
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP38]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[I1]], i32** [[TMP40]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
-// CHECK-USE-PPC64LE-NEXT:    store i64 [[TMP14]], i64* [[TMP41]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
-// CHECK-USE-PPC64LE-NEXT:    store i8* null, i8** [[TMP42]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
-// CHECK-USE-PPC64LE-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to %struct.ST**
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP44]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
-// CHECK-USE-PPC64LE-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[I1]], i32** [[TMP46]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
-// CHECK-USE-PPC64LE-NEXT:    store i8* null, i8** [[TMP47]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
-// CHECK-USE-PPC64LE-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to %struct.ST**
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP49]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
-// CHECK-USE-PPC64LE-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[J2]], i32** [[TMP51]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6
-// CHECK-USE-PPC64LE-NEXT:    store i8* null, i8** [[TMP52]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-USE-PPC64LE-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 56, i1 false)
+// CHECK-USE-PPC64LE-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[ST1]], ptr [[TMP16]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[I]], ptr [[TMP18]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    store i64 [[TMP7]], ptr [[TMP20]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP21]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[ST1]], ptr [[TMP22]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[I]], ptr [[TMP24]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP26]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[ST1]], ptr [[TMP27]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[J]], ptr [[TMP29]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP31]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[A]], ptr [[TMP32]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[A]], ptr [[TMP34]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP36]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[ST2]], ptr [[TMP37]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[I1]], ptr [[TMP39]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4
+// CHECK-USE-PPC64LE-NEXT:    store i64 [[TMP14]], ptr [[TMP41]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP42]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[ST2]], ptr [[TMP43]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 5
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[I1]], ptr [[TMP45]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP47]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[ST2]], ptr [[TMP48]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 6
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[J2]], ptr [[TMP50]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP52]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
 // CHECK-USE-PPC64LE-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    store i32 2, i32* [[TMP56]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    store i32 7, i32* [[TMP57]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
-// CHECK-USE-PPC64LE-NEXT:    store i8** [[TMP53]], i8*** [[TMP58]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
-// CHECK-USE-PPC64LE-NEXT:    store i8** [[TMP54]], i8*** [[TMP59]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
-// CHECK-USE-PPC64LE-NEXT:    store i64* [[TMP55]], i64** [[TMP60]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
-// CHECK-USE-PPC64LE-NEXT:    store i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP61]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
-// CHECK-USE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP62]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
-// CHECK-USE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP63]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
-// CHECK-USE-PPC64LE-NEXT:    store i64 0, i64* [[TMP64]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 9
-// CHECK-USE-PPC64LE-NEXT:    store i64 0, i64* [[TMP65]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 10
-// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP66]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 11
-// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP67]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 12
-// CHECK-USE-PPC64LE-NEXT:    store i32 0, i32* [[TMP68]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP69:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-USE-PPC64LE-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    store i32 2, ptr [[TMP56]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    store i32 7, ptr [[TMP57]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[TMP53]], ptr [[TMP58]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[TMP54]], ptr [[TMP59]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[TMP55]], ptr [[TMP60]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-USE-PPC64LE-NEXT:    store ptr @.offload_maptypes, ptr [[TMP61]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP62]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP63]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-USE-PPC64LE-NEXT:    store i64 0, ptr [[TMP64]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK-USE-PPC64LE-NEXT:    store i64 0, ptr [[TMP65]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP66]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP67]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK-USE-PPC64LE-NEXT:    store i32 0, ptr [[TMP68]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP69:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS]])
 // CHECK-USE-PPC64LE-NEXT:    [[TMP70:%.*]] = icmp ne i32 [[TMP69]], 0
 // CHECK-USE-PPC64LE-NEXT:    br i1 [[TMP70]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
 // CHECK-USE-PPC64LE:       omp_offload.failed:
-// CHECK-USE-PPC64LE-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}(%struct.ST* [[ST1]], i32* [[A]], %struct.ST* [[ST2]]) #[[ATTR3:[0-9]+]]
+// CHECK-USE-PPC64LE-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}(ptr [[ST1]], ptr [[A]], ptr [[ST2]]) #[[ATTR3:[0-9]+]]
 // CHECK-USE-PPC64LE-NEXT:    br label [[OMP_OFFLOAD_CONT]]
 // CHECK-USE-PPC64LE:       omp_offload.cont:
-// CHECK-USE-PPC64LE-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[A]], i32** [[TMP72]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[A]], i32** [[TMP74]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
-// CHECK-USE-PPC64LE-NEXT:    store i8* null, i8** [[TMP75]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[A]], ptr [[TMP71]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[A]], ptr [[TMP73]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP75]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
 // CHECK-USE-PPC64LE-NEXT:    [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    store i32 2, i32* [[TMP78]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    store i32 1, i32* [[TMP79]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 2
-// CHECK-USE-PPC64LE-NEXT:    store i8** [[TMP76]], i8*** [[TMP80]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 3
-// CHECK-USE-PPC64LE-NEXT:    store i8** [[TMP77]], i8*** [[TMP81]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 4
-// CHECK-USE-PPC64LE-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i32 0, i32 0), i64** [[TMP82]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 5
-// CHECK-USE-PPC64LE-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i64** [[TMP83]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 6
-// CHECK-USE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP84]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 7
-// CHECK-USE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP85]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 8
-// CHECK-USE-PPC64LE-NEXT:    store i64 0, i64* [[TMP86]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 9
-// CHECK-USE-PPC64LE-NEXT:    store i64 0, i64* [[TMP87]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 10
-// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP88]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 11
-// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP89]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 12
-// CHECK-USE-PPC64LE-NEXT:    store i32 0, i32* [[TMP90]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP91:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]])
+// CHECK-USE-PPC64LE-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    store i32 2, ptr [[TMP78]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    store i32 1, ptr [[TMP79]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 2
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[TMP76]], ptr [[TMP80]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 3
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[TMP77]], ptr [[TMP81]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 4
+// CHECK-USE-PPC64LE-NEXT:    store ptr @.offload_sizes.1, ptr [[TMP82]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 5
+// CHECK-USE-PPC64LE-NEXT:    store ptr @.offload_maptypes.2, ptr [[TMP83]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 6
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP84]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 7
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP85]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 8
+// CHECK-USE-PPC64LE-NEXT:    store i64 0, ptr [[TMP86]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 9
+// CHECK-USE-PPC64LE-NEXT:    store i64 0, ptr [[TMP87]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 10
+// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP88]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 11
+// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP89]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 12
+// CHECK-USE-PPC64LE-NEXT:    store i32 0, ptr [[TMP90]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP91:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS6]])
 // CHECK-USE-PPC64LE-NEXT:    [[TMP92:%.*]] = icmp ne i32 [[TMP91]], 0
 // CHECK-USE-PPC64LE-NEXT:    br i1 [[TMP92]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
 // CHECK-USE-PPC64LE:       omp_offload.failed7:
-// CHECK-USE-PPC64LE-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}(i32* [[A]]) #[[ATTR3]]
+// CHECK-USE-PPC64LE-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}(ptr [[A]]) #[[ATTR3]]
 // CHECK-USE-PPC64LE-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
 // CHECK-USE-PPC64LE:       omp_offload.cont8:
 // CHECK-USE-PPC64LE-NEXT:    ret void
 //
 //
 // CHECK-USE-PPC64LE-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}
-// CHECK-USE-PPC64LE-SAME: (%struct.ST* noundef nonnull align 4 dereferenceable(8) [[ST1:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], %struct.ST* noundef nonnull align 4 dereferenceable(8) [[ST2:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK-USE-PPC64LE-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[ST1:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[ST2:%.*]]) #[[ATTR1:[0-9]+]] {
 // CHECK-USE-PPC64LE-NEXT:  entry:
-// CHECK-USE-PPC64LE-NEXT:    [[ST1_ADDR:%.*]] = alloca %struct.ST*, align 8
-// CHECK-USE-PPC64LE-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
-// CHECK-USE-PPC64LE-NEXT:    [[ST2_ADDR:%.*]] = alloca %struct.ST*, align 8
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[ST1_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[ST2_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP0:%.*]] = load %struct.ST*, %struct.ST** [[ST1_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP2:%.*]] = load %struct.ST*, %struct.ST** [[ST2_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[TMP0]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP3:%.*]] = load i32, i32* [[I]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[ST1_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-USE-PPC64LE-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-USE-PPC64LE-NEXT:    [[ST2_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[ST1]], ptr [[ST1_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[ST2]], ptr [[ST2_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[ST1_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[ST2_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[TMP3:%.*]] = load i32, ptr [[I]], align 4
 // CHECK-USE-PPC64LE-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP3]], 1
-// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC]], i32* [[I]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
+// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC]], ptr [[I]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP1]], align 4
 // CHECK-USE-PPC64LE-NEXT:    [[INC1:%.*]] = add nsw i32 [[TMP4]], 1
-// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC1]], i32* [[TMP1]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[TMP0]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP5:%.*]] = load i32, i32* [[J]], align 4
+// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC1]], ptr [[TMP1]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[TMP0]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    [[TMP5:%.*]] = load i32, ptr [[J]], align 4
 // CHECK-USE-PPC64LE-NEXT:    [[INC2:%.*]] = add nsw i32 [[TMP5]], 1
-// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC2]], i32* [[J]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[I3:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[TMP2]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP6:%.*]] = load i32, i32* [[I3]], align 4
+// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC2]], ptr [[J]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[I3:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[TMP2]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[TMP6:%.*]] = load i32, ptr [[I3]], align 4
 // CHECK-USE-PPC64LE-NEXT:    [[INC4:%.*]] = add nsw i32 [[TMP6]], 1
-// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC4]], i32* [[I3]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[J5:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[TMP2]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP7:%.*]] = load i32, i32* [[J5]], align 4
+// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC4]], ptr [[I3]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[J5:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[TMP2]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    [[TMP7:%.*]] = load i32, ptr [[J5]], align 4
 // CHECK-USE-PPC64LE-NEXT:    [[INC6:%.*]] = add nsw i32 [[TMP7]], 1
-// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC6]], i32* [[J5]], align 4
+// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC6]], ptr [[J5]], align 4
 // CHECK-USE-PPC64LE-NEXT:    ret void
 //
 //
 // CHECK-USE-PPC64LE-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}
-// CHECK-USE-PPC64LE-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
+// CHECK-USE-PPC64LE-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
 // CHECK-USE-PPC64LE-NEXT:  entry:
-// CHECK-USE-PPC64LE-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
 // CHECK-USE-PPC64LE-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
-// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
+// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC]], ptr [[TMP0]], align 4
 // CHECK-USE-PPC64LE-NEXT:    ret void
 //
 //
 // CHECK-USE-PPC64LE-LABEL: define {{[^@]+}}@_ZN2ST20test_present_membersEv
-// CHECK-USE-PPC64LE-SAME: (%struct.ST* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR0]] align 2 {
+// CHECK-USE-PPC64LE-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR0]] align 2 {
 // CHECK-USE-PPC64LE-NEXT:  entry:
-// CHECK-USE-PPC64LE-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.ST*, align 8
-// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 8
 // CHECK-USE-PPC64LE-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[THIS]], %struct.ST** [[THIS_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[THIS1:%.*]] = load %struct.ST*, %struct.ST** [[THIS_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[THIS1]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[THIS1]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP0:%.*]] = getelementptr i32, i32* [[J]], i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP1:%.*]] = bitcast i32* [[I]] to i8*
-// CHECK-USE-PPC64LE-NEXT:    [[TMP2:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-USE-PPC64LE-NEXT:    [[TMP3:%.*]] = ptrtoint i8* [[TMP2]] to i64
-// CHECK-USE-PPC64LE-NEXT:    [[TMP4:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[THIS1]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    [[TMP0:%.*]] = getelementptr i32, ptr [[J]], i32 1
+// CHECK-USE-PPC64LE-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[TMP0]] to i64
+// CHECK-USE-PPC64LE-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[I]] to i64
 // CHECK-USE-PPC64LE-NEXT:    [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]]
-// CHECK-USE-PPC64LE-NEXT:    [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-USE-PPC64LE-NEXT:    [[TMP7:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
-// CHECK-USE-PPC64LE-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP7]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes.3 to i8*), i64 24, i1 false)
-// CHECK-USE-PPC64LE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to %struct.ST**
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP9]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[I]], i32** [[TMP11]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    store i64 [[TMP6]], i64* [[TMP12]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
-// CHECK-USE-PPC64LE-NEXT:    store i8* null, i8** [[TMP13]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to %struct.ST**
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP15]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[I]], i32** [[TMP17]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
-// CHECK-USE-PPC64LE-NEXT:    store i8* null, i8** [[TMP18]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
-// CHECK-USE-PPC64LE-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.ST**
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP20]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
-// CHECK-USE-PPC64LE-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32**
-// CHECK-USE-PPC64LE-NEXT:    store i32* [[J]], i32** [[TMP22]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
-// CHECK-USE-PPC64LE-NEXT:    store i8* null, i8** [[TMP23]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-USE-PPC64LE-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes.3, i64 24, i1 false)
+// CHECK-USE-PPC64LE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[THIS1]], ptr [[TMP8]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[I]], ptr [[TMP10]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    store i64 [[TMP6]], ptr [[TMP12]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP13]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[THIS1]], ptr [[TMP14]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[I]], ptr [[TMP16]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP18]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[THIS1]], ptr [[TMP19]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[J]], ptr [[TMP21]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP23]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
 // CHECK-USE-PPC64LE-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    store i32 2, i32* [[TMP27]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    store i32 3, i32* [[TMP28]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
-// CHECK-USE-PPC64LE-NEXT:    store i8** [[TMP24]], i8*** [[TMP29]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
-// CHECK-USE-PPC64LE-NEXT:    store i8** [[TMP25]], i8*** [[TMP30]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
-// CHECK-USE-PPC64LE-NEXT:    store i64* [[TMP26]], i64** [[TMP31]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
-// CHECK-USE-PPC64LE-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i64** [[TMP32]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
-// CHECK-USE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP33]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
-// CHECK-USE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP34]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
-// CHECK-USE-PPC64LE-NEXT:    store i64 0, i64* [[TMP35]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 9
-// CHECK-USE-PPC64LE-NEXT:    store i64 0, i64* [[TMP36]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 10
-// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP37]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 11
-// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP38]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 12
-// CHECK-USE-PPC64LE-NEXT:    store i32 0, i32* [[TMP39]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-USE-PPC64LE-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    store i32 2, ptr [[TMP27]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    store i32 3, ptr [[TMP28]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[TMP24]], ptr [[TMP29]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[TMP25]], ptr [[TMP30]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[TMP26]], ptr [[TMP31]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-USE-PPC64LE-NEXT:    store ptr @.offload_maptypes.4, ptr [[TMP32]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP33]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-USE-PPC64LE-NEXT:    store ptr null, ptr [[TMP34]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-USE-PPC64LE-NEXT:    store i64 0, ptr [[TMP35]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK-USE-PPC64LE-NEXT:    store i64 0, ptr [[TMP36]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP37]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK-USE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP38]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK-USE-PPC64LE-NEXT:    store i32 0, ptr [[TMP39]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS]])
 // CHECK-USE-PPC64LE-NEXT:    [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
 // CHECK-USE-PPC64LE-NEXT:    br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
 // CHECK-USE-PPC64LE:       omp_offload.failed:
-// CHECK-USE-PPC64LE-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}(%struct.ST* [[THIS1]]) #[[ATTR3]]
+// CHECK-USE-PPC64LE-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}(ptr [[THIS1]]) #[[ATTR3]]
 // CHECK-USE-PPC64LE-NEXT:    br label [[OMP_OFFLOAD_CONT]]
 // CHECK-USE-PPC64LE:       omp_offload.cont:
 // CHECK-USE-PPC64LE-NEXT:    ret void
 //
 //
 // CHECK-USE-PPC64LE-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}
-// CHECK-USE-PPC64LE-SAME: (%struct.ST* noundef [[THIS:%.*]]) #[[ATTR1]] {
+// CHECK-USE-PPC64LE-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
 // CHECK-USE-PPC64LE-NEXT:  entry:
-// CHECK-USE-PPC64LE-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.ST*, align 8
-// CHECK-USE-PPC64LE-NEXT:    store %struct.ST* [[THIS]], %struct.ST** [[THIS_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[TMP0:%.*]] = load %struct.ST*, %struct.ST** [[THIS_ADDR]], align 8
-// CHECK-USE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[TMP0]], i32 0, i32 0
-// CHECK-USE-PPC64LE-NEXT:    [[TMP1:%.*]] = load i32, i32* [[I]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-USE-PPC64LE-NEXT:    store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK-USE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK-USE-PPC64LE-NEXT:    [[TMP1:%.*]] = load i32, ptr [[I]], align 4
 // CHECK-USE-PPC64LE-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
-// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC]], i32* [[I]], align 4
-// CHECK-USE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[TMP0]], i32 0, i32 1
-// CHECK-USE-PPC64LE-NEXT:    [[TMP2:%.*]] = load i32, i32* [[J]], align 4
+// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC]], ptr [[I]], align 4
+// CHECK-USE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[TMP0]], i32 0, i32 1
+// CHECK-USE-PPC64LE-NEXT:    [[TMP2:%.*]] = load i32, ptr [[J]], align 4
 // CHECK-USE-PPC64LE-NEXT:    [[INC1:%.*]] = add nsw i32 [[TMP2]], 1
-// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC1]], i32* [[J]], align 4
+// CHECK-USE-PPC64LE-NEXT:    store i32 [[INC1]], ptr [[J]], align 4
 // CHECK-USE-PPC64LE-NEXT:    ret void
 //
 //
@@ -507,327 +477,297 @@ void ST::test_present_members() {
 // CHECK-USE-I386-NEXT:    [[A:%.*]] = alloca i32, align 4
 // CHECK-USE-I386-NEXT:    [[ST1:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
 // CHECK-USE-I386-NEXT:    [[ST2:%.*]] = alloca [[STRUCT_ST]], align 4
-// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x i8*], align 4
-// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x i8*], align 4
-// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x i8*], align 4
+// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x ptr], align 4
+// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x ptr], align 4
+// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x ptr], align 4
 // CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [7 x i64], align 4
-// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
-// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
-// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
-// CHECK-USE-I386-NEXT:    store i32 [[II]], i32* [[II_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP0:%.*]] = load i32, i32* [[II_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    store i32 [[TMP0]], i32* [[A]], align 4
-// CHECK-USE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST1]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST1]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    [[TMP1:%.*]] = getelementptr i32, i32* [[J]], i32 1
-// CHECK-USE-I386-NEXT:    [[TMP2:%.*]] = bitcast i32* [[I]] to i8*
-// CHECK-USE-I386-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP1]] to i8*
-// CHECK-USE-I386-NEXT:    [[TMP4:%.*]] = ptrtoint i8* [[TMP3]] to i64
-// CHECK-USE-I386-NEXT:    [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
+// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 4
+// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 4
+// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 4
+// CHECK-USE-I386-NEXT:    store i32 [[II]], ptr [[II_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP0:%.*]] = load i32, ptr [[II_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    store i32 [[TMP0]], ptr [[A]], align 4
+// CHECK-USE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST1]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST1]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    [[TMP1:%.*]] = getelementptr i32, ptr [[J]], i32 1
+// CHECK-USE-I386-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[TMP1]] to i64
+// CHECK-USE-I386-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[I]] to i64
 // CHECK-USE-I386-NEXT:    [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
-// CHECK-USE-I386-NEXT:    [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-USE-I386-NEXT:    [[I1:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST2]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[J2:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST2]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[J2]], i32 1
-// CHECK-USE-I386-NEXT:    [[TMP9:%.*]] = bitcast i32* [[I1]] to i8*
-// CHECK-USE-I386-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP8]] to i8*
-// CHECK-USE-I386-NEXT:    [[TMP11:%.*]] = ptrtoint i8* [[TMP10]] to i64
-// CHECK-USE-I386-NEXT:    [[TMP12:%.*]] = ptrtoint i8* [[TMP9]] to i64
+// CHECK-USE-I386-NEXT:    [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-USE-I386-NEXT:    [[I1:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST2]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[J2:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST2]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    [[TMP8:%.*]] = getelementptr i32, ptr [[J2]], i32 1
+// CHECK-USE-I386-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[TMP8]] to i64
+// CHECK-USE-I386-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[I1]] to i64
 // CHECK-USE-I386-NEXT:    [[TMP13:%.*]] = sub i64 [[TMP11]], [[TMP12]]
-// CHECK-USE-I386-NEXT:    [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-USE-I386-NEXT:    [[TMP15:%.*]] = bitcast [7 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
-// CHECK-USE-I386-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP15]], i8* align 4 bitcast ([7 x i64]* @.offload_sizes to i8*), i32 56, i1 false)
-// CHECK-USE-I386-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to %struct.ST**
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP17]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[I]], i32** [[TMP19]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    store i64 [[TMP7]], i64* [[TMP20]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    store i8* null, i8** [[TMP21]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.ST**
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP23]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[I]], i32** [[TMP25]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    store i8* null, i8** [[TMP26]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
-// CHECK-USE-I386-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.ST**
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP28]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
-// CHECK-USE-I386-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[J]], i32** [[TMP30]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
-// CHECK-USE-I386-NEXT:    store i8* null, i8** [[TMP31]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
-// CHECK-USE-I386-NEXT:    [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[A]], i32** [[TMP33]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
-// CHECK-USE-I386-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[A]], i32** [[TMP35]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
-// CHECK-USE-I386-NEXT:    store i8* null, i8** [[TMP36]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
-// CHECK-USE-I386-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to %struct.ST**
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP38]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
-// CHECK-USE-I386-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[I1]], i32** [[TMP40]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
-// CHECK-USE-I386-NEXT:    store i64 [[TMP14]], i64* [[TMP41]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
-// CHECK-USE-I386-NEXT:    store i8* null, i8** [[TMP42]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
-// CHECK-USE-I386-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to %struct.ST**
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP44]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
-// CHECK-USE-I386-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[I1]], i32** [[TMP46]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
-// CHECK-USE-I386-NEXT:    store i8* null, i8** [[TMP47]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
-// CHECK-USE-I386-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to %struct.ST**
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP49]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
-// CHECK-USE-I386-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[J2]], i32** [[TMP51]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6
-// CHECK-USE-I386-NEXT:    store i8* null, i8** [[TMP52]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-USE-I386-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 56, i1 false)
+// CHECK-USE-I386-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store ptr [[ST1]], ptr [[TMP16]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store ptr [[I]], ptr [[TMP18]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store i64 [[TMP7]], ptr [[TMP20]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP21]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    store ptr [[ST1]], ptr [[TMP22]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    store ptr [[I]], ptr [[TMP24]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP26]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK-USE-I386-NEXT:    store ptr [[ST1]], ptr [[TMP27]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK-USE-I386-NEXT:    store ptr [[J]], ptr [[TMP29]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP31]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
+// CHECK-USE-I386-NEXT:    store ptr [[A]], ptr [[TMP32]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK-USE-I386-NEXT:    store ptr [[A]], ptr [[TMP34]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP36]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
+// CHECK-USE-I386-NEXT:    store ptr [[ST2]], ptr [[TMP37]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4
+// CHECK-USE-I386-NEXT:    store ptr [[I1]], ptr [[TMP39]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4
+// CHECK-USE-I386-NEXT:    store i64 [[TMP14]], ptr [[TMP41]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP42]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
+// CHECK-USE-I386-NEXT:    store ptr [[ST2]], ptr [[TMP43]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 5
+// CHECK-USE-I386-NEXT:    store ptr [[I1]], ptr [[TMP45]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP47]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
+// CHECK-USE-I386-NEXT:    store ptr [[ST2]], ptr [[TMP48]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 6
+// CHECK-USE-I386-NEXT:    store ptr [[J2]], ptr [[TMP50]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP52]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
 // CHECK-USE-I386-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
-// CHECK-USE-I386-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    store i32 2, i32* [[TMP56]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    store i32 7, i32* [[TMP57]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
-// CHECK-USE-I386-NEXT:    store i8** [[TMP53]], i8*** [[TMP58]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
-// CHECK-USE-I386-NEXT:    store i8** [[TMP54]], i8*** [[TMP59]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
-// CHECK-USE-I386-NEXT:    store i64* [[TMP55]], i64** [[TMP60]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
-// CHECK-USE-I386-NEXT:    store i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP61]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
-// CHECK-USE-I386-NEXT:    store i8** null, i8*** [[TMP62]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
-// CHECK-USE-I386-NEXT:    store i8** null, i8*** [[TMP63]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
-// CHECK-USE-I386-NEXT:    store i64 0, i64* [[TMP64]], align 8
-// CHECK-USE-I386-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 9
-// CHECK-USE-I386-NEXT:    store i64 0, i64* [[TMP65]], align 8
-// CHECK-USE-I386-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 10
-// CHECK-USE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP66]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 11
-// CHECK-USE-I386-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP67]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 12
-// CHECK-USE-I386-NEXT:    store i32 0, i32* [[TMP68]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP69:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-USE-I386-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store i32 2, ptr [[TMP56]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    store i32 7, ptr [[TMP57]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-USE-I386-NEXT:    store ptr [[TMP53]], ptr [[TMP58]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-USE-I386-NEXT:    store ptr [[TMP54]], ptr [[TMP59]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-USE-I386-NEXT:    store ptr [[TMP55]], ptr [[TMP60]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-USE-I386-NEXT:    store ptr @.offload_maptypes, ptr [[TMP61]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP62]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP63]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-USE-I386-NEXT:    store i64 0, ptr [[TMP64]], align 8
+// CHECK-USE-I386-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK-USE-I386-NEXT:    store i64 0, ptr [[TMP65]], align 8
+// CHECK-USE-I386-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK-USE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP66]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK-USE-I386-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP67]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK-USE-I386-NEXT:    store i32 0, ptr [[TMP68]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP69:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS]])
 // CHECK-USE-I386-NEXT:    [[TMP70:%.*]] = icmp ne i32 [[TMP69]], 0
 // CHECK-USE-I386-NEXT:    br i1 [[TMP70]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
 // CHECK-USE-I386:       omp_offload.failed:
-// CHECK-USE-I386-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}(%struct.ST* [[ST1]], i32* [[A]], %struct.ST* [[ST2]]) #[[ATTR3:[0-9]+]]
+// CHECK-USE-I386-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}(ptr [[ST1]], ptr [[A]], ptr [[ST2]]) #[[ATTR3:[0-9]+]]
 // CHECK-USE-I386-NEXT:    br label [[OMP_OFFLOAD_CONT]]
 // CHECK-USE-I386:       omp_offload.cont:
-// CHECK-USE-I386-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[A]], i32** [[TMP72]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[A]], i32** [[TMP74]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    store i8* null, i8** [[TMP75]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store ptr [[A]], ptr [[TMP71]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store ptr [[A]], ptr [[TMP73]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP75]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
 // CHECK-USE-I386-NEXT:    [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
-// CHECK-USE-I386-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    store i32 2, i32* [[TMP78]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    store i32 1, i32* [[TMP79]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 2
-// CHECK-USE-I386-NEXT:    store i8** [[TMP76]], i8*** [[TMP80]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 3
-// CHECK-USE-I386-NEXT:    store i8** [[TMP77]], i8*** [[TMP81]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 4
-// CHECK-USE-I386-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i32 0, i32 0), i64** [[TMP82]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 5
-// CHECK-USE-I386-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i64** [[TMP83]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 6
-// CHECK-USE-I386-NEXT:    store i8** null, i8*** [[TMP84]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 7
-// CHECK-USE-I386-NEXT:    store i8** null, i8*** [[TMP85]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 8
-// CHECK-USE-I386-NEXT:    store i64 0, i64* [[TMP86]], align 8
-// CHECK-USE-I386-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 9
-// CHECK-USE-I386-NEXT:    store i64 0, i64* [[TMP87]], align 8
-// CHECK-USE-I386-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 10
-// CHECK-USE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP88]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 11
-// CHECK-USE-I386-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP89]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 12
-// CHECK-USE-I386-NEXT:    store i32 0, i32* [[TMP90]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP91:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]])
+// CHECK-USE-I386-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store i32 2, ptr [[TMP78]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    store i32 1, ptr [[TMP79]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 2
+// CHECK-USE-I386-NEXT:    store ptr [[TMP76]], ptr [[TMP80]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 3
+// CHECK-USE-I386-NEXT:    store ptr [[TMP77]], ptr [[TMP81]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 4
+// CHECK-USE-I386-NEXT:    store ptr @.offload_sizes.1, ptr [[TMP82]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 5
+// CHECK-USE-I386-NEXT:    store ptr @.offload_maptypes.2, ptr [[TMP83]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 6
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP84]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 7
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP85]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 8
+// CHECK-USE-I386-NEXT:    store i64 0, ptr [[TMP86]], align 8
+// CHECK-USE-I386-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 9
+// CHECK-USE-I386-NEXT:    store i64 0, ptr [[TMP87]], align 8
+// CHECK-USE-I386-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 10
+// CHECK-USE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP88]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 11
+// CHECK-USE-I386-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP89]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 12
+// CHECK-USE-I386-NEXT:    store i32 0, ptr [[TMP90]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP91:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS6]])
 // CHECK-USE-I386-NEXT:    [[TMP92:%.*]] = icmp ne i32 [[TMP91]], 0
 // CHECK-USE-I386-NEXT:    br i1 [[TMP92]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
 // CHECK-USE-I386:       omp_offload.failed7:
-// CHECK-USE-I386-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}(i32* [[A]]) #[[ATTR3]]
+// CHECK-USE-I386-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}(ptr [[A]]) #[[ATTR3]]
 // CHECK-USE-I386-NEXT:    br label [[OMP_OFFLOAD_CONT8]]
 // CHECK-USE-I386:       omp_offload.cont8:
 // CHECK-USE-I386-NEXT:    ret void
 //
 //
 // CHECK-USE-I386-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}
-// CHECK-USE-I386-SAME: (%struct.ST* noundef nonnull align 4 dereferenceable(8) [[ST1:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], %struct.ST* noundef nonnull align 4 dereferenceable(8) [[ST2:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK-USE-I386-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[ST1:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[ST2:%.*]]) #[[ATTR1:[0-9]+]] {
 // CHECK-USE-I386-NEXT:  entry:
-// CHECK-USE-I386-NEXT:    [[ST1_ADDR:%.*]] = alloca %struct.ST*, align 4
-// CHECK-USE-I386-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
-// CHECK-USE-I386-NEXT:    [[ST2_ADDR:%.*]] = alloca %struct.ST*, align 4
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[ST1_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[ST2_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP0:%.*]] = load %struct.ST*, %struct.ST** [[ST1_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP2:%.*]] = load %struct.ST*, %struct.ST** [[ST2_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[TMP0]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP3:%.*]] = load i32, i32* [[I]], align 4
+// CHECK-USE-I386-NEXT:    [[ST1_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-USE-I386-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-USE-I386-NEXT:    [[ST2_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-USE-I386-NEXT:    store ptr [[ST1]], ptr [[ST1_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    store ptr [[ST2]], ptr [[ST2_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[ST1_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[ST2_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[TMP3:%.*]] = load i32, ptr [[I]], align 4
 // CHECK-USE-I386-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP3]], 1
-// CHECK-USE-I386-NEXT:    store i32 [[INC]], i32* [[I]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
+// CHECK-USE-I386-NEXT:    store i32 [[INC]], ptr [[I]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP1]], align 4
 // CHECK-USE-I386-NEXT:    [[INC1:%.*]] = add nsw i32 [[TMP4]], 1
-// CHECK-USE-I386-NEXT:    store i32 [[INC1]], i32* [[TMP1]], align 4
-// CHECK-USE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[TMP0]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    [[TMP5:%.*]] = load i32, i32* [[J]], align 4
+// CHECK-USE-I386-NEXT:    store i32 [[INC1]], ptr [[TMP1]], align 4
+// CHECK-USE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[TMP0]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    [[TMP5:%.*]] = load i32, ptr [[J]], align 4
 // CHECK-USE-I386-NEXT:    [[INC2:%.*]] = add nsw i32 [[TMP5]], 1
-// CHECK-USE-I386-NEXT:    store i32 [[INC2]], i32* [[J]], align 4
-// CHECK-USE-I386-NEXT:    [[I3:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[TMP2]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP6:%.*]] = load i32, i32* [[I3]], align 4
+// CHECK-USE-I386-NEXT:    store i32 [[INC2]], ptr [[J]], align 4
+// CHECK-USE-I386-NEXT:    [[I3:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[TMP2]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[TMP6:%.*]] = load i32, ptr [[I3]], align 4
 // CHECK-USE-I386-NEXT:    [[INC4:%.*]] = add nsw i32 [[TMP6]], 1
-// CHECK-USE-I386-NEXT:    store i32 [[INC4]], i32* [[I3]], align 4
-// CHECK-USE-I386-NEXT:    [[J5:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[TMP2]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    [[TMP7:%.*]] = load i32, i32* [[J5]], align 4
+// CHECK-USE-I386-NEXT:    store i32 [[INC4]], ptr [[I3]], align 4
+// CHECK-USE-I386-NEXT:    [[J5:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[TMP2]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    [[TMP7:%.*]] = load i32, ptr [[J5]], align 4
 // CHECK-USE-I386-NEXT:    [[INC6:%.*]] = add nsw i32 [[TMP7]], 1
-// CHECK-USE-I386-NEXT:    store i32 [[INC6]], i32* [[J5]], align 4
+// CHECK-USE-I386-NEXT:    store i32 [[INC6]], ptr [[J5]], align 4
 // CHECK-USE-I386-NEXT:    ret void
 //
 //
 // CHECK-USE-I386-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}
-// CHECK-USE-I386-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
+// CHECK-USE-I386-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1]] {
 // CHECK-USE-I386-NEXT:  entry:
-// CHECK-USE-I386-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 4
-// CHECK-USE-I386-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
+// CHECK-USE-I386-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-USE-I386-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
 // CHECK-USE-I386-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
-// CHECK-USE-I386-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
+// CHECK-USE-I386-NEXT:    store i32 [[INC]], ptr [[TMP0]], align 4
 // CHECK-USE-I386-NEXT:    ret void
 //
 //
 // CHECK-USE-I386-LABEL: define {{[^@]+}}@_ZN2ST20test_present_membersEv
-// CHECK-USE-I386-SAME: (%struct.ST* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR0]] align 2 {
+// CHECK-USE-I386-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR0]] align 2 {
 // CHECK-USE-I386-NEXT:  entry:
-// CHECK-USE-I386-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.ST*, align 4
-// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
-// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
-// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
+// CHECK-USE-I386-NEXT:    [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 4
+// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 4
+// CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 4
 // CHECK-USE-I386-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[THIS]], %struct.ST** [[THIS_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    [[THIS1:%.*]] = load %struct.ST*, %struct.ST** [[THIS_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[THIS1]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[THIS1]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    [[TMP0:%.*]] = getelementptr i32, i32* [[J]], i32 1
-// CHECK-USE-I386-NEXT:    [[TMP1:%.*]] = bitcast i32* [[I]] to i8*
-// CHECK-USE-I386-NEXT:    [[TMP2:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-USE-I386-NEXT:    [[TMP3:%.*]] = ptrtoint i8* [[TMP2]] to i64
-// CHECK-USE-I386-NEXT:    [[TMP4:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-USE-I386-NEXT:    store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[THIS1]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    [[TMP0:%.*]] = getelementptr i32, ptr [[J]], i32 1
+// CHECK-USE-I386-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[TMP0]] to i64
+// CHECK-USE-I386-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[I]] to i64
 // CHECK-USE-I386-NEXT:    [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]]
-// CHECK-USE-I386-NEXT:    [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-USE-I386-NEXT:    [[TMP7:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
-// CHECK-USE-I386-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP7]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes.3 to i8*), i32 24, i1 false)
-// CHECK-USE-I386-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to %struct.ST**
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP9]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[I]], i32** [[TMP11]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    store i64 [[TMP6]], i64* [[TMP12]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    store i8* null, i8** [[TMP13]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to %struct.ST**
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP15]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[I]], i32** [[TMP17]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    store i8* null, i8** [[TMP18]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
-// CHECK-USE-I386-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.ST**
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP20]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
-// CHECK-USE-I386-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32**
-// CHECK-USE-I386-NEXT:    store i32* [[J]], i32** [[TMP22]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
-// CHECK-USE-I386-NEXT:    store i8* null, i8** [[TMP23]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-USE-I386-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes.3, i32 24, i1 false)
+// CHECK-USE-I386-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store ptr [[THIS1]], ptr [[TMP8]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store ptr [[I]], ptr [[TMP10]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store i64 [[TMP6]], ptr [[TMP12]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP13]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    store ptr [[THIS1]], ptr [[TMP14]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    store ptr [[I]], ptr [[TMP16]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP18]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK-USE-I386-NEXT:    store ptr [[THIS1]], ptr [[TMP19]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK-USE-I386-NEXT:    store ptr [[J]], ptr [[TMP21]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP23]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
 // CHECK-USE-I386-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
-// CHECK-USE-I386-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    store i32 2, i32* [[TMP27]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    store i32 3, i32* [[TMP28]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
-// CHECK-USE-I386-NEXT:    store i8** [[TMP24]], i8*** [[TMP29]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
-// CHECK-USE-I386-NEXT:    store i8** [[TMP25]], i8*** [[TMP30]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
-// CHECK-USE-I386-NEXT:    store i64* [[TMP26]], i64** [[TMP31]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
-// CHECK-USE-I386-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i64** [[TMP32]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
-// CHECK-USE-I386-NEXT:    store i8** null, i8*** [[TMP33]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
-// CHECK-USE-I386-NEXT:    store i8** null, i8*** [[TMP34]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
-// CHECK-USE-I386-NEXT:    store i64 0, i64* [[TMP35]], align 8
-// CHECK-USE-I386-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 9
-// CHECK-USE-I386-NEXT:    store i64 0, i64* [[TMP36]], align 8
-// CHECK-USE-I386-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 10
-// CHECK-USE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP37]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 11
-// CHECK-USE-I386-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP38]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 12
-// CHECK-USE-I386-NEXT:    store i32 0, i32* [[TMP39]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-USE-I386-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    store i32 2, ptr [[TMP27]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    store i32 3, ptr [[TMP28]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-USE-I386-NEXT:    store ptr [[TMP24]], ptr [[TMP29]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-USE-I386-NEXT:    store ptr [[TMP25]], ptr [[TMP30]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-USE-I386-NEXT:    store ptr [[TMP26]], ptr [[TMP31]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-USE-I386-NEXT:    store ptr @.offload_maptypes.4, ptr [[TMP32]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP33]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-USE-I386-NEXT:    store ptr null, ptr [[TMP34]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-USE-I386-NEXT:    store i64 0, ptr [[TMP35]], align 8
+// CHECK-USE-I386-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK-USE-I386-NEXT:    store i64 0, ptr [[TMP36]], align 8
+// CHECK-USE-I386-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK-USE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP37]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK-USE-I386-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP38]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK-USE-I386-NEXT:    store i32 0, ptr [[TMP39]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS]])
 // CHECK-USE-I386-NEXT:    [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
 // CHECK-USE-I386-NEXT:    br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
 // CHECK-USE-I386:       omp_offload.failed:
-// CHECK-USE-I386-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}(%struct.ST* [[THIS1]]) #[[ATTR3]]
+// CHECK-USE-I386-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}(ptr [[THIS1]]) #[[ATTR3]]
 // CHECK-USE-I386-NEXT:    br label [[OMP_OFFLOAD_CONT]]
 // CHECK-USE-I386:       omp_offload.cont:
 // CHECK-USE-I386-NEXT:    ret void
 //
 //
 // CHECK-USE-I386-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}
-// CHECK-USE-I386-SAME: (%struct.ST* noundef [[THIS:%.*]]) #[[ATTR1]] {
+// CHECK-USE-I386-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
 // CHECK-USE-I386-NEXT:  entry:
-// CHECK-USE-I386-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.ST*, align 4
-// CHECK-USE-I386-NEXT:    store %struct.ST* [[THIS]], %struct.ST** [[THIS_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    [[TMP0:%.*]] = load %struct.ST*, %struct.ST** [[THIS_ADDR]], align 4
-// CHECK-USE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[TMP0]], i32 0, i32 0
-// CHECK-USE-I386-NEXT:    [[TMP1:%.*]] = load i32, i32* [[I]], align 4
+// CHECK-USE-I386-NEXT:    [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-USE-I386-NEXT:    store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK-USE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], ptr [[TMP0]], i32 0, i32 0
+// CHECK-USE-I386-NEXT:    [[TMP1:%.*]] = load i32, ptr [[I]], align 4
 // CHECK-USE-I386-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP1]], 1
-// CHECK-USE-I386-NEXT:    store i32 [[INC]], i32* [[I]], align 4
-// CHECK-USE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[TMP0]], i32 0, i32 1
-// CHECK-USE-I386-NEXT:    [[TMP2:%.*]] = load i32, i32* [[J]], align 4
+// CHECK-USE-I386-NEXT:    store i32 [[INC]], ptr [[I]], align 4
+// CHECK-USE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[TMP0]], i32 0, i32 1
+// CHECK-USE-I386-NEXT:    [[TMP2:%.*]] = load i32, ptr [[J]], align 4
 // CHECK-USE-I386-NEXT:    [[INC1:%.*]] = add nsw i32 [[TMP2]], 1
-// CHECK-USE-I386-NEXT:    store i32 [[INC1]], i32* [[J]], align 4
+// CHECK-USE-I386-NEXT:    store i32 [[INC1]], ptr [[J]], align 4
 // CHECK-USE-I386-NEXT:    ret void
 //
 //
@@ -845,171 +785,150 @@ void ST::test_present_members() {
 // CHECK-NOUSE-PPC64LE-NEXT:    [[A:%.*]] = alloca i32, align 4
 // CHECK-NOUSE-PPC64LE-NEXT:    [[ST1:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
 // CHECK-NOUSE-PPC64LE-NEXT:    [[ST2:%.*]] = alloca [[STRUCT_ST]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x i8*], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x i8*], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x i8*], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x ptr], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x ptr], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x ptr], align 8
 // CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [7 x i64], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32 [[II]], i32* [[II_ADDR]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP0:%.*]] = load i32, i32* [[II_ADDR]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32 [[TMP0]], i32* [[A]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST1]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST1]], i32 0, i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP1:%.*]] = getelementptr i32, i32* [[J]], i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP2:%.*]] = bitcast i32* [[I]] to i8*
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP1]] to i8*
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP4:%.*]] = ptrtoint i8* [[TMP3]] to i64
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
+// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    store i32 [[II]], ptr [[II_ADDR]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP0:%.*]] = load i32, ptr [[II_ADDR]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    store i32 [[TMP0]], ptr [[A]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST1]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST1]], i32 0, i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP1:%.*]] = getelementptr i32, ptr [[J]], i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[TMP1]] to i64
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[I]] to i64
 // CHECK-NOUSE-PPC64LE-NEXT:    [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-NOUSE-PPC64LE-NEXT:    [[I1:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST2]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[J2:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST2]], i32 0, i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[J2]], i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP9:%.*]] = bitcast i32* [[I1]] to i8*
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP8]] to i8*
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP11:%.*]] = ptrtoint i8* [[TMP10]] to i64
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP12:%.*]] = ptrtoint i8* [[TMP9]] to i64
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-NOUSE-PPC64LE-NEXT:    [[I1:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST2]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    [[J2:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST2]], i32 0, i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP8:%.*]] = getelementptr i32, ptr [[J2]], i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[TMP8]] to i64
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[I1]] to i64
 // CHECK-NOUSE-PPC64LE-NEXT:    [[TMP13:%.*]] = sub i64 [[TMP11]], [[TMP12]]
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP15:%.*]] = bitcast [7 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
-// CHECK-NOUSE-PPC64LE-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP15]], i8* align 8 bitcast ([7 x i64]* @.offload_sizes to i8*), i64 56, i1 false)
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to %struct.ST**
-// CHECK-NOUSE-PPC64LE-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP17]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[I]], i32** [[TMP19]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64 [[TMP7]], i64* [[TMP20]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8* null, i8** [[TMP21]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.ST**
-// CHECK-NOUSE-PPC64LE-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP23]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[I]], i32** [[TMP25]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8* null, i8** [[TMP26]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.ST**
-// CHECK-NOUSE-PPC64LE-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP28]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[J]], i32** [[TMP30]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8* null, i8** [[TMP31]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[A]], i32** [[TMP33]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[A]], i32** [[TMP35]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8* null, i8** [[TMP36]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to %struct.ST**
-// CHECK-NOUSE-PPC64LE-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP38]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[I1]], i32** [[TMP40]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64 [[TMP14]], i64* [[TMP41]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8* null, i8** [[TMP42]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to %struct.ST**
-// CHECK-NOUSE-PPC64LE-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP44]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[I1]], i32** [[TMP46]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8* null, i8** [[TMP47]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to %struct.ST**
-// CHECK-NOUSE-PPC64LE-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP49]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[J2]], i32** [[TMP51]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8* null, i8** [[TMP52]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-NOUSE-PPC64LE-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 56, i1 false)
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[ST1]], ptr [[TMP16]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[I]], ptr [[TMP18]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store i64 [[TMP7]], ptr [[TMP20]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP21]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[ST1]], ptr [[TMP22]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[I]], ptr [[TMP24]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP26]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[ST1]], ptr [[TMP27]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[J]], ptr [[TMP29]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP31]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[A]], ptr [[TMP32]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[A]], ptr [[TMP34]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP36]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[ST2]], ptr [[TMP37]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[I1]], ptr [[TMP39]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4
+// CHECK-NOUSE-PPC64LE-NEXT:    store i64 [[TMP14]], ptr [[TMP41]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP42]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[ST2]], ptr [[TMP43]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 5
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[I1]], ptr [[TMP45]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP47]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[ST2]], ptr [[TMP48]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 6
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[J2]], ptr [[TMP50]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP52]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
 // CHECK-NOUSE-PPC64LE-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32 2, i32* [[TMP56]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32 7, i32* [[TMP57]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** [[TMP53]], i8*** [[TMP58]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** [[TMP54]], i8*** [[TMP59]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64* [[TMP55]], i64** [[TMP60]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP61]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP62]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP63]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, i64* [[TMP64]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 9
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, i64* [[TMP65]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 10
-// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP66]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 11
-// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP67]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 12
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32 0, i32* [[TMP68]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP69:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store i32 2, ptr [[TMP56]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    store i32 7, ptr [[TMP57]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[TMP53]], ptr [[TMP58]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[TMP54]], ptr [[TMP59]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[TMP55]], ptr [[TMP60]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr @.offload_maptypes, ptr [[TMP61]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP62]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP63]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, ptr [[TMP64]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, ptr [[TMP65]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP66]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP67]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK-NOUSE-PPC64LE-NEXT:    store i32 0, ptr [[TMP68]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP69:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS]])
 // CHECK-NOUSE-PPC64LE-NEXT:    [[TMP70:%.*]] = icmp ne i32 [[TMP69]], 0
 // CHECK-NOUSE-PPC64LE-NEXT:    br i1 [[TMP70]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
 // CHECK-NOUSE-PPC64LE:       omp_offload.failed:
 // CHECK-NOUSE-PPC64LE-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}() #[[ATTR3:[0-9]+]]
 // CHECK-NOUSE-PPC64LE-NEXT:    br label [[OMP_OFFLOAD_CONT]]
 // CHECK-NOUSE-PPC64LE:       omp_offload.cont:
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[A]], i32** [[TMP72]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[A]], i32** [[TMP74]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8* null, i8** [[TMP75]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[A]], ptr [[TMP71]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[A]], ptr [[TMP73]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP75]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
 // CHECK-NOUSE-PPC64LE-NEXT:    [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32 2, i32* [[TMP78]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32 1, i32* [[TMP79]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 2
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** [[TMP76]], i8*** [[TMP80]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 3
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** [[TMP77]], i8*** [[TMP81]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 4
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i32 0, i32 0), i64** [[TMP82]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 5
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i64** [[TMP83]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 6
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP84]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 7
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP85]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 8
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, i64* [[TMP86]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 9
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, i64* [[TMP87]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 10
-// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP88]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 11
-// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP89]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 12
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32 0, i32* [[TMP90]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP91:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]])
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store i32 2, ptr [[TMP78]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    store i32 1, ptr [[TMP79]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 2
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[TMP76]], ptr [[TMP80]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 3
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[TMP77]], ptr [[TMP81]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 4
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr @.offload_sizes.1, ptr [[TMP82]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 5
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr @.offload_maptypes.2, ptr [[TMP83]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 6
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP84]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 7
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP85]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 8
+// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, ptr [[TMP86]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 9
+// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, ptr [[TMP87]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 10
+// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP88]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 11
+// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP89]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 12
+// CHECK-NOUSE-PPC64LE-NEXT:    store i32 0, ptr [[TMP90]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP91:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS6]])
 // CHECK-NOUSE-PPC64LE-NEXT:    [[TMP92:%.*]] = icmp ne i32 [[TMP91]], 0
 // CHECK-NOUSE-PPC64LE-NEXT:    br i1 [[TMP92]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
 // CHECK-NOUSE-PPC64LE:       omp_offload.failed7:
@@ -1032,83 +951,74 @@ void ST::test_present_members() {
 //
 //
 // CHECK-NOUSE-PPC64LE-LABEL: define {{[^@]+}}@_ZN2ST20test_present_membersEv
-// CHECK-NOUSE-PPC64LE-SAME: (%struct.ST* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR0]] align 2 {
+// CHECK-NOUSE-PPC64LE-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR0]] align 2 {
 // CHECK-NOUSE-PPC64LE-NEXT:  entry:
-// CHECK-NOUSE-PPC64LE-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.ST*, align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 8
 // CHECK-NOUSE-PPC64LE-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    store %struct.ST* [[THIS]], %struct.ST** [[THIS_ADDR]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[THIS1:%.*]] = load %struct.ST*, %struct.ST** [[THIS_ADDR]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[THIS1]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[THIS1]], i32 0, i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP0:%.*]] = getelementptr i32, i32* [[J]], i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP1:%.*]] = bitcast i32* [[I]] to i8*
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP2:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP3:%.*]] = ptrtoint i8* [[TMP2]] to i64
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP4:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[THIS1]], i32 0, i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP0:%.*]] = getelementptr i32, ptr [[J]], i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[TMP0]] to i64
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[I]] to i64
 // CHECK-NOUSE-PPC64LE-NEXT:    [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]]
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP7:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
-// CHECK-NOUSE-PPC64LE-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP7]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes.3 to i8*), i64 24, i1 false)
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to %struct.ST**
-// CHECK-NOUSE-PPC64LE-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP9]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[I]], i32** [[TMP11]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64 [[TMP6]], i64* [[TMP12]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8* null, i8** [[TMP13]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to %struct.ST**
-// CHECK-NOUSE-PPC64LE-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP15]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[I]], i32** [[TMP17]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8* null, i8** [[TMP18]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.ST**
-// CHECK-NOUSE-PPC64LE-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP20]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32**
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32* [[J]], i32** [[TMP22]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8* null, i8** [[TMP23]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-NOUSE-PPC64LE-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes.3, i64 24, i1 false)
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[THIS1]], ptr [[TMP8]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[I]], ptr [[TMP10]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store i64 [[TMP6]], ptr [[TMP12]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP13]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[THIS1]], ptr [[TMP14]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[I]], ptr [[TMP16]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP18]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[THIS1]], ptr [[TMP19]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[J]], ptr [[TMP21]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP23]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
 // CHECK-NOUSE-PPC64LE-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32 2, i32* [[TMP27]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32 3, i32* [[TMP28]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** [[TMP24]], i8*** [[TMP29]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** [[TMP25]], i8*** [[TMP30]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64* [[TMP26]], i64** [[TMP31]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i64** [[TMP32]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP33]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
-// CHECK-NOUSE-PPC64LE-NEXT:    store i8** null, i8*** [[TMP34]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, i64* [[TMP35]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 9
-// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, i64* [[TMP36]], align 8
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 10
-// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP37]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 11
-// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP38]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 12
-// CHECK-NOUSE-PPC64LE-NEXT:    store i32 0, i32* [[TMP39]], align 4
-// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-NOUSE-PPC64LE-NEXT:    store i32 2, ptr [[TMP27]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-NOUSE-PPC64LE-NEXT:    store i32 3, ptr [[TMP28]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[TMP24]], ptr [[TMP29]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[TMP25]], ptr [[TMP30]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr [[TMP26]], ptr [[TMP31]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr @.offload_maptypes.4, ptr [[TMP32]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP33]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-NOUSE-PPC64LE-NEXT:    store ptr null, ptr [[TMP34]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, ptr [[TMP35]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK-NOUSE-PPC64LE-NEXT:    store i64 0, ptr [[TMP36]], align 8
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP37]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK-NOUSE-PPC64LE-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP38]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK-NOUSE-PPC64LE-NEXT:    store i32 0, ptr [[TMP39]], align 4
+// CHECK-NOUSE-PPC64LE-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS]])
 // CHECK-NOUSE-PPC64LE-NEXT:    [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
 // CHECK-NOUSE-PPC64LE-NEXT:    br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
 // CHECK-NOUSE-PPC64LE:       omp_offload.failed:
@@ -1138,171 +1048,150 @@ void ST::test_present_members() {
 // CHECK-NOUSE-I386-NEXT:    [[A:%.*]] = alloca i32, align 4
 // CHECK-NOUSE-I386-NEXT:    [[ST1:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
 // CHECK-NOUSE-I386-NEXT:    [[ST2:%.*]] = alloca [[STRUCT_ST]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x i8*], align 4
-// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x i8*], align 4
-// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x i8*], align 4
+// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x ptr], align 4
+// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x ptr], align 4
+// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x ptr], align 4
 // CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [7 x i64], align 4
-// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4
-// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4
-// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4
-// CHECK-NOUSE-I386-NEXT:    store i32 [[II]], i32* [[II_ADDR]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP0:%.*]] = load i32, i32* [[II_ADDR]], align 4
-// CHECK-NOUSE-I386-NEXT:    store i32 [[TMP0]], i32* [[A]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST1]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST1]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    [[TMP1:%.*]] = getelementptr i32, i32* [[J]], i32 1
-// CHECK-NOUSE-I386-NEXT:    [[TMP2:%.*]] = bitcast i32* [[I]] to i8*
-// CHECK-NOUSE-I386-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP1]] to i8*
-// CHECK-NOUSE-I386-NEXT:    [[TMP4:%.*]] = ptrtoint i8* [[TMP3]] to i64
-// CHECK-NOUSE-I386-NEXT:    [[TMP5:%.*]] = ptrtoint i8* [[TMP2]] to i64
+// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 4
+// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 4
+// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 4
+// CHECK-NOUSE-I386-NEXT:    store i32 [[II]], ptr [[II_ADDR]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP0:%.*]] = load i32, ptr [[II_ADDR]], align 4
+// CHECK-NOUSE-I386-NEXT:    store i32 [[TMP0]], ptr [[A]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST1]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST1]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    [[TMP1:%.*]] = getelementptr i32, ptr [[J]], i32 1
+// CHECK-NOUSE-I386-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[TMP1]] to i64
+// CHECK-NOUSE-I386-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[I]] to i64
 // CHECK-NOUSE-I386-NEXT:    [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
-// CHECK-NOUSE-I386-NEXT:    [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-NOUSE-I386-NEXT:    [[I1:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST2]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[J2:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ST2]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[J2]], i32 1
-// CHECK-NOUSE-I386-NEXT:    [[TMP9:%.*]] = bitcast i32* [[I1]] to i8*
-// CHECK-NOUSE-I386-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP8]] to i8*
-// CHECK-NOUSE-I386-NEXT:    [[TMP11:%.*]] = ptrtoint i8* [[TMP10]] to i64
-// CHECK-NOUSE-I386-NEXT:    [[TMP12:%.*]] = ptrtoint i8* [[TMP9]] to i64
+// CHECK-NOUSE-I386-NEXT:    [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-NOUSE-I386-NEXT:    [[I1:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST2]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    [[J2:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[ST2]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    [[TMP8:%.*]] = getelementptr i32, ptr [[J2]], i32 1
+// CHECK-NOUSE-I386-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[TMP8]] to i64
+// CHECK-NOUSE-I386-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[I1]] to i64
 // CHECK-NOUSE-I386-NEXT:    [[TMP13:%.*]] = sub i64 [[TMP11]], [[TMP12]]
-// CHECK-NOUSE-I386-NEXT:    [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-NOUSE-I386-NEXT:    [[TMP15:%.*]] = bitcast [7 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
-// CHECK-NOUSE-I386-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP15]], i8* align 4 bitcast ([7 x i64]* @.offload_sizes to i8*), i32 56, i1 false)
-// CHECK-NOUSE-I386-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to %struct.ST**
-// CHECK-NOUSE-I386-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP17]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[I]], i32** [[TMP19]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    store i64 [[TMP7]], i64* [[TMP20]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    store i8* null, i8** [[TMP21]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.ST**
-// CHECK-NOUSE-I386-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP23]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[I]], i32** [[TMP25]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    store i8* null, i8** [[TMP26]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
-// CHECK-NOUSE-I386-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.ST**
-// CHECK-NOUSE-I386-NEXT:    store %struct.ST* [[ST1]], %struct.ST** [[TMP28]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
-// CHECK-NOUSE-I386-NEXT:    [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[J]], i32** [[TMP30]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
-// CHECK-NOUSE-I386-NEXT:    store i8* null, i8** [[TMP31]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
-// CHECK-NOUSE-I386-NEXT:    [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[A]], i32** [[TMP33]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
-// CHECK-NOUSE-I386-NEXT:    [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[A]], i32** [[TMP35]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
-// CHECK-NOUSE-I386-NEXT:    store i8* null, i8** [[TMP36]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP38:%.*]] = bitcast i8** [[TMP37]] to %struct.ST**
-// CHECK-NOUSE-I386-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP38]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[I1]], i32** [[TMP40]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
-// CHECK-NOUSE-I386-NEXT:    store i64 [[TMP14]], i64* [[TMP41]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
-// CHECK-NOUSE-I386-NEXT:    store i8* null, i8** [[TMP42]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
-// CHECK-NOUSE-I386-NEXT:    [[TMP44:%.*]] = bitcast i8** [[TMP43]] to %struct.ST**
-// CHECK-NOUSE-I386-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP44]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
-// CHECK-NOUSE-I386-NEXT:    [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[I1]], i32** [[TMP46]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
-// CHECK-NOUSE-I386-NEXT:    store i8* null, i8** [[TMP47]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
-// CHECK-NOUSE-I386-NEXT:    [[TMP49:%.*]] = bitcast i8** [[TMP48]] to %struct.ST**
-// CHECK-NOUSE-I386-NEXT:    store %struct.ST* [[ST2]], %struct.ST** [[TMP49]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
-// CHECK-NOUSE-I386-NEXT:    [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[J2]], i32** [[TMP51]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6
-// CHECK-NOUSE-I386-NEXT:    store i8* null, i8** [[TMP52]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    [[TMP14:%.*]] = sdiv exact i64 [[TMP13]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-NOUSE-I386-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 56, i1 false)
+// CHECK-NOUSE-I386-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store ptr [[ST1]], ptr [[TMP16]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store ptr [[I]], ptr [[TMP18]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store i64 [[TMP7]], ptr [[TMP20]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP21]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    store ptr [[ST1]], ptr [[TMP22]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    store ptr [[I]], ptr [[TMP24]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP26]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK-NOUSE-I386-NEXT:    store ptr [[ST1]], ptr [[TMP27]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK-NOUSE-I386-NEXT:    store ptr [[J]], ptr [[TMP29]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP31]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
+// CHECK-NOUSE-I386-NEXT:    store ptr [[A]], ptr [[TMP32]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK-NOUSE-I386-NEXT:    store ptr [[A]], ptr [[TMP34]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP36]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
+// CHECK-NOUSE-I386-NEXT:    store ptr [[ST2]], ptr [[TMP37]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4
+// CHECK-NOUSE-I386-NEXT:    store ptr [[I1]], ptr [[TMP39]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4
+// CHECK-NOUSE-I386-NEXT:    store i64 [[TMP14]], ptr [[TMP41]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP42]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
+// CHECK-NOUSE-I386-NEXT:    store ptr [[ST2]], ptr [[TMP43]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 5
+// CHECK-NOUSE-I386-NEXT:    store ptr [[I1]], ptr [[TMP45]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP47]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
+// CHECK-NOUSE-I386-NEXT:    store ptr [[ST2]], ptr [[TMP48]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP50:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 6
+// CHECK-NOUSE-I386-NEXT:    store ptr [[J2]], ptr [[TMP50]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP52]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [7 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [7 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
 // CHECK-NOUSE-I386-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
-// CHECK-NOUSE-I386-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    store i32 2, i32* [[TMP56]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    store i32 7, i32* [[TMP57]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
-// CHECK-NOUSE-I386-NEXT:    store i8** [[TMP53]], i8*** [[TMP58]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
-// CHECK-NOUSE-I386-NEXT:    store i8** [[TMP54]], i8*** [[TMP59]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
-// CHECK-NOUSE-I386-NEXT:    store i64* [[TMP55]], i64** [[TMP60]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
-// CHECK-NOUSE-I386-NEXT:    store i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP61]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
-// CHECK-NOUSE-I386-NEXT:    store i8** null, i8*** [[TMP62]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
-// CHECK-NOUSE-I386-NEXT:    store i8** null, i8*** [[TMP63]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
-// CHECK-NOUSE-I386-NEXT:    store i64 0, i64* [[TMP64]], align 8
-// CHECK-NOUSE-I386-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 9
-// CHECK-NOUSE-I386-NEXT:    store i64 0, i64* [[TMP65]], align 8
-// CHECK-NOUSE-I386-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 10
-// CHECK-NOUSE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP66]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 11
-// CHECK-NOUSE-I386-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP67]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 12
-// CHECK-NOUSE-I386-NEXT:    store i32 0, i32* [[TMP68]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP69:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-NOUSE-I386-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store i32 2, ptr [[TMP56]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    store i32 7, ptr [[TMP57]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-NOUSE-I386-NEXT:    store ptr [[TMP53]], ptr [[TMP58]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-NOUSE-I386-NEXT:    store ptr [[TMP54]], ptr [[TMP59]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-NOUSE-I386-NEXT:    store ptr [[TMP55]], ptr [[TMP60]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-NOUSE-I386-NEXT:    store ptr @.offload_maptypes, ptr [[TMP61]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP62]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP63]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-NOUSE-I386-NEXT:    store i64 0, ptr [[TMP64]], align 8
+// CHECK-NOUSE-I386-NEXT:    [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK-NOUSE-I386-NEXT:    store i64 0, ptr [[TMP65]], align 8
+// CHECK-NOUSE-I386-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK-NOUSE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP66]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK-NOUSE-I386-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP67]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK-NOUSE-I386-NEXT:    store i32 0, ptr [[TMP68]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP69:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS]])
 // CHECK-NOUSE-I386-NEXT:    [[TMP70:%.*]] = icmp ne i32 [[TMP69]], 0
 // CHECK-NOUSE-I386-NEXT:    br i1 [[TMP70]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
 // CHECK-NOUSE-I386:       omp_offload.failed:
 // CHECK-NOUSE-I386-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}() #[[ATTR3:[0-9]+]]
 // CHECK-NOUSE-I386-NEXT:    br label [[OMP_OFFLOAD_CONT]]
 // CHECK-NOUSE-I386:       omp_offload.cont:
-// CHECK-NOUSE-I386-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[A]], i32** [[TMP72]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[A]], i32** [[TMP74]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    store i8* null, i8** [[TMP75]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store ptr [[A]], ptr [[TMP71]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store ptr [[A]], ptr [[TMP73]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP75:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP75]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP76:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    [[TMP77:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
 // CHECK-NOUSE-I386-NEXT:    [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
-// CHECK-NOUSE-I386-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    store i32 2, i32* [[TMP78]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    store i32 1, i32* [[TMP79]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 2
-// CHECK-NOUSE-I386-NEXT:    store i8** [[TMP76]], i8*** [[TMP80]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 3
-// CHECK-NOUSE-I386-NEXT:    store i8** [[TMP77]], i8*** [[TMP81]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 4
-// CHECK-NOUSE-I386-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i32 0, i32 0), i64** [[TMP82]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 5
-// CHECK-NOUSE-I386-NEXT:    store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i64** [[TMP83]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 6
-// CHECK-NOUSE-I386-NEXT:    store i8** null, i8*** [[TMP84]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 7
-// CHECK-NOUSE-I386-NEXT:    store i8** null, i8*** [[TMP85]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 8
-// CHECK-NOUSE-I386-NEXT:    store i64 0, i64* [[TMP86]], align 8
-// CHECK-NOUSE-I386-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 9
-// CHECK-NOUSE-I386-NEXT:    store i64 0, i64* [[TMP87]], align 8
-// CHECK-NOUSE-I386-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 10
-// CHECK-NOUSE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP88]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 11
-// CHECK-NOUSE-I386-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP89]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]], i32 0, i32 12
-// CHECK-NOUSE-I386-NEXT:    store i32 0, i32* [[TMP90]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP91:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS6]])
+// CHECK-NOUSE-I386-NEXT:    [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store i32 2, ptr [[TMP78]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    store i32 1, ptr [[TMP79]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 2
+// CHECK-NOUSE-I386-NEXT:    store ptr [[TMP76]], ptr [[TMP80]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 3
+// CHECK-NOUSE-I386-NEXT:    store ptr [[TMP77]], ptr [[TMP81]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 4
+// CHECK-NOUSE-I386-NEXT:    store ptr @.offload_sizes.1, ptr [[TMP82]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP83:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 5
+// CHECK-NOUSE-I386-NEXT:    store ptr @.offload_maptypes.2, ptr [[TMP83]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP84:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 6
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP84]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 7
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP85]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 8
+// CHECK-NOUSE-I386-NEXT:    store i64 0, ptr [[TMP86]], align 8
+// CHECK-NOUSE-I386-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 9
+// CHECK-NOUSE-I386-NEXT:    store i64 0, ptr [[TMP87]], align 8
+// CHECK-NOUSE-I386-NEXT:    [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 10
+// CHECK-NOUSE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP88]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 11
+// CHECK-NOUSE-I386-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP89]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 12
+// CHECK-NOUSE-I386-NEXT:    store i32 0, ptr [[TMP90]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP91:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS6]])
 // CHECK-NOUSE-I386-NEXT:    [[TMP92:%.*]] = icmp ne i32 [[TMP91]], 0
 // CHECK-NOUSE-I386-NEXT:    br i1 [[TMP92]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]]
 // CHECK-NOUSE-I386:       omp_offload.failed7:
@@ -1325,83 +1214,74 @@ void ST::test_present_members() {
 //
 //
 // CHECK-NOUSE-I386-LABEL: define {{[^@]+}}@_ZN2ST20test_present_membersEv
-// CHECK-NOUSE-I386-SAME: (%struct.ST* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR0]] align 2 {
+// CHECK-NOUSE-I386-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) #[[ATTR0]] align 2 {
 // CHECK-NOUSE-I386-NEXT:  entry:
-// CHECK-NOUSE-I386-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.ST*, align 4
-// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
-// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
-// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
+// CHECK-NOUSE-I386-NEXT:    [[THIS_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 4
+// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 4
+// CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 4
 // CHECK-NOUSE-I386-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
-// CHECK-NOUSE-I386-NEXT:    store %struct.ST* [[THIS]], %struct.ST** [[THIS_ADDR]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[THIS1:%.*]] = load %struct.ST*, %struct.ST** [[THIS_ADDR]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[THIS1]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[THIS1]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    [[TMP0:%.*]] = getelementptr i32, i32* [[J]], i32 1
-// CHECK-NOUSE-I386-NEXT:    [[TMP1:%.*]] = bitcast i32* [[I]] to i8*
-// CHECK-NOUSE-I386-NEXT:    [[TMP2:%.*]] = bitcast i32* [[TMP0]] to i8*
-// CHECK-NOUSE-I386-NEXT:    [[TMP3:%.*]] = ptrtoint i8* [[TMP2]] to i64
-// CHECK-NOUSE-I386-NEXT:    [[TMP4:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NOUSE-I386-NEXT:    store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], ptr [[THIS1]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    [[J:%.*]] = getelementptr inbounds [[STRUCT_ST]], ptr [[THIS1]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    [[TMP0:%.*]] = getelementptr i32, ptr [[J]], i32 1
+// CHECK-NOUSE-I386-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[TMP0]] to i64
+// CHECK-NOUSE-I386-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[I]] to i64
 // CHECK-NOUSE-I386-NEXT:    [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]]
-// CHECK-NOUSE-I386-NEXT:    [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
-// CHECK-NOUSE-I386-NEXT:    [[TMP7:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
-// CHECK-NOUSE-I386-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP7]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes.3 to i8*), i32 24, i1 false)
-// CHECK-NOUSE-I386-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[TMP9:%.*]] = bitcast i8** [[TMP8]] to %struct.ST**
-// CHECK-NOUSE-I386-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP9]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[I]], i32** [[TMP11]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    store i64 [[TMP6]], i64* [[TMP12]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    store i8* null, i8** [[TMP13]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    [[TMP15:%.*]] = bitcast i8** [[TMP14]] to %struct.ST**
-// CHECK-NOUSE-I386-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP15]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[I]], i32** [[TMP17]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    store i8* null, i8** [[TMP18]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
-// CHECK-NOUSE-I386-NEXT:    [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.ST**
-// CHECK-NOUSE-I386-NEXT:    store %struct.ST* [[THIS1]], %struct.ST** [[TMP20]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
-// CHECK-NOUSE-I386-NEXT:    [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32**
-// CHECK-NOUSE-I386-NEXT:    store i32* [[J]], i32** [[TMP22]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
-// CHECK-NOUSE-I386-NEXT:    store i8* null, i8** [[TMP23]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK-NOUSE-I386-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes.3, i32 24, i1 false)
+// CHECK-NOUSE-I386-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store ptr [[THIS1]], ptr [[TMP8]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store ptr [[I]], ptr [[TMP10]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store i64 [[TMP6]], ptr [[TMP12]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP13]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    store ptr [[THIS1]], ptr [[TMP14]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    store ptr [[I]], ptr [[TMP16]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP18]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK-NOUSE-I386-NEXT:    store ptr [[THIS1]], ptr [[TMP19]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
+// CHECK-NOUSE-I386-NEXT:    store ptr [[J]], ptr [[TMP21]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP23]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
 // CHECK-NOUSE-I386-NEXT:    [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
-// CHECK-NOUSE-I386-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
-// CHECK-NOUSE-I386-NEXT:    store i32 2, i32* [[TMP27]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
-// CHECK-NOUSE-I386-NEXT:    store i32 3, i32* [[TMP28]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
-// CHECK-NOUSE-I386-NEXT:    store i8** [[TMP24]], i8*** [[TMP29]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
-// CHECK-NOUSE-I386-NEXT:    store i8** [[TMP25]], i8*** [[TMP30]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
-// CHECK-NOUSE-I386-NEXT:    store i64* [[TMP26]], i64** [[TMP31]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
-// CHECK-NOUSE-I386-NEXT:    store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i64** [[TMP32]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
-// CHECK-NOUSE-I386-NEXT:    store i8** null, i8*** [[TMP33]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
-// CHECK-NOUSE-I386-NEXT:    store i8** null, i8*** [[TMP34]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
-// CHECK-NOUSE-I386-NEXT:    store i64 0, i64* [[TMP35]], align 8
-// CHECK-NOUSE-I386-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 9
-// CHECK-NOUSE-I386-NEXT:    store i64 0, i64* [[TMP36]], align 8
-// CHECK-NOUSE-I386-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 10
-// CHECK-NOUSE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], [3 x i32]* [[TMP37]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 11
-// CHECK-NOUSE-I386-NEXT:    store [3 x i32] zeroinitializer, [3 x i32]* [[TMP38]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 12
-// CHECK-NOUSE-I386-NEXT:    store i32 0, i32* [[TMP39]], align 4
-// CHECK-NOUSE-I386-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 -1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
+// CHECK-NOUSE-I386-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
+// CHECK-NOUSE-I386-NEXT:    store i32 2, ptr [[TMP27]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
+// CHECK-NOUSE-I386-NEXT:    store i32 3, ptr [[TMP28]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
+// CHECK-NOUSE-I386-NEXT:    store ptr [[TMP24]], ptr [[TMP29]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
+// CHECK-NOUSE-I386-NEXT:    store ptr [[TMP25]], ptr [[TMP30]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
+// CHECK-NOUSE-I386-NEXT:    store ptr [[TMP26]], ptr [[TMP31]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
+// CHECK-NOUSE-I386-NEXT:    store ptr @.offload_maptypes.4, ptr [[TMP32]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP33]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
+// CHECK-NOUSE-I386-NEXT:    store ptr null, ptr [[TMP34]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
+// CHECK-NOUSE-I386-NEXT:    store i64 0, ptr [[TMP35]], align 8
+// CHECK-NOUSE-I386-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
+// CHECK-NOUSE-I386-NEXT:    store i64 0, ptr [[TMP36]], align 8
+// CHECK-NOUSE-I386-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
+// CHECK-NOUSE-I386-NEXT:    store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP37]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
+// CHECK-NOUSE-I386-NEXT:    store [3 x i32] zeroinitializer, ptr [[TMP38]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
+// CHECK-NOUSE-I386-NEXT:    store i32 0, ptr [[TMP39]], align 4
+// CHECK-NOUSE-I386-NEXT:    [[TMP40:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l{{[0-9]*}}.region_id, ptr [[KERNEL_ARGS]])
 // CHECK-NOUSE-I386-NEXT:    [[TMP41:%.*]] = icmp ne i32 [[TMP40]], 0
 // CHECK-NOUSE-I386-NEXT:    br i1 [[TMP41]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
 // CHECK-NOUSE-I386:       omp_offload.failed:

diff  --git a/clang/test/SemaOpenCL/block-array-capturing.cl b/clang/test/SemaOpenCL/block-array-capturing.cl
index 88ceabd95727a..7c886d395b9ee 100644
--- a/clang/test/SemaOpenCL/block-array-capturing.cl
+++ b/clang/test/SemaOpenCL/block-array-capturing.cl
@@ -1,10 +1,10 @@
-// RUN: %clang_cc1 -no-opaque-pointers -O0 -cl-std=CL2.0 -triple spir64-unknown-unknown -emit-llvm %s -o -| FileCheck %s
+// RUN: %clang_cc1 -O0 -cl-std=CL2.0 -triple spir64-unknown-unknown -emit-llvm %s -o -| FileCheck %s
 // expected-no-diagnostics
 
 typedef int (^block_t)(void);
 
 int block_typedef_kernel(global int* res) {
-  // CHECK: %{{.*}} = alloca <{ i32, i32, i8 addrspace(4)*, [3 x i32] }>
+  // CHECK: %{{.*}} = alloca <{ i32, i32, ptr addrspace(4), [3 x i32] }>
   int a[3] = {1, 2, 3};
   // CHECK: call void @llvm.memcpy{{.*}}
   block_t b = ^(void) { return a[0]; };
@@ -12,5 +12,5 @@ int block_typedef_kernel(global int* res) {
 }
 
 // CHECK: define {{.*}} @__block_typedef_kernel_block_invoke
-// CHECK: %{{.*}} = getelementptr inbounds [3 x i32], [3 x i32] addrspace(4)* %{{.*}}, i64 0, i64 0
+// CHECK: %{{.*}} = getelementptr inbounds [3 x i32], ptr addrspace(4) %{{.*}}, i64 0, i64 0
 // CHECK-NOT: call void @llvm.memcpy{{.*}}
