[clang] f0ef1ea - [IRBuilder] Introduce folder using inst-simplify, use for Or fold.

Florian Hahn via cfe-commits <cfe-commits@lists.llvm.org>
Tue Jan 11 09:31:12 PST 2022


Author: Florian Hahn
Date: 2022-01-11T17:30:48Z
New Revision: f0ef1ea6dd32c1518f826308b3d4a1e38e4973e9

URL: https://github.com/llvm/llvm-project/commit/f0ef1ea6dd32c1518f826308b3d4a1e38e4973e9
DIFF: https://github.com/llvm/llvm-project/commit/f0ef1ea6dd32c1518f826308b3d4a1e38e4973e9.diff

LOG: [IRBuilder] Introduce folder using inst-simplify, use for Or fold.

Alternative to D116817.

This introduces a new value-based folding interface for Or (FoldOr),
which takes two values and returns an existing Value or a constant if
the Or can be simplified; otherwise it returns nullptr. This replaces
the more restrictive CreateOr, which takes two constants.

This is then used to implement a folder that uses InstructionSimplify.
The logic to simplify `Or` instructions is moved there. Subsequent
patches are going to transition other CreateXXX helpers to the more
general FoldXXX interface.
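
As a rough illustration of the new contract (a sketch only: the emitOr
helper is made up for illustration, and the folder's constructor
signature is an assumption, not code copied from the patch), the folder
now gets a chance to return an existing Value before IRBuilder
materializes the instruction:

    #include "llvm/Analysis/InstSimplifyFolder.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // With InstSimplifyFolder plugged into IRBuilder, CreateOr first asks
    // FoldOr(LHS, RHS). If InstructionSimplify can fold the Or (e.g.
    // `or X, false` -> X), the existing Value/Constant is returned and no
    // instruction is inserted; a nullptr result means "emit the or".
    static Value *emitOr(BasicBlock &BB, Value *LHS, Value *RHS) {
      const DataLayout &DL = BB.getModule()->getDataLayout();
      IRBuilder<InstSimplifyFolder> B(&BB, InstSimplifyFolder(DL));
      return B.CreateOr(LHS, RHS);
    }

Since the Or simplification moves out of IRBuilder itself and into the
folder, builders that keep the default ConstantFolder now emit the
instruction instead, which is why the clang tests below gain explicit
`or ..., 0` / `or i1 ..., false` checks.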

Reviewed By: nikic, lebedev.ri

Differential Revision: https://reviews.llvm.org/D116935

Added: 
    llvm/include/llvm/Analysis/InstSimplifyFolder.h

Modified: 
    clang/test/CodeGen/catch-nullptr-and-nonzero-offset-when-nullptr-is-defined.c
    clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c
    clang/test/CodeGen/catch-pointer-overflow-volatile.c
    clang/test/CodeGen/catch-pointer-overflow.c
    clang/test/CodeGen/ubsan-pointer-overflow.c
    clang/test/OpenMP/for_lastprivate_codegen.cpp
    clang/test/OpenMP/for_linear_codegen.cpp
    clang/test/OpenMP/parallel_firstprivate_codegen.cpp
    clang/test/OpenMP/parallel_private_codegen.cpp
    clang/test/OpenMP/parallel_reduction_codegen.cpp
    clang/test/OpenMP/single_codegen.cpp
    clang/test/OpenMP/teams_private_codegen.cpp
    llvm/include/llvm/Analysis/TargetFolder.h
    llvm/include/llvm/IR/ConstantFolder.h
    llvm/include/llvm/IR/IRBuilder.h
    llvm/include/llvm/IR/IRBuilderFolder.h
    llvm/include/llvm/IR/NoFolder.h
    llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
    llvm/lib/Analysis/InstructionSimplify.cpp
    llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll
    llvm/test/Transforms/LoopVectorize/runtime-check-small-clamped-bounds.ll
    llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll
    llvm/test/Transforms/SROA/basictest-opaque-ptrs.ll
    llvm/test/Transforms/SROA/basictest.ll

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/catch-nullptr-and-nonzero-offset-when-nullptr-is-defined.c b/clang/test/CodeGen/catch-nullptr-and-nonzero-offset-when-nullptr-is-defined.c
index e0b538a2a4803..aafcab5a0b9f7 100644
--- a/clang/test/CodeGen/catch-nullptr-and-nonzero-offset-when-nullptr-is-defined.c
+++ b/clang/test/CodeGen/catch-nullptr-and-nonzero-offset-when-nullptr-is-defined.c
@@ -36,6 +36,7 @@ char *add_unsigned(char *base, unsigned long offset) {
   // CHECK-NEXT:                        %[[ADD_PTR:.*]] = getelementptr inbounds i8, i8* %[[BASE_RELOADED]], i64 %[[OFFSET_RELOADED]]
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[OFFSET_RELOADED]]), !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[BASE_RELOADED_INT:.*]] = ptrtoint i8* %[[BASE_RELOADED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP:.*]] = add i64 %[[BASE_RELOADED_INT]], %[[COMPUTED_OFFSET]], !nosanitize
@@ -43,7 +44,7 @@ char *add_unsigned(char *base, unsigned long offset) {
   // CHECK-SANITIZE-NULLNOTOK-NEXT:     %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
   // CHECK-SANITIZE-NULLNOTOK-C-NEXT:   %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = and i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
   // CHECK-SANITIZE-NULLNOTOK-CPP-NEXT: %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
-  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[COMPUTED_OFFSET_OVERFLOWED]], true, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_UGE_BASE:.*]] = icmp uge i64 %[[COMPUTED_GEP]], %[[BASE_RELOADED_INT]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_DID_NOT_OVERFLOW:.*]] = and i1 %[[COMPUTED_GEP_IS_UGE_BASE]], %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW]], !nosanitize
   // CHECK-SANITIZE-NULLNOTOK-NEXT:     %[[GEP_IS_OKAY:.*]] = and i1 %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL]], %[[GEP_DID_NOT_OVERFLOW]], !nosanitize

diff --git a/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c b/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c
index 5c233a6ce2644..90be8750dea3e 100644
--- a/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c
+++ b/clang/test/CodeGen/catch-nullptr-and-nonzero-offset.c
@@ -52,6 +52,7 @@ char *var_var(char *base, unsigned long offset) {
   // CHECK-NEXT:                        %[[ADD_PTR:.*]] = getelementptr inbounds i8, i8* %[[BASE_RELOADED]], i64 %[[OFFSET_RELOADED]]
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[OFFSET_RELOADED]]), !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[BASE_RELOADED_INT:.*]] = ptrtoint i8* %[[BASE_RELOADED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP:.*]] = add i64 %[[BASE_RELOADED_INT]], %[[COMPUTED_OFFSET]], !nosanitize
@@ -59,7 +60,7 @@ char *var_var(char *base, unsigned long offset) {
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
   // CHECK-SANITIZE-C-NEXT:             %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = and i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
   // CHECK-SANITIZE-CPP-NEXT:           %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
-  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[COMPUTED_OFFSET_OVERFLOWED]], true, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_UGE_BASE:.*]] = icmp uge i64 %[[COMPUTED_GEP]], %[[BASE_RELOADED_INT]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_DID_NOT_OVERFLOW:.*]] = and i1 %[[COMPUTED_GEP_IS_UGE_BASE]], %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_IS_OKAY:.*]] = and i1 %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL]], %[[GEP_DID_NOT_OVERFLOW]], !nosanitize
@@ -169,12 +170,13 @@ char *nullptr_var(unsigned long offset) {
   // CHECK-NEXT:                        %[[ADD_PTR:.*]] = getelementptr inbounds i8, i8* null, i64 %[[OFFSET_RELOADED]]
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[OFFSET_RELOADED]]), !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP:.*]] = add i64 0, %[[COMPUTED_OFFSET]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
   // CHECK-SANITIZE-C-NEXT:             %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = and i1 false, %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
   // CHECK-SANITIZE-CPP-NEXT:           %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 false, %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
-  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[COMPUTED_OFFSET_OVERFLOWED]], true, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_UGE_BASE:.*]] = icmp uge i64 %[[COMPUTED_GEP]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_DID_NOT_OVERFLOW:.*]] = and i1 %[[COMPUTED_GEP_IS_UGE_BASE]], %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_IS_OKAY:.*]] = and i1 %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL]], %[[GEP_DID_NOT_OVERFLOW]], !nosanitize
@@ -255,12 +257,13 @@ char *one_var(unsigned long offset) {
   // CHECK-NEXT:                        %[[ADD_PTR:.*]] = getelementptr inbounds i8, i8* inttoptr (i64 1 to i8*), i64 %[[OFFSET_RELOADED]]
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[OFFSET_RELOADED]]), !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP:.*]] = add i64 1, %[[COMPUTED_OFFSET]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
   // CHECK-SANITIZE-C-NEXT:             %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = and i1 icmp ne (i8* inttoptr (i64 1 to i8*), i8* null), %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
   // CHECK-SANITIZE-CPP-NEXT:           %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 icmp ne (i8* inttoptr (i64 1 to i8*), i8* null), %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
-  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[COMPUTED_OFFSET_OVERFLOWED]], true, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_UGE_BASE:.*]] = icmp uge i64 %[[COMPUTED_GEP]], 1, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_DID_NOT_OVERFLOW:.*]] = and i1 %[[COMPUTED_GEP_IS_UGE_BASE]], %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_IS_OKAY:.*]] = and i1 %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL]], %[[GEP_DID_NOT_OVERFLOW]], !nosanitize
@@ -341,12 +344,13 @@ char *allones_var(unsigned long offset) {
   // CHECK-NEXT:                        %[[ADD_PTR:.*]] = getelementptr inbounds i8, i8* inttoptr (i64 -1 to i8*), i64 %[[OFFSET_RELOADED]]
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[OFFSET_RELOADED]]), !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP:.*]] = add i64 -1, %[[COMPUTED_OFFSET]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
   // CHECK-SANITIZE-C-NEXT:             %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = and i1 icmp ne (i8* inttoptr (i64 -1 to i8*), i8* null), %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
   // CHECK-SANITIZE-CPP-NEXT:           %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 icmp ne (i8* inttoptr (i64 -1 to i8*), i8* null), %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
-  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[COMPUTED_OFFSET_OVERFLOWED]], true, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_UGE_BASE:.*]] = icmp uge i64 %[[COMPUTED_GEP]], -1, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_DID_NOT_OVERFLOW:.*]] = and i1 %[[COMPUTED_GEP_IS_UGE_BASE]], %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_IS_OKAY:.*]] = and i1 %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL]], %[[GEP_DID_NOT_OVERFLOW]], !nosanitize

diff --git a/clang/test/CodeGen/catch-pointer-overflow-volatile.c b/clang/test/CodeGen/catch-pointer-overflow-volatile.c
index b1b809e4217c1..e5448f2e67e48 100644
--- a/clang/test/CodeGen/catch-pointer-overflow-volatile.c
+++ b/clang/test/CodeGen/catch-pointer-overflow-volatile.c
@@ -26,6 +26,7 @@ char *volatile_ptr(char *volatile base, unsigned long offset) {
   // CHECK-NEXT:                        %[[ADD_PTR:.*]] = getelementptr inbounds i8, i8* %[[BASE_RELOADED]], i64 %[[OFFSET_RELOADED]]
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[OFFSET_RELOADED]]), !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[BASE_RELOADED_INT:.*]] = ptrtoint i8* %[[BASE_RELOADED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP:.*]] = add i64 %[[BASE_RELOADED_INT]], %[[COMPUTED_OFFSET]], !nosanitize
@@ -33,7 +34,7 @@ char *volatile_ptr(char *volatile base, unsigned long offset) {
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
   // CHECK-SANITIZE-C-NEXT:             %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = and i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
   // CHECK-SANITIZE-CPP-NEXT:           %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
-  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[COMPUTED_OFFSET_OVERFLOWED]], true, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_UGE_BASE:.*]] = icmp uge i64 %[[COMPUTED_GEP]], %[[BASE_RELOADED_INT]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_DID_NOT_OVERFLOW:.*]] = and i1 %[[COMPUTED_GEP_IS_UGE_BASE]], %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_IS_OKAY:.*]] = and i1 %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL]], %[[GEP_DID_NOT_OVERFLOW]], !nosanitize

diff --git a/clang/test/CodeGen/catch-pointer-overflow.c b/clang/test/CodeGen/catch-pointer-overflow.c
index bd01d77222e83..e86a759c07ec4 100644
--- a/clang/test/CodeGen/catch-pointer-overflow.c
+++ b/clang/test/CodeGen/catch-pointer-overflow.c
@@ -33,6 +33,7 @@ char *add_unsigned(char *base, unsigned long offset) {
   // CHECK-NEXT:                        %[[ADD_PTR:.*]] = getelementptr inbounds i8, i8* %[[BASE_RELOADED]], i64 %[[OFFSET_RELOADED]]
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[OFFSET_RELOADED]]), !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[BASE_RELOADED_INT:.*]] = ptrtoint i8* %[[BASE_RELOADED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP:.*]] = add i64 %[[BASE_RELOADED_INT]], %[[COMPUTED_OFFSET]], !nosanitize
@@ -40,7 +41,7 @@ char *add_unsigned(char *base, unsigned long offset) {
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
   // CHECK-SANITIZE-C-NEXT:             %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = and i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
   // CHECK-SANITIZE-CPP-NEXT:           %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
-  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[COMPUTED_OFFSET_OVERFLOWED]], true, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_UGE_BASE:.*]] = icmp uge i64 %[[COMPUTED_GEP]], %[[BASE_RELOADED_INT]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_DID_NOT_OVERFLOW:.*]] = and i1 %[[COMPUTED_GEP_IS_UGE_BASE]], %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_IS_OKAY:.*]] = and i1 %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL]], %[[GEP_DID_NOT_OVERFLOW]], !nosanitize
@@ -69,6 +70,7 @@ char *sub_unsigned(char *base, unsigned long offset) {
   // CHECK-NEXT:                        %[[ADD_PTR:.*]] = getelementptr inbounds i8, i8* %[[BASE_RELOADED]], i64 %[[IDX_NEG]]
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[IDX_NEG]]), !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[BASE_RELOADED_INT:.*]] = ptrtoint i8* %[[BASE_RELOADED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP:.*]] = add i64 %[[BASE_RELOADED_INT]], %[[COMPUTED_OFFSET]], !nosanitize
@@ -76,7 +78,7 @@ char *sub_unsigned(char *base, unsigned long offset) {
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
   // CHECK-SANITIZE-C-NEXT:             %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = and i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
   // CHECK-SANITIZE-CPP-NEXT:           %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
-  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[COMPUTED_OFFSET_OVERFLOWED]], true, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_ULE_BASE:.*]] = icmp ule i64 %[[COMPUTED_GEP]], %[[BASE_RELOADED_INT]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_DID_NOT_OVERFLOW:.*]] = and i1 %[[COMPUTED_GEP_IS_ULE_BASE]], %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[GEP_IS_OKAY:.*]] = and i1 %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL]], %[[GEP_DID_NOT_OVERFLOW]], !nosanitize
@@ -104,6 +106,7 @@ char *add_signed(char *base, signed long offset) {
   // CHECK-NEXT:                        %[[ADD_PTR:.*]] = getelementptr inbounds i8, i8* %[[BASE_RELOADED]], i64 %[[OFFSET_RELOADED]]
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[OFFSET_RELOADED]]), !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[BASE_RELOADED_INT:.*]] = ptrtoint i8* %[[BASE_RELOADED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP:.*]] = add i64 %[[BASE_RELOADED_INT]], %[[COMPUTED_OFFSET]], !nosanitize
@@ -111,7 +114,7 @@ char *add_signed(char *base, signed long offset) {
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
   // CHECK-SANITIZE-C-NEXT:             %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = and i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
   // CHECK-SANITIZE-CPP-NEXT:           %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
-  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[COMPUTED_OFFSET_OVERFLOWED]], true, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[POSORZEROVALID:.*]] = icmp uge i64 %[[COMPUTED_GEP]], %[[BASE_RELOADED_INT]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[POSORZEROOFFSET:.*]] = icmp sge i64 %[[COMPUTED_OFFSET]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[NEGVALID:.*]] = icmp ult i64 %[[COMPUTED_GEP]], %[[BASE_RELOADED_INT]], !nosanitize
@@ -143,6 +146,7 @@ char *sub_signed(char *base, signed long offset) {
   // CHECK-NEXT:                        %[[ADD_PTR:.*]] = getelementptr inbounds i8, i8* %[[BASE_RELOADED]], i64 %[[IDX_NEG]]
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_AGGREGATE:.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 1, i64 %[[IDX_NEG]]), !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_OVERFLOWED:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 1, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[OR_OV:.+]] = or i1 %[[COMPUTED_OFFSET_OVERFLOWED]], false, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET:.*]] = extractvalue { i64, i1 } %[[COMPUTED_OFFSET_AGGREGATE]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[BASE_RELOADED_INT:.*]] = ptrtoint i8* %[[BASE_RELOADED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP:.*]] = add i64 %[[BASE_RELOADED_INT]], %[[COMPUTED_OFFSET]], !nosanitize
@@ -150,7 +154,7 @@ char *sub_signed(char *base, signed long offset) {
   // CHECK-SANITIZE-NEXT:               %[[COMPUTED_GEP_IS_NOT_NULL:.*]] = icmp ne i64 %[[COMPUTED_GEP]], 0, !nosanitize
   // CHECK-SANITIZE-C-NEXT:             %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = and i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
   // CHECK-SANITIZE-CPP-NEXT:           %[[BOTH_POINTERS_ARE_NULL_OR_BOTH_ARE_NONNULL:.*]] = icmp eq i1 %[[BASE_IS_NOT_NULLPTR]], %[[COMPUTED_GEP_IS_NOT_NULL]], !nosanitize
-  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[COMPUTED_OFFSET_OVERFLOWED]], true, !nosanitize
+  // CHECK-SANITIZE-NEXT:               %[[COMPUTED_OFFSET_DID_NOT_OVERFLOW:.*]] = xor i1 %[[OR_OV]], true, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[POSORZEROVALID:.*]] = icmp uge i64 %[[COMPUTED_GEP]], %[[BASE_RELOADED_INT]], !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[POSORZEROOFFSET:.*]] = icmp sge i64 %[[COMPUTED_OFFSET]], 0, !nosanitize
   // CHECK-SANITIZE-NEXT:               %[[NEGVALID:.*]] = icmp ult i64 %[[COMPUTED_GEP]], %[[BASE_RELOADED_INT]], !nosanitize

diff --git a/clang/test/CodeGen/ubsan-pointer-overflow.c b/clang/test/CodeGen/ubsan-pointer-overflow.c
index e2b5e498cf0b8..f95afc93f72e0 100644
--- a/clang/test/CodeGen/ubsan-pointer-overflow.c
+++ b/clang/test/CodeGen/ubsan-pointer-overflow.c
@@ -10,6 +10,7 @@ void fixed_len_array(int k) {
   // CHECK: getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* [[ARR:%.*]], i64 0, i64 [[IDXPROM:%.*]]
   // CHECK-NEXT: [[SMUL:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 40, i64 [[IDXPROM]]), !nosanitize
   // CHECK-NEXT: [[SMULOFLOW:%.*]] = extractvalue { i64, i1 } [[SMUL]], 1, !nosanitize
+  // CHECK-NEXT: [[OR:%.+]] = or i1 [[SMULOFLOW]], false, !nosanitize
   // CHECK-NEXT: [[SMULVAL:%.*]] = extractvalue { i64, i1 } [[SMUL]], 0, !nosanitize
   // CHECK-NEXT: [[BASE:%.*]] = ptrtoint [10 x [10 x i32]]* [[ARR]] to i64, !nosanitize
   // CHECK-NEXT: [[COMPGEP:%.*]] = add i64 [[BASE]], [[SMULVAL]], !nosanitize

diff --git a/clang/test/OpenMP/for_lastprivate_codegen.cpp b/clang/test/OpenMP/for_lastprivate_codegen.cpp
index e06364ee01c33..694f4c79f369c 100644
--- a/clang/test/OpenMP/for_lastprivate_codegen.cpp
+++ b/clang/test/OpenMP/for_lastprivate_codegen.cpp
@@ -901,7 +901,8 @@ int main() {
 // CHECK1-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK1-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 8
 // CHECK1-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK1-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 8
+// CHECK1-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK1-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 8
 // CHECK1-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK1-NEXT:    store i32* [[TMP1]], i32** [[C]], align 8
@@ -952,13 +953,13 @@ int main() {
 // CHECK1-NEXT:    [[BF_LOAD8:%.*]] = load i8, i8* [[B6]], align 8
 // CHECK1-NEXT:    [[BF_VALUE:%.*]] = and i8 [[TMP11]], 15
 // CHECK1-NEXT:    [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
-// CHECK1-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
-// CHECK1-NEXT:    store i8 [[BF_SET]], i8* [[B6]], align 8
+// CHECK1-NEXT:    [[BF_SET10:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
+// CHECK1-NEXT:    store i8 [[BF_SET10]], i8* [[B6]], align 8
 // CHECK1-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i8 [[BF_VALUE]], 4
 // CHECK1-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i8 [[BF_RESULT_SHL]], 4
 // CHECK1-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[BF_RESULT_ASHR]] to i32
-// CHECK1-NEXT:    [[C10:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
-// CHECK1-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[C10]], align 8
+// CHECK1-NEXT:    [[C11:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
+// CHECK1-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[C11]], align 8
 // CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
 // CHECK1-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP13]], 1
 // CHECK1-NEXT:    store i32 [[DIV]], i32* [[TMP12]], align 4
@@ -967,8 +968,8 @@ int main() {
 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
 // CHECK1:       omp.inner.for.inc:
 // CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
-// CHECK1-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP14]], 1
-// CHECK1-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
+// CHECK1-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP14]], 1
+// CHECK1-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4
 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
 // CHECK1:       omp.inner.for.end:
 // CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
@@ -2077,7 +2078,8 @@ int main() {
 // CHECK2-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK2-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 8
 // CHECK2-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK2-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 8
+// CHECK2-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK2-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 8
 // CHECK2-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK2-NEXT:    store i32* [[TMP1]], i32** [[C]], align 8
@@ -2128,13 +2130,13 @@ int main() {
 // CHECK2-NEXT:    [[BF_LOAD8:%.*]] = load i8, i8* [[B6]], align 8
 // CHECK2-NEXT:    [[BF_VALUE:%.*]] = and i8 [[TMP11]], 15
 // CHECK2-NEXT:    [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
-// CHECK2-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
-// CHECK2-NEXT:    store i8 [[BF_SET]], i8* [[B6]], align 8
+// CHECK2-NEXT:    [[BF_SET10:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
+// CHECK2-NEXT:    store i8 [[BF_SET10]], i8* [[B6]], align 8
 // CHECK2-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i8 [[BF_VALUE]], 4
 // CHECK2-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i8 [[BF_RESULT_SHL]], 4
 // CHECK2-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[BF_RESULT_ASHR]] to i32
-// CHECK2-NEXT:    [[C10:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
-// CHECK2-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[C10]], align 8
+// CHECK2-NEXT:    [[C11:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
+// CHECK2-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[C11]], align 8
 // CHECK2-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
 // CHECK2-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP13]], 1
 // CHECK2-NEXT:    store i32 [[DIV]], i32* [[TMP12]], align 4
@@ -2143,8 +2145,8 @@ int main() {
 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
 // CHECK2:       omp.inner.for.inc:
 // CHECK2-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
-// CHECK2-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP14]], 1
-// CHECK2-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
+// CHECK2-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP14]], 1
+// CHECK2-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4
 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND]]
 // CHECK2:       omp.inner.for.end:
 // CHECK2-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
@@ -2762,7 +2764,8 @@ int main() {
 // CHECK3-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK3-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 8
 // CHECK3-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK3-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 8
+// CHECK3-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK3-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 8
 // CHECK3-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK3-NEXT:    store i32* [[TMP1]], i32** [[C]], align 8
@@ -3569,7 +3572,8 @@ int main() {
 // CHECK4-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK4-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 8
 // CHECK4-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK4-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 8
+// CHECK4-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK4-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 8
 // CHECK4-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK4-NEXT:    store i32* [[TMP1]], i32** [[C]], align 8
@@ -4651,7 +4655,8 @@ int main() {
 // CHECK5-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK5-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 8
 // CHECK5-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK5-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 8
+// CHECK5-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK5-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 8
 // CHECK5-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK5-NEXT:    store i32* [[TMP1]], i32** [[C]], align 8
@@ -4702,13 +4707,13 @@ int main() {
 // CHECK5-NEXT:    [[BF_LOAD8:%.*]] = load i8, i8* [[B6]], align 8
 // CHECK5-NEXT:    [[BF_VALUE:%.*]] = and i8 [[TMP11]], 15
 // CHECK5-NEXT:    [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
-// CHECK5-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
-// CHECK5-NEXT:    store i8 [[BF_SET]], i8* [[B6]], align 8
+// CHECK5-NEXT:    [[BF_SET10:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
+// CHECK5-NEXT:    store i8 [[BF_SET10]], i8* [[B6]], align 8
 // CHECK5-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i8 [[BF_VALUE]], 4
 // CHECK5-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i8 [[BF_RESULT_SHL]], 4
 // CHECK5-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[BF_RESULT_ASHR]] to i32
-// CHECK5-NEXT:    [[C10:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
-// CHECK5-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[C10]], align 8
+// CHECK5-NEXT:    [[C11:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
+// CHECK5-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[C11]], align 8
 // CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
 // CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP13]], 1
 // CHECK5-NEXT:    store i32 [[DIV]], i32* [[TMP12]], align 4
@@ -4717,8 +4722,8 @@ int main() {
 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
 // CHECK5:       omp.inner.for.inc:
 // CHECK5-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
-// CHECK5-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP14]], 1
-// CHECK5-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
+// CHECK5-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP14]], 1
+// CHECK5-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4
 // CHECK5-NEXT:    br label [[OMP_INNER_FOR_COND]]
 // CHECK5:       omp.inner.for.end:
 // CHECK5-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
@@ -5845,7 +5850,8 @@ int main() {
 // CHECK6-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK6-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 8
 // CHECK6-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK6-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 8
+// CHECK6-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK6-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 8
 // CHECK6-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK6-NEXT:    store i32* [[TMP1]], i32** [[C]], align 8
@@ -5896,13 +5902,13 @@ int main() {
 // CHECK6-NEXT:    [[BF_LOAD8:%.*]] = load i8, i8* [[B6]], align 8
 // CHECK6-NEXT:    [[BF_VALUE:%.*]] = and i8 [[TMP11]], 15
 // CHECK6-NEXT:    [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
-// CHECK6-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
-// CHECK6-NEXT:    store i8 [[BF_SET]], i8* [[B6]], align 8
+// CHECK6-NEXT:    [[BF_SET10:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
+// CHECK6-NEXT:    store i8 [[BF_SET10]], i8* [[B6]], align 8
 // CHECK6-NEXT:    [[BF_RESULT_SHL:%.*]] = shl i8 [[BF_VALUE]], 4
 // CHECK6-NEXT:    [[BF_RESULT_ASHR:%.*]] = ashr i8 [[BF_RESULT_SHL]], 4
 // CHECK6-NEXT:    [[BF_RESULT_CAST:%.*]] = sext i8 [[BF_RESULT_ASHR]] to i32
-// CHECK6-NEXT:    [[C10:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
-// CHECK6-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[C10]], align 8
+// CHECK6-NEXT:    [[C11:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
+// CHECK6-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[C11]], align 8
 // CHECK6-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
 // CHECK6-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP13]], 1
 // CHECK6-NEXT:    store i32 [[DIV]], i32* [[TMP12]], align 4
@@ -5911,8 +5917,8 @@ int main() {
 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
 // CHECK6:       omp.inner.for.inc:
 // CHECK6-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
-// CHECK6-NEXT:    [[ADD11:%.*]] = add nsw i32 [[TMP14]], 1
-// CHECK6-NEXT:    store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4
+// CHECK6-NEXT:    [[ADD12:%.*]] = add nsw i32 [[TMP14]], 1
+// CHECK6-NEXT:    store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4
 // CHECK6-NEXT:    br label [[OMP_INNER_FOR_COND]]
 // CHECK6:       omp.inner.for.end:
 // CHECK6-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
@@ -6530,7 +6536,8 @@ int main() {
 // CHECK7-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK7-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 8
 // CHECK7-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK7-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 8
+// CHECK7-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK7-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 8
 // CHECK7-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
 // CHECK7-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK7-NEXT:    store i32* [[TMP1]], i32** [[C]], align 8
@@ -7337,7 +7344,8 @@ int main() {
 // CHECK8-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK8-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 8
 // CHECK8-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK8-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 8
+// CHECK8-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK8-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 8
 // CHECK8-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 3
 // CHECK8-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK8-NEXT:    store i32* [[TMP1]], i32** [[C]], align 8
@@ -7858,4 +7866,3 @@ int main() {
 // CHECK8-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]])
 // CHECK8-NEXT:    ret void
 //
-//
\ No newline at end of file

diff --git a/clang/test/OpenMP/for_linear_codegen.cpp b/clang/test/OpenMP/for_linear_codegen.cpp
index 919c7abf51b41..76832b89ef556 100644
--- a/clang/test/OpenMP/for_linear_codegen.cpp
+++ b/clang/test/OpenMP/for_linear_codegen.cpp
@@ -380,7 +380,8 @@ int main() {
 // CHECK1-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK1-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK1-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK1-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK1-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK1-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK1-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK1-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -1032,7 +1033,8 @@ int main() {
 // CHECK2-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK2-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK2-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK2-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK2-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK2-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK2-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK2-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -1526,7 +1528,8 @@ int main() {
 // CHECK3-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK3-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK3-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK3-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK3-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK3-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK3-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK3-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -2180,7 +2183,8 @@ int main() {
 // CHECK4-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK4-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK4-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK4-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK4-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK4-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK4-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK4-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8

diff --git a/clang/test/OpenMP/parallel_firstprivate_codegen.cpp b/clang/test/OpenMP/parallel_firstprivate_codegen.cpp
index a777faf5f03d0..609501d63cdfd 100644
--- a/clang/test/OpenMP/parallel_firstprivate_codegen.cpp
+++ b/clang/test/OpenMP/parallel_firstprivate_codegen.cpp
@@ -239,7 +239,6 @@ void array_func(float a[3], St s[2], int n, long double vla1[n]) {
 
 #endif
 
-
 // CHECK1-LABEL: define {{[^@]+}}@main
 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
 // CHECK1-NEXT:  entry:
@@ -523,7 +522,8 @@ void array_func(float a[3], St s[2], int n, long double vla1[n]) {
 // CHECK1-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK1-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK1-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK1-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK1-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK1-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK1-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 4
 // CHECK1-NEXT:    store i32* [[TMP0]], i32** [[C]], align 4
@@ -1208,7 +1208,8 @@ void array_func(float a[3], St s[2], int n, long double vla1[n]) {
 // CHECK2-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK2-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK2-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK2-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK2-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK2-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK2-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 4
 // CHECK2-NEXT:    store i32* [[TMP0]], i32** [[C]], align 4
@@ -1657,7 +1658,8 @@ void array_func(float a[3], St s[2], int n, long double vla1[n]) {
 // CHECK3-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK3-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK3-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK3-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK3-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK3-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK3-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 4
 // CHECK3-NEXT:    store i32* [[TMP0]], i32** [[C]], align 4
@@ -1981,7 +1983,8 @@ void array_func(float a[3], St s[2], int n, long double vla1[n]) {
 // CHECK4-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK4-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK4-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK4-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK4-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK4-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK4-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 4
 // CHECK4-NEXT:    store i32* [[TMP0]], i32** [[C]], align 4
@@ -2446,7 +2449,8 @@ void array_func(float a[3], St s[2], int n, long double vla1[n]) {
 // CHECK9-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK9-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK9-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK9-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK9-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK9-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK9-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK9-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -3145,7 +3149,8 @@ void array_func(float a[3], St s[2], int n, long double vla1[n]) {
 // CHECK10-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK10-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK10-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK10-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK10-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK10-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK10-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK10-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -3602,7 +3607,8 @@ void array_func(float a[3], St s[2], int n, long double vla1[n]) {
 // CHECK11-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK11-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK11-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK11-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK11-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK11-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK11-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK11-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -3941,7 +3947,8 @@ void array_func(float a[3], St s[2], int n, long double vla1[n]) {
 // CHECK12-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK12-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK12-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK12-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK12-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK12-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK12-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK12-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8

diff --git a/clang/test/OpenMP/parallel_private_codegen.cpp b/clang/test/OpenMP/parallel_private_codegen.cpp
index 40d890b4bfb5c..4f23e8a7419f0 100644
--- a/clang/test/OpenMP/parallel_private_codegen.cpp
+++ b/clang/test/OpenMP/parallel_private_codegen.cpp
@@ -349,7 +349,8 @@ int main() {
 // CHECK1-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK1-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK1-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK1-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK1-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK1-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK1-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK1-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -761,7 +762,8 @@ int main() {
 // CHECK2-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK2-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK2-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK2-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK2-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK2-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK2-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK2-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -1029,7 +1031,8 @@ int main() {
 // CHECK3-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK3-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK3-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK3-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK3-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK3-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK3-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK3-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -1251,7 +1254,8 @@ int main() {
 // CHECK4-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK4-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK4-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK4-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK4-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK4-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK4-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK4-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -1366,4 +1370,3 @@ int main() {
 // CHECK4-NEXT:    store i32 [[DIV]], i32* [[TMP4]], align 4
 // CHECK4-NEXT:    ret void
 //
-//
\ No newline at end of file

diff --git a/clang/test/OpenMP/parallel_reduction_codegen.cpp b/clang/test/OpenMP/parallel_reduction_codegen.cpp
index acb891aa1c7a0..209226919542a 100644
--- a/clang/test/OpenMP/parallel_reduction_codegen.cpp
+++ b/clang/test/OpenMP/parallel_reduction_codegen.cpp
@@ -925,7 +925,7 @@ int main() {
 // CHECK1-NEXT:    [[TMP7:%.*]] = bitcast %struct.S* [[ARRAYIDX6]] to i8*
 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP7]], i8* align 4 [[TMP8]], i64 4, i1 false)
-// CHECK1-NEXT:    br label [[WHILE_COND]], !llvm.loop [[LOOP4:![0-9]+]]
+// CHECK1-NEXT:    br label [[WHILE_COND]], !llvm.loop [[LOOP5:![0-9]+]]
 //
 //
 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
@@ -1111,7 +1111,8 @@ int main() {
 // CHECK1-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK1-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK1-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK1-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK1-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK1-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK1-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK1-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -1129,8 +1130,8 @@ int main() {
 // CHECK1-NEXT:    [[BF_LOAD8:%.*]] = load i8, i8* [[B7]], align 4
 // CHECK1-NEXT:    [[BF_VALUE:%.*]] = and i8 [[TMP5]], 15
 // CHECK1-NEXT:    [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
-// CHECK1-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
-// CHECK1-NEXT:    store i8 [[BF_SET]], i8* [[B7]], align 4
+// CHECK1-NEXT:    [[BF_SET10:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
+// CHECK1-NEXT:    store i8 [[BF_SET10]], i8* [[B7]], align 4
 // CHECK1-NEXT:    ret void
 //
 //
@@ -2324,7 +2325,7 @@ int main() {
 // CHECK2-NEXT:    [[TMP7:%.*]] = bitcast %struct.S* [[ARRAYIDX6]] to i8*
 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast %struct.S* [[VAR3]] to i8*
 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP7]], i8* align 4 [[TMP8]], i64 4, i1 false)
-// CHECK2-NEXT:    br label [[WHILE_COND]], !llvm.loop [[LOOP4:![0-9]+]]
+// CHECK2-NEXT:    br label [[WHILE_COND]], !llvm.loop [[LOOP5:![0-9]+]]
 //
 //
 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
@@ -2510,7 +2511,8 @@ int main() {
 // CHECK2-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK2-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK2-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK2-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK2-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK2-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK2-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK2-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -2528,8 +2530,8 @@ int main() {
 // CHECK2-NEXT:    [[BF_LOAD8:%.*]] = load i8, i8* [[B7]], align 4
 // CHECK2-NEXT:    [[BF_VALUE:%.*]] = and i8 [[TMP5]], 15
 // CHECK2-NEXT:    [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
-// CHECK2-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
-// CHECK2-NEXT:    store i8 [[BF_SET]], i8* [[B7]], align 4
+// CHECK2-NEXT:    [[BF_SET10:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
+// CHECK2-NEXT:    store i8 [[BF_SET10]], i8* [[B7]], align 4
 // CHECK2-NEXT:    ret void
 //
 //
@@ -3338,7 +3340,8 @@ int main() {
 // CHECK3-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK3-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK3-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK3-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK3-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK3-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK3-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK3-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -3356,8 +3359,8 @@ int main() {
 // CHECK3-NEXT:    [[BF_LOAD8:%.*]] = load i8, i8* [[B7]], align 4
 // CHECK3-NEXT:    [[BF_VALUE:%.*]] = and i8 [[TMP5]], 15
 // CHECK3-NEXT:    [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
-// CHECK3-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
-// CHECK3-NEXT:    store i8 [[BF_SET]], i8* [[B7]], align 4
+// CHECK3-NEXT:    [[BF_SET10:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
+// CHECK3-NEXT:    store i8 [[BF_SET10]], i8* [[B7]], align 4
 // CHECK3-NEXT:    ret void
 //
 //
@@ -4056,7 +4059,8 @@ int main() {
 // CHECK4-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK4-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK4-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK4-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK4-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK4-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK4-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK4-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -4074,8 +4078,8 @@ int main() {
 // CHECK4-NEXT:    [[BF_LOAD8:%.*]] = load i8, i8* [[B7]], align 4
 // CHECK4-NEXT:    [[BF_VALUE:%.*]] = and i8 [[TMP5]], 15
 // CHECK4-NEXT:    [[BF_CLEAR9:%.*]] = and i8 [[BF_LOAD8]], -16
-// CHECK4-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
-// CHECK4-NEXT:    store i8 [[BF_SET]], i8* [[B7]], align 4
+// CHECK4-NEXT:    [[BF_SET10:%.*]] = or i8 [[BF_CLEAR9]], [[BF_VALUE]]
+// CHECK4-NEXT:    store i8 [[BF_SET10]], i8* [[B7]], align 4
 // CHECK4-NEXT:    ret void
 //
 //
@@ -4399,4 +4403,3 @@ int main() {
 // CHECK4-NEXT:    store i32 [[ADD3]], i32* [[TMP23]], align 4
 // CHECK4-NEXT:    ret void
 //
-//
\ No newline at end of file

diff --git a/clang/test/OpenMP/single_codegen.cpp b/clang/test/OpenMP/single_codegen.cpp
index 890451cfafe89..ae8df718d91e4 100644
--- a/clang/test/OpenMP/single_codegen.cpp
+++ b/clang/test/OpenMP/single_codegen.cpp
@@ -132,17 +132,6 @@ void array_func(int n, int a[n], St s[2]) {
 // Private b
 // Private c
 
-
-
-
-
-
-
-
-
-
-
-
 // CHECK1-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_.
 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK1-NEXT:  entry:
@@ -782,7 +771,8 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK1-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK1-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK1-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK1-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK1-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK1-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK1-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK1-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -1618,7 +1608,8 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK2-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK2-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK2-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK2-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK2-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK2-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK2-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK2-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -2800,7 +2791,8 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK3-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK3-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK3-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK3-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK3-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK3-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK3-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK3-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -3624,7 +3616,8 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK4-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK4-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK4-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK4-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK4-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK4-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK4-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK4-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -4294,98 +4287,98 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK5-LABEL: define {{[^@]+}}@__cxx_global_var_init
 // CHECK5-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" !dbg [[DBG32:![0-9]+]] {
 // CHECK5-NEXT:  entry:
-// CHECK5-NEXT:    call void @_ZN9TestClassC1Ev(%class.TestClass* nonnull align 4 dereferenceable(4) @tc), !dbg [[DBG33:![0-9]+]]
-// CHECK5-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%class.TestClass*)* @_ZN9TestClassD1Ev to void (i8*)*), i8* bitcast (%class.TestClass* @tc to i8*), i8* @__dso_handle) #[[ATTR3]], !dbg [[DBG35:![0-9]+]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG33]]
+// CHECK5-NEXT:    call void @_ZN9TestClassC1Ev(%class.TestClass* nonnull align 4 dereferenceable(4) @tc), !dbg [[DBG34:![0-9]+]]
+// CHECK5-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%class.TestClass*)* @_ZN9TestClassD1Ev to void (i8*)*), i8* bitcast (%class.TestClass* @tc to i8*), i8* @__dso_handle) #[[ATTR3]], !dbg [[DBG36:![0-9]+]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG34]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
-// CHECK5-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG36:![0-9]+]] {
+// CHECK5-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG37:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[EXN_SLOT:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
-// CHECK5-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]], !dbg [[DBG37:![0-9]+]]
+// CHECK5-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]], !dbg [[DBG38:![0-9]+]]
 // CHECK5:       arrayctor.loop:
-// CHECK5-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %class.TestClass* [ getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[INVOKE_CONT:%.*]] ], !dbg [[DBG37]]
+// CHECK5-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %class.TestClass* [ getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[INVOKE_CONT:%.*]] ], !dbg [[DBG38]]
 // CHECK5-NEXT:    invoke void @_ZN9TestClassC1Ev(%class.TestClass* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
-// CHECK5-NEXT:    to label [[INVOKE_CONT]] unwind label [[LPAD:%.*]], !dbg [[DBG37]]
+// CHECK5-NEXT:    to label [[INVOKE_CONT]] unwind label [[LPAD:%.*]], !dbg [[DBG38]]
 // CHECK5:       invoke.cont:
-// CHECK5-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_TESTCLASS:%.*]], %class.TestClass* [[ARRAYCTOR_CUR]], i64 1, !dbg [[DBG37]]
-// CHECK5-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.TestClass* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i64 1, i64 0), !dbg [[DBG37]]
-// CHECK5-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]], !dbg [[DBG37]]
+// CHECK5-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_TESTCLASS:%.*]], %class.TestClass* [[ARRAYCTOR_CUR]], i64 1, !dbg [[DBG38]]
+// CHECK5-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.TestClass* [[ARRAYCTOR_NEXT]], getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i64 1, i64 0), !dbg [[DBG38]]
+// CHECK5-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]], !dbg [[DBG38]]
 // CHECK5:       arrayctor.cont:
-// CHECK5-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR3]], !dbg [[DBG39:![0-9]+]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG39]]
+// CHECK5-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR3]], !dbg [[DBG40:![0-9]+]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG40]]
 // CHECK5:       lpad:
 // CHECK5-NEXT:    [[TMP1:%.*]] = landingpad { i8*, i32 }
-// CHECK5-NEXT:    cleanup, !dbg [[DBG40:![0-9]+]]
-// CHECK5-NEXT:    [[TMP2:%.*]] = extractvalue { i8*, i32 } [[TMP1]], 0, !dbg [[DBG40]]
-// CHECK5-NEXT:    store i8* [[TMP2]], i8** [[EXN_SLOT]], align 8, !dbg [[DBG40]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = extractvalue { i8*, i32 } [[TMP1]], 1, !dbg [[DBG40]]
-// CHECK5-NEXT:    store i32 [[TMP3]], i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG40]]
-// CHECK5-NEXT:    [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq %class.TestClass* getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i32 0, i32 0), [[ARRAYCTOR_CUR]], !dbg [[DBG37]]
-// CHECK5-NEXT:    br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG37]]
+// CHECK5-NEXT:    cleanup, !dbg [[DBG41:![0-9]+]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = extractvalue { i8*, i32 } [[TMP1]], 0, !dbg [[DBG41]]
+// CHECK5-NEXT:    store i8* [[TMP2]], i8** [[EXN_SLOT]], align 8, !dbg [[DBG41]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = extractvalue { i8*, i32 } [[TMP1]], 1, !dbg [[DBG41]]
+// CHECK5-NEXT:    store i32 [[TMP3]], i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG41]]
+// CHECK5-NEXT:    [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq %class.TestClass* getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i32 0, i32 0), [[ARRAYCTOR_CUR]], !dbg [[DBG38]]
+// CHECK5-NEXT:    br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG38]]
 // CHECK5:       arraydestroy.body:
-// CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %class.TestClass* [ [[ARRAYCTOR_CUR]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG37]]
-// CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[CLASS_TESTCLASS]], %class.TestClass* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG37]]
-// CHECK5-NEXT:    call void @_ZN9TestClassD1Ev(%class.TestClass* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG37]]
-// CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %class.TestClass* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i32 0, i32 0), !dbg [[DBG37]]
-// CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG37]]
+// CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %class.TestClass* [ [[ARRAYCTOR_CUR]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG38]]
+// CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[CLASS_TESTCLASS]], %class.TestClass* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG38]]
+// CHECK5-NEXT:    call void @_ZN9TestClassD1Ev(%class.TestClass* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG38]]
+// CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %class.TestClass* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i32 0, i32 0), !dbg [[DBG38]]
+// CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG38]]
 // CHECK5:       arraydestroy.done1:
-// CHECK5-NEXT:    br label [[EH_RESUME:%.*]], !dbg [[DBG37]]
+// CHECK5-NEXT:    br label [[EH_RESUME:%.*]], !dbg [[DBG38]]
 // CHECK5:       eh.resume:
-// CHECK5-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8, !dbg [[DBG37]]
-// CHECK5-NEXT:    [[SEL:%.*]] = load i32, i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG37]]
-// CHECK5-NEXT:    [[LPAD_VAL:%.*]] = insertvalue { i8*, i32 } undef, i8* [[EXN]], 0, !dbg [[DBG37]]
-// CHECK5-NEXT:    [[LPAD_VAL2:%.*]] = insertvalue { i8*, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG37]]
-// CHECK5-NEXT:    resume { i8*, i32 } [[LPAD_VAL2]], !dbg [[DBG37]]
+// CHECK5-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8, !dbg [[DBG38]]
+// CHECK5-NEXT:    [[SEL:%.*]] = load i32, i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG38]]
+// CHECK5-NEXT:    [[LPAD_VAL:%.*]] = insertvalue { i8*, i32 } undef, i8* [[EXN]], 0, !dbg [[DBG38]]
+// CHECK5-NEXT:    [[LPAD_VAL2:%.*]] = insertvalue { i8*, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG38]]
+// CHECK5-NEXT:    resume { i8*, i32 } [[LPAD_VAL2]], !dbg [[DBG38]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// CHECK5-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" !dbg [[DBG41:![0-9]+]] {
+// CHECK5-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" !dbg [[DBG42:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
-// CHECK5-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG42:![0-9]+]]
+// CHECK5-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG43:![0-9]+]]
 // CHECK5:       arraydestroy.body:
-// CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %class.TestClass* [ getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i64 1, i64 0), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG42]]
-// CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[CLASS_TESTCLASS:%.*]], %class.TestClass* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG42]]
-// CHECK5-NEXT:    call void @_ZN9TestClassD1Ev(%class.TestClass* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG42]]
-// CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %class.TestClass* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i32 0, i32 0), !dbg [[DBG42]]
-// CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG42]]
+// CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %class.TestClass* [ getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i64 1, i64 0), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG43]]
+// CHECK5-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[CLASS_TESTCLASS:%.*]], %class.TestClass* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG43]]
+// CHECK5-NEXT:    call void @_ZN9TestClassD1Ev(%class.TestClass* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG43]]
+// CHECK5-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %class.TestClass* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %class.TestClass], [2 x %class.TestClass]* @tc2, i32 0, i32 0), !dbg [[DBG43]]
+// CHECK5-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG43]]
 // CHECK5:       arraydestroy.done1:
-// CHECK5-NEXT:    ret void, !dbg [[DBG42]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG43]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_ZN9TestClassC2Ev
-// CHECK5-SAME: (%class.TestClass* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG43:![0-9]+]] {
+// CHECK5-SAME: (%class.TestClass* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG44:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.TestClass*, align 8
 // CHECK5-NEXT:    store %class.TestClass* [[THIS]], %class.TestClass** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    [[THIS1:%.*]] = load %class.TestClass*, %class.TestClass** [[THIS_ADDR]], align 8
-// CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[CLASS_TESTCLASS:%.*]], %class.TestClass* [[THIS1]], i32 0, i32 0, !dbg [[DBG44:![0-9]+]]
-// CHECK5-NEXT:    store i32 0, i32* [[A]], align 4, !dbg [[DBG44]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG45:![0-9]+]]
+// CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[CLASS_TESTCLASS:%.*]], %class.TestClass* [[THIS1]], i32 0, i32 0, !dbg [[DBG45:![0-9]+]]
+// CHECK5-NEXT:    store i32 0, i32* [[A]], align 4, !dbg [[DBG45]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG46:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_ZN9TestClassD2Ev
-// CHECK5-SAME: (%class.TestClass* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG46:![0-9]+]] {
+// CHECK5-SAME: (%class.TestClass* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG47:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.TestClass*, align 8
 // CHECK5-NEXT:    store %class.TestClass* [[THIS]], %class.TestClass** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    [[THIS1:%.*]] = load %class.TestClass*, %class.TestClass** [[THIS_ADDR]], align 8
-// CHECK5-NEXT:    ret void, !dbg [[DBG47:![0-9]+]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG48:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_Z3foov
-// CHECK5-SAME: () #[[ATTR4:[0-9]+]] !dbg [[DBG48:![0-9]+]] {
+// CHECK5-SAME: () #[[ATTR4:[0-9]+]] !dbg [[DBG49:![0-9]+]] {
 // CHECK5-NEXT:  entry:
-// CHECK5-NEXT:    call void @_Z8mayThrowv(), !dbg [[DBG49:![0-9]+]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG50:![0-9]+]]
+// CHECK5-NEXT:    call void @_Z8mayThrowv(), !dbg [[DBG50:![0-9]+]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG51:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@main
-// CHECK5-SAME: () #[[ATTR6:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG51:![0-9]+]] {
+// CHECK5-SAME: () #[[ATTR6:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG52:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
 // CHECK5-NEXT:    [[A:%.*]] = alloca i8, align 1
@@ -4399,97 +4392,97 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK5-NEXT:    [[DOTOMP_COPYPRIVATE_CPR_LIST:%.*]] = alloca [5 x i8*], align 8
 // CHECK5-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB5:[0-9]+]])
 // CHECK5-NEXT:    store i32 0, i32* [[RETVAL]], align 4
-// CHECK5-NEXT:    store %class.TestClass* @tc, %class.TestClass** [[C]], align 8, !dbg [[DBG52:![0-9]+]]
-// CHECK5-NEXT:    call void @_ZN3SSTIdEC1Ev(%struct.SST* nonnull align 8 dereferenceable(8) [[SST]]), !dbg [[DBG53:![0-9]+]]
-// CHECK5-NEXT:    call void @_ZN2SSC1ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[SS]], i32* nonnull align 4 dereferenceable(4) getelementptr inbounds ([[CLASS_TESTCLASS:%.*]], %class.TestClass* @tc, i32 0, i32 0)), !dbg [[DBG54:![0-9]+]]
-// CHECK5-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB5]], i32 [[TMP0]]), !dbg [[DBG55:![0-9]+]]
-// CHECK5-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG55]]
-// CHECK5-NEXT:    br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG55]]
+// CHECK5-NEXT:    store %class.TestClass* @tc, %class.TestClass** [[C]], align 8, !dbg [[DBG53:![0-9]+]]
+// CHECK5-NEXT:    call void @_ZN3SSTIdEC1Ev(%struct.SST* nonnull align 8 dereferenceable(8) [[SST]]), !dbg [[DBG54:![0-9]+]]
+// CHECK5-NEXT:    call void @_ZN2SSC1ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[SS]], i32* nonnull align 4 dereferenceable(4) getelementptr inbounds ([[CLASS_TESTCLASS:%.*]], %class.TestClass* @tc, i32 0, i32 0)), !dbg [[DBG55:![0-9]+]]
+// CHECK5-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB5]], i32 [[TMP0]]), !dbg [[DBG56:![0-9]+]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG56]]
+// CHECK5-NEXT:    br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG56]]
 // CHECK5:       omp_if.then:
-// CHECK5-NEXT:    store i8 2, i8* [[A]], align 1, !dbg [[DBG56:![0-9]+]]
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB5]], i32 [[TMP0]]), !dbg [[DBG57:![0-9]+]]
-// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG57]]
+// CHECK5-NEXT:    store i8 2, i8* [[A]], align 1, !dbg [[DBG57:![0-9]+]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB5]], i32 [[TMP0]]), !dbg [[DBG58:![0-9]+]]
+// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG58]]
 // CHECK5:       omp_if.end:
-// CHECK5-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB7:[0-9]+]], i32 [[TMP0]]), !dbg [[DBG58:![0-9]+]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0, !dbg [[DBG58]]
-// CHECK5-NEXT:    br i1 [[TMP4]], label [[OMP_IF_THEN1:%.*]], label [[OMP_IF_END2:%.*]], !dbg [[DBG58]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB7:[0-9]+]], i32 [[TMP0]]), !dbg [[DBG59:![0-9]+]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0, !dbg [[DBG59]]
+// CHECK5-NEXT:    br i1 [[TMP4]], label [[OMP_IF_THEN1:%.*]], label [[OMP_IF_END2:%.*]], !dbg [[DBG59]]
 // CHECK5:       omp_if.then1:
-// CHECK5-NEXT:    store i8 2, i8* [[A]], align 1, !dbg [[DBG59:![0-9]+]]
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB7]], i32 [[TMP0]]), !dbg [[DBG60:![0-9]+]]
-// CHECK5-NEXT:    br label [[OMP_IF_END2]], !dbg [[DBG60]]
+// CHECK5-NEXT:    store i8 2, i8* [[A]], align 1, !dbg [[DBG60:![0-9]+]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB7]], i32 [[TMP0]]), !dbg [[DBG61:![0-9]+]]
+// CHECK5-NEXT:    br label [[OMP_IF_END2]], !dbg [[DBG61]]
 // CHECK5:       omp_if.end2:
-// CHECK5-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB8:[0-9]+]], i32 [[TMP0]]), !dbg [[DBG61:![0-9]+]]
-// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG62:![0-9]+]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB10:[0-9]+]], i32 [[TMP0]]), !dbg [[DBG62]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0, !dbg [[DBG62]]
-// CHECK5-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN3:%.*]], label [[OMP_IF_END4:%.*]], !dbg [[DBG62]]
+// CHECK5-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB8:[0-9]+]], i32 [[TMP0]]), !dbg [[DBG62:![0-9]+]]
+// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG63:![0-9]+]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB10:[0-9]+]], i32 [[TMP0]]), !dbg [[DBG63]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0, !dbg [[DBG63]]
+// CHECK5-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN3:%.*]], label [[OMP_IF_END4:%.*]], !dbg [[DBG63]]
 // CHECK5:       omp_if.then3:
 // CHECK5-NEXT:    invoke void @_Z3foov()
-// CHECK5-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG63:![0-9]+]]
+// CHECK5-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG64:![0-9]+]]
 // CHECK5:       invoke.cont:
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB10]], i32 [[TMP0]]), !dbg [[DBG63]]
-// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG63]]
-// CHECK5-NEXT:    br label [[OMP_IF_END4]], !dbg [[DBG63]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB10]], i32 [[TMP0]]), !dbg [[DBG64]]
+// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG64]]
+// CHECK5-NEXT:    br label [[OMP_IF_END4]], !dbg [[DBG64]]
 // CHECK5:       lpad:
 // CHECK5-NEXT:    [[TMP7:%.*]] = landingpad { i8*, i32 }
-// CHECK5-NEXT:    catch i8* null, !dbg [[DBG64:![0-9]+]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0, !dbg [[DBG64]]
-// CHECK5-NEXT:    store i8* [[TMP8]], i8** [[EXN_SLOT]], align 8, !dbg [[DBG64]]
-// CHECK5-NEXT:    [[TMP9:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 1, !dbg [[DBG64]]
-// CHECK5-NEXT:    store i32 [[TMP9]], i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG64]]
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB10]], i32 [[TMP0]]), !dbg [[DBG63]]
-// CHECK5-NEXT:    br label [[TERMINATE_HANDLER:%.*]], !dbg [[DBG63]]
+// CHECK5-NEXT:    catch i8* null, !dbg [[DBG65:![0-9]+]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0, !dbg [[DBG65]]
+// CHECK5-NEXT:    store i8* [[TMP8]], i8** [[EXN_SLOT]], align 8, !dbg [[DBG65]]
+// CHECK5-NEXT:    [[TMP9:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 1, !dbg [[DBG65]]
+// CHECK5-NEXT:    store i32 [[TMP9]], i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG65]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB10]], i32 [[TMP0]]), !dbg [[DBG64]]
+// CHECK5-NEXT:    br label [[TERMINATE_HANDLER:%.*]], !dbg [[DBG64]]
 // CHECK5:       omp_if.end4:
-// CHECK5-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0, !dbg [[DBG63]]
-// CHECK5-NEXT:    store i8* [[A]], i8** [[TMP10]], align 8, !dbg [[DBG63]]
-// CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 1, !dbg [[DBG63]]
-// CHECK5-NEXT:    store i8* bitcast (%class.TestClass* @tc to i8*), i8** [[TMP11]], align 8, !dbg [[DBG63]]
-// CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 2, !dbg [[DBG63]]
-// CHECK5-NEXT:    [[TMP13:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB12:[0-9]+]], i32 [[TMP0]], i8* bitcast (%class.TestClass* @tc to i8*), i64 4, i8*** @tc.cache.), !dbg [[DBG65:![0-9]+]]
-// CHECK5-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %class.TestClass*, !dbg [[DBG65]]
-// CHECK5-NEXT:    [[TMP15:%.*]] = bitcast %class.TestClass* [[TMP14]] to i8*, !dbg [[DBG63]]
-// CHECK5-NEXT:    store i8* [[TMP15]], i8** [[TMP12]], align 8, !dbg [[DBG63]]
-// CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 3, !dbg [[DBG63]]
-// CHECK5-NEXT:    [[TMP17:%.*]] = bitcast [2 x i8]* [[A2]] to i8*, !dbg [[DBG63]]
-// CHECK5-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8, !dbg [[DBG63]]
-// CHECK5-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 4, !dbg [[DBG63]]
-// CHECK5-NEXT:    [[TMP19:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB14:[0-9]+]], i32 [[TMP0]], i8* bitcast ([2 x %class.TestClass]* @tc2 to i8*), i64 8, i8*** @tc2.cache.), !dbg [[DBG66:![0-9]+]]
-// CHECK5-NEXT:    [[TMP20:%.*]] = bitcast i8* [[TMP19]] to [2 x %class.TestClass]*, !dbg [[DBG66]]
-// CHECK5-NEXT:    [[TMP21:%.*]] = bitcast [2 x %class.TestClass]* [[TMP20]] to i8*, !dbg [[DBG63]]
-// CHECK5-NEXT:    store i8* [[TMP21]], i8** [[TMP18]], align 8, !dbg [[DBG63]]
-// CHECK5-NEXT:    [[TMP22:%.*]] = bitcast [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*, !dbg [[DBG63]]
-// CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG63]]
-// CHECK5-NEXT:    call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB10]], i32 [[TMP0]], i64 40, i8* [[TMP22]], void (i8*, i8*)* @.omp.copyprivate.copy_func, i32 [[TMP23]]), !dbg [[DBG63]]
-// CHECK5-NEXT:    [[TMP24:%.*]] = load i8, i8* [[A]], align 1, !dbg [[DBG67:![0-9]+]]
-// CHECK5-NEXT:    [[CONV:%.*]] = sext i8 [[TMP24]] to i32, !dbg [[DBG67]]
-// CHECK5-NEXT:    ret i32 [[CONV]], !dbg [[DBG68:![0-9]+]]
+// CHECK5-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0, !dbg [[DBG64]]
+// CHECK5-NEXT:    store i8* [[A]], i8** [[TMP10]], align 8, !dbg [[DBG64]]
+// CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 1, !dbg [[DBG64]]
+// CHECK5-NEXT:    store i8* bitcast (%class.TestClass* @tc to i8*), i8** [[TMP11]], align 8, !dbg [[DBG64]]
+// CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 2, !dbg [[DBG64]]
+// CHECK5-NEXT:    [[TMP13:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB12:[0-9]+]], i32 [[TMP0]], i8* bitcast (%class.TestClass* @tc to i8*), i64 4, i8*** @tc.cache.), !dbg [[DBG66:![0-9]+]]
+// CHECK5-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to %class.TestClass*, !dbg [[DBG66]]
+// CHECK5-NEXT:    [[TMP15:%.*]] = bitcast %class.TestClass* [[TMP14]] to i8*, !dbg [[DBG64]]
+// CHECK5-NEXT:    store i8* [[TMP15]], i8** [[TMP12]], align 8, !dbg [[DBG64]]
+// CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 3, !dbg [[DBG64]]
+// CHECK5-NEXT:    [[TMP17:%.*]] = bitcast [2 x i8]* [[A2]] to i8*, !dbg [[DBG64]]
+// CHECK5-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8, !dbg [[DBG64]]
+// CHECK5-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 4, !dbg [[DBG64]]
+// CHECK5-NEXT:    [[TMP19:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB14:[0-9]+]], i32 [[TMP0]], i8* bitcast ([2 x %class.TestClass]* @tc2 to i8*), i64 8, i8*** @tc2.cache.), !dbg [[DBG67:![0-9]+]]
+// CHECK5-NEXT:    [[TMP20:%.*]] = bitcast i8* [[TMP19]] to [2 x %class.TestClass]*, !dbg [[DBG67]]
+// CHECK5-NEXT:    [[TMP21:%.*]] = bitcast [2 x %class.TestClass]* [[TMP20]] to i8*, !dbg [[DBG64]]
+// CHECK5-NEXT:    store i8* [[TMP21]], i8** [[TMP18]], align 8, !dbg [[DBG64]]
+// CHECK5-NEXT:    [[TMP22:%.*]] = bitcast [5 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*, !dbg [[DBG64]]
+// CHECK5-NEXT:    [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG64]]
+// CHECK5-NEXT:    call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB10]], i32 [[TMP0]], i64 40, i8* [[TMP22]], void (i8*, i8*)* @.omp.copyprivate.copy_func, i32 [[TMP23]]), !dbg [[DBG64]]
+// CHECK5-NEXT:    [[TMP24:%.*]] = load i8, i8* [[A]], align 1, !dbg [[DBG68:![0-9]+]]
+// CHECK5-NEXT:    [[CONV:%.*]] = sext i8 [[TMP24]] to i32, !dbg [[DBG68]]
+// CHECK5-NEXT:    ret i32 [[CONV]], !dbg [[DBG69:![0-9]+]]
 // CHECK5:       terminate.handler:
-// CHECK5-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8, !dbg [[DBG63]]
-// CHECK5-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR13:[0-9]+]], !dbg [[DBG63]]
-// CHECK5-NEXT:    unreachable, !dbg [[DBG63]]
+// CHECK5-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8, !dbg [[DBG64]]
+// CHECK5-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR13:[0-9]+]], !dbg [[DBG64]]
+// CHECK5-NEXT:    unreachable, !dbg [[DBG64]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_ZN3SSTIdEC1Ev
-// CHECK5-SAME: (%struct.SST* nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG69:![0-9]+]] {
+// CHECK5-SAME: (%struct.SST* nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG70:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SST*, align 8
 // CHECK5-NEXT:    store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8
-// CHECK5-NEXT:    call void @_ZN3SSTIdEC2Ev(%struct.SST* nonnull align 8 dereferenceable(8) [[THIS1]]), !dbg [[DBG70:![0-9]+]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG71:![0-9]+]]
+// CHECK5-NEXT:    call void @_ZN3SSTIdEC2Ev(%struct.SST* nonnull align 8 dereferenceable(8) [[THIS1]]), !dbg [[DBG71:![0-9]+]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG72:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_ZN2SSC1ERi
-// CHECK5-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG72:![0-9]+]] {
+// CHECK5-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG73:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
 // CHECK5-NEXT:    [[D_ADDR:%.*]] = alloca i32*, align 8
 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    store i32* [[D]], i32** [[D_ADDR]], align 8
 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
-// CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8, !dbg [[DBG73:![0-9]+]]
-// CHECK5-NEXT:    call void @_ZN2SSC2ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[THIS1]], i32* nonnull align 4 dereferenceable(4) [[TMP0]]), !dbg [[DBG73]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG74:![0-9]+]]
+// CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8, !dbg [[DBG74:![0-9]+]]
+// CHECK5-NEXT:    call void @_ZN2SSC2ERi(%struct.SS* nonnull align 8 dereferenceable(16) [[THIS1]], i32* nonnull align 4 dereferenceable(4) [[TMP0]]), !dbg [[DBG74]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG75:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@__clang_call_terminate
@@ -4500,96 +4493,96 @@ void array_func(int n, int a[n], St s[2]) {
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@.omp.copyprivate.copy_func
-// CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR9:[0-9]+]] !dbg [[DBG75:![0-9]+]] {
+// CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR9:[0-9]+]] !dbg [[DBG76:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
-// CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8, !dbg [[DBG76:![0-9]+]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [5 x i8*]*, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [5 x i8*]*, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP3]], i64 0, i64 0, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP5]], i64 0, i64 0, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 8, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP10:%.*]] = load i8, i8* [[TMP9]], align 1, !dbg [[DBG77:![0-9]+]]
-// CHECK5-NEXT:    store i8 [[TMP10]], i8* [[TMP7]], align 1, !dbg [[DBG77]]
-// CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP3]], i64 0, i64 1, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP12:%.*]] = load i8*, i8** [[TMP11]], align 8, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP13:%.*]] = bitcast i8* [[TMP12]] to %class.TestClass*, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP5]], i64 0, i64 1, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP15:%.*]] = load i8*, i8** [[TMP14]], align 8, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to %class.TestClass*, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %class.TestClass* @_ZN9TestClassaSERKS_(%class.TestClass* nonnull align 4 dereferenceable(4) [[TMP13]], %class.TestClass* nonnull align 4 dereferenceable(4) [[TMP16]]), !dbg [[DBG78:![0-9]+]]
-// CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP3]], i64 0, i64 2, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP18:%.*]] = load i8*, i8** [[TMP17]], align 8, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP19:%.*]] = bitcast i8* [[TMP18]] to %class.TestClass*, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP5]], i64 0, i64 2, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP22:%.*]] = bitcast i8* [[TMP21]] to %class.TestClass*, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[CALL2:%.*]] = call nonnull align 4 dereferenceable(4) %class.TestClass* @_ZN9TestClassaSERKS_(%class.TestClass* nonnull align 4 dereferenceable(4) [[TMP19]], %class.TestClass* nonnull align 4 dereferenceable(4) [[TMP22]]), !dbg [[DBG79:![0-9]+]]
-// CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP3]], i64 0, i64 3, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[TMP23]], align 8, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP5]], i64 0, i64 3, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP26:%.*]] = load i8*, i8** [[TMP25]], align 8, !dbg [[DBG76]]
-// CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[TMP24]], i8* align 1 [[TMP26]], i64 2, i1 false), !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP3]], i64 0, i64 4, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP29:%.*]] = bitcast i8* [[TMP28]] to %class.TestClass*, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP5]], i64 0, i64 4, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP31:%.*]] = load i8*, i8** [[TMP30]], align 8, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP32:%.*]] = bitcast i8* [[TMP31]] to %class.TestClass*, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[TMP33:%.*]] = getelementptr [[CLASS_TESTCLASS:%.*]], %class.TestClass* [[TMP29]], i64 2, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %class.TestClass* [[TMP29]], [[TMP33]], !dbg [[DBG76]]
-// CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]], !dbg [[DBG76]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8, !dbg [[DBG77:![0-9]+]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [5 x i8*]*, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [5 x i8*]*, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP3]], i64 0, i64 0, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP5]], i64 0, i64 0, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 8, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP10:%.*]] = load i8, i8* [[TMP9]], align 1, !dbg [[DBG78:![0-9]+]]
+// CHECK5-NEXT:    store i8 [[TMP10]], i8* [[TMP7]], align 1, !dbg [[DBG78]]
+// CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP3]], i64 0, i64 1, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP12:%.*]] = load i8*, i8** [[TMP11]], align 8, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP13:%.*]] = bitcast i8* [[TMP12]] to %class.TestClass*, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP5]], i64 0, i64 1, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP15:%.*]] = load i8*, i8** [[TMP14]], align 8, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to %class.TestClass*, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %class.TestClass* @_ZN9TestClassaSERKS_(%class.TestClass* nonnull align 4 dereferenceable(4) [[TMP13]], %class.TestClass* nonnull align 4 dereferenceable(4) [[TMP16]]), !dbg [[DBG79:![0-9]+]]
+// CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP3]], i64 0, i64 2, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP18:%.*]] = load i8*, i8** [[TMP17]], align 8, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP19:%.*]] = bitcast i8* [[TMP18]] to %class.TestClass*, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP5]], i64 0, i64 2, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP22:%.*]] = bitcast i8* [[TMP21]] to %class.TestClass*, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[CALL2:%.*]] = call nonnull align 4 dereferenceable(4) %class.TestClass* @_ZN9TestClassaSERKS_(%class.TestClass* nonnull align 4 dereferenceable(4) [[TMP19]], %class.TestClass* nonnull align 4 dereferenceable(4) [[TMP22]]), !dbg [[DBG80:![0-9]+]]
+// CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP3]], i64 0, i64 3, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[TMP23]], align 8, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP5]], i64 0, i64 3, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP26:%.*]] = load i8*, i8** [[TMP25]], align 8, !dbg [[DBG77]]
+// CHECK5-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[TMP24]], i8* align 1 [[TMP26]], i64 2, i1 false), !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP3]], i64 0, i64 4, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP28:%.*]] = load i8*, i8** [[TMP27]], align 8, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP29:%.*]] = bitcast i8* [[TMP28]] to %class.TestClass*, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[TMP5]], i64 0, i64 4, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP31:%.*]] = load i8*, i8** [[TMP30]], align 8, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP32:%.*]] = bitcast i8* [[TMP31]] to %class.TestClass*, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[TMP33:%.*]] = getelementptr [[CLASS_TESTCLASS:%.*]], %class.TestClass* [[TMP29]], i64 2, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %class.TestClass* [[TMP29]], [[TMP33]], !dbg [[DBG77]]
+// CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]], !dbg [[DBG77]]
 // CHECK5:       omp.arraycpy.body:
-// CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %class.TestClass* [ [[TMP32]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ], !dbg [[DBG76]]
-// CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %class.TestClass* [ [[TMP29]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ], !dbg [[DBG76]]
-// CHECK5-NEXT:    [[CALL3:%.*]] = call nonnull align 4 dereferenceable(4) %class.TestClass* @_ZN9TestClassaSERKS_(%class.TestClass* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %class.TestClass* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]]), !dbg [[DBG80:![0-9]+]]
-// CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[CLASS_TESTCLASS]], %class.TestClass* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[CLASS_TESTCLASS]], %class.TestClass* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1, !dbg [[DBG76]]
-// CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %class.TestClass* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP33]], !dbg [[DBG76]]
-// CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]], !dbg [[DBG76]]
+// CHECK5-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %class.TestClass* [ [[TMP32]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ], !dbg [[DBG77]]
+// CHECK5-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %class.TestClass* [ [[TMP29]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ], !dbg [[DBG77]]
+// CHECK5-NEXT:    [[CALL3:%.*]] = call nonnull align 4 dereferenceable(4) %class.TestClass* @_ZN9TestClassaSERKS_(%class.TestClass* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %class.TestClass* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]]), !dbg [[DBG81:![0-9]+]]
+// CHECK5-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[CLASS_TESTCLASS]], %class.TestClass* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[CLASS_TESTCLASS]], %class.TestClass* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1, !dbg [[DBG77]]
+// CHECK5-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %class.TestClass* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP33]], !dbg [[DBG77]]
+// CHECK5-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]], !dbg [[DBG77]]
 // CHECK5:       omp.arraycpy.done4:
-// CHECK5-NEXT:    ret void, !dbg [[DBG80]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG81]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_ZN9TestClassaSERKS_
-// CHECK5-SAME: (%class.TestClass* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %class.TestClass* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR10:[0-9]+]] align 2 !dbg [[DBG81:![0-9]+]] {
+// CHECK5-SAME: (%class.TestClass* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %class.TestClass* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR10:[0-9]+]] align 2 !dbg [[DBG82:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.TestClass*, align 8
 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca %class.TestClass*, align 8
 // CHECK5-NEXT:    store %class.TestClass* [[THIS]], %class.TestClass** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    store %class.TestClass* [[TMP0]], %class.TestClass** [[DOTADDR]], align 8
 // CHECK5-NEXT:    [[THIS1:%.*]] = load %class.TestClass*, %class.TestClass** [[THIS_ADDR]], align 8
-// CHECK5-NEXT:    ret %class.TestClass* [[THIS1]], !dbg [[DBG82:![0-9]+]]
+// CHECK5-NEXT:    ret %class.TestClass* [[THIS1]], !dbg [[DBG83:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_ZN3SSTIdEC2Ev
-// CHECK5-SAME: (%struct.SST* nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG83:![0-9]+]] {
+// CHECK5-SAME: (%struct.SST* nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG84:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SST*, align 8
 // CHECK5-NEXT:    [[A2:%.*]] = alloca double*, align 8
 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
 // CHECK5-NEXT:    store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8
-// CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SST:%.*]], %struct.SST* [[THIS1]], i32 0, i32 0, !dbg [[DBG84:![0-9]+]]
-// CHECK5-NEXT:    store double 0.000000e+00, double* [[A]], align 8, !dbg [[DBG84]]
-// CHECK5-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_SST]], %struct.SST* [[THIS1]], i32 0, i32 0, !dbg [[DBG85:![0-9]+]]
-// CHECK5-NEXT:    store double* [[A3]], double** [[A2]], align 8, !dbg [[DBG85]]
-// CHECK5-NEXT:    [[TMP0:%.*]] = load double*, double** [[A2]], align 8, !dbg [[DBG86:![0-9]+]]
-// CHECK5-NEXT:    [[TMP1:%.*]] = load double, double* [[TMP0]], align 8, !dbg [[DBG87:![0-9]+]]
-// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to double*, !dbg [[DBG87]]
-// CHECK5-NEXT:    store double [[TMP1]], double* [[CONV]], align 8, !dbg [[DBG87]]
-// CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8, !dbg [[DBG87]]
-// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB18:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SST*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SST* [[THIS1]], i64 [[TMP2]]), !dbg [[DBG87]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG88:![0-9]+]]
+// CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SST:%.*]], %struct.SST* [[THIS1]], i32 0, i32 0, !dbg [[DBG85:![0-9]+]]
+// CHECK5-NEXT:    store double 0.000000e+00, double* [[A]], align 8, !dbg [[DBG85]]
+// CHECK5-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_SST]], %struct.SST* [[THIS1]], i32 0, i32 0, !dbg [[DBG86:![0-9]+]]
+// CHECK5-NEXT:    store double* [[A3]], double** [[A2]], align 8, !dbg [[DBG86]]
+// CHECK5-NEXT:    [[TMP0:%.*]] = load double*, double** [[A2]], align 8, !dbg [[DBG87:![0-9]+]]
+// CHECK5-NEXT:    [[TMP1:%.*]] = load double, double* [[TMP0]], align 8, !dbg [[DBG88:![0-9]+]]
+// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to double*, !dbg [[DBG88]]
+// CHECK5-NEXT:    store double [[TMP1]], double* [[CONV]], align 8, !dbg [[DBG88]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8, !dbg [[DBG88]]
+// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB18:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SST*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SST* [[THIS1]], i64 [[TMP2]]), !dbg [[DBG88]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG89:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
-// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SST* [[THIS:%.*]], i64 [[A:%.*]]) #[[ATTR12:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG89:![0-9]+]] {
+// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SST* [[THIS:%.*]], i64 [[A:%.*]]) #[[ATTR12:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG90:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
@@ -4606,55 +4599,55 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
 // CHECK5-NEXT:    store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
-// CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8, !dbg [[DBG90:![0-9]+]]
-// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to double*, !dbg [[DBG90]]
-// CHECK5-NEXT:    store double* [[CONV]], double** [[TMP]], align 8, !dbg [[DBG90]]
-// CHECK5-NEXT:    [[TMP1:%.*]] = load double*, double** [[TMP]], align 8, !dbg [[DBG91:![0-9]+]]
-// CHECK5-NEXT:    store double* [[TMP1]], double** [[_TMP1]], align 8, !dbg [[DBG92:![0-9]+]]
-// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG92]]
-// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG92]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG92]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB16:[0-9]+]], i32 [[TMP3]]), !dbg [[DBG92]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0, !dbg [[DBG92]]
-// CHECK5-NEXT:    br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG92]]
+// CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8, !dbg [[DBG91:![0-9]+]]
+// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to double*, !dbg [[DBG91]]
+// CHECK5-NEXT:    store double* [[CONV]], double** [[TMP]], align 8, !dbg [[DBG91]]
+// CHECK5-NEXT:    [[TMP1:%.*]] = load double*, double** [[TMP]], align 8, !dbg [[DBG92:![0-9]+]]
+// CHECK5-NEXT:    store double* [[TMP1]], double** [[_TMP1]], align 8, !dbg [[DBG93:![0-9]+]]
+// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG93]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG93]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG93]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB16:[0-9]+]], i32 [[TMP3]]), !dbg [[DBG93]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0, !dbg [[DBG93]]
+// CHECK5-NEXT:    br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG93]]
 // CHECK5:       omp_if.then:
-// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0, !dbg [[DBG93:![0-9]+]]
-// CHECK5-NEXT:    store %struct.SST* [[TMP0]], %struct.SST** [[TMP6]], align 8, !dbg [[DBG93]]
-// CHECK5-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1, !dbg [[DBG93]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = load double*, double** [[_TMP1]], align 8, !dbg [[DBG94:![0-9]+]]
-// CHECK5-NEXT:    store double* [[TMP8]], double** [[TMP7]], align 8, !dbg [[DBG93]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0, !dbg [[DBG94:![0-9]+]]
+// CHECK5-NEXT:    store %struct.SST* [[TMP0]], %struct.SST** [[TMP6]], align 8, !dbg [[DBG94]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 1, !dbg [[DBG94]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = load double*, double** [[_TMP1]], align 8, !dbg [[DBG95:![0-9]+]]
+// CHECK5-NEXT:    store double* [[TMP8]], double** [[TMP7]], align 8, !dbg [[DBG94]]
 // CHECK5-NEXT:    invoke void @_ZZN3SSTIdEC1EvENKUlvE_clEv(%class.anon* nonnull align 8 dereferenceable(16) [[REF_TMP]])
-// CHECK5-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG93]]
+// CHECK5-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG94]]
 // CHECK5:       invoke.cont:
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB16]], i32 [[TMP3]]), !dbg [[DBG93]]
-// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG93]]
-// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG93]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB16]], i32 [[TMP3]]), !dbg [[DBG94]]
+// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG94]]
+// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG94]]
 // CHECK5:       lpad:
 // CHECK5-NEXT:    [[TMP9:%.*]] = landingpad { i8*, i32 }
-// CHECK5-NEXT:    catch i8* null, !dbg [[DBG95:![0-9]+]]
-// CHECK5-NEXT:    [[TMP10:%.*]] = extractvalue { i8*, i32 } [[TMP9]], 0, !dbg [[DBG95]]
-// CHECK5-NEXT:    store i8* [[TMP10]], i8** [[EXN_SLOT]], align 8, !dbg [[DBG95]]
-// CHECK5-NEXT:    [[TMP11:%.*]] = extractvalue { i8*, i32 } [[TMP9]], 1, !dbg [[DBG95]]
-// CHECK5-NEXT:    store i32 [[TMP11]], i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG95]]
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB16]], i32 [[TMP3]]), !dbg [[DBG93]]
-// CHECK5-NEXT:    br label [[TERMINATE_HANDLER:%.*]], !dbg [[DBG93]]
+// CHECK5-NEXT:    catch i8* null, !dbg [[DBG96:![0-9]+]]
+// CHECK5-NEXT:    [[TMP10:%.*]] = extractvalue { i8*, i32 } [[TMP9]], 0, !dbg [[DBG96]]
+// CHECK5-NEXT:    store i8* [[TMP10]], i8** [[EXN_SLOT]], align 8, !dbg [[DBG96]]
+// CHECK5-NEXT:    [[TMP11:%.*]] = extractvalue { i8*, i32 } [[TMP9]], 1, !dbg [[DBG96]]
+// CHECK5-NEXT:    store i32 [[TMP11]], i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG96]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB16]], i32 [[TMP3]]), !dbg [[DBG94]]
+// CHECK5-NEXT:    br label [[TERMINATE_HANDLER:%.*]], !dbg [[DBG94]]
 // CHECK5:       omp_if.end:
-// CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0, !dbg [[DBG93]]
-// CHECK5-NEXT:    [[TMP13:%.*]] = load double*, double** [[_TMP1]], align 8, !dbg [[DBG96:![0-9]+]]
-// CHECK5-NEXT:    [[TMP14:%.*]] = bitcast double* [[TMP13]] to i8*, !dbg [[DBG93]]
-// CHECK5-NEXT:    store i8* [[TMP14]], i8** [[TMP12]], align 8, !dbg [[DBG93]]
-// CHECK5-NEXT:    [[TMP15:%.*]] = bitcast [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*, !dbg [[DBG93]]
-// CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG93]]
-// CHECK5-NEXT:    call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB16]], i32 [[TMP3]], i64 8, i8* [[TMP15]], void (i8*, i8*)* @.omp.copyprivate.copy_func.5, i32 [[TMP16]]), !dbg [[DBG93]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG97:![0-9]+]]
+// CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0, !dbg [[DBG94]]
+// CHECK5-NEXT:    [[TMP13:%.*]] = load double*, double** [[_TMP1]], align 8, !dbg [[DBG97:![0-9]+]]
+// CHECK5-NEXT:    [[TMP14:%.*]] = bitcast double* [[TMP13]] to i8*, !dbg [[DBG94]]
+// CHECK5-NEXT:    store i8* [[TMP14]], i8** [[TMP12]], align 8, !dbg [[DBG94]]
+// CHECK5-NEXT:    [[TMP15:%.*]] = bitcast [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*, !dbg [[DBG94]]
+// CHECK5-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG94]]
+// CHECK5-NEXT:    call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB16]], i32 [[TMP3]], i64 8, i8* [[TMP15]], void (i8*, i8*)* @.omp.copyprivate.copy_func.5, i32 [[TMP16]]), !dbg [[DBG94]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG98:![0-9]+]]
 // CHECK5:       terminate.handler:
-// CHECK5-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8, !dbg [[DBG93]]
-// CHECK5-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR13]], !dbg [[DBG93]]
-// CHECK5-NEXT:    unreachable, !dbg [[DBG93]]
+// CHECK5-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8, !dbg [[DBG94]]
+// CHECK5-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR13]], !dbg [[DBG94]]
+// CHECK5-NEXT:    unreachable, !dbg [[DBG94]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_ZZN3SSTIdEC1EvENKUlvE_clEv
-// CHECK5-SAME: (%class.anon* nonnull align 8 dereferenceable(16) [[THIS:%.*]]) #[[ATTR4]] align 2 !dbg [[DBG98:![0-9]+]] {
+// CHECK5-SAME: (%class.anon* nonnull align 8 dereferenceable(16) [[THIS:%.*]]) #[[ATTR4]] align 2 !dbg [[DBG99:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.anon*, align 8
 // CHECK5-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
@@ -4662,40 +4655,40 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK5-NEXT:    [[THIS1:%.*]] = load %class.anon*, %class.anon** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON:%.*]], %class.anon* [[THIS1]], i32 0, i32 0
 // CHECK5-NEXT:    [[TMP1:%.*]] = load %struct.SST*, %struct.SST** [[TMP0]], align 8
-// CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0, !dbg [[DBG99:![0-9]+]]
-// CHECK5-NEXT:    store %struct.SST* [[TMP1]], %struct.SST** [[TMP2]], align 8, !dbg [[DBG99]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1, !dbg [[DBG99]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[THIS1]], i32 0, i32 1, !dbg [[DBG100:![0-9]+]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = load double*, double** [[TMP4]], align 8, !dbg [[DBG100]]
-// CHECK5-NEXT:    store double* [[TMP5]], double** [[TMP3]], align 8, !dbg [[DBG99]]
-// CHECK5-NEXT:    call void @_ZZZN3SSTIdEC1EvENKUlvE_clEvENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(16) [[REF_TMP]]), !dbg [[DBG99]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG101:![0-9]+]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0, !dbg [[DBG100:![0-9]+]]
+// CHECK5-NEXT:    store %struct.SST* [[TMP1]], %struct.SST** [[TMP2]], align 8, !dbg [[DBG100]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1, !dbg [[DBG100]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[THIS1]], i32 0, i32 1, !dbg [[DBG101:![0-9]+]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = load double*, double** [[TMP4]], align 8, !dbg [[DBG101]]
+// CHECK5-NEXT:    store double* [[TMP5]], double** [[TMP3]], align 8, !dbg [[DBG100]]
+// CHECK5-NEXT:    call void @_ZZZN3SSTIdEC1EvENKUlvE_clEvENKUlvE_clEv(%class.anon.0* nonnull align 8 dereferenceable(16) [[REF_TMP]]), !dbg [[DBG100]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG102:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@.omp.copyprivate.copy_func.5
-// CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR9]] !dbg [[DBG102:![0-9]+]] {
+// CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR9]] !dbg [[DBG103:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
-// CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8, !dbg [[DBG103:![0-9]+]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*, !dbg [[DBG103]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8, !dbg [[DBG103]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*, !dbg [[DBG103]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0, !dbg [[DBG103]]
-// CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8, !dbg [[DBG103]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*, !dbg [[DBG103]]
-// CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0, !dbg [[DBG103]]
-// CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8, !dbg [[DBG103]]
-// CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*, !dbg [[DBG103]]
-// CHECK5-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP11]], align 8, !dbg [[DBG104:![0-9]+]]
-// CHECK5-NEXT:    store double [[TMP12]], double* [[TMP8]], align 8, !dbg [[DBG104]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG104]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8, !dbg [[DBG104:![0-9]+]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*, !dbg [[DBG104]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8, !dbg [[DBG104]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*, !dbg [[DBG104]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0, !dbg [[DBG104]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8, !dbg [[DBG104]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*, !dbg [[DBG104]]
+// CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0, !dbg [[DBG104]]
+// CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8, !dbg [[DBG104]]
+// CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*, !dbg [[DBG104]]
+// CHECK5-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP11]], align 8, !dbg [[DBG105:![0-9]+]]
+// CHECK5-NEXT:    store double [[TMP12]], double* [[TMP8]], align 8, !dbg [[DBG105]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG105]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_ZZZN3SSTIdEC1EvENKUlvE_clEvENKUlvE_clEv
-// CHECK5-SAME: (%class.anon.0* nonnull align 8 dereferenceable(16) [[THIS:%.*]]) #[[ATTR10]] align 2 !dbg [[DBG107:![0-9]+]] {
+// CHECK5-SAME: (%class.anon.0* nonnull align 8 dereferenceable(16) [[THIS:%.*]]) #[[ATTR10]] align 2 !dbg [[DBG108:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.anon.0*, align 8
 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
@@ -4703,23 +4696,23 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK5-NEXT:    [[THIS1:%.*]] = load %class.anon.0*, %class.anon.0** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_0:%.*]], %class.anon.0* [[THIS1]], i32 0, i32 0
 // CHECK5-NEXT:    [[TMP1:%.*]] = load %struct.SST*, %struct.SST** [[TMP0]], align 8
-// CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[THIS1]], i32 0, i32 1, !dbg [[DBG108:![0-9]+]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = load double*, double** [[TMP2]], align 8, !dbg [[DBG108]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = load double, double* [[TMP3]], align 8, !dbg [[DBG109:![0-9]+]]
-// CHECK5-NEXT:    [[INC:%.*]] = fadd double [[TMP4]], 1.000000e+00, !dbg [[DBG109]]
-// CHECK5-NEXT:    store double [[INC]], double* [[TMP3]], align 8, !dbg [[DBG109]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[THIS1]], i32 0, i32 1, !dbg [[DBG110:![0-9]+]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = load double*, double** [[TMP5]], align 8, !dbg [[DBG110]]
-// CHECK5-NEXT:    [[TMP7:%.*]] = load double, double* [[TMP6]], align 8, !dbg [[DBG111:![0-9]+]]
-// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to double*, !dbg [[DBG111]]
-// CHECK5-NEXT:    store double [[TMP7]], double* [[CONV]], align 8, !dbg [[DBG111]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[A_CASTED]], align 8, !dbg [[DBG111]]
-// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB22:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SST*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SST* [[TMP1]], i64 [[TMP8]]), !dbg [[DBG111]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG112:![0-9]+]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[THIS1]], i32 0, i32 1, !dbg [[DBG109:![0-9]+]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = load double*, double** [[TMP2]], align 8, !dbg [[DBG109]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = load double, double* [[TMP3]], align 8, !dbg [[DBG110:![0-9]+]]
+// CHECK5-NEXT:    [[INC:%.*]] = fadd double [[TMP4]], 1.000000e+00, !dbg [[DBG110]]
+// CHECK5-NEXT:    store double [[INC]], double* [[TMP3]], align 8, !dbg [[DBG110]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[THIS1]], i32 0, i32 1, !dbg [[DBG111:![0-9]+]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = load double*, double** [[TMP5]], align 8, !dbg [[DBG111]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = load double, double* [[TMP6]], align 8, !dbg [[DBG112:![0-9]+]]
+// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to double*, !dbg [[DBG112]]
+// CHECK5-NEXT:    store double [[TMP7]], double* [[CONV]], align 8, !dbg [[DBG112]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = load i64, i64* [[A_CASTED]], align 8, !dbg [[DBG112]]
+// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB22:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SST*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), %struct.SST* [[TMP1]], i64 [[TMP8]]), !dbg [[DBG112]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG113:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..6
-// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SST* [[THIS:%.*]], i64 [[A:%.*]]) #[[ATTR12]] !dbg [[DBG113:![0-9]+]] {
+// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SST* [[THIS:%.*]], i64 [[A:%.*]]) #[[ATTR12]] !dbg [[DBG114:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
@@ -4733,60 +4726,60 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
 // CHECK5-NEXT:    store %struct.SST* [[THIS]], %struct.SST** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
-// CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8, !dbg [[DBG114:![0-9]+]]
-// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to double*, !dbg [[DBG114]]
-// CHECK5-NEXT:    store double* [[CONV]], double** [[TMP]], align 8, !dbg [[DBG114]]
-// CHECK5-NEXT:    [[TMP1:%.*]] = load double*, double** [[TMP]], align 8, !dbg [[DBG115:![0-9]+]]
-// CHECK5-NEXT:    store double* [[TMP1]], double** [[_TMP1]], align 8, !dbg [[DBG116:![0-9]+]]
-// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG116]]
-// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG116]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG116]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB20:[0-9]+]], i32 [[TMP3]]), !dbg [[DBG116]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0, !dbg [[DBG116]]
-// CHECK5-NEXT:    br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG116]]
+// CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SST*, %struct.SST** [[THIS_ADDR]], align 8, !dbg [[DBG115:![0-9]+]]
+// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to double*, !dbg [[DBG115]]
+// CHECK5-NEXT:    store double* [[CONV]], double** [[TMP]], align 8, !dbg [[DBG115]]
+// CHECK5-NEXT:    [[TMP1:%.*]] = load double*, double** [[TMP]], align 8, !dbg [[DBG116:![0-9]+]]
+// CHECK5-NEXT:    store double* [[TMP1]], double** [[_TMP1]], align 8, !dbg [[DBG117:![0-9]+]]
+// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG117]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG117]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG117]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB20:[0-9]+]], i32 [[TMP3]]), !dbg [[DBG117]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0, !dbg [[DBG117]]
+// CHECK5-NEXT:    br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG117]]
 // CHECK5:       omp_if.then:
-// CHECK5-NEXT:    [[TMP6:%.*]] = load double*, double** [[_TMP1]], align 8, !dbg [[DBG115]]
-// CHECK5-NEXT:    [[TMP7:%.*]] = load double, double* [[TMP6]], align 8, !dbg [[DBG117:![0-9]+]]
-// CHECK5-NEXT:    [[INC:%.*]] = fadd double [[TMP7]], 1.000000e+00, !dbg [[DBG117]]
-// CHECK5-NEXT:    store double [[INC]], double* [[TMP6]], align 8, !dbg [[DBG117]]
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB20]], i32 [[TMP3]]), !dbg [[DBG117]]
-// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG117]]
-// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG117]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = load double*, double** [[_TMP1]], align 8, !dbg [[DBG116]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = load double, double* [[TMP6]], align 8, !dbg [[DBG118:![0-9]+]]
+// CHECK5-NEXT:    [[INC:%.*]] = fadd double [[TMP7]], 1.000000e+00, !dbg [[DBG118]]
+// CHECK5-NEXT:    store double [[INC]], double* [[TMP6]], align 8, !dbg [[DBG118]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB20]], i32 [[TMP3]]), !dbg [[DBG118]]
+// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG118]]
+// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG118]]
 // CHECK5:       omp_if.end:
-// CHECK5-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0, !dbg [[DBG117]]
-// CHECK5-NEXT:    [[TMP9:%.*]] = load double*, double** [[_TMP1]], align 8, !dbg [[DBG118:![0-9]+]]
-// CHECK5-NEXT:    [[TMP10:%.*]] = bitcast double* [[TMP9]] to i8*, !dbg [[DBG117]]
-// CHECK5-NEXT:    store i8* [[TMP10]], i8** [[TMP8]], align 8, !dbg [[DBG117]]
-// CHECK5-NEXT:    [[TMP11:%.*]] = bitcast [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*, !dbg [[DBG117]]
-// CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG117]]
-// CHECK5-NEXT:    call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB20]], i32 [[TMP3]], i64 8, i8* [[TMP11]], void (i8*, i8*)* @.omp.copyprivate.copy_func.7, i32 [[TMP12]]), !dbg [[DBG117]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG119:![0-9]+]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0, !dbg [[DBG118]]
+// CHECK5-NEXT:    [[TMP9:%.*]] = load double*, double** [[_TMP1]], align 8, !dbg [[DBG119:![0-9]+]]
+// CHECK5-NEXT:    [[TMP10:%.*]] = bitcast double* [[TMP9]] to i8*, !dbg [[DBG118]]
+// CHECK5-NEXT:    store i8* [[TMP10]], i8** [[TMP8]], align 8, !dbg [[DBG118]]
+// CHECK5-NEXT:    [[TMP11:%.*]] = bitcast [1 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*, !dbg [[DBG118]]
+// CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG118]]
+// CHECK5-NEXT:    call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB20]], i32 [[TMP3]], i64 8, i8* [[TMP11]], void (i8*, i8*)* @.omp.copyprivate.copy_func.7, i32 [[TMP12]]), !dbg [[DBG118]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG120:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@.omp.copyprivate.copy_func.7
-// CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR9]] !dbg [[DBG120:![0-9]+]] {
+// CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR9]] !dbg [[DBG121:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
-// CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8, !dbg [[DBG121:![0-9]+]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*, !dbg [[DBG121]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8, !dbg [[DBG121]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*, !dbg [[DBG121]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0, !dbg [[DBG121]]
-// CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8, !dbg [[DBG121]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*, !dbg [[DBG121]]
-// CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0, !dbg [[DBG121]]
-// CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8, !dbg [[DBG121]]
-// CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*, !dbg [[DBG121]]
-// CHECK5-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP11]], align 8, !dbg [[DBG122:![0-9]+]]
-// CHECK5-NEXT:    store double [[TMP12]], double* [[TMP8]], align 8, !dbg [[DBG122]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG122]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8, !dbg [[DBG122:![0-9]+]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*, !dbg [[DBG122]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8, !dbg [[DBG122]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*, !dbg [[DBG122]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0, !dbg [[DBG122]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8, !dbg [[DBG122]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*, !dbg [[DBG122]]
+// CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0, !dbg [[DBG122]]
+// CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8, !dbg [[DBG122]]
+// CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*, !dbg [[DBG122]]
+// CHECK5-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP11]], align 8, !dbg [[DBG123:![0-9]+]]
+// CHECK5-NEXT:    store double [[TMP12]], double* [[TMP8]], align 8, !dbg [[DBG123]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG123]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_ZN2SSC2ERi
-// CHECK5-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG123:![0-9]+]] {
+// CHECK5-SAME: (%struct.SS* nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG124:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8
 // CHECK5-NEXT:    [[D_ADDR:%.*]] = alloca i32*, align 8
@@ -4799,46 +4792,47 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK5-NEXT:    store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    store i32* [[D]], i32** [[D_ADDR]], align 8
 // CHECK5-NEXT:    [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8
-// CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0, !dbg [[DBG124:![0-9]+]]
-// CHECK5-NEXT:    store i32 0, i32* [[A]], align 8, !dbg [[DBG124]]
-// CHECK5-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1, !dbg [[DBG125:![0-9]+]]
-// CHECK5-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4, !dbg [[DBG125]]
-// CHECK5-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16, !dbg [[DBG125]]
-// CHECK5-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4, !dbg [[DBG125]]
-// CHECK5-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2, !dbg [[DBG126:![0-9]+]]
-// CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8, !dbg [[DBG127:![0-9]+]]
-// CHECK5-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8, !dbg [[DBG126]]
-// CHECK5-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0, !dbg [[DBG128:![0-9]+]]
-// CHECK5-NEXT:    store i32* [[A3]], i32** [[A2]], align 8, !dbg [[DBG128]]
-// CHECK5-NEXT:    [[B5:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1, !dbg [[DBG129:![0-9]+]]
-// CHECK5-NEXT:    [[BF_LOAD6:%.*]] = load i8, i8* [[B5]], align 4, !dbg [[DBG129]]
-// CHECK5-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD6]], 4, !dbg [[DBG129]]
-// CHECK5-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 4, !dbg [[DBG129]]
-// CHECK5-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32, !dbg [[DBG129]]
-// CHECK5-NEXT:    store i32 [[BF_CAST]], i32* [[B4]], align 4, !dbg [[DBG129]]
-// CHECK5-NEXT:    [[C8:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2, !dbg [[DBG130:![0-9]+]]
-// CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[C8]], align 8, !dbg [[DBG130]]
-// CHECK5-NEXT:    store i32* [[TMP1]], i32** [[C7]], align 8, !dbg [[DBG130]]
-// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A2]], align 8, !dbg [[DBG131:![0-9]+]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG132:![0-9]+]]
-// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*, !dbg [[DBG132]]
-// CHECK5-NEXT:    store i32 [[TMP3]], i32* [[CONV]], align 4, !dbg [[DBG132]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8, !dbg [[DBG132]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B4]], align 4, !dbg [[DBG132]]
-// CHECK5-NEXT:    [[CONV9:%.*]] = bitcast i64* [[B_CASTED]] to i32*, !dbg [[DBG132]]
-// CHECK5-NEXT:    store i32 [[TMP5]], i32* [[CONV9]], align 4, !dbg [[DBG132]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8, !dbg [[DBG132]]
-// CHECK5-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[C7]], align 8, !dbg [[DBG133:![0-9]+]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4, !dbg [[DBG132]]
-// CHECK5-NEXT:    [[CONV10:%.*]] = bitcast i64* [[C_CASTED]] to i32*, !dbg [[DBG132]]
-// CHECK5-NEXT:    store i32 [[TMP8]], i32* [[CONV10]], align 4, !dbg [[DBG132]]
-// CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[C_CASTED]], align 8, !dbg [[DBG132]]
-// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB26:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*, i64, i64, i64)* @.omp_outlined..8 to void (i32*, i32*, ...)*), %struct.SS* [[THIS1]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP9]]), !dbg [[DBG132]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG134:![0-9]+]]
+// CHECK5-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0, !dbg [[DBG125:![0-9]+]]
+// CHECK5-NEXT:    store i32 0, i32* [[A]], align 8, !dbg [[DBG125]]
+// CHECK5-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1, !dbg [[DBG126:![0-9]+]]
+// CHECK5-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4, !dbg [[DBG126]]
+// CHECK5-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16, !dbg [[DBG126]]
+// CHECK5-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0, !dbg [[DBG126]]
+// CHECK5-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4, !dbg [[DBG126]]
+// CHECK5-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2, !dbg [[DBG127:![0-9]+]]
+// CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8, !dbg [[DBG128:![0-9]+]]
+// CHECK5-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8, !dbg [[DBG127]]
+// CHECK5-NEXT:    [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0, !dbg [[DBG129:![0-9]+]]
+// CHECK5-NEXT:    store i32* [[A3]], i32** [[A2]], align 8, !dbg [[DBG129]]
+// CHECK5-NEXT:    [[B5:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1, !dbg [[DBG130:![0-9]+]]
+// CHECK5-NEXT:    [[BF_LOAD6:%.*]] = load i8, i8* [[B5]], align 4, !dbg [[DBG130]]
+// CHECK5-NEXT:    [[BF_SHL:%.*]] = shl i8 [[BF_LOAD6]], 4, !dbg [[DBG130]]
+// CHECK5-NEXT:    [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 4, !dbg [[DBG130]]
+// CHECK5-NEXT:    [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32, !dbg [[DBG130]]
+// CHECK5-NEXT:    store i32 [[BF_CAST]], i32* [[B4]], align 4, !dbg [[DBG130]]
+// CHECK5-NEXT:    [[C8:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2, !dbg [[DBG131:![0-9]+]]
+// CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[C8]], align 8, !dbg [[DBG131]]
+// CHECK5-NEXT:    store i32* [[TMP1]], i32** [[C7]], align 8, !dbg [[DBG131]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[A2]], align 8, !dbg [[DBG132:![0-9]+]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG133:![0-9]+]]
+// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*, !dbg [[DBG133]]
+// CHECK5-NEXT:    store i32 [[TMP3]], i32* [[CONV]], align 4, !dbg [[DBG133]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = load i64, i64* [[A_CASTED]], align 8, !dbg [[DBG133]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = load i32, i32* [[B4]], align 4, !dbg [[DBG133]]
+// CHECK5-NEXT:    [[CONV9:%.*]] = bitcast i64* [[B_CASTED]] to i32*, !dbg [[DBG133]]
+// CHECK5-NEXT:    store i32 [[TMP5]], i32* [[CONV9]], align 4, !dbg [[DBG133]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8, !dbg [[DBG133]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[C7]], align 8, !dbg [[DBG134:![0-9]+]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4, !dbg [[DBG133]]
+// CHECK5-NEXT:    [[CONV10:%.*]] = bitcast i64* [[C_CASTED]] to i32*, !dbg [[DBG133]]
+// CHECK5-NEXT:    store i32 [[TMP8]], i32* [[CONV10]], align 4, !dbg [[DBG133]]
+// CHECK5-NEXT:    [[TMP9:%.*]] = load i64, i64* [[C_CASTED]], align 8, !dbg [[DBG133]]
+// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB26:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*, i64, i64, i64)* @.omp_outlined..8 to void (i32*, i32*, ...)*), %struct.SS* [[THIS1]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP9]]), !dbg [[DBG133]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG135:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..8
-// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR12]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG135:![0-9]+]] {
+// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR12]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG136:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
@@ -4861,72 +4855,72 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
 // CHECK5-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
 // CHECK5-NEXT:    store i64 [[C]], i64* [[C_ADDR]], align 8
-// CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8, !dbg [[DBG136:![0-9]+]]
-// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*, !dbg [[DBG136]]
-// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i32*, !dbg [[DBG136]]
-// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[C_ADDR]] to i32*, !dbg [[DBG136]]
-// CHECK5-NEXT:    store i32* [[CONV]], i32** [[TMP]], align 8, !dbg [[DBG136]]
-// CHECK5-NEXT:    store i32* [[CONV2]], i32** [[_TMP3]], align 8, !dbg [[DBG136]]
-// CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[TMP]], align 8, !dbg [[DBG137:![0-9]+]]
-// CHECK5-NEXT:    store i32* [[TMP1]], i32** [[_TMP4]], align 8, !dbg [[DBG138:![0-9]+]]
-// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[_TMP3]], align 8, !dbg [[DBG139:![0-9]+]]
-// CHECK5-NEXT:    store i32* [[TMP2]], i32** [[_TMP5]], align 8, !dbg [[DBG138]]
-// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG138]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG138]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4, !dbg [[DBG138]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB24:[0-9]+]], i32 [[TMP4]]), !dbg [[DBG138]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0, !dbg [[DBG138]]
-// CHECK5-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG138]]
+// CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8, !dbg [[DBG137:![0-9]+]]
+// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*, !dbg [[DBG137]]
+// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i32*, !dbg [[DBG137]]
+// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[C_ADDR]] to i32*, !dbg [[DBG137]]
+// CHECK5-NEXT:    store i32* [[CONV]], i32** [[TMP]], align 8, !dbg [[DBG137]]
+// CHECK5-NEXT:    store i32* [[CONV2]], i32** [[_TMP3]], align 8, !dbg [[DBG137]]
+// CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[TMP]], align 8, !dbg [[DBG138:![0-9]+]]
+// CHECK5-NEXT:    store i32* [[TMP1]], i32** [[_TMP4]], align 8, !dbg [[DBG139:![0-9]+]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[_TMP3]], align 8, !dbg [[DBG140:![0-9]+]]
+// CHECK5-NEXT:    store i32* [[TMP2]], i32** [[_TMP5]], align 8, !dbg [[DBG139]]
+// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG139]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG139]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4, !dbg [[DBG139]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB24:[0-9]+]], i32 [[TMP4]]), !dbg [[DBG139]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0, !dbg [[DBG139]]
+// CHECK5-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG139]]
 // CHECK5:       omp_if.then:
-// CHECK5-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0, !dbg [[DBG140:![0-9]+]]
-// CHECK5-NEXT:    store %struct.SS* [[TMP0]], %struct.SS** [[TMP7]], align 8, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[_TMP4]], align 8, !dbg [[DBG141:![0-9]+]]
-// CHECK5-NEXT:    store i32* [[TMP9]], i32** [[TMP8]], align 8, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2, !dbg [[DBG140]]
-// CHECK5-NEXT:    store i32* [[CONV1]], i32** [[TMP10]], align 8, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[_TMP5]], align 8, !dbg [[DBG141]]
-// CHECK5-NEXT:    store i32* [[TMP12]], i32** [[TMP11]], align 8, !dbg [[DBG140]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 0, !dbg [[DBG141:![0-9]+]]
+// CHECK5-NEXT:    store %struct.SS* [[TMP0]], %struct.SS** [[TMP7]], align 8, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 1, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[_TMP4]], align 8, !dbg [[DBG142:![0-9]+]]
+// CHECK5-NEXT:    store i32* [[TMP9]], i32** [[TMP8]], align 8, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 2, !dbg [[DBG141]]
+// CHECK5-NEXT:    store i32* [[CONV1]], i32** [[TMP10]], align 8, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[REF_TMP]], i32 0, i32 3, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[_TMP5]], align 8, !dbg [[DBG142]]
+// CHECK5-NEXT:    store i32* [[TMP12]], i32** [[TMP11]], align 8, !dbg [[DBG141]]
 // CHECK5-NEXT:    invoke void @_ZZN2SSC1ERiENKUlvE_clEv(%class.anon.1* nonnull align 8 dereferenceable(32) [[REF_TMP]])
-// CHECK5-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG140]]
+// CHECK5-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG141]]
 // CHECK5:       invoke.cont:
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB24]], i32 [[TMP4]]), !dbg [[DBG140]]
-// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG140]]
-// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG140]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB24]], i32 [[TMP4]]), !dbg [[DBG141]]
+// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG141]]
+// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG141]]
 // CHECK5:       lpad:
 // CHECK5-NEXT:    [[TMP13:%.*]] = landingpad { i8*, i32 }
-// CHECK5-NEXT:    catch i8* null, !dbg [[DBG142:![0-9]+]]
-// CHECK5-NEXT:    [[TMP14:%.*]] = extractvalue { i8*, i32 } [[TMP13]], 0, !dbg [[DBG142]]
-// CHECK5-NEXT:    store i8* [[TMP14]], i8** [[EXN_SLOT]], align 8, !dbg [[DBG142]]
-// CHECK5-NEXT:    [[TMP15:%.*]] = extractvalue { i8*, i32 } [[TMP13]], 1, !dbg [[DBG142]]
-// CHECK5-NEXT:    store i32 [[TMP15]], i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG142]]
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB24]], i32 [[TMP4]]), !dbg [[DBG140]]
-// CHECK5-NEXT:    br label [[TERMINATE_HANDLER:%.*]], !dbg [[DBG140]]
+// CHECK5-NEXT:    catch i8* null, !dbg [[DBG143:![0-9]+]]
+// CHECK5-NEXT:    [[TMP14:%.*]] = extractvalue { i8*, i32 } [[TMP13]], 0, !dbg [[DBG143]]
+// CHECK5-NEXT:    store i8* [[TMP14]], i8** [[EXN_SLOT]], align 8, !dbg [[DBG143]]
+// CHECK5-NEXT:    [[TMP15:%.*]] = extractvalue { i8*, i32 } [[TMP13]], 1, !dbg [[DBG143]]
+// CHECK5-NEXT:    store i32 [[TMP15]], i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG143]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB24]], i32 [[TMP4]]), !dbg [[DBG141]]
+// CHECK5-NEXT:    br label [[TERMINATE_HANDLER:%.*]], !dbg [[DBG141]]
 // CHECK5:       omp_if.end:
-// CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[_TMP4]], align 8, !dbg [[DBG143:![0-9]+]]
-// CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i32* [[TMP17]] to i8*, !dbg [[DBG140]]
-// CHECK5-NEXT:    store i8* [[TMP18]], i8** [[TMP16]], align 8, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 1, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP20:%.*]] = bitcast i32* [[CONV1]] to i8*, !dbg [[DBG140]]
-// CHECK5-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 8, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 2, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[_TMP5]], align 8, !dbg [[DBG144:![0-9]+]]
-// CHECK5-NEXT:    [[TMP23:%.*]] = bitcast i32* [[TMP22]] to i8*, !dbg [[DBG140]]
-// CHECK5-NEXT:    store i8* [[TMP23]], i8** [[TMP21]], align 8, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP24:%.*]] = bitcast [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*, !dbg [[DBG140]]
-// CHECK5-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG140]]
-// CHECK5-NEXT:    call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB24]], i32 [[TMP4]], i64 24, i8* [[TMP24]], void (i8*, i8*)* @.omp.copyprivate.copy_func.9, i32 [[TMP25]]), !dbg [[DBG140]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG145:![0-9]+]]
+// CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP17:%.*]] = load i32*, i32** [[_TMP4]], align 8, !dbg [[DBG144:![0-9]+]]
+// CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i32* [[TMP17]] to i8*, !dbg [[DBG141]]
+// CHECK5-NEXT:    store i8* [[TMP18]], i8** [[TMP16]], align 8, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 1, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP20:%.*]] = bitcast i32* [[CONV1]] to i8*, !dbg [[DBG141]]
+// CHECK5-NEXT:    store i8* [[TMP20]], i8** [[TMP19]], align 8, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 2, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP22:%.*]] = load i32*, i32** [[_TMP5]], align 8, !dbg [[DBG145:![0-9]+]]
+// CHECK5-NEXT:    [[TMP23:%.*]] = bitcast i32* [[TMP22]] to i8*, !dbg [[DBG141]]
+// CHECK5-NEXT:    store i8* [[TMP23]], i8** [[TMP21]], align 8, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP24:%.*]] = bitcast [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*, !dbg [[DBG141]]
+// CHECK5-NEXT:    [[TMP25:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG141]]
+// CHECK5-NEXT:    call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB24]], i32 [[TMP4]], i64 24, i8* [[TMP24]], void (i8*, i8*)* @.omp.copyprivate.copy_func.9, i32 [[TMP25]]), !dbg [[DBG141]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG146:![0-9]+]]
 // CHECK5:       terminate.handler:
-// CHECK5-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8, !dbg [[DBG140]]
-// CHECK5-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR13]], !dbg [[DBG140]]
-// CHECK5-NEXT:    unreachable, !dbg [[DBG140]]
+// CHECK5-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8, !dbg [[DBG141]]
+// CHECK5-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR13]], !dbg [[DBG141]]
+// CHECK5-NEXT:    unreachable, !dbg [[DBG141]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_ZZN2SSC1ERiENKUlvE_clEv
-// CHECK5-SAME: (%class.anon.1* nonnull align 8 dereferenceable(32) [[THIS:%.*]]) #[[ATTR10]] align 2 !dbg [[DBG146:![0-9]+]] {
+// CHECK5-SAME: (%class.anon.1* nonnull align 8 dereferenceable(32) [[THIS:%.*]]) #[[ATTR10]] align 2 !dbg [[DBG147:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[THIS_ADDR:%.*]] = alloca %class.anon.1*, align 8
 // CHECK5-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
@@ -4936,83 +4930,83 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK5-NEXT:    [[THIS1:%.*]] = load %class.anon.1*, %class.anon.1** [[THIS_ADDR]], align 8
 // CHECK5-NEXT:    [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON_1:%.*]], %class.anon.1* [[THIS1]], i32 0, i32 0
 // CHECK5-NEXT:    [[TMP1:%.*]] = load %struct.SS*, %struct.SS** [[TMP0]], align 8
-// CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 1, !dbg [[DBG147:![0-9]+]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8, !dbg [[DBG147]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4, !dbg [[DBG148:![0-9]+]]
-// CHECK5-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP4]], 1, !dbg [[DBG148]]
-// CHECK5-NEXT:    store i32 [[INC]], i32* [[TMP3]], align 4, !dbg [[DBG148]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 2, !dbg [[DBG149:![0-9]+]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8, !dbg [[DBG149]]
-// CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4, !dbg [[DBG150:![0-9]+]]
-// CHECK5-NEXT:    [[DEC:%.*]] = add nsw i32 [[TMP7]], -1, !dbg [[DBG150]]
-// CHECK5-NEXT:    store i32 [[DEC]], i32* [[TMP6]], align 4, !dbg [[DBG150]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 3, !dbg [[DBG151:![0-9]+]]
-// CHECK5-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[TMP8]], align 8, !dbg [[DBG151]]
-// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4, !dbg [[DBG152:![0-9]+]]
-// CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP10]], 1, !dbg [[DBG152]]
-// CHECK5-NEXT:    store i32 [[DIV]], i32* [[TMP9]], align 4, !dbg [[DBG152]]
-// CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 1, !dbg [[DBG153:![0-9]+]]
-// CHECK5-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[TMP11]], align 8, !dbg [[DBG153]]
-// CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4, !dbg [[DBG154:![0-9]+]]
-// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*, !dbg [[DBG154]]
-// CHECK5-NEXT:    store i32 [[TMP13]], i32* [[CONV]], align 4, !dbg [[DBG154]]
-// CHECK5-NEXT:    [[TMP14:%.*]] = load i64, i64* [[A_CASTED]], align 8, !dbg [[DBG154]]
-// CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 2, !dbg [[DBG155:![0-9]+]]
-// CHECK5-NEXT:    [[TMP16:%.*]] = load i32*, i32** [[TMP15]], align 8, !dbg [[DBG155]]
-// CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4, !dbg [[DBG154]]
-// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to i32*, !dbg [[DBG154]]
-// CHECK5-NEXT:    store i32 [[TMP17]], i32* [[CONV2]], align 4, !dbg [[DBG154]]
-// CHECK5-NEXT:    [[TMP18:%.*]] = load i64, i64* [[B_CASTED]], align 8, !dbg [[DBG154]]
-// CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 3, !dbg [[DBG156:![0-9]+]]
-// CHECK5-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP19]], align 8, !dbg [[DBG156]]
-// CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4, !dbg [[DBG154]]
-// CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[C_CASTED]] to i32*, !dbg [[DBG154]]
-// CHECK5-NEXT:    store i32 [[TMP21]], i32* [[CONV3]], align 4, !dbg [[DBG154]]
-// CHECK5-NEXT:    [[TMP22:%.*]] = load i64, i64* [[C_CASTED]], align 8, !dbg [[DBG154]]
-// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB30:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*, i64, i64, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP1]], i64 [[TMP14]], i64 [[TMP18]], i64 [[TMP22]]), !dbg [[DBG154]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG157:![0-9]+]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 1, !dbg [[DBG148:![0-9]+]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8, !dbg [[DBG148]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4, !dbg [[DBG149:![0-9]+]]
+// CHECK5-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP4]], 1, !dbg [[DBG149]]
+// CHECK5-NEXT:    store i32 [[INC]], i32* [[TMP3]], align 4, !dbg [[DBG149]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 2, !dbg [[DBG150:![0-9]+]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8, !dbg [[DBG150]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4, !dbg [[DBG151:![0-9]+]]
+// CHECK5-NEXT:    [[DEC:%.*]] = add nsw i32 [[TMP7]], -1, !dbg [[DBG151]]
+// CHECK5-NEXT:    store i32 [[DEC]], i32* [[TMP6]], align 4, !dbg [[DBG151]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 3, !dbg [[DBG152:![0-9]+]]
+// CHECK5-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[TMP8]], align 8, !dbg [[DBG152]]
+// CHECK5-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4, !dbg [[DBG153:![0-9]+]]
+// CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP10]], 1, !dbg [[DBG153]]
+// CHECK5-NEXT:    store i32 [[DIV]], i32* [[TMP9]], align 4, !dbg [[DBG153]]
+// CHECK5-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 1, !dbg [[DBG154:![0-9]+]]
+// CHECK5-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[TMP11]], align 8, !dbg [[DBG154]]
+// CHECK5-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4, !dbg [[DBG155:![0-9]+]]
+// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*, !dbg [[DBG155]]
+// CHECK5-NEXT:    store i32 [[TMP13]], i32* [[CONV]], align 4, !dbg [[DBG155]]
+// CHECK5-NEXT:    [[TMP14:%.*]] = load i64, i64* [[A_CASTED]], align 8, !dbg [[DBG155]]
+// CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 2, !dbg [[DBG156:![0-9]+]]
+// CHECK5-NEXT:    [[TMP16:%.*]] = load i32*, i32** [[TMP15]], align 8, !dbg [[DBG156]]
+// CHECK5-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4, !dbg [[DBG155]]
+// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[B_CASTED]] to i32*, !dbg [[DBG155]]
+// CHECK5-NEXT:    store i32 [[TMP17]], i32* [[CONV2]], align 4, !dbg [[DBG155]]
+// CHECK5-NEXT:    [[TMP18:%.*]] = load i64, i64* [[B_CASTED]], align 8, !dbg [[DBG155]]
+// CHECK5-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON_1]], %class.anon.1* [[THIS1]], i32 0, i32 3, !dbg [[DBG157:![0-9]+]]
+// CHECK5-NEXT:    [[TMP20:%.*]] = load i32*, i32** [[TMP19]], align 8, !dbg [[DBG157]]
+// CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4, !dbg [[DBG155]]
+// CHECK5-NEXT:    [[CONV3:%.*]] = bitcast i64* [[C_CASTED]] to i32*, !dbg [[DBG155]]
+// CHECK5-NEXT:    store i32 [[TMP21]], i32* [[CONV3]], align 4, !dbg [[DBG155]]
+// CHECK5-NEXT:    [[TMP22:%.*]] = load i64, i64* [[C_CASTED]], align 8, !dbg [[DBG155]]
+// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB30:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*, i64, i64, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.SS* [[TMP1]], i64 [[TMP14]], i64 [[TMP18]], i64 [[TMP22]]), !dbg [[DBG155]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG158:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@.omp.copyprivate.copy_func.9
-// CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR9]] !dbg [[DBG158:![0-9]+]] {
+// CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR9]] !dbg [[DBG159:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
-// CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8, !dbg [[DBG159:![0-9]+]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [3 x i8*]*, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [3 x i8*]*, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 0, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 0, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4, !dbg [[DBG160:![0-9]+]]
-// CHECK5-NEXT:    store i32 [[TMP12]], i32* [[TMP8]], align 4, !dbg [[DBG160]]
-// CHECK5-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 1, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[TMP13]], align 8, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i32*, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 1, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP17:%.*]] = load i8*, i8** [[TMP16]], align 8, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8* [[TMP17]] to i32*, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4, !dbg [[DBG161:![0-9]+]]
-// CHECK5-NEXT:    store i32 [[TMP19]], i32* [[TMP15]], align 4, !dbg [[DBG161]]
-// CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 2, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP22:%.*]] = bitcast i8* [[TMP21]] to i32*, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 2, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[TMP23]], align 8, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP25:%.*]] = bitcast i8* [[TMP24]] to i32*, !dbg [[DBG159]]
-// CHECK5-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4, !dbg [[DBG162:![0-9]+]]
-// CHECK5-NEXT:    store i32 [[TMP26]], i32* [[TMP22]], align 4, !dbg [[DBG162]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG162]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8, !dbg [[DBG160:![0-9]+]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [3 x i8*]*, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [3 x i8*]*, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 0, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 0, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4, !dbg [[DBG161:![0-9]+]]
+// CHECK5-NEXT:    store i32 [[TMP12]], i32* [[TMP8]], align 4, !dbg [[DBG161]]
+// CHECK5-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 1, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[TMP13]], align 8, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i32*, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 1, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP17:%.*]] = load i8*, i8** [[TMP16]], align 8, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8* [[TMP17]] to i32*, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4, !dbg [[DBG162:![0-9]+]]
+// CHECK5-NEXT:    store i32 [[TMP19]], i32* [[TMP15]], align 4, !dbg [[DBG162]]
+// CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 2, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP22:%.*]] = bitcast i8* [[TMP21]] to i32*, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 2, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[TMP23]], align 8, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP25:%.*]] = bitcast i8* [[TMP24]] to i32*, !dbg [[DBG160]]
+// CHECK5-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4, !dbg [[DBG163:![0-9]+]]
+// CHECK5-NEXT:    store i32 [[TMP26]], i32* [[TMP22]], align 4, !dbg [[DBG163]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG163]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..10
-// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR12]] !dbg [[DBG163:![0-9]+]] {
+// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.SS* [[THIS:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR12]] !dbg [[DBG164:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
@@ -5032,102 +5026,102 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK5-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
 // CHECK5-NEXT:    store i64 [[B]], i64* [[B_ADDR]], align 8
 // CHECK5-NEXT:    store i64 [[C]], i64* [[C_ADDR]], align 8
-// CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8, !dbg [[DBG164:![0-9]+]]
-// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*, !dbg [[DBG164]]
-// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i32*, !dbg [[DBG164]]
-// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[C_ADDR]] to i32*, !dbg [[DBG164]]
-// CHECK5-NEXT:    store i32* [[CONV]], i32** [[TMP]], align 8, !dbg [[DBG164]]
-// CHECK5-NEXT:    store i32* [[CONV2]], i32** [[_TMP3]], align 8, !dbg [[DBG164]]
-// CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[TMP]], align 8, !dbg [[DBG165:![0-9]+]]
-// CHECK5-NEXT:    store i32* [[TMP1]], i32** [[_TMP4]], align 8, !dbg [[DBG166:![0-9]+]]
-// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[_TMP3]], align 8, !dbg [[DBG167:![0-9]+]]
-// CHECK5-NEXT:    store i32* [[TMP2]], i32** [[_TMP5]], align 8, !dbg [[DBG166]]
-// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG166]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG166]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4, !dbg [[DBG166]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB28:[0-9]+]], i32 [[TMP4]]), !dbg [[DBG166]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0, !dbg [[DBG166]]
-// CHECK5-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG166]]
+// CHECK5-NEXT:    [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8, !dbg [[DBG165:![0-9]+]]
+// CHECK5-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*, !dbg [[DBG165]]
+// CHECK5-NEXT:    [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i32*, !dbg [[DBG165]]
+// CHECK5-NEXT:    [[CONV2:%.*]] = bitcast i64* [[C_ADDR]] to i32*, !dbg [[DBG165]]
+// CHECK5-NEXT:    store i32* [[CONV]], i32** [[TMP]], align 8, !dbg [[DBG165]]
+// CHECK5-NEXT:    store i32* [[CONV2]], i32** [[_TMP3]], align 8, !dbg [[DBG165]]
+// CHECK5-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[TMP]], align 8, !dbg [[DBG166:![0-9]+]]
+// CHECK5-NEXT:    store i32* [[TMP1]], i32** [[_TMP4]], align 8, !dbg [[DBG167:![0-9]+]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[_TMP3]], align 8, !dbg [[DBG168:![0-9]+]]
+// CHECK5-NEXT:    store i32* [[TMP2]], i32** [[_TMP5]], align 8, !dbg [[DBG167]]
+// CHECK5-NEXT:    store i32 0, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG167]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG167]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4, !dbg [[DBG167]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB28:[0-9]+]], i32 [[TMP4]]), !dbg [[DBG167]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0, !dbg [[DBG167]]
+// CHECK5-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG167]]
 // CHECK5:       omp_if.then:
-// CHECK5-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[_TMP4]], align 8, !dbg [[DBG165]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4, !dbg [[DBG168:![0-9]+]]
-// CHECK5-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP8]], 1, !dbg [[DBG168]]
-// CHECK5-NEXT:    store i32 [[INC]], i32* [[TMP7]], align 4, !dbg [[DBG168]]
-// CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV1]], align 4, !dbg [[DBG169:![0-9]+]]
-// CHECK5-NEXT:    [[DEC:%.*]] = add nsw i32 [[TMP9]], -1, !dbg [[DBG169]]
-// CHECK5-NEXT:    store i32 [[DEC]], i32* [[CONV1]], align 4, !dbg [[DBG169]]
-// CHECK5-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[_TMP5]], align 8, !dbg [[DBG167]]
-// CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4, !dbg [[DBG170:![0-9]+]]
-// CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP11]], 1, !dbg [[DBG170]]
-// CHECK5-NEXT:    store i32 [[DIV]], i32* [[TMP10]], align 4, !dbg [[DBG170]]
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB28]], i32 [[TMP4]]), !dbg [[DBG168]]
-// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG168]]
-// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG168]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = load i32*, i32** [[_TMP4]], align 8, !dbg [[DBG166]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4, !dbg [[DBG169:![0-9]+]]
+// CHECK5-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP8]], 1, !dbg [[DBG169]]
+// CHECK5-NEXT:    store i32 [[INC]], i32* [[TMP7]], align 4, !dbg [[DBG169]]
+// CHECK5-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CONV1]], align 4, !dbg [[DBG170:![0-9]+]]
+// CHECK5-NEXT:    [[DEC:%.*]] = add nsw i32 [[TMP9]], -1, !dbg [[DBG170]]
+// CHECK5-NEXT:    store i32 [[DEC]], i32* [[CONV1]], align 4, !dbg [[DBG170]]
+// CHECK5-NEXT:    [[TMP10:%.*]] = load i32*, i32** [[_TMP5]], align 8, !dbg [[DBG168]]
+// CHECK5-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4, !dbg [[DBG171:![0-9]+]]
+// CHECK5-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP11]], 1, !dbg [[DBG171]]
+// CHECK5-NEXT:    store i32 [[DIV]], i32* [[TMP10]], align 4, !dbg [[DBG171]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB28]], i32 [[TMP4]]), !dbg [[DBG169]]
+// CHECK5-NEXT:    store i32 1, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG169]]
+// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG169]]
 // CHECK5:       omp_if.end:
-// CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0, !dbg [[DBG168]]
-// CHECK5-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[_TMP4]], align 8, !dbg [[DBG171:![0-9]+]]
-// CHECK5-NEXT:    [[TMP14:%.*]] = bitcast i32* [[TMP13]] to i8*, !dbg [[DBG168]]
-// CHECK5-NEXT:    store i8* [[TMP14]], i8** [[TMP12]], align 8, !dbg [[DBG168]]
-// CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 1, !dbg [[DBG168]]
-// CHECK5-NEXT:    [[TMP16:%.*]] = bitcast i32* [[CONV1]] to i8*, !dbg [[DBG168]]
-// CHECK5-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8, !dbg [[DBG168]]
-// CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 2, !dbg [[DBG168]]
-// CHECK5-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[_TMP5]], align 8, !dbg [[DBG172:![0-9]+]]
-// CHECK5-NEXT:    [[TMP19:%.*]] = bitcast i32* [[TMP18]] to i8*, !dbg [[DBG168]]
-// CHECK5-NEXT:    store i8* [[TMP19]], i8** [[TMP17]], align 8, !dbg [[DBG168]]
-// CHECK5-NEXT:    [[TMP20:%.*]] = bitcast [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*, !dbg [[DBG168]]
-// CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG168]]
-// CHECK5-NEXT:    call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB28]], i32 [[TMP4]], i64 24, i8* [[TMP20]], void (i8*, i8*)* @.omp.copyprivate.copy_func.11, i32 [[TMP21]]), !dbg [[DBG168]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG173:![0-9]+]]
+// CHECK5-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 0, !dbg [[DBG169]]
+// CHECK5-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[_TMP4]], align 8, !dbg [[DBG172:![0-9]+]]
+// CHECK5-NEXT:    [[TMP14:%.*]] = bitcast i32* [[TMP13]] to i8*, !dbg [[DBG169]]
+// CHECK5-NEXT:    store i8* [[TMP14]], i8** [[TMP12]], align 8, !dbg [[DBG169]]
+// CHECK5-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 1, !dbg [[DBG169]]
+// CHECK5-NEXT:    [[TMP16:%.*]] = bitcast i32* [[CONV1]] to i8*, !dbg [[DBG169]]
+// CHECK5-NEXT:    store i8* [[TMP16]], i8** [[TMP15]], align 8, !dbg [[DBG169]]
+// CHECK5-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]], i64 0, i64 2, !dbg [[DBG169]]
+// CHECK5-NEXT:    [[TMP18:%.*]] = load i32*, i32** [[_TMP5]], align 8, !dbg [[DBG173:![0-9]+]]
+// CHECK5-NEXT:    [[TMP19:%.*]] = bitcast i32* [[TMP18]] to i8*, !dbg [[DBG169]]
+// CHECK5-NEXT:    store i8* [[TMP19]], i8** [[TMP17]], align 8, !dbg [[DBG169]]
+// CHECK5-NEXT:    [[TMP20:%.*]] = bitcast [3 x i8*]* [[DOTOMP_COPYPRIVATE_CPR_LIST]] to i8*, !dbg [[DBG169]]
+// CHECK5-NEXT:    [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COPYPRIVATE_DID_IT]], align 4, !dbg [[DBG169]]
+// CHECK5-NEXT:    call void @__kmpc_copyprivate(%struct.ident_t* @[[GLOB28]], i32 [[TMP4]], i64 24, i8* [[TMP20]], void (i8*, i8*)* @.omp.copyprivate.copy_func.11, i32 [[TMP21]]), !dbg [[DBG169]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG174:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@.omp.copyprivate.copy_func.11
-// CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR9]] !dbg [[DBG174:![0-9]+]] {
+// CHECK5-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR9]] !dbg [[DBG175:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
 // CHECK5-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
 // CHECK5-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
-// CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8, !dbg [[DBG175:![0-9]+]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [3 x i8*]*, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [3 x i8*]*, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 0, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 0, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4, !dbg [[DBG176:![0-9]+]]
-// CHECK5-NEXT:    store i32 [[TMP12]], i32* [[TMP8]], align 4, !dbg [[DBG176]]
-// CHECK5-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 1, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[TMP13]], align 8, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i32*, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 1, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP17:%.*]] = load i8*, i8** [[TMP16]], align 8, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8* [[TMP17]] to i32*, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4, !dbg [[DBG177:![0-9]+]]
-// CHECK5-NEXT:    store i32 [[TMP19]], i32* [[TMP15]], align 4, !dbg [[DBG177]]
-// CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 2, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP22:%.*]] = bitcast i8* [[TMP21]] to i32*, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 2, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[TMP23]], align 8, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP25:%.*]] = bitcast i8* [[TMP24]] to i32*, !dbg [[DBG175]]
-// CHECK5-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4, !dbg [[DBG178:![0-9]+]]
-// CHECK5-NEXT:    store i32 [[TMP26]], i32* [[TMP22]], align 4, !dbg [[DBG178]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG178]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8, !dbg [[DBG176:![0-9]+]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [3 x i8*]*, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [3 x i8*]*, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 0, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 0, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4, !dbg [[DBG177:![0-9]+]]
+// CHECK5-NEXT:    store i32 [[TMP12]], i32* [[TMP8]], align 4, !dbg [[DBG177]]
+// CHECK5-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 1, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[TMP13]], align 8, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i32*, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 1, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP17:%.*]] = load i8*, i8** [[TMP16]], align 8, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP18:%.*]] = bitcast i8* [[TMP17]] to i32*, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4, !dbg [[DBG178:![0-9]+]]
+// CHECK5-NEXT:    store i32 [[TMP19]], i32* [[TMP15]], align 4, !dbg [[DBG178]]
+// CHECK5-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP3]], i64 0, i64 2, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP22:%.*]] = bitcast i8* [[TMP21]] to i32*, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP5]], i64 0, i64 2, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[TMP23]], align 8, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP25:%.*]] = bitcast i8* [[TMP24]] to i32*, !dbg [[DBG176]]
+// CHECK5-NEXT:    [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4, !dbg [[DBG179:![0-9]+]]
+// CHECK5-NEXT:    store i32 [[TMP26]], i32* [[TMP22]], align 4, !dbg [[DBG179]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG179]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_Z15parallel_singlev
-// CHECK5-SAME: () #[[ATTR10]] !dbg [[DBG179:![0-9]+]] {
+// CHECK5-SAME: () #[[ATTR10]] !dbg [[DBG180:![0-9]+]] {
 // CHECK5-NEXT:  entry:
-// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB35:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..12 to void (i32*, i32*, ...)*)), !dbg [[DBG180:![0-9]+]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG181:![0-9]+]]
+// CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB35:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..12 to void (i32*, i32*, ...)*)), !dbg [[DBG181:![0-9]+]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG182:![0-9]+]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..12
-// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR12]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG182:![0-9]+]] {
+// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR12]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG183:![0-9]+]] {
 // CHECK5-NEXT:  entry:
 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
@@ -5135,42 +5129,42 @@ void array_func(int n, int a[n], St s[2]) {
 // CHECK5-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
-// CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG183:![0-9]+]]
-// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4, !dbg [[DBG183]]
-// CHECK5-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB32:[0-9]+]], i32 [[TMP1]]), !dbg [[DBG183]]
-// CHECK5-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0, !dbg [[DBG183]]
-// CHECK5-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG183]]
+// CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG184:![0-9]+]]
+// CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4, !dbg [[DBG184]]
+// CHECK5-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB32:[0-9]+]], i32 [[TMP1]]), !dbg [[DBG184]]
+// CHECK5-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0, !dbg [[DBG184]]
+// CHECK5-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]], !dbg [[DBG184]]
 // CHECK5:       omp_if.then:
 // CHECK5-NEXT:    invoke void @_Z3foov()
-// CHECK5-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG184:![0-9]+]]
+// CHECK5-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG185:![0-9]+]]
 // CHECK5:       invoke.cont:
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB32]], i32 [[TMP1]]), !dbg [[DBG184]]
-// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG184]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB32]], i32 [[TMP1]]), !dbg [[DBG185]]
+// CHECK5-NEXT:    br label [[OMP_IF_END]], !dbg [[DBG185]]
 // CHECK5:       lpad:
 // CHECK5-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
-// CHECK5-NEXT:    catch i8* null, !dbg [[DBG185:![0-9]+]]
-// CHECK5-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0, !dbg [[DBG185]]
-// CHECK5-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8, !dbg [[DBG185]]
-// CHECK5-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1, !dbg [[DBG185]]
-// CHECK5-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG185]]
-// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB32]], i32 [[TMP1]]), !dbg [[DBG184]]
-// CHECK5-NEXT:    br label [[TERMINATE_HANDLER:%.*]], !dbg [[DBG184]]
+// CHECK5-NEXT:    catch i8* null, !dbg [[DBG186:![0-9]+]]
+// CHECK5-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0, !dbg [[DBG186]]
+// CHECK5-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8, !dbg [[DBG186]]
+// CHECK5-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1, !dbg [[DBG186]]
+// CHECK5-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG186]]
+// CHECK5-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB32]], i32 [[TMP1]]), !dbg [[DBG185]]
+// CHECK5-NEXT:    br label [[TERMINATE_HANDLER:%.*]], !dbg [[DBG185]]
 // CHECK5:       omp_if.end:
-// CHECK5-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB33:[0-9]+]], i32 [[TMP1]]), !dbg [[DBG186:![0-9]+]]
-// CHECK5-NEXT:    ret void, !dbg [[DBG186]]
+// CHECK5-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB33:[0-9]+]], i32 [[TMP1]]), !dbg [[DBG187:![0-9]+]]
+// CHECK5-NEXT:    ret void, !dbg [[DBG187]]
 // CHECK5:       terminate.handler:
-// CHECK5-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8, !dbg [[DBG184]]
-// CHECK5-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR13]], !dbg [[DBG184]]
-// CHECK5-NEXT:    unreachable, !dbg [[DBG184]]
+// CHECK5-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8, !dbg [[DBG185]]
+// CHECK5-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR13]], !dbg [[DBG185]]
+// CHECK5-NEXT:    unreachable, !dbg [[DBG185]]
 //
 //
 // CHECK5-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_single_codegen.cpp
-// CHECK5-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" !dbg [[DBG187:![0-9]+]] {
+// CHECK5-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" !dbg [[DBG188:![0-9]+]] {
 // CHECK5-NEXT:  entry:
-// CHECK5-NEXT:    call void @__cxx_global_var_init(), !dbg [[DBG188:![0-9]+]]
-// CHECK5-NEXT:    call void @__cxx_global_var_init.4(), !dbg [[DBG188]]
-// CHECK5-NEXT:    call void @.__omp_threadprivate_init_.(), !dbg [[DBG188]]
-// CHECK5-NEXT:    call void @.__omp_threadprivate_init_..3(), !dbg [[DBG188]]
+// CHECK5-NEXT:    call void @__cxx_global_var_init(), !dbg [[DBG189:![0-9]+]]
+// CHECK5-NEXT:    call void @__cxx_global_var_init.4(), !dbg [[DBG189]]
+// CHECK5-NEXT:    call void @.__omp_threadprivate_init_.(), !dbg [[DBG189]]
+// CHECK5-NEXT:    call void @.__omp_threadprivate_init_..3(), !dbg [[DBG189]]
 // CHECK5-NEXT:    ret void
 //
 //

diff  --git a/clang/test/OpenMP/teams_private_codegen.cpp b/clang/test/OpenMP/teams_private_codegen.cpp
index 9848b3a295761..3d1d8de758451 100644
--- a/clang/test/OpenMP/teams_private_codegen.cpp
+++ b/clang/test/OpenMP/teams_private_codegen.cpp
@@ -204,7 +204,8 @@ int main() {
 // CHECK1-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK1-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK1-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK1-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK1-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK1-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK1-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK1-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -371,7 +372,8 @@ int main() {
 // CHECK2-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK2-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK2-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK2-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK2-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK2-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK2-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK2-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -538,7 +540,8 @@ int main() {
 // CHECK3-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK3-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK3-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK3-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK3-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK3-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK3-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 4
 // CHECK3-NEXT:    store i32* [[TMP0]], i32** [[C]], align 4
@@ -705,7 +708,8 @@ int main() {
 // CHECK4-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK4-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK4-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK4-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK4-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK4-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK4-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 4
 // CHECK4-NEXT:    store i32* [[TMP0]], i32** [[C]], align 4
@@ -1035,7 +1039,8 @@ int main() {
 // CHECK9-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK9-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK9-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK9-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK9-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK9-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK9-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK9-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -1533,7 +1538,8 @@ int main() {
 // CHECK10-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK10-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK10-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK10-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK10-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK10-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK10-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 8
 // CHECK10-NEXT:    store i32* [[TMP0]], i32** [[C]], align 8
@@ -2031,7 +2037,8 @@ int main() {
 // CHECK11-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK11-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK11-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK11-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK11-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK11-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK11-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 4
 // CHECK11-NEXT:    store i32* [[TMP0]], i32** [[C]], align 4
@@ -2529,7 +2536,8 @@ int main() {
 // CHECK12-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 1
 // CHECK12-NEXT:    [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 4
 // CHECK12-NEXT:    [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -16
-// CHECK12-NEXT:    store i8 [[BF_CLEAR]], i8* [[B]], align 4
+// CHECK12-NEXT:    [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 0
+// CHECK12-NEXT:    store i8 [[BF_SET]], i8* [[B]], align 4
 // CHECK12-NEXT:    [[C:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 2
 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[D_ADDR]], align 4
 // CHECK12-NEXT:    store i32* [[TMP0]], i32** [[C]], align 4
@@ -2822,4 +2830,3 @@ int main() {
 // CHECK12-NEXT:    call void @__tgt_register_requires(i64 1)
 // CHECK12-NEXT:    ret void
 //
-//
\ No newline at end of file

diff  --git a/llvm/include/llvm/Analysis/InstSimplifyFolder.h b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
new file mode 100644
index 0000000000000..82e331e8bd532
--- /dev/null
+++ b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
@@ -0,0 +1,286 @@
+//===- InstSimplifyFolder.h - InstSimplify folding helper --------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the InstSimplifyFolder class, a helper for IRBuilder.
+// It provides IRBuilder with a set of methods for folding operations to
+// existing values using InstructionSimplify. At the moment, only a subset of
+// the implementation uses InstructionSimplify. The rest of the implementation
+// only folds constants.
+//
+// The folder also applies target-specific constant folding.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
+#define LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/IRBuilderFolder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+
+namespace llvm {
+
+/// InstSimplifyFolder - Use InstructionSimplify to fold operations to existing
+/// values. Also applies target-specific constant folding when not using
+/// InstructionSimplify.
+class InstSimplifyFolder final : public IRBuilderFolder {
+  const DataLayout &DL;
+  TargetFolder ConstFolder;
+
+  virtual void anchor();
+
+public:
+  InstSimplifyFolder(const DataLayout &DL) : DL(DL), ConstFolder(DL) {}
+
+  //===--------------------------------------------------------------------===//
+  // Value-based folders.
+  //
+  // Return an existing value or a constant if the operation can be simplified.
+  // Otherwise return nullptr.
+  //===--------------------------------------------------------------------===//
+  Value *FoldOr(Value *LHS, Value *RHS) const override {
+    return SimplifyOrInst(LHS, RHS, SimplifyQuery(DL));
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Binary Operators
+  //===--------------------------------------------------------------------===//
+
+  Value *CreateAdd(Constant *LHS, Constant *RHS, bool HasNUW = false,
+                   bool HasNSW = false) const override {
+    return ConstFolder.CreateAdd(LHS, RHS, HasNUW, HasNSW);
+  }
+  Value *CreateFAdd(Constant *LHS, Constant *RHS) const override {
+    return ConstFolder.CreateFAdd(LHS, RHS);
+  }
+  Value *CreateSub(Constant *LHS, Constant *RHS, bool HasNUW = false,
+                   bool HasNSW = false) const override {
+    return ConstFolder.CreateSub(LHS, RHS, HasNUW, HasNSW);
+  }
+  Value *CreateFSub(Constant *LHS, Constant *RHS) const override {
+    return ConstFolder.CreateFSub(LHS, RHS);
+  }
+  Value *CreateMul(Constant *LHS, Constant *RHS, bool HasNUW = false,
+                   bool HasNSW = false) const override {
+    return ConstFolder.CreateMul(LHS, RHS, HasNUW, HasNSW);
+  }
+  Value *CreateFMul(Constant *LHS, Constant *RHS) const override {
+    return ConstFolder.CreateFMul(LHS, RHS);
+  }
+  Value *CreateUDiv(Constant *LHS, Constant *RHS,
+                    bool isExact = false) const override {
+    return ConstFolder.CreateUDiv(LHS, RHS, isExact);
+  }
+  Value *CreateSDiv(Constant *LHS, Constant *RHS,
+                    bool isExact = false) const override {
+    return ConstFolder.CreateSDiv(LHS, RHS, isExact);
+  }
+  Value *CreateFDiv(Constant *LHS, Constant *RHS) const override {
+    return ConstFolder.CreateFDiv(LHS, RHS);
+  }
+  Value *CreateURem(Constant *LHS, Constant *RHS) const override {
+    return ConstFolder.CreateURem(LHS, RHS);
+  }
+  Value *CreateSRem(Constant *LHS, Constant *RHS) const override {
+    return ConstFolder.CreateSRem(LHS, RHS);
+  }
+  Value *CreateFRem(Constant *LHS, Constant *RHS) const override {
+    return ConstFolder.CreateFRem(LHS, RHS);
+  }
+  Value *CreateShl(Constant *LHS, Constant *RHS, bool HasNUW = false,
+                   bool HasNSW = false) const override {
+    return ConstFolder.CreateShl(LHS, RHS, HasNUW, HasNSW);
+  }
+  Value *CreateLShr(Constant *LHS, Constant *RHS,
+                    bool isExact = false) const override {
+    return ConstFolder.CreateLShr(LHS, RHS, isExact);
+  }
+  Value *CreateAShr(Constant *LHS, Constant *RHS,
+                    bool isExact = false) const override {
+    return ConstFolder.CreateAShr(LHS, RHS, isExact);
+  }
+  Value *CreateAnd(Constant *LHS, Constant *RHS) const override {
+    return ConstFolder.CreateAnd(LHS, RHS);
+  }
+  Value *CreateXor(Constant *LHS, Constant *RHS) const override {
+    return ConstFolder.CreateXor(LHS, RHS);
+  }
+
+  Value *CreateBinOp(Instruction::BinaryOps Opc, Constant *LHS,
+                     Constant *RHS) const override {
+    return ConstFolder.CreateBinOp(Opc, LHS, RHS);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Unary Operators
+  //===--------------------------------------------------------------------===//
+
+  Value *CreateNeg(Constant *C, bool HasNUW = false,
+                   bool HasNSW = false) const override {
+    return ConstFolder.CreateNeg(C, HasNUW, HasNSW);
+  }
+  Value *CreateFNeg(Constant *C) const override {
+    return ConstFolder.CreateFNeg(C);
+  }
+  Value *CreateNot(Constant *C) const override {
+    return ConstFolder.CreateNot(C);
+  }
+
+  Value *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const override {
+    return ConstFolder.CreateUnOp(Opc, C);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Memory Instructions
+  //===--------------------------------------------------------------------===//
+
+  Value *CreateGetElementPtr(Type *Ty, Constant *C,
+                             ArrayRef<Constant *> IdxList) const override {
+    return ConstFolder.CreateGetElementPtr(Ty, C, IdxList);
+  }
+  Value *CreateGetElementPtr(Type *Ty, Constant *C,
+                             Constant *Idx) const override {
+    // This form of the function only exists to avoid ambiguous overload
+    // warnings about whether to convert Idx to ArrayRef<Constant *> or
+    // ArrayRef<Value *>.
+    return ConstFolder.CreateGetElementPtr(Ty, C, Idx);
+  }
+  Value *CreateGetElementPtr(Type *Ty, Constant *C,
+                             ArrayRef<Value *> IdxList) const override {
+    return ConstFolder.CreateGetElementPtr(Ty, C, IdxList);
+  }
+
+  Value *
+  CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                              ArrayRef<Constant *> IdxList) const override {
+    return ConstFolder.CreateInBoundsGetElementPtr(Ty, C, IdxList);
+  }
+  Value *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                     Constant *Idx) const override {
+    // This form of the function only exists to avoid ambiguous overload
+    // warnings about whether to convert Idx to ArrayRef<Constant *> or
+    // ArrayRef<Value *>.
+    return ConstFolder.CreateInBoundsGetElementPtr(Ty, C, Idx);
+  }
+  Value *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+                                     ArrayRef<Value *> IdxList) const override {
+    return ConstFolder.CreateInBoundsGetElementPtr(Ty, C, IdxList);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Cast/Conversion Operators
+  //===--------------------------------------------------------------------===//
+
+  Value *CreateCast(Instruction::CastOps Op, Constant *C,
+                    Type *DestTy) const override {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return ConstFolder.CreateCast(Op, C, DestTy);
+  }
+  Value *CreateIntCast(Constant *C, Type *DestTy,
+                       bool isSigned) const override {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return ConstFolder.CreateIntCast(C, DestTy, isSigned);
+  }
+  Value *CreatePointerCast(Constant *C, Type *DestTy) const override {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return ConstFolder.CreatePointerCast(C, DestTy);
+  }
+  Value *CreateFPCast(Constant *C, Type *DestTy) const override {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return ConstFolder.CreateFPCast(C, DestTy);
+  }
+  Value *CreateBitCast(Constant *C, Type *DestTy) const override {
+    return ConstFolder.CreateBitCast(C, DestTy);
+  }
+  Value *CreateIntToPtr(Constant *C, Type *DestTy) const override {
+    return ConstFolder.CreateIntToPtr(C, DestTy);
+  }
+  Value *CreatePtrToInt(Constant *C, Type *DestTy) const override {
+    return ConstFolder.CreatePtrToInt(C, DestTy);
+  }
+  Value *CreateZExtOrBitCast(Constant *C, Type *DestTy) const override {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return ConstFolder.CreateZExtOrBitCast(C, DestTy);
+  }
+  Value *CreateSExtOrBitCast(Constant *C, Type *DestTy) const override {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return ConstFolder.CreateSExtOrBitCast(C, DestTy);
+  }
+  Value *CreateTruncOrBitCast(Constant *C, Type *DestTy) const override {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return ConstFolder.CreateTruncOrBitCast(C, DestTy);
+  }
+
+  Value *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
+                                             Type *DestTy) const override {
+    if (C->getType() == DestTy)
+      return C; // avoid calling Fold
+    return ConstFolder.CreatePointerBitCastOrAddrSpaceCast(C, DestTy);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Compare Instructions
+  //===--------------------------------------------------------------------===//
+
+  Value *CreateICmp(CmpInst::Predicate P, Constant *LHS,
+                    Constant *RHS) const override {
+    return ConstFolder.CreateICmp(P, LHS, RHS);
+  }
+  Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
+                    Constant *RHS) const override {
+    return ConstFolder.CreateFCmp(P, LHS, RHS);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Other Instructions
+  //===--------------------------------------------------------------------===//
+
+  Value *CreateSelect(Constant *C, Constant *True,
+                      Constant *False) const override {
+    return ConstFolder.CreateSelect(C, True, False);
+  }
+
+  Value *CreateExtractElement(Constant *Vec, Constant *Idx) const override {
+    return ConstFolder.CreateExtractElement(Vec, Idx);
+  }
+
+  Value *CreateInsertElement(Constant *Vec, Constant *NewElt,
+                             Constant *Idx) const override {
+    return ConstFolder.CreateInsertElement(Vec, NewElt, Idx);
+  }
+
+  Value *CreateShuffleVector(Constant *V1, Constant *V2,
+                             ArrayRef<int> Mask) const override {
+    return ConstFolder.CreateShuffleVector(V1, V2, Mask);
+  }
+
+  Value *CreateExtractValue(Constant *Agg,
+                            ArrayRef<unsigned> IdxList) const override {
+    return ConstFolder.CreateExtractValue(Agg, IdxList);
+  }
+
+  Value *CreateInsertValue(Constant *Agg, Constant *Val,
+                           ArrayRef<unsigned> IdxList) const override {
+    return ConstFolder.CreateInsertValue(Agg, Val, IdxList);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
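
A minimal usage sketch (not part of this commit) of the header added above: an IRBuilder parameterized with InstSimplifyFolder lets CreateOr hand back an existing value instead of inserting a new instruction. The helper name emitOrWithZero is hypothetical, and X is assumed to have integer (or integer vector) type.

// Sketch only -- not part of the patch; emitOrWithZero is a hypothetical helper.
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static Value *emitOrWithZero(Function &F, Value *X) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  // Builder parameterized with the new folder; CreateOr dispatches to
  // InstSimplifyFolder::FoldOr, which calls SimplifyOrInst.
  IRBuilder<InstSimplifyFolder> B(&F.getEntryBlock(), InstSimplifyFolder(DL));
  // X | 0 simplifies to X, so no 'or' instruction is inserted; X is returned.
  return B.CreateOr(X, Constant::getNullValue(X->getType()));
}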

diff  --git a/llvm/include/llvm/Analysis/TargetFolder.h b/llvm/include/llvm/Analysis/TargetFolder.h
index b23316ac3d9b6..af0450d5a1fb4 100644
--- a/llvm/include/llvm/Analysis/TargetFolder.h
+++ b/llvm/include/llvm/Analysis/TargetFolder.h
@@ -42,6 +42,20 @@ class TargetFolder final : public IRBuilderFolder {
 public:
   explicit TargetFolder(const DataLayout &DL) : DL(DL) {}
 
+  //===--------------------------------------------------------------------===//
+  // Value-based folders.
+  //
+  // Return an existing value or a constant if the operation can be simplified.
+  // Otherwise return nullptr.
+  //===--------------------------------------------------------------------===//
+  Value *FoldOr(Value *LHS, Value *RHS) const override {
+    auto *LC = dyn_cast<Constant>(LHS);
+    auto *RC = dyn_cast<Constant>(RHS);
+    if (LC && RC)
+      return Fold(ConstantExpr::getOr(LC, RC));
+    return nullptr;
+  }
+
   //===--------------------------------------------------------------------===//
   // Binary Operators
   //===--------------------------------------------------------------------===//
@@ -102,9 +116,6 @@ class TargetFolder final : public IRBuilderFolder {
   Constant *CreateAnd(Constant *LHS, Constant *RHS) const override {
     return Fold(ConstantExpr::getAnd(LHS, RHS));
   }
-  Constant *CreateOr(Constant *LHS, Constant *RHS) const override {
-    return Fold(ConstantExpr::getOr(LHS, RHS));
-  }
   Constant *CreateXor(Constant *LHS, Constant *RHS) const override {
     return Fold(ConstantExpr::getXor(LHS, RHS));
   }

diff  --git a/llvm/include/llvm/IR/ConstantFolder.h b/llvm/include/llvm/IR/ConstantFolder.h
index da4a18e3c181a..ee5033914261e 100644
--- a/llvm/include/llvm/IR/ConstantFolder.h
+++ b/llvm/include/llvm/IR/ConstantFolder.h
@@ -31,6 +31,20 @@ class ConstantFolder final : public IRBuilderFolder {
 public:
   explicit ConstantFolder() = default;
 
+  //===--------------------------------------------------------------------===//
+  // Value-based folders.
+  //
+  // Return an existing value or a constant if the operation can be simplified.
+  // Otherwise return nullptr.
+  //===--------------------------------------------------------------------===//
+  Value *FoldOr(Value *LHS, Value *RHS) const override {
+    auto *LC = dyn_cast<Constant>(LHS);
+    auto *RC = dyn_cast<Constant>(RHS);
+    if (LC && RC)
+      return ConstantExpr::getOr(LC, RC);
+    return nullptr;
+  }
+
   //===--------------------------------------------------------------------===//
   // Binary Operators
   //===--------------------------------------------------------------------===//
@@ -107,7 +121,7 @@ class ConstantFolder final : public IRBuilderFolder {
     return ConstantExpr::getAnd(LHS, RHS);
   }
 
-  Constant *CreateOr(Constant *LHS, Constant *RHS) const override {
+  Constant *CreateOr(Constant *LHS, Constant *RHS) const {
     return ConstantExpr::getOr(LHS, RHS);
   }
 

diff  --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index bdc733519a923..2c074b399c54e 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -1386,12 +1386,8 @@ class IRBuilderBase {
   }
 
   Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
-    if (auto *RC = dyn_cast<Constant>(RHS)) {
-      if (RC->isNullValue())
-        return LHS;  // LHS | 0 -> LHS
-      if (auto *LC = dyn_cast<Constant>(LHS))
-        return Insert(Folder.CreateOr(LC, RC), Name);
-    }
+    if (auto *V = Folder.FoldOr(LHS, RHS))
+      return V;
     return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
   }
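
With this hunk, CreateOr first asks the folder via FoldOr and only inserts a real 'or' when it returns nullptr. A side effect, visible in the clang CHECK updates earlier in the diff, is that the default ConstantFolder no longer applies the old LHS | 0 -> LHS shortcut for a non-constant LHS. A small sketch (not from the patch; emitBitfieldSet is a hypothetical stand-in for the bitfield codegen pattern in those tests):

// Sketch only -- not part of the patch.
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

Value *emitBitfieldSet(BasicBlock *BB, Value *ClearedLoad) {
  // With the default ConstantFolder, FoldOr(ClearedLoad, 0) returns nullptr
  // because the LHS is not a constant, so an explicit 'or %cleared, 0' is
  // emitted -- matching the new BF_SET lines in the CHECK output above.
  IRBuilder<> B(BB);
  return B.CreateOr(ClearedLoad,
                    Constant::getNullValue(ClearedLoad->getType()));
}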
 

diff  --git a/llvm/include/llvm/IR/IRBuilderFolder.h b/llvm/include/llvm/IR/IRBuilderFolder.h
index e781e8e094af9..6a003868c37df 100644
--- a/llvm/include/llvm/IR/IRBuilderFolder.h
+++ b/llvm/include/llvm/IR/IRBuilderFolder.h
@@ -25,6 +25,14 @@ class IRBuilderFolder {
 public:
   virtual ~IRBuilderFolder();
 
+  //===--------------------------------------------------------------------===//
+  // Value-based folders.
+  //
+  // Return an existing value or a constant if the operation can be simplified.
+  // Otherwise return nullptr.
+  //===--------------------------------------------------------------------===//
+  virtual Value *FoldOr(Value *LHS, Value *RHS) const = 0;
+
   //===--------------------------------------------------------------------===//
   // Binary Operators
   //===--------------------------------------------------------------------===//
@@ -53,7 +61,6 @@ class IRBuilderFolder {
   virtual Value *CreateAShr(Constant *LHS, Constant *RHS,
                             bool isExact = false) const = 0;
   virtual Value *CreateAnd(Constant *LHS, Constant *RHS) const = 0;
-  virtual Value *CreateOr(Constant *LHS, Constant *RHS) const = 0;
   virtual Value *CreateXor(Constant *LHS, Constant *RHS) const = 0;
   virtual Value *CreateBinOp(Instruction::BinaryOps Opc,
                              Constant *LHS, Constant *RHS) const = 0;

diff  --git a/llvm/include/llvm/IR/NoFolder.h b/llvm/include/llvm/IR/NoFolder.h
index dcffa6b2f9dad..14a18852fe4ce 100644
--- a/llvm/include/llvm/IR/NoFolder.h
+++ b/llvm/include/llvm/IR/NoFolder.h
@@ -37,6 +37,14 @@ class NoFolder final : public IRBuilderFolder {
 public:
   explicit NoFolder() = default;
 
+  //===--------------------------------------------------------------------===//
+  // Value-based folders.
+  //
+  // Return an existing value or a constant if the operation can be simplified.
+  // Otherwise return nullptr.
+  //===--------------------------------------------------------------------===//
+  Value *FoldOr(Value *LHS, Value *RHS) const override { return nullptr; }
+
   //===--------------------------------------------------------------------===//
   // Binary Operators
   //===--------------------------------------------------------------------===//
@@ -136,10 +144,6 @@ class NoFolder final : public IRBuilderFolder {
     return BinaryOperator::CreateAnd(LHS, RHS);
   }
 
-  Instruction *CreateOr(Constant *LHS, Constant *RHS) const override {
-    return BinaryOperator::CreateOr(LHS, RHS);
-  }
-
   Instruction *CreateXor(Constant *LHS, Constant *RHS) const override {
     return BinaryOperator::CreateXor(LHS, RHS);
   }

diff  --git a/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h b/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
index 4547d8a591138..277eb7acf238c 100644
--- a/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
+++ b/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
@@ -18,6 +18,7 @@
 #include "llvm/ADT/Optional.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/InstSimplifyFolder.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/ScalarEvolutionNormalization.h"
 #include "llvm/Analysis/TargetFolder.h"
@@ -122,7 +123,7 @@ class SCEVExpander : public SCEVVisitor<SCEVExpander, Value *> {
   /// "expanded" form.
   bool LSRMode;
 
-  typedef IRBuilder<TargetFolder, IRBuilderCallbackInserter> BuilderType;
+  typedef IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> BuilderType;
   BuilderType Builder;
 
   // RAII object that stores the current insertion point and restores it when
@@ -178,7 +179,7 @@ class SCEVExpander : public SCEVVisitor<SCEVExpander, Value *> {
       : SE(se), DL(DL), IVName(name), PreserveLCSSA(PreserveLCSSA),
         IVIncInsertLoop(nullptr), IVIncInsertPos(nullptr), CanonicalMode(true),
         LSRMode(false),
-        Builder(se.getContext(), TargetFolder(DL),
+        Builder(se.getContext(), InstSimplifyFolder(DL),
                 IRBuilderCallbackInserter(
                     [this](Instruction *I) { rememberInstruction(I); })) {
 #ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS

diff  --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 0a3855f219275..7dc709e9e9322 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -27,6 +27,7 @@
 #include "llvm/Analysis/CaptureTracking.h"
 #include "llvm/Analysis/CmpInstAnalysis.h"
 #include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/InstSimplifyFolder.h"
 #include "llvm/Analysis/LoopAnalysisManager.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/OverflowInstAnalysis.h"
@@ -6457,3 +6458,5 @@ const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
                                                   Function &);
 }
+
+void InstSimplifyFolder::anchor() {}

diff  --git a/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll b/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll
index 353a45aea7a68..e5cb8bb92d318 100644
--- a/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll
+++ b/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll
@@ -14,7 +14,6 @@ define void @f(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %
 ; CHECK:       for.body.lver.check:
 ; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
 ; CHECK-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; CHECK-NEXT:    [[TMP8:%.*]] = or i1 false, [[TMP7]]
 ; CHECK-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]])
 ; CHECK-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
 ; CHECK-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
@@ -22,7 +21,7 @@ define void @f(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %
 ; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
 ; CHECK-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
-; CHECK-NEXT:    [[TMP18:%.*]] = or i1 [[TMP8]], [[TMP17]]
+; CHECK-NEXT:    [[TMP18:%.*]] = or i1 [[TMP7]], [[TMP17]]
 ; CHECK-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
 ; CHECK:       for.body.ph.lver.orig:
 ; CHECK-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
@@ -149,7 +148,6 @@ define void @f_with_offset(i32* noalias %b, i32* noalias %c, i32* noalias %d, i3
 ; CHECK:       for.body.lver.check:
 ; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
 ; CHECK-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; CHECK-NEXT:    [[TMP8:%.*]] = or i1 false, [[TMP7]]
 ; CHECK-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]])
 ; CHECK-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
 ; CHECK-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
@@ -157,7 +155,7 @@ define void @f_with_offset(i32* noalias %b, i32* noalias %c, i32* noalias %d, i3
 ; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* bitcast (i32* getelementptr inbounds ([8192 x i32], [8192 x i32]* @global_a, i64 0, i64 42) to i8*), i64 [[MUL_RESULT3]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = icmp ult i8* [[TMP12]], bitcast (i32* getelementptr inbounds ([8192 x i32], [8192 x i32]* @global_a, i64 0, i64 42) to i8*)
 ; CHECK-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
-; CHECK-NEXT:    [[TMP18:%.*]] = or i1 [[TMP8]], [[TMP17]]
+; CHECK-NEXT:    [[TMP18:%.*]] = or i1 [[TMP7]], [[TMP17]]
 ; CHECK-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
 ; CHECK:       for.body.ph.lver.orig:
 ; CHECK-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]

diff  --git a/llvm/test/Transforms/LoopVectorize/runtime-check-small-clamped-bounds.ll b/llvm/test/Transforms/LoopVectorize/runtime-check-small-clamped-bounds.ll
index 4f796baf66d11..53b2d9e103eab 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check-small-clamped-bounds.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check-small-clamped-bounds.ll
@@ -20,8 +20,7 @@ define void @load_clamped_index(i32* %A, i32* %B, i32 %N) {
 ; CHECK:       vector.scevcheck:
 ; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[N]], -1
 ; CHECK-NEXT:    [[TMP7:%.*]] = icmp ugt i32 [[TMP0]], 3
-; CHECK-NEXT:    [[TMP8:%.*]] = or i1 false, [[TMP7]]
-; CHECK-NEXT:    br i1 [[TMP8]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
+; CHECK-NEXT:    br i1 [[TMP7]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
 ; CHECK:       vector.memcheck:
 ; CHECK-NEXT:    [[TMP10:%.*]] = add i32 [[N]], -1
 ; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
@@ -104,8 +103,7 @@ define void @store_clamped_index(i32* %A, i32* %B, i32 %N) {
 ; CHECK:       vector.scevcheck:
 ; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[N]], -1
 ; CHECK-NEXT:    [[TMP7:%.*]] = icmp ugt i32 [[TMP0]], 3
-; CHECK-NEXT:    [[TMP8:%.*]] = or i1 false, [[TMP7]]
-; CHECK-NEXT:    br i1 [[TMP8]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
+; CHECK-NEXT:    br i1 [[TMP7]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
 ; CHECK:       vector.memcheck:
 ; CHECK-NEXT:    [[TMP10:%.*]] = add i32 [[N]], -1
 ; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
@@ -267,8 +265,7 @@ define void @clamped_index_equal_dependence(i32* %A, i32* %B, i32 %N) {
 ; CHECK:       vector.scevcheck:
 ; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[N]], -1
 ; CHECK-NEXT:    [[TMP7:%.*]] = icmp ugt i32 [[TMP0]], 3
-; CHECK-NEXT:    [[TMP8:%.*]] = or i1 false, [[TMP7]]
-; CHECK-NEXT:    br i1 [[TMP8]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT:    br i1 [[TMP7]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
 ; CHECK:       vector.ph:
 ; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[N]], 2
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]

diff  --git a/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll b/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll
index eec2ba94c750b..b32ba26884ee3 100644
--- a/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll
+++ b/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll
@@ -31,7 +31,6 @@ define void @f1(i16* noalias %a,
 ; LV-NEXT:    [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
 ; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
 ; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; LV-NEXT:    [[TMP8:%.*]] = or i1 false, [[TMP7]]
 ; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
 ; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
 ; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
@@ -39,7 +38,7 @@ define void @f1(i16* noalias %a,
 ; LV-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
 ; LV-NEXT:    [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
 ; LV-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
-; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP8]], [[TMP17]]
+; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP7]], [[TMP17]]
 ; LV-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
 ; LV:       for.body.ph.lver.orig:
 ; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]

diff  --git a/llvm/test/Transforms/SROA/basictest-opaque-ptrs.ll b/llvm/test/Transforms/SROA/basictest-opaque-ptrs.ll
index 8e859699554b0..8be24472e341f 100644
--- a/llvm/test/Transforms/SROA/basictest-opaque-ptrs.ll
+++ b/llvm/test/Transforms/SROA/basictest-opaque-ptrs.ll
@@ -1174,10 +1174,12 @@ define void @PR14059.1(double* %d) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double undef to i64
 ; CHECK-NEXT:    [[X_SROA_0_I_0_INSERT_MASK:%.*]] = and i64 [[TMP0]], -4294967296
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[X_SROA_0_I_0_INSERT_MASK]] to double
+; CHECK-NEXT:    [[X_SROA_0_I_0_INSERT_INSERT:%.*]] = or i64 [[X_SROA_0_I_0_INSERT_MASK]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[X_SROA_0_I_0_INSERT_INSERT]] to double
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double [[TMP1]] to i64
 ; CHECK-NEXT:    [[X_SROA_0_I_2_INSERT_MASK:%.*]] = and i64 [[TMP2]], -281474976645121
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64 [[X_SROA_0_I_2_INSERT_MASK]] to double
+; CHECK-NEXT:    [[X_SROA_0_I_2_INSERT_INSERT:%.*]] = or i64 [[X_SROA_0_I_2_INSERT_MASK]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64 [[X_SROA_0_I_2_INSERT_INSERT]] to double
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double [[TMP3]] to i64
 ; CHECK-NEXT:    [[D_RAW:%.*]] = bitcast ptr [[D:%.*]] to ptr
 ; CHECK-NEXT:    [[X_SROA_0_I_4_COPYLOAD:%.*]] = load i32, ptr [[D_RAW]], align 1

diff  --git a/llvm/test/Transforms/SROA/basictest.ll b/llvm/test/Transforms/SROA/basictest.ll
index 0dc30c4e094c1..1fd95a4605d93 100644
--- a/llvm/test/Transforms/SROA/basictest.ll
+++ b/llvm/test/Transforms/SROA/basictest.ll
@@ -1253,10 +1253,12 @@ define void @PR14059.1(double* %d) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double undef to i64
 ; CHECK-NEXT:    [[X_SROA_0_I_0_INSERT_MASK:%.*]] = and i64 [[TMP0]], -4294967296
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[X_SROA_0_I_0_INSERT_MASK]] to double
+; CHECK-NEXT:    [[X_SROA_0_I_0_INSERT_INSERT:%.*]] = or i64 [[X_SROA_0_I_0_INSERT_MASK]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[X_SROA_0_I_0_INSERT_INSERT]] to double
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double [[TMP1]] to i64
 ; CHECK-NEXT:    [[X_SROA_0_I_2_INSERT_MASK:%.*]] = and i64 [[TMP2]], -281474976645121
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64 [[X_SROA_0_I_2_INSERT_MASK]] to double
+; CHECK-NEXT:    [[X_SROA_0_I_2_INSERT_INSERT:%.*]] = or i64 [[X_SROA_0_I_2_INSERT_MASK]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64 [[X_SROA_0_I_2_INSERT_INSERT]] to double
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double [[TMP3]] to i64
 ; CHECK-NEXT:    [[X_SROA_0_I_4_D_RAW_SROA_CAST:%.*]] = bitcast double* [[D:%.*]] to i32*
 ; CHECK-NEXT:    [[X_SROA_0_I_4_COPYLOAD:%.*]] = load i32, i32* [[X_SROA_0_I_4_D_RAW_SROA_CAST]], align 1


        

