[PATCH] D121756: [clang-format] Clean up code looking for if statements
sstwcw via Phabricator via cfe-commits
cfe-commits at lists.llvm.org
Thu Mar 24 16:31:46 PDT 2022
sstwcw marked 2 inline comments as done.
sstwcw added a comment.
This is how checking for `while` changes behavior: long `while` conditions are now wrapped the way `if` conditions already are, which shifts the break points and the continuation indent, as the sketch and the diff below show.
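
Here is a minimal sketch of the kind of change, with hypothetical names
(`try_acquire_shared_resource` and `spin_pause` are not from the patch); it
assumes the new output mirrors the openmp hunks below, so treat it as an
illustration rather than verified clang-format output.

// Hypothetical helpers, declared only so the condition is long enough to
// overflow the column limit.
bool try_acquire_shared_resource(void **head, long owner, long fallback);
void spin_pause();

void spin_until_acquired(void **free_list_head, long current_owner_token,
                         long fallback_value) {
  // Old output: break right after the call's opening parenthesis and
  // bin-pack the arguments on the continuation line:
  //   while (!try_acquire_shared_resource(
  //       free_list_head, current_owner_token, fallback_value)) {
  // New output: the first argument stays on the `while` line and the rest
  // are aligned under it:
  while (!try_acquire_shared_resource(free_list_head, current_owner_token,
                                      fallback_value)) {
    spin_pause();
  }
}
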
diff --git a/flang/lib/Evaluate/tools.cpp b/flang/lib/Evaluate/tools.cpp
index 68b2a40d48c5..3f811fee9bad 100644
--- a/flang/lib/Evaluate/tools.cpp
+++ b/flang/lib/Evaluate/tools.cpp
@@ -868,8 +868,8 @@ bool HasVectorSubscript(const Expr<SomeType> &expr) {
parser::Message *AttachDeclaration(
parser::Message &message, const Symbol &symbol) {
const Symbol *unhosted{&symbol};
- while (
- const auto *assoc{unhosted->detailsIf<semantics::HostAssocDetails>()}) {
+ while (const auto *assoc{
+ unhosted->detailsIf<semantics::HostAssocDetails>()}) {
unhosted = &assoc->symbol();
}
if (const auto *binding{
diff --git a/flang/lib/Semantics/data-to-inits.cpp b/flang/lib/Semantics/data-to-inits.cpp
index 6bdbf5f6549f..aa8188656bcb 100644
--- a/flang/lib/Semantics/data-to-inits.cpp
+++ b/flang/lib/Semantics/data-to-inits.cpp
@@ -407,8 +407,8 @@ bool DataInitializationCompiler<DSV>::InitElement(
DescribeElement(), designatorType->AsFortran());
}
auto folded{evaluate::Fold(context, std::move(converted->first))};
- switch (GetImage().Add(
- offsetSymbol.offset(), offsetSymbol.size(), folded, context)) {
+ switch (GetImage().Add(offsetSymbol.offset(), offsetSymbol.size(), folded,
+ context)) {
case evaluate::InitialImage::Ok:
return true;
case evaluate::InitialImage::NotAConstant:
diff --git a/lldb/source/API/SBDebugger.cpp b/lldb/source/API/SBDebugger.cpp
index 3391665786d5..f1b86a133f1c 100644
--- a/lldb/source/API/SBDebugger.cpp
+++ b/lldb/source/API/SBDebugger.cpp
@@ -526,7 +526,7 @@ void SBDebugger::HandleCommand(const char *command) {
EventSP event_sp;
ListenerSP lldb_listener_sp = m_opaque_sp->GetListener();
while (lldb_listener_sp->GetEventForBroadcaster(
- process_sp.get(), event_sp, std::chrono::seconds(0))) {
+ process_sp.get(), event_sp, std::chrono::seconds(0))) {
SBEvent event(event_sp);
HandleProcessEvent(process, event, GetOutputFile(), GetErrorFile());
}
diff --git a/openmp/runtime/src/kmp_alloc.cpp b/openmp/runtime/src/kmp_alloc.cpp
index 0f76906714b1..319c3e779fda 100644
--- a/openmp/runtime/src/kmp_alloc.cpp
+++ b/openmp/runtime/src/kmp_alloc.cpp
@@ -2051,7 +2051,8 @@ void *___kmp_fast_allocate(kmp_info_t *this_thr, size_t size KMP_SRC_LOC_DECL) {
// threads only)
// pop the head of the sync free list, push NULL instead
while (!KMP_COMPARE_AND_STORE_PTR(
- &this_thr->th.th_free_lists[index].th_free_list_sync, ptr, nullptr)) {
+ &this_thr->th.th_free_lists[index].th_free_list_sync, ptr,
+ nullptr)) {
KMP_CPU_PAUSE();
ptr = TCR_SYNC_PTR(this_thr->th.th_free_lists[index].th_free_list_sync);
}
@@ -2178,7 +2179,8 @@ void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL) {
*((void **)tail) = old_ptr;
while (!KMP_COMPARE_AND_STORE_PTR(
- &q_th->th.th_free_lists[index].th_free_list_sync, old_ptr, head)) {
+ &q_th->th.th_free_lists[index].th_free_list_sync, old_ptr,
+ head)) {
KMP_CPU_PAUSE();
old_ptr = TCR_PTR(q_th->th.th_free_lists[index].th_free_list_sync);
*((void **)tail) = old_ptr;
diff --git a/openmp/runtime/src/kmp_atomic.cpp b/openmp/runtime/src/kmp_atomic.cpp
index 21c2c60bfb60..79b8373319e4 100644
--- a/openmp/runtime/src/kmp_atomic.cpp
+++ b/openmp/runtime/src/kmp_atomic.cpp
@@ -793,8 +793,9 @@ static inline kmp_cmplx128_a16_t operator/(kmp_cmplx128_a16_t &lhs,
old_value = *(TYPE volatile *)lhs; \
new_value = (TYPE)(old_value OP rhs); \
while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
- (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
- *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
+ (kmp_int##BITS *)lhs, \
+ *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
+ *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
KMP_DO_PAUSE; \
\
old_value = *(TYPE volatile *)lhs; \
@@ -821,8 +822,9 @@ static inline kmp_cmplx128_a16_t operator/(kmp_cmplx128_a16_t &lhs,
*old_value.vvv = *(volatile kmp_int##BITS *)lhs; \
new_value.cmp = (TYPE)(old_value.cmp OP rhs); \
while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
- (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \
- *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv)) { \
+ (kmp_int##BITS *)lhs, \
+ *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \
+ *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv)) { \
KMP_DO_PAUSE; \
\
*old_value.vvv = *(volatile kmp_int##BITS *)lhs; \
@@ -848,8 +850,9 @@ static inline kmp_cmplx128_a16_t operator/(kmp_cmplx128_a16_t &lhs,
*old_value.vvv = *(volatile kmp_int##BITS *)lhs; \
new_value.cmp = old_value.cmp OP rhs; \
while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
- (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \
- *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv)) { \
+ (kmp_int##BITS *)lhs, \
+ *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \
+ *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv)) { \
KMP_DO_PAUSE; \
\
*old_value.vvv = *(volatile kmp_int##BITS *)lhs; \
@@ -1461,8 +1464,9 @@ ATOMIC_CRITICAL(cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c,
old_value = temp_val; \
new_value = (TYPE)(rhs OP old_value); \
while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
- (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
- *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
+ (kmp_int##BITS *)lhs, \
+ *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
+ *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
KMP_DO_PAUSE; \
\
temp_val = *lhs; \
@@ -2127,8 +2131,9 @@ ATOMIC_CRITICAL_READ(cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c,
old_value = temp_val; \
new_value = rhs; \
while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
- (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
- *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
+ (kmp_int##BITS *)lhs, \
+ *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
+ *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
@@ -2275,8 +2280,9 @@ ATOMIC_CRITICAL_WR(cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c,
old_value = temp_val; \
new_value = (TYPE)(old_value OP rhs); \
while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
- (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
- *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
+ (kmp_int##BITS *)lhs, \
+ *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
+ *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
temp_val = *lhs; \
old_value = temp_val; \
new_value = (TYPE)(old_value OP rhs); \
@@ -2971,8 +2977,9 @@ ATOMIC_CRITICAL_CPT(cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c,
old_value = temp_val; \
new_value = (TYPE)(rhs OP old_value); \
while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
- (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
- *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
+ (kmp_int##BITS *)lhs, \
+ *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
+ *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
temp_val = *lhs; \
old_value = temp_val; \
new_value = (TYPE)(rhs OP old_value); \
@@ -3295,8 +3302,9 @@ ATOMIC_CRITICAL_CPT_REV_MIX(float10, long double, div_cpt_rev, /, fp, _Quad,
old_value = temp_val; \
new_value = rhs; \
while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
- (kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
- *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
+ (kmp_int##BITS *)lhs, \
+ *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
+ *VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
@@ -3476,8 +3484,9 @@ void __kmpc_atomic_2(ident_t *id_ref, int gtid, void *lhs, void *rhs,
(*f)(&new_value, &old_value, rhs);
/* TODO: Should this be acquire or release? */
- while (!KMP_COMPARE_AND_STORE_ACQ16(
- (kmp_int16 *)lhs, *(kmp_int16 *)&old_value, *(kmp_int16 *)&new_value)) {
+ while (!KMP_COMPARE_AND_STORE_ACQ16((kmp_int16 *)lhs,
+ *(kmp_int16 *)&old_value,
+ *(kmp_int16 *)&new_value)) {
KMP_CPU_PAUSE();
old_value = *(kmp_int16 *)lhs;
@@ -3525,8 +3534,9 @@ void __kmpc_atomic_4(ident_t *id_ref, int gtid, void *lhs, void *rhs,
(*f)(&new_value, &old_value, rhs);
/* TODO: Should this be acquire or release? */
- while (!KMP_COMPARE_AND_STORE_ACQ32(
- (kmp_int32 *)lhs, *(kmp_int32 *)&old_value, *(kmp_int32 *)&new_value)) {
+ while (!KMP_COMPARE_AND_STORE_ACQ32((kmp_int32 *)lhs,
+ *(kmp_int32 *)&old_value,
+ *(kmp_int32 *)&new_value)) {
KMP_CPU_PAUSE();
old_value = *(kmp_int32 *)lhs;
@@ -3574,8 +3584,9 @@ void __kmpc_atomic_8(ident_t *id_ref, int gtid, void *lhs, void *rhs,
old_value = *(kmp_int64 *)lhs;
(*f)(&new_value, &old_value, rhs);
/* TODO: Should this be acquire or release? */
- while (!KMP_COMPARE_AND_STORE_ACQ64(
- (kmp_int64 *)lhs, *(kmp_int64 *)&old_value, *(kmp_int64 *)&new_value)) {
+ while (!KMP_COMPARE_AND_STORE_ACQ64((kmp_int64 *)lhs,
+ *(kmp_int64 *)&old_value,
+ *(kmp_int64 *)&new_value)) {
KMP_CPU_PAUSE();
old_value = *(kmp_int64 *)lhs;
diff --git a/openmp/runtime/src/kmp_dispatch.cpp b/openmp/runtime/src/kmp_dispatch.cpp
index 648332109dbb..d3dd63f57401 100644
--- a/openmp/runtime/src/kmp_dispatch.cpp
+++ b/openmp/runtime/src/kmp_dispatch.cpp
@@ -1359,9 +1359,9 @@ int __kmp_dispatch_next_algorithm(int gtid,
vnew.b = vold.b;
vnew.p.count++; // get chunk from head of self range
while (!KMP_COMPARE_AND_STORE_REL64(
- (volatile kmp_int64 *)&pr->u.p.count,
- *VOLATILE_CAST(kmp_int64 *) & vold.b,
- *VOLATILE_CAST(kmp_int64 *) & vnew.b)) {
+ (volatile kmp_int64 *)&pr->u.p.count,
+ *VOLATILE_CAST(kmp_int64 *) & vold.b,
+ *VOLATILE_CAST(kmp_int64 *) & vnew.b)) {
KMP_CPU_PAUSE();
vold.b = *(volatile kmp_int64 *)(&pr->u.p.count);
vnew.b = vold.b;
Repository:
rG LLVM Github Monorepo
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D121756/new/
https://reviews.llvm.org/D121756