[Openmp-commits] [openmp] [OpenMP] [NFC] Remove KMP_NESTED_HOT_TEAMS macro (PR #143584)

Jonathan Peyton via Openmp-commits openmp-commits at lists.llvm.org
Tue Jun 10 12:06:07 PDT 2025


https://github.com/jpeyton52 created https://github.com/llvm/llvm-project/pull/143584

The feature was introduced back in 2014 and has been on ever since. The feature itself is left in place; only the macro (and its dead `#else` paths) is removed.

>From 158f5d086f0952546624c804dcd2550c2b0b2551 Mon Sep 17 00:00:00 2001
From: Jonathan Peyton <jonathan.l.peyton at intel.com>
Date: Tue, 10 Jun 2025 10:45:14 -0500
Subject: [PATCH] [OpenMP] [NFC] Remove KMP_NESTED_HOT_TEAMS macro

The feature was introduced back in 2014 and has been on ever since.
---
 openmp/runtime/src/kmp.h              |  31 ++----
 openmp/runtime/src/kmp_config.h.cmake |   1 -
 openmp/runtime/src/kmp_global.cpp     |   2 -
 openmp/runtime/src/kmp_runtime.cpp    | 136 ++++++++------------------
 openmp/runtime/src/kmp_settings.cpp   |   8 +-
 5 files changed, 52 insertions(+), 126 deletions(-)

diff --git a/openmp/runtime/src/kmp.h b/openmp/runtime/src/kmp.h
index a2cacc8792b15..0a28715590c14 100644
--- a/openmp/runtime/src/kmp.h
+++ b/openmp/runtime/src/kmp.h
@@ -169,17 +169,6 @@ class kmp_stats_list;
 #define USE_FAST_MEMORY 3
 #endif
 
-#ifndef KMP_NESTED_HOT_TEAMS
-#define KMP_NESTED_HOT_TEAMS 0
-#define USE_NESTED_HOT_ARG(x)
-#else
-#if KMP_NESTED_HOT_TEAMS
-#define USE_NESTED_HOT_ARG(x) , x
-#else
-#define USE_NESTED_HOT_ARG(x)
-#endif
-#endif
-
 // Assume using BGET compare_exchange instruction instead of lock by default.
 #ifndef USE_CMP_XCHG_FOR_BGET
 #define USE_CMP_XCHG_FOR_BGET 1
@@ -2940,14 +2929,12 @@ typedef struct kmp_free_list {
   // sync list)
 } kmp_free_list_t;
 #endif
-#if KMP_NESTED_HOT_TEAMS
 // Hot teams array keeps hot teams and their sizes for given thread. Hot teams
 // are not put in teams pool, and they don't put threads in threads pool.
 typedef struct kmp_hot_team_ptr {
   kmp_team_p *hot_team; // pointer to hot_team of given nesting level
   kmp_int32 hot_team_nth; // number of threads allocated for the hot_team
 } kmp_hot_team_ptr_t;
-#endif
 typedef struct kmp_teams_size {
   kmp_int32 nteams; // number of teams in a league
   kmp_int32 nth; // number of threads in each team of the league
@@ -3022,9 +3009,7 @@ typedef struct KMP_ALIGN_CACHE kmp_base_info {
   int th_nt_sev; // error severity for strict modifier
   const char *th_nt_msg; // error message for strict modifier
   int th_set_nested_nth_sz;
-#if KMP_NESTED_HOT_TEAMS
   kmp_hot_team_ptr_t *th_hot_teams; /* array of hot teams */
-#endif
   kmp_proc_bind_t
       th_set_proc_bind; /* if != proc_bind_default, use request for next fork */
   kmp_teams_size_t
@@ -3580,10 +3565,8 @@ extern int __kmp_dflt_max_active_levels;
 extern bool __kmp_dflt_max_active_levels_set;
 extern int __kmp_dispatch_num_buffers; /* max possible dynamic loops in
                                           concurrent execution per team */
-#if KMP_NESTED_HOT_TEAMS
 extern int __kmp_hot_teams_mode;
 extern int __kmp_hot_teams_max_level;
-#endif
 
 #if KMP_MIC_SUPPORTED
 extern enum mic_type __kmp_mic_type;
@@ -4067,16 +4050,16 @@ extern void __kmp_suspend_uninitialize_thread(kmp_info_t *th);
 
 extern kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
                                          int tid);
-extern kmp_team_t *
-__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
+extern kmp_team_t *__kmp_allocate_team(kmp_root_t *root, int new_nproc,
+                                       int max_nproc,
 #if OMPT_SUPPORT
-                    ompt_data_t ompt_parallel_data,
+                                       ompt_data_t ompt_parallel_data,
 #endif
-                    kmp_proc_bind_t proc_bind, kmp_internal_control_t *new_icvs,
-                    int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
+                                       kmp_proc_bind_t proc_bind,
+                                       kmp_internal_control_t *new_icvs,
+                                       int argc, kmp_info_t *thr);
 extern void __kmp_free_thread(kmp_info_t *);
-extern void __kmp_free_team(kmp_root_t *,
-                            kmp_team_t *USE_NESTED_HOT_ARG(kmp_info_t *));
+extern void __kmp_free_team(kmp_root_t *, kmp_team_t *, kmp_info_t *);
 extern kmp_team_t *__kmp_reap_team(kmp_team_t *);
 
 /* ------------------------------------------------------------------------ */
diff --git a/openmp/runtime/src/kmp_config.h.cmake b/openmp/runtime/src/kmp_config.h.cmake
index d64c9a4b557df..40f1087fd7f27 100644
--- a/openmp/runtime/src/kmp_config.h.cmake
+++ b/openmp/runtime/src/kmp_config.h.cmake
@@ -114,7 +114,6 @@
 # define BUILD_I8 1
 #endif
 
-#define KMP_NESTED_HOT_TEAMS 1
 #define KMP_ADJUST_BLOCKTIME 1
 #define BUILD_PARALLEL_ORDERED 1
 #define KMP_ASM_INTRINS 1
diff --git a/openmp/runtime/src/kmp_global.cpp b/openmp/runtime/src/kmp_global.cpp
index 87c0a66a16c0a..323d13e948b42 100644
--- a/openmp/runtime/src/kmp_global.cpp
+++ b/openmp/runtime/src/kmp_global.cpp
@@ -135,11 +135,9 @@ int __kmp_tp_cached = 0;
 int __kmp_dispatch_num_buffers = KMP_DFLT_DISP_NUM_BUFF;
 int __kmp_dflt_max_active_levels = 1; // Nesting off by default
 bool __kmp_dflt_max_active_levels_set = false; // Don't override set value
-#if KMP_NESTED_HOT_TEAMS
 int __kmp_hot_teams_mode = 0; /* 0 - free extra threads when reduced */
 /* 1 - keep extra threads when reduced */
 int __kmp_hot_teams_max_level = 1; /* nesting level of hot teams */
-#endif
 enum library_type __kmp_library = library_none;
 enum sched_type __kmp_sched =
     kmp_sch_default; /* scheduling method for runtime scheduling */
diff --git a/openmp/runtime/src/kmp_runtime.cpp b/openmp/runtime/src/kmp_runtime.cpp
index 417eceb8ebecc..e0f5f387bf30c 100644
--- a/openmp/runtime/src/kmp_runtime.cpp
+++ b/openmp/runtime/src/kmp_runtime.cpp
@@ -977,8 +977,7 @@ static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
   master_th->th.th_team_serialized = FALSE;
   master_th->th.th_dispatch = &team->t.t_dispatch[0];
 
-/* make sure we are not the optimized hot team */
-#if KMP_NESTED_HOT_TEAMS
+  /* make sure we are not the optimized hot team */
   use_hot_team = 0;
   kmp_hot_team_ptr_t *hot_teams = master_th->th.th_hot_teams;
   if (hot_teams) { // hot teams array is not allocated if
@@ -1009,9 +1008,6 @@ static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
       use_hot_team = 0;
     }
   }
-#else
-  use_hot_team = team == root->r.r_hot_team;
-#endif
   if (!use_hot_team) {
 
     /* install the primary thread */
@@ -1249,13 +1245,12 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
 
       __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
 
-      new_team =
-          __kmp_allocate_team(this_thr->th.th_root, 1, 1,
+      new_team = __kmp_allocate_team(
+          this_thr->th.th_root, 1, 1,
 #if OMPT_SUPPORT
-                              ompt_parallel_data,
+          ompt_parallel_data,
 #endif
-                              proc_bind, &this_thr->th.th_current_task->td_icvs,
-                              0 USE_NESTED_HOT_ARG(NULL));
+          proc_bind, &this_thr->th.th_current_task->td_icvs, 0, NULL);
       __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
       KMP_ASSERT(new_team);
 
@@ -1946,9 +1941,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
   int level;
   int active_level;
   int teams_level;
-#if KMP_NESTED_HOT_TEAMS
   kmp_hot_team_ptr_t **p_hot_teams;
-#endif
   { // KMP_TIME_BLOCK
     KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_fork_call);
     KMP_COUNT_VALUE(OMP_PARALLEL_args, argc);
@@ -2006,7 +1999,6 @@ int __kmp_fork_call(ident_t *loc, int gtid,
     active_level = parent_team->t.t_active_level;
     // needed to check nesting inside the teams
     teams_level = master_th->th.th_teams_level;
-#if KMP_NESTED_HOT_TEAMS
     p_hot_teams = &master_th->th.th_hot_teams;
     if (*p_hot_teams == NULL && __kmp_hot_teams_max_level > 0) {
       *p_hot_teams = (kmp_hot_team_ptr_t *)__kmp_allocate(
@@ -2015,7 +2007,6 @@ int __kmp_fork_call(ident_t *loc, int gtid,
       // it is either actual or not needed (when active_level > 0)
       (*p_hot_teams)[0].hot_team_nth = 1;
     }
-#endif
 
 #if OMPT_SUPPORT
     if (ompt_enabled.enabled) {
@@ -2194,20 +2185,18 @@ int __kmp_fork_call(ident_t *loc, int gtid,
 #if OMPT_SUPPORT
                                  ompt_parallel_data,
 #endif
-                                 proc_bind, &new_icvs,
-                                 argc USE_NESTED_HOT_ARG(master_th));
+                                 proc_bind, &new_icvs, argc, master_th);
       if (__kmp_barrier_release_pattern[bs_forkjoin_barrier] == bp_dist_bar)
         copy_icvs((kmp_internal_control_t *)team->t.b->team_icvs, &new_icvs);
     } else {
       /* allocate a new parallel team */
       KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
-      team = __kmp_allocate_team(root, nthreads, nthreads,
+      team = __kmp_allocate_team(
+          root, nthreads, nthreads,
 #if OMPT_SUPPORT
-                                 ompt_parallel_data,
+          ompt_parallel_data,
 #endif
-                                 proc_bind,
-                                 &master_th->th.th_current_task->td_icvs,
-                                 argc USE_NESTED_HOT_ARG(master_th));
+          proc_bind, &master_th->th.th_current_task->td_icvs, argc, master_th);
       if (__kmp_barrier_release_pattern[bs_forkjoin_barrier] == bp_dist_bar)
         copy_icvs((kmp_internal_control_t *)team->t.b->team_icvs,
                   &master_th->th.th_current_task->td_icvs);
@@ -2693,8 +2682,7 @@ void __kmp_join_call(ident_t *loc, int gtid
   if (root->r.r_active != master_active)
     root->r.r_active = master_active;
 
-  __kmp_free_team(root, team USE_NESTED_HOT_ARG(
-                            master_th)); // this will free worker threads
+  __kmp_free_team(root, team, master_th); // this will free worker threads
 
   /* this race was fun to find. make sure the following is in the critical
      region otherwise assertions may fail occasionally since the old team may be
@@ -2710,8 +2698,7 @@ void __kmp_join_call(ident_t *loc, int gtid
   if (parent_team->t.t_serialized &&
       parent_team != master_th->th.th_serial_team &&
       parent_team != root->r.r_root_team) {
-    __kmp_free_team(root,
-                    master_th->th.th_serial_team USE_NESTED_HOT_ARG(NULL));
+    __kmp_free_team(root, master_th->th.th_serial_team, NULL);
     master_th->th.th_serial_team = parent_team;
   }
 
@@ -2817,11 +2804,8 @@ void __kmp_set_num_threads(int new_nth, int gtid) {
   // rather than waiting for the next parallel region.
   root = thread->th.th_root;
   if (__kmp_init_parallel && (!root->r.r_active) &&
-      (root->r.r_hot_team->t.t_nproc > new_nth)
-#if KMP_NESTED_HOT_TEAMS
-      && __kmp_hot_teams_max_level && !__kmp_hot_teams_mode
-#endif
-  ) {
+      (root->r.r_hot_team->t.t_nproc > new_nth) && __kmp_hot_teams_max_level &&
+      !__kmp_hot_teams_mode) {
     kmp_team_t *hot_team = root->r.r_hot_team;
     int f;
 
@@ -2842,12 +2826,10 @@ void __kmp_set_num_threads(int new_nth, int gtid) {
       hot_team->t.t_threads[f] = NULL;
     }
     hot_team->t.t_nproc = new_nth;
-#if KMP_NESTED_HOT_TEAMS
     if (thread->th.th_hot_teams) {
       KMP_DEBUG_ASSERT(hot_team == thread->th.th_hot_teams[0].hot_team);
       thread->th.th_hot_teams[0].hot_team_nth = new_nth;
     }
-#endif
 
     if (__kmp_barrier_release_pattern[bs_forkjoin_barrier] == bp_dist_bar) {
       hot_team->t.b->update_num_threads(new_nth);
@@ -3369,17 +3351,16 @@ static void __kmp_initialize_root(kmp_root_t *root) {
   /* allocate the root team structure */
   KF_TRACE(10, ("__kmp_initialize_root: before root_team\n"));
 
-  root_team =
-      __kmp_allocate_team(root,
-                          1, // new_nproc
-                          1, // max_nproc
+  root_team = __kmp_allocate_team(root,
+                                  1, // new_nproc
+                                  1, // max_nproc
 #if OMPT_SUPPORT
-                          ompt_data_none, // root parallel id
+                                  ompt_data_none, // root parallel id
 #endif
-                          __kmp_nested_proc_bind.bind_types[0], &r_icvs,
-                          0 // argc
-                          USE_NESTED_HOT_ARG(NULL) // primary thread is unknown
-      );
+                                  __kmp_nested_proc_bind.bind_types[0], &r_icvs,
+                                  0, // argc
+                                  NULL // primary thread is unknown
+  );
 #if USE_DEBUGGER
   // Non-NULL value should be assigned to make the debugger display the root
   // team.
@@ -3407,17 +3388,16 @@ static void __kmp_initialize_root(kmp_root_t *root) {
   /* allocate the hot team structure */
   KF_TRACE(10, ("__kmp_initialize_root: before hot_team\n"));
 
-  hot_team =
-      __kmp_allocate_team(root,
-                          1, // new_nproc
-                          __kmp_dflt_team_nth_ub * 2, // max_nproc
+  hot_team = __kmp_allocate_team(root,
+                                 1, // new_nproc
+                                 __kmp_dflt_team_nth_ub * 2, // max_nproc
 #if OMPT_SUPPORT
-                          ompt_data_none, // root parallel id
+                                 ompt_data_none, // root parallel id
 #endif
-                          __kmp_nested_proc_bind.bind_types[0], &r_icvs,
-                          0 // argc
-                          USE_NESTED_HOT_ARG(NULL) // primary thread is unknown
-      );
+                                 __kmp_nested_proc_bind.bind_types[0], &r_icvs,
+                                 0, // argc
+                                 NULL // primary thread is unknown
+  );
   KF_TRACE(10, ("__kmp_initialize_root: after hot_team = %p\n", hot_team));
 
   root->r.r_hot_team = hot_team;
@@ -3956,12 +3936,12 @@ int __kmp_register_root(int initial_thread) {
   if (!root_thread->th.th_serial_team) {
     kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
     KF_TRACE(10, ("__kmp_register_root: before serial_team\n"));
-    root_thread->th.th_serial_team = __kmp_allocate_team(
-        root, 1, 1,
+    root_thread->th.th_serial_team =
+        __kmp_allocate_team(root, 1, 1,
 #if OMPT_SUPPORT
-        ompt_data_none, // root parallel id
+                            ompt_data_none, // root parallel id
 #endif
-        proc_bind_default, &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
+                            proc_bind_default, &r_icvs, 0, NULL);
   }
   KMP_ASSERT(root_thread->th.th_serial_team);
   KF_TRACE(10, ("__kmp_register_root: after serial_team = %p\n",
@@ -4067,7 +4047,6 @@ int __kmp_register_root(int initial_thread) {
   return gtid;
 }
 
-#if KMP_NESTED_HOT_TEAMS
 static int __kmp_free_hot_teams(kmp_root_t *root, kmp_info_t *thr, int level,
                                 const int max_level) {
   int i, n, nth;
@@ -4092,7 +4071,6 @@ static int __kmp_free_hot_teams(kmp_root_t *root, kmp_info_t *thr, int level,
   __kmp_free_team(root, team, NULL);
   return n;
 }
-#endif
 
 // Resets a root thread and clear its root and hot teams.
 // Returns the number of __kmp_threads entries directly and indirectly freed.
@@ -4108,8 +4086,7 @@ static int __kmp_reset_root(int gtid, kmp_root_t *root) {
   root->r.r_hot_team = NULL;
   // __kmp_free_team() does not free hot teams, so we have to clear r_hot_team
   // before call to __kmp_free_team().
-  __kmp_free_team(root, root_team USE_NESTED_HOT_ARG(NULL));
-#if KMP_NESTED_HOT_TEAMS
+  __kmp_free_team(root, root_team, NULL);
   if (__kmp_hot_teams_max_level >
       0) { // need to free nested hot teams and their threads if any
     for (i = 0; i < hot_team->t.t_nproc; ++i) {
@@ -4123,8 +4100,7 @@ static int __kmp_reset_root(int gtid, kmp_root_t *root) {
       }
     }
   }
-#endif
-  __kmp_free_team(root, hot_team USE_NESTED_HOT_ARG(NULL));
+  __kmp_free_team(root, hot_team, NULL);
 
   // Before we can reap the thread, we need to make certain that all other
   // threads in the teams that had this root as ancestor have stopped trying to
@@ -4431,9 +4407,6 @@ kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
 
   KA_TRACE(20, ("__kmp_allocate_thread: T#%d\n", __kmp_get_gtid()));
   KMP_DEBUG_ASSERT(root && team);
-#if !KMP_NESTED_HOT_TEAMS
-  KMP_DEBUG_ASSERT(KMP_MASTER_GTID(__kmp_get_gtid()));
-#endif
   KMP_MB();
 
   /* first, try to get one from the thread pool unless allocating thread is
@@ -4608,8 +4581,7 @@ kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
 #if OMPT_SUPPORT
                                           ompt_data_none, // root parallel id
 #endif
-                                          proc_bind_default, &r_icvs,
-                                          0 USE_NESTED_HOT_ARG(NULL));
+                                          proc_bind_default, &r_icvs, 0, NULL);
   }
   KMP_ASSERT(serial_team);
   serial_team->t.t_serialized = 0; // AC: the team created in reserve, not for
@@ -5133,14 +5105,13 @@ static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
 
 /* allocate a new team data structure to use.  take one off of the free pool if
    available */
-kmp_team_t *
-__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
+kmp_team_t *__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
 #if OMPT_SUPPORT
-                    ompt_data_t ompt_parallel_data,
+                                ompt_data_t ompt_parallel_data,
 #endif
-                    kmp_proc_bind_t new_proc_bind,
-                    kmp_internal_control_t *new_icvs,
-                    int argc USE_NESTED_HOT_ARG(kmp_info_t *master)) {
+                                kmp_proc_bind_t new_proc_bind,
+                                kmp_internal_control_t *new_icvs, int argc,
+                                kmp_info_t *master) {
   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_allocate_team);
   int f;
   kmp_team_t *team;
@@ -5153,7 +5124,6 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
   KMP_DEBUG_ASSERT(max_nproc >= new_nproc);
   KMP_MB();
 
-#if KMP_NESTED_HOT_TEAMS
   kmp_hot_team_ptr_t *hot_teams;
   if (master) {
     team = master->th.th_team;
@@ -5187,15 +5157,10 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
     // check we won't access uninitialized hot_teams, just in case
     KMP_DEBUG_ASSERT(new_nproc == 1);
   }
-#endif
   // Optimization to use a "hot" team
   if (use_hot_team && new_nproc > 1) {
     KMP_DEBUG_ASSERT(new_nproc <= max_nproc);
-#if KMP_NESTED_HOT_TEAMS
     team = hot_teams[level].hot_team;
-#else
-    team = root->r.r_hot_team;
-#endif
 #if KMP_DEBUG
     if (__kmp_tasking_mode != tskm_immediate_exec) {
       KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
@@ -5282,20 +5247,17 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
           th->th.th_task_team = NULL;
         }
       }
-#if KMP_NESTED_HOT_TEAMS
       if (__kmp_hot_teams_mode == 0) {
         // AC: saved number of threads should correspond to team's value in this
         // mode, can be bigger in mode 1, when hot team has threads in reserve
         KMP_DEBUG_ASSERT(hot_teams[level].hot_team_nth == team->t.t_nproc);
         hot_teams[level].hot_team_nth = new_nproc;
-#endif // KMP_NESTED_HOT_TEAMS
         /* release the extra threads we don't need any more */
         for (f = new_nproc; f < team->t.t_nproc; f++) {
           KMP_DEBUG_ASSERT(team->t.t_threads[f]);
           __kmp_free_thread(team->t.t_threads[f]);
           team->t.t_threads[f] = NULL;
         }
-#if KMP_NESTED_HOT_TEAMS
       } // (__kmp_hot_teams_mode == 0)
       else {
         // When keeping extra threads in team, switch threads to wait on own
@@ -5311,7 +5273,6 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
           }
         }
       }
-#endif // KMP_NESTED_HOT_TEAMS
       team->t.t_nproc = new_nproc;
       // TODO???: team->t.t_max_active_levels = new_max_active_levels;
       KMP_CHECK_UPDATE(team->t.t_sched.sched, new_icvs->sched.sched);
@@ -5352,7 +5313,6 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
       int old_nproc = team->t.t_nproc; // save old value and use to update only
       team->t.t_size_changed = 1;
 
-#if KMP_NESTED_HOT_TEAMS
       int avail_threads = hot_teams[level].hot_team_nth;
       if (new_nproc < avail_threads)
         avail_threads = new_nproc;
@@ -5380,7 +5340,6 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
         // get reserved threads involved if any.
         team->t.t_nproc = hot_teams[level].hot_team_nth;
         hot_teams[level].hot_team_nth = new_nproc; // adjust hot team max size
-#endif // KMP_NESTED_HOT_TEAMS
         if (team->t.t_max_nproc < new_nproc) {
           /* reallocate larger arrays */
           __kmp_reallocate_team_arrays(team, new_nproc);
@@ -5429,9 +5388,7 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
         /* Restore initial primary thread's affinity mask */
         new_temp_affinity.restore();
 #endif
-#if KMP_NESTED_HOT_TEAMS
       } // end of check of t_nproc vs. new_nproc vs. hot_team_nth
-#endif // KMP_NESTED_HOT_TEAMS
       if (__kmp_barrier_release_pattern[bs_forkjoin_barrier] == bp_dist_bar) {
         // Barrier size already increased earlier in this function
         // Activate team threads via th_used_in_team
@@ -5478,7 +5435,6 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
         thr->th.th_teams_size = master->th.th_teams_size;
       }
     }
-#if KMP_NESTED_HOT_TEAMS
     if (level) {
       // Sync barrier state for nested hot teams, not needed for outermost hot
       // team.
@@ -5495,7 +5451,6 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
         }
       }
     }
-#endif // KMP_NESTED_HOT_TEAMS
 
     /* reallocate space for arguments if necessary */
     __kmp_alloc_argv_entries(argc, team, TRUE);
@@ -5660,8 +5615,7 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
 
 /* free the team.  return it to the team pool.  release all the threads
  * associated with it */
-void __kmp_free_team(kmp_root_t *root,
-                     kmp_team_t *team USE_NESTED_HOT_ARG(kmp_info_t *master)) {
+void __kmp_free_team(kmp_root_t *root, kmp_team_t *team, kmp_info_t *master) {
   int f;
   KA_TRACE(20, ("__kmp_free_team: T#%d freeing team %d\n", __kmp_get_gtid(),
                 team->t.t_id));
@@ -5673,7 +5627,6 @@ void __kmp_free_team(kmp_root_t *root,
   KMP_DEBUG_ASSERT(team->t.t_threads);
 
   int use_hot_team = team == root->r.r_hot_team;
-#if KMP_NESTED_HOT_TEAMS
   int level;
   if (master) {
     level = team->t.t_active_level - 1;
@@ -5696,7 +5649,6 @@ void __kmp_free_team(kmp_root_t *root,
       use_hot_team = 1;
     }
   }
-#endif // KMP_NESTED_HOT_TEAMS
 
   /* team is done working */
   TCW_SYNC_PTR(team->t.t_pkfn,
@@ -5743,9 +5695,7 @@ void __kmp_free_team(kmp_root_t *root,
               20,
               ("__kmp_free_team: T#%d deactivating task_team %p on team %d\n",
                __kmp_get_gtid(), task_team, team->t.t_id));
-#if KMP_NESTED_HOT_TEAMS
           __kmp_free_task_team(master, task_team);
-#endif
           team->t.t_task_team[tt_idx] = NULL;
         }
       }
diff --git a/openmp/runtime/src/kmp_settings.cpp b/openmp/runtime/src/kmp_settings.cpp
index 392a02ebbd9aa..ec617ff870c47 100644
--- a/openmp/runtime/src/kmp_settings.cpp
+++ b/openmp/runtime/src/kmp_settings.cpp
@@ -1501,7 +1501,6 @@ static void __kmp_stg_print_disp_buffers(kmp_str_buf_t *buffer,
   __kmp_stg_print_int(buffer, name, __kmp_dispatch_num_buffers);
 } // __kmp_stg_print_disp_buffers
 
-#if KMP_NESTED_HOT_TEAMS
 // -----------------------------------------------------------------------------
 // KMP_HOT_TEAMS_MAX_LEVEL, KMP_HOT_TEAMS_MODE
 
@@ -1535,8 +1534,6 @@ static void __kmp_stg_print_hot_teams_mode(kmp_str_buf_t *buffer,
   __kmp_stg_print_int(buffer, name, __kmp_hot_teams_mode);
 } // __kmp_stg_print_hot_teams_mode
 
-#endif // KMP_NESTED_HOT_TEAMS
-
 // -----------------------------------------------------------------------------
 // KMP_HANDLE_SIGNALS
 
@@ -5569,12 +5566,10 @@ static kmp_setting_t __kmp_stg_table[] = {
      __kmp_stg_print_wait_policy, NULL, 0, 0},
     {"KMP_DISP_NUM_BUFFERS", __kmp_stg_parse_disp_buffers,
      __kmp_stg_print_disp_buffers, NULL, 0, 0},
-#if KMP_NESTED_HOT_TEAMS
     {"KMP_HOT_TEAMS_MAX_LEVEL", __kmp_stg_parse_hot_teams_level,
      __kmp_stg_print_hot_teams_level, NULL, 0, 0},
     {"KMP_HOT_TEAMS_MODE", __kmp_stg_parse_hot_teams_mode,
      __kmp_stg_print_hot_teams_mode, NULL, 0, 0},
-#endif // KMP_NESTED_HOT_TEAMS
 
 #if KMP_HANDLE_SIGNALS
     {"KMP_HANDLE_SIGNALS", __kmp_stg_parse_handle_signals,
@@ -5758,7 +5753,8 @@ static kmp_setting_t __kmp_stg_table[] = {
 #if OMPX_TASKGRAPH
     {"KMP_MAX_TDGS", __kmp_stg_parse_max_tdgs, __kmp_std_print_max_tdgs, NULL,
      0, 0},
-    {"KMP_TDG_DOT", __kmp_stg_parse_tdg_dot, __kmp_stg_print_tdg_dot, NULL, 0, 0},
+    {"KMP_TDG_DOT", __kmp_stg_parse_tdg_dot, __kmp_stg_print_tdg_dot, NULL, 0,
+     0},
 #endif
 
 #if OMPT_SUPPORT



More information about the Openmp-commits mailing list