path: root/openmp/runtime/src/kmp_runtime.cpp
author    Jonathan Peyton <jonathan.l.peyton@intel.com>    2019-07-12 21:45:36 +0000
committer Jonathan Peyton <jonathan.l.peyton@intel.com>    2019-07-12 21:45:36 +0000
commit    e4b4f994d2f6a090694276b40d433dc1a58beb24 (patch)
tree      9b652f3db2787e6c8f18aa74d87eedd890ed0a64 /openmp/runtime/src/kmp_runtime.cpp
parent    603580216f0aa387b0d4c01b8d2f05873e6a98e2 (diff)
[OpenMP] Remove OMP spec versioning
Remove all older OMP spec versioning from the runtime and build system.

Patch by Terry Wilmarth

Differential Revision: https://reviews.llvm.org/D64534

llvm-svn: 365963
Diffstat (limited to 'openmp/runtime/src/kmp_runtime.cpp')
-rw-r--r--  openmp/runtime/src/kmp_runtime.cpp | 333
1 file changed, 55 insertions(+), 278 deletions(-)
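
To make the intent of the change concrete before the diff itself, the sketch below condenses the pattern the patch applies throughout kmp_runtime.cpp: blocks guarded by OMP_40_ENABLED / OMP_45_ENABLED / OMP_50_ENABLED become unconditional OpenMP 5.0 code. It mirrors the __kmp_version_omp_api hunk at the top of the diff; the KMP_VERSION_PREFIX value and the main() driver are simplified stand-ins for illustration, not the runtime's real definitions.

    // Illustrative sketch of the spec-versioning removal, not part of the patch.
    #include <cstdio>

    #define KMP_VERSION_PREFIX "OMP: "  // assumption: the real prefix lives in kmp.h

    // Before the patch: the highest enabled spec macro selected the API string.
    #define OMP_40_ENABLED 1
    #define OMP_45_ENABLED 1
    #define OMP_50_ENABLED 1

    char const kmp_version_omp_api_old[] = KMP_VERSION_PREFIX "API version: "
    #if OMP_50_ENABLED
        "5.0 (201611)";
    #elif OMP_45_ENABLED
        "4.5 (201511)";
    #elif OMP_40_ENABLED
        "4.0 (201307)";
    #else
        "3.1 (201107)";
    #endif

    // After the patch: a single unconditional definition, as in the first hunk below.
    char const kmp_version_omp_api_new[] =
        KMP_VERSION_PREFIX "API version: 5.0 (201611)";

    int main() {
      std::printf("%s\n%s\n", kmp_version_omp_api_old, kmp_version_omp_api_new);
      return 0;
    }

The same simplification recurs for runtime calls (e.g. __kmp_resume_if_soft_paused), function parameters such as proc_bind, and affinity code, where the version guard is simply dropped and the code compiles unconditionally.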
diff --git a/openmp/runtime/src/kmp_runtime.cpp b/openmp/runtime/src/kmp_runtime.cpp
index 4167d199d9a..7f6c149c792 100644
--- a/openmp/runtime/src/kmp_runtime.cpp
+++ b/openmp/runtime/src/kmp_runtime.cpp
@@ -46,16 +46,8 @@ char const __kmp_version_alt_comp[] =
KMP_VERSION_PREFIX "alternative compiler support: yes";
#endif /* defined(KMP_GOMP_COMPAT) */
-char const __kmp_version_omp_api[] = KMP_VERSION_PREFIX "API version: "
-#if OMP_50_ENABLED
- "5.0 (201611)";
-#elif OMP_45_ENABLED
- "4.5 (201511)";
-#elif OMP_40_ENABLED
- "4.0 (201307)";
-#else
- "3.1 (201107)";
-#endif
+char const __kmp_version_omp_api[] =
+ KMP_VERSION_PREFIX "API version: 5.0 (201611)";
#ifdef KMP_DEBUG
char const __kmp_version_lock[] =
@@ -79,7 +71,7 @@ static void __kmp_initialize_info(kmp_info_t *, kmp_team_t *, int tid,
static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
kmp_internal_control_t *new_icvs,
ident_t *loc);
-#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
+#if KMP_AFFINITY_SUPPORTED
static void __kmp_partition_places(kmp_team_t *team,
int update_master_only = 0);
#endif
@@ -534,16 +526,8 @@ static void __kmp_print_team_storage_map(const char *header, kmp_team_t *team,
"%s_%d.t_disp_buffer", header, team_id);
}
-static void __kmp_init_allocator() {
-#if OMP_50_ENABLED
- __kmp_init_memkind();
-#endif
-}
-static void __kmp_fini_allocator() {
-#if OMP_50_ENABLED
- __kmp_fini_memkind();
-#endif
-}
+static void __kmp_init_allocator() { __kmp_init_memkind(); }
+static void __kmp_fini_allocator() { __kmp_fini_memkind(); }
/* ------------------------------------------------------------------------ */
@@ -728,10 +712,7 @@ int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws) {
if (!TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
-
-#if OMP_50_ENABLED
__kmp_resume_if_soft_paused();
-#endif
th = __kmp_threads[gtid];
team = th->th.th_team;
@@ -754,10 +735,7 @@ int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws) {
}
#if USE_ITT_BUILD
if (__itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
- KMP_MASTER_GTID(gtid) &&
-#if OMP_40_ENABLED
- th->th.th_teams_microtask == NULL &&
-#endif
+ KMP_MASTER_GTID(gtid) && th->th.th_teams_microtask == NULL &&
team->t.t_active_level ==
1) { // Only report metadata by master of active team at level 1
__kmp_itt_metadata_single(id_ref);
@@ -795,12 +773,8 @@ void __kmp_exit_single(int gtid) {
* otherwise the number of threads to use
* The forkjoin lock is held by the caller. */
static int __kmp_reserve_threads(kmp_root_t *root, kmp_team_t *parent_team,
- int master_tid, int set_nthreads
-#if OMP_40_ENABLED
- ,
- int enter_teams
-#endif /* OMP_40_ENABLED */
- ) {
+ int master_tid, int set_nthreads,
+ int enter_teams) {
int capacity;
int new_nthreads;
KMP_DEBUG_ASSERT(__kmp_init_serial);
@@ -1053,11 +1027,9 @@ static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
__kmp_gtid_from_tid(i, team), team->t.t_id, i,
team->t.t_bar[bs_forkjoin_barrier].b_arrived,
team->t.t_bar[bs_plain_barrier].b_arrived));
-#if OMP_40_ENABLED
thr->th.th_teams_microtask = master_th->th.th_teams_microtask;
thr->th.th_teams_level = master_th->th.th_teams_level;
thr->th.th_teams_size = master_th->th.th_teams_size;
-#endif
{ // Initialize threads' barrier data.
int b;
kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar;
@@ -1071,12 +1043,11 @@ static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
}
}
-#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
+#if KMP_AFFINITY_SUPPORTED
__kmp_partition_places(team);
#endif
}
-#if OMP_50_ENABLED
if (__kmp_display_affinity && team->t.t_display_affinity != 1) {
for (i = 0; i < team->t.t_nproc; i++) {
kmp_info_t *thr = team->t.t_threads[i];
@@ -1087,7 +1058,6 @@ static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
}
}
}
-#endif
KMP_MB();
}
@@ -1172,10 +1142,7 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
if (!TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
-
-#if OMP_50_ENABLED
__kmp_resume_if_soft_paused();
-#endif
this_thr = __kmp_threads[global_tid];
serial_team = this_thr->th.th_serial_team;
@@ -1196,7 +1163,6 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
this_thr->th.th_task_team = NULL;
}
-#if OMP_40_ENABLED
kmp_proc_bind_t proc_bind = this_thr->th.th_set_proc_bind;
if (this_thr->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
proc_bind = proc_bind_false;
@@ -1207,7 +1173,6 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
}
// Reset for next parallel region
this_thr->th.th_set_proc_bind = proc_bind_default;
-#endif /* OMP_40_ENABLED */
#if OMPT_SUPPORT
ompt_data_t ompt_parallel_data = ompt_data_none;
@@ -1242,15 +1207,13 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
__kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
- new_team = __kmp_allocate_team(this_thr->th.th_root, 1, 1,
+ new_team =
+ __kmp_allocate_team(this_thr->th.th_root, 1, 1,
#if OMPT_SUPPORT
- ompt_parallel_data,
-#endif
-#if OMP_40_ENABLED
- proc_bind,
+ ompt_parallel_data,
#endif
- &this_thr->th.th_current_task->td_icvs,
- 0 USE_NESTED_HOT_ARG(NULL));
+ proc_bind, &this_thr->th.th_current_task->td_icvs,
+ 0 USE_NESTED_HOT_ARG(NULL));
__kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
KMP_ASSERT(new_team);
@@ -1307,13 +1270,11 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
__kmp_nested_nth.nth[level + 1];
}
-#if OMP_40_ENABLED
if (__kmp_nested_proc_bind.used &&
(level + 1 < __kmp_nested_proc_bind.used)) {
this_thr->th.th_current_task->td_icvs.proc_bind =
__kmp_nested_proc_bind.bind_types[level + 1];
}
-#endif /* OMP_40_ENABLED */
#if USE_DEBUGGER
serial_team->t.t_pkfn = (microtask_t)(~0); // For the debugger.
@@ -1327,9 +1288,7 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
serial_team->t.t_level = serial_team->t.t_parent->t.t_level + 1;
serial_team->t.t_active_level = serial_team->t.t_parent->t.t_active_level;
-#if OMP_50_ENABLED
serial_team->t.t_def_allocator = this_thr->th.th_def_allocator; // save
-#endif
propagateFPControl(serial_team);
@@ -1379,11 +1338,8 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
KMP_MB();
}
-#if OMP_40_ENABLED
KMP_CHECK_UPDATE(serial_team->t.t_cancel_request, cancel_noreq);
-#endif
-#if OMP_50_ENABLED
// Perform the display affinity functionality for
// serialized parallel regions
if (__kmp_display_affinity) {
@@ -1395,7 +1351,6 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
this_thr->th.th_prev_num_threads = 1;
}
}
-#endif
if (__kmp_env_consistency_check)
__kmp_push_parallel(global_tid, NULL);
@@ -1453,10 +1408,8 @@ int __kmp_fork_call(ident_t *loc, int gtid,
int master_active;
int master_set_numthreads;
int level;
-#if OMP_40_ENABLED
int active_level;
int teams_level;
-#endif
#if KMP_NESTED_HOT_TEAMS
kmp_hot_team_ptr_t **p_hot_teams;
#endif
@@ -1479,10 +1432,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
__kmp_init_serial); // AC: potentially unsafe, not in sync with shutdown
if (!TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
-
-#if OMP_50_ENABLED
__kmp_resume_if_soft_paused();
-#endif
/* setup current data */
master_th = __kmp_threads[gtid]; // AC: potentially unsafe, not in sync with
@@ -1512,10 +1462,8 @@ int __kmp_fork_call(ident_t *loc, int gtid,
level = parent_team->t.t_level;
// used to launch non-serial teams even if nested is not allowed
active_level = parent_team->t.t_active_level;
-#if OMP_40_ENABLED
// needed to check nesting inside the teams
teams_level = master_th->th.th_teams_level;
-#endif
#if KMP_NESTED_HOT_TEAMS
p_hot_teams = &master_th->th.th_hot_teams;
if (*p_hot_teams == NULL && __kmp_hot_teams_max_level > 0) {
@@ -1543,7 +1491,6 @@ int __kmp_fork_call(ident_t *loc, int gtid,
master_th->th.th_ident = loc;
-#if OMP_40_ENABLED
if (master_th->th.th_teams_microtask && ap &&
microtask != (microtask_t)__kmp_teams_master && level == teams_level) {
// AC: This is start of parallel that is nested inside teams construct.
@@ -1637,9 +1584,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
KMP_ATOMIC_INC(&root->r.r_in_parallel);
parent_team->t.t_active_level++;
parent_team->t.t_level++;
-#if OMP_50_ENABLED
parent_team->t.t_def_allocator = master_th->th.th_def_allocator; // save
-#endif
/* Change number of threads in the team if requested */
if (master_set_numthreads) { // The parallel has num_threads clause
@@ -1687,7 +1632,6 @@ int __kmp_fork_call(ident_t *loc, int gtid,
return TRUE;
} // Parallel closely nested in teams construct
-#endif /* OMP_40_ENABLED */
#if KMP_DEBUG
if (__kmp_tasking_mode != tskm_immediate_exec) {
@@ -1700,10 +1644,8 @@ int __kmp_fork_call(ident_t *loc, int gtid,
master_th->th.th_current_task->td_icvs.max_active_levels) {
nthreads = 1;
} else {
-#if OMP_40_ENABLED
int enter_teams = ((ap == NULL && active_level == 0) ||
(ap && teams_level > 0 && teams_level == level));
-#endif
nthreads =
master_set_numthreads
? master_set_numthreads
@@ -1715,11 +1657,8 @@ int __kmp_fork_call(ident_t *loc, int gtid,
// parallel out of teams construct). This code moved here from
// __kmp_reserve_threads() to speedup nested serialized parallels.
if (nthreads > 1) {
- if ((get__max_active_levels(master_th) == 1 && (root->r.r_in_parallel
-#if OMP_40_ENABLED
- && !enter_teams
-#endif /* OMP_40_ENABLED */
- )) ||
+ if ((get__max_active_levels(master_th) == 1 &&
+ (root->r.r_in_parallel && !enter_teams)) ||
(__kmp_library == library_serial)) {
KC_TRACE(10, ("__kmp_fork_call: T#%d serializing team; requested %d"
" threads\n",
@@ -1730,18 +1669,12 @@ int __kmp_fork_call(ident_t *loc, int gtid,
if (nthreads > 1) {
/* determine how many new threads we can use */
__kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
- nthreads = __kmp_reserve_threads(
- root, parent_team, master_tid, nthreads
-#if OMP_40_ENABLED
- /* AC: If we execute teams from parallel region (on host), then
- teams should be created but each can only have 1 thread if
- nesting is disabled. If teams called from serial region, then
- teams and their threads should be created regardless of the
- nesting setting. */
- ,
- enter_teams
-#endif /* OMP_40_ENABLED */
- );
+ /* AC: If we execute teams from parallel region (on host), then teams
+ should be created but each can only have 1 thread if nesting is
+ disabled. If teams called from serial region, then teams and their
+ threads should be created regardless of the nesting setting. */
+ nthreads = __kmp_reserve_threads(root, parent_team, master_tid,
+ nthreads, enter_teams);
if (nthreads == 1) {
// Free lock for single thread execution here; for multi-thread
// execution it will be freed later after team of threads created
@@ -1774,7 +1707,6 @@ int __kmp_fork_call(ident_t *loc, int gtid,
if (call_context == fork_context_intel) {
/* TODO this sucks, use the compiler itself to pass args! :) */
master_th->th.th_serial_team->t.t_ident = loc;
-#if OMP_40_ENABLED
if (!ap) {
// revert change made in __kmpc_serialized_parallel()
master_th->th.th_serial_team->t.t_level--;
@@ -1869,14 +1801,13 @@ int __kmp_fork_call(ident_t *loc, int gtid,
// AC: call special invoker for outer "parallel" of teams construct
invoker(gtid);
} else {
-#endif /* OMP_40_ENABLED */
argv = args;
for (i = argc - 1; i >= 0; --i)
// TODO: revert workaround for Intel(R) 64 tracker #96
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
*argv++ = va_arg(*ap, void *);
#else
- *argv++ = va_arg(ap, void *);
+ *argv++ = va_arg(ap, void *);
#endif
KMP_MB();
@@ -1942,9 +1873,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
master_th->th.ompt_thread_info.state = ompt_state_overhead;
}
#endif
-#if OMP_40_ENABLED
}
-#endif /* OMP_40_ENABLED */
} else if (call_context == fork_context_gnu) {
#if OMPT_SUPPORT
ompt_lw_taskteam_t lwt;
@@ -1981,10 +1910,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
// KMP_ASSERT( master_th->th.th_current_task->td_flags.executing == 1 );
master_th->th.th_current_task->td_flags.executing = 0;
-#if OMP_40_ENABLED
- if (!master_th->th.th_teams_microtask || level > teams_level)
-#endif /* OMP_40_ENABLED */
- {
+ if (!master_th->th.th_teams_microtask || level > teams_level) {
/* Increment our nested depth level */
KMP_ATOMIC_INC(&root->r.r_in_parallel);
}
@@ -1998,7 +1924,6 @@ int __kmp_fork_call(ident_t *loc, int gtid,
nthreads_icv = 0; // don't update
}
-#if OMP_40_ENABLED
// Figure out the proc_bind_policy for the new team.
kmp_proc_bind_t proc_bind = master_th->th.th_set_proc_bind;
kmp_proc_bind_t proc_bind_icv =
@@ -2024,25 +1949,17 @@ int __kmp_fork_call(ident_t *loc, int gtid,
// Reset for next parallel region
master_th->th.th_set_proc_bind = proc_bind_default;
-#endif /* OMP_40_ENABLED */
- if ((nthreads_icv > 0)
-#if OMP_40_ENABLED
- || (proc_bind_icv != proc_bind_default)
-#endif /* OMP_40_ENABLED */
- ) {
+ if ((nthreads_icv > 0) || (proc_bind_icv != proc_bind_default)) {
kmp_internal_control_t new_icvs;
copy_icvs(&new_icvs, &master_th->th.th_current_task->td_icvs);
new_icvs.next = NULL;
if (nthreads_icv > 0) {
new_icvs.nproc = nthreads_icv;
}
-
-#if OMP_40_ENABLED
if (proc_bind_icv != proc_bind_default) {
new_icvs.proc_bind = proc_bind_icv;
}
-#endif /* OMP_40_ENABLED */
/* allocate a new parallel team */
KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
@@ -2050,10 +1967,8 @@ int __kmp_fork_call(ident_t *loc, int gtid,
#if OMPT_SUPPORT
ompt_parallel_data,
#endif
-#if OMP_40_ENABLED
- proc_bind,
-#endif
- &new_icvs, argc USE_NESTED_HOT_ARG(master_th));
+ proc_bind, &new_icvs,
+ argc USE_NESTED_HOT_ARG(master_th));
} else {
/* allocate a new parallel team */
KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
@@ -2061,9 +1976,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
#if OMPT_SUPPORT
ompt_parallel_data,
#endif
-#if OMP_40_ENABLED
proc_bind,
-#endif
&master_th->th.th_current_task->td_icvs,
argc USE_NESTED_HOT_ARG(master_th));
}
@@ -2081,15 +1994,12 @@ int __kmp_fork_call(ident_t *loc, int gtid,
return_address);
#endif
KMP_CHECK_UPDATE(team->t.t_invoke, invoker); // TODO move to root, maybe
-// TODO: parent_team->t.t_level == INT_MAX ???
-#if OMP_40_ENABLED
+ // TODO: parent_team->t.t_level == INT_MAX ???
if (!master_th->th.th_teams_microtask || level > teams_level) {
-#endif /* OMP_40_ENABLED */
int new_level = parent_team->t.t_level + 1;
KMP_CHECK_UPDATE(team->t.t_level, new_level);
new_level = parent_team->t.t_active_level + 1;
KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
-#if OMP_40_ENABLED
} else {
// AC: Do not increase parallel level at start of the teams construct
int new_level = parent_team->t.t_level;
@@ -2097,17 +2007,12 @@ int __kmp_fork_call(ident_t *loc, int gtid,
new_level = parent_team->t.t_active_level;
KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
}
-#endif /* OMP_40_ENABLED */
kmp_r_sched_t new_sched = get__sched_2(parent_team, master_tid);
// set master's schedule as new run-time schedule
KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
-#if OMP_40_ENABLED
KMP_CHECK_UPDATE(team->t.t_cancel_request, cancel_noreq);
-#endif
-#if OMP_50_ENABLED
KMP_CHECK_UPDATE(team->t.t_def_allocator, master_th->th.th_def_allocator);
-#endif
// Update the floating point rounding in the team if required.
propagateFPControl(team);
@@ -2183,27 +2088,23 @@ int __kmp_fork_call(ident_t *loc, int gtid,
/* now, setup the arguments */
argv = (void **)team->t.t_argv;
-#if OMP_40_ENABLED
if (ap) {
-#endif /* OMP_40_ENABLED */
for (i = argc - 1; i >= 0; --i) {
// TODO: revert workaround for Intel(R) 64 tracker #96
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
void *new_argv = va_arg(*ap, void *);
#else
- void *new_argv = va_arg(ap, void *);
+ void *new_argv = va_arg(ap, void *);
#endif
KMP_CHECK_UPDATE(*argv, new_argv);
argv++;
}
-#if OMP_40_ENABLED
} else {
for (i = 0; i < argc; ++i) {
// Get args from parent team for teams construct
KMP_CHECK_UPDATE(argv[i], team->t.t_parent->t.t_argv[i]);
}
}
-#endif /* OMP_40_ENABLED */
/* now actually fork the threads */
KMP_CHECK_UPDATE(team->t.t_master_active, master_active);
@@ -2222,10 +2123,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
#if USE_ITT_BUILD
if (team->t.t_active_level == 1 // only report frames at level 1
-#if OMP_40_ENABLED
- && !master_th->th.th_teams_microtask // not in teams construct
-#endif /* OMP_40_ENABLED */
- ) {
+ && !master_th->th.th_teams_microtask) { // not in teams construct
#if USE_ITT_NOTIFY
if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
(__kmp_forkjoin_frames_mode == 3 ||
@@ -2263,12 +2161,9 @@ int __kmp_fork_call(ident_t *loc, int gtid,
}
#endif /* USE_ITT_BUILD */
-#if OMP_40_ENABLED
// AC: skip __kmp_internal_fork at teams construct, let only master
// threads execute
- if (ap)
-#endif /* OMP_40_ENABLED */
- {
+ if (ap) {
__kmp_internal_fork(loc, gtid, team);
KF_TRACE(10, ("__kmp_internal_fork : after : root=%p, team=%p, "
"master_th=%p, gtid=%d\n",
@@ -2285,7 +2180,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
team->t.t_id, team->t.t_pkfn));
} // END of timer KMP_fork_call block
-#if KMP_STATS_ENABLED && OMP_40_ENABLED
+#if KMP_STATS_ENABLED
// If beginning a teams construct, then change thread state
stats_state_e previous_state = KMP_GET_THREAD_STATE();
if (!ap) {
@@ -2297,7 +2192,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
KMP_ASSERT2(0, "cannot invoke microtask for MASTER thread");
}
-#if KMP_STATS_ENABLED && OMP_40_ENABLED
+#if KMP_STATS_ENABLED
// If was beginning of a teams construct, then reset thread state
if (!ap) {
KMP_SET_THREAD_STATE(previous_state);
@@ -2348,11 +2243,8 @@ void __kmp_join_call(ident_t *loc, int gtid
,
enum fork_context_e fork_context
#endif
-#if OMP_40_ENABLED
,
- int exit_teams
-#endif /* OMP_40_ENABLED */
- ) {
+ int exit_teams) {
KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_join_call);
kmp_team_t *team;
kmp_team_t *parent_team;
@@ -2389,7 +2281,6 @@ void __kmp_join_call(ident_t *loc, int gtid
#endif
if (team->t.t_serialized) {
-#if OMP_40_ENABLED
if (master_th->th.th_teams_microtask) {
// We are in teams construct
int level = team->t.t_level;
@@ -2405,7 +2296,6 @@ void __kmp_join_call(ident_t *loc, int gtid
team->t.t_serialized++;
}
}
-#endif /* OMP_40_ENABLED */
__kmpc_end_serialized_parallel(loc, gtid);
#if OMPT_SUPPORT
@@ -2419,20 +2309,14 @@ void __kmp_join_call(ident_t *loc, int gtid
master_active = team->t.t_master_active;
-#if OMP_40_ENABLED
- if (!exit_teams)
-#endif /* OMP_40_ENABLED */
- {
+ if (!exit_teams) {
// AC: No barrier for internal teams at exit from teams construct.
// But there is barrier for external team (league).
__kmp_internal_join(loc, gtid, team);
- }
-#if OMP_40_ENABLED
- else {
+ } else {
master_th->th.th_task_state =
0; // AC: no tasking in teams (out of any parallel)
}
-#endif /* OMP_40_ENABLED */
KMP_MB();
@@ -2449,11 +2333,8 @@ void __kmp_join_call(ident_t *loc, int gtid
}
// Mark end of "parallel" region for Intel(R) VTune(TM) analyzer.
- if (team->t.t_active_level == 1
-#if OMP_40_ENABLED
- && !master_th->th.th_teams_microtask /* not in teams construct */
-#endif /* OMP_40_ENABLED */
- ) {
+ if (team->t.t_active_level == 1 &&
+ !master_th->th.th_teams_microtask) { /* not in teams construct */
master_th->th.th_ident = loc;
// only one notification scheme (either "submit" or "forking/joined", not
// both)
@@ -2468,7 +2349,6 @@ void __kmp_join_call(ident_t *loc, int gtid
} // active_level == 1
#endif /* USE_ITT_BUILD */
-#if OMP_40_ENABLED
if (master_th->th.th_teams_microtask && !exit_teams &&
team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
team->t.t_level == master_th->th.th_teams_level + 1) {
@@ -2521,7 +2401,6 @@ void __kmp_join_call(ident_t *loc, int gtid
return;
}
-#endif /* OMP_40_ENABLED */
/* do cleanup and restore the parent team */
master_th->th.th_info.ds.ds_tid = team->t.t_master_tid;
@@ -2534,11 +2413,8 @@ void __kmp_join_call(ident_t *loc, int gtid
from the serial user code called after this function returns. */
__kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
-#if OMP_40_ENABLED
if (!master_th->th.th_teams_microtask ||
- team->t.t_level > master_th->th.th_teams_level)
-#endif /* OMP_40_ENABLED */
- {
+ team->t.t_level > master_th->th.th_teams_level) {
/* Decrement our nested depth level */
KMP_ATOMIC_DEC(&root->r.r_in_parallel);
}
@@ -2563,14 +2439,12 @@ void __kmp_join_call(ident_t *loc, int gtid
master_th, team));
__kmp_pop_current_task_from_thread(master_th);
-#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
+#if KMP_AFFINITY_SUPPORTED
// Restore master thread's partition.
master_th->th.th_first_place = team->t.t_first_place;
master_th->th.th_last_place = team->t.t_last_place;
-#endif /* OMP_40_ENABLED */
-#if OMP_50_ENABLED
+#endif // KMP_AFFINITY_SUPPORTED
master_th->th.th_def_allocator = team->t.t_def_allocator;
-#endif
updateHWFPControl(team);
@@ -2930,7 +2804,6 @@ int __kmp_get_ancestor_thread_num(int gtid, int level) {
if (level > ii)
return -1;
-#if OMP_40_ENABLED
if (thr->th.th_teams_microtask) {
// AC: we are in teams region where multiple nested teams have same level
int tlevel = thr->th.th_teams_level; // the level of the teams construct
@@ -2946,7 +2819,6 @@ int __kmp_get_ancestor_thread_num(int gtid, int level) {
}
}
}
-#endif
if (ii == level)
return __kmp_tid_from_gtid(gtid);
@@ -2990,7 +2862,6 @@ int __kmp_get_team_size(int gtid, int level) {
if (level > ii)
return -1;
-#if OMP_40_ENABLED
if (thr->th.th_teams_microtask) {
// AC: we are in teams region where multiple nested teams have same level
int tlevel = thr->th.th_teams_level; // the level of the teams construct
@@ -3006,7 +2877,6 @@ int __kmp_get_team_size(int gtid, int level) {
}
}
}
-#endif
while (ii > level) {
for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
@@ -3036,9 +2906,7 @@ kmp_r_sched_t __kmp_get_schedule_global() {
// KMP_SCHEDULE multiple times, and thus have different run-time schedules in
// different roots (even in OMP 2.5)
enum sched_type s = SCHEDULE_WITHOUT_MODIFIERS(__kmp_sched);
-#if OMP_45_ENABLED
enum sched_type sched_modifiers = SCHEDULE_GET_MODIFIERS(__kmp_sched);
-#endif
if (s == kmp_sch_static) {
// replace STATIC with more detailed schedule (balanced or greedy)
r_sched.r_sched_type = __kmp_static;
@@ -3048,9 +2916,7 @@ kmp_r_sched_t __kmp_get_schedule_global() {
} else { // (STATIC_CHUNKED), or (DYNAMIC_CHUNKED), or other
r_sched.r_sched_type = __kmp_sched;
}
-#if OMP_45_ENABLED
SCHEDULE_SET_MODIFIERS(r_sched.r_sched_type, sched_modifiers);
-#endif
if (__kmp_chunk < KMP_DEFAULT_CHUNK) {
// __kmp_chunk may be wrong here (if it was not ever set)
@@ -3126,9 +2992,7 @@ static void __kmp_allocate_team_arrays(kmp_team_t *team, int max_nth) {
/* setup dispatch buffers */
for (i = 0; i < num_disp_buff; ++i) {
team->t.t_disp_buffer[i].buffer_index = i;
-#if OMP_45_ENABLED
team->t.t_disp_buffer[i].doacross_buf_idx = i;
-#endif
}
}
@@ -3173,9 +3037,7 @@ static kmp_internal_control_t __kmp_get_global_icvs(void) {
kmp_r_sched_t r_sched =
__kmp_get_schedule_global(); // get current state of scheduling globals
-#if OMP_40_ENABLED
KMP_DEBUG_ASSERT(__kmp_nested_proc_bind.used > 0);
-#endif /* OMP_40_ENABLED */
kmp_internal_control_t g_icvs = {
0, // int serial_nesting_level; //corresponds to value of th_team_serialized
@@ -3195,11 +3057,9 @@ static kmp_internal_control_t __kmp_get_global_icvs(void) {
__kmp_dflt_max_active_levels, // int max_active_levels; //internal control
// for max_active_levels
r_sched, // kmp_r_sched_t sched; //internal control for runtime schedule
-// {sched,chunk} pair
-#if OMP_40_ENABLED
+ // {sched,chunk} pair
__kmp_nested_proc_bind.bind_types[0],
__kmp_default_device,
-#endif /* OMP_40_ENABLED */
NULL // struct kmp_internal_control *next;
};
@@ -3246,10 +3106,7 @@ static void __kmp_initialize_root(kmp_root_t *root) {
#if OMPT_SUPPORT
ompt_data_none, // root parallel id
#endif
-#if OMP_40_ENABLED
- __kmp_nested_proc_bind.bind_types[0],
-#endif
- &r_icvs,
+ __kmp_nested_proc_bind.bind_types[0], &r_icvs,
0 // argc
USE_NESTED_HOT_ARG(NULL) // master thread is unknown
);
@@ -3286,10 +3143,7 @@ static void __kmp_initialize_root(kmp_root_t *root) {
#if OMPT_SUPPORT
ompt_data_none, // root parallel id
#endif
-#if OMP_40_ENABLED
- __kmp_nested_proc_bind.bind_types[0],
-#endif
- &r_icvs,
+ __kmp_nested_proc_bind.bind_types[0], &r_icvs,
0 // argc
USE_NESTED_HOT_ARG(NULL) // master thread is unknown
);
@@ -3430,9 +3284,7 @@ void __kmp_print_structure(void) {
thread->th.th_team_master);
__kmp_printf(" Serialized?: %2d\n", thread->th.th_team_serialized);
__kmp_printf(" Set NProc: %2d\n", thread->th.th_set_nproc);
-#if OMP_40_ENABLED
__kmp_printf(" Set Proc Bind: %2d\n", thread->th.th_set_proc_bind);
-#endif
__kmp_print_structure_thread(" Next in pool: ",
thread->th.th_next_pool);
__kmp_printf("\n");
@@ -3798,15 +3650,12 @@ int __kmp_register_root(int initial_thread) {
if (!root_thread->th.th_serial_team) {
kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
KF_TRACE(10, ("__kmp_register_root: before serial_team\n"));
- root_thread->th.th_serial_team =
- __kmp_allocate_team(root, 1, 1,
+ root_thread->th.th_serial_team = __kmp_allocate_team(
+ root, 1, 1,
#if OMPT_SUPPORT
- ompt_data_none, // root parallel id
+ ompt_data_none, // root parallel id
#endif
-#if OMP_40_ENABLED
- proc_bind_default,
-#endif
- &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
+ proc_bind_default, &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
}
KMP_ASSERT(root_thread->th.th_serial_team);
KF_TRACE(10, ("__kmp_register_root: after serial_team = %p\n",
@@ -3857,21 +3706,17 @@ int __kmp_register_root(int initial_thread) {
KMP_INIT_BARRIER_STATE);
#if KMP_AFFINITY_SUPPORTED
-#if OMP_40_ENABLED
root_thread->th.th_current_place = KMP_PLACE_UNDEFINED;
root_thread->th.th_new_place = KMP_PLACE_UNDEFINED;
root_thread->th.th_first_place = KMP_PLACE_UNDEFINED;
root_thread->th.th_last_place = KMP_PLACE_UNDEFINED;
-#endif
if (TCR_4(__kmp_init_middle)) {
__kmp_affinity_set_init_mask(gtid, TRUE);
}
#endif /* KMP_AFFINITY_SUPPORTED */
-#if OMP_50_ENABLED
root_thread->th.th_def_allocator = __kmp_def_allocator;
root_thread->th.th_prev_level = 0;
root_thread->th.th_prev_num_threads = 1;
-#endif
kmp_cg_root_t *tmp = (kmp_cg_root_t *)__kmp_allocate(sizeof(kmp_cg_root_t));
tmp->cg_root = root_thread;
@@ -4052,7 +3897,6 @@ void __kmp_unregister_root_current_thread(int gtid) {
KMP_MB();
-#if OMP_45_ENABLED
kmp_info_t *thread = __kmp_threads[gtid];
kmp_team_t *team = thread->th.th_team;
kmp_task_team_t *task_team = thread->th.th_task_team;
@@ -4065,7 +3909,6 @@ void __kmp_unregister_root_current_thread(int gtid) {
#endif
__kmp_task_team_wait(thread, team USE_ITT_BUILD_ARG(NULL));
}
-#endif
__kmp_reset_root(gtid, root);
@@ -4149,12 +3992,10 @@ static void __kmp_initialize_info(kmp_info_t *this_thr, kmp_team_t *team,
this_thr->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
else // no tasking --> always safe to reap
this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
-#if OMP_40_ENABLED
this_thr->th.th_set_proc_bind = proc_bind_default;
#if KMP_AFFINITY_SUPPORTED
this_thr->th.th_new_place = this_thr->th.th_current_place;
#endif
-#endif
this_thr->th.th_root = master->th.th_root;
/* setup the thread's cache of the team structure */
@@ -4233,9 +4074,7 @@ static void __kmp_initialize_info(kmp_info_t *this_thr, kmp_team_t *team,
KMP_DEBUG_ASSERT(dispatch == &team->t.t_dispatch[tid]);
dispatch->th_disp_index = 0;
-#if OMP_45_ENABLED
dispatch->th_doacross_buf_idx = 0;
-#endif
if (!dispatch->th_disp_buffer) {
dispatch->th_disp_buffer =
(dispatch_private_info_t *)__kmp_allocate(disp_size);
@@ -4414,10 +4253,8 @@ kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
#if OMPT_SUPPORT
ompt_data_none, // root parallel id
#endif
-#if OMP_40_ENABLED
- proc_bind_default,
-#endif
- &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
+ proc_bind_default, &r_icvs,
+ 0 USE_NESTED_HOT_ARG(NULL));
}
KMP_ASSERT(serial_team);
serial_team->t.t_serialized = 0; // AC: the team created in reserve, not for
@@ -4461,17 +4298,15 @@ kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
new_thr->th.th_blocking = false;
#endif
-#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
+#if KMP_AFFINITY_SUPPORTED
new_thr->th.th_current_place = KMP_PLACE_UNDEFINED;
new_thr->th.th_new_place = KMP_PLACE_UNDEFINED;
new_thr->th.th_first_place = KMP_PLACE_UNDEFINED;
new_thr->th.th_last_place = KMP_PLACE_UNDEFINED;
#endif
-#if OMP_50_ENABLED
new_thr->th.th_def_allocator = __kmp_def_allocator;
new_thr->th.th_prev_level = 0;
new_thr->th.th_prev_num_threads = 1;
-#endif
TCW_4(new_thr->th.th_in_pool, FALSE);
new_thr->th.th_active_in_pool = FALSE;
@@ -4616,7 +4451,7 @@ __kmp_set_thread_affinity_mask_full_tmp(kmp_affin_mask_t *old_mask) {
}
#endif
-#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
+#if KMP_AFFINITY_SUPPORTED
// __kmp_partition_places() is the heart of the OpenMP 4.0 affinity mechanism.
// It calculates the worker + master thread's partition based upon the parent
@@ -4655,12 +4490,10 @@ static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
th->th.th_first_place = first_place;
th->th.th_last_place = last_place;
th->th.th_new_place = masters_place;
-#if OMP_50_ENABLED
if (__kmp_display_affinity && masters_place != th->th.th_current_place &&
team->t.t_display_affinity != 1) {
team->t.t_display_affinity = 1;
}
-#endif
KA_TRACE(100, ("__kmp_partition_places: master: T#%d(%d:%d) place %d "
"partition = [%d,%d]\n",
@@ -4694,12 +4527,10 @@ static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
th->th.th_first_place = first_place;
th->th.th_last_place = last_place;
th->th.th_new_place = place;
-#if OMP_50_ENABLED
if (__kmp_display_affinity && place != th->th.th_current_place &&
team->t.t_display_affinity != 1) {
team->t.t_display_affinity = 1;
}
-#endif
KA_TRACE(100, ("__kmp_partition_places: close: T#%d(%d:%d) place %d "
"partition = [%d,%d]\n",
@@ -4721,12 +4552,10 @@ static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
th->th.th_first_place = first_place;
th->th.th_last_place = last_place;
th->th.th_new_place = place;
-#if OMP_50_ENABLED
if (__kmp_display_affinity && place != th->th.th_current_place &&
team->t.t_display_affinity != 1) {
team->t.t_display_affinity = 1;
}
-#endif
s_count++;
if ((s_count == S) && rem && (gap_ct == gap)) {
@@ -4795,12 +4624,10 @@ static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
th->th.th_first_place = place;
th->th.th_new_place = place;
-#if OMP_50_ENABLED
if (__kmp_display_affinity && place != th->th.th_current_place &&
team->t.t_display_affinity != 1) {
team->t.t_display_affinity = 1;
}
-#endif
s_count = 1;
while (s_count < S) {
if (place == last_place) {
@@ -4892,12 +4719,10 @@ static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
th->th.th_first_place = first;
th->th.th_new_place = place;
th->th.th_last_place = last;
-#if OMP_50_ENABLED
if (__kmp_display_affinity && place != th->th.th_current_place &&
team->t.t_display_affinity != 1) {
team->t.t_display_affinity = 1;
}
-#endif
KA_TRACE(100,
("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
"partition = [%d,%d], spacing = %.4f\n",
@@ -4926,12 +4751,10 @@ static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
th->th.th_first_place = place;
th->th.th_last_place = place;
th->th.th_new_place = place;
-#if OMP_50_ENABLED
if (__kmp_display_affinity && place != th->th.th_current_place &&
team->t.t_display_affinity != 1) {
team->t.t_display_affinity = 1;
}
-#endif
s_count++;
if ((s_count == S) && rem && (gap_ct == gap)) {
@@ -4977,7 +4800,7 @@ static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
KA_TRACE(20, ("__kmp_partition_places: exit T#%d\n", team->t.t_id));
}
-#endif /* OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED */
+#endif // KMP_AFFINITY_SUPPORTED
/* allocate a new team data structure to use. take one off of the free pool if
available */
@@ -4986,9 +4809,7 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
#if OMPT_SUPPORT
ompt_data_t ompt_parallel_data,
#endif
-#if OMP_40_ENABLED
kmp_proc_bind_t new_proc_bind,
-#endif
kmp_internal_control_t *new_icvs,
int argc USE_NESTED_HOT_ARG(kmp_info_t *master)) {
KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_allocate_team);
@@ -5069,7 +4890,6 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
team->t.t_threads[0], team));
__kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
-#if OMP_40_ENABLED
#if KMP_AFFINITY_SUPPORTED
if ((team->t.t_size_changed == 0) &&
(team->t.t_proc_bind == new_proc_bind)) {
@@ -5088,7 +4908,6 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
#else
KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
#endif /* KMP_AFFINITY_SUPPORTED */
-#endif /* OMP_40_ENABLED */
} else if (team->t.t_nproc > new_nproc) {
KA_TRACE(20,
("__kmp_allocate_team: decreasing hot team thread count to %d\n",
@@ -5156,12 +4975,10 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
}
#endif
-#if OMP_40_ENABLED
KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
#if KMP_AFFINITY_SUPPORTED
__kmp_partition_places(team);
#endif
-#endif
} else { // team->t.t_nproc < new_nproc
#if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
kmp_affin_mask_t *old_mask;
@@ -5293,15 +5110,12 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
}
#endif
-#if OMP_40_ENABLED
KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
#if KMP_AFFINITY_SUPPORTED
__kmp_partition_places(team);
#endif
-#endif
} // Check changes in number of threads
-#if OMP_40_ENABLED
kmp_info_t *master = team->t.t_threads[0];
if (master->th.th_teams_microtask) {
for (f = 1; f < new_nproc; ++f) {
@@ -5312,7 +5126,6 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
thr->th.th_teams_size = master->th.th_teams_size;
}
}
-#endif /* OMP_40_ENABLED */
#if KMP_NESTED_HOT_TEAMS
if (level) {
// Sync barrier state for nested hot teams, not needed for outermost hot
@@ -5393,9 +5206,7 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
}
}
-#if OMP_40_ENABLED
team->t.t_proc_bind = new_proc_bind;
-#endif
KA_TRACE(20, ("__kmp_allocate_team: using team from pool %d.\n",
team->t.t_id));
@@ -5460,9 +5271,7 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
}
}
-#if OMP_40_ENABLED
team->t.t_proc_bind = new_proc_bind;
-#endif
#if OMPT_SUPPORT
__ompt_team_assign_id(team, ompt_parallel_data);
@@ -6395,9 +6204,7 @@ void __kmp_internal_end_thread(int gtid_req) {
}
}
#if KMP_DYNAMIC_LIB
-#if OMP_50_ENABLED
if (__kmp_pause_status != kmp_hard_paused)
-#endif
// AC: lets not shutdown the dynamic library at the exit of uber thread,
// because we will better shutdown later in the library destructor.
{
@@ -6878,11 +6685,9 @@ static void __kmp_do_serial_initialize(void) {
__kmp_env_print();
}
-#if OMP_40_ENABLED
if (__kmp_display_env || __kmp_display_env_verbose) {
__kmp_env_print_2();
}
-#endif // OMP_40_ENABLED
#if OMPT_SUPPORT
ompt_post_init();
@@ -7050,10 +6855,7 @@ void __kmp_parallel_initialize(void) {
if (!__kmp_init_middle) {
__kmp_do_middle_initialize();
}
-
-#if OMP_50_ENABLED
__kmp_resume_if_hard_paused();
-#endif
/* begin initialization */
KA_TRACE(10, ("__kmp_parallel_initialize: enter\n"));
@@ -7119,10 +6921,7 @@ void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
// this_thr->th.th_info.ds.ds_tid ] );
dispatch->th_disp_index = 0; /* reset the dispatch buffer counter */
-#if OMP_45_ENABLED
- dispatch->th_doacross_buf_idx =
- 0; /* reset the doacross dispatch buffer counter */
-#endif
+ dispatch->th_doacross_buf_idx = 0; // reset doacross dispatch buffer counter
if (__kmp_env_consistency_check)
__kmp_push_parallel(gtid, team->t.t_ident);
@@ -7221,7 +7020,6 @@ int __kmp_invoke_task_func(int gtid) {
return rc;
}
-#if OMP_40_ENABLED
void __kmp_teams_master(int gtid) {
// This routine is called by all master threads in teams construct
kmp_info_t *thr = __kmp_threads[gtid];
@@ -7283,7 +7081,6 @@ int __kmp_invoke_teams_master(int gtid) {
__kmp_run_after_invoked_task(gtid, 0, this_thr, team);
return 1;
}
-#endif /* OMP_40_ENABLED */
/* this sets the requested number of threads for the next parallel region
encountered by this team. since this should be enclosed in the forkjoin
@@ -7297,8 +7094,6 @@ void __kmp_push_num_threads(ident_t *id, int gtid, int num_threads) {
thr->th.th_set_nproc = num_threads;
}
-#if OMP_40_ENABLED
-
/* this sets the requested number of teams for the teams region and/or
the number of threads for the next parallel region encountered */
void __kmp_push_num_teams(ident_t *id, int gtid, int num_teams,
@@ -7356,8 +7151,6 @@ void __kmp_push_proc_bind(ident_t *id, int gtid, kmp_proc_bind_t proc_bind) {
thr->th.th_set_proc_bind = proc_bind;
}
-#endif /* OMP_40_ENABLED */
-
/* Launch the worker threads into the microtask. */
void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) {
@@ -7382,15 +7175,11 @@ void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) {
int i;
for (i = 0; i < __kmp_dispatch_num_buffers; ++i) {
team->t.t_disp_buffer[i].buffer_index = i;
-#if OMP_45_ENABLED
team->t.t_disp_buffer[i].doacross_buf_idx = i;
-#endif
}
} else {
team->t.t_disp_buffer[0].buffer_index = 0;
-#if OMP_45_ENABLED
team->t.t_disp_buffer[0].doacross_buf_idx = 0;
-#endif
}
KMP_MB(); /* Flush all pending memory write invalidates. */
@@ -7644,12 +7433,10 @@ void __kmp_cleanup(void) {
__kmp_nested_proc_bind.bind_types = NULL;
__kmp_nested_proc_bind.size = 0;
__kmp_nested_proc_bind.used = 0;
-#if OMP_50_ENABLED
if (__kmp_affinity_format) {
KMP_INTERNAL_FREE(__kmp_affinity_format);
__kmp_affinity_format = NULL;
}
-#endif
__kmp_i18n_catclose();
@@ -7865,7 +7652,6 @@ int __kmp_aux_get_num_teams() {
/* ------------------------------------------------------------------------ */
-#if OMP_50_ENABLED
/*
* Affinity Format Parser
*
@@ -8138,7 +7924,6 @@ void __kmp_aux_display_affinity(int gtid, const char *format) {
__kmp_fprintf(kmp_out, "%s" KMP_END_OF_LINE, buf.str);
__kmp_str_buf_free(&buf);
}
-#endif // OMP_50_ENABLED
/* ------------------------------------------------------------------------ */
@@ -8192,11 +7977,7 @@ void __kmp_aux_set_defaults(char const *str, int len) {
}
__kmp_env_initialize(str);
- if (__kmp_settings
-#if OMP_40_ENABLED
- || __kmp_display_env || __kmp_display_env_verbose
-#endif // OMP_40_ENABLED
- ) {
+ if (__kmp_settings || __kmp_display_env || __kmp_display_env_verbose) {
__kmp_env_print();
}
} // __kmp_aux_set_defaults
@@ -8360,8 +8141,6 @@ kmp_int32 __kmp_get_reduce_method(void) {
return ((__kmp_entry_thread()->th.th_local.packed_reduction_method) >> 8);
}
-#if OMP_50_ENABLED
-
// Soft pause sets up threads to ignore blocktime and just go to sleep.
// Spin-wait code checks __kmp_pause_status and reacts accordingly.
void __kmp_soft_pause() { __kmp_pause_status = kmp_soft_paused; }
@@ -8436,5 +8215,3 @@ int __kmp_pause_resource(kmp_pause_status_t level) {
return 1;
}
}
-
-#endif // OMP_50_ENABLED