author     Jonathan Peyton <jonathan.l.peyton@intel.com>    2015-09-21 18:11:22 +0000
committer  Jonathan Peyton <jonathan.l.peyton@intel.com>    2015-09-21 18:11:22 +0000
commit     b68a85d1f68203ce2d51c83bc43657a1cd21491a (patch)
tree       7eac31f92d23430f516ec9a28c060997a13adc6b /openmp/runtime/src
parent     be58ff82325e8892dc8199f2ca0acba887507895 (diff)
[OMPT] Simplify control variable logic for OMPT
Prior to this change, OMPT had a status flag, ompt_status, which could take several values. This was due to an earlier OMPT design that had several levels of enablement (ready, disabled, tracking state, tracking callbacks). The current OMPT design has OMPT support either on or off. This revision replaces ompt_status with a boolean flag, ompt_enabled, which simplifies the runtime logic for OMPT.

Patch by John Mellor-Crummey

Differential Revision: http://reviews.llvm.org/D12999

llvm-svn: 248189
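For illustration, here is a minimal standalone C sketch of the control-variable change. The ompt_status_t enum values and the ompt_enabled flag are taken directly from the patch; the callback-table entry and the notify_* helpers are hypothetical stand-ins for the runtime's real OMPT plumbing, not code from the repository.

```c
#include <stdio.h>
#include <stdint.h>

/* Old design (removed by this patch): track and track_callback share bit 0x2,
 * so "any tracking?" was tested with a bitwise AND, while callback dispatch
 * additionally required an exact match against ompt_status_track_callback. */
typedef enum {
    ompt_status_disabled       = 0x0,
    ompt_status_ready          = 0x1,
    ompt_status_track          = 0x2,
    ompt_status_track_callback = 0x6,
} ompt_status_t;

/* New design: a single on/off flag. */
int ompt_enabled = 0;

/* Hypothetical stand-in for one entry of the runtime's callback table. */
typedef void (*barrier_begin_cb_t)(uint64_t parallel_id, uint64_t task_id);
static barrier_begin_cb_t barrier_begin_callback = NULL;

static void notify_barrier_begin_old(ompt_status_t ompt_status,
                                     uint64_t parallel_id, uint64_t task_id) {
    /* Old guard: coarse bitwise check, then exact check before dispatch. */
    if (ompt_status & ompt_status_track) {
        if ((ompt_status == ompt_status_track_callback) && barrier_begin_callback) {
            barrier_begin_callback(parallel_id, task_id);
        }
    }
}

static void notify_barrier_begin_new(uint64_t parallel_id, uint64_t task_id) {
    /* New guard: one boolean plus the "callback registered?" check. */
    if (ompt_enabled && barrier_begin_callback) {
        barrier_begin_callback(parallel_id, task_id);
    }
}

static void print_barrier_begin(uint64_t parallel_id, uint64_t task_id) {
    printf("barrier_begin: parallel=%llu task=%llu\n",
           (unsigned long long)parallel_id, (unsigned long long)task_id);
}

int main(void) {
    barrier_begin_callback = print_barrier_begin;

    notify_barrier_begin_old(ompt_status_track_callback, 1, 42); /* fires */
    notify_barrier_begin_old(ompt_status_ready, 1, 42);          /* silent */

    ompt_enabled = 1;
    notify_barrier_begin_new(2, 43);                             /* fires */
    return 0;
}
```

The guard pattern in the new helper mirrors the post-patch code throughout the diff below, e.g. `if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_wait_atomic))`.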
Diffstat (limited to 'openmp/runtime/src')
-rw-r--r--  openmp/runtime/src/kmp_atomic.h       |  6
-rw-r--r--  openmp/runtime/src/kmp_barrier.cpp    | 40
-rw-r--r--  openmp/runtime/src/kmp_csupport.c     | 36
-rw-r--r--  openmp/runtime/src/kmp_dispatch.cpp   |  4
-rw-r--r--  openmp/runtime/src/kmp_gsupport.c     | 52
-rw-r--r--  openmp/runtime/src/kmp_lock.cpp       |  6
-rw-r--r--  openmp/runtime/src/kmp_runtime.c      | 81
-rw-r--r--  openmp/runtime/src/kmp_sched.cpp      |  8
-rw-r--r--  openmp/runtime/src/kmp_tasking.c      | 14
-rw-r--r--  openmp/runtime/src/kmp_wait_release.h |  4
-rw-r--r--  openmp/runtime/src/ompt-general.c     | 15
-rw-r--r--  openmp/runtime/src/ompt-internal.h    | 13
12 files changed, 122 insertions, 157 deletions
diff --git a/openmp/runtime/src/kmp_atomic.h b/openmp/runtime/src/kmp_atomic.h
index 419ad08670e..33feae2189f 100644
--- a/openmp/runtime/src/kmp_atomic.h
+++ b/openmp/runtime/src/kmp_atomic.h
@@ -371,7 +371,7 @@ static inline void
__kmp_acquire_atomic_lock( kmp_atomic_lock_t *lck, kmp_int32 gtid )
{
#if OMPT_SUPPORT && OMPT_TRACE
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_wait_atomic)) {
ompt_callbacks.ompt_callback(ompt_event_wait_atomic)(
(ompt_wait_id_t) lck);
@@ -381,7 +381,7 @@ __kmp_acquire_atomic_lock( kmp_atomic_lock_t *lck, kmp_int32 gtid )
__kmp_acquire_queuing_lock( lck, gtid );
#if OMPT_SUPPORT && OMPT_TRACE
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_acquired_atomic)) {
ompt_callbacks.ompt_callback(ompt_event_acquired_atomic)(
(ompt_wait_id_t) lck);
@@ -400,7 +400,7 @@ __kmp_release_atomic_lock( kmp_atomic_lock_t *lck, kmp_int32 gtid )
{
__kmp_release_queuing_lock( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_atomic)) {
ompt_callbacks.ompt_callback(ompt_event_release_atomic)(
(ompt_wait_id_t) lck);
diff --git a/openmp/runtime/src/kmp_barrier.cpp b/openmp/runtime/src/kmp_barrier.cpp
index 505daec8795..237f2984f2c 100644
--- a/openmp/runtime/src/kmp_barrier.cpp
+++ b/openmp/runtime/src/kmp_barrier.cpp
@@ -1057,25 +1057,23 @@ __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size,
gtid, __kmp_team_from_gtid(gtid)->t.t_id, __kmp_tid_from_gtid(gtid)));
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
#if OMPT_BLAME
- if (ompt_status == ompt_status_track_callback) {
- my_task_id = team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id;
- my_parallel_id = team->t.ompt_team_info.parallel_id;
+ my_task_id = team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id;
+ my_parallel_id = team->t.ompt_team_info.parallel_id;
#if OMPT_TRACE
- if (this_thr->th.ompt_thread_info.state == ompt_state_wait_single) {
- if (ompt_callbacks.ompt_callback(ompt_event_single_others_end)) {
- ompt_callbacks.ompt_callback(ompt_event_single_others_end)(
- my_parallel_id, my_task_id);
- }
- }
-#endif
- if (ompt_callbacks.ompt_callback(ompt_event_barrier_begin)) {
- ompt_callbacks.ompt_callback(ompt_event_barrier_begin)(
+ if (this_thr->th.ompt_thread_info.state == ompt_state_wait_single) {
+ if (ompt_callbacks.ompt_callback(ompt_event_single_others_end)) {
+ ompt_callbacks.ompt_callback(ompt_event_single_others_end)(
my_parallel_id, my_task_id);
}
- }
+ }
+#endif
+ if (ompt_callbacks.ompt_callback(ompt_event_barrier_begin)) {
+ ompt_callbacks.ompt_callback(ompt_event_barrier_begin)(
+ my_parallel_id, my_task_id);
+ }
#endif
// It is OK to report the barrier state after the barrier begin callback.
// According to the OMPT specification, a compliant implementation may
@@ -1280,10 +1278,9 @@ __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size,
gtid, __kmp_team_from_gtid(gtid)->t.t_id, __kmp_tid_from_gtid(gtid), status));
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
#if OMPT_BLAME
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_barrier_end)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_barrier_end)) {
ompt_callbacks.ompt_callback(ompt_event_barrier_end)(
my_parallel_id, my_task_id);
}
@@ -1385,7 +1382,7 @@ __kmp_join_barrier(int gtid)
#if OMPT_SUPPORT
#if OMPT_TRACE
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_barrier_begin)) {
ompt_callbacks.ompt_callback(ompt_event_barrier_begin)(
team->t.ompt_team_info.parallel_id,
@@ -1517,14 +1514,13 @@ __kmp_join_barrier(int gtid)
KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) leaving\n", gtid, team_id, tid));
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
#if OMPT_BLAME
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_barrier_end)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_barrier_end)) {
ompt_callbacks.ompt_callback(ompt_event_barrier_end)(
team->t.ompt_team_info.parallel_id,
team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
- }
+ }
#endif
// return to default state
diff --git a/openmp/runtime/src/kmp_csupport.c b/openmp/runtime/src/kmp_csupport.c
index 281f47924bb..b24537caa2f 100644
--- a/openmp/runtime/src/kmp_csupport.c
+++ b/openmp/runtime/src/kmp_csupport.c
@@ -304,7 +304,7 @@ __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
int tid = __kmp_tid_from_gtid( gtid );
kmp_info_t *master_th = __kmp_threads[ gtid ];
kmp_team_t *parent_team = master_th->th.th_team;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
}
@@ -339,7 +339,7 @@ __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
va_end( ap );
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = 0;
}
@@ -398,7 +398,7 @@ __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
#if OMPT_SUPPORT
kmp_team_t *parent_team = this_thr->th.th_team;
int tid = __kmp_tid_from_gtid( gtid );
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
}
@@ -432,7 +432,7 @@ __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = NULL;
}
@@ -747,7 +747,7 @@ __kmpc_master(ident_t *loc, kmp_int32 global_tid)
#if OMPT_SUPPORT && OMPT_TRACE
if (status) {
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_master_begin)) {
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
@@ -796,7 +796,7 @@ __kmpc_end_master(ident_t *loc, kmp_int32 global_tid)
#if OMPT_SUPPORT && OMPT_TRACE
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_master_end)) {
int tid = __kmp_tid_from_gtid( global_tid );
ompt_callbacks.ompt_callback(ompt_event_master_end)(
@@ -841,14 +841,13 @@ __kmpc_ordered( ident_t * loc, kmp_int32 gtid )
th = __kmp_threads[ gtid ];
#if OMPT_SUPPORT && OMPT_TRACE
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
/* OMPT state update */
th->th.ompt_thread_info.wait_id = (uint64_t) loc;
th->th.ompt_thread_info.state = ompt_state_wait_ordered;
/* OMPT event callback */
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_wait_ordered)(
th->th.ompt_thread_info.wait_id);
}
@@ -861,14 +860,13 @@ __kmpc_ordered( ident_t * loc, kmp_int32 gtid )
__kmp_parallel_deo( & gtid, & cid, loc );
#if OMPT_SUPPORT && OMPT_TRACE
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
/* OMPT state update */
th->th.ompt_thread_info.state = ompt_state_work_parallel;
th->th.ompt_thread_info.wait_id = 0;
/* OMPT event callback */
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)(
th->th.ompt_thread_info.wait_id);
}
@@ -908,7 +906,7 @@ __kmpc_end_ordered( ident_t * loc, kmp_int32 gtid )
__kmp_parallel_dxo( & gtid, & cid, loc );
#if OMPT_SUPPORT && OMPT_BLAME
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_release_ordered)(
th->th.ompt_thread_info.wait_id);
@@ -1287,7 +1285,7 @@ __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
__kmp_release_user_lock_with_checks( lck, global_tid );
#if OMPT_SUPPORT && OMPT_BLAME
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_critical)) {
ompt_callbacks.ompt_callback(ompt_event_release_critical)(
(uint64_t) lck);
@@ -1423,7 +1421,7 @@ __kmpc_single(ident_t *loc, kmp_int32 global_tid)
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
- if ((ompt_status == ompt_status_track_callback)) {
+ if (ompt_enabled) {
if (rc) {
if (ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)) {
ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)(
@@ -1465,7 +1463,7 @@ __kmpc_end_single(ident_t *loc, kmp_int32 global_tid)
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)) {
ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)(
team->t.ompt_team_info.parallel_id,
@@ -1491,7 +1489,7 @@ __kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid )
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_loop_end)) {
ompt_callbacks.ompt_callback(ompt_event_loop_end)(
team->t.ompt_team_info.parallel_id,
@@ -2141,7 +2139,7 @@ __kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
RELEASE_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_lock)) {
ompt_callbacks.ompt_callback(ompt_event_release_lock)((uint64_t) lck);
}
@@ -2202,7 +2200,7 @@ __kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
int release_status;
release_status = RELEASE_NESTED_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
- if (ompt_status == ompt_status_track_callback) {
+ if (ompt_enabled) {
if (release_status == KMP_LOCK_RELEASED) {
if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)) {
ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)(
diff --git a/openmp/runtime/src/kmp_dispatch.cpp b/openmp/runtime/src/kmp_dispatch.cpp
index c0bd71b12d4..a1d0fc44f2b 100644
--- a/openmp/runtime/src/kmp_dispatch.cpp
+++ b/openmp/runtime/src/kmp_dispatch.cpp
@@ -1209,7 +1209,7 @@ __kmp_dispatch_init(
#endif // ( KMP_STATIC_STEAL_ENABLED && USE_STEALING )
#if OMPT_SUPPORT && OMPT_TRACE
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
@@ -1373,7 +1373,7 @@ __kmp_dispatch_finish_chunk( int gtid, ident_t *loc )
#if OMPT_SUPPORT && OMPT_TRACE
#define OMPT_LOOP_END \
if (status == 0) { \
- if ((ompt_status == ompt_status_track_callback) && \
+ if (ompt_enabled && \
ompt_callbacks.ompt_callback(ompt_event_loop_end)) { \
ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL); \
ompt_task_info_t *task_info = __ompt_get_taskinfo(0); \
diff --git a/openmp/runtime/src/kmp_gsupport.c b/openmp/runtime/src/kmp_gsupport.c
index cff0e81873e..48487a3ffd3 100644
--- a/openmp/runtime/src/kmp_gsupport.c
+++ b/openmp/runtime/src/kmp_gsupport.c
@@ -259,7 +259,7 @@ __kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
ompt_frame_t *ompt_frame;
ompt_state_t enclosing_state;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
// get pointer to thread data structure
thr = __kmp_threads[*gtid];
@@ -276,7 +276,7 @@ __kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
task(data);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
// clear task frame
ompt_frame->exit_runtime_frame = NULL;
@@ -306,7 +306,7 @@ __kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
ompt_frame_t *ompt_frame;
ompt_state_t enclosing_state;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
thr = __kmp_threads[*gtid];
// save enclosing task state; set current state for task
enclosing_state = thr->th.ompt_thread_info.state;
@@ -324,7 +324,7 @@ __kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
task(data);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
// clear task frame
ompt_frame->exit_runtime_frame = NULL;
@@ -368,14 +368,13 @@ __kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *), mic
}
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
#if OMPT_TRACE
ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
// implicit task callback
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
team_info->parallel_id, task_info->task_id);
}
@@ -391,7 +390,7 @@ __kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid, void (*task)(void *
__kmp_serialized_parallel(loc, gtid);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
ompt_task_id_t ompt_task_id = __ompt_get_task_id_internal(0);
ompt_frame_t *ompt_frame = __ompt_get_task_frame_internal(0);
kmp_info_t *thr = __kmp_threads[gtid];
@@ -402,8 +401,7 @@ __kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid, void (*task)(void *
ompt_frame->exit_runtime_frame = NULL;
// parallel region callback
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) {
int team_size = 1;
ompt_callbacks.ompt_callback(ompt_event_parallel_begin)(
ompt_task_id, ompt_frame, ompt_parallel_id,
@@ -421,8 +419,7 @@ __kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid, void (*task)(void *
#if OMPT_TRACE
// implicit task callback
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
ompt_parallel_id, my_ompt_task_id);
}
@@ -441,7 +438,7 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), void *data, unsi
#if OMPT_SUPPORT
ompt_frame_t *parent_frame;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_frame = __ompt_get_task_frame_internal(0);
parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
}
@@ -462,7 +459,7 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), void *data, unsi
}
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_frame->reenter_runtime_frame = NULL;
}
#endif
@@ -485,7 +482,7 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
ompt_parallel_id_t parallel_id;
ompt_frame_t *ompt_frame = NULL;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
parallel_id = team_info->parallel_id;
@@ -495,7 +492,7 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);
#if OMPT_TRACE
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
@@ -510,7 +507,7 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
__kmp_free(lwt);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
// Since a lightweight task was destroyed, make sure that the
// remaining deepest task knows the stack frame where the runtime
// was reentered.
@@ -527,7 +524,7 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
thr->th.th_team);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
// Set reenter frame in parent task, which will become current task
// in the midst of join. This is needed before the end_parallel callback.
ompt_frame = __ompt_get_task_frame_internal(1);
@@ -541,7 +538,7 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
#endif
);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
ompt_frame->reenter_runtime_frame = NULL;
}
#endif
@@ -550,13 +547,12 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
__kmpc_end_serialized_parallel(&loc, gtid);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
// Record that we re-entered the runtime system in the frame that
// created the parallel region.
ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
parallel_id, task_info->task_id,
@@ -895,14 +891,14 @@ LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), \
#define OMPT_LOOP_PRE() \
ompt_frame_t *parent_frame; \
- if (ompt_status & ompt_status_track) { \
+ if (ompt_enabled) { \
parent_frame = __ompt_get_task_frame_internal(0); \
parent_frame->reenter_runtime_frame = __builtin_frame_address(0); \
}
#define OMPT_LOOP_POST() \
- if (ompt_status & ompt_status_track) { \
+ if (ompt_enabled) { \
parent_frame->reenter_runtime_frame = NULL; \
}
@@ -978,7 +974,7 @@ xexpand(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, void (*copy_fu
ompt_thread_info_t oldInfo;
kmp_info_t *thread;
kmp_taskdata_t *taskdata;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
// Store the threads states and restore them after the task
thread = __kmp_threads[ gtid ];
taskdata = KMP_TASK_TO_TASKDATA(task);
@@ -995,7 +991,7 @@ xexpand(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, void (*copy_fu
__kmpc_omp_task_complete_if0(&loc, gtid, task);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
thread->th.ompt_thread_info = oldInfo;
taskdata->ompt_task_info.frame.exit_runtime_frame = 0;
}
@@ -1094,7 +1090,7 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(void (*task) (void *), void *
#if OMPT_SUPPORT
ompt_frame_t *parent_frame;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_frame = __ompt_get_task_frame_internal(0);
parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
}
@@ -1117,7 +1113,7 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(void (*task) (void *), void *
}
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_frame->reenter_runtime_frame = NULL;
}
#endif
diff --git a/openmp/runtime/src/kmp_lock.cpp b/openmp/runtime/src/kmp_lock.cpp
index 21c87e63c1c..77e25aa176f 100644
--- a/openmp/runtime/src/kmp_lock.cpp
+++ b/openmp/runtime/src/kmp_lock.cpp
@@ -1315,8 +1315,7 @@ __kmp_acquire_queuing_lock_timed_template( kmp_queuing_lock_t *lck,
#endif
#if OMPT_SUPPORT
- if ((ompt_status & ompt_status_track) &&
- prev_state != ompt_state_undefined) {
+ if (ompt_enabled && prev_state != ompt_state_undefined) {
/* change the state before clearing wait_id */
this_thr->th.ompt_thread_info.state = prev_state;
this_thr->th.ompt_thread_info.wait_id = 0;
@@ -1332,8 +1331,7 @@ __kmp_acquire_queuing_lock_timed_template( kmp_queuing_lock_t *lck,
}
#if OMPT_SUPPORT
- if ((ompt_status & ompt_status_track) &&
- prev_state == ompt_state_undefined) {
+ if (ompt_enabled && prev_state == ompt_state_undefined) {
/* this thread will spin; set wait_id before entering wait state */
prev_state = this_thr->th.ompt_thread_info.state;
this_thr->th.ompt_thread_info.wait_id = (uint64_t) lck;
diff --git a/openmp/runtime/src/kmp_runtime.c b/openmp/runtime/src/kmp_runtime.c
index e02f5051fe4..4c75808735b 100644
--- a/openmp/runtime/src/kmp_runtime.c
+++ b/openmp/runtime/src/kmp_runtime.c
@@ -765,7 +765,7 @@ __kmp_parallel_dxo( int *gtid_ref, int *cid_ref, ident_t *loc_ref )
team->t.t_ordered.dt.t_value = ((tid + 1) % team->t.t_nproc );
#if OMPT_SUPPORT && OMPT_BLAME
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_ordered)) {
/* accept blame for "ordered" waiting */
kmp_info_t *this_thread = __kmp_threads[gtid];
@@ -1511,7 +1511,7 @@ __kmp_fork_call(
ompt_task_id_t my_task_id;
ompt_parallel_id_t my_parallel_id;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
ompt_parallel_id = __ompt_parallel_id_new(gtid);
ompt_task_id = __ompt_get_task_id_internal(0);
ompt_frame = __ompt_get_task_frame_internal(0);
@@ -1535,7 +1535,7 @@ __kmp_fork_call(
#endif
#if OMPT_SUPPORT
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) {
int team_size = master_set_numthreads;
@@ -1577,7 +1577,7 @@ __kmp_fork_call(
ompt_lw_taskteam_t lw_taskteam;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
__ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
unwrapped_task, ompt_parallel_id);
lw_taskteam.ompt_task_info.task_id = __ompt_task_id_new(gtid);
@@ -1589,8 +1589,7 @@ __kmp_fork_call(
/* OMPT implicit task begin */
my_task_id = lw_taskteam.ompt_task_info.task_id;
my_parallel_id = parent_team->t.ompt_team_info.parallel_id;
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
my_parallel_id, my_task_id);
}
@@ -1613,12 +1612,11 @@ __kmp_fork_call(
}
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
#if OMPT_TRACE
lw_taskteam.ompt_task_info.frame.exit_runtime_frame = 0;
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
ompt_parallel_id, ompt_task_id);
}
@@ -1628,8 +1626,7 @@ __kmp_fork_call(
lw_taskteam.ompt_task_info.task_id = ompt_task_id_none;
#endif
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
ompt_parallel_id, ompt_task_id,
OMPT_INVOKER(call_context));
@@ -1778,7 +1775,7 @@ __kmp_fork_call(
ompt_lw_taskteam_t lw_taskteam;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
__ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
unwrapped_task, ompt_parallel_id);
lw_taskteam.ompt_task_info.task_id = __ompt_task_id_new(gtid);
@@ -1788,8 +1785,7 @@ __kmp_fork_call(
#if OMPT_TRACE
my_task_id = lw_taskteam.ompt_task_info.task_id;
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
ompt_parallel_id, my_task_id);
}
@@ -1812,12 +1808,11 @@ __kmp_fork_call(
}
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
lw_taskteam.ompt_task_info.frame.exit_runtime_frame = 0;
#if OMPT_TRACE
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
ompt_parallel_id, ompt_task_id);
}
@@ -1827,8 +1822,7 @@ __kmp_fork_call(
// reset clear the task id only after unlinking the task
lw_taskteam.ompt_task_info.task_id = ompt_task_id_none;
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
ompt_parallel_id, ompt_task_id,
OMPT_INVOKER(call_context));
@@ -1883,7 +1877,7 @@ __kmp_fork_call(
ompt_lw_taskteam_t lw_taskteam;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
__ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
unwrapped_task, ompt_parallel_id);
lw_taskteam.ompt_task_info.task_id = __ompt_task_id_new(gtid);
@@ -1895,8 +1889,7 @@ __kmp_fork_call(
/* OMPT implicit task begin */
my_task_id = lw_taskteam.ompt_task_info.task_id;
my_parallel_id = ompt_parallel_id;
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
my_parallel_id, my_task_id);
}
@@ -1919,12 +1912,11 @@ __kmp_fork_call(
}
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
#if OMPT_TRACE
lw_taskteam.ompt_task_info.frame.exit_runtime_frame = 0;
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
my_parallel_id, my_task_id);
}
@@ -1934,8 +1926,7 @@ __kmp_fork_call(
// reset clear the task id only after unlinking the task
lw_taskteam.ompt_task_info.task_id = ompt_task_id_none;
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
ompt_parallel_id, ompt_task_id,
OMPT_INVOKER(call_context));
@@ -2245,7 +2236,7 @@ __kmp_fork_call(
KA_TRACE( 20, ("__kmp_fork_call: parallel exit T#%d\n", gtid ));
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
master_th->th.ompt_thread_info.state = ompt_state_overhead;
}
#endif
@@ -2310,7 +2301,7 @@ __kmp_join_call(ident_t *loc, int gtid
master_th->th.th_ident = loc;
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
master_th->th.ompt_thread_info.state = ompt_state_overhead;
}
#endif
@@ -2344,7 +2335,7 @@ __kmp_join_call(ident_t *loc, int gtid
__kmpc_end_serialized_parallel( loc, gtid );
#if OMPT_SUPPORT
- if (ompt_status == ompt_status_track_callback) {
+ if (ompt_enabled) {
__kmp_join_restore_state(master_th, parent_team);
}
#endif
@@ -2444,7 +2435,7 @@ __kmp_join_call(ident_t *loc, int gtid
}
#if OMPT_SUPPORT
- if (ompt_status == ompt_status_track_callback) {
+ if (ompt_enabled) {
__kmp_join_ompt(master_th, parent_team, parallel_id, fork_context);
}
#endif
@@ -2535,7 +2526,7 @@ __kmp_join_call(ident_t *loc, int gtid
__kmp_release_bootstrap_lock( &__kmp_forkjoin_lock );
#if OMPT_SUPPORT
- if (ompt_status == ompt_status_track_callback) {
+ if (ompt_enabled) {
__kmp_join_ompt(master_th, parent_team, parallel_id, fork_context);
}
#endif
@@ -3940,7 +3931,7 @@ __kmp_reset_root(int gtid, kmp_root_t *root)
#endif /* KMP_OS_WINDOWS */
#if OMPT_SUPPORT
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_thread_end)) {
int gtid = __kmp_get_gtid();
__ompt_thread_end(ompt_thread_initial, gtid);
@@ -5540,12 +5531,11 @@ __kmp_launch_thread( kmp_info_t *this_thr )
}
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
this_thr->th.ompt_thread_info.state = ompt_state_overhead;
this_thr->th.ompt_thread_info.wait_id = 0;
this_thr->th.ompt_thread_info.idle_frame = __builtin_frame_address(0);
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_thread_begin)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_thread_begin)) {
__ompt_thread_begin(ompt_thread_worker, gtid);
}
}
@@ -5560,7 +5550,7 @@ __kmp_launch_thread( kmp_info_t *this_thr )
KA_TRACE( 20, ("__kmp_launch_thread: T#%d waiting for work\n", gtid ));
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
this_thr->th.ompt_thread_info.state = ompt_state_idle;
}
#endif
@@ -5569,7 +5559,7 @@ __kmp_launch_thread( kmp_info_t *this_thr )
__kmp_fork_barrier( gtid, KMP_GTID_DNE );
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
this_thr->th.ompt_thread_info.state = ompt_state_overhead;
}
#endif
@@ -5587,7 +5577,7 @@ __kmp_launch_thread( kmp_info_t *this_thr )
updateHWFPControl (*pteam);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
// Initialize OMPT task id for implicit task.
int tid = __kmp_tid_from_gtid(gtid);
@@ -5605,7 +5595,7 @@ __kmp_launch_thread( kmp_info_t *this_thr )
KMP_ASSERT( rc );
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
/* no frame set while outside task */
int tid = __kmp_tid_from_gtid(gtid);
(*pteam)->t.t_implicit_task_taskdata[tid].ompt_task_info.frame.exit_runtime_frame = 0;
@@ -5624,7 +5614,7 @@ __kmp_launch_thread( kmp_info_t *this_thr )
TCR_SYNC_PTR((intptr_t)__kmp_global.g.g_done);
#if OMPT_SUPPORT
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_thread_end)) {
__ompt_thread_end(ompt_thread_worker, gtid);
}
@@ -6910,7 +6900,7 @@ __kmp_invoke_task_func( int gtid )
ompt_task_id_t my_task_id;
ompt_parallel_id_t my_parallel_id;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
exit_runtime_p = &(team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.exit_runtime_frame);
} else {
@@ -6920,7 +6910,7 @@ __kmp_invoke_task_func( int gtid )
#if OMPT_TRACE
my_task_id = team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id;
my_parallel_id = team->t.ompt_team_info.parallel_id;
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
my_parallel_id, my_task_id);
@@ -6939,9 +6929,8 @@ __kmp_invoke_task_func( int gtid )
}
#if OMPT_SUPPORT && OMPT_TRACE
- if (ompt_status & ompt_status_track) {
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
+ if (ompt_enabled) {
+ if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
my_parallel_id, my_task_id);
}
diff --git a/openmp/runtime/src/kmp_sched.cpp b/openmp/runtime/src/kmp_sched.cpp
index 5fb5e4779e5..a68c9af77d6 100644
--- a/openmp/runtime/src/kmp_sched.cpp
+++ b/openmp/runtime/src/kmp_sched.cpp
@@ -146,7 +146,7 @@ __kmp_for_static_init(
KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );
#if OMPT_SUPPORT && OMPT_TRACE
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
team_info->parallel_id, task_info->task_id,
@@ -192,7 +192,7 @@ __kmp_for_static_init(
KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );
#if OMPT_SUPPORT && OMPT_TRACE
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
team_info->parallel_id, task_info->task_id,
@@ -220,7 +220,7 @@ __kmp_for_static_init(
KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );
#if OMPT_SUPPORT && OMPT_TRACE
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
team_info->parallel_id, task_info->task_id,
@@ -352,7 +352,7 @@ __kmp_for_static_init(
KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );
#if OMPT_SUPPORT && OMPT_TRACE
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
team_info->parallel_id, task_info->task_id, team_info->microtask);
diff --git a/openmp/runtime/src/kmp_tasking.c b/openmp/runtime/src/kmp_tasking.c
index 647f241e4b5..abbfe3e2e76 100644
--- a/openmp/runtime/src/kmp_tasking.c
+++ b/openmp/runtime/src/kmp_tasking.c
@@ -451,7 +451,7 @@ __kmp_task_start( kmp_int32 gtid, kmp_task_t * task, kmp_taskdata_t * current_ta
gtid, taskdata ) );
#if OMPT_SUPPORT
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_task_begin)) {
kmp_taskdata_t *parent = taskdata->td_parent;
ompt_callbacks.ompt_callback(ompt_event_task_begin)(
@@ -608,7 +608,7 @@ __kmp_task_finish( kmp_int32 gtid, kmp_task_t *task, kmp_taskdata_t *resumed_tas
kmp_int32 children = 0;
#if OMPT_SUPPORT
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_task_end)) {
kmp_taskdata_t *parent = taskdata->td_parent;
ompt_callbacks.ompt_callback(ompt_event_task_end)(
@@ -1031,7 +1031,7 @@ __kmp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_tasking_flags_t *flags,
gtid, taskdata, taskdata->td_parent) );
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
taskdata->ompt_task_info.task_id = __ompt_task_id_new(gtid);
taskdata->ompt_task_info.function = (void*) task_entry;
taskdata->ompt_task_info.frame.exit_runtime_frame = NULL;
@@ -1118,7 +1118,7 @@ __kmp_invoke_task( kmp_int32 gtid, kmp_task_t *task, kmp_taskdata_t * current_ta
#if OMPT_SUPPORT
ompt_thread_info_t oldInfo;
kmp_info_t * thread;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
// Store the threads states and restore them after the task
thread = __kmp_threads[ gtid ];
oldInfo = thread->th.ompt_thread_info;
@@ -1166,7 +1166,7 @@ __kmp_invoke_task( kmp_int32 gtid, kmp_task_t *task, kmp_taskdata_t * current_ta
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
thread->th.ompt_thread_info = oldInfo;
taskdata->ompt_task_info.frame.exit_runtime_frame = 0;
}
@@ -1233,7 +1233,7 @@ __kmp_omp_task( kmp_int32 gtid, kmp_task_t * new_task, bool serialize_immediate
kmp_taskdata_t * new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
new_taskdata->ompt_task_info.frame.reenter_runtime_frame =
__builtin_frame_address(0);
}
@@ -1254,7 +1254,7 @@ __kmp_omp_task( kmp_int32 gtid, kmp_task_t * new_task, bool serialize_immediate
}
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
new_taskdata->ompt_task_info.frame.reenter_runtime_frame = 0;
}
#endif
diff --git a/openmp/runtime/src/kmp_wait_release.h b/openmp/runtime/src/kmp_wait_release.h
index 62d05605ae4..261df2762f6 100644
--- a/openmp/runtime/src/kmp_wait_release.h
+++ b/openmp/runtime/src/kmp_wait_release.h
@@ -97,7 +97,7 @@ static inline void __kmp_wait_template(kmp_info_t *this_thr, C *flag, int final_
#if OMPT_SUPPORT && OMPT_BLAME
ompt_state_t ompt_state = this_thr->th.ompt_thread_info.state;
- if (ompt_status == ompt_status_track_callback &&
+ if (ompt_enabled &&
ompt_state != ompt_state_undefined) {
if (ompt_state == ompt_state_idle) {
if (ompt_callbacks.ompt_callback(ompt_event_idle_begin)) {
@@ -237,7 +237,7 @@ static inline void __kmp_wait_template(kmp_info_t *this_thr, C *flag, int final_
}
#if OMPT_SUPPORT && OMPT_BLAME
- if (ompt_status == ompt_status_track_callback &&
+ if (ompt_enabled &&
ompt_state != ompt_state_undefined) {
if (ompt_state == ompt_state_idle) {
if (ompt_callbacks.ompt_callback(ompt_event_idle_end)) {
diff --git a/openmp/runtime/src/ompt-general.c b/openmp/runtime/src/ompt-general.c
index de588a96632..3d2aba2f388 100644
--- a/openmp/runtime/src/ompt-general.c
+++ b/openmp/runtime/src/ompt-general.c
@@ -64,8 +64,7 @@ typedef void (*ompt_initialize_t) (
* global variables
****************************************************************************/
-ompt_status_t ompt_status = ompt_status_ready;
-
+int ompt_enabled = 0;
ompt_state_info_t ompt_state_info[] = {
#define ompt_state_macro(state, code) { # state, state },
@@ -126,14 +125,13 @@ void ompt_pre_init()
switch(tool_setting) {
case omp_tool_disabled:
- ompt_status = ompt_status_disabled;
break;
case omp_tool_unset:
case omp_tool_enabled:
ompt_initialize_fn = ompt_tool();
if (ompt_initialize_fn) {
- ompt_status = ompt_status_track_callback;
+ ompt_enabled = 1;
}
break;
@@ -162,7 +160,7 @@ void ompt_post_init()
//--------------------------------------------------
// Initialize the tool if so indicated.
//--------------------------------------------------
- if (ompt_status == ompt_status_track_callback) {
+ if (ompt_enabled) {
ompt_initialize_fn(ompt_fn_lookup, ompt_get_runtime_version(),
OMPT_VERSION);
@@ -182,13 +180,13 @@ void ompt_post_init()
void ompt_fini()
{
- if (ompt_status == ompt_status_track_callback) {
+ if (ompt_enabled) {
if (ompt_callbacks.ompt_callback(ompt_event_runtime_shutdown)) {
ompt_callbacks.ompt_callback(ompt_event_runtime_shutdown)();
}
}
- ompt_status = ompt_status_disabled;
+ ompt_enabled = 0;
}
@@ -426,8 +424,7 @@ OMPT_API_ROUTINE int ompt_get_ompt_version()
_OMP_EXTERN void ompt_control(uint64_t command, uint64_t modifier)
{
- if (ompt_status == ompt_status_track_callback &&
- ompt_callbacks.ompt_callback(ompt_event_control)) {
+ if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_control)) {
ompt_callbacks.ompt_callback(ompt_event_control)(command, modifier);
}
}
diff --git a/openmp/runtime/src/ompt-internal.h b/openmp/runtime/src/ompt-internal.h
index 6d9f6f0291f..64e8d2e8fd6 100644
--- a/openmp/runtime/src/ompt-internal.h
+++ b/openmp/runtime/src/ompt-internal.h
@@ -14,16 +14,6 @@
#define ompt_callback(e) e ## _callback
-/* track and track_callback share a bit so that one can test whether either is
- * set by anding a bit.
- */
-typedef enum {
- ompt_status_disabled = 0x0,
- ompt_status_ready = 0x1,
- ompt_status_track = 0x2,
- ompt_status_track_callback = 0x6,
-} ompt_status_t;
-
typedef struct ompt_callbacks_s {
#define ompt_event_macro(event, callback, eventid) callback ompt_callback(event);
@@ -70,7 +60,6 @@ typedef struct {
} ompt_thread_info_t;
-extern ompt_status_t ompt_status;
extern ompt_callbacks_t ompt_callbacks;
#ifdef __cplusplus
@@ -81,6 +70,8 @@ void ompt_pre_init(void);
void ompt_post_init(void);
void ompt_fini(void);
+extern int ompt_enabled;
+
#ifdef __cplusplus
};
#endif