author     Jonathan Peyton <jonathan.l.peyton@intel.com>  2015-09-21 18:11:22 +0000
committer  Jonathan Peyton <jonathan.l.peyton@intel.com>  2015-09-21 18:11:22 +0000
commit     b68a85d1f68203ce2d51c83bc43657a1cd21491a (patch)
tree       7eac31f92d23430f516ec9a28c060997a13adc6b /openmp/runtime/src/kmp_csupport.c
parent     be58ff82325e8892dc8199f2ca0acba887507895 (diff)
[OMPT] Simplify control variable logic for OMPT
Prior to this change, OMPT had a status flag ompt_status, which could take several values. This was due to an earlier OMPT design that had several levels of enablement (ready, disabled, tracking state, tracking callbacks). The current OMPT design has OMPT support either on or off. This revision replaces ompt_status with a boolean flag ompt_enabled, which simplifies the runtime logic for OMPT.

Patch by John Mellor-Crummey

Differential Revision: http://reviews.llvm.org/D12999

llvm-svn: 248189
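To make the simplification concrete, below is a minimal C sketch, not taken from the commit, of the two checking patterns this diff rewrites: the old bitmask and exact-value tests on ompt_status versus the single ompt_enabled flag. The enum values, the wait_ordered_callback slot, and the two helper functions are illustrative assumptions modeled on identifiers visible in the diff, not the runtime's real definitions.

/*
 * Minimal sketch (not part of the commit) contrasting the old and new
 * OMPT enablement checks.  Enum values are chosen here only so that
 * "track callbacks" implies "track state"; the real definitions may differ.
 */
#include <stdint.h>

/* Old design: several levels of enablement encoded in one variable. */
typedef enum {
    ompt_status_disabled       = 0x0,
    ompt_status_ready          = 0x1,
    ompt_status_track          = 0x2,
    ompt_status_track_callback = 0x6
} ompt_status_t;

static ompt_status_t ompt_status = ompt_status_track_callback;

/* New design: OMPT support is simply on or off. */
static int ompt_enabled = 1;

typedef void (*ompt_wait_callback_t)(uint64_t wait_id);
static ompt_wait_callback_t wait_ordered_callback;   /* placeholder slot */

/* Before: state updates guarded by a bitmask test, callback dispatch
   guarded by an additional exact-value test. */
static void old_style_check(uint64_t wait_id) {
    if (ompt_status & ompt_status_track) {
        /* ... update per-thread OMPT state here ... */
        if ((ompt_status == ompt_status_track_callback) &&
            wait_ordered_callback) {
            wait_ordered_callback(wait_id);
        }
    }
}

/* After: one boolean flag; only the callback pointer itself still
   needs to be checked before dispatch. */
static void new_style_check(uint64_t wait_id) {
    if (ompt_enabled) {
        /* ... update per-thread OMPT state here ... */
        if (wait_ordered_callback) {
            wait_ordered_callback(wait_id);
        }
    }
}

This is the pattern visible throughout the hunks below: checks of the form "ompt_status & ompt_status_track" and "ompt_status == ompt_status_track_callback" collapse to "ompt_enabled", and callback dispatch already nested inside an enabled block keeps only the test of the callback pointer.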
Diffstat (limited to 'openmp/runtime/src/kmp_csupport.c')
-rw-r--r--  openmp/runtime/src/kmp_csupport.c | 36
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/openmp/runtime/src/kmp_csupport.c b/openmp/runtime/src/kmp_csupport.c
index 281f47924bb..b24537caa2f 100644
--- a/openmp/runtime/src/kmp_csupport.c
+++ b/openmp/runtime/src/kmp_csupport.c
@@ -304,7 +304,7 @@ __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
int tid = __kmp_tid_from_gtid( gtid );
kmp_info_t *master_th = __kmp_threads[ gtid ];
kmp_team_t *parent_team = master_th->th.th_team;
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
}
@@ -339,7 +339,7 @@ __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
va_end( ap );
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = 0;
}
@@ -398,7 +398,7 @@ __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
#if OMPT_SUPPORT
kmp_team_t *parent_team = this_thr->th.th_team;
int tid = __kmp_tid_from_gtid( gtid );
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
}
@@ -432,7 +432,7 @@ __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
);
#if OMPT_SUPPORT
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
parent_team->t.t_implicit_task_taskdata[tid].
ompt_task_info.frame.reenter_runtime_frame = NULL;
}
@@ -747,7 +747,7 @@ __kmpc_master(ident_t *loc, kmp_int32 global_tid)
#if OMPT_SUPPORT && OMPT_TRACE
if (status) {
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_master_begin)) {
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
@@ -796,7 +796,7 @@ __kmpc_end_master(ident_t *loc, kmp_int32 global_tid)
#if OMPT_SUPPORT && OMPT_TRACE
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_master_end)) {
int tid = __kmp_tid_from_gtid( global_tid );
ompt_callbacks.ompt_callback(ompt_event_master_end)(
@@ -841,14 +841,13 @@ __kmpc_ordered( ident_t * loc, kmp_int32 gtid )
th = __kmp_threads[ gtid ];
#if OMPT_SUPPORT && OMPT_TRACE
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
/* OMPT state update */
th->th.ompt_thread_info.wait_id = (uint64_t) loc;
th->th.ompt_thread_info.state = ompt_state_wait_ordered;
/* OMPT event callback */
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_wait_ordered)(
th->th.ompt_thread_info.wait_id);
}
@@ -861,14 +860,13 @@ __kmpc_ordered( ident_t * loc, kmp_int32 gtid )
__kmp_parallel_deo( & gtid, & cid, loc );
#if OMPT_SUPPORT && OMPT_TRACE
- if (ompt_status & ompt_status_track) {
+ if (ompt_enabled) {
/* OMPT state update */
th->th.ompt_thread_info.state = ompt_state_work_parallel;
th->th.ompt_thread_info.wait_id = 0;
/* OMPT event callback */
- if ((ompt_status == ompt_status_track_callback) &&
- ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) {
+ if (ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)(
th->th.ompt_thread_info.wait_id);
}
@@ -908,7 +906,7 @@ __kmpc_end_ordered( ident_t * loc, kmp_int32 gtid )
__kmp_parallel_dxo( & gtid, & cid, loc );
#if OMPT_SUPPORT && OMPT_BLAME
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_release_ordered)(
th->th.ompt_thread_info.wait_id);
@@ -1287,7 +1285,7 @@ __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
__kmp_release_user_lock_with_checks( lck, global_tid );
#if OMPT_SUPPORT && OMPT_BLAME
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_critical)) {
ompt_callbacks.ompt_callback(ompt_event_release_critical)(
(uint64_t) lck);
@@ -1423,7 +1421,7 @@ __kmpc_single(ident_t *loc, kmp_int32 global_tid)
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
- if ((ompt_status == ompt_status_track_callback)) {
+ if (ompt_enabled) {
if (rc) {
if (ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)) {
ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)(
@@ -1465,7 +1463,7 @@ __kmpc_end_single(ident_t *loc, kmp_int32 global_tid)
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)) {
ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)(
team->t.ompt_team_info.parallel_id,
@@ -1491,7 +1489,7 @@ __kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid )
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_loop_end)) {
ompt_callbacks.ompt_callback(ompt_event_loop_end)(
team->t.ompt_team_info.parallel_id,
@@ -2141,7 +2139,7 @@ __kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
RELEASE_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
- if ((ompt_status == ompt_status_track_callback) &&
+ if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_lock)) {
ompt_callbacks.ompt_callback(ompt_event_release_lock)((uint64_t) lck);
}
@@ -2202,7 +2200,7 @@ __kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
int release_status;
release_status = RELEASE_NESTED_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
- if (ompt_status == ompt_status_track_callback) {
+ if (ompt_enabled) {
if (release_status == KMP_LOCK_RELEASED) {
if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)) {
ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)(