Diffstat (limited to 'openmp/runtime/src/kmp_csupport.cpp')
| -rw-r--r-- | openmp/runtime/src/kmp_csupport.cpp | 208 |
1 file changed, 104 insertions(+), 104 deletions(-)
diff --git a/openmp/runtime/src/kmp_csupport.cpp b/openmp/runtime/src/kmp_csupport.cpp
index 6a63834ce8b..4c627201f16 100644
--- a/openmp/runtime/src/kmp_csupport.cpp
+++ b/openmp/runtime/src/kmp_csupport.cpp
@@ -293,7 +293,7 @@ void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...) { va_start(ap, microtask); #if OMPT_SUPPORT
- omp_frame_t *ompt_frame;
+ ompt_frame_t *ompt_frame;
 if (ompt_enabled.enabled) { kmp_info_t *master_th = __kmp_threads[gtid]; kmp_team_t *parent_team = master_th->th.th_team;
@@ -305,7 +305,7 @@ void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...) { ompt_frame = &( parent_team->t.t_implicit_task_taskdata[tid].ompt_task_info.frame); }
- ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
+ ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 OMPT_STORE_RETURN_ADDRESS(gtid); } #endif
@@ -395,7 +395,7 @@ void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, int tid = __kmp_tid_from_gtid(gtid); if (ompt_enabled.enabled) { parent_team->t.t_implicit_task_taskdata[tid]
- .ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
+ .ompt_task_info.frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 } OMPT_STORE_RETURN_ADDRESS(gtid); #endif
@@ -506,8 +506,8 @@ void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid) { #if OMPT_SUPPORT if (ompt_enabled.enabled &&
- this_thr->th.ompt_thread_info.state != omp_state_overhead) {
- OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame = NULL;
+ this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
+ OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame = ompt_data_none;
 if (ompt_enabled.ompt_callback_implicit_task) { ompt_callbacks.ompt_callback(ompt_callback_implicit_task)( ompt_scope_end, NULL, OMPT_CUR_TASK_DATA(this_thr), 1,
@@ -524,7 +524,7 @@ void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid) { ompt_parallel_invoker_program, OMPT_LOAD_RETURN_ADDRESS(global_tid)); } __ompt_lw_taskteam_unlink(this_thr);
- this_thr->th.ompt_thread_info.state = omp_state_overhead;
+ this_thr->th.ompt_thread_info.state = ompt_state_overhead;
 } #endif
@@ -606,8 +606,8 @@ void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid) { #if OMPT_SUPPORT if (ompt_enabled.enabled) this_thr->th.ompt_thread_info.state =
- ((this_thr->th.th_team_serialized) ? omp_state_work_serial
- : omp_state_work_parallel);
+ ((this_thr->th.th_team_serialized) ? ompt_state_work_serial
+ : ompt_state_work_parallel);
 #endif }
@@ -705,11 +705,11 @@ void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid) { } #if OMPT_SUPPORT
- omp_frame_t *ompt_frame;
+ ompt_frame_t *ompt_frame;
 if (ompt_enabled.enabled) { __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
- if (ompt_frame->enter_frame == NULL)
- ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
+ if (ompt_frame->enter_frame.ptr == NULL)
+ ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 OMPT_STORE_RETURN_ADDRESS(global_tid); } #endif
@@ -724,7 +724,7 @@ void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid) { __kmp_barrier(bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL); #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.enabled) {
- ompt_frame->enter_frame = NULL;
+ ompt_frame->enter_frame = ompt_data_none;
 } #endif }
@@ -843,22 +843,22 @@ void __kmpc_ordered(ident_t *loc, kmp_int32 gtid) { #if OMPT_SUPPORT && OMPT_OPTIONAL kmp_team_t *team;
- omp_wait_id_t lck;
+ ompt_wait_id_t lck;
 void *codeptr_ra; if (ompt_enabled.enabled) { OMPT_STORE_RETURN_ADDRESS(gtid); team = __kmp_team_from_gtid(gtid);
- lck = (omp_wait_id_t)&team->t.t_ordered.dt.t_value;
+ lck = (ompt_wait_id_t)&team->t.t_ordered.dt.t_value;
 /* OMPT state update */ th->th.ompt_thread_info.wait_id = lck;
- th->th.ompt_thread_info.state = omp_state_wait_ordered;
+ th->th.ompt_thread_info.state = ompt_state_wait_ordered;
 /* OMPT event callback */ codeptr_ra = OMPT_LOAD_RETURN_ADDRESS(gtid); if (ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_ordered, omp_lock_hint_none, kmp_mutex_impl_spin,
- (omp_wait_id_t)lck, codeptr_ra);
+ (ompt_wait_id_t)lck, codeptr_ra);
 } } #endif
@@ -871,13 +871,13 @@ void __kmpc_ordered(ident_t *loc, kmp_int32 gtid) { #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.enabled) { /* OMPT state update */
- th->th.ompt_thread_info.state = omp_state_work_parallel;
+ th->th.ompt_thread_info.state = ompt_state_work_parallel;
 th->th.ompt_thread_info.wait_id = 0; /* OMPT event callback */ if (ompt_enabled.ompt_callback_mutex_acquired) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
- ompt_mutex_ordered, (omp_wait_id_t)lck, codeptr_ra);
+ ompt_mutex_ordered, (ompt_wait_id_t)lck, codeptr_ra);
 } } #endif
@@ -917,7 +917,7 @@ void __kmpc_end_ordered(ident_t *loc, kmp_int32 gtid) { if (ompt_enabled.ompt_callback_mutex_released) { ompt_callbacks.ompt_callback(ompt_callback_mutex_released)( ompt_mutex_ordered,
- (omp_wait_id_t)&__kmp_team_from_gtid(gtid)->t.t_ordered.dt.t_value,
+ (ompt_wait_id_t)&__kmp_team_from_gtid(gtid)->t.t_ordered.dt.t_value,
 OMPT_LOAD_RETURN_ADDRESS(gtid)); } #endif
@@ -1144,7 +1144,7 @@ void __kmpc_critical(ident_t *loc, kmp_int32 global_tid, #else KMP_COUNT_BLOCK(OMP_CRITICAL); #if OMPT_SUPPORT && OMPT_OPTIONAL
- omp_state_t prev_state = omp_state_undefined;
+ ompt_state_t prev_state = ompt_state_undefined;
 ompt_thread_info_t ti; #endif kmp_user_lock_p lck;
@@ -1188,15 +1188,15 @@ void __kmpc_critical(ident_t *loc, kmp_int32 global_tid, ti = __kmp_threads[global_tid]->th.ompt_thread_info; /* OMPT state update */ prev_state = ti.state;
- ti.wait_id = (omp_wait_id_t)lck;
- ti.state = omp_state_wait_critical;
+ ti.wait_id = (ompt_wait_id_t)lck;
+ ti.state = ompt_state_wait_critical;
 /* OMPT event callback */ codeptr_ra = OMPT_LOAD_RETURN_ADDRESS(gtid); if (ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_critical, omp_lock_hint_none, __ompt_get_mutex_impl_type(),
- (omp_wait_id_t)crit, codeptr_ra);
+ (ompt_wait_id_t)crit, codeptr_ra);
 } } #endif
@@ -1216,7 +1216,7 @@ void __kmpc_critical(ident_t *loc, kmp_int32 global_tid, /* OMPT event callback */ if (ompt_enabled.ompt_callback_mutex_acquired) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
- ompt_mutex_critical, (omp_wait_id_t)crit, codeptr_ra);
+ ompt_mutex_critical, (ompt_wait_id_t)crit, codeptr_ra);
 } } #endif
@@ -1363,7 +1363,7 @@ void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid, KMP_COUNT_BLOCK(OMP_CRITICAL); kmp_user_lock_p lck; #if OMPT_SUPPORT && OMPT_OPTIONAL
- omp_state_t prev_state = omp_state_undefined;
+ ompt_state_t prev_state = ompt_state_undefined;
 ompt_thread_info_t ti; // This is the case, if called from __kmpc_critical: void *codeptr = OMPT_LOAD_RETURN_ADDRESS(global_tid);
@@ -1402,14 +1402,14 @@ void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid, ti = __kmp_threads[global_tid]->th.ompt_thread_info; /* OMPT state update */ prev_state = ti.state;
- ti.wait_id = (omp_wait_id_t)lck;
- ti.state = omp_state_wait_critical;
+ ti.wait_id = (ompt_wait_id_t)lck;
+ ti.state = ompt_state_wait_critical;
 /* OMPT event callback */ if (ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_critical, (unsigned int)hint,
- __ompt_get_mutex_impl_type(crit), (omp_wait_id_t)crit, codeptr);
+ __ompt_get_mutex_impl_type(crit), (ompt_wait_id_t)crit, codeptr);
 } } #endif
@@ -1440,14 +1440,14 @@ void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid, ti = __kmp_threads[global_tid]->th.ompt_thread_info; /* OMPT state update */ prev_state = ti.state;
- ti.wait_id = (omp_wait_id_t)lck;
- ti.state = omp_state_wait_critical;
+ ti.wait_id = (ompt_wait_id_t)lck;
+ ti.state = ompt_state_wait_critical;
 /* OMPT event callback */ if (ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_critical, (unsigned int)hint,
- __ompt_get_mutex_impl_type(0, ilk), (omp_wait_id_t)crit, codeptr);
+ __ompt_get_mutex_impl_type(0, ilk), (ompt_wait_id_t)crit, codeptr);
 } } #endif
@@ -1467,7 +1467,7 @@ void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid, /* OMPT event callback */ if (ompt_enabled.ompt_callback_mutex_acquired) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
- ompt_mutex_critical, (omp_wait_id_t)crit, codeptr);
+ ompt_mutex_critical, (ompt_wait_id_t)crit, codeptr);
 } } #endif
@@ -1565,7 +1565,7 @@ void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, OMPT_STORE_RETURN_ADDRESS(global_tid); if (ompt_enabled.ompt_callback_mutex_released) { ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
- ompt_mutex_critical, (omp_wait_id_t)crit, OMPT_LOAD_RETURN_ADDRESS(0));
+ ompt_mutex_critical, (ompt_wait_id_t)crit, OMPT_LOAD_RETURN_ADDRESS(0));
 } #endif
@@ -1594,11 +1594,11 @@ kmp_int32 __kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid) { __kmp_check_barrier(global_tid, ct_barrier, loc); #if OMPT_SUPPORT
- omp_frame_t *ompt_frame;
+ ompt_frame_t *ompt_frame;
 if (ompt_enabled.enabled) { __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
- if (ompt_frame->enter_frame == NULL)
- ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
+ if (ompt_frame->enter_frame.ptr == NULL)
+ ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 OMPT_STORE_RETURN_ADDRESS(global_tid); } #endif
@@ -1608,7 +1608,7 @@ kmp_int32 __kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid) { status = __kmp_barrier(bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL); #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.enabled) {
- ompt_frame->enter_frame = NULL;
+ ompt_frame->enter_frame = ompt_data_none;
 } #endif
@@ -1656,11 +1656,11 @@ kmp_int32 __kmpc_barrier_master_nowait(ident_t *loc, kmp_int32 global_tid) { } #if OMPT_SUPPORT
- omp_frame_t *ompt_frame;
+ ompt_frame_t *ompt_frame;
 if (ompt_enabled.enabled) { __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
- if (ompt_frame->enter_frame == NULL)
- ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
+ if (ompt_frame->enter_frame.ptr == NULL)
+ ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 OMPT_STORE_RETURN_ADDRESS(global_tid); } #endif
@@ -1670,7 +1670,7 @@ kmp_int32 __kmpc_barrier_master_nowait(ident_t *loc, kmp_int32 global_tid) { __kmp_barrier(bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL); #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.enabled) {
- ompt_frame->enter_frame = NULL;
+ ompt_frame->enter_frame = ompt_data_none;
 } #endif
@@ -2059,11 +2059,11 @@ void __kmpc_copyprivate(ident_t *loc, kmp_int32 gtid, size_t cpy_size, *data_ptr = cpy_data; #if OMPT_SUPPORT
- omp_frame_t *ompt_frame;
+ ompt_frame_t *ompt_frame;
 if (ompt_enabled.enabled) { __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
- if (ompt_frame->enter_frame == NULL)
- ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
+ if (ompt_frame->enter_frame.ptr == NULL)
+ ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 OMPT_STORE_RETURN_ADDRESS(gtid); } #endif
@@ -2091,7 +2091,7 @@ void __kmpc_copyprivate(ident_t *loc, kmp_int32 gtid, size_t cpy_size, __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.enabled) {
- ompt_frame->enter_frame = NULL;
+ ompt_frame->enter_frame = ompt_data_none;
 } #endif }
@@ -2189,7 +2189,7 @@ void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, if (ompt_enabled.ompt_callback_lock_init) { ompt_callbacks.ompt_callback(ompt_callback_lock_init)( ompt_mutex_lock, (omp_lock_hint_t)hint,
- __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
+ __ompt_get_mutex_impl_type(user_lock), (ompt_wait_id_t)user_lock,
 codeptr); } #endif
@@ -2213,7 +2213,7 @@ void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, if (ompt_enabled.ompt_callback_lock_init) { ompt_callbacks.ompt_callback(ompt_callback_lock_init)( ompt_mutex_nest_lock, (omp_lock_hint_t)hint,
- __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
+ __ompt_get_mutex_impl_type(user_lock), (ompt_wait_id_t)user_lock,
 codeptr); } #endif
@@ -2239,7 +2239,7 @@ void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_lock_init) { ompt_callbacks.ompt_callback(ompt_callback_lock_init)( ompt_mutex_lock, omp_lock_hint_none,
- __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
+ __ompt_get_mutex_impl_type(user_lock), (ompt_wait_id_t)user_lock,
 codeptr); } #endif
@@ -2282,7 +2282,7 @@ void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_lock_init) { ompt_callbacks.ompt_callback(ompt_callback_lock_init)( ompt_mutex_lock, omp_lock_hint_none, __ompt_get_mutex_impl_type(),
- (omp_wait_id_t)user_lock, codeptr);
+ (ompt_wait_id_t)user_lock, codeptr);
 } #endif
@@ -2311,7 +2311,7 @@ void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_lock_init) { ompt_callbacks.ompt_callback(ompt_callback_lock_init)( ompt_mutex_nest_lock, omp_lock_hint_none,
- __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
+ __ompt_get_mutex_impl_type(user_lock), (ompt_wait_id_t)user_lock,
 codeptr); } #endif
@@ -2357,7 +2357,7 @@ void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_lock_init) { ompt_callbacks.ompt_callback(ompt_callback_lock_init)( ompt_mutex_nest_lock, omp_lock_hint_none, __ompt_get_mutex_impl_type(),
- (omp_wait_id_t)user_lock, codeptr);
+ (ompt_wait_id_t)user_lock, codeptr);
 } #endif
@@ -2393,7 +2393,7 @@ void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { lck = (kmp_user_lock_p)user_lock; } ompt_callbacks.ompt_callback(ompt_callback_lock_destroy)(
- ompt_mutex_lock, (omp_wait_id_t)user_lock, codeptr);
+ ompt_mutex_lock, (ompt_wait_id_t)user_lock, codeptr);
 } #endif KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
@@ -2421,7 +2421,7 @@ void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { codeptr = OMPT_GET_RETURN_ADDRESS(0); if (ompt_enabled.ompt_callback_lock_destroy) { ompt_callbacks.ompt_callback(ompt_callback_lock_destroy)(
- ompt_mutex_lock, (omp_wait_id_t)user_lock, codeptr);
+ ompt_mutex_lock, (ompt_wait_id_t)user_lock, codeptr);
 } #endif
@@ -2461,7 +2461,7 @@ void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { codeptr = OMPT_GET_RETURN_ADDRESS(0); if (ompt_enabled.ompt_callback_lock_destroy) { ompt_callbacks.ompt_callback(ompt_callback_lock_destroy)(
- ompt_mutex_nest_lock, (omp_wait_id_t)user_lock, codeptr);
+ ompt_mutex_nest_lock, (ompt_wait_id_t)user_lock, codeptr);
 } #endif KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
@@ -2493,7 +2493,7 @@ void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { codeptr = OMPT_GET_RETURN_ADDRESS(0); if (ompt_enabled.ompt_callback_lock_destroy) { ompt_callbacks.ompt_callback(ompt_callback_lock_destroy)(
- ompt_mutex_nest_lock, (omp_wait_id_t)user_lock, codeptr);
+ ompt_mutex_nest_lock, (ompt_wait_id_t)user_lock, codeptr);
 } #endif
@@ -2538,7 +2538,7 @@ void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_lock, omp_lock_hint_none,
- __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
+ __ompt_get_mutex_impl_type(user_lock), (ompt_wait_id_t)user_lock,
 codeptr); } #endif
@@ -2560,7 +2560,7 @@ void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.ompt_callback_mutex_acquired) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
- ompt_mutex_lock, (omp_wait_id_t)user_lock, codeptr);
+ ompt_mutex_lock, (ompt_wait_id_t)user_lock, codeptr);
 } #endif
@@ -2593,7 +2593,7 @@ void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_lock, omp_lock_hint_none, __ompt_get_mutex_impl_type(),
- (omp_wait_id_t)lck, codeptr);
+ (ompt_wait_id_t)lck, codeptr);
 } #endif
@@ -2606,7 +2606,7 @@ void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.ompt_callback_mutex_acquired) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
- ompt_mutex_lock, (omp_wait_id_t)lck, codeptr);
+ ompt_mutex_lock, (ompt_wait_id_t)lck, codeptr);
 } #endif
@@ -2628,7 +2628,7 @@ void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_nest_lock, omp_lock_hint_none,
- __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
+ __ompt_get_mutex_impl_type(user_lock), (ompt_wait_id_t)user_lock,
 codeptr); } }
@@ -2646,13 +2646,13 @@ void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_acquired) { // lock_first ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
- ompt_mutex_nest_lock, (omp_wait_id_t)user_lock, codeptr);
+ ompt_mutex_nest_lock, (ompt_wait_id_t)user_lock, codeptr);
 } } else { if (ompt_enabled.ompt_callback_nest_lock) { // lock_next ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
- ompt_scope_begin, (omp_wait_id_t)user_lock, codeptr);
+ ompt_scope_begin, (ompt_wait_id_t)user_lock, codeptr);
 } } }
@@ -2690,7 +2690,7 @@ void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_nest_lock, omp_lock_hint_none,
- __ompt_get_mutex_impl_type(), (omp_wait_id_t)lck, codeptr);
+ __ompt_get_mutex_impl_type(), (ompt_wait_id_t)lck, codeptr);
 } } #endif
@@ -2707,13 +2707,13 @@ void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_acquired) { // lock_first ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
- ompt_mutex_nest_lock, (omp_wait_id_t)lck, codeptr);
+ ompt_mutex_nest_lock, (ompt_wait_id_t)lck, codeptr);
 } } else { if (ompt_enabled.ompt_callback_nest_lock) { // lock_next ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
- ompt_scope_begin, (omp_wait_id_t)lck, codeptr);
+ ompt_scope_begin, (ompt_wait_id_t)lck, codeptr);
 } } }
@@ -2749,7 +2749,7 @@ void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { codeptr = OMPT_GET_RETURN_ADDRESS(0); if (ompt_enabled.ompt_callback_mutex_released) { ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
- ompt_mutex_lock, (omp_wait_id_t)user_lock, codeptr);
+ ompt_mutex_lock, (ompt_wait_id_t)user_lock, codeptr);
 } #endif
@@ -2778,7 +2778,7 @@ void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { codeptr = OMPT_GET_RETURN_ADDRESS(0); if (ompt_enabled.ompt_callback_mutex_released) { ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
- ompt_mutex_lock, (omp_wait_id_t)lck, codeptr);
+ ompt_mutex_lock, (ompt_wait_id_t)lck, codeptr);
 } #endif
@@ -2810,7 +2810,7 @@ void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { codeptr = OMPT_GET_RETURN_ADDRESS(0); if (ompt_enabled.ompt_callback_mutex_released) { ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
- ompt_mutex_lock, (omp_wait_id_t)lck, codeptr);
+ ompt_mutex_lock, (ompt_wait_id_t)lck, codeptr);
 } #endif
@@ -2838,12 +2838,12 @@ void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_released) { // release_lock_last ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
- ompt_mutex_nest_lock, (omp_wait_id_t)user_lock, codeptr);
+ ompt_mutex_nest_lock, (ompt_wait_id_t)user_lock, codeptr);
 } } else if (ompt_enabled.ompt_callback_nest_lock) { // release_lock_prev ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
- ompt_scope_end, (omp_wait_id_t)user_lock, codeptr);
+ ompt_scope_end, (ompt_wait_id_t)user_lock, codeptr);
 } } #endif
@@ -2887,12 +2887,12 @@ void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_released) { // release_lock_last ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
- ompt_mutex_nest_lock, (omp_wait_id_t)lck, codeptr);
+ ompt_mutex_nest_lock, (ompt_wait_id_t)lck, codeptr);
 } } else if (ompt_enabled.ompt_callback_nest_lock) { // release_lock_previous ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
- ompt_mutex_scope_end, (omp_wait_id_t)lck, codeptr);
+ ompt_mutex_scope_end, (ompt_wait_id_t)lck, codeptr);
 } } #endif
@@ -2929,12 +2929,12 @@ void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_released) { // release_lock_last ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
- ompt_mutex_nest_lock, (omp_wait_id_t)lck, codeptr);
+ ompt_mutex_nest_lock, (ompt_wait_id_t)lck, codeptr);
 } } else if (ompt_enabled.ompt_callback_nest_lock) { // release_lock_previous ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
- ompt_mutex_scope_end, (omp_wait_id_t)lck, codeptr);
+ ompt_mutex_scope_end, (ompt_wait_id_t)lck, codeptr);
 } } #endif
@@ -2960,7 +2960,7 @@ int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_lock, omp_lock_hint_none,
- __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
+ __ompt_get_mutex_impl_type(user_lock), (ompt_wait_id_t)user_lock,
 codeptr); } #endif
@@ -2983,7 +2983,7 @@ int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.ompt_callback_mutex_acquired) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
- ompt_mutex_lock, (omp_wait_id_t)user_lock, codeptr);
+ ompt_mutex_lock, (ompt_wait_id_t)user_lock, codeptr);
 } #endif return FTN_TRUE;
@@ -3024,7 +3024,7 @@ int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_lock, omp_lock_hint_none, __ompt_get_mutex_impl_type(),
- (omp_wait_id_t)lck, codeptr);
+ (ompt_wait_id_t)lck, codeptr);
 } #endif
@@ -3039,7 +3039,7 @@ int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { #if OMPT_SUPPORT && OMPT_OPTIONAL if (rc && ompt_enabled.ompt_callback_mutex_acquired) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
- ompt_mutex_lock, (omp_wait_id_t)lck, codeptr);
+ ompt_mutex_lock, (ompt_wait_id_t)lck, codeptr);
 } #endif
@@ -3065,7 +3065,7 @@ int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_nest_lock, omp_lock_hint_none,
- __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
+ __ompt_get_mutex_impl_type(user_lock), (ompt_wait_id_t)user_lock,
 codeptr); } #endif
@@ -3083,13 +3083,13 @@ int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_acquired) { // lock_first ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
- ompt_mutex_nest_lock, (omp_wait_id_t)user_lock, codeptr);
+ ompt_mutex_nest_lock, (ompt_wait_id_t)user_lock, codeptr);
 } } else { if (ompt_enabled.ompt_callback_nest_lock) { // lock_next ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
- ompt_scope_begin, (omp_wait_id_t)user_lock, codeptr);
+ ompt_scope_begin, (ompt_wait_id_t)user_lock, codeptr);
 } } }
@@ -3130,7 +3130,7 @@ int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { ompt_enabled.ompt_callback_mutex_acquire) { ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)( ompt_mutex_nest_lock, omp_lock_hint_none,
- __ompt_get_mutex_impl_type(), (omp_wait_id_t)lck, codeptr);
+ __ompt_get_mutex_impl_type(), (ompt_wait_id_t)lck, codeptr);
 } #endif
@@ -3148,13 +3148,13 @@ int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { if (ompt_enabled.ompt_callback_mutex_acquired) { // lock_first ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
- ompt_mutex_nest_lock, (omp_wait_id_t)lck, codeptr);
+ ompt_mutex_nest_lock, (ompt_wait_id_t)lck, codeptr);
 } } else { if (ompt_enabled.ompt_callback_nest_lock) { // lock_next ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
- ompt_mutex_scope_begin, (omp_wait_id_t)lck, codeptr);
+ ompt_mutex_scope_begin, (ompt_wait_id_t)lck, codeptr);
 } } }
@@ -3445,11 +3445,11 @@ __kmpc_reduce_nowait(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, // JP: as long as there is a barrier in the implementation, OMPT should and // will provide the barrier events // so we set-up the necessary frame/return addresses.
- omp_frame_t *ompt_frame;
+ ompt_frame_t *ompt_frame;
 if (ompt_enabled.enabled) { __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
- if (ompt_frame->enter_frame == NULL)
- ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
+ if (ompt_frame->enter_frame.ptr == NULL)
+ ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 OMPT_STORE_RETURN_ADDRESS(global_tid); } #endif
@@ -3462,7 +3462,7 @@ __kmpc_reduce_nowait(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, retval = (retval != 0) ? (0) : (1); #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.enabled) {
- ompt_frame->enter_frame = NULL;
+ ompt_frame->enter_frame = ompt_data_none;
 } #endif
@@ -3626,11 +3626,11 @@ kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, // this barrier should be visible to a customer and to the threading profile // tool (it's a terminating barrier on constructs if NOWAIT not specified) #if OMPT_SUPPORT
- omp_frame_t *ompt_frame;
+ ompt_frame_t *ompt_frame;
 if (ompt_enabled.enabled) { __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
- if (ompt_frame->enter_frame == NULL)
- ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
+ if (ompt_frame->enter_frame.ptr == NULL)
+ ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 OMPT_STORE_RETURN_ADDRESS(global_tid); } #endif
@@ -3644,7 +3644,7 @@ kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, retval = (retval != 0) ? (0) : (1); #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.enabled) {
- ompt_frame->enter_frame = NULL;
+ ompt_frame->enter_frame = ompt_data_none;
 } #endif
@@ -3712,11 +3712,11 @@ void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid, // TODO: implicit barrier: should be exposed #if OMPT_SUPPORT
- omp_frame_t *ompt_frame;
+ ompt_frame_t *ompt_frame;
 if (ompt_enabled.enabled) { __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
- if (ompt_frame->enter_frame == NULL)
- ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
+ if (ompt_frame->enter_frame.ptr == NULL)
+ ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 OMPT_STORE_RETURN_ADDRESS(global_tid); } #endif
@@ -3726,7 +3726,7 @@ void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid, __kmp_barrier(bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL); #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.enabled) {
- ompt_frame->enter_frame = NULL;
+ ompt_frame->enter_frame = ompt_data_none;
 } #endif
@@ -3736,11 +3736,11 @@ void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid, // TODO: implicit barrier: should be exposed #if OMPT_SUPPORT
- omp_frame_t *ompt_frame;
+ ompt_frame_t *ompt_frame;
 if (ompt_enabled.enabled) { __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
- if (ompt_frame->enter_frame == NULL)
- ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
+ if (ompt_frame->enter_frame.ptr == NULL)
+ ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 OMPT_STORE_RETURN_ADDRESS(global_tid); } #endif
@@ -3750,18 +3750,18 @@ void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid, __kmp_barrier(bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL); #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.enabled) {
- ompt_frame->enter_frame = NULL;
+ ompt_frame->enter_frame = ompt_data_none;
 } #endif } else if (packed_reduction_method == atomic_reduce_block) { #if OMPT_SUPPORT
- omp_frame_t *ompt_frame;
+ ompt_frame_t *ompt_frame;
 if (ompt_enabled.enabled) { __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
- if (ompt_frame->enter_frame == NULL)
- ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
+ if (ompt_frame->enter_frame.ptr == NULL)
+ ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 OMPT_STORE_RETURN_ADDRESS(global_tid); } #endif
@@ -3772,7 +3772,7 @@ void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid, __kmp_barrier(bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL); #if OMPT_SUPPORT && OMPT_OPTIONAL if (ompt_enabled.enabled) {
- ompt_frame->enter_frame = NULL;
+ ompt_frame->enter_frame = ompt_data_none;
 } #endif
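Taken together, the hunks above apply one mechanical change: the omp_*-prefixed OMPT identifiers (omp_frame_t, omp_wait_id_t, omp_state_*) are renamed to their ompt_*-prefixed counterparts, the task frame's enter_frame/exit_frame fields are treated as ompt_data_t unions (tested and assigned through .ptr, cleared with ompt_data_none instead of NULL), and OMPT_GET_FRAME_ADDRESS(1) becomes OMPT_GET_FRAME_ADDRESS(0). The sketch below condenses the frame-setup pattern that recurs around the runtime barriers in this patch (e.g. in __kmpc_barrier, __kmpc_copyprivate, __kmpc_reduce). The wrapper function itself is hypothetical and the snippet is not compilable outside the OpenMP runtime sources; every identifier in it is taken from the diff.

```c
/* Condensed, illustrative only: the post-patch OMPT frame handling around a
 * runtime-internal barrier. Not a definitive implementation. */
#if OMPT_SUPPORT
static void example_ompt_framed_barrier(ident_t *loc, kmp_int32 global_tid) {
  (void)loc; // location info unused in this condensed sketch
  ompt_frame_t *ompt_frame; // was omp_frame_t before this patch
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    // enter_frame is now an ompt_data_t union, so test and assign its .ptr member
    if (ompt_frame->enter_frame.ptr == NULL)
      ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); // frame of this entry point
    OMPT_STORE_RETURN_ADDRESS(global_tid);
  }

  __kmp_barrier(bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    // reset the whole union with ompt_data_none rather than assigning NULL
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
}
#endif
```

In the patch itself this sequence is repeated inline in each affected entry point rather than factored into a helper.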

