Diffstat (limited to 'openmp/runtime/src')
-rw-r--r--  openmp/runtime/src/dllexports       |   4
-rw-r--r--  openmp/runtime/src/exports_so.txt   |   7
-rw-r--r--  openmp/runtime/src/kmp.h            |  29
-rw-r--r--  openmp/runtime/src/kmp_barrier.cpp  |  14
-rw-r--r--  openmp/runtime/src/kmp_debugger.c   | 308
-rw-r--r--  openmp/runtime/src/kmp_debugger.h   |  51
-rw-r--r--  openmp/runtime/src/kmp_omp.h        |   5
-rw-r--r--  openmp/runtime/src/kmp_runtime.c    |  41
-rw-r--r--  openmp/runtime/src/makefile.mk      |   3
9 files changed, 456 insertions, 6 deletions
diff --git a/openmp/runtime/src/dllexports b/openmp/runtime/src/dllexports
index b151a6b4212..dd3c3933126 100644
--- a/openmp/runtime/src/dllexports
+++ b/openmp/runtime/src/dllexports
@@ -182,6 +182,10 @@
%endif
+#if USE_DEBUGGER
+ __kmp_debugging DATA
+ __kmp_omp_debug_struct_info DATA
+#endif /* USE_DEBUGGER */
# Symbols for MS mutual detection:
_You_must_link_with_exactly_one_OpenMP_library DATA
diff --git a/openmp/runtime/src/exports_so.txt b/openmp/runtime/src/exports_so.txt
index 0fc5a0a2c92..06ef2e708e2 100644
--- a/openmp/runtime/src/exports_so.txt
+++ b/openmp/runtime/src/exports_so.txt
@@ -32,6 +32,13 @@ VERSION {
_You_must_link_with_*; # Mutual detection/MS compatibility symbols.
+ #
+ # Debugger support.
+ #
+#if USE_DEBUGGER
+ __kmp_debugging;
+ __kmp_omp_debug_struct_info;
+#endif /* USE_DEBUGGER */
#
# Internal functions exported for testing purposes.
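
The two symbols exported above are the debugger's entry points into the runtime: __kmp_debugging is a writable flag and __kmp_omp_debug_struct_info is the table of addresses, offsets, and sizes built below. A minimal sketch of how an in-process helper might locate them at run time, assuming a POSIX target with the runtime already loaded (the helper name is hypothetical and not part of this change; a real debugger would do the equivalent lookup in the debuggee's symbol table):

    #include <dlfcn.h>
    #include <stdio.h>

    /* Hypothetical helper: find the exported debugger hooks and enable them. */
    static int enable_omp_debugging(void) {
        void *self = dlopen(NULL, RTLD_NOW);          /* search the whole process */
        if (self == NULL)
            return -1;
        int  *debugging = (int *) dlsym(self, "__kmp_debugging");
        void *info      = dlsym(self, "__kmp_omp_debug_struct_info");
        if (debugging == NULL || info == NULL) {
            dlclose(self);
            return -1;                                /* symbols not exported */
        }
        *debugging = 1;                               /* runtime starts generating team/task ids */
        printf("__kmp_omp_debug_struct_info at %p\n", info);
        dlclose(self);
        return 0;
    }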
diff --git a/openmp/runtime/src/kmp.h b/openmp/runtime/src/kmp.h
index d003146af00..9f09290d63a 100644
--- a/openmp/runtime/src/kmp.h
+++ b/openmp/runtime/src/kmp.h
@@ -86,6 +86,9 @@ class kmp_stats_list;
#include "kmp_version.h"
#include "kmp_debug.h"
#include "kmp_lock.h"
+#if USE_DEBUGGER
+#include "kmp_debugger.h"
+#endif
#include "kmp_i18n.h"
#define KMP_HANDLE_SIGNALS (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN)
@@ -1663,6 +1666,11 @@ typedef struct KMP_ALIGN_CACHE kmp_bstate {
kmp_uint8 offset;
kmp_uint8 wait_flag;
kmp_uint8 use_oncore_barrier;
+#if USE_DEBUGGER
+ // The following field is intended solely for the debugger. Only the worker thread itself accesses
+ // this field: the worker increments it by 1 when it arrives at a barrier.
+ KMP_ALIGN_CACHE kmp_uint b_worker_arrived;
+#endif /* USE_DEBUGGER */
} kmp_bstate_t;
union KMP_ALIGN_CACHE kmp_barrier_union {
@@ -1679,6 +1687,13 @@ union KMP_ALIGN_CACHE kmp_barrier_team_union {
char b_pad[ CACHE_LINE ];
struct {
kmp_uint b_arrived; /* STATE => task reached synch point. */
+#if USE_DEBUGGER
+ // The following two fields are intended solely for the debugger. Only the master of the team accesses
+ // these fields: the first is incremented by 1 when the master arrives at a barrier, the
+ // second is incremented by 1 when all the threads have arrived.
+ kmp_uint b_master_arrived;
+ kmp_uint b_team_arrived;
+#endif
};
};
@@ -2718,12 +2733,22 @@ extern kmp_info_t __kmp_monitor;
extern volatile kmp_uint32 __kmp_team_counter; // Used by Debugging Support Library.
extern volatile kmp_uint32 __kmp_task_counter; // Used by Debugging Support Library.
+#if USE_DEBUGGER
+
#define _KMP_GEN_ID( counter ) \
( \
+ __kmp_debugging \
+ ? \
+ KMP_TEST_THEN_INC32( (volatile kmp_int32 *) & counter ) + 1 \
+ : \
~ 0 \
)
-
-
+#else
+#define _KMP_GEN_ID( counter ) \
+ ( \
+ ~ 0 \
+ )
+#endif /* USE_DEBUGGER */
#define KMP_GEN_TASK_ID() _KMP_GEN_ID( __kmp_task_counter )
#define KMP_GEN_TEAM_ID() _KMP_GEN_ID( __kmp_team_counter )
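
With this change KMP_GEN_TEAM_ID() and KMP_GEN_TASK_ID() hand out real identifiers only while __kmp_debugging is set; otherwise every team and task gets ~0. A stand-alone sketch of the same pattern — not the runtime macro itself, and using the GCC/Clang __sync builtin where the runtime uses KMP_TEST_THEN_INC32:

    #include <stdio.h>

    static volatile int debugging    = 0;   /* stands in for __kmp_debugging */
    static volatile int team_counter = 0;   /* stands in for __kmp_team_counter */

    /* Same shape as _KMP_GEN_ID: atomically bump the counter and return a
       unique positive id while debugging, otherwise return ~0 (no id). */
    static int gen_team_id(void) {
        return debugging
            ? __sync_fetch_and_add(&team_counter, 1) + 1
            : ~0;
    }

    int main(void) {
        printf("%d\n", gen_team_id());                    /* -1 (~0): no debugger attached */
        debugging = 1;
        printf("%d %d\n", gen_team_id(), gen_team_id());  /* 1 2: unique ids */
        return 0;
    }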
diff --git a/openmp/runtime/src/kmp_barrier.cpp b/openmp/runtime/src/kmp_barrier.cpp
index ea345b0a9ad..a2a24f2e98b 100644
--- a/openmp/runtime/src/kmp_barrier.cpp
+++ b/openmp/runtime/src/kmp_barrier.cpp
@@ -1104,7 +1104,14 @@ __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size,
if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
__kmp_itt_barrier_starting(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */
-
+#if USE_DEBUGGER
+ // Let the debugger know: the thread has arrived at the barrier and is waiting.
+ if (KMP_MASTER_TID(tid)) { // Master counter is stored in team structure.
+ team->t.t_bar[bt].b_master_arrived += 1;
+ } else {
+ this_thr->th.th_bar[bt].bb.b_worker_arrived += 1;
+ } // if
+#endif /* USE_DEBUGGER */
if (reduce != NULL) {
//KMP_DEBUG_ASSERT( is_split == TRUE ); // #C69956
this_thr->th.th_local.reduce_data = reduce_data;
@@ -1142,7 +1149,10 @@ __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size,
USE_ITT_BUILD_ARG(itt_sync_obj) );
__kmp_task_team_setup(this_thr, team, 0, 0); // use 0,0 to only setup the current team if nthreads > 1
}
-
+#if USE_DEBUGGER
+ // Let the debugger know: all threads have arrived and are starting to leave the barrier.
+ team->t.t_bar[bt].b_team_arrived += 1;
+#endif
#if USE_ITT_BUILD
/* TODO: In case of split reduction barrier, master thread may send acquired event early,
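
Taken together, the counters above give a debugger a view of barrier progress: each worker bumps its own b_worker_arrived on arrival, the master bumps the team's b_master_arrived, and b_team_arrived is bumped once when the whole team is released. A small sketch of one plausible debugger-side interpretation — the structs are simplified stand-ins, not the runtime's types, and the counters are not synchronized with the debugger, so values are only indicative:

    #include <stdio.h>

    /* Simplified stand-ins for the per-team and per-thread barrier counters. */
    typedef struct { unsigned b_master_arrived, b_team_arrived; } team_bar_t;
    typedef struct { unsigned b_worker_arrived; } thread_bar_t;

    /* A worker appears to be waiting in the current barrier if it has arrived
       more times than the team as a whole has been released. */
    static int worker_waiting(const thread_bar_t *w, const team_bar_t *t) {
        return w->b_worker_arrived > t->b_team_arrived;
    }

    int main(void) {
        team_bar_t team     = { 3, 2 };  /* master arrived 3 times, team released twice */
        thread_bar_t worker = { 3 };     /* worker arrived 3 times -> still in barrier */
        printf("waiting: %d\n", worker_waiting(&worker, &team));
        return 0;
    }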
diff --git a/openmp/runtime/src/kmp_debugger.c b/openmp/runtime/src/kmp_debugger.c
new file mode 100644
index 00000000000..d46c8a9a978
--- /dev/null
+++ b/openmp/runtime/src/kmp_debugger.c
@@ -0,0 +1,308 @@
+#if USE_DEBUGGER
+/*
+ * kmp_debugger.c -- debugger support.
+ */
+
+
+//===----------------------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.txt for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+#include "kmp.h"
+#include "kmp_lock.h"
+#include "kmp_omp.h"
+#include "kmp_str.h"
+
+/*
+ NOTE: All variable names are known to the debugger, do not change!
+*/
+
+#ifdef __cplusplus
+ extern "C" {
+ extern kmp_omp_struct_info_t __kmp_omp_debug_struct_info;
+ } // extern "C"
+#endif // __cplusplus
+
+int __kmp_debugging = FALSE; // Boolean whether currently debugging OpenMP RTL.
+
+#define offset_and_size_of( structure, field ) \
+ { \
+ offsetof( structure, field ), \
+ sizeof( ( (structure *) NULL)->field ) \
+ }
+
+#define offset_and_size_not_available \
+ { -1, -1 }
+
+#define addr_and_size_of( var ) \
+ { \
+ (kmp_uint64)( & var ), \
+ sizeof( var ) \
+ }
+
+#define nthr_buffer_size 1024
+static kmp_int32
+kmp_omp_nthr_info_buffer[ nthr_buffer_size ] =
+ { nthr_buffer_size * sizeof( kmp_int32 ) };
+
+/* TODO: Check punctuation for various platforms here */
+static char func_microtask[] = "__kmp_invoke_microtask";
+static char func_fork[] = "__kmpc_fork_call";
+static char func_fork_teams[] = "__kmpc_fork_teams";
+
+
+// Various info about runtime structures: addresses, field offsets, sizes, etc.
+kmp_omp_struct_info_t
+__kmp_omp_debug_struct_info = {
+
+ /* Change this only if you make a fundamental data structure change here */
+ KMP_OMP_VERSION,
+
+ /* Sanity check. Should only be checked if versions are identical.
+ * This is also used for backward compatibility to get the runtime
+ * structure size if the runtime is older than the interface. */
+ sizeof( kmp_omp_struct_info_t ),
+
+ /* OpenMP RTL version info. */
+ addr_and_size_of( __kmp_version_major ),
+ addr_and_size_of( __kmp_version_minor ),
+ addr_and_size_of( __kmp_version_build ),
+ addr_and_size_of( __kmp_openmp_version ),
+ { (kmp_uint64)( __kmp_copyright ) + KMP_VERSION_MAGIC_LEN, 0 }, // Skip magic prefix.
+
+ /* Various globals. */
+ addr_and_size_of( __kmp_threads ),
+ addr_and_size_of( __kmp_root ),
+ addr_and_size_of( __kmp_threads_capacity ),
+ addr_and_size_of( __kmp_monitor ),
+#if ! KMP_USE_DYNAMIC_LOCK
+ addr_and_size_of( __kmp_user_lock_table ),
+#endif
+ addr_and_size_of( func_microtask ),
+ addr_and_size_of( func_fork ),
+ addr_and_size_of( func_fork_teams ),
+ addr_and_size_of( __kmp_team_counter ),
+ addr_and_size_of( __kmp_task_counter ),
+ addr_and_size_of( kmp_omp_nthr_info_buffer ),
+ sizeof( void * ),
+ OMP_LOCK_T_SIZE < sizeof(void *),
+ bs_last_barrier,
+ TASK_DEQUE_SIZE,
+
+ // thread structure information
+ sizeof( kmp_base_info_t ),
+ offset_and_size_of( kmp_base_info_t, th_info ),
+ offset_and_size_of( kmp_base_info_t, th_team ),
+ offset_and_size_of( kmp_base_info_t, th_root ),
+ offset_and_size_of( kmp_base_info_t, th_serial_team ),
+ offset_and_size_of( kmp_base_info_t, th_ident ),
+ offset_and_size_of( kmp_base_info_t, th_spin_here ),
+ offset_and_size_of( kmp_base_info_t, th_next_waiting ),
+ offset_and_size_of( kmp_base_info_t, th_task_team ),
+ offset_and_size_of( kmp_base_info_t, th_current_task ),
+ offset_and_size_of( kmp_base_info_t, th_task_state ),
+ offset_and_size_of( kmp_base_info_t, th_bar ),
+ offset_and_size_of( kmp_bstate_t, b_worker_arrived ),
+
+ // teams information
+ offset_and_size_of( kmp_base_info_t, th_teams_microtask),
+ offset_and_size_of( kmp_base_info_t, th_teams_level),
+ offset_and_size_of( kmp_teams_size_t, nteams ),
+ offset_and_size_of( kmp_teams_size_t, nth ),
+
+ // kmp_desc structure (for info field above)
+ sizeof( kmp_desc_base_t ),
+ offset_and_size_of( kmp_desc_base_t, ds_tid ),
+ offset_and_size_of( kmp_desc_base_t, ds_gtid ),
+ // On Windows* OS, ds_thread contains a thread /handle/, which is not usable, while thread /id/
+ // is in ds_thread_id.
+ #if KMP_OS_WINDOWS
+ offset_and_size_of( kmp_desc_base_t, ds_thread_id),
+ #else
+ offset_and_size_of( kmp_desc_base_t, ds_thread),
+ #endif
+
+ // team structure information
+ sizeof( kmp_base_team_t ),
+ offset_and_size_of( kmp_base_team_t, t_master_tid ),
+ offset_and_size_of( kmp_base_team_t, t_ident ),
+ offset_and_size_of( kmp_base_team_t, t_parent ),
+ offset_and_size_of( kmp_base_team_t, t_nproc ),
+ offset_and_size_of( kmp_base_team_t, t_threads ),
+ offset_and_size_of( kmp_base_team_t, t_serialized ),
+ offset_and_size_of( kmp_base_team_t, t_id ),
+ offset_and_size_of( kmp_base_team_t, t_pkfn ),
+ offset_and_size_of( kmp_base_team_t, t_task_team ),
+ offset_and_size_of( kmp_base_team_t, t_implicit_task_taskdata ),
+ offset_and_size_of( kmp_base_team_t, t_cancel_request ),
+ offset_and_size_of( kmp_base_team_t, t_bar ),
+ offset_and_size_of( kmp_balign_team_t, b_master_arrived ),
+ offset_and_size_of( kmp_balign_team_t, b_team_arrived ),
+
+ // root structure information
+ sizeof( kmp_base_root_t ),
+ offset_and_size_of( kmp_base_root_t, r_root_team ),
+ offset_and_size_of( kmp_base_root_t, r_hot_team ),
+ offset_and_size_of( kmp_base_root_t, r_uber_thread ),
+ offset_and_size_not_available,
+
+ // ident structure information
+ sizeof( ident_t ),
+ offset_and_size_of( ident_t, psource ),
+ offset_and_size_of( ident_t, flags ),
+
+ // lock structure information
+ sizeof( kmp_base_queuing_lock_t ),
+ offset_and_size_of( kmp_base_queuing_lock_t, initialized ),
+ offset_and_size_of( kmp_base_queuing_lock_t, location ),
+ offset_and_size_of( kmp_base_queuing_lock_t, tail_id ),
+ offset_and_size_of( kmp_base_queuing_lock_t, head_id ),
+ offset_and_size_of( kmp_base_queuing_lock_t, next_ticket ),
+ offset_and_size_of( kmp_base_queuing_lock_t, now_serving ),
+ offset_and_size_of( kmp_base_queuing_lock_t, owner_id ),
+ offset_and_size_of( kmp_base_queuing_lock_t, depth_locked ),
+ offset_and_size_of( kmp_base_queuing_lock_t, flags ),
+
+#if ! KMP_USE_DYNAMIC_LOCK
+ /* Lock table. */
+ sizeof( kmp_lock_table_t ),
+ offset_and_size_of( kmp_lock_table_t, used ),
+ offset_and_size_of( kmp_lock_table_t, allocated ),
+ offset_and_size_of( kmp_lock_table_t, table ),
+#endif
+
+ // Task team structure information.
+ sizeof( kmp_base_task_team_t ),
+ offset_and_size_of( kmp_base_task_team_t, tt_threads_data ),
+ offset_and_size_of( kmp_base_task_team_t, tt_found_tasks ),
+ offset_and_size_of( kmp_base_task_team_t, tt_nproc ),
+ offset_and_size_of( kmp_base_task_team_t, tt_unfinished_threads ),
+ offset_and_size_of( kmp_base_task_team_t, tt_active ),
+
+ // task_data_t.
+ sizeof( kmp_taskdata_t ),
+ offset_and_size_of( kmp_taskdata_t, td_task_id ),
+ offset_and_size_of( kmp_taskdata_t, td_flags ),
+ offset_and_size_of( kmp_taskdata_t, td_team ),
+ offset_and_size_of( kmp_taskdata_t, td_parent ),
+ offset_and_size_of( kmp_taskdata_t, td_level ),
+ offset_and_size_of( kmp_taskdata_t, td_ident ),
+ offset_and_size_of( kmp_taskdata_t, td_allocated_child_tasks ),
+ offset_and_size_of( kmp_taskdata_t, td_incomplete_child_tasks ),
+
+ offset_and_size_of( kmp_taskdata_t, td_taskwait_ident ),
+ offset_and_size_of( kmp_taskdata_t, td_taskwait_counter ),
+ offset_and_size_of( kmp_taskdata_t, td_taskwait_thread ),
+
+ offset_and_size_of( kmp_taskdata_t, td_taskgroup ),
+ offset_and_size_of( kmp_taskgroup_t, count ),
+ offset_and_size_of( kmp_taskgroup_t, cancel_request ),
+
+ offset_and_size_of( kmp_taskdata_t, td_depnode ),
+ offset_and_size_of( kmp_depnode_list_t, node ),
+ offset_and_size_of( kmp_depnode_list_t, next ),
+ offset_and_size_of( kmp_base_depnode_t, successors ),
+ offset_and_size_of( kmp_base_depnode_t, task ),
+ offset_and_size_of( kmp_base_depnode_t, npredecessors ),
+ offset_and_size_of( kmp_base_depnode_t, nrefs ),
+ offset_and_size_of( kmp_task_t, routine ),
+
+ // thread_data_t.
+ sizeof( kmp_thread_data_t ),
+ offset_and_size_of( kmp_base_thread_data_t, td_deque ),
+ offset_and_size_of( kmp_base_thread_data_t, td_deque_head ),
+ offset_and_size_of( kmp_base_thread_data_t, td_deque_tail ),
+ offset_and_size_of( kmp_base_thread_data_t, td_deque_ntasks ),
+ offset_and_size_of( kmp_base_thread_data_t, td_deque_last_stolen ),
+
+ // The last field.
+ KMP_OMP_VERSION,
+
+}; // __kmp_omp_debug_struct_info
+
+#undef offset_and_size_of
+#undef addr_and_size_of
+
+/*
+ The Intel compiler on the IA-32 architecture issues a warning "conversion
+ from "unsigned long long" to "char *" may lose significant bits"
+ when a 64-bit value is assigned to a 32-bit pointer. Use this function
+ to suppress the warning.
+*/
+static inline
+void *
+__kmp_convert_to_ptr(
+ kmp_uint64 addr
+) {
+ #if KMP_COMPILER_ICC
+ #pragma warning( push )
+ #pragma warning( disable: 810 ) // conversion from "unsigned long long" to "char *" may lose significant bits
+ #pragma warning( disable: 1195 ) // conversion from integer to smaller pointer
+ #endif // KMP_COMPILER_ICC
+ return (void *) addr;
+ #if KMP_COMPILER_ICC
+ #pragma warning( pop )
+ #endif // KMP_COMPILER_ICC
+} // __kmp_convert_to_ptr
+
+
+static int
+kmp_location_match(
+ kmp_str_loc_t * loc,
+ kmp_omp_nthr_item_t * item
+) {
+
+ int file_match = 0;
+ int func_match = 0;
+ int line_match = 0;
+
+ char * file = (char *) __kmp_convert_to_ptr( item->file );
+ char * func = (char *) __kmp_convert_to_ptr( item->func );
+ file_match = __kmp_str_fname_match( & loc->fname, file );
+ func_match =
+ item->func == 0 // If item->func is NULL, it allows any func name.
+ ||
+ strcmp( func, "*" ) == 0
+ ||
+ ( loc->func != NULL && strcmp( loc->func, func ) == 0 );
+ line_match =
+ item->begin <= loc->line
+ &&
+ ( item->end <= 0 || loc->line <= item->end ); // if item->end <= 0, it means "end of file".
+
+ return ( file_match && func_match && line_match );
+
+} // kmp_location_match
+
+
+int
+__kmp_omp_num_threads(
+ ident_t const * ident
+) {
+
+ int num_threads = 0;
+
+ kmp_omp_nthr_info_t * info =
+ (kmp_omp_nthr_info_t *) __kmp_convert_to_ptr( __kmp_omp_debug_struct_info.nthr_info.addr );
+ if ( info->num > 0 && info->array != 0 ) {
+ kmp_omp_nthr_item_t * items = (kmp_omp_nthr_item_t *) __kmp_convert_to_ptr( info->array );
+ kmp_str_loc_t loc = __kmp_str_loc_init( ident->psource, 1 );
+ int i;
+ for ( i = 0; i < info->num; ++ i ) {
+ if ( kmp_location_match( & loc, & items[ i ] ) ) {
+ num_threads = items[ i ].num_threads;
+ }; // if
+ }; // for
+ __kmp_str_loc_free( & loc );
+ }; // if
+
+ return num_threads;
+
+} // __kmp_omp_num_threads
+#endif /* USE_DEBUGGER */
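
Every offset_and_size_of() entry above is an {offset, size} pair, and offset_and_size_not_available is {-1, -1}, so a debugger can read runtime structures without compiling against the runtime's headers. A minimal sketch of the consuming side; the descriptor struct is a mirror for illustration (the real definition lives in kmp_omp.h), and the sketch reads local memory with memcpy where a real debugger would substitute its remote-read primitive:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirror of the {offset, size} pairs emitted by offset_and_size_of(). */
    typedef struct { int64_t offset; int64_t size; } offset_and_size_t;

    /* Read one integer-valued field given a structure's base address and its
       descriptor from the info table. Assumes a little-endian target. */
    static int64_t read_field(const void *base, offset_and_size_t f) {
        int64_t value = 0;
        if (f.offset < 0 || f.size <= 0 || f.size > (int64_t) sizeof value)
            return -1;                       /* offset_and_size_not_available */
        memcpy(&value, (const char *) base + f.offset, (size_t) f.size);
        return value;
    }

    /* Tiny self-check against a local structure. */
    struct demo { int pad; short tid; };

    int main(void) {
        struct demo d = { 0, 7 };
        offset_and_size_t tid_desc = { offsetof(struct demo, tid), sizeof d.tid };
        printf("tid = %lld\n", (long long) read_field(&d, tid_desc));
        return 0;
    }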
diff --git a/openmp/runtime/src/kmp_debugger.h b/openmp/runtime/src/kmp_debugger.h
new file mode 100644
index 00000000000..29f41340dde
--- /dev/null
+++ b/openmp/runtime/src/kmp_debugger.h
@@ -0,0 +1,51 @@
+#if USE_DEBUGGER
+/*
+ * kmp_debugger.h -- debugger support.
+ */
+
+
+//===----------------------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.txt for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef KMP_DEBUGGER_H
+#define KMP_DEBUGGER_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif // __cplusplus
+
+/* * This external variable can be set by any debugger to tell the runtime that it is
+ currently executing inside a debugger. This allows the debugger to override the number
+ of threads spawned in a parallel region via __kmp_omp_num_threads() (below).
+ * When __kmp_debugging is TRUE, each team and each task gets a unique integer identifier
+ that the debugger can use to conveniently identify teams and tasks.
+ * The debugger has access to __kmp_omp_debug_struct_info, which describes the OpenMP
+ library's important internal structures. This lets the debugger read detailed information
+ about the typical OpenMP constructs (teams, threads, tasking, etc.) during a debugging
+ session and present it to the user probing the OpenMP portion of their code.
+ */
+extern int __kmp_debugging; /* Boolean whether currently debugging OpenMP RTL */
+// Returns the number of threads specified by the debugger for the given parallel region.
+/* The ident parameter, which represents a source location, is used to check whether the
+ debugger has changed the number of threads for the parallel region at that location.
+ This way, the thread count of specific parallel regions can be changed at the debugger's
+ request.
+ */
+int __kmp_omp_num_threads( ident_t const * ident );
+
+#ifdef __cplusplus
+ } // extern "C"
+#endif // __cplusplus
+
+
+#endif // KMP_DEBUGGER_H
+
+#endif // USE_DEBUGGER
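
The override path works in two steps: the debugger sets __kmp_debugging to TRUE and fills the kmp_omp_nthr_info_buffer it located through __kmp_omp_debug_struct_info; __kmp_fork_call then calls __kmp_omp_num_threads(loc) and, if a positive thread count matches the region's source location, uses it. A sketch of the debugger-side fill; the mirror structs below are assumptions for illustration only — the authoritative layouts are kmp_omp_nthr_item_t and kmp_omp_nthr_info_t in kmp_omp.h:

    #include <stdint.h>

    /* Assumed mirrors of kmp_omp_nthr_item_t / kmp_omp_nthr_info_t; field
       order and widths here are illustrative, not the runtime's definition. */
    typedef struct {
        int32_t  num_threads;   /* thread count to force for matching regions */
        int32_t  begin, end;    /* source line range; end <= 0 means "to end of file" */
        uint64_t file;          /* address of a file-name string in the debuggee */
        uint64_t func;          /* address of a function-name string, 0 = any function */
    } nthr_item_t;

    typedef struct {
        int32_t  num;           /* number of valid items */
        uint64_t array;         /* address of the item array in the debuggee */
    } nthr_info_t;

    /* Debugger-side sketch: force 2 threads for every parallel region in one
       source file. file_str_addr must point at a file-name string that lives
       in the debuggee's address space. */
    void force_two_threads(nthr_info_t *info, nthr_item_t *items,
                           uint64_t file_str_addr) {
        items[0].num_threads = 2;
        items[0].begin       = 1;
        items[0].end         = 0;    /* to end of file */
        items[0].file        = file_str_addr;
        items[0].func        = 0;    /* match any function */
        info->array = (uint64_t)(uintptr_t) items;
        info->num   = 1;
    }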
diff --git a/openmp/runtime/src/kmp_omp.h b/openmp/runtime/src/kmp_omp.h
index 7b8fa63b3c6..5a9419ff35c 100644
--- a/openmp/runtime/src/kmp_omp.h
+++ b/openmp/runtime/src/kmp_omp.h
@@ -1,3 +1,4 @@
+#if USE_DEBUGGER
/*
* kmp_omp.h -- OpenMP definition for kmp_omp_struct_info_t.
* This is for information about runtime library structures.
@@ -200,12 +201,13 @@ typedef struct {
/* Task dependency */
offset_and_size_t td_depnode; // pointer to graph node if the task has dependencies
- offset_and_size_t dn_successors;
offset_and_size_t dn_node;
offset_and_size_t dn_next;
+ offset_and_size_t dn_successors;
offset_and_size_t dn_task;
offset_and_size_t dn_npredecessors;
offset_and_size_t dn_nrefs;
+ offset_and_size_t dn_routine;
/* kmp_thread_data_t */
kmp_int32 hd_sizeof_struct;
@@ -220,5 +222,6 @@ typedef struct {
} kmp_omp_struct_info_t;
+#endif /* USE_DEBUGGER */
/* end of file */
diff --git a/openmp/runtime/src/kmp_runtime.c b/openmp/runtime/src/kmp_runtime.c
index 9f21986411b..df8ef6a9d49 100644
--- a/openmp/runtime/src/kmp_runtime.c
+++ b/openmp/runtime/src/kmp_runtime.c
@@ -1136,6 +1136,9 @@ __kmp_fork_team_threads( kmp_root_t *root, kmp_team_t *team,
for ( b = 0; b < bs_last_barrier; ++ b ) {
balign[ b ].bb.b_arrived = team->t.t_bar[ b ].b_arrived;
KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
+#if USE_DEBUGGER
+ balign[ b ].bb.b_worker_arrived = team->t.t_bar[ b ].b_team_arrived;
+#endif
}; // for b
}
}
@@ -1360,6 +1363,9 @@ __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
}
#endif /* OMP_40_ENABLED */
+#if USE_DEBUGGER
+ serial_team->t.t_pkfn = (microtask_t)( ~0 ); // For the debugger.
+#endif
this_thr->th.th_info.ds.ds_tid = 0;
/* set thread cache values */
@@ -1671,6 +1677,14 @@ __kmp_fork_call(
master_th->th.th_set_nproc = 0;
}
+#if USE_DEBUGGER
+ if ( __kmp_debugging ) { // Let debugger override number of threads.
+ int nth = __kmp_omp_num_threads( loc );
+ if ( nth > 0 ) { // 0 means debugger does not want to change number of threads.
+ master_set_numthreads = nth;
+ }; // if
+ }; // if
+#endif
KF_TRACE( 10, ( "__kmp_fork_call: before internal fork: root=%p, team=%p, master_th=%p, gtid=%d\n", root, parent_team, master_th, gtid ) );
__kmp_internal_fork( loc, gtid, parent_team );
@@ -2397,6 +2411,9 @@ __kmp_join_call(ident_t *loc, int gtid
for ( b = 0; b < bs_last_barrier; ++ b ) {
balign[ b ].bb.b_arrived = team->t.t_bar[ b ].b_arrived;
KMP_DEBUG_ASSERT(balign[ b ].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
+#if USE_DEBUGGER
+ balign[ b ].bb.b_worker_arrived = team->t.t_bar[ b ].b_team_arrived;
+#endif
}
if ( __kmp_tasking_mode != tskm_immediate_exec ) {
// Synchronize thread's task state
@@ -3148,6 +3165,10 @@ __kmp_initialize_root( kmp_root_t *root )
0 // argc
USE_NESTED_HOT_ARG(NULL) // master thread is unknown
);
+#if USE_DEBUGGER
+ // A non-NULL value should be assigned to make the debugger display the root team.
+ TCW_SYNC_PTR(root_team->t.t_pkfn, (microtask_t)( ~ 0 ));
+#endif
KF_TRACE( 10, ( "__kmp_initialize_root: after root_team = %p\n", root_team ) );
@@ -3798,6 +3819,9 @@ __kmp_register_root( int initial_thread )
int b;
for ( b = 0; b < bs_last_barrier; ++ b ) {
root_thread->th.th_bar[ b ].bb.b_arrived = KMP_INIT_BARRIER_STATE;
+#if USE_DEBUGGER
+ root_thread->th.th_bar[ b ].bb.b_worker_arrived = 0;
+#endif
}; // for
}
KMP_DEBUG_ASSERT( root->r.r_hot_team->t.t_bar[ bs_forkjoin_barrier ].b_arrived == KMP_INIT_BARRIER_STATE );
@@ -4968,6 +4992,9 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
for ( b = 0; b < bs_last_barrier; ++ b ) {
balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
+#if USE_DEBUGGER
+ balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
+#endif
}
}
if( hot_teams[level].hot_team_nth >= new_nproc ) {
@@ -5014,6 +5041,9 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
for( b = 0; b < bs_last_barrier; ++ b ) {
balign[ b ].bb.b_arrived = team->t.t_bar[ b ].b_arrived;
KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
+#if USE_DEBUGGER
+ balign[ b ].bb.b_worker_arrived = team->t.t_bar[ b ].b_team_arrived;
+#endif
}
}
}
@@ -5098,6 +5128,9 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
for( b = 0; b < bs_last_barrier; ++ b ) {
balign[ b ].bb.b_arrived = team->t.t_bar[ b ].b_arrived;
KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
+#if USE_DEBUGGER
+ balign[ b ].bb.b_worker_arrived = team->t.t_bar[ b ].b_team_arrived;
+#endif
}
}
}
@@ -5156,6 +5189,10 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
int b;
for ( b = 0; b < bs_last_barrier; ++ b) {
team->t.t_bar[ b ].b_arrived = KMP_INIT_BARRIER_STATE;
+#if USE_DEBUGGER
+ team->t.t_bar[ b ].b_master_arrived = 0;
+ team->t.t_bar[ b ].b_team_arrived = 0;
+#endif
}
}
@@ -5214,6 +5251,10 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
int b;
for ( b = 0; b < bs_last_barrier; ++ b ) {
team->t.t_bar[ b ].b_arrived = KMP_INIT_BARRIER_STATE;
+#if USE_DEBUGGER
+ team->t.t_bar[ b ].b_master_arrived = 0;
+ team->t.t_bar[ b ].b_team_arrived = 0;
+#endif
}
}
diff --git a/openmp/runtime/src/makefile.mk b/openmp/runtime/src/makefile.mk
index 20e261377f9..0210102d85e 100644
--- a/openmp/runtime/src/makefile.mk
+++ b/openmp/runtime/src/makefile.mk
@@ -709,7 +709,8 @@ else # norm or prof
kmp_atomic \
kmp_csupport \
kmp_debug \
- kmp_itt \
+ kmp_debugger \
+ kmp_itt \
$(empty)
ifeq "$(USE_ITT_NOTIFY)" "1"
lib_c_items += ittnotify_static