Diffstat (limited to 'openmp/runtime/src/kmp_global.cpp')
-rw-r--r-- | openmp/runtime/src/kmp_global.cpp | 537
1 file changed, 284 insertions(+), 253 deletions(-)
diff --git a/openmp/runtime/src/kmp_global.cpp b/openmp/runtime/src/kmp_global.cpp
index 623c439c9cb..960c4b6f0d8 100644
--- a/openmp/runtime/src/kmp_global.cpp
+++ b/openmp/runtime/src/kmp_global.cpp
@@ -19,7 +19,7 @@ kmp_key_t __kmp_gtid_threadprivate_key;

 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
-kmp_cpuinfo_t __kmp_cpuinfo = { 0 }; // Not initialized
+kmp_cpuinfo_t __kmp_cpuinfo = {0}; // Not initialized
 #endif

 #if KMP_STATS_ENABLED
@@ -27,11 +27,12 @@ kmp_cpuinfo_t __kmp_cpuinfo = { 0 }; // Not initialized
 // lock for modifying the global __kmp_stats_list
 kmp_tas_lock_t __kmp_stats_lock;
-// global list of per thread stats, the head is a sentinel node which accumulates all stats produced before __kmp_create_worker is called.
-kmp_stats_list* __kmp_stats_list;
+// global list of per thread stats, the head is a sentinel node which
+// accumulates all stats produced before __kmp_create_worker is called.
+kmp_stats_list *__kmp_stats_list;

 // thread local pointer to stats node within list
-__thread kmp_stats_list* __kmp_stats_thread_ptr = NULL;
+__thread kmp_stats_list *__kmp_stats_thread_ptr = NULL;

 // gives reference tick for all events (considered the 0 tick)
 tsc_tick_count __kmp_stats_start_time;
@@ -40,176 +41,196 @@ tsc_tick_count __kmp_stats_start_time;
 /* ----------------------------------------------------- */
 /* INITIALIZATION VARIABLES */
 /* they are syncronized to write during init, but read anytime */
-volatile int __kmp_init_serial = FALSE;
-volatile int __kmp_init_gtid = FALSE;
-volatile int __kmp_init_common = FALSE;
-volatile int __kmp_init_middle = FALSE;
-volatile int __kmp_init_parallel = FALSE;
+volatile int __kmp_init_serial = FALSE;
+volatile int __kmp_init_gtid = FALSE;
+volatile int __kmp_init_common = FALSE;
+volatile int __kmp_init_middle = FALSE;
+volatile int __kmp_init_parallel = FALSE;
 #if KMP_USE_MONITOR
-volatile int __kmp_init_monitor = 0; /* 1 - launched, 2 - actually started (Windows* OS only) */
+volatile int __kmp_init_monitor =
+    0; /* 1 - launched, 2 - actually started (Windows* OS only) */
 #endif
-volatile int __kmp_init_user_locks = FALSE;
+volatile int __kmp_init_user_locks = FALSE;

 /* list of address of allocated caches for commons */
-kmp_cached_addr_t *__kmp_threadpriv_cache_list = NULL;
+kmp_cached_addr_t *__kmp_threadpriv_cache_list = NULL;

-int __kmp_init_counter = 0;
-int __kmp_root_counter = 0;
-int __kmp_version = 0;
+int __kmp_init_counter = 0;
+int __kmp_root_counter = 0;
+int __kmp_version = 0;

-volatile kmp_uint32 __kmp_team_counter = 0;
-volatile kmp_uint32 __kmp_task_counter = 0;
+volatile kmp_uint32 __kmp_team_counter = 0;
+volatile kmp_uint32 __kmp_task_counter = 0;

-unsigned int __kmp_init_wait = KMP_DEFAULT_INIT_WAIT; /* initial number of spin-tests */
-unsigned int __kmp_next_wait = KMP_DEFAULT_NEXT_WAIT; /* susequent number of spin-tests */
+unsigned int __kmp_init_wait =
+    KMP_DEFAULT_INIT_WAIT; /* initial number of spin-tests */
+unsigned int __kmp_next_wait =
+    KMP_DEFAULT_NEXT_WAIT; /* susequent number of spin-tests */

-size_t __kmp_stksize = KMP_DEFAULT_STKSIZE;
+size_t __kmp_stksize = KMP_DEFAULT_STKSIZE;
 #if KMP_USE_MONITOR
-size_t __kmp_monitor_stksize = 0; // auto adjust
+size_t __kmp_monitor_stksize = 0; // auto adjust
 #endif
-size_t __kmp_stkoffset = KMP_DEFAULT_STKOFFSET;
-int __kmp_stkpadding = KMP_MIN_STKPADDING;
+size_t __kmp_stkoffset = KMP_DEFAULT_STKOFFSET;
+int __kmp_stkpadding = KMP_MIN_STKPADDING;

-size_t __kmp_malloc_pool_incr = KMP_DEFAULT_MALLOC_POOL_INCR;
+size_t __kmp_malloc_pool_incr = KMP_DEFAULT_MALLOC_POOL_INCR;

-/* Barrier method defaults, settings, and strings */
-/* branch factor = 2^branch_bits (only relevant for tree and hyper barrier types) */
+// Barrier method defaults, settings, and strings.
+// branch factor = 2^branch_bits (only relevant for tree & hyper barrier types)
 #if KMP_ARCH_X86_64
-kmp_uint32 __kmp_barrier_gather_bb_dflt = 2; /* branch_factor = 4 */ /* hyper2: C78980 */
-kmp_uint32 __kmp_barrier_release_bb_dflt = 2; /* branch_factor = 4 */ /* hyper2: C78980 */
+kmp_uint32 __kmp_barrier_gather_bb_dflt = 2;
+/* branch_factor = 4 */ /* hyper2: C78980 */
+kmp_uint32 __kmp_barrier_release_bb_dflt = 2;
+/* branch_factor = 4 */ /* hyper2: C78980 */
 #else
-kmp_uint32 __kmp_barrier_gather_bb_dflt = 2; /* branch_factor = 4 */ /* communication in core for MIC */
-kmp_uint32 __kmp_barrier_release_bb_dflt = 2; /* branch_factor = 4 */ /* communication in core for MIC */
+kmp_uint32 __kmp_barrier_gather_bb_dflt = 2;
+/* branch_factor = 4 */ /* communication in core for MIC */
+kmp_uint32 __kmp_barrier_release_bb_dflt = 2;
+/* branch_factor = 4 */ /* communication in core for MIC */
 #endif // KMP_ARCH_X86_64

 #if KMP_ARCH_X86_64
-kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = bp_hyper_bar; /* hyper2: C78980 */
-kmp_bar_pat_e __kmp_barrier_release_pat_dflt = bp_hyper_bar; /* hyper2: C78980 */
+kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = bp_hyper_bar; /* hyper2: C78980 */
+kmp_bar_pat_e __kmp_barrier_release_pat_dflt =
+    bp_hyper_bar; /* hyper2: C78980 */
 #else
-kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = bp_linear_bar;
+kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = bp_linear_bar;
 kmp_bar_pat_e __kmp_barrier_release_pat_dflt = bp_linear_bar;
 #endif
-kmp_uint32 __kmp_barrier_gather_branch_bits [ bs_last_barrier ] = { 0 };
-kmp_uint32 __kmp_barrier_release_branch_bits [ bs_last_barrier ] = { 0 };
-kmp_bar_pat_e __kmp_barrier_gather_pattern [ bs_last_barrier ] = { bp_linear_bar };
-kmp_bar_pat_e __kmp_barrier_release_pattern [ bs_last_barrier ] = { bp_linear_bar };
-char const *__kmp_barrier_branch_bit_env_name [ bs_last_barrier ] =
- { "KMP_PLAIN_BARRIER", "KMP_FORKJOIN_BARRIER"
- #if KMP_FAST_REDUCTION_BARRIER
- , "KMP_REDUCTION_BARRIER"
- #endif // KMP_FAST_REDUCTION_BARRIER
- };
-char const *__kmp_barrier_pattern_env_name [ bs_last_barrier ] =
- { "KMP_PLAIN_BARRIER_PATTERN", "KMP_FORKJOIN_BARRIER_PATTERN"
- #if KMP_FAST_REDUCTION_BARRIER
- , "KMP_REDUCTION_BARRIER_PATTERN"
- #endif // KMP_FAST_REDUCTION_BARRIER
- };
-char const *__kmp_barrier_type_name [ bs_last_barrier ] =
- { "plain", "forkjoin"
- #if KMP_FAST_REDUCTION_BARRIER
- , "reduction"
- #endif // KMP_FAST_REDUCTION_BARRIER
- };
-char const *__kmp_barrier_pattern_name[bp_last_bar] = {"linear","tree","hyper","hierarchical"};
-
-int __kmp_allThreadsSpecified = 0;
-size_t __kmp_align_alloc = CACHE_LINE;
-
-
-int __kmp_generate_warnings = kmp_warnings_low;
-int __kmp_reserve_warn = 0;
-int __kmp_xproc = 0;
-int __kmp_avail_proc = 0;
-size_t __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
-int __kmp_sys_max_nth = KMP_MAX_NTH;
-int __kmp_max_nth = 0;
-int __kmp_threads_capacity = 0;
-int __kmp_dflt_team_nth = 0;
-int __kmp_dflt_team_nth_ub = 0;
-int __kmp_tp_capacity = 0;
-int __kmp_tp_cached = 0;
-int __kmp_dflt_nested = FALSE;
-int __kmp_dispatch_num_buffers = KMP_DFLT_DISP_NUM_BUFF;
-int __kmp_dflt_max_active_levels = KMP_MAX_ACTIVE_LEVELS_LIMIT; /* max_active_levels limit */
+kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier] = {0};
+kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier] = {0};
+kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier] = {bp_linear_bar};
+kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier] = {bp_linear_bar};
+char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier] = {
+    "KMP_PLAIN_BARRIER", "KMP_FORKJOIN_BARRIER"
+#if KMP_FAST_REDUCTION_BARRIER
+    ,
+    "KMP_REDUCTION_BARRIER"
+#endif // KMP_FAST_REDUCTION_BARRIER
+};
+char const *__kmp_barrier_pattern_env_name[bs_last_barrier] = {
+    "KMP_PLAIN_BARRIER_PATTERN", "KMP_FORKJOIN_BARRIER_PATTERN"
+#if KMP_FAST_REDUCTION_BARRIER
+    ,
+    "KMP_REDUCTION_BARRIER_PATTERN"
+#endif // KMP_FAST_REDUCTION_BARRIER
+};
+char const *__kmp_barrier_type_name[bs_last_barrier] = {"plain", "forkjoin"
+#if KMP_FAST_REDUCTION_BARRIER
+    ,
+    "reduction"
+#endif // KMP_FAST_REDUCTION_BARRIER
+};
+char const *__kmp_barrier_pattern_name[bp_last_bar] = {"linear", "tree",
+    "hyper", "hierarchical"};
+
+int __kmp_allThreadsSpecified = 0;
+size_t __kmp_align_alloc = CACHE_LINE;
+
+
+int __kmp_generate_warnings = kmp_warnings_low;
+int __kmp_reserve_warn = 0;
+int __kmp_xproc = 0;
+int __kmp_avail_proc = 0;
+size_t __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
+int __kmp_sys_max_nth = KMP_MAX_NTH;
+int __kmp_max_nth = 0;
+int __kmp_threads_capacity = 0;
+int __kmp_dflt_team_nth = 0;
+int __kmp_dflt_team_nth_ub = 0;
+int __kmp_tp_capacity = 0;
+int __kmp_tp_cached = 0;
+int __kmp_dflt_nested = FALSE;
+int __kmp_dispatch_num_buffers = KMP_DFLT_DISP_NUM_BUFF;
+int __kmp_dflt_max_active_levels =
+    KMP_MAX_ACTIVE_LEVELS_LIMIT; /* max_active_levels limit */
 #if KMP_NESTED_HOT_TEAMS
-int __kmp_hot_teams_mode = 0; /* 0 - free extra threads when reduced */
- /* 1 - keep extra threads when reduced */
-int __kmp_hot_teams_max_level = 1; /* nesting level of hot teams */
+int __kmp_hot_teams_mode = 0; /* 0 - free extra threads when reduced */
+/* 1 - keep extra threads when reduced */
+int __kmp_hot_teams_max_level = 1; /* nesting level of hot teams */
 #endif
 enum library_type __kmp_library = library_none;
-enum sched_type __kmp_sched = kmp_sch_default; /* scheduling method for runtime scheduling */
-enum sched_type __kmp_static = kmp_sch_static_greedy; /* default static scheduling method */
-enum sched_type __kmp_guided = kmp_sch_guided_iterative_chunked; /* default guided scheduling method */
-enum sched_type __kmp_auto = kmp_sch_guided_analytical_chunked; /* default auto scheduling method */
-int __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
+enum sched_type __kmp_sched =
+    kmp_sch_default; /* scheduling method for runtime scheduling */
+enum sched_type __kmp_static =
+    kmp_sch_static_greedy; /* default static scheduling method */
+enum sched_type __kmp_guided =
+    kmp_sch_guided_iterative_chunked; /* default guided scheduling method */
+enum sched_type __kmp_auto =
+    kmp_sch_guided_analytical_chunked; /* default auto scheduling method */
+int __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
 #if KMP_USE_MONITOR
-int __kmp_monitor_wakeups = KMP_MIN_MONITOR_WAKEUPS;
-int __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME( KMP_DEFAULT_BLOCKTIME, KMP_MIN_MONITOR_WAKEUPS );
+int __kmp_monitor_wakeups = KMP_MIN_MONITOR_WAKEUPS;
+int __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(KMP_DEFAULT_BLOCKTIME,
+    KMP_MIN_MONITOR_WAKEUPS);
 #endif
 #ifdef KMP_ADJUST_BLOCKTIME
-int __kmp_zero_bt = FALSE;
+int __kmp_zero_bt = FALSE;
 #endif /* KMP_ADJUST_BLOCKTIME */
 #ifdef KMP_DFLT_NTH_CORES
-int __kmp_ncores = 0;
+int __kmp_ncores = 0;
 #endif
-int __kmp_chunk = 0;
-int __kmp_abort_delay = 0;
+int __kmp_chunk = 0;
+int __kmp_abort_delay = 0;
 #if KMP_OS_LINUX && defined(KMP_TDATA_GTID)
-int __kmp_gtid_mode = 3; /* use __declspec(thread) TLS to store gtid */
-int __kmp_adjust_gtid_mode = FALSE;
+int __kmp_gtid_mode = 3; /* use __declspec(thread) TLS to store gtid */
+int __kmp_adjust_gtid_mode = FALSE;
 #elif KMP_OS_WINDOWS
-int __kmp_gtid_mode = 2; /* use TLS functions to store gtid */
-int __kmp_adjust_gtid_mode = FALSE;
+int __kmp_gtid_mode = 2; /* use TLS functions to store gtid */
+int __kmp_adjust_gtid_mode = FALSE;
 #else
-int __kmp_gtid_mode = 0; /* select method to get gtid based on #threads */
-int __kmp_adjust_gtid_mode = TRUE;
+int __kmp_gtid_mode = 0; /* select method to get gtid based on #threads */
+int __kmp_adjust_gtid_mode = TRUE;
 #endif /* KMP_OS_LINUX && defined(KMP_TDATA_GTID) */
 #ifdef KMP_TDATA_GTID
 #if KMP_OS_WINDOWS
 __declspec(thread) int __kmp_gtid = KMP_GTID_DNE;
 #else
 __thread int __kmp_gtid = KMP_GTID_DNE;
-#endif /* KMP_OS_WINDOWS - workaround because Intel(R) Many Integrated Core compiler 20110316 doesn't accept __declspec */
+#endif /* KMP_OS_WINDOWS - workaround because Intel(R) Many Integrated Core \
+          compiler 20110316 doesn't accept __declspec */
 #endif /* KMP_TDATA_GTID */
-int __kmp_tls_gtid_min = INT_MAX;
-int __kmp_foreign_tp = TRUE;
+int __kmp_tls_gtid_min = INT_MAX;
+int __kmp_foreign_tp = TRUE;
 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
-int __kmp_inherit_fp_control = TRUE;
-kmp_int16 __kmp_init_x87_fpu_control_word = 0;
-kmp_uint32 __kmp_init_mxcsr = 0;
+int __kmp_inherit_fp_control = TRUE;
+kmp_int16 __kmp_init_x87_fpu_control_word = 0;
+kmp_uint32 __kmp_init_mxcsr = 0;
 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
 #ifdef USE_LOAD_BALANCE
-double __kmp_load_balance_interval = 1.0;
+double __kmp_load_balance_interval = 1.0;
 #endif /* USE_LOAD_BALANCE */
-kmp_nested_nthreads_t __kmp_nested_nth = { NULL, 0, 0 };
+kmp_nested_nthreads_t __kmp_nested_nth = {NULL, 0, 0};
 #if KMP_USE_ADAPTIVE_LOCKS
-kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params = { 1, 1024 }; // TODO: tune it!
+kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params = {
+    1, 1024}; // TODO: tune it!
 #if KMP_DEBUG_ADAPTIVE_LOCKS
-char * __kmp_speculative_statsfile = "-";
+char *__kmp_speculative_statsfile = "-";
 #endif
 #endif // KMP_USE_ADAPTIVE_LOCKS
 #if OMP_40_ENABLED
-int __kmp_display_env = FALSE;
-int __kmp_display_env_verbose = FALSE;
-int __kmp_omp_cancellation = FALSE;
+int __kmp_display_env = FALSE;
+int __kmp_display_env_verbose = FALSE;
+int __kmp_omp_cancellation = FALSE;
 #endif
 /* map OMP 3.0 schedule types with our internal schedule types */
-enum sched_type __kmp_sch_map[ kmp_sched_upper - kmp_sched_lower_ext + kmp_sched_upper_std - kmp_sched_lower - 2 ] = {
- kmp_sch_static_chunked, // ==> kmp_sched_static = 1
- kmp_sch_dynamic_chunked, // ==> kmp_sched_dynamic = 2
- kmp_sch_guided_chunked, // ==> kmp_sched_guided = 3
- kmp_sch_auto, // ==> kmp_sched_auto = 4
- kmp_sch_trapezoidal // ==> kmp_sched_trapezoidal = 101
- // will likely not used, introduced here just to debug the code
- // of public intel extension schedules
+enum sched_type __kmp_sch_map[kmp_sched_upper - kmp_sched_lower_ext +
+    kmp_sched_upper_std - kmp_sched_lower - 2] = {
+    kmp_sch_static_chunked, // ==> kmp_sched_static = 1
+    kmp_sch_dynamic_chunked, // ==> kmp_sched_dynamic = 2
+    kmp_sch_guided_chunked, // ==> kmp_sched_guided = 3
+    kmp_sch_auto, // ==> kmp_sched_auto = 4
+    kmp_sch_trapezoidal // ==> kmp_sched_trapezoidal = 101
+    // will likely not be used, introduced here just to debug the code
+    // of public intel extension schedules
 };

 #if KMP_OS_LINUX
@@ -223,44 +244,45 @@ enum mic_type __kmp_mic_type = non_mic;

 #if KMP_AFFINITY_SUPPORTED
-KMPAffinity* __kmp_affinity_dispatch = NULL;
+KMPAffinity *__kmp_affinity_dispatch = NULL;

-# if KMP_USE_HWLOC
+#if KMP_USE_HWLOC
 int __kmp_hwloc_error = FALSE;
 hwloc_topology_t __kmp_hwloc_topology = NULL;
-# endif
+#endif

-# if KMP_OS_WINDOWS
-# if KMP_GROUP_AFFINITY
+#if KMP_OS_WINDOWS
+#if KMP_GROUP_AFFINITY
 int __kmp_num_proc_groups = 1;
-# endif /* KMP_GROUP_AFFINITY */
+#endif /* KMP_GROUP_AFFINITY */
 kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount = NULL;
 kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount = NULL;
 kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity = NULL;
 kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity = NULL;
-# endif /* KMP_OS_WINDOWS */
+#endif /* KMP_OS_WINDOWS */

-size_t __kmp_affin_mask_size = 0;
+size_t __kmp_affin_mask_size = 0;
 enum affinity_type __kmp_affinity_type = affinity_default;
 enum affinity_gran __kmp_affinity_gran = affinity_gran_default;
-int __kmp_affinity_gran_levels = -1;
+int __kmp_affinity_gran_levels = -1;
 int __kmp_affinity_dups = TRUE;
-enum affinity_top_method __kmp_affinity_top_method = affinity_top_method_default;
-int __kmp_affinity_compact = 0;
-int __kmp_affinity_offset = 0;
-int __kmp_affinity_verbose = FALSE;
-int __kmp_affinity_warnings = TRUE;
-int __kmp_affinity_respect_mask = affinity_respect_mask_default;
-char * __kmp_affinity_proclist = NULL;
+enum affinity_top_method __kmp_affinity_top_method =
+    affinity_top_method_default;
+int __kmp_affinity_compact = 0;
+int __kmp_affinity_offset = 0;
+int __kmp_affinity_verbose = FALSE;
+int __kmp_affinity_warnings = TRUE;
+int __kmp_affinity_respect_mask = affinity_respect_mask_default;
+char *__kmp_affinity_proclist = NULL;
 kmp_affin_mask_t *__kmp_affinity_masks = NULL;
-unsigned __kmp_affinity_num_masks = 0;
+unsigned __kmp_affinity_num_masks = 0;

-char const * __kmp_cpuinfo_file = NULL;
+char const *__kmp_cpuinfo_file = NULL;

 #endif /* KMP_AFFINITY_SUPPORTED */

 #if OMP_40_ENABLED
-kmp_nested_proc_bind_t __kmp_nested_proc_bind = { NULL, 0, 0 };
+kmp_nested_proc_bind_t __kmp_nested_proc_bind = {NULL, 0, 0};
 int __kmp_affinity_num_places = 0;
 #endif
@@ -281,75 +303,87 @@ kmp_tasking_mode_t __kmp_tasking_mode = tskm_task_teams;
 kmp_int32 __kmp_max_task_priority = 0;
 #endif

-/* This check ensures that the compiler is passing the correct data type
- * for the flags formal parameter of the function kmpc_omp_task_alloc().
- * If the type is not a 4-byte type, then give an error message about
- * a non-positive length array pointing here. If that happens, the
- * kmp_tasking_flags_t structure must be redefined to have exactly 32 bits.
- */
-KMP_BUILD_ASSERT( sizeof(kmp_tasking_flags_t) == 4 );
+/* This check ensures that the compiler is passing the correct data type for the
+   flags formal parameter of the function kmpc_omp_task_alloc(). If the type is
+   not a 4-byte type, then give an error message about a non-positive length
+   array pointing here. If that happens, the kmp_tasking_flags_t structure must
+   be redefined to have exactly 32 bits. */
+KMP_BUILD_ASSERT(sizeof(kmp_tasking_flags_t) == 4);

-kmp_int32 __kmp_task_stealing_constraint = 1; /* Constrain task stealing by default */
+kmp_int32 __kmp_task_stealing_constraint =
+    1; /* Constrain task stealing by default */

 #ifdef DEBUG_SUSPEND
-int __kmp_suspend_count = 0;
+int __kmp_suspend_count = 0;
 #endif

-int __kmp_settings = FALSE;
-int __kmp_duplicate_library_ok = 0;
+int __kmp_settings = FALSE;
+int __kmp_duplicate_library_ok = 0;
 #if USE_ITT_BUILD
-int __kmp_forkjoin_frames = 1;
-int __kmp_forkjoin_frames_mode = 3;
+int __kmp_forkjoin_frames = 1;
+int __kmp_forkjoin_frames_mode = 3;
 #endif
-PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method = reduction_method_not_defined;
-int __kmp_determ_red = FALSE;
+PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method =
+    reduction_method_not_defined;
+int __kmp_determ_red = FALSE;

 #ifdef KMP_DEBUG
-int kmp_a_debug = 0;
-int kmp_b_debug = 0;
-int kmp_c_debug = 0;
-int kmp_d_debug = 0;
-int kmp_e_debug = 0;
-int kmp_f_debug = 0;
-int kmp_diag = 0;
+int kmp_a_debug = 0;
+int kmp_b_debug = 0;
+int kmp_c_debug = 0;
+int kmp_d_debug = 0;
+int kmp_e_debug = 0;
+int kmp_f_debug = 0;
+int kmp_diag = 0;
 #endif

 /* For debug information logging using rotating buffer */
-int __kmp_debug_buf = FALSE; /* TRUE means use buffer, FALSE means print to stderr */
-int __kmp_debug_buf_lines = KMP_DEBUG_BUF_LINES_INIT; /* Lines of debug stored in buffer */
-int __kmp_debug_buf_chars = KMP_DEBUG_BUF_CHARS_INIT; /* Characters allowed per line in buffer */
-int __kmp_debug_buf_atomic = FALSE; /* TRUE means use atomic update of buffer entry pointer */
-
-char *__kmp_debug_buffer = NULL; /* Debug buffer itself */
-int __kmp_debug_count = 0; /* Counter for number of lines printed in buffer so far */
-int __kmp_debug_buf_warn_chars = 0; /* Keep track of char increase recommended in warnings */
+int __kmp_debug_buf =
+    FALSE; /* TRUE means use buffer, FALSE means print to stderr */
+int __kmp_debug_buf_lines =
+    KMP_DEBUG_BUF_LINES_INIT; /* Lines of debug stored in buffer */
+int __kmp_debug_buf_chars =
+    KMP_DEBUG_BUF_CHARS_INIT; /* Characters allowed per line in buffer */
+int __kmp_debug_buf_atomic =
+    FALSE; /* TRUE means use atomic update of buffer entry pointer */
+
+char *__kmp_debug_buffer = NULL; /* Debug buffer itself */
+int __kmp_debug_count =
+    0; /* Counter for number of lines printed in buffer so far */
+int __kmp_debug_buf_warn_chars =
+    0; /* Keep track of char increase recommended in warnings */
 /* end rotating debug buffer */

 #ifdef KMP_DEBUG
-int __kmp_par_range; /* +1 => only go par for constructs in range */
- /* -1 => only go par for constructs outside range */
-char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN] = { '\0' };
-char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN] = { '\0' };
-int __kmp_par_range_lb = 0;
-int __kmp_par_range_ub = INT_MAX;
+int __kmp_par_range; /* +1 => only go par for constructs in range */
+/* -1 => only go par for constructs outside range */
+char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN] = {'\0'};
+char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN] = {'\0'};
+int __kmp_par_range_lb = 0;
+int __kmp_par_range_ub = INT_MAX;
 #endif /* KMP_DEBUG */

 /* For printing out dynamic storage map for threads and teams */
-int __kmp_storage_map = FALSE; /* True means print storage map for threads and teams */
-int __kmp_storage_map_verbose = FALSE; /* True means storage map includes placement info */
-int __kmp_storage_map_verbose_specified = FALSE;
-/* Initialize the library data structures when we fork a child process, defaults to TRUE */
-int __kmp_need_register_atfork = TRUE; /* At initialization, call pthread_atfork to install fork handler */
-int __kmp_need_register_atfork_specified = TRUE;
-
-int __kmp_env_chunk = FALSE; /* KMP_CHUNK specified? */
-int __kmp_env_stksize = FALSE; /* KMP_STACKSIZE specified? */
-int __kmp_env_omp_stksize = FALSE; /* OMP_STACKSIZE specified? */
-int __kmp_env_all_threads = FALSE;/* KMP_ALL_THREADS or KMP_MAX_THREADS specified? */
-int __kmp_env_omp_all_threads = FALSE;/* OMP_THREAD_LIMIT specified? */
-int __kmp_env_blocktime = FALSE; /* KMP_BLOCKTIME specified? */
-int __kmp_env_checks = FALSE; /* KMP_CHECKS specified? */
-int __kmp_env_consistency_check = FALSE; /* KMP_CONSISTENCY_CHECK specified? */
+int __kmp_storage_map =
+    FALSE; /* True means print storage map for threads and teams */
+int __kmp_storage_map_verbose =
+    FALSE; /* True means storage map includes placement info */
+int __kmp_storage_map_verbose_specified = FALSE;
+/* Initialize the library data structures when we fork a child process, defaults
+ * to TRUE */
+int __kmp_need_register_atfork =
+    TRUE; /* At initialization, call pthread_atfork to install fork handler */
+int __kmp_need_register_atfork_specified = TRUE;
+
+int __kmp_env_chunk = FALSE; /* KMP_CHUNK specified? */
+int __kmp_env_stksize = FALSE; /* KMP_STACKSIZE specified? */
+int __kmp_env_omp_stksize = FALSE; /* OMP_STACKSIZE specified? */
+int __kmp_env_all_threads =
+    FALSE; /* KMP_ALL_THREADS or KMP_MAX_THREADS specified? */
+int __kmp_env_omp_all_threads = FALSE; /* OMP_THREAD_LIMIT specified? */
+int __kmp_env_blocktime = FALSE; /* KMP_BLOCKTIME specified? */
+int __kmp_env_checks = FALSE; /* KMP_CHECKS specified? */
+int __kmp_env_consistency_check = FALSE; /* KMP_CONSISTENCY_CHECK specified? */

 kmp_uint32 __kmp_yield_init = KMP_INIT_WAIT;
 kmp_uint32 __kmp_yield_next = KMP_NEXT_WAIT;
@@ -360,42 +394,38 @@ kmp_uint32 __kmp_yielding_on = 1;
 #if KMP_OS_CNK
 kmp_uint32 __kmp_yield_cycle = 0;
 #else
-kmp_uint32 __kmp_yield_cycle = 1; /* Yield-cycle is on by default */
+kmp_uint32 __kmp_yield_cycle = 1; /* Yield-cycle is on by default */
 #endif
-kmp_int32 __kmp_yield_on_count = 10; /* By default, yielding is on for 10 monitor periods. */
-kmp_int32 __kmp_yield_off_count = 1; /* By default, yielding is off for 1 monitor periods. */
-/* ----------------------------------------------------- */
-
+kmp_int32 __kmp_yield_on_count =
+    10; /* By default, yielding is on for 10 monitor periods. */
+kmp_int32 __kmp_yield_off_count =
+    1; /* By default, yielding is off for 1 monitor periods. */

 /* ------------------------------------------------------ */
 /* STATE mostly syncronized with global lock */
 /* data written to rarely by masters, read often by workers */
-/*
- * SHALL WE EDIT THE COMMENT BELOW IN SOME WAY?
- * TODO: None of this global padding stuff works consistently because
- * the order of declaration is not necessarily correlated to storage order.
- * To fix this, all the important globals must be put in a big structure
- * instead.
- */
+/* TODO: None of this global padding stuff works consistently because the order
+   of declaration is not necessarily correlated to storage order. To fix this,
+   all the important globals must be put in a big structure instead. */
 KMP_ALIGN_CACHE
- kmp_info_t **__kmp_threads = NULL;
- kmp_root_t **__kmp_root = NULL;
+kmp_info_t **__kmp_threads = NULL;
+kmp_root_t **__kmp_root = NULL;

 /* data read/written to often by masters */
 KMP_ALIGN_CACHE
-volatile int __kmp_nth = 0;
-volatile int __kmp_all_nth = 0;
-int __kmp_thread_pool_nth = 0;
-volatile kmp_info_t *__kmp_thread_pool = NULL;
-volatile kmp_team_t *__kmp_team_pool = NULL;
+volatile int __kmp_nth = 0;
+volatile int __kmp_all_nth = 0;
+int __kmp_thread_pool_nth = 0;
+volatile kmp_info_t *__kmp_thread_pool = NULL;
+volatile kmp_team_t *__kmp_team_pool = NULL;

 KMP_ALIGN_CACHE
-volatile int __kmp_thread_pool_active_nth = 0;
+volatile int __kmp_thread_pool_active_nth = 0;

 /* -------------------------------------------------
  * GLOBAL/ROOT STATE */
 KMP_ALIGN_CACHE
-kmp_global_t __kmp_global = {{ 0 }};
+kmp_global_t __kmp_global = {{0}};

 /* ----------------------------------------------- */
 /* GLOBAL SYNCHRONIZATION LOCKS */
@@ -406,66 +436,72 @@ kmp_global_t __kmp_global = {{ 0 }};
  * false sharing if the alignment is not large enough for these locks */
 KMP_ALIGN_CACHE_INTERNODE
-kmp_bootstrap_lock_t __kmp_initz_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_initz_lock ); /* Control initializations */
+kmp_bootstrap_lock_t __kmp_initz_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER(
+    __kmp_initz_lock); /* Control initializations */
 KMP_ALIGN_CACHE_INTERNODE
 kmp_bootstrap_lock_t __kmp_forkjoin_lock; /* control fork/join access */
 KMP_ALIGN_CACHE_INTERNODE
-kmp_bootstrap_lock_t __kmp_exit_lock; /* exit() is not always thread-safe */
+kmp_bootstrap_lock_t __kmp_exit_lock; /* exit() is not always thread-safe */
 #if KMP_USE_MONITOR
 KMP_ALIGN_CACHE_INTERNODE
 kmp_bootstrap_lock_t __kmp_monitor_lock; /* control monitor thread creation */
 #endif
+/* used for the hack to allow threadprivate cache and __kmp_threads expansion
+   to co-exist */
 KMP_ALIGN_CACHE_INTERNODE
-kmp_bootstrap_lock_t __kmp_tp_cached_lock; /* used for the hack to allow threadprivate cache and __kmp_threads expansion to co-exist */
+kmp_bootstrap_lock_t __kmp_tp_cached_lock;
 KMP_ALIGN_CACHE_INTERNODE
-kmp_lock_t __kmp_global_lock; /* Control OS/global access */
+kmp_lock_t __kmp_global_lock; /* Control OS/global access */
 KMP_ALIGN_CACHE_INTERNODE
-kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */
+kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */
 KMP_ALIGN_CACHE_INTERNODE
-kmp_lock_t __kmp_debug_lock; /* Control I/O access for KMP_DEBUG */
+kmp_lock_t __kmp_debug_lock; /* Control I/O access for KMP_DEBUG */
 #else
 KMP_ALIGN_CACHE
-kmp_bootstrap_lock_t __kmp_initz_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_initz_lock ); /* Control initializations */
+kmp_bootstrap_lock_t __kmp_initz_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER(
+    __kmp_initz_lock); /* Control initializations */
 kmp_bootstrap_lock_t __kmp_forkjoin_lock; /* control fork/join access */
-kmp_bootstrap_lock_t __kmp_exit_lock; /* exit() is not always thread-safe */
+kmp_bootstrap_lock_t __kmp_exit_lock; /* exit() is not always thread-safe */
 #if KMP_USE_MONITOR
 kmp_bootstrap_lock_t __kmp_monitor_lock; /* control monitor thread creation */
 #endif
-kmp_bootstrap_lock_t __kmp_tp_cached_lock; /* used for the hack to allow threadprivate cache and __kmp_threads expansion to co-exist */
+/* used for the hack to allow threadprivate cache and __kmp_threads expansion
+   to co-exist */
+kmp_bootstrap_lock_t __kmp_tp_cached_lock;
 KMP_ALIGN(128)
-kmp_lock_t __kmp_global_lock; /* Control OS/global access */
+kmp_lock_t __kmp_global_lock; /* Control OS/global access */
 KMP_ALIGN(128)
-kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */
+kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */
 KMP_ALIGN(128)
-kmp_lock_t __kmp_debug_lock; /* Control I/O access for KMP_DEBUG */
+kmp_lock_t __kmp_debug_lock; /* Control I/O access for KMP_DEBUG */
 #endif

 /* ----------------------------------------------- */

 #if KMP_HANDLE_SIGNALS
- /*
- Signal handling is disabled by default, because it confuses users: In case of sigsegv
- (or other trouble) in user code signal handler catches the signal, which then "appears" in
- the monitor thread (when the monitor executes raise() function). Users see signal in the
- monitor thread and blame OpenMP RTL.
-
- Grant said signal handling required on some older OSes (Irix?) supported by KAI, because
- bad applications hung but not aborted. Currently it is not a problem for Linux* OS, OS X* and
- Windows* OS.
-
- Grant: Found new hangs for EL4, EL5, and a Fedora Core machine. So I'm putting
- the default back for now to see if that fixes hangs on those machines.
-
- 2010-04013 Lev: It was a bug in Fortran RTL. Fortran RTL prints a kind of stack backtrace
- when program is aborting, but the code is not signal-safe. When multiple signals raised at
- the same time (which occurs in dynamic negative tests because all the worker threads detects
- the same error), Fortran RTL may hang. The bug finally fixed in Fortran RTL library provided
- by Steve R., and will be available soon.
- */
- int __kmp_handle_signals = FALSE;
+/* Signal handling is disabled by default, because it confuses users: In case of
+   sigsegv (or other trouble) in user code signal handler catches the signal,
+   which then "appears" in the monitor thread (when the monitor executes raise()
+   function). Users see signal in the monitor thread and blame OpenMP RTL.
+
+   Grant said signal handling required on some older OSes (Irix?) supported by
+   KAI, because bad applications hung but not aborted. Currently it is not a
+   problem for Linux* OS, OS X* and Windows* OS.
+
+   Grant: Found new hangs for EL4, EL5, and a Fedora Core machine. So I'm
+   putting the default back for now to see if that fixes hangs on those
+   machines.
+
+   2010-04013 Lev: It was a bug in Fortran RTL. Fortran RTL prints a kind of
+   stack backtrace when program is aborting, but the code is not signal-safe.
+   When multiple signals raised at the same time (which occurs in dynamic
+   negative tests because all the worker threads detects the same error),
+   Fortran RTL may hang. The bug finally fixed in Fortran RTL library provided
+   by Steve R., and will be available soon. */
+int __kmp_handle_signals = FALSE;
 #endif

 /* ----------------------------------------------- */
@@ -474,26 +510,21 @@ kmp_key_t __kmp_tv_key = 0;
 #endif

 /* ------------------------------------------------------------------------ */
-/* ------------------------------------------------------------------------ */

 #ifdef DEBUG_SUSPEND
-int
-get_suspend_count_( void ) {
- int count = __kmp_suspend_count;
- __kmp_suspend_count = 0;
- return count;
-}
-void
-set_suspend_count_( int * value ) {
- __kmp_suspend_count = *value;
+int get_suspend_count_(void) {
+  int count = __kmp_suspend_count;
+  __kmp_suspend_count = 0;
+  return count;
 }
+void set_suspend_count_(int *value) { __kmp_suspend_count = *value; }
 #endif

 // Symbols for MS mutual detection.
 int _You_must_link_with_exactly_one_OpenMP_library = 1;
-int _You_must_link_with_Intel_OpenMP_library = 1;
-#if KMP_OS_WINDOWS && ( KMP_VERSION_MAJOR > 4 )
- int _You_must_link_with_Microsoft_OpenMP_library = 1;
+int _You_must_link_with_Intel_OpenMP_library = 1;
+#if KMP_OS_WINDOWS && (KMP_VERSION_MAJOR > 4)
+int _You_must_link_with_Microsoft_OpenMP_library = 1;
 #endif

 // end of file //