summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJonathan Peyton <jonathan.l.peyton@intel.com>2015-10-20 19:21:04 +0000
committerJonathan Peyton <jonathan.l.peyton@intel.com>2015-10-20 19:21:04 +0000
commit0dd75fdfa96411081ccd94840163460f4dbec0ba (patch)
tree8925aadbb70c245550bac197a5b165e9ad074615
parent55f027b1d4428d3d4e214be28f959465972fa597 (diff)
downloadbcm5719-llvm-0dd75fdfa96411081ccd94840163460f4dbec0ba.tar.gz
bcm5719-llvm-0dd75fdfa96411081ccd94840163460f4dbec0ba.zip
Removed the zeroing of th.th_task_state for the master thread at the start of a nested parallel region.
The th.th_task_state of the master thread at the start of a nested parallel region should not be zeroed in __kmp_allocate_team(), because __kmp_fork_call() later pushes it onto the stack of states for re-use after the nested region exits. It is zeroed only after being pushed onto that stack. Differential Revision: http://reviews.llvm.org/D13702 llvm-svn: 250847
-rw-r--r--openmp/runtime/src/kmp_runtime.c16
1 file changed, 7 insertions, 9 deletions
diff --git a/openmp/runtime/src/kmp_runtime.c b/openmp/runtime/src/kmp_runtime.c
index 6e6334ea92d..b5b09284975 100644
--- a/openmp/runtime/src/kmp_runtime.c
+++ b/openmp/runtime/src/kmp_runtime.c
@@ -4891,12 +4891,6 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
}
# endif /* KMP_AFFINITY_SUPPORTED */
#endif /* OMP_40_ENABLED */
-
- if (level) {
- for(f = 0; f < new_nproc; ++f) {
- team->t.t_threads[f]->th.th_task_state = 0;
- }
- }
}
else if( team->t.t_nproc > new_nproc ) {
KA_TRACE( 20, ("__kmp_allocate_team: decreasing hot team thread count to %d\n", new_nproc ));
@@ -4959,7 +4953,8 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
/* update the remaining threads */
if (level) {
- for(f = 0; f < new_nproc; ++f) {
+ team->t.t_threads[0]->th.th_team_nproc = new_nproc;
+ for(f = 1; f < new_nproc; ++f) {
team->t.t_threads[f]->th.th_team_nproc = new_nproc;
team->t.t_threads[f]->th.th_task_state = 0;
}
@@ -5100,8 +5095,12 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
/* reinitialize the threads */
KMP_DEBUG_ASSERT(team->t.t_nproc == new_nproc);
if (level) {
+ int old_state = team->t.t_threads[0]->th.th_task_state;
for (f=0; f < team->t.t_nproc; ++f)
__kmp_initialize_info( team->t.t_threads[ f ], team, f, __kmp_gtid_from_tid( f, team ) );
+ // th_task_state for master thread will be put in stack of states in __kmp_fork_call()
+ // before zeroing, for workers it was just zeroed in __kmp_initialize_info()
+ team->t.t_threads[0]->th.th_task_state = old_state;
}
else {
int old_state = team->t.t_threads[0]->th.th_task_state;
@@ -5141,10 +5140,9 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
#endif /* OMP_40_ENABLED */
#if KMP_NESTED_HOT_TEAMS
if( level ) {
- // Sync task (TODO: and barrier?) state for nested hot teams, not needed for outermost hot team.
+ // Sync barrier state for nested hot teams, not needed for outermost hot team.
for( f = 1; f < new_nproc; ++f ) {
kmp_info_t *thr = team->t.t_threads[f];
- thr->th.th_task_state = 0;
int b;
kmp_balign_t * balign = thr->th.th_bar;
for( b = 0; b < bs_last_barrier; ++ b ) {
OpenPOWER on IntegriCloud