| author | Jonas Hahnfeld <Hahnfeld@itc.rwth-aachen.de> | 2017-02-15 08:14:22 +0000 |
|---|---|---|
| committer | Jonas Hahnfeld <Hahnfeld@itc.rwth-aachen.de> | 2017-02-15 08:14:22 +0000 |
| commit | 35801a2470e9fe5e4a296f12ff86b95ea411b875 (patch) | |
| tree | f6c4edcc20384224fd4795ed78bbd04fbcba66de /openmp/runtime/src | |
| parent | b8a4f255dd63ed1ed6a1b4a0f8228f86e0db991c (diff) | |
| download | bcm5719-llvm-35801a2470e9fe5e4a296f12ff86b95ea411b875.tar.gz bcm5719-llvm-35801a2470e9fe5e4a296f12ff86b95ea411b875.zip | |
[OpenMP] New TSan annotations to remove false positives on reductions and barriers
Added new ThreadSanitizer annotations to remove false positives with OpenMP reductions.
Cleaned up the TSan annotations header file by removing unused annotations.
Patch by Simone Atzeni!
Differential Revision: https://reviews.llvm.org/D29202
llvm-svn: 295158
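For context, the renamed `ANNOTATE_BARRIER_BEGIN`/`ANNOTATE_BARRIER_END` macros are thin wrappers over ThreadSanitizer's dynamic-annotation entry points: TSan pairs a `HappensBefore` call on one thread with a later `HappensAfter` call on the same address in another thread and records a synchronization edge between them. A minimal sketch of the expansion when `TSAN_SUPPORT` is enabled (declarations abbreviated from tsan_annotations.h; the `uptr` typedef is assumed to be `unsigned long`, matching the runtime's usage):

```cpp
// Sketch only, not a verbatim copy of the header.
typedef unsigned long uptr; // assumption: the runtime's uptr typedef

// Supplied by the TSan runtime when the application is built with
// -fsanitize=thread; the OpenMP runtime provides no-op fallbacks otherwise.
extern "C" void AnnotateHappensBefore(const char *f, int l, uptr addr);
extern "C" void AnnotateHappensAfter(const char *f, int l, uptr addr);

// Releasing side: placed just before the releasing write to a barrier flag.
#define ANNOTATE_BARRIER_BEGIN(addr) \
  AnnotateHappensBefore(__FILE__, __LINE__, (uptr)(addr))
// Waiting side: placed right after the wait on that flag returns.
#define ANNOTATE_BARRIER_END(addr) \
  AnnotateHappensAfter(__FILE__, __LINE__, (uptr)(addr))
```

In the hunks below, the releasing thread annotates its own `kmp_info_t` (e.g. `ANNOTATE_BARRIER_BEGIN(this_thr)`) and the waiting thread annotates that same pointer from its side (e.g. `ANNOTATE_BARRIER_END(other_threads[i])` or `ANNOTATE_BARRIER_END(child_thr)`), so every release/wait pair synchronizes on a distinct address.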
Diffstat (limited to 'openmp/runtime/src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | openmp/runtime/src/kmp_barrier.cpp | 35 |
| -rw-r--r-- | openmp/runtime/src/tsan_annotations.h | 17 |
2 files changed, 31 insertions, 21 deletions
```diff
diff --git a/openmp/runtime/src/kmp_barrier.cpp b/openmp/runtime/src/kmp_barrier.cpp
index 41062453f92..d08873fdcf3 100644
--- a/openmp/runtime/src/kmp_barrier.cpp
+++ b/openmp/runtime/src/kmp_barrier.cpp
@@ -74,6 +74,7 @@ __kmp_linear_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid
         // Mark arrival to master thread
         /* After performing this write, a worker thread may not assume that the team is
            valid any more - it could be deallocated by the master thread at any time. */
+        ANNOTATE_BARRIER_BEGIN(this_thr);
         kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[0]);
         flag.release();
     } else {
@@ -99,6 +100,7 @@ __kmp_linear_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid
             kmp_flag_64 flag(&other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state);
             flag.wait(this_thr, FALSE
                       USE_ITT_BUILD_ARG(itt_sync_obj) );
+            ANNOTATE_BARRIER_END(other_threads[i]);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
             // Barrier imbalance - write min of the thread time and the other thread time to the thread.
             if (__kmp_forkjoin_frames_mode == 2) {
@@ -175,6 +177,7 @@ __kmp_linear_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gti
                               &other_threads[i]->th.th_bar[bt].bb.b_go,
                               other_threads[i]->th.th_bar[bt].bb.b_go,
                               other_threads[i]->th.th_bar[bt].bb.b_go + KMP_BARRIER_STATE_BUMP));
+                ANNOTATE_BARRIER_BEGIN(other_threads[i]);
                 kmp_flag_64 flag(&other_threads[i]->th.th_bar[bt].bb.b_go, other_threads[i]);
                 flag.release();
             }
@@ -185,6 +188,7 @@ __kmp_linear_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gti
         kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
         flag.wait(this_thr, TRUE
                   USE_ITT_BUILD_ARG(itt_sync_obj) );
+        ANNOTATE_BARRIER_END(this_thr);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
         if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
             // In a fork barrier; cannot get the object reliably (or ITTNOTIFY is disabled)
@@ -268,6 +272,7 @@ __kmp_tree_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
             kmp_flag_64 flag(&child_bar->b_arrived, new_state);
             flag.wait(this_thr, FALSE
                       USE_ITT_BUILD_ARG(itt_sync_obj) );
+            ANNOTATE_BARRIER_END(child_thr);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
             // Barrier imbalance - write min of the thread time and a child time to the thread.
             if (__kmp_forkjoin_frames_mode == 2) {
@@ -302,6 +307,7 @@ __kmp_tree_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
         // Mark arrival to parent thread
         /* After performing this write, a worker thread may not assume that the team is
            valid any more - it could be deallocated by the master thread at any time. */
+        ANNOTATE_BARRIER_BEGIN(this_thr);
         kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[parent_tid]);
         flag.release();
     } else {
@@ -340,6 +346,7 @@ __kmp_tree_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
         kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
         flag.wait(this_thr, TRUE
                   USE_ITT_BUILD_ARG(itt_sync_obj) );
+        ANNOTATE_BARRIER_END(this_thr);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
         if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
             // In fork barrier where we could not get the object reliably (or ITTNOTIFY is disabled)
@@ -408,6 +415,7 @@ __kmp_tree_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
                           child_tid, &child_bar->b_go, child_bar->b_go,
                           child_bar->b_go + KMP_BARRIER_STATE_BUMP));
             // Release child from barrier
+            ANNOTATE_BARRIER_BEGIN(child_thr);
             kmp_flag_64 flag(&child_bar->b_go, child_thr);
             flag.release();
             child++;
@@ -468,6 +476,7 @@ __kmp_hyper_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
             /* After performing this write (in the last iteration of the enclosing for loop),
                a worker thread may not assume that the team is valid any more - it could be
                deallocated by the master thread at any time. */
+            ANNOTATE_BARRIER_BEGIN(this_thr);
             p_flag.set_waiter(other_threads[parent_tid]);
             p_flag.release();
             break;
@@ -495,6 +504,7 @@ __kmp_hyper_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid,
             kmp_flag_64 c_flag(&child_bar->b_arrived, new_state);
             c_flag.wait(this_thr, FALSE
                         USE_ITT_BUILD_ARG(itt_sync_obj) );
+            ANNOTATE_BARRIER_END(child_thr);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
             // Barrier imbalance - write min of the thread time and a child time to the thread.
             if (__kmp_forkjoin_frames_mode == 2) {
@@ -568,6 +578,7 @@ __kmp_hyper_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid
         kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
         flag.wait(this_thr, TRUE
                   USE_ITT_BUILD_ARG(itt_sync_obj) );
+        ANNOTATE_BARRIER_END(this_thr);
 #if USE_ITT_BUILD && USE_ITT_NOTIFY
         if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
             // In fork barrier where we could not get the object reliably
@@ -655,6 +666,7 @@ __kmp_hyper_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid
                           child_tid, &child_bar->b_go, child_bar->b_go,
                           child_bar->b_go + KMP_BARRIER_STATE_BUMP));
             // Release child from barrier
+            ANNOTATE_BARRIER_BEGIN(child_thr);
             kmp_flag_64 flag(&child_bar->b_go, child_thr);
             flag.release();
         }
@@ -788,6 +800,7 @@ __kmp_hierarchical_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr,
                 KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
                                gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team), team->t.t_id, child_tid));
+                ANNOTATE_BARRIER_END(other_threads[child_tid]);
                 (*reduce)(this_thr->th.th_local.reduce_data,
                           other_threads[child_tid]->th.th_local.reduce_data);
             }
             ANNOTATE_REDUCE_BEFORE(reduce);
@@ -809,6 +822,7 @@ __kmp_hierarchical_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr,
                 kmp_flag_64 flag(&child_bar->b_arrived, new_state);
                 flag.wait(this_thr, FALSE
                           USE_ITT_BUILD_ARG(itt_sync_obj) );
+                ANNOTATE_BARRIER_END(child_thr);
                 if (reduce) {
                     KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
                                    gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
@@ -835,6 +849,7 @@ __kmp_hierarchical_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr,
                 kmp_flag_64 flag(&child_bar->b_arrived, new_state);
                 flag.wait(this_thr, FALSE
                           USE_ITT_BUILD_ARG(itt_sync_obj) );
+                ANNOTATE_BARRIER_END(child_thr);
                 if (reduce) {
                     KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
                                    gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
@@ -859,6 +874,7 @@ __kmp_hierarchical_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr,
            the team is valid any more - it could be deallocated by the master thread at any time. */
         if (thr_bar->my_level || __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME || !thr_bar->use_oncore_barrier) {
             // Parent is waiting on my b_arrived flag; release it
+            ANNOTATE_BARRIER_BEGIN(this_thr);
             kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[thr_bar->parent_tid]);
             flag.release();
         }
@@ -904,6 +920,7 @@ __kmp_hierarchical_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, i
             kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
             flag.wait(this_thr, TRUE
                       USE_ITT_BUILD_ARG(itt_sync_obj) );
+            ANNOTATE_BARRIER_END(this_thr);
             TCW_8(thr_bar->b_go, KMP_INIT_BARRIER_STATE); // Reset my b_go flag for next time
         }
         else { // Thread barrier data is initialized, this is a leaf, blocktime is infinite, not nested
@@ -1020,6 +1037,7 @@ __kmp_hierarchical_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, i
                               team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
                               child_bar->b_go + KMP_BARRIER_STATE_BUMP));
                 // Release child using child's b_go flag
+                ANNOTATE_BARRIER_BEGIN(child_thr);
                 kmp_flag_64 flag(&child_bar->b_go, child_thr);
                 flag.release();
             }
@@ -1043,6 +1061,7 @@ __kmp_hierarchical_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, i
                               team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
                               child_bar->b_go + KMP_BARRIER_STATE_BUMP));
                 // Release child using child's b_go flag
+                ANNOTATE_BARRIER_BEGIN(child_thr);
                 kmp_flag_64 flag(&child_bar->b_go, child_thr);
                 flag.release();
             }
@@ -1082,7 +1101,7 @@ __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size,
     KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) has arrived\n",
                   gtid, __kmp_team_from_gtid(gtid)->t.t_id,
                   __kmp_tid_from_gtid(gtid)));
-    ANNOTATE_NEW_BARRIER_BEGIN(&team->t.t_bar);
+    ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
 #if OMPT_SUPPORT
     if (ompt_enabled) {
 #if OMPT_BLAME
@@ -1325,7 +1344,7 @@ __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size,
         this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
     }
 #endif
-    ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+    ANNOTATE_BARRIER_END(&team->t.t_bar);
     return status;
 }
 
@@ -1340,7 +1359,7 @@ __kmp_end_split_barrier(enum barrier_type bt, int gtid)
     kmp_info_t *this_thr = __kmp_threads[gtid];
     kmp_team_t *team = this_thr->th.th_team;
 
-    ANNOTATE_NEW_BARRIER_BEGIN(&team->t.t_bar);
+    ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
     if (!team->t.t_serialized) {
         if (KMP_MASTER_GTID(gtid)) {
             switch (__kmp_barrier_release_pattern[bt]) {
@@ -1371,7 +1390,7 @@ __kmp_end_split_barrier(enum barrier_type bt, int gtid)
             } // if
         }
     }
-    ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+    ANNOTATE_BARRIER_END(&team->t.t_bar);
 }
 
@@ -1422,7 +1441,7 @@ __kmp_join_barrier(int gtid)
     KMP_DEBUG_ASSERT(this_thr == team->t.t_threads[tid]);
     KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) arrived at join barrier\n", gtid, team_id, tid));
 
-    ANNOTATE_NEW_BARRIER_BEGIN(&team->t.t_bar);
+    ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
 #if OMPT_SUPPORT
 #if OMPT_TRACE
     if (ompt_enabled &&
@@ -1587,7 +1606,7 @@ __kmp_join_barrier(int gtid)
         this_thr->th.ompt_thread_info.state = ompt_state_overhead;
     }
 #endif
-    ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+    ANNOTATE_BARRIER_END(&team->t.t_bar);
 }
 
@@ -1603,7 +1622,7 @@ __kmp_fork_barrier(int gtid, int tid)
     void * itt_sync_obj = NULL;
 #endif /* USE_ITT_BUILD */
     if (team)
-        ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+        ANNOTATE_BARRIER_END(&team->t.t_bar);
     KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) has arrived\n",
                   gtid, (team != NULL) ? team->t.t_id : -1, tid));
@@ -1758,7 +1777,7 @@ __kmp_fork_barrier(int gtid, int tid)
         } // (prepare called inside barrier_release)
     }
 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
-    ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+    ANNOTATE_BARRIER_END(&team->t.t_bar);
     KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) is leaving\n", gtid, team->t.t_id, tid));
 }
diff --git a/openmp/runtime/src/tsan_annotations.h b/openmp/runtime/src/tsan_annotations.h
index c4624390a89..cacd7ece99d 100644
--- a/openmp/runtime/src/tsan_annotations.h
+++ b/openmp/runtime/src/tsan_annotations.h
@@ -82,19 +82,10 @@ void AnnotateMemoryIsInitialized(const char *f, int l, uptr mem, uptr sz);
 #define ANNOTATE_RWLOCK_CREATE(lck) AnnotateRWLockCreate(__FILE__, __LINE__, (uptr)lck)
 #define ANNOTATE_RWLOCK_RELEASED(lck) AnnotateRWLockAcquired(__FILE__, __LINE__, (uptr)lck, 1)
 #define ANNOTATE_RWLOCK_ACQUIRED(lck) AnnotateRWLockReleased(__FILE__, __LINE__, (uptr)lck, 1)
-
-/* new higher level barrier annotations */
-#define ANNOTATE_NEW_BARRIER_BEGIN(addr) AnnotateHappensBefore(__FILE__, __LINE__, (uptr)addr)
-#define ANNOTATE_NEW_BARRIER_END(addr) AnnotateHappensAfter(__FILE__, __LINE__, (uptr)addr)
-// #define ANNOTATE_NEW_BARRIER_BEGIN(addr)
-// #define ANNOTATE_NEW_BARRIER_END(addr)
-
-
+#define ANNOTATE_BARRIER_BEGIN(addr) AnnotateHappensBefore(__FILE__, __LINE__, (uptr)addr)
+#define ANNOTATE_BARRIER_END(addr) AnnotateHappensAfter(__FILE__, __LINE__, (uptr)addr)
 #define ANNOTATE_REDUCE_AFTER(addr) AnnotateHappensAfter(__FILE__, __LINE__, (uptr)addr)
 #define ANNOTATE_REDUCE_BEFORE(addr) AnnotateHappensBefore(__FILE__, __LINE__, (uptr)addr)
-// #define ANNOTATE_REDUCE_AFTER(addr)
-// #define ANNOTATE_REDUCE_BEFORE(addr)
-
 #else
 #define ANNOTATE_HAPPENS_AFTER(addr)
 #define ANNOTATE_HAPPENS_BEFORE(addr)
@@ -103,8 +94,8 @@ void AnnotateMemoryIsInitialized(const char *f, int l, uptr mem, uptr sz);
 #define ANNOTATE_RWLOCK_CREATE(lck)
 #define ANNOTATE_RWLOCK_RELEASED(lck)
 #define ANNOTATE_RWLOCK_ACQUIRED(lck)
-#define ANNOTATE_NEW_BARRIER_BEGIN(addr)
-#define ANNOTATE_NEW_BARRIER_END(addr)
+#define ANNOTATE_BARRIER_BEGIN(addr)
+#define ANNOTATE_BARRIER_END(addr)
 #define ANNOTATE_REDUCE_AFTER(addr)
 #define ANNOTATE_REDUCE_BEFORE(addr)
 #endif
```
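To see end to end why the pairing removes the reduction false positive, here is a hedged standalone demo; it is not part of the patch, and the file name, `Worker` type, and helper names are invented for illustration. It assumes a build with `-fsanitize=thread`, in which case the TSan runtime supplies the real `Annotate*` definitions; the weak no-op fallbacks keep a plain build linking, mirroring the runtime's own fallback scheme.

```cpp
// tsan_barrier_demo.cpp (hypothetical file name). Build and run:
//   clang++ -std=c++11 -fsanitize=thread tsan_barrier_demo.cpp && ./a.out
// Without the two ANNOTATE_* calls, TSan reports a race on reduce_data:
// the relaxed flag deliberately hides the ordering from TSan's
// happens-before analysis, analogous to the runtime's hand-rolled
// barrier synchronization that TSan cannot see through.
#include <atomic>
#include <cstdio>
#include <thread>

typedef unsigned long uptr;
// Weak no-op fallbacks; under -fsanitize=thread the TSan runtime's strong
// definitions take over and these bodies are never used.
extern "C" __attribute__((weak)) void AnnotateHappensBefore(const char *, int, uptr) {}
extern "C" __attribute__((weak)) void AnnotateHappensAfter(const char *, int, uptr) {}

#define ANNOTATE_BARRIER_BEGIN(addr) AnnotateHappensBefore(__FILE__, __LINE__, (uptr)(addr))
#define ANNOTATE_BARRIER_END(addr) AnnotateHappensAfter(__FILE__, __LINE__, (uptr)(addr))

struct Worker {               // loose stand-in for kmp_info_t
  int reduce_data = 0;        // written by the worker, combined by the master
  std::atomic<int> b_arrived{0};
};

int main() {
  Worker w;
  std::thread worker([&w] {
    w.reduce_data = 42;                               // plain, unguarded write
    ANNOTATE_BARRIER_BEGIN(&w);                       // like the gather release path
    w.b_arrived.store(1, std::memory_order_relaxed);  // flag invisible to TSan's HB analysis
  });
  while (w.b_arrived.load(std::memory_order_relaxed) == 0) {
  }                                                   // like kmp_flag_64::wait()
  ANNOTATE_BARRIER_END(&w);                           // pairs with the BEGIN above
  std::printf("reduced value: %d\n", w.reduce_data);  // no report with the annotations
  worker.join();
  return 0;
}
```

Commenting out the two annotation calls reintroduces the report on `reduce_data`, the analogue of the false positive this patch suppresses in the barrier gather/release paths.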

