| author | Kostya Serebryany <kcc@google.com> | 2014-02-25 07:34:41 +0000 |
|---|---|---|
| committer | Kostya Serebryany <kcc@google.com> | 2014-02-25 07:34:41 +0000 |
| commit | 6d54611fd4191bddc1907b4e68b19dbaa8fa2173 (patch) | |
| tree | 22946bb8d5093b460b8df8285ab532639e6dfb29 | |
| parent | 27d8b20ee50ffa6e6cad334c3e1d509b63a04dcd (diff) | |
[sanitizer] fix epoch handling in deadlock detector (before the fix, we could have had edges from locks in the previous epoch to locks in the current epoch)
llvm-svn: 202118
3 files changed, 60 insertions(+), 20 deletions(-)
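The heart of the change is the new `ensureCurrentEpoch()` path: a thread's lock bitvector is only meaningful for the epoch it was built in, so it must be flushed before the detector reads or updates it. Below is a minimal standalone sketch of that invariant; `ToyDetectorTLS` and its members are illustrative stand-ins, not the sanitizer's real types.

```cpp
#include <bitset>
#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

// Toy stand-in for DeadlockDetectorTLS<BV>; names here are invented for
// illustration only.
struct ToyDetectorTLS {
  std::bitset<64> held_;  // bit i set <=> lock with index i is held
  uptr epoch_ = 0;

  // Same idea as the patch's ensureCurrentEpoch(): once the detector
  // recycles lock indices (a new epoch), the old bitvector is stale and
  // must be flushed rather than carried across epochs.
  void ensureCurrentEpoch(uptr current_epoch) {
    if (epoch_ == current_epoch) return;
    held_.reset();
    epoch_ = current_epoch;
  }
};

int main() {
  ToyDetectorTLS tls;
  tls.held_.set(3);  // lock index 3 acquired during epoch 0

  // The detector ran out of indices and started a new epoch; index 3 now
  // refers to a completely different lock.
  uptr detector_epoch = 64;
  tls.ensureCurrentEpoch(detector_epoch);

  // Without the flush, the stale bit would produce an edge from a
  // previous-epoch lock to a current-epoch lock: the bug being fixed.
  assert(!tls.held_.test(3));
}
```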
```diff
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h b/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h
index 0bbaea53ed4..76ca8611b61 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h
@@ -42,27 +42,28 @@ class DeadlockDetectorTLS {
     epoch_ = 0;
   }
 
+  void ensureCurrentEpoch(uptr current_epoch) {
+    if (epoch_ == current_epoch) return;
+    bv_.clear();
+    epoch_ = current_epoch;
+  }
+
   void addLock(uptr lock_id, uptr current_epoch) {
     // Printf("addLock: %zx %zx\n", lock_id, current_epoch);
-    CHECK_LE(epoch_, current_epoch);
-    if (current_epoch != epoch_) {
-      bv_.clear();
-      epoch_ = current_epoch;
-    }
+    CHECK_EQ(epoch_, current_epoch);
     CHECK(bv_.setBit(lock_id));
   }
 
   void removeLock(uptr lock_id, uptr current_epoch) {
     // Printf("remLock: %zx %zx\n", lock_id, current_epoch);
-    CHECK_LE(epoch_, current_epoch);
-    if (current_epoch != epoch_) {
-      bv_.clear();
-      epoch_ = current_epoch;
-    }
+    CHECK_EQ(epoch_, current_epoch);
     bv_.clearBit(lock_id);  // May already be cleared due to epoch update.
   }
 
-  const BV &getLocks() const { return bv_; }
+  const BV &getLocks(uptr current_epoch) const {
+    CHECK_EQ(epoch_, current_epoch);
+    return bv_;
+  }
 
  private:
   BV bv_;
@@ -127,12 +128,17 @@ class DeadlockDetector {
     g_.removeEdgesFrom(idx);
   }
 
-  // Handle the lock event, return true if there is a cycle.
+  void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
+    dtls->ensureCurrentEpoch(current_epoch_);
+  }
+
+  // Handles the lock event, returns true if there is a cycle.
   // FIXME: handle RW locks, recusive locks, etc.
   bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+    ensureCurrentEpoch(dtls);
     uptr cur_idx = nodeToIndex(cur_node);
-    bool is_reachable = g_.isReachable(cur_idx, dtls->getLocks());
-    g_.addEdges(dtls->getLocks(), cur_idx);
+    bool is_reachable = g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
+    g_.addEdges(dtls->getLocks(current_epoch_), cur_idx);
     dtls->addLock(cur_idx, current_epoch_);
     return is_reachable;
   }
@@ -142,7 +148,7 @@ class DeadlockDetector {
   // or 0 on failure.
   uptr findPathToHeldLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node,
                           uptr *path, uptr path_size) {
-    tmp_bv_.copyFrom(dtls->getLocks());
+    tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
     uptr idx = nodeToIndex(cur_node);
     CHECK(tmp_bv_.clearBit(idx));
     uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
@@ -155,14 +161,17 @@ class DeadlockDetector {
 
   // Handle the unlock event.
   void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+    ensureCurrentEpoch(dtls);
     dtls->removeLock(nodeToIndex(node), current_epoch_);
   }
 
   bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
-    return dtls->getLocks().getBit(nodeToIndex(node));
+    return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
  }
 
   uptr testOnlyGetEpoch() const { return current_epoch_; }
+  // idx1 and idx2 are raw indices to g_, not lock IDs.
+  bool testOnlyHasEdge(uptr idx1, uptr idx2) { return g_.hasEdge(idx1, idx2); }
 
   void Print() {
     for (uptr from = 0; from < size(); from++)
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc b/compiler-rt/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
index fa9a6a794cc..cc3cbabced8 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
@@ -281,4 +281,34 @@ TEST(DeadlockDetector, MultipleEpochsTest) {
   RunMultipleEpochsTest<BV4>();
 }
 
+template <class BV>
+void RunCorrectEpochFlush() {
+  ScopedDD<BV> sdd;
+  DeadlockDetector<BV> &d = *sdd.dp;
+  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;
+  vector<uptr> locks1;
+  for (uptr i = 0; i < d.size(); i++)
+    locks1.push_back(d.newNode(i));
+  EXPECT_EQ(d.testOnlyGetEpoch(), d.size());
+  d.onLock(&dtls, locks1[3]);
+  d.onLock(&dtls, locks1[4]);
+  d.onLock(&dtls, locks1[5]);
+
+  // We have a new epoch, old locks in dtls will have to be forgotten.
+  uptr l0 = d.newNode(0);
+  EXPECT_EQ(d.testOnlyGetEpoch(), d.size() * 2);
+  uptr l1 = d.newNode(0);
+  EXPECT_EQ(d.testOnlyGetEpoch(), d.size() * 2);
+  d.onLock(&dtls, l0);
+  d.onLock(&dtls, l1);
+  EXPECT_TRUE(d.testOnlyHasEdge(0, 1));
+  EXPECT_FALSE(d.testOnlyHasEdge(1, 0));
+  EXPECT_FALSE(d.testOnlyHasEdge(3, 0));
+  EXPECT_FALSE(d.testOnlyHasEdge(4, 0));
+  EXPECT_FALSE(d.testOnlyHasEdge(5, 0));
+}
+TEST(DeadlockDetector, CorrectEpochFlush) {
+  RunCorrectEpochFlush<BV1>();
+  RunCorrectEpochFlush<BV2>();
+}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
index 74aec4e6afe..89098a885b7 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -22,10 +22,11 @@
 
 namespace __tsan {
-
-static void EnsureDeadlockDetectorID(Context *ctx, SyncVar *s) {
+static void EnsureDeadlockDetectorID(Context *ctx, ThreadState *thr,
+                                     SyncVar *s) {
   if (!ctx->dd.nodeBelongsToCurrentEpoch(s->deadlock_detector_id))
     s->deadlock_detector_id = ctx->dd.newNode(reinterpret_cast<uptr>(s));
+  ctx->dd.ensureCurrentEpoch(&thr->deadlock_detector_tls);
 }
 
 void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
@@ -121,7 +122,7 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
   thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
   if (common_flags()->detect_deadlocks) {
     Lock lk(&ctx->dd_mtx);
-    EnsureDeadlockDetectorID(ctx, s);
+    EnsureDeadlockDetectorID(ctx, thr, s);
     if (ctx->dd.isHeld(&thr->deadlock_detector_tls, s->deadlock_detector_id)) {
       // FIXME: add tests, handle the real recursive locks.
       Printf("ThreadSanitizer: reursive-lock\n");
@@ -184,7 +185,7 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
   thr->mset.Del(s->GetId(), true);
   if (common_flags()->detect_deadlocks) {
     Lock lk(&ctx->dd_mtx);
-    EnsureDeadlockDetectorID(ctx, s);
+    EnsureDeadlockDetectorID(ctx, thr, s);
     // Printf("MutexUnlock: %zx\n", s->deadlock_detector_id);
     ctx->dd.onUnlock(&thr->deadlock_detector_tls, s->deadlock_detector_id);
```
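For context, here is a hedged sketch of the lock-order algorithm that `onLock()` implements, using a toy adjacency-matrix graph in place of the real `BVGraph`; `ToyGraph`, `reachable()`, and the fixed lock count are invented for illustration, not the sanitizer's API.

```cpp
#include <bitset>
#include <cstdio>
#include <vector>

constexpr int kLocks = 8;

// Toy stand-in for the sanitizer's BVGraph: edges[a].test(b) means
// "lock a was held while lock b was acquired".
struct ToyGraph {
  std::bitset<kLocks> edges[kLocks];

  // DFS: is any lock in `targets` reachable from `from`?
  bool reachable(int from, const std::bitset<kLocks> &targets) const {
    std::bitset<kLocks> visited;
    std::vector<int> stack{from};
    while (!stack.empty()) {
      int n = stack.back();
      stack.pop_back();
      if (targets.test(n)) return true;
      if (visited.test(n)) continue;
      visited.set(n);
      for (int m = 0; m < kLocks; m++)
        if (edges[n].test(m)) stack.push_back(m);
    }
    return false;
  }
};

// Mirrors the shape of DeadlockDetector::onLock(): a cycle exists iff the
// lock being acquired can already reach one of the locks this thread holds.
bool onLock(ToyGraph &g, std::bitset<kLocks> &held, int lock) {
  bool cycle = g.reachable(lock, held);  // isReachable(cur_idx, held locks)
  for (int h = 0; h < kLocks; h++)       // addEdges(held locks, cur_idx)
    if (held.test(h)) g.edges[h].set(lock);
  held.set(lock);                        // addLock(cur_idx)
  return cycle;
}

int main() {
  ToyGraph g;
  std::bitset<kLocks> t1, t2;            // per-thread held-lock sets
  onLock(g, t1, 0);                      // thread 1: lock A
  onLock(g, t1, 1);                      // thread 1: lock B (edge A -> B)
  onLock(g, t2, 1);                      // thread 2: lock B
  bool cycle = onLock(g, t2, 0);         // thread 2: lock A closes a cycle
  std::printf("cycle detected: %d\n", cycle);  // prints: cycle detected: 1
}
```

The design point the patch protects is visible here: `held` and `g` must describe the same index space, which is exactly what flushing the thread-local bitvector on every epoch change guarantees.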

