summaryrefslogtreecommitdiffstats
path: root/lldb/packages/Python/lldbsuite/test/lang/c
diff options
context:
space:
mode:
authorJim Ingham <jingham@apple.com>2019-12-16 17:38:13 -0800
committerJim Ingham <jingham@apple.com>2019-12-16 17:45:21 -0800
commit434905b97d961531286d4b49c7ee1969f7cbea0e (patch)
treee56180fd34ddd0d7316f26a5fffd9b4d2005a9c5 /lldb/packages/Python/lldbsuite/test/lang/c
parent53bcd1e1413c878d2d988df80142a430a9abf24a (diff)
downloadbcm5719-llvm-434905b97d961531286d4b49c7ee1969f7cbea0e.tar.gz
bcm5719-llvm-434905b97d961531286d4b49c7ee1969f7cbea0e.zip
Run all threads when extending a next range over a call.
If you don't do this you end up running arbitrary code with only one thread allowed to run, which can cause deadlocks. <rdar://problem/56422478> Differential Revision: https://reviews.llvm.org/D71440
Diffstat (limited to 'lldb/packages/Python/lldbsuite/test/lang/c')
-rw-r--r--lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile4
-rw-r--r--lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py30
-rw-r--r--lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp76
3 files changed, 110 insertions, 0 deletions
diff --git a/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile
new file mode 100644
index 00000000000..ee0d4690d83
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile
@@ -0,0 +1,4 @@
+# Build the test inferior from locking.cpp.
+CXX_SOURCES := locking.cpp
+# Flag consumed by the shared Makefile.rules included below to build with
+# thread support (the inferior uses std::thread).
+ENABLE_THREADS := YES
+
+include Makefile.rules
diff --git a/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py
new file mode 100644
index 00000000000..988d90a7bb3
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py
@@ -0,0 +1,30 @@
+"""
+Test that step over will let other threads run when necessary
+"""
+
+from __future__ import print_function
+
+
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class StepOverDoesntDeadlockTestCase(TestBase):
+
+ mydir = TestBase.compute_mydir(__file__)
+
+ def test_step_over(self):
+ """Test that when step over steps over a function it lets other threads run."""
+ self.build()
+ # Run to the locking.cpp source line whose text the pattern below
+ # matches; at that point the worker thread already owns the
+ # contended mutex, so the stepped-over call can only finish if the
+ # worker thread is allowed to run.
+ (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
+ "without running the first thread at least somewhat",
+ lldb.SBFileSpec("locking.cpp"))
+ # This is just testing that the step over actually completes.
+ # If the test fails this step never returns, so failure is really
+ # signaled by the test timing out.
+
+ thread.StepOver()
+ # After a successful step-over the process is stopped again.
+ state = process.GetState()
+ self.assertEqual(state, lldb.eStateStopped)
diff --git a/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp
new file mode 100644
index 00000000000..fab3aa8c563
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp
@@ -0,0 +1,76 @@
+#include <stdio.h>
+#include <thread>
+// NOTE(review): std::mutex and std::condition_variable are declared in
+// <mutex> and <condition_variable>; neither header is included here, so this
+// file relies on transitive includes via <thread> — the explicit includes
+// should be added.
+
+// Held by the worker thread for its whole lifetime; call_me_to_get_lock
+// blocks acquiring it.
+std::mutex contended_mutex;
+
+// The worker thread waits on control_condition; call_me_to_get_lock
+// notifies it.
+std::mutex control_mutex;
+std::condition_variable control_condition;
+
+// main waits on thread_started_condition; the worker notifies it once it
+// owns contended_mutex.
+std::mutex thread_started_mutex;
+std::condition_variable thread_started_condition;
+// This function runs in a thread. The locking dance is to make sure that
+// by the time the main thread reaches the thread_1.join() below, this thread
+// has for sure acquired the contended_mutex. So then the call_me_to_get_lock
+// function will block trying to get the mutex, and only succeed once it
+// signals this thread, then lets it run to wake up from the cond_wait and
+// release the mutex.
+
+void
+lock_acquirer_1 (void)
+{
+ // Own contended_mutex until this function returns (the unique_lock
+ // releases it in its destructor).
+ std::unique_lock<std::mutex> contended_lock(contended_mutex);
+
+ // Grab this mutex, that will ensure that the main thread
+ // is in its cond_wait for it (since that's when it drops the mutex).
+
+ thread_started_mutex.lock();
+ thread_started_mutex.unlock();
+
+ // Now signal the main thread that it can continue, we have the contended lock
+ // so the call to call_me_to_get_lock won't make any progress till this
+ // thread gets a chance to run.
+
+ std::unique_lock<std::mutex> control_lock(control_mutex);
+
+ thread_started_condition.notify_all();
+
+ // NOTE(review): a predicate-less wait can wake spuriously; here that would
+ // only release contended_mutex early, which this test tolerates.
+ control_condition.wait(control_lock);
+
+}
+
+// Wake the worker thread, then block acquiring contended_mutex, which the
+// worker holds. This call can therefore only return if the worker thread is
+// allowed to run — exactly what stepping over it must guarantee.
+int
+call_me_to_get_lock (int ret_val)
+{
+ control_condition.notify_all();
+ // Blocks until lock_acquirer_1 returns and releases the mutex.
+ // NOTE(review): the mutex is never unlocked afterwards — acceptable for
+ // this one-shot test program.
+ contended_mutex.lock();
+ return ret_val;
+}
+
+// Returns a constant; used to form the argument of the call_me_to_get_lock
+// call in main, the line the test steps over.
+int
+get_int() {
+ return 567;
+}
+
+// Start the worker, wait until it is known to own contended_mutex, then make
+// the call that the debugger test steps over.
+int main ()
+{
+ std::unique_lock<std::mutex> thread_started_lock(thread_started_mutex);
+
+ std::thread thread_1(lock_acquirer_1);
+
+ // Sleep here until the worker owns contended_mutex and has notified us.
+ thread_started_condition.wait(thread_started_lock);
+
+ // The worker notifies while still holding control_mutex; acquiring it here
+ // proves the worker has entered control_condition.wait (which releases it).
+ control_mutex.lock();
+ control_mutex.unlock();
+
+ // Break here. At this point the other thread will have the contended_mutex,
+ // and be sitting in its cond_wait for the control condition. So there is
+ // no way that our by-hand calling of call_me_to_get_lock will proceed
+ // without running the first thread at least somewhat.
+ // (Keep the comment above intact: the test sets its breakpoint by matching
+ // the text of its last line.)
+
+ // NOTE(review): result is unused; the call's blocking behavior is the point.
+ int result = call_me_to_get_lock(get_int());
+ thread_1.join();
+
+ return 0;
+
+}
OpenPOWER on IntegriCloud