Diffstat (limited to 'lldb/packages/Python/lldbsuite/test')
-rw-r--r--  lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile                |  2
-rw-r--r--  lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py  |  9
-rw-r--r--  lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.c               | 80
-rw-r--r--  lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp             | 76
-rw-r--r--  lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile                   |  4
-rw-r--r--  lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py | 30
-rw-r--r--  lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp                | 76
7 files changed, 190 insertions, 87 deletions
diff --git a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile
index f63adb4f5d2..ee0d4690d83 100644
--- a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile
+++ b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile
@@ -1,4 +1,4 @@
-C_SOURCES := locking.c
+CXX_SOURCES := locking.cpp
ENABLE_THREADS := YES
include Makefile.rules
diff --git a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py
index 5b5042b63e4..d7d963390b0 100644
--- a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py
+++ b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py
@@ -16,9 +16,6 @@ class ExprDoesntDeadlockTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr17946')
- @expectedFailureAll(
- oslist=["windows"],
- bugnumber="Windows doesn't have pthreads, test needs to be ported")
@add_test_categories(["basic_process"])
def test_with_run_command(self):
"""Test that expr will time out and allow other threads to run if it blocks."""
@@ -32,7 +29,7 @@ class ExprDoesntDeadlockTestCase(TestBase):
# Now create a breakpoint at source line before call_me_to_get_lock
# gets called.
- main_file_spec = lldb.SBFileSpec("locking.c")
+ main_file_spec = lldb.SBFileSpec("locking.cpp")
breakpoint = target.BreakpointCreateBySourceRegex(
'Break here', main_file_spec)
if self.TraceOn():
@@ -55,6 +52,6 @@ class ExprDoesntDeadlockTestCase(TestBase):
frame0 = thread.GetFrameAtIndex(0)
- var = frame0.EvaluateExpression("call_me_to_get_lock()")
+ var = frame0.EvaluateExpression("call_me_to_get_lock(get_int())")
self.assertTrue(var.IsValid())
- self.assertTrue(var.GetValueAsSigned(0) == 567)
+ self.assertEqual(var.GetValueAsSigned(0), 567)
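
The expression evaluation above only succeeds because lldb, by default, first runs the expression on the selected thread with a short timeout and then, when that attempt blocks on the contended mutex, retries with all threads resumed so lock_acquirer_1 can wake up and release it. A minimal sketch of making that policy explicit through the SBExpressionOptions API (illustration only, not part of this patch; the timeout values are arbitrary):

    import lldb

    def evaluate_with_all_threads(frame, expr):
        # Try the expression on one thread first, then retry with every
        # thread resumed so a blocked mutex can be released by its owner.
        options = lldb.SBExpressionOptions()
        options.SetOneThreadTimeoutInMicroSeconds(10000)  # brief one-thread attempt
        options.SetTryAllThreads(True)                    # then resume all threads
        options.SetTimeoutInMicroSeconds(5000000)         # overall cap (arbitrary)
        return frame.EvaluateExpression(expr, options)

    # e.g.: var = evaluate_with_all_threads(frame0, "call_me_to_get_lock(get_int())")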
diff --git a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.c b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.c
deleted file mode 100644
index fae9979611d..00000000000
--- a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.c
+++ /dev/null
@@ -1,80 +0,0 @@
-#include <pthread.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <stdio.h>
-
-pthread_mutex_t contended_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-pthread_mutex_t control_mutex = PTHREAD_MUTEX_INITIALIZER;
-pthread_cond_t control_condition;
-
-pthread_mutex_t thread_started_mutex = PTHREAD_MUTEX_INITIALIZER;
-pthread_cond_t thread_started_condition;
-
-// This function runs in a thread. The locking dance is to make sure that
-// by the time the main thread reaches the pthread_join below, this thread
-// has for sure acquired the contended_mutex. So then the call_me_to_get_lock
-// function will block trying to get the mutex, and only succeed once it
-// signals this thread, then lets it run to wake up from the cond_wait and
-// release the mutex.
-
-void *
-lock_acquirer_1 (void *input)
-{
- pthread_mutex_lock (&contended_mutex);
-
- // Grab this mutex, that will ensure that the main thread
- // is in its cond_wait for it (since that's when it drops the mutex.
-
- pthread_mutex_lock (&thread_started_mutex);
- pthread_mutex_unlock(&thread_started_mutex);
-
- // Now signal the main thread that it can continue, we have the contended lock
- // so the call to call_me_to_get_lock won't make any progress till this
- // thread gets a chance to run.
-
- pthread_mutex_lock (&control_mutex);
-
- pthread_cond_signal (&thread_started_condition);
-
- pthread_cond_wait (&control_condition, &control_mutex);
-
- pthread_mutex_unlock (&contended_mutex);
- return NULL;
-}
-
-int
-call_me_to_get_lock ()
-{
- pthread_cond_signal (&control_condition);
- pthread_mutex_lock (&contended_mutex);
- return 567;
-}
-
-int main ()
-{
- pthread_t thread_1;
-
- pthread_cond_init (&control_condition, NULL);
- pthread_cond_init (&thread_started_condition, NULL);
-
- pthread_mutex_lock (&thread_started_mutex);
-
- pthread_create (&thread_1, NULL, lock_acquirer_1, NULL);
-
- pthread_cond_wait (&thread_started_condition, &thread_started_mutex);
-
- pthread_mutex_lock (&control_mutex);
- pthread_mutex_unlock (&control_mutex);
-
- // Break here. At this point the other thread will have the contended_mutex,
- // and be sitting in its cond_wait for the control condition. So there is
- // no way that our by-hand calling of call_me_to_get_lock will proceed
- // without running the first thread at least somewhat.
-
- call_me_to_get_lock();
- pthread_join (thread_1, NULL);
-
- return 0;
-
-}
diff --git a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp
new file mode 100644
index 00000000000..fab3aa8c563
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp
@@ -0,0 +1,76 @@
+#include <condition_variable>
+#include <mutex>
+#include <stdio.h>
+#include <thread>
+
+std::mutex contended_mutex;
+
+std::mutex control_mutex;
+std::condition_variable control_condition;
+
+std::mutex thread_started_mutex;
+std::condition_variable thread_started_condition;
+
+// This function runs in a thread.  The locking dance is to make sure that
+// by the time the main thread reaches the thread_1.join() below, this thread
+// has for sure acquired the contended_mutex.  So the call_me_to_get_lock
+// function will block trying to get the mutex, and will only succeed once it
+// signals this thread and then lets it run, waking it up from its wait so it
+// can release the mutex.
+
+void
+lock_acquirer_1 (void)
+{
+ std::unique_lock<std::mutex> contended_lock(contended_mutex);
+
+ // Grab this mutex; that ensures that the main thread is already in its
+ // wait for it (since that is when it drops the mutex).
+
+ thread_started_mutex.lock();
+ thread_started_mutex.unlock();
+
+ // Now signal the main thread that it can continue; we hold the contended
+ // lock, so the call to call_me_to_get_lock won't make any progress until
+ // this thread gets a chance to run.
+
+ std::unique_lock<std::mutex> control_lock(control_mutex);
+
+ thread_started_condition.notify_all();
+
+ control_condition.wait(control_lock);
+
+}
+
+int
+call_me_to_get_lock (int ret_val)
+{
+ control_condition.notify_all();
+ contended_mutex.lock();
+ return ret_val;
+}
+
+int
+get_int() {
+ return 567;
+}
+
+int main ()
+{
+ std::unique_lock<std::mutex> thread_started_lock(thread_started_mutex);
+
+ std::thread thread_1(lock_acquirer_1);
+
+ thread_started_condition.wait(thread_started_lock);
+
+ control_mutex.lock();
+ control_mutex.unlock();
+
+ // Break here. At this point the other thread will have the contended_mutex,
+ // and be sitting in its cond_wait for the control condition. So there is
+ // no way that our by-hand calling of call_me_to_get_lock will proceed
+ // without running the first thread at least somewhat.
+
+ int result = call_me_to_get_lock(get_int());
+ thread_1.join();
+
+ return 0;
+
+}
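
The handshake above guarantees that, when the "Break here" breakpoint is hit, the worker thread already owns contended_mutex and is parked in control_condition.wait(). A rough sketch, using the lldb Python API, of how one could confirm that invariant from the debugger at the breakpoint (illustration only, not part of this patch):

    import lldb

    def dump_threads(process):
        # Print each thread's innermost frames; the worker should be inside
        # its condition wait while the main thread sits at the breakpoint.
        for thread in process:
            frames = [thread.GetFrameAtIndex(i).GetFunctionName()
                      for i in range(min(thread.GetNumFrames(), 5))]
            print("thread #%d: %s" % (thread.GetIndexID(), frames))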
diff --git a/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile
new file mode 100644
index 00000000000..ee0d4690d83
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile
@@ -0,0 +1,4 @@
+CXX_SOURCES := locking.cpp
+ENABLE_THREADS := YES
+
+include Makefile.rules
diff --git a/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py
new file mode 100644
index 00000000000..988d90a7bb3
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py
@@ -0,0 +1,30 @@
+"""
+Test that step over will let other threads run when necessary
+"""
+
+from __future__ import print_function
+
+
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class StepOverDoesntDeadlockTestCase(TestBase):
+
+ mydir = TestBase.compute_mydir(__file__)
+
+ def test_step_over(self):
+ """Test that when step over steps over a function it lets other threads run."""
+ self.build()
+ (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
+ "without running the first thread at least somewhat",
+ lldb.SBFileSpec("locking.cpp"))
+ # This is just testing that the step over actually completes.
+ # If the test fails, this step never returns, so failure is really
+ # signaled by the test timing out.
+
+ thread.StepOver()
+ state = process.GetState()
+ self.assertEqual(state, lldb.eStateStopped)
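
As the comment in the test notes, a hang is what failure looks like here, since a blocked step-over never returns. If a more explicit check were wanted after the step, the thread's stop reason could also be inspected; a small sketch of an optional extra assertion (not part of this patch):

    # After thread.StepOver() returns and the process is stopped, the
    # thread's stop reason should show that the stepping plan completed.
    self.assertEqual(thread.GetStopReason(), lldb.eStopReasonPlanComplete)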
diff --git a/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp
new file mode 100644
index 00000000000..fab3aa8c563
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp
@@ -0,0 +1,76 @@
+#include <condition_variable>
+#include <mutex>
+#include <stdio.h>
+#include <thread>
+
+std::mutex contended_mutex;
+
+std::mutex control_mutex;
+std::condition_variable control_condition;
+
+std::mutex thread_started_mutex;
+std::condition_variable thread_started_condition;
+
+// This function runs in a thread.  The locking dance is to make sure that
+// by the time the main thread reaches the thread_1.join() below, this thread
+// has for sure acquired the contended_mutex.  So the call_me_to_get_lock
+// function will block trying to get the mutex, and will only succeed once it
+// signals this thread and then lets it run, waking it up from its wait so it
+// can release the mutex.
+
+void
+lock_acquirer_1 (void)
+{
+ std::unique_lock<std::mutex> contended_lock(contended_mutex);
+
+ // Grab this mutex; that ensures that the main thread is already in its
+ // wait for it (since that is when it drops the mutex).
+
+ thread_started_mutex.lock();
+ thread_started_mutex.unlock();
+
+ // Now signal the main thread that it can continue; we hold the contended
+ // lock, so the call to call_me_to_get_lock won't make any progress until
+ // this thread gets a chance to run.
+
+ std::unique_lock<std::mutex> control_lock(control_mutex);
+
+ thread_started_condition.notify_all();
+
+ control_condition.wait(control_lock);
+
+}
+
+int
+call_me_to_get_lock (int ret_val)
+{
+ control_condition.notify_all();
+ contended_mutex.lock();
+ return ret_val;
+}
+
+int
+get_int() {
+ return 567;
+}
+
+int main ()
+{
+ std::unique_lock<std::mutex> thread_started_lock(thread_started_mutex);
+
+ std::thread thread_1(lock_acquirer_1);
+
+ thread_started_condition.wait(thread_started_lock);
+
+ control_mutex.lock();
+ control_mutex.unlock();
+
+ // Break here. At this point the other thread will have the contended_mutex,
+ // and be sitting in its cond_wait for the control condition. So there is
+ // no way that our by-hand calling of call_me_to_get_lock will proceed
+ // without running the first thread at least somewhat.
+
+ int result = call_me_to_get_lock(get_int());
+ thread_1.join();
+
+ return 0;
+
+}