author    Ravitheja Addepally <ravitheja.addepally@intel.com>  2015-11-03 14:24:24 +0000
committer Ravitheja Addepally <ravitheja.addepally@intel.com>  2015-11-03 14:24:24 +0000
commit    46bcbaafb5467d26814243bf3e7bfb2bbfb78246 (patch)
tree      b33c4035b317a3795186ad498f70ed73b9d78949 /lldb/packages/Python/lldbsuite/test/functionalities/thread/backtrace_all/ParallelTask.cpp
parent    0ca1e2c3f92610bbb552e69daaaa0173dbb7a5e2 (diff)
Changes for Bug 25251

Summary:
The solution to bug 24074 (rL249673) needed to parse the function information
from the DWARF in order to set the SymbolContext. To do that, GetFunction was
called for the parent DIE in GetTypeForDIE, which parses the child parameters;
in that flow, GetTypeForDIE was called for one of the sibling DIEs, so an
infinite loop was triggered by calling GetFunction repeatedly for the same
function. The changes in this revision modify GetTypeForDIE to resolve the
function context only in the type-lookup flow, which prevents the infinite
loop. A test case has also been added to catch future regressions, and a test
vector has been added to the test case for bug 24074.

Reviewers: jingham, tberghammer, clayborg

Differential Revision: http://reviews.llvm.org/D14202

llvm-svn: 251917
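For illustration only, here is a minimal C++ sketch of the kind of cycle the
summary describes, using hypothetical names (Resolver, ResolveType,
ResolveFunction) rather than the actual LLDB SymbolFileDWARF APIs: resolving a
type pulls in its enclosing function's context, and resolving the function
parses its child parameter types; restricting the context resolution to the
lookup flow breaks the cycle.

#include <cstdio>
#include <map>

// Hypothetical stand-ins for DWARF DIEs; not the actual LLDB code.
struct DIE
{
    int id;
    int parent; // id of the enclosing function DIE
};

struct Resolver
{
    std::map<int, bool> m_in_progress; // functions currently being resolved

    // Resolving a type may need the enclosing function's context, but only on
    // the lookup path. Without the 'for_lookup' restriction (the analogue of
    // this fix), ResolveFunction -> ResolveType(sibling) -> ResolveFunction
    // would recurse on the same function forever.
    void ResolveType(const DIE& die, bool for_lookup)
    {
        if (for_lookup && !m_in_progress[die.parent])
            ResolveFunction(die.parent);
    }

    // Resolving a function parses the types of all its child parameters.
    void ResolveFunction(int func_id)
    {
        m_in_progress[func_id] = true;
        DIE param1{1, func_id}, param2{2, func_id};
        ResolveType(param1, /*for_lookup=*/false);
        ResolveType(param2, /*for_lookup=*/false);
        m_in_progress[func_id] = false;
        std::printf("resolved function %d\n", func_id);
    }
};

int main()
{
    Resolver r;
    DIE die{1, /*parent=*/42};
    r.ResolveType(die, /*for_lookup=*/true); // lookup flow resolves the context
}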
Diffstat (limited to 'lldb/packages/Python/lldbsuite/test/functionalities/thread/backtrace_all/ParallelTask.cpp')
-rwxr-xr-x  lldb/packages/Python/lldbsuite/test/functionalities/thread/backtrace_all/ParallelTask.cpp | 151
1 file changed, 151 insertions(+), 0 deletions(-)
diff --git a/lldb/packages/Python/lldbsuite/test/functionalities/thread/backtrace_all/ParallelTask.cpp b/lldb/packages/Python/lldbsuite/test/functionalities/thread/backtrace_all/ParallelTask.cpp
new file mode 100755
index 00000000000..71fb8e3bb56
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/functionalities/thread/backtrace_all/ParallelTask.cpp
@@ -0,0 +1,151 @@
+#include <cassert>
+#include <condition_variable>
+#include <cstdint>
+#include <functional>
+#include <future>
+#include <iostream>
+#include <mutex>
+#include <queue>
+#include <thread>
+#include <vector>
+
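+// A simple fixed-size thread pool: worker threads pull std::function tasks
+// from a shared queue until Stop() is called and the queue drains.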
+class TaskPoolImpl
+{
+public:
+    TaskPoolImpl(uint32_t num_threads) :
+        m_stop(false)
+    {
+        for (uint32_t i = 0; i < num_threads; ++i)
+            m_threads.emplace_back(Worker, this);
+    }
+
+    ~TaskPoolImpl()
+    {
+        Stop();
+    }
+
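+    // Wrap the callable in a heap-allocated std::packaged_task so the queue
+    // can store a copyable std::function wrapper while the caller keeps the
+    // std::future tied to the task's result.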
+    template<typename F, typename... Args>
+    std::future<typename std::result_of<F(Args...)>::type>
+    AddTask(F&& f, Args&&... args)
+    {
+        auto task = std::make_shared<std::packaged_task<typename std::result_of<F(Args...)>::type()>>(
+            std::bind(std::forward<F>(f), std::forward<Args>(args)...));
+
+        std::unique_lock<std::mutex> lock(m_tasks_mutex);
+        assert(!m_stop && "Can't add task to TaskPool after it is stopped");
+        m_tasks.emplace([task](){ (*task)(); });
+        lock.unlock();
+        m_tasks_cv.notify_one();
+
+        return task->get_future();
+    }
+
+    void
+    Stop()
+    {
+        std::unique_lock<std::mutex> lock(m_tasks_mutex);
+        m_stop = true;
+        lock.unlock();  // release through the lock object; unlocking the mutex
+                        // directly would leave the unique_lock to unlock again
+        m_tasks_cv.notify_all();
+        for (auto& t : m_threads)
+            t.join();
+    }
+
+private:
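+    // Each worker thread blocks on the condition variable until a task is
+    // queued or the pool is stopped, then runs the task outside the lock.
+    // Workers exit once the queue is drained after Stop().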
+    static void
+    Worker(TaskPoolImpl* pool)
+    {
+        while (true)
+        {
+            std::unique_lock<std::mutex> lock(pool->m_tasks_mutex);
+            if (pool->m_tasks.empty())
+                pool->m_tasks_cv.wait(lock, [pool](){ return !pool->m_tasks.empty() || pool->m_stop; });
+            if (pool->m_tasks.empty())
+                break;
+
+            std::function<void()> f = pool->m_tasks.front();
+            pool->m_tasks.pop();
+            lock.unlock();
+
+            f();
+        }
+    }
+
+    std::queue<std::function<void()>> m_tasks;
+    std::mutex m_tasks_mutex;
+    std::condition_variable m_tasks_cv;
+    bool m_stop;
+    std::vector<std::thread> m_threads;
+};
+
+class TaskPool
+{
+public:
+    // Add a new task to the thread pool and return a std::future belonging to
+    // the newly created task. The caller of this function has to wait on the
+    // future for this task to complete.
+    template<typename F, typename... Args>
+    static std::future<typename std::result_of<F(Args...)>::type>
+    AddTask(F&& f, Args&&... args)
+    {
+        return GetImplementation().AddTask(std::forward<F>(f), std::forward<Args>(args)...);
+    }
+
+    // Run all of the specified tasks on the thread pool and wait until all of
+    // them are finished before returning.
+    template<typename... T>
+    static void
+    RunTasks(T&&... t)
+    {
+        RunTaskImpl<T...>::Run(std::forward<T>(t)...);
+    }
+
+private:
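+    // Meyers singleton: one lazily constructed pool, sized to the hardware
+    // concurrency, shared by every caller of TaskPool.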
+    static TaskPoolImpl&
+    GetImplementation()
+    {
+        static TaskPoolImpl g_task_pool_impl(std::thread::hardware_concurrency());
+        return g_task_pool_impl;
+    }
+
+    template<typename... T>
+    struct RunTaskImpl;
+};
+
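+// RunTaskImpl peels tasks off the parameter pack recursively: each level
+// submits its task, recurses to submit the rest, then waits, so all tasks are
+// queued (and can run concurrently) before the first wait begins.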
+template<typename H, typename... T>
+struct TaskPool::RunTaskImpl<H, T...>
+{
+    static void
+    Run(H&& h, T&&... t)
+    {
+        auto f = AddTask(std::forward<H>(h));
+        RunTaskImpl<T...>::Run(std::forward<T>(t)...);
+        f.wait();
+    }
+};
+
+template<>
+struct TaskPool::RunTaskImpl<>
+{
+    static void
+    Run() {}
+};
+
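+// Queue a large number of small summation tasks, wait on each future at the
+// breakpoint marker below, then exercise RunTasks with two lambdas that have
+// different return types.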
+int main()
+{
+    std::vector<std::future<uint32_t>> tasks;
+    for (int i = 0; i < 100000; ++i)
+    {
+        tasks.emplace_back(TaskPool::AddTask([](int i){
+                uint32_t s = 0;
+                for (int j = 0; j <= i; ++j)
+                    s += j;
+                return s;
+            },
+            i));
+    }
+
+    for (auto& it : tasks) // Set breakpoint here
+        it.wait();
+
+    TaskPool::RunTasks(
+        []() { return 1; },
+        []() { return "aaaa"; }
+    );
+}