path: root/lldb/packages/Python/lldbsuite/test/functionalities/thread/backtrace_all/ParallelTask.cpp
#include <cstdint>
#include <thread>
#include <vector>
#include <queue>
#include <future>
#include <iostream>
#include <cassert>
#include <condition_variable>  // std::condition_variable (m_tasks_cv)
#include <functional>          // std::function, std::bind
#include <mutex>               // std::mutex, std::unique_lock

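// TaskPoolImpl: a fixed-size pool of worker threads that pull type-erased
// std::function<void()> tasks from a shared FIFO queue. The queue is guarded
// by m_tasks_mutex, and idle workers sleep on m_tasks_cv until a task arrives
// or the pool is stopped.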
class TaskPoolImpl
{
public:
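    // Launch num_threads worker threads; each one runs Worker() until Stop()
    // lets it drain the remaining tasks and exit.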
    TaskPoolImpl(uint32_t num_threads) :
        m_stop(false)
    {
        for (uint32_t i = 0; i < num_threads; ++i)
            m_threads.emplace_back(Worker, this);
    }

    ~TaskPoolImpl()
    {
        Stop();
    }

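    // Wrap the callable and its bound arguments in a shared std::packaged_task,
    // enqueue a copyable void() lambda that invokes it, wake one worker, and
    // hand the matching std::future back to the caller.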
    template<typename F, typename... Args>
    std::future<typename std::result_of<F(Args...)>::type>
    AddTask(F&& f, Args&&... args)
    {
        auto task = std::make_shared<std::packaged_task<typename std::result_of<F(Args...)>::type()>>(
            std::bind(std::forward<F>(f), std::forward<Args>(args)...));

        std::unique_lock<std::mutex> lock(m_tasks_mutex);
        assert(!m_stop && "Can't add task to TaskPool after it is stopped");
        m_tasks.emplace([task](){ (*task)(); });
        lock.unlock();
        m_tasks_cv.notify_one();

        return task->get_future();
    }

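    // Set the stop flag, wake every worker so it can drain the remaining tasks
    // and exit, then join all worker threads.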
    void
    Stop()
    {
        std::unique_lock<std::mutex> lock(m_tasks_mutex);
        m_stop = true;
        lock.unlock();  // release through the guard; unlocking the raw mutex would double-unlock
        m_tasks_cv.notify_all();
        for (auto& t : m_threads)
            t.join();
    }

private:
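    // Worker loop: sleep until a task is queued or the pool is stopped, run
    // tasks one at a time with the lock released, and exit once the queue is
    // empty and no further work will arrive.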
    static void
    Worker(TaskPoolImpl* pool)
    {
        while (true)
        {
            std::unique_lock<std::mutex> lock(pool->m_tasks_mutex);
            if (pool->m_tasks.empty())
                pool->m_tasks_cv.wait(lock, [pool](){ return !pool->m_tasks.empty() || pool->m_stop; });
            if (pool->m_tasks.empty())
                break;

            std::function<void()> f = pool->m_tasks.front();
            pool->m_tasks.pop();
            lock.unlock();

            f();
        }
    }

    std::queue<std::function<void()>> m_tasks;
    std::mutex                        m_tasks_mutex;
    std::condition_variable           m_tasks_cv;
    bool                              m_stop;
    std::vector<std::thread>          m_threads;
};

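// TaskPool: the public facade. All work is forwarded to a single process-wide
// TaskPoolImpl sized to std::thread::hardware_concurrency().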
class TaskPool
{
public:
    // Add a new task to the thread pool and return a std::future belonging to the newly created
    // task. The caller of this function has to wait on the future for this task to complete.
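    //
    // Example usage (illustrative sketch only):
    //   auto f = TaskPool::AddTask([](int a, int b) { return a + b; }, 1, 2);
    //   int sum = f.get();  // blocks until the pooled task has run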
    template<typename F, typename... Args>
    static std::future<typename std::result_of<F(Args...)>::type>
    AddTask(F&& f, Args&&... args)
    {
        return GetImplementation().AddTask(std::forward<F>(f), std::forward<Args>(args)...);
    }

    // Run all of the specified tasks on the thread pool and wait until all of them have finished
    // before returning.
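    //
    // Example usage (illustrative sketch only):
    //   TaskPool::RunTasks([] { /* first job */ }, [] { /* second job */ });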
    template<typename... T>
    static void
    RunTasks(T&&... t)
    {
        RunTaskImpl<T...>::Run(std::forward<T>(t)...);
    }

private:
    static TaskPoolImpl&
    GetImplementation()
    {
        static TaskPoolImpl g_task_pool_impl(std::thread::hardware_concurrency());
        return g_task_pool_impl;
    }

    template<typename... T>
    struct RunTaskImpl;
};

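// RunTaskImpl peels off the first task, submits it to the pool, recursively
// submits the remaining tasks, and only then waits on its own future, so every
// task is queued before any wait begins.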
template<typename H, typename... T>
struct TaskPool::RunTaskImpl<H, T...>
{
    static void
    Run(H&& h, T&&... t)
    {
        auto f = AddTask(std::forward<H>(h));
        RunTaskImpl<T...>::Run(std::forward<T>(t)...);
        f.wait();
    }
};

template<>
struct TaskPool::RunTaskImpl<>
{
    static void
    Run() {}
};

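// Test body: queue a large batch of independent summation tasks and then wait
// on each future. The accompanying backtrace_all test sets a breakpoint on the
// marked line below while the pool threads are still busy.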
int main()
{
    std::vector<std::future<uint32_t>> tasks;
    for (int i = 0; i < 100000; ++i)
    {
        tasks.emplace_back(TaskPool::AddTask([](int i){
            uint32_t s = 0;
            for (int j = 0; j <= i; ++j)
                s += j;
            return s;
        },
        i));
    }

    for (auto& it : tasks)  // Set breakpoint here
        it.wait();

    TaskPool::RunTasks(
        []() { return 1; },
        []() { return "aaaa"; }
    );
}