Diffstat (limited to 'kernel/kthread.c')
-rw-r--r-- | kernel/kthread.c | 28
1 file changed, 20 insertions, 8 deletions
diff --git a/kernel/kthread.c b/kernel/kthread.c
index f874300dfce5..524dfc8247d7 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -578,6 +578,9 @@ EXPORT_SYMBOL_GPL(__kthread_init_worker);
  * The works are not allowed to keep any locks, disable preemption or interrupts
  * when they finish. There is defined a safe point for freezing when one work
  * finishes and before a new one is started.
+ *
+ * Also the works must not be handled by more than one worker at the same time,
+ * see also kthread_queue_work().
  */
 int kthread_worker_fn(void *worker_ptr)
 {
@@ -714,12 +717,21 @@ kthread_create_worker_on_cpu(int cpu, const char namefmt[], ...)
 }
 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
 
+static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
+					     struct kthread_work *work)
+{
+	lockdep_assert_held(&worker->lock);
+	WARN_ON_ONCE(!list_empty(&work->node));
+	/* Do not use a work with >1 worker, see kthread_queue_work() */
+	WARN_ON_ONCE(work->worker && work->worker != worker);
+}
+
 /* insert @work before @pos in @worker */
 static void kthread_insert_work(struct kthread_worker *worker,
-			       struct kthread_work *work,
-			       struct list_head *pos)
+				struct kthread_work *work,
+				struct list_head *pos)
 {
-	lockdep_assert_held(&worker->lock);
+	kthread_insert_work_sanity_check(worker, work);
 
 	list_add_tail(&work->node, pos);
 	work->worker = worker;
@@ -735,6 +747,9 @@ static void kthread_insert_work(struct kthread_worker *worker,
  * Queue @work to work processor @task for async execution. @task
  * must have been created with kthread_worker_create(). Returns %true
  * if @work was successfully queued, %false if it was already pending.
+ *
+ * Reinitialize the work if it needs to be used by another worker.
+ * For example, when the worker was stopped and started again.
  */
 bool kthread_queue_work(struct kthread_worker *worker,
 			struct kthread_work *work)
@@ -779,16 +794,13 @@ void kthread_flush_work(struct kthread_work *work)
 	struct kthread_worker *worker;
 	bool noop = false;
 
-retry:
 	worker = work->worker;
 	if (!worker)
 		return;
 
 	spin_lock_irq(&worker->lock);
-	if (work->worker != worker) {
-		spin_unlock_irq(&worker->lock);
-		goto retry;
-	}
+	/* Work must not be used with >1 worker, see kthread_queue_work(). */
+	WARN_ON_ONCE(work->worker != worker);
 
 	if (!list_empty(&work->node))
 		kthread_insert_work(worker, &fwork.work, work->node.next);
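For context, a minimal sketch of the usage rule the new kerneldoc describes: a kthread_work keeps pointing at the last worker it ran on, so it must be reinitialized before being queued on a different worker, otherwise the new WARN_ON_ONCE() checks in kthread_insert_work_sanity_check() fire. The handler, work item, and the calling function below are hypothetical; only kthread_init_work(), kthread_queue_work() and kthread_flush_work() are the real API touched by this patch.

#include <linux/kthread.h>

/* Hypothetical work handler. */
static void my_work_fn(struct kthread_work *work)
{
	/* ... deferred processing ... */
}

static struct kthread_work my_work;

/* Hypothetical caller that hands the same work item to two workers. */
static void reuse_work_on_other_worker(struct kthread_worker *worker_a,
				       struct kthread_worker *worker_b)
{
	kthread_init_work(&my_work, my_work_fn);
	kthread_queue_work(worker_a, &my_work);
	kthread_flush_work(&my_work);

	/*
	 * The work still references worker_a through work->worker.
	 * Queueing it on worker_b directly would trigger the new sanity
	 * check, so reinitialize it first, as the updated
	 * kthread_queue_work() comment requires.
	 */
	kthread_init_work(&my_work, my_work_fn);
	kthread_queue_work(worker_b, &my_work);
}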