Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c | 783
1 file changed, 406 insertions(+), 377 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b6fa5e63085d..fb56fedd5c02 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -36,30 +36,20 @@
 /*
  * The per-CPU workqueue (if single thread, we always use the first
  * possible cpu).
- *
- * The sequence counters are for flush_scheduled_work().  It wants to wait
- * until all currently-scheduled works are completed, but it doesn't
- * want to be livelocked by new, incoming ones.  So it waits until
- * remove_sequence is >= the insert_sequence which pertained when
- * flush_scheduled_work() was called.
  */
 struct cpu_workqueue_struct {

 	spinlock_t lock;

-	long remove_sequence;	/* Least-recently added (next to run) */
-	long insert_sequence;	/* Next to add */
-
 	struct list_head worklist;
 	wait_queue_head_t more_work;
-	wait_queue_head_t work_done;
+	struct work_struct *current_work;

 	struct workqueue_struct *wq;
 	struct task_struct *thread;
+	int should_stop;

 	int run_depth;		/* Detect run_workqueue() recursion depth */
-
-	int freezeable;		/* Freeze the thread during suspend */
 } ____cacheline_aligned;

 /*
@@ -68,8 +58,10 @@ struct cpu_workqueue_struct {
  */
 struct workqueue_struct {
 	struct cpu_workqueue_struct *cpu_wq;
+	struct list_head list;
 	const char *name;
-	struct list_head list; 	/* Empty if single thread */
+	int singlethread;
+	int freezeable;		/* Freeze threads during suspend */
 };

 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
@@ -77,106 +69,68 @@ struct workqueue_struct {
 static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);

-static int singlethread_cpu;
+static int singlethread_cpu __read_mostly;
+static cpumask_t cpu_singlethread_map __read_mostly;
+/* optimization, we could use cpu_possible_map */
+static cpumask_t cpu_populated_map __read_mostly;

 /* If it's single threaded, it isn't in the list of workqueues. */
 static inline int is_single_threaded(struct workqueue_struct *wq)
 {
-	return list_empty(&wq->list);
+	return wq->singlethread;
+}
+
+static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
+{
+	return is_single_threaded(wq)
+		? &cpu_singlethread_map : &cpu_populated_map;
+}
+
+static
+struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
+{
+	if (unlikely(is_single_threaded(wq)))
+		cpu = singlethread_cpu;
+	return per_cpu_ptr(wq->cpu_wq, cpu);
 }

 /*
  * Set the workqueue on which a work item is to be run
  * - Must *only* be called if the pending flag is set
  */
-static inline void set_wq_data(struct work_struct *work, void *wq)
+static inline void set_wq_data(struct work_struct *work,
+				struct cpu_workqueue_struct *cwq)
 {
 	unsigned long new;

 	BUG_ON(!work_pending(work));

-	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
 	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
 	atomic_long_set(&work->data, new);
 }

-static inline void *get_wq_data(struct work_struct *work)
+static inline
+struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 {
 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }

-static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
+static void insert_work(struct cpu_workqueue_struct *cwq,
+				struct work_struct *work, int tail)
 {
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cwq->lock, flags);
+	set_wq_data(work, cwq);
 	/*
-	 * We need to re-validate the work info after we've gotten
-	 * the cpu_workqueue lock. We can run the work now iff:
-	 *
-	 *  - the wq_data still matches the cpu_workqueue_struct
-	 *  - AND the work is still marked pending
-	 *  - AND the work is still on a list (which will be this
-	 *    workqueue_struct list)
-	 *
-	 * All these conditions are important, because we
-	 * need to protect against the work being run right
-	 * now on another CPU (all but the last one might be
-	 * true if it's currently running and has not been
-	 * released yet, for example).
+	 * Ensure that we get the right work->data if we see the
+	 * result of list_add() below, see try_to_grab_pending().
 	 */
-	if (get_wq_data(work) == cwq
-	    && work_pending(work)
-	    && !list_empty(&work->entry)) {
-		work_func_t f = work->func;
-		list_del_init(&work->entry);
-		spin_unlock_irqrestore(&cwq->lock, flags);
-
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
-			work_release(work);
-		f(work);
-
-		spin_lock_irqsave(&cwq->lock, flags);
-		cwq->remove_sequence++;
-		wake_up(&cwq->work_done);
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&cwq->lock, flags);
-	return ret;
-}
-
-/**
- * run_scheduled_work - run scheduled work synchronously
- * @work: work to run
- *
- * This checks if the work was pending, and runs it
- * synchronously if so. It returns a boolean to indicate
- * whether it had any scheduled work to run or not.
- *
- * NOTE! This _only_ works for normal work_structs. You
- * CANNOT use this for delayed work, because the wq data
- * for delayed work will not point properly to the per-
- * CPU workqueue struct, but will change!
- */
-int fastcall run_scheduled_work(struct work_struct *work)
-{
-	for (;;) {
-		struct cpu_workqueue_struct *cwq;
-
-		if (!work_pending(work))
-			return 0;
-		if (list_empty(&work->entry))
-			return 0;
-		/* NOTE! This depends intimately on __queue_work! */
-		cwq = get_wq_data(work);
-		if (!cwq)
-			return 0;
-		if (__run_work(cwq, work))
-			return 1;
-	}
+	smp_wmb();
+	if (tail)
+		list_add_tail(&work->entry, &cwq->worklist);
+	else
+		list_add(&work->entry, &cwq->worklist);
+	wake_up(&cwq->more_work);
 }
-EXPORT_SYMBOL(run_scheduled_work);

 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
@@ -185,10 +139,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 {
 	unsigned long flags;

 	spin_lock_irqsave(&cwq->lock, flags);
-	set_wq_data(work, cwq);
-	list_add_tail(&work->entry, &cwq->worklist);
-	cwq->insert_sequence++;
-	wake_up(&cwq->more_work);
+	insert_work(cwq, work, 1);
 	spin_unlock_irqrestore(&cwq->lock, flags);
 }

@@ -204,16 +155,14 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  */
 int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
-	int ret = 0, cpu = get_cpu();
+	int ret = 0;

 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
-		if (unlikely(is_single_threaded(wq)))
-			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
-		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+		__queue_work(wq_per_cpu(wq, get_cpu()), work);
+		put_cpu();
 		ret = 1;
 	}
-	put_cpu();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(queue_work);

@@ -221,13 +170,10 @@ EXPORT_SYMBOL_GPL(queue_work);
 void delayed_work_timer_fn(unsigned long __data)
 {
 	struct delayed_work *dwork = (struct delayed_work *)__data;
-	struct workqueue_struct *wq = get_wq_data(&dwork->work);
-	int cpu = smp_processor_id();
+	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
+	struct workqueue_struct *wq = cwq->wq;

-	if (unlikely(is_single_threaded(wq)))
-		cpu = singlethread_cpu;
-
-	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
+	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
 }

 /**
@@ -241,27 +187,11 @@ void delayed_work_timer_fn(unsigned long __data)
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay)
 {
-	int ret = 0;
-	struct timer_list *timer = &dwork->timer;
-	struct work_struct *work = &dwork->work;
-
-	timer_stats_timer_set_start_info(timer);
+	timer_stats_timer_set_start_info(&dwork->timer);
 	if (delay == 0)
-		return queue_work(wq, work);
-
-	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
-		BUG_ON(timer_pending(timer));
-		BUG_ON(!list_empty(&work->entry));
+		return queue_work(wq, &dwork->work);

-		/* This stores wq for the moment, for the timer_fn */
-		set_wq_data(work, wq);
-		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)dwork;
-		timer->function = delayed_work_timer_fn;
-		add_timer(timer);
-		ret = 1;
-	}
-	return ret;
+	return queue_delayed_work_on(-1, wq, dwork, delay);
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work);

@@ -285,12 +215,16 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));

-		/* This stores wq for the moment, for the timer_fn */
-		set_wq_data(work, wq);
+		/* This stores cwq for the moment, for the timer_fn */
+		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
 		timer->expires = jiffies + delay;
 		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
-		add_timer_on(timer, cpu);
+
+		if (unlikely(cpu >= 0))
+			add_timer_on(timer, cpu);
+		else
+			add_timer(timer);
 		ret = 1;
 	}
 	return ret;
@@ -299,13 +233,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);

 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
-	unsigned long flags;
-
-	/*
-	 * Keep taking off work from the queue until
-	 * done.
-	 */
-	spin_lock_irqsave(&cwq->lock, flags);
+	spin_lock_irq(&cwq->lock);
 	cwq->run_depth++;
 	if (cwq->run_depth > 3) {
 		/* morton gets to eat his hat */
@@ -318,12 +246,12 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 					struct work_struct, entry);
 		work_func_t f = work->func;

+		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
-		spin_unlock_irqrestore(&cwq->lock, flags);
+		spin_unlock_irq(&cwq->lock);

 		BUG_ON(get_wq_data(work) != cwq);
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
-			work_release(work);
+		work_clear_pending(work);
 		f(work);

 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
@@ -337,63 +265,81 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 			dump_stack();
 		}

-		spin_lock_irqsave(&cwq->lock, flags);
-		cwq->remove_sequence++;
-		wake_up(&cwq->work_done);
+		spin_lock_irq(&cwq->lock);
+		cwq->current_work = NULL;
 	}
 	cwq->run_depth--;
-	spin_unlock_irqrestore(&cwq->lock, flags);
+	spin_unlock_irq(&cwq->lock);
+}
+
+/*
+ * NOTE: the caller must not touch *cwq if this func returns true
+ */
+static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
+{
+	int should_stop = cwq->should_stop;
+
+	if (unlikely(should_stop)) {
+		spin_lock_irq(&cwq->lock);
+		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
+		if (should_stop)
+			cwq->thread = NULL;
+		spin_unlock_irq(&cwq->lock);
+	}
+
+	return should_stop;
 }

 static int worker_thread(void *__cwq)
 {
 	struct cpu_workqueue_struct *cwq = __cwq;
-	DECLARE_WAITQUEUE(wait, current);
-	struct k_sigaction sa;
-	sigset_t blocked;
+	DEFINE_WAIT(wait);

-	if (!cwq->freezeable)
+	if (!cwq->wq->freezeable)
 		current->flags |= PF_NOFREEZE;

 	set_user_nice(current, -5);

-	/* Block and flush all signals */
-	sigfillset(&blocked);
-	sigprocmask(SIG_BLOCK, &blocked, NULL);
-	flush_signals(current);
-
-	/*
-	 * We inherited MPOL_INTERLEAVE from the booting kernel.
-	 * Set MPOL_DEFAULT to insure node local allocations.
-	 */
-	numa_default_policy();
-
-	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
-	sa.sa.sa_handler = SIG_IGN;
-	sa.sa.sa_flags = 0;
-	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
-	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
+	for (;;) {
+		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
+		if (!freezing(current) && !cwq->should_stop
+		    && list_empty(&cwq->worklist))
+			schedule();
+		finish_wait(&cwq->more_work, &wait);

-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		if (cwq->freezeable)
-			try_to_freeze();
+		try_to_freeze();

-		add_wait_queue(&cwq->more_work, &wait);
-		if (list_empty(&cwq->worklist))
-			schedule();
-		else
-			__set_current_state(TASK_RUNNING);
-		remove_wait_queue(&cwq->more_work, &wait);
+		if (cwq_should_stop(cwq))
+			break;

-		if (!list_empty(&cwq->worklist))
-			run_workqueue(cwq);
-		set_current_state(TASK_INTERRUPTIBLE);
+		run_workqueue(cwq);
 	}
-	__set_current_state(TASK_RUNNING);
+
 	return 0;
 }

+struct wq_barrier {
+	struct work_struct	work;
+	struct completion	done;
+};
+
+static void wq_barrier_func(struct work_struct *work)
+{
+	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
+	complete(&barr->done);
+}
+
+static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
+					struct wq_barrier *barr, int tail)
+{
+	INIT_WORK(&barr->work, wq_barrier_func);
+	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
+
+	init_completion(&barr->done);
+
+	insert_work(cwq, &barr->work, tail);
+}
+
 static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	if (cwq->thread == current) {
@@ -403,21 +349,18 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 		 */
 		run_workqueue(cwq);
 	} else {
-		DEFINE_WAIT(wait);
-		long sequence_needed;
+		struct wq_barrier barr;
+		int active = 0;

 		spin_lock_irq(&cwq->lock);
-		sequence_needed = cwq->insert_sequence;
-
-		while (sequence_needed - cwq->remove_sequence > 0) {
-			prepare_to_wait(&cwq->work_done, &wait,
-					TASK_UNINTERRUPTIBLE);
-			spin_unlock_irq(&cwq->lock);
-			schedule();
-			spin_lock_irq(&cwq->lock);
+		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+			insert_wq_barrier(cwq, &barr, 1);
+			active = 1;
 		}
-		finish_wait(&cwq->work_done, &wait);
 		spin_unlock_irq(&cwq->lock);
+
+		if (active)
+			wait_for_completion(&barr.done);
 	}
 }

@@ -428,151 +371,145 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  * Forces execution of the workqueue and blocks until its completion.
  * This is typically used in driver shutdown handlers.
  *
- * This function will sample each workqueue's current insert_sequence number and
- * will sleep until the head sequence is greater than or equal to that.  This
- * means that we sleep until all works which were queued on entry have been
- * handled, but we are not livelocked by new incoming ones.
+ * We sleep until all works which were queued on entry have been handled,
+ * but we are not livelocked by new incoming ones.
  *
  * This function used to run the workqueues itself.  Now we just wait for the
  * helper threads to do it.
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
+	int cpu;
+
+	might_sleep();
+	for_each_cpu_mask(cpu, *cpu_map)
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
+}
+EXPORT_SYMBOL_GPL(flush_workqueue);

-	if (is_single_threaded(wq)) {
-		/* Always use first cpu's area. */
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
-	} else {
-		int cpu;
+/*
+ * Upon a successful return, the caller "owns" WORK_STRUCT_PENDING bit,
+ * so this work can't be re-armed in any way.
+ */
+static int try_to_grab_pending(struct work_struct *work)
+{
+	struct cpu_workqueue_struct *cwq;
+	int ret = 0;

-		mutex_lock(&workqueue_mutex);
-		for_each_online_cpu(cpu)
-			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-		mutex_unlock(&workqueue_mutex);
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
+		return 1;
+
+	/*
+	 * The queueing is in progress, or it is already queued. Try to
+	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
+	 */
+
+	cwq = get_wq_data(work);
+	if (!cwq)
+		return ret;
+
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&work->entry)) {
+		/*
+		 * This work is queued, but perhaps we locked the wrong cwq.
+		 * In that case we must see the new value after rmb(), see
+		 * insert_work()->wmb().
+		 */
+		smp_rmb();
+		if (cwq == get_wq_data(work)) {
+			list_del_init(&work->entry);
+			ret = 1;
+		}
 	}
+	spin_unlock_irq(&cwq->lock);
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(flush_workqueue);

-static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-						   int cpu, int freezeable)
+static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
+				struct work_struct *work)
 {
-	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-	struct task_struct *p;
+	struct wq_barrier barr;
+	int running = 0;

-	spin_lock_init(&cwq->lock);
-	cwq->wq = wq;
-	cwq->thread = NULL;
-	cwq->insert_sequence = 0;
-	cwq->remove_sequence = 0;
-	cwq->freezeable = freezeable;
-	INIT_LIST_HEAD(&cwq->worklist);
-	init_waitqueue_head(&cwq->more_work);
-	init_waitqueue_head(&cwq->work_done);
+	spin_lock_irq(&cwq->lock);
+	if (unlikely(cwq->current_work == work)) {
+		insert_wq_barrier(cwq, &barr, 0);
+		running = 1;
+	}
+	spin_unlock_irq(&cwq->lock);

-	if (is_single_threaded(wq))
-		p = kthread_create(worker_thread, cwq, "%s", wq->name);
-	else
-		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
-	if (IS_ERR(p))
-		return NULL;
-	cwq->thread = p;
-	return p;
+	if (unlikely(running))
+		wait_for_completion(&barr.done);
 }

-struct workqueue_struct *__create_workqueue(const char *name,
-					    int singlethread, int freezeable)
+static void wait_on_work(struct work_struct *work)
 {
-	int cpu, destroy = 0;
+	struct cpu_workqueue_struct *cwq;
 	struct workqueue_struct *wq;
-	struct task_struct *p;
+	const cpumask_t *cpu_map;
+	int cpu;

-	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
-	if (!wq)
-		return NULL;
+	might_sleep();

-	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
-	if (!wq->cpu_wq) {
-		kfree(wq);
-		return NULL;
-	}
+	cwq = get_wq_data(work);
+	if (!cwq)
+		return;

-	wq->name = name;
-	mutex_lock(&workqueue_mutex);
-	if (singlethread) {
-		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
-		if (!p)
-			destroy = 1;
-		else
-			wake_up_process(p);
-	} else {
-		list_add(&wq->list, &workqueues);
-		for_each_online_cpu(cpu) {
-			p = create_workqueue_thread(wq, cpu, freezeable);
-			if (p) {
-				kthread_bind(p, cpu);
-				wake_up_process(p);
-			} else
-				destroy = 1;
-		}
-	}
-	mutex_unlock(&workqueue_mutex);
+	wq = cwq->wq;
+	cpu_map = wq_cpu_map(wq);

-	/*
-	 * Was there any error during startup? If yes then clean up:
-	 */
-	if (destroy) {
-		destroy_workqueue(wq);
-		wq = NULL;
-	}
-	return wq;
+	for_each_cpu_mask(cpu, *cpu_map)
+		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
-EXPORT_SYMBOL_GPL(__create_workqueue);

-static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
+/**
+ * cancel_work_sync - block until a work_struct's callback has terminated
+ * @work: the work which is to be flushed
+ *
+ * cancel_work_sync() will cancel the work if it is queued. If the work's
+ * callback appears to be running, cancel_work_sync() will block until it
+ * has completed.
+ *
+ * It is possible to use this function if the work re-queues itself. It can
+ * cancel the work even if it migrates to another workqueue, however in that
+ * case it only guarantees that work->func() has completed on the last queued
+ * workqueue.
+ *
+ * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
+ * pending, otherwise it goes into a busy-wait loop until the timer expires.
+ *
+ * The caller must ensure that workqueue_struct on which this work was last
+ * queued can't be destroyed before this function returns.
+ */
+void cancel_work_sync(struct work_struct *work)
 {
-	struct cpu_workqueue_struct *cwq;
-	unsigned long flags;
-	struct task_struct *p;
-
-	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-	spin_lock_irqsave(&cwq->lock, flags);
-	p = cwq->thread;
-	cwq->thread = NULL;
-	spin_unlock_irqrestore(&cwq->lock, flags);
-	if (p)
-		kthread_stop(p);
+	while (!try_to_grab_pending(work))
+		cpu_relax();
+	wait_on_work(work);
+	work_clear_pending(work);
 }
+EXPORT_SYMBOL_GPL(cancel_work_sync);

 /**
- * destroy_workqueue - safely terminate a workqueue
- * @wq: target workqueue
+ * cancel_rearming_delayed_work - reliably kill off a delayed work.
+ * @dwork: the delayed work struct
  *
- * Safely destroy a workqueue. All work currently pending will be done first.
+ * It is possible to use this function if @dwork rearms itself via queue_work()
+ * or queue_delayed_work(). See also the comment for cancel_work_sync().
  */
-void destroy_workqueue(struct workqueue_struct *wq)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-	int cpu;
-
-	flush_workqueue(wq);
-
-	/* We don't need the distraction of CPUs appearing and vanishing. */
-	mutex_lock(&workqueue_mutex);
-	if (is_single_threaded(wq))
-		cleanup_workqueue_thread(wq, singlethread_cpu);
-	else {
-		for_each_online_cpu(cpu)
-			cleanup_workqueue_thread(wq, cpu);
-		list_del(&wq->list);
-	}
-	mutex_unlock(&workqueue_mutex);
-	free_percpu(wq->cpu_wq);
-	kfree(wq);
+	while (!del_timer(&dwork->timer) &&
+	       !try_to_grab_pending(&dwork->work))
+		cpu_relax();
+	wait_on_work(&dwork->work);
+	work_clear_pending(&dwork->work);
 }
-EXPORT_SYMBOL_GPL(destroy_workqueue);
+EXPORT_SYMBOL(cancel_rearming_delayed_work);

-static struct workqueue_struct *keventd_wq;
+static struct workqueue_struct *keventd_wq __read_mostly;

 /**
  * schedule_work - put work task in global workqueue
@@ -638,7 +575,7 @@ int schedule_on_each_cpu(work_func_t func)
 	if (!works)
 		return -ENOMEM;

-	mutex_lock(&workqueue_mutex);
+	preempt_disable();		/* CPU hotplug */
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);

@@ -646,7 +583,7 @@ int schedule_on_each_cpu(work_func_t func)
 		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
 	}
-	mutex_unlock(&workqueue_mutex);
+	preempt_enable();
 	flush_workqueue(keventd_wq);
 	free_percpu(works);
 	return 0;
@@ -659,29 +596,6 @@ void flush_scheduled_work(void)
 EXPORT_SYMBOL(flush_scheduled_work);

 /**
- * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
- * @wq:   the controlling workqueue structure
- * @dwork: the delayed work struct
- */
-void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-				       struct delayed_work *dwork)
-{
-	while (!cancel_delayed_work(dwork))
-		flush_workqueue(wq);
-}
-EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
-
-/**
- * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
- * @dwork: the delayed work struct
- */
-void cancel_rearming_delayed_work(struct delayed_work *dwork)
-{
-	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
-}
-EXPORT_SYMBOL(cancel_rearming_delayed_work);
-
-/**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn:		the function to execute
  * @ew:		guaranteed storage for the execute work structure (must
@@ -728,94 +642,209 @@ int current_is_keventd(void)

 }

-/* Take the work from this (downed) CPU. */
-static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
+static struct cpu_workqueue_struct *
+init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-	struct list_head list;
-	struct work_struct *work;

-	spin_lock_irq(&cwq->lock);
-	list_replace_init(&cwq->worklist, &list);
+	cwq->wq = wq;
+	spin_lock_init(&cwq->lock);
+	INIT_LIST_HEAD(&cwq->worklist);
+	init_waitqueue_head(&cwq->more_work);
+
+	return cwq;
+}
+
+static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+{
+	struct workqueue_struct *wq = cwq->wq;
+	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
+	struct task_struct *p;
+
+	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
+	/*
+	 * Nobody can add the work_struct to this cwq,
+	 *	if (caller is __create_workqueue)
+	 *		nobody should see this wq
+	 *	else // caller is CPU_UP_PREPARE
+	 *		cpu is not on cpu_online_map
+	 * so we can abort safely.
+	 */
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+
+	cwq->thread = p;
+	cwq->should_stop = 0;
+
+	return 0;
+}
+
+static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+{
+	struct task_struct *p = cwq->thread;

-	while (!list_empty(&list)) {
-		printk("Taking work for %s\n", wq->name);
-		work = list_entry(list.next,struct work_struct,entry);
-		list_del(&work->entry);
-		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
+	if (p != NULL) {
+		if (cpu >= 0)
+			kthread_bind(p, cpu);
+		wake_up_process(p);
 	}
-	spin_unlock_irq(&cwq->lock);
 }

-/* We're holding the cpucontrol mutex here */
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
-					    unsigned long action,
-					    void *hcpu)
+struct workqueue_struct *__create_workqueue(const char *name,
+					    int singlethread, int freezeable)
 {
-	unsigned int hotcpu = (unsigned long)hcpu;
 	struct workqueue_struct *wq;
+	struct cpu_workqueue_struct *cwq;
+	int err = 0, cpu;

-	switch (action) {
-	case CPU_UP_PREPARE:
-		mutex_lock(&workqueue_mutex);
-		/* Create a new workqueue thread for it. */
-		list_for_each_entry(wq, &workqueues, list) {
-			if (!create_workqueue_thread(wq, hotcpu, 0)) {
-				printk("workqueue for %i failed\n", hotcpu);
-				return NOTIFY_BAD;
-			}
-		}
-		break;
+	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+	if (!wq)
+		return NULL;

-	case CPU_ONLINE:
-		/* Kick off worker threads. */
-		list_for_each_entry(wq, &workqueues, list) {
-			struct cpu_workqueue_struct *cwq;
+	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
+	if (!wq->cpu_wq) {
+		kfree(wq);
+		return NULL;
+	}

-			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
-			kthread_bind(cwq->thread, hotcpu);
-			wake_up_process(cwq->thread);
-		}
-		mutex_unlock(&workqueue_mutex);
-		break;
+	wq->name = name;
+	wq->singlethread = singlethread;
+	wq->freezeable = freezeable;
+	INIT_LIST_HEAD(&wq->list);

-	case CPU_UP_CANCELED:
-		list_for_each_entry(wq, &workqueues, list) {
-			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
+	if (singlethread) {
+		cwq = init_cpu_workqueue(wq, singlethread_cpu);
+		err = create_workqueue_thread(cwq, singlethread_cpu);
+		start_workqueue_thread(cwq, -1);
+	} else {
+		mutex_lock(&workqueue_mutex);
+		list_add(&wq->list, &workqueues);
+
+		for_each_possible_cpu(cpu) {
+			cwq = init_cpu_workqueue(wq, cpu);
+			if (err || !cpu_online(cpu))
 				continue;
-			/* Unbind so it can run. */
-			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
-				     any_online_cpu(cpu_online_map));
-			cleanup_workqueue_thread(wq, hotcpu);
+			err = create_workqueue_thread(cwq, cpu);
+			start_workqueue_thread(cwq, cpu);
 		}
 		mutex_unlock(&workqueue_mutex);
-		break;
+	}
+
+	if (err) {
+		destroy_workqueue(wq);
+		wq = NULL;
+	}
+	return wq;
+}
+EXPORT_SYMBOL_GPL(__create_workqueue);
+
+static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+{
+	struct wq_barrier barr;
+	int alive = 0;
+
+	spin_lock_irq(&cwq->lock);
+	if (cwq->thread != NULL) {
+		insert_wq_barrier(cwq, &barr, 1);
+		cwq->should_stop = 1;
+		alive = 1;
+	}
+	spin_unlock_irq(&cwq->lock);
+
+	if (alive) {
+		wait_for_completion(&barr.done);

-	case CPU_DOWN_PREPARE:
+		while (unlikely(cwq->thread != NULL))
+			cpu_relax();
+		/*
+		 * Wait until cwq->thread unlocks cwq->lock,
+		 * it won't touch *cwq after that.
+		 */
+		smp_rmb();
+		spin_unlock_wait(&cwq->lock);
+	}
+}
+
+/**
+ * destroy_workqueue - safely terminate a workqueue
+ * @wq: target workqueue
+ *
+ * Safely destroy a workqueue. All work currently pending will be done first.
+ */
+void destroy_workqueue(struct workqueue_struct *wq)
+{
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
+	struct cpu_workqueue_struct *cwq;
+	int cpu;
+
+	mutex_lock(&workqueue_mutex);
+	list_del(&wq->list);
+	mutex_unlock(&workqueue_mutex);
+
+	for_each_cpu_mask(cpu, *cpu_map) {
+		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+		cleanup_workqueue_thread(cwq, cpu);
+	}
+
+	free_percpu(wq->cpu_wq);
+	kfree(wq);
+}
+EXPORT_SYMBOL_GPL(destroy_workqueue);
+
+static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+						unsigned long action,
+						void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct cpu_workqueue_struct *cwq;
+	struct workqueue_struct *wq;
+
+	action &= ~CPU_TASKS_FROZEN;
+
+	switch (action) {
+	case CPU_LOCK_ACQUIRE:
 		mutex_lock(&workqueue_mutex);
-		break;
+		return NOTIFY_OK;

-	case CPU_DOWN_FAILED:
+	case CPU_LOCK_RELEASE:
 		mutex_unlock(&workqueue_mutex);
-		break;
+		return NOTIFY_OK;

-	case CPU_DEAD:
-		list_for_each_entry(wq, &workqueues, list)
-			cleanup_workqueue_thread(wq, hotcpu);
-		list_for_each_entry(wq, &workqueues, list)
-			take_over_work(wq, hotcpu);
-		mutex_unlock(&workqueue_mutex);
-		break;
+	case CPU_UP_PREPARE:
+		cpu_set(cpu, cpu_populated_map);
+	}
+
+	list_for_each_entry(wq, &workqueues, list) {
+		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+
+		switch (action) {
+		case CPU_UP_PREPARE:
+			if (!create_workqueue_thread(cwq, cpu))
+				break;
+			printk(KERN_ERR "workqueue for %i failed\n", cpu);
+			return NOTIFY_BAD;
+
+		case CPU_ONLINE:
+			start_workqueue_thread(cwq, cpu);
+			break;
+
+		case CPU_UP_CANCELED:
+			start_workqueue_thread(cwq, -1);
+		case CPU_DEAD:
+			cleanup_workqueue_thread(cwq, cpu);
+			break;
+		}
 	}

 	return NOTIFY_OK;
 }

-void init_workqueues(void)
+void __init init_workqueues(void)
 {
+	cpu_populated_map = cpu_online_map;
 	singlethread_cpu = first_cpu(cpu_possible_map);
+	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);
 }
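
The barrier-based cancel path introduced above is easiest to see from the caller's side. Below is a minimal usage sketch, not part of the patch itself; the driver names (my_dev, my_work_fn, my_dev_teardown) are hypothetical. It shows the self-rearming work item that the new cancel_work_sync() is designed to kill reliably:

	#include <linux/workqueue.h>

	struct my_dev {
		struct work_struct work;
		int stop;			/* set before cancelling */
	};

	static void my_work_fn(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev, work);

		/* ... one unit of processing ... */

		if (!dev->stop)
			schedule_work(&dev->work);	/* re-queues itself */
	}

	static void my_dev_start(struct my_dev *dev)
	{
		dev->stop = 0;
		INIT_WORK(&dev->work, my_work_fn);
		schedule_work(&dev->work);
	}

	static void my_dev_teardown(struct my_dev *dev)
	{
		dev->stop = 1;
		/*
		 * try_to_grab_pending() steals the work off ->worklist (or
		 * wins the race to set WORK_STRUCT_PENDING), then
		 * wait_on_work() inserts a wq_barrier behind a possibly
		 * running callback; on return the work is idle for good.
		 */
		cancel_work_sync(&dev->work);
	}

The old sequence-counter scheme could not offer this guarantee: flushing a workqueue is livelock-prone against a work that keeps re-queueing itself, which is exactly why the counters were dropped in favour of per-work barriers.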
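The same applies to delayed work that re-arms through its timer. Another hedged sketch with invented names, showing why the reworked cancel_rearming_delayed_work() no longer needs the owning workqueue as an argument:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct my_poller {
		struct delayed_work dwork;
	};

	static void my_poll_fn(struct work_struct *work)
	{
		struct my_poller *p = container_of(work, struct my_poller,
						   dwork.work);

		/* ... poll the hardware ... */

		schedule_delayed_work(&p->dwork, HZ);	/* re-arm via timer */
	}

	static void my_poller_start(struct my_poller *p)
	{
		INIT_DELAYED_WORK(&p->dwork, my_poll_fn);
		schedule_delayed_work(&p->dwork, HZ);
	}

	static void my_poller_stop(struct my_poller *p)
	{
		/*
		 * Loops until either del_timer() catches the timer while it
		 * is inactive or try_to_grab_pending() steals the queued
		 * work, then waits for a running callback to finish.  The
		 * deleted cancel_rearming_delayed_workqueue() had to flush
		 * the whole workqueue repeatedly to get the same effect.
		 */
		cancel_rearming_delayed_work(&p->dwork);
	}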
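Finally, the reworked create/destroy path. __create_workqueue() is normally reached through the create_workqueue()/create_singlethread_workqueue() wrappers in workqueue.h, and destroy_workqueue() now funnels every worker through the same barrier-based cleanup_workqueue_thread(). A lifecycle sketch under those assumptions (my_wq and the subsystem functions are invented):

	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;

	static int my_subsys_init(void)
	{
		my_wq = create_singlethread_workqueue("my_wq");
		if (!my_wq)
			return -ENOMEM;
		return 0;
	}

	static void my_subsys_exit(void)
	{
		/*
		 * flush_workqueue() inserts a wq_barrier behind the works
		 * already queued and sleeps on its completion, so it waits
		 * only for works queued before this call and cannot be
		 * livelocked by new incoming ones.
		 */
		flush_workqueue(my_wq);

		/*
		 * Runs all still-pending work, stops the worker thread(s)
		 * via should_stop plus a barrier, and frees the per-CPU
		 * structures; the explicit flush above is illustrative
		 * rather than required.
		 */
		destroy_workqueue(my_wq);
	}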