From 15316ba81aee6775d6079fb46c66c801989e7d10 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sun, 8 Jan 2006 01:00:43 -0800
Subject: [PATCH] add schedule_on_each_cpu()

swap migration's isolate_lru_page() currently uses an IPI to notify other
processors that the lru caches need to be drained if the page cannot be
found on the LRU.  The IPI interrupt may interrupt a processor that is
just processing lru requests and cause a race condition.

This patch introduces a new function schedule_on_each_cpu() that uses
keventd to run the LRU draining on each processor.  Processors disable
preemption when dealing with the LRU caches (these are per-processor),
and thus executing LRU draining from another process is safe.

Thanks to Lee Schermerhorn for finding this race condition.

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/workqueue.h |  1 +
 kernel/workqueue.c        | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index ac39d04d027c..86b111300231 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -65,6 +65,7 @@ extern int FASTCALL(schedule_work(struct work_struct *work));
 extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
 
 extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2bd5aee1c736..62d47220696a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -419,6 +419,25 @@ int schedule_delayed_work_on(int cpu,
 	return ret;
 }
 
+int schedule_on_each_cpu(void (*func) (void *info), void *info)
+{
+	int cpu;
+	struct work_struct *work;
+
+	work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
+
+	if (!work)
+		return -ENOMEM;
+	for_each_online_cpu(cpu) {
+		INIT_WORK(work + cpu, func, info);
+		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
+				work + cpu);
+	}
+	flush_workqueue(keventd_wq);
+	kfree(work);
+	return 0;
+}
+
 void flush_scheduled_work(void)
 {
 	flush_workqueue(keventd_wq);
--
cgit v1.2.1
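
For context, a minimal sketch of how the swap-migration code might call the
new helper to drain every online CPU's LRU caches without sending IPIs.
lru_add_drain() is the kernel's existing per-CPU drain routine; the wrapper
names below (lru_add_drain_per_cpu, lru_add_drain_all) are illustrative and
not part of this patch.

#include <linux/swap.h>
#include <linux/workqueue.h>

/* Illustrative wrapper matching the void (*func)(void *info) signature. */
static void lru_add_drain_per_cpu(void *dummy)
{
	lru_add_drain();	/* drains this CPU's LRU caches in keventd context */
}

/*
 * Illustrative caller: run the drain on each online CPU via keventd and
 * wait for completion, instead of interrupting the CPUs with an IPI.
 */
static int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
}

Because keventd runs the callback in process context on each CPU, it cannot
preempt a region that has disabled preemption to manipulate the per-CPU LRU
caches, which is what closes the race the IPI approach had.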