author    Chris Mason <chris.mason@oracle.com>  2009-02-04 09:23:24 -0500
committer Chris Mason <chris.mason@oracle.com>  2009-02-04 09:23:24 -0500
commit    b51912c91fcf7581cc7b4550f1bb96422809d9ed (patch)
tree      3738e358e75088d300b289e013beccffe0af8e05 /fs
parent    0279b4cd86685b5eea467c1b74ce94f0add2c0a3 (diff)
Btrfs: async threads should try harder to find work
Tracing shows the delay between when an async thread goes to sleep and when
more work is added is often very short.  This commit adds a little bit of
delay and extra checking to the code right before we schedule out.

It allows more work to be added to the worker without requiring notifications
from other procs.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/async-thread.c  50
-rw-r--r--  fs/btrfs/disk-io.c        2
2 files changed, 46 insertions(+), 6 deletions(-)
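To make the scheme above concrete, here is a minimal userspace sketch of the
same "nap, recheck, then really sleep" pattern, written with pthreads instead
of kthreads.  Everything in it (worker_ctx, work_item, push_work(), the
usleep() nap) is an illustrative stand-in invented for the sketch, not Btrfs
or kernel API; the kernel code in the diff below uses schedule_timeout(),
TASK_INTERRUPTIBLE and worker->working to play the same roles.

/*
 * Illustrative userspace sketch (not Btrfs code) of the pattern this commit
 * adds to worker_loop(): once the queue drains, nap briefly and recheck it
 * before committing to a real sleep, so work queued in that window is picked
 * up without the producer ever having to issue a wakeup.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct work_item {
	struct work_item *next;
	int payload;
};

struct worker_ctx {
	pthread_mutex_t lock;
	pthread_cond_t more_work;
	struct work_item *pending;	/* queued, not yet processed */
	int working;			/* 1 while the worker is awake */
	int stop;
};

/* Queue side: LIFO push for brevity; signal only if the worker is asleep. */
static void push_work(struct worker_ctx *w, struct work_item *item)
{
	int need_wakeup;

	pthread_mutex_lock(&w->lock);
	item->next = w->pending;
	w->pending = item;
	need_wakeup = !w->working;
	pthread_mutex_unlock(&w->lock);

	if (need_wakeup)
		pthread_cond_signal(&w->more_work);
}

static void *worker_loop(void *arg)
{
	struct worker_ctx *w = arg;
	struct work_item *item;

	pthread_mutex_lock(&w->lock);
	while (!w->stop) {
		/* drain everything currently queued */
		while ((item = w->pending) != NULL) {
			w->pending = item->next;
			pthread_mutex_unlock(&w->lock);
			printf("did work %d\n", item->payload);
			free(item);
			pthread_mutex_lock(&w->lock);
		}

		/*
		 * Queue looks empty.  Drop the lock and nap briefly; work
		 * added in this window is found on the recheck without any
		 * signal, because w->working is still 1.
		 */
		pthread_mutex_unlock(&w->lock);
		usleep(1000);		/* the "schedule_timeout(1)" nap */
		pthread_mutex_lock(&w->lock);
		if (w->pending || w->stop)
			continue;

		/* still nothing: tell producers to signal us, then sleep */
		w->working = 0;
		pthread_cond_wait(&w->more_work, &w->lock);
		w->working = 1;
	}
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

int main(void)
{
	static struct worker_ctx w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.more_work = PTHREAD_COND_INITIALIZER,
		.working = 1,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, worker_loop, &w);
	for (int i = 0; i < 5; i++) {
		struct work_item *item = malloc(sizeof(*item));
		item->payload = i;
		push_work(&w, item);
	}
	sleep(1);

	pthread_mutex_lock(&w.lock);
	w.stop = 1;
	pthread_mutex_unlock(&w.lock);
	pthread_cond_signal(&w.more_work);
	pthread_join(tid, NULL);
	return 0;
}

As in the patch, the worker only clears its working flag while holding the
lock and rechecks the queue before it actually blocks, so a producer can never
add work and skip the wakeup; the nap simply means most producers find working
still set and never have to signal at all.  Build with: cc -pthread sketch.c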
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index f2e80f3768ec..c84ca1f5259a 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -19,7 +19,8 @@
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-# include <linux/freezer.h>
+#include <linux/freezer.h>
+#include <linux/ftrace.h>
#include "async-thread.h"
#define WORK_QUEUED_BIT 0
@@ -142,6 +143,7 @@ static int worker_loop(void *arg)
struct btrfs_work *work;
do {
spin_lock_irq(&worker->lock);
+again_locked:
while (!list_empty(&worker->pending)) {
cur = worker->pending.next;
work = list_entry(cur, struct btrfs_work, list);
@@ -164,14 +166,50 @@ static int worker_loop(void *arg)
check_idle_worker(worker);
}
- worker->working = 0;
if (freezing(current)) {
+ worker->working = 0;
+ spin_unlock_irq(&worker->lock);
refrigerator();
} else {
- set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&worker->lock);
- if (!kthread_should_stop())
+ if (!kthread_should_stop()) {
+ cpu_relax();
+ /*
+ * we've dropped the lock, did someone else
+ * jump_in?
+ */
+ smp_mb();
+ if (!list_empty(&worker->pending))
+ continue;
+
+ /*
+ * this short schedule allows more work to
+ * come in without the queue functions
+ * needing to go through wake_up_process()
+ *
+ * worker->working is still 1, so nobody
+ * is going to try and wake us up
+ */
+ schedule_timeout(1);
+ smp_mb();
+ if (!list_empty(&worker->pending))
+ continue;
+
+ /* still no more work?, sleep for real */
+ spin_lock_irq(&worker->lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!list_empty(&worker->pending))
+ goto again_locked;
+
+ /*
+ * this makes sure we get a wakeup when someone
+ * adds something new to the queue
+ */
+ worker->working = 0;
+ spin_unlock_irq(&worker->lock);
+
schedule();
+ }
__set_current_state(TASK_RUNNING);
}
} while (!kthread_should_stop());
@@ -355,8 +393,8 @@ int btrfs_requeue_work(struct btrfs_work *work)
goto out;
spin_lock_irqsave(&worker->lock, flags);
- atomic_inc(&worker->num_pending);
list_add_tail(&work->list, &worker->pending);
+ atomic_inc(&worker->num_pending);
/* by definition we're busy, take ourselves off the idle
* list
@@ -405,9 +443,9 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
spin_lock_irqsave(&worker->lock, flags);
+ list_add_tail(&work->list, &worker->pending);
atomic_inc(&worker->num_pending);
check_busy_worker(worker);
- list_add_tail(&work->list, &worker->pending);
/*
* avoid calling into wake_up_process if this thread has already
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7feac5a475e9..9c3810047976 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1679,6 +1679,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
* low idle thresh
*/
fs_info->endio_workers.idle_thresh = 4;
+ fs_info->endio_meta_workers.idle_thresh = 4;
+
fs_info->endio_write_workers.idle_thresh = 64;
fs_info->endio_meta_write_workers.idle_thresh = 64;