author     Chris Mason <chris.mason@oracle.com>   2008-05-15 16:15:45 -0400
committer  Chris Mason <chris.mason@oracle.com>   2008-09-25 11:04:03 -0400
commit     cb03c743c6486e4efb8be6bdf7bf23177826d156 (patch)
tree       e5e9cfb8258c55946b0a551b8f89deb7a2b0267a
parent     211c17f51f46dc6c308c742098273dd46b5ca59c (diff)
Btrfs: Change the congestion functions to meter the number of async submits as well
The async submit workqueue was absorbing too many requests, leading to long
stalls while the backlog drained. Keep a count of pending async submits and
make the congestion functions report write congestion once that count exceeds
256 per open device, so submitters back off instead of piling more work onto
the queue.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
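
What the patch does, in isolation: every queued async submit bumps an atomic
counter, every drained one drops it, and the congestion callback reports write
congestion once the counter passes 256 entries per open device. Below is a
minimal userspace C11 sketch of that metering pattern; the names (queue_depth,
open_devices, enqueue, dequeue, is_congested) are illustrative stand-ins, not
kernel API.

/* Userspace C11 model of the patch's metering pattern; names are
 * illustrative stand-ins, not kernel code. Build: cc -std=c11 meter.c */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int queue_depth;        /* models fs_info->nr_async_submits */
static const int open_devices = 4;    /* models fs_devices->open_devices  */

/* Producer side: queueing one async submit (cf. btrfs_wq_submit_bio). */
static void enqueue(void) { atomic_fetch_add(&queue_depth, 1); }

/* Consumer side: the worker draining one entry (cf. btrfs_async_submit_work). */
static void dequeue(void) { atomic_fetch_sub(&queue_depth, 1); }

/* Congestion check (cf. btrfs_congested_fn): nonzero means "back off". */
static int is_congested(void)
{
	int limit = 256 * open_devices;
	return atomic_load(&queue_depth) > limit;
}

int main(void)
{
	for (int i = 0; i < 256 * open_devices + 1; i++)
		enqueue();
	printf("congested after overfill: %d\n", is_congested()); /* prints 1 */
	dequeue();
	printf("congested after one drain: %d\n", is_congested()); /* prints 0 */
	return 0;
}

The same shape appears in the diff below: atomic_inc() where the submit is
queued, atomic_dec() where the worker consumes it, and the threshold test in
btrfs_congested_fn().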
-rw-r--r--  fs/btrfs/ctree.h    1 +
-rw-r--r--  fs/btrfs/disk-io.c  9 +++++++++
2 files changed, 10 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index a9377fb16c37..293464c0c6ba 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -522,6 +522,7 @@ struct btrfs_fs_info {
 	struct work_struct async_submit_work;
 	spinlock_t end_io_work_lock;
 	spinlock_t async_submit_work_lock;
+	atomic_t nr_async_submits;
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
 	struct work_struct trans_work;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 264f297260f8..373374340e9b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -453,6 +453,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
 	spin_lock(&fs_info->async_submit_work_lock);
 	list_add_tail(&async->list, &fs_info->async_submit_work_list);
+	atomic_inc(&fs_info->nr_async_submits);
 	spin_unlock(&fs_info->async_submit_work_lock);
 
 	queue_work(async_submit_workqueue, &fs_info->async_submit_work);
@@ -906,10 +907,16 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 {
 	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
 	int ret = 0;
+	int limit = 256 * info->fs_devices->open_devices;
 	struct list_head *cur;
 	struct btrfs_device *device;
 	struct backing_dev_info *bdi;
 
+	if ((bdi_bits & (1 << BDI_write_congested)) &&
+	    atomic_read(&info->nr_async_submits) > limit) {
+		return 1;
+	}
+
 	list_for_each(cur, &info->fs_devices->devices) {
 		device = list_entry(cur, struct btrfs_device, dev_list);
 		if (!device->bdev)
@@ -1117,6 +1124,7 @@ static void btrfs_async_submit_work(struct work_struct *work)
 		}
 		next = fs_info->async_submit_work_list.next;
 		list_del(next);
+		atomic_dec(&fs_info->nr_async_submits);
 		spin_unlock(&fs_info->async_submit_work_lock);
 
 		async = list_entry(next, struct async_submit_bio, list);
@@ -1179,6 +1187,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
 	INIT_LIST_HEAD(&fs_info->space_info);
 	btrfs_mapping_init(&fs_info->mapping_tree);
+	atomic_set(&fs_info->nr_async_submits, 0);
 	fs_info->sb = sb;
 	fs_info->max_extent = (u64)-1;
 	fs_info->max_inline = 8192 * 1024;
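
For readers tracing how btrfs_congested_fn() actually gets called: in kernels
of this vintage a filesystem hangs a congestion callback off its
backing_dev_info, and writeback polls it through bdi_write_congested(),
backing off while it returns nonzero. A hedged sketch of that registration,
assuming the era's congested_fn/congested_data bdi fields; the setup_bdi()
helper name here is an assumption, not taken from this patch.

/*
 * Sketch only: how a congestion callback of this era is registered.
 * The setup_bdi() name is hypothetical; congested_fn/congested_data
 * are the 2.6.x backing-dev fields polled by bdi_write_congested().
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
	bdi->congested_fn = btrfs_congested_fn;	/* called with the bdi_bits mask */
	bdi->congested_data = info;		/* handed back as congested_data */
	return 0;
}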