author    Josef Bacik <jbacik@fb.com>  2015-09-29 11:40:47 -0400
committer Chris Mason <clm@fb.com>     2015-10-21 18:55:19 -0700
commit    4f4db2174d8d6cdc093cbb79d17fbfe0f4d9fbde (patch)
tree      ae850a309ddfe971d0e862aafbfb951163b5a842
parent    36af4e0737f6aa494e43497a5a34588a1d5cb12f (diff)
Btrfs: keep track of max_extent_size per space_info
When we are heavily fragmented we can induce a lot of latency trying to make an allocation happen that is simply not going to happen. Thankfully we keep track of our max_extent_size when going through the allocator, so if we get to the point where we are exiting find_free_extent with ENOSPC we now set space_info->max_extent_size, which keeps future allocations from having to pay this cost. We reset max_extent_size whenever we release pinned bytes back into this space info so we can redo all the work. Thanks,

Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
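The patch below wires this up in three places: unpin_extent_range() clears the cached value when pinned space is released, find_free_extent() fails fast when the cached value says the request cannot fit, and the ENOSPC exit path records the largest extent the search actually found. What follows is a minimal user-space sketch of that flow, not the kernel code: the space_info struct, find_free_extent() and unpin_extent_range() stand-ins are simplified (no locking, no clustering, and a fake free-space search) purely to illustrate the caching behaviour.

/* Minimal user-space model of the max_extent_size cache; not the kernel code.
 * Locking, clustering and the real free-space search are intentionally omitted. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct space_info {
	uint64_t max_extent_size;	/* 0 means "unknown": do the full search */
};

/* Stand-in for the expensive allocator search; pretend the largest
 * contiguous free run is 64K and report it back to the caller. */
static uint64_t search_free_space(uint64_t num_bytes, uint64_t *max_extent_size)
{
	uint64_t largest_free = 64 * 1024;

	*max_extent_size = largest_free;
	return num_bytes <= largest_free ? num_bytes : 0;
}

static int find_free_extent(struct space_info *si, uint64_t num_bytes)
{
	uint64_t max_extent_size = 0;

	/* Cheap check first: a previous failed search already told us the
	 * largest extent available, so don't repeat the work for a bigger ask. */
	if (si->max_extent_size && num_bytes > si->max_extent_size)
		return -ENOSPC;

	if (!search_free_space(num_bytes, &max_extent_size)) {
		/* Remember the best we could find so the next caller fails fast. */
		si->max_extent_size = max_extent_size;
		return -ENOSPC;
	}
	return 0;
}

/* Releasing pinned space may create larger contiguous runs, so forget the
 * cached limit and let the next allocation redo the full search. */
static void unpin_extent_range(struct space_info *si)
{
	si->max_extent_size = 0;
}

int main(void)
{
	struct space_info si = { 0 };

	printf("128K alloc: %d\n", find_free_extent(&si, 128 * 1024)); /* full search, caches 64K */
	printf("256K alloc: %d\n", find_free_extent(&si, 256 * 1024)); /* fails fast via the cache */
	unpin_extent_range(&si);                                       /* cache cleared */
	printf("32K alloc:  %d\n", find_free_extent(&si, 32 * 1024));  /* full search again, succeeds */
	return 0;
}

In this sketch the second allocation never calls search_free_space() at all, which is the latency the patch is aimed at avoiding.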
-rw-r--r--	fs/btrfs/ctree.h	4
-rw-r--r--	fs/btrfs/extent-tree.c	31
2 files changed, 34 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 16384231db82..78a8f56c7e88 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1154,6 +1154,10 @@ struct btrfs_space_info {
delalloc/allocations */
u64 bytes_readonly; /* total bytes that are read only */
+ u64 max_extent_size; /* This will hold the maximum extent size of
+ the space info if we had an ENOSPC in the
+ allocator. */
+
unsigned int full:1; /* indicates that we cannot allocate any more
chunks for this space */
unsigned int chunk_alloc:1; /* set if we are allocating a chunk */
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d6b5ef4b4c72..febb5bc35a64 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3810,6 +3810,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->bytes_readonly = 0;
found->bytes_may_use = 0;
found->full = 0;
+ found->max_extent_size = 0;
found->force_alloc = CHUNK_ALLOC_NO_FORCE;
found->chunk_alloc = 0;
found->flush = 0;
@@ -6158,6 +6159,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
spin_lock(&cache->lock);
cache->pinned -= len;
space_info->bytes_pinned -= len;
+ space_info->max_extent_size = 0;
percpu_counter_add(&space_info->total_bytes_pinned, -len);
if (cache->ro) {
space_info->bytes_readonly += len;
@@ -6915,6 +6917,29 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
}
/*
+ * If our free space is heavily fragmented we may not be able to make
+ * big contiguous allocations, so instead of doing the expensive search
+ * for free space, simply return ENOSPC with our max_extent_size so we
+ * can go ahead and search for a more manageable chunk.
+ *
+ * If our max_extent_size is large enough for our allocation simply
+ * disable clustering since we will likely not be able to find enough
+ * space to create a cluster and induce latency trying.
+ */
+ if (unlikely(space_info->max_extent_size)) {
+ spin_lock(&space_info->lock);
+ if (space_info->max_extent_size &&
+ num_bytes > space_info->max_extent_size) {
+ ins->offset = space_info->max_extent_size;
+ spin_unlock(&space_info->lock);
+ return -ENOSPC;
+ } else if (space_info->max_extent_size) {
+ use_cluster = false;
+ }
+ spin_unlock(&space_info->lock);
+ }
+
+ /*
* If the space info is for both data and metadata it means we have a
* small filesystem and we can't use the clustering stuff.
*/
@@ -7287,8 +7312,12 @@ loop:
ret = 0;
}
out:
- if (ret == -ENOSPC)
+ if (ret == -ENOSPC) {
+ spin_lock(&space_info->lock);
+ space_info->max_extent_size = max_extent_size;
+ spin_unlock(&space_info->lock);
ins->offset = max_extent_size;
+ }
return ret;
}