author	Josef Bacik <josef@toxicpanda.com>	2019-06-20 15:37:47 -0400
committer	David Sterba <dsterba@suse.com>	2019-09-09 14:59:04 +0200
commit	3eeb3226a8891544ea4a9baf27ba3d73e8a42991 (patch)
tree	0bc3752557d3c3c21085876227da6908317d26b3 /fs/btrfs/block-group.c
parent	3cad128400c2445d9140c0f5720018e075ef66c6 (diff)
btrfs: migrate nocow and reservation helpers
These are relatively straightforward as well.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/block-group.c')
-rw-r--r--	fs/btrfs/block-group.c	| 82
1 file changed, 82 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 4328196a4d44..1f3afa0b42ba 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -2,6 +2,7 @@
 
 #include "ctree.h"
 #include "block-group.h"
+#include "space-info.h"
 
 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
 {
@@ -118,3 +119,84 @@ struct btrfs_block_group_cache *btrfs_next_block_group(
 	spin_unlock(&fs_info->block_group_cache_lock);
 	return cache;
 }
+
+bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+	struct btrfs_block_group_cache *bg;
+	bool ret = true;
+
+	bg = btrfs_lookup_block_group(fs_info, bytenr);
+	if (!bg)
+		return false;
+
+	spin_lock(&bg->lock);
+	if (bg->ro)
+		ret = false;
+	else
+		atomic_inc(&bg->nocow_writers);
+	spin_unlock(&bg->lock);
+
+	/* No put on block group, done by btrfs_dec_nocow_writers */
+	if (!ret)
+		btrfs_put_block_group(bg);
+
+	return ret;
+}
+
+void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
+{
+	struct btrfs_block_group_cache *bg;
+
+	bg = btrfs_lookup_block_group(fs_info, bytenr);
+	ASSERT(bg);
+	if (atomic_dec_and_test(&bg->nocow_writers))
+		wake_up_var(&bg->nocow_writers);
+	/*
+	 * Once for our lookup and once for the lookup done by a previous call
+	 * to btrfs_inc_nocow_writers().
+	 */
+	btrfs_put_block_group(bg);
+	btrfs_put_block_group(bg);
+}
+
+void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
+{
+	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
+}
+
+void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
+					const u64 start)
+{
+	struct btrfs_block_group_cache *bg;
+
+	bg = btrfs_lookup_block_group(fs_info, start);
+	ASSERT(bg);
+	if (atomic_dec_and_test(&bg->reservations))
+		wake_up_var(&bg->reservations);
+	btrfs_put_block_group(bg);
+}
+
+void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
+{
+	struct btrfs_space_info *space_info = bg->space_info;
+
+	ASSERT(bg->ro);
+
+	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
+		return;
+
+	/*
+	 * Our block group is read only, but before we set it to read only
+	 * some task might have allocated an extent from it without having
+	 * yet created the respective ordered extent (and added it to a
+	 * root's list of ordered extents).
+	 * Therefore wait for any task currently allocating extents, since
+	 * the block group's reservations counter is incremented while a
+	 * read lock on the groups' semaphore is held and decremented after
+	 * releasing the read access on that semaphore and creating the
+	 * ordered extent.
+	 */
+	down_write(&space_info->groups_sem);
+	up_write(&space_info->groups_sem);
+
+	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
+}
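
To illustrate how the migrated nocow helpers are meant to be paired, here is a minimal sketch of the caller pattern, assuming code inside fs/btrfs (the real callers live in fs/btrfs/inode.c, for instance run_delalloc_nocow()); example_nocow_write() and do_nocow_io() are hypothetical names, not part of this patch:

/* Hypothetical caller, for illustration only; not part of this patch. */
static int example_nocow_write(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	int ret;

	/* The block group may have gone read only; fall back to COW then. */
	if (!btrfs_inc_nocow_writers(fs_info, bytenr))
		return -EAGAIN;

	ret = do_nocow_io(bytenr);	/* hypothetical stand-in for the write */

	/*
	 * Drops both the counter and the block group reference taken by
	 * btrfs_inc_nocow_writers(), waking btrfs_wait_nocow_writers().
	 */
	btrfs_dec_nocow_writers(fs_info, bytenr);
	return ret;
}

A task that flips the group read only can then call btrfs_wait_nocow_writers() to drain any writer that raced with setting bg->ro.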
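
The empty down_write()/up_write() pair in btrfs_wait_block_group_reservations() is a barrier, not a critical section: it returns only once every reader currently holding groups_sem has released it, so every in-flight increment of bg->reservations has completed before the wait starts. Below is a self-contained sketch of the same idiom; wait_for_reservers() is a hypothetical name, not a kernel API:

#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/wait_bit.h>

/* Hypothetical helper showing the lock-barrier idiom; not a kernel API. */
static void wait_for_reservers(struct rw_semaphore *groups_sem,
			       atomic_t *reservations)
{
	/*
	 * down_write() succeeds only after all current read-side holders
	 * have dropped the semaphore, so every task that incremented
	 * *reservations under the read lock has left its critical section.
	 */
	down_write(groups_sem);
	up_write(groups_sem);

	/* No increment can be missed now; wait for the matching decrements. */
	wait_var_event(reservations, !atomic_read(reservations));
}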