Diffstat (limited to 'fs/btrfs')
-rw-r--r-- | fs/btrfs/Makefile | 2
-rw-r--r-- | fs/btrfs/acl.c | 5
-rw-r--r-- | fs/btrfs/btrfs_inode.h | 14
-rw-r--r-- | fs/btrfs/compression.c | 5
-rw-r--r-- | fs/btrfs/ctree.c | 18
-rw-r--r-- | fs/btrfs/ctree.h | 58
-rw-r--r-- | fs/btrfs/delayed-inode.c | 1695
-rw-r--r-- | fs/btrfs/delayed-inode.h | 141
-rw-r--r-- | fs/btrfs/dir-item.c | 34
-rw-r--r-- | fs/btrfs/disk-io.c | 69
-rw-r--r-- | fs/btrfs/disk-io.h | 1
-rw-r--r-- | fs/btrfs/export.c | 25
-rw-r--r-- | fs/btrfs/extent-tree.c | 95
-rw-r--r-- | fs/btrfs/extent_io.c | 4
-rw-r--r-- | fs/btrfs/file-item.c | 5
-rw-r--r-- | fs/btrfs/file.c | 27
-rw-r--r-- | fs/btrfs/free-space-cache.c | 973
-rw-r--r-- | fs/btrfs/free-space-cache.h | 47
-rw-r--r-- | fs/btrfs/inode-map.c | 428
-rw-r--r-- | fs/btrfs/inode-map.h | 13
-rw-r--r-- | fs/btrfs/inode.c | 405
-rw-r--r-- | fs/btrfs/ioctl.c | 46
-rw-r--r-- | fs/btrfs/relocation.c | 27
-rw-r--r-- | fs/btrfs/super.c | 10
-rw-r--r-- | fs/btrfs/sysfs.c | 12
-rw-r--r-- | fs/btrfs/transaction.c | 56
-rw-r--r-- | fs/btrfs/transaction.h | 2
-rw-r--r-- | fs/btrfs/tree-log.c | 61
-rw-r--r-- | fs/btrfs/xattr.c | 8
29 files changed, 3533 insertions, 753 deletions
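The patch below introduces btrfs delayed inode items: directory index insertions and deletions, as well as inode item updates, are parked in per-inode "delayed nodes" (two rbtrees of delayed items plus a cached btrfs_inode_item) and written out in batches, either at transaction commit via btrfs_run_delayed_items() or by the new delayed_workers pool once the backlog crosses the BTRFS_DELAYED_BACKGROUND/BTRFS_DELAYED_WRITEBACK thresholds. The key win is that a run of consecutive dir-index keys can be inserted into one leaf with a single btrfs_search_slot(). As a rough mental model only — a minimal userspace sketch, not kernel code, with all names invented for illustration — the batching idea looks like this:

/*
 * Toy model of btrfs delayed dir-index batching (illustration only).
 * Pending items are kept sorted by index; a flush walks the list and
 * groups runs of consecutive indexes so each run costs one "leaf"
 * write, mirroring how btrfs_batch_insert_items() fills one leaf
 * per batch of continuous delayed items.
 */
#include <stdio.h>
#include <stdlib.h>

struct pending_item {
	unsigned long long index;	/* dir index key offset */
	struct pending_item *next;	/* sorted singly-linked list */
};

/* insert keeping the list sorted by index (an rbtree in the real code) */
static void queue_item(struct pending_item **head, unsigned long long index)
{
	struct pending_item *it = malloc(sizeof(*it));
	struct pending_item **p = head;

	if (!it)
		abort();
	it->index = index;
	while (*p && (*p)->index < index)
		p = &(*p)->next;
	it->next = *p;
	*p = it;
}

/* flush: emit one batch per run of consecutive indexes, then free the list */
static void flush_items(struct pending_item **head)
{
	struct pending_item *curr = *head;

	while (curr) {
		struct pending_item *first = curr;
		int nitems = 1;

		/* count the continuous items we can "insert" in one batch */
		while (curr->next && curr->next->index == curr->index + 1) {
			curr = curr->next;
			nitems++;
		}
		printf("batch: indexes %llu..%llu (%d items)\n",
		       first->index, curr->index, nitems);
		curr = curr->next;
	}
	while (*head) {
		struct pending_item *it = *head;
		*head = it->next;
		free(it);
	}
}

int main(void)
{
	struct pending_item *pending = NULL;

	queue_item(&pending, 7);
	queue_item(&pending, 5);
	queue_item(&pending, 6);
	queue_item(&pending, 42);	/* not contiguous: separate batch */
	flush_items(&pending);		/* -> 5..7 in one batch, 42 alone */
	return 0;
}

Beyond this sketch, the real code also migrates a per-item metadata reservation from the transaction's block_rsv into the global_block_rsv (btrfs_delayed_item_reserve_metadata) and releases it as each delayed item reaches disk.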
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 31610ea73aec..a8411c22313d 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ - compression.o delayed-ref.o relocation.o + compression.o delayed-ref.o relocation.o delayed-inode.o diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 1a21c99a91b8..f66fc9959733 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -178,12 +178,13 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, if (value) { acl = posix_acl_from_xattr(value, size); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (acl) { ret = posix_acl_valid(acl); if (ret) goto out; - } else if (IS_ERR(acl)) { - return PTR_ERR(acl); } } diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 57c3bb2884ce..d0b0e43a6a8b 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -22,6 +22,7 @@ #include "extent_map.h" #include "extent_io.h" #include "ordered-data.h" +#include "delayed-inode.h" /* in memory btrfs inode */ struct btrfs_inode { @@ -158,14 +159,27 @@ struct btrfs_inode { */ unsigned force_compress:4; + struct btrfs_delayed_node *delayed_node; + struct inode vfs_inode; }; +extern unsigned char btrfs_filetype_table[]; + static inline struct btrfs_inode *BTRFS_I(struct inode *inode) { return container_of(inode, struct btrfs_inode, vfs_inode); } +static inline u64 btrfs_ino(struct inode *inode) +{ + u64 ino = BTRFS_I(inode)->location.objectid; + + if (ino <= BTRFS_FIRST_FREE_OBJECTID) + ino = inode->i_ino; + return ino; +} + static inline void btrfs_i_size_write(struct inode *inode, u64 size) { i_size_write(inode, size); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index d4cd0f0cd695..bfe42b03eaf9 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -125,9 +125,10 @@ static int check_compressed_csum(struct inode *inode, kunmap_atomic(kaddr, KM_USER0); if (csum != *cb_sum) { - printk(KERN_INFO "btrfs csum failed ino %lu " + printk(KERN_INFO "btrfs csum failed ino %llu " "extent %llu csum %u " - "wanted %u mirror %d\n", inode->i_ino, + "wanted %u mirror %d\n", + (unsigned long long)btrfs_ino(inode), (unsigned long long)disk_start, csum, *cb_sum, cb->mirror_num); ret = -EIO; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index fad8f23d70f0..b6cbeed226b1 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -38,11 +38,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans, struct extent_buffer *src_buf); static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level, int slot); -static int setup_items_for_insert(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, - u32 total_data, u32 total_size, int nr); - struct btrfs_path *btrfs_alloc_path(void) { @@ -74,8 +69,8 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p) * retake all the spinlocks in the path. 
You can safely use NULL * for held */ -static noinline void btrfs_clear_path_blocking(struct btrfs_path *p, - struct extent_buffer *held) +noinline void btrfs_clear_path_blocking(struct btrfs_path *p, + struct extent_buffer *held) { int i; @@ -3559,11 +3554,10 @@ out: * to save stack depth by doing the bulk of the work in a function * that doesn't call btrfs_search_slot */ -static noinline_for_stack int -setup_items_for_insert(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, - u32 total_data, u32 total_size, int nr) +int setup_items_for_insert(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *cpu_key, u32 *data_size, + u32 total_data, u32 total_size, int nr) { struct btrfs_item *item; int i; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 343304dec6d1..e7d40791ec9f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -105,6 +105,12 @@ struct btrfs_ordered_sum; /* For storing free space cache */ #define BTRFS_FREE_SPACE_OBJECTID -11ULL +/* + * The inode number assigned to the special inode for storing + * free ino cache + */ +#define BTRFS_FREE_INO_OBJECTID -12ULL + /* dummy objectid represents multiple objectids */ #define BTRFS_MULTIPLE_OBJECTIDS -255ULL @@ -830,9 +836,6 @@ struct btrfs_block_group_cache { u64 bytes_super; u64 flags; u64 sectorsize; - int extents_thresh; - int free_extents; - int total_bitmaps; unsigned int ro:1; unsigned int dirty:1; unsigned int iref:1; @@ -847,9 +850,7 @@ struct btrfs_block_group_cache { struct btrfs_space_info *space_info; /* free space cache stuff */ - spinlock_t tree_lock; - struct rb_root free_space_offset; - u64 free_space; + struct btrfs_free_space_ctl *free_space_ctl; /* block group cache stuff */ struct rb_node cache_node; @@ -869,6 +870,7 @@ struct btrfs_block_group_cache { struct reloc_control; struct btrfs_device; struct btrfs_fs_devices; +struct btrfs_delayed_root; struct btrfs_fs_info { u8 fsid[BTRFS_FSID_SIZE]; u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; @@ -895,7 +897,10 @@ struct btrfs_fs_info { /* logical->physical extent mapping */ struct btrfs_mapping_tree mapping_tree; - /* block reservation for extent, checksum and root tree */ + /* + * block reservation for extent, checksum, root tree and + * delayed dir index item + */ struct btrfs_block_rsv global_block_rsv; /* block reservation for delay allocation */ struct btrfs_block_rsv delalloc_block_rsv; @@ -1022,6 +1027,7 @@ struct btrfs_fs_info { * for the sys_munmap function call path */ struct btrfs_workers fixup_workers; + struct btrfs_workers delayed_workers; struct task_struct *transaction_kthread; struct task_struct *cleaner_kthread; int thread_pool_size; @@ -1079,6 +1085,8 @@ struct btrfs_fs_info { /* filesystem state */ u64 fs_state; + + struct btrfs_delayed_root *delayed_root; }; /* @@ -1107,6 +1115,16 @@ struct btrfs_root { spinlock_t accounting_lock; struct btrfs_block_rsv *block_rsv; + /* free ino cache stuff */ + struct mutex fs_commit_mutex; + struct btrfs_free_space_ctl *free_ino_ctl; + enum btrfs_caching_type cached; + spinlock_t cache_lock; + wait_queue_head_t cache_wait; + struct btrfs_free_space_ctl *free_ino_pinned; + u64 cache_progress; + struct inode *cache_inode; + struct mutex log_mutex; wait_queue_head_t log_writer_wait; wait_queue_head_t log_commit_wait[2]; @@ -1162,6 +1180,11 @@ struct btrfs_root { struct rb_root inode_tree; /* + * radix tree that keeps track of delayed nodes of every inode, + * protected by inode_lock + 
*/ + struct radix_tree_root delayed_nodes_tree; + /* * right now this just gets used so that a root has its own devid * for stat. It may be used for more later */ @@ -2034,6 +2057,13 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) } /* extent-tree.c */ +static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, + int num_items) +{ + return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * + 3 * num_items; +} + void btrfs_put_block_group(struct btrfs_block_group_cache *cache); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root, unsigned long count); @@ -2226,6 +2256,8 @@ void btrfs_release_path(struct btrfs_path *p); struct btrfs_path *btrfs_alloc_path(void); void btrfs_free_path(struct btrfs_path *p); void btrfs_set_path_blocking(struct btrfs_path *p); +void btrfs_clear_path_blocking(struct btrfs_path *p, + struct extent_buffer *held); void btrfs_unlock_up_safe(struct btrfs_path *p, int level); int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -2237,6 +2269,10 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans, return btrfs_del_items(trans, root, path, path->slots[0], 1); } +int setup_items_for_insert(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *cpu_key, u32 *data_size, + u32 total_data, u32 total_size, int nr); int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, void *data, u32 data_size); int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, @@ -2293,7 +2329,7 @@ void btrfs_check_and_init_root_item(struct btrfs_root_item *item); /* dir-item.c */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, - int name_len, u64 dir, + int name_len, struct inode *dir, struct btrfs_key *location, u8 type, u64 index); struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -2338,12 +2374,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 offset); int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); -/* inode-map.c */ -int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, - struct btrfs_root *fs_root, - u64 dirid, u64 *objectid); -int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid); - /* inode-item.c */ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c new file mode 100644 index 000000000000..01e29503a54b --- /dev/null +++ b/fs/btrfs/delayed-inode.c @@ -0,0 +1,1695 @@ +/* + * Copyright (C) 2011 Fujitsu. All rights reserved. + * Written by Miao Xie <miaox@cn.fujitsu.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#include <linux/slab.h> +#include "delayed-inode.h" +#include "disk-io.h" +#include "transaction.h" + +#define BTRFS_DELAYED_WRITEBACK 400 +#define BTRFS_DELAYED_BACKGROUND 100 + +static struct kmem_cache *delayed_node_cache; + +int __init btrfs_delayed_inode_init(void) +{ + delayed_node_cache = kmem_cache_create("delayed_node", + sizeof(struct btrfs_delayed_node), + 0, + SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, + NULL); + if (!delayed_node_cache) + return -ENOMEM; + return 0; +} + +void btrfs_delayed_inode_exit(void) +{ + if (delayed_node_cache) + kmem_cache_destroy(delayed_node_cache); +} + +static inline void btrfs_init_delayed_node( + struct btrfs_delayed_node *delayed_node, + struct btrfs_root *root, u64 inode_id) +{ + delayed_node->root = root; + delayed_node->inode_id = inode_id; + atomic_set(&delayed_node->refs, 0); + delayed_node->count = 0; + delayed_node->in_list = 0; + delayed_node->inode_dirty = 0; + delayed_node->ins_root = RB_ROOT; + delayed_node->del_root = RB_ROOT; + mutex_init(&delayed_node->mutex); + delayed_node->index_cnt = 0; + INIT_LIST_HEAD(&delayed_node->n_list); + INIT_LIST_HEAD(&delayed_node->p_list); + delayed_node->bytes_reserved = 0; +} + +static inline int btrfs_is_continuous_delayed_item( + struct btrfs_delayed_item *item1, + struct btrfs_delayed_item *item2) +{ + if (item1->key.type == BTRFS_DIR_INDEX_KEY && + item1->key.objectid == item2->key.objectid && + item1->key.type == item2->key.type && + item1->key.offset + 1 == item2->key.offset) + return 1; + return 0; +} + +static inline struct btrfs_delayed_root *btrfs_get_delayed_root( + struct btrfs_root *root) +{ + return root->fs_info->delayed_root; +} + +static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( + struct inode *inode) +{ + struct btrfs_delayed_node *node; + struct btrfs_inode *btrfs_inode = BTRFS_I(inode); + struct btrfs_root *root = btrfs_inode->root; + u64 ino = btrfs_ino(inode); + int ret; + +again: + node = ACCESS_ONCE(btrfs_inode->delayed_node); + if (node) { + atomic_inc(&node->refs); /* can be accessed */ + return node; + } + + spin_lock(&root->inode_lock); + node = radix_tree_lookup(&root->delayed_nodes_tree, ino); + if (node) { + if (btrfs_inode->delayed_node) { + spin_unlock(&root->inode_lock); + goto again; + } + btrfs_inode->delayed_node = node; + atomic_inc(&node->refs); /* can be accessed */ + atomic_inc(&node->refs); /* cached in the inode */ + spin_unlock(&root->inode_lock); + return node; + } + spin_unlock(&root->inode_lock); + + node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); + if (!node) + return ERR_PTR(-ENOMEM); + btrfs_init_delayed_node(node, root, ino); + + atomic_inc(&node->refs); /* cached in the btrfs inode */ + atomic_inc(&node->refs); /* can be accessed */ + + ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); + if (ret) { + kmem_cache_free(delayed_node_cache, node); + return ERR_PTR(ret); + } + + spin_lock(&root->inode_lock); + ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node); + if (ret == -EEXIST) { + kmem_cache_free(delayed_node_cache, node); + spin_unlock(&root->inode_lock); + radix_tree_preload_end(); + goto again; + } + btrfs_inode->delayed_node = node; + spin_unlock(&root->inode_lock); + radix_tree_preload_end(); + + return node; +} + +/* + * Call it when holding delayed_node->mutex + * + * If mod = 1, add this node into the prepared list. 
+ */ +static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root, + struct btrfs_delayed_node *node, + int mod) +{ + spin_lock(&root->lock); + if (node->in_list) { + if (!list_empty(&node->p_list)) + list_move_tail(&node->p_list, &root->prepare_list); + else if (mod) + list_add_tail(&node->p_list, &root->prepare_list); + } else { + list_add_tail(&node->n_list, &root->node_list); + list_add_tail(&node->p_list, &root->prepare_list); + atomic_inc(&node->refs); /* inserted into list */ + root->nodes++; + node->in_list = 1; + } + spin_unlock(&root->lock); +} + +/* Call it when holding delayed_node->mutex */ +static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root, + struct btrfs_delayed_node *node) +{ + spin_lock(&root->lock); + if (node->in_list) { + root->nodes--; + atomic_dec(&node->refs); /* not in the list */ + list_del_init(&node->n_list); + if (!list_empty(&node->p_list)) + list_del_init(&node->p_list); + node->in_list = 0; + } + spin_unlock(&root->lock); +} + +struct btrfs_delayed_node *btrfs_first_delayed_node( + struct btrfs_delayed_root *delayed_root) +{ + struct list_head *p; + struct btrfs_delayed_node *node = NULL; + + spin_lock(&delayed_root->lock); + if (list_empty(&delayed_root->node_list)) + goto out; + + p = delayed_root->node_list.next; + node = list_entry(p, struct btrfs_delayed_node, n_list); + atomic_inc(&node->refs); +out: + spin_unlock(&delayed_root->lock); + + return node; +} + +struct btrfs_delayed_node *btrfs_next_delayed_node( + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_root *delayed_root; + struct list_head *p; + struct btrfs_delayed_node *next = NULL; + + delayed_root = node->root->fs_info->delayed_root; + spin_lock(&delayed_root->lock); + if (!node->in_list) { /* not in the list */ + if (list_empty(&delayed_root->node_list)) + goto out; + p = delayed_root->node_list.next; + } else if (list_is_last(&node->n_list, &delayed_root->node_list)) + goto out; + else + p = node->n_list.next; + + next = list_entry(p, struct btrfs_delayed_node, n_list); + atomic_inc(&next->refs); +out: + spin_unlock(&delayed_root->lock); + + return next; +} + +static void __btrfs_release_delayed_node( + struct btrfs_delayed_node *delayed_node, + int mod) +{ + struct btrfs_delayed_root *delayed_root; + + if (!delayed_node) + return; + + delayed_root = delayed_node->root->fs_info->delayed_root; + + mutex_lock(&delayed_node->mutex); + if (delayed_node->count) + btrfs_queue_delayed_node(delayed_root, delayed_node, mod); + else + btrfs_dequeue_delayed_node(delayed_root, delayed_node); + mutex_unlock(&delayed_node->mutex); + + if (atomic_dec_and_test(&delayed_node->refs)) { + struct btrfs_root *root = delayed_node->root; + spin_lock(&root->inode_lock); + if (atomic_read(&delayed_node->refs) == 0) { + radix_tree_delete(&root->delayed_nodes_tree, + delayed_node->inode_id); + kmem_cache_free(delayed_node_cache, delayed_node); + } + spin_unlock(&root->inode_lock); + } +} + +static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node) +{ + __btrfs_release_delayed_node(node, 0); +} + +struct btrfs_delayed_node *btrfs_first_prepared_delayed_node( + struct btrfs_delayed_root *delayed_root) +{ + struct list_head *p; + struct btrfs_delayed_node *node = NULL; + + spin_lock(&delayed_root->lock); + if (list_empty(&delayed_root->prepare_list)) + goto out; + + p = delayed_root->prepare_list.next; + list_del_init(p); + node = list_entry(p, struct btrfs_delayed_node, p_list); + atomic_inc(&node->refs); +out: + spin_unlock(&delayed_root->lock); + + return 
node; +} + +static inline void btrfs_release_prepared_delayed_node( + struct btrfs_delayed_node *node) +{ + __btrfs_release_delayed_node(node, 1); +} + +struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len) +{ + struct btrfs_delayed_item *item; + item = kmalloc(sizeof(*item) + data_len, GFP_NOFS); + if (item) { + item->data_len = data_len; + item->ins_or_del = 0; + item->bytes_reserved = 0; + item->block_rsv = NULL; + item->delayed_node = NULL; + atomic_set(&item->refs, 1); + } + return item; +} + +/* + * __btrfs_lookup_delayed_item - look up the delayed item by key + * @delayed_node: pointer to the delayed node + * @key: the key to look up + * @prev: used to store the prev item if the right item isn't found + * @next: used to store the next item if the right item isn't found + * + * Note: if we don't find the right item, we will return the prev item and + * the next item. + */ +static struct btrfs_delayed_item *__btrfs_lookup_delayed_item( + struct rb_root *root, + struct btrfs_key *key, + struct btrfs_delayed_item **prev, + struct btrfs_delayed_item **next) +{ + struct rb_node *node, *prev_node = NULL; + struct btrfs_delayed_item *delayed_item = NULL; + int ret = 0; + + node = root->rb_node; + + while (node) { + delayed_item = rb_entry(node, struct btrfs_delayed_item, + rb_node); + prev_node = node; + ret = btrfs_comp_cpu_keys(&delayed_item->key, key); + if (ret < 0) + node = node->rb_right; + else if (ret > 0) + node = node->rb_left; + else + return delayed_item; + } + + if (prev) { + if (!prev_node) + *prev = NULL; + else if (ret < 0) + *prev = delayed_item; + else if ((node = rb_prev(prev_node)) != NULL) { + *prev = rb_entry(node, struct btrfs_delayed_item, + rb_node); + } else + *prev = NULL; + } + + if (next) { + if (!prev_node) + *next = NULL; + else if (ret > 0) + *next = delayed_item; + else if ((node = rb_next(prev_node)) != NULL) { + *next = rb_entry(node, struct btrfs_delayed_item, + rb_node); + } else + *next = NULL; + } + return NULL; +} + +struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, + NULL, NULL); + return item; +} + +struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, + NULL, NULL); + return item; +} + +struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item, *next; + + item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, + NULL, &next); + if (!item) + item = next; + + return item; +} + +struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item, *next; + + item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, + NULL, &next); + if (!item) + item = next; + + return item; +} + +static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node, + struct btrfs_delayed_item *ins, + int action) +{ + struct rb_node **p, *node; + struct rb_node *parent_node = NULL; + struct rb_root *root; + struct btrfs_delayed_item *item; + int cmp; + + if (action == BTRFS_DELAYED_INSERTION_ITEM) + root = &delayed_node->ins_root; + else if (action == 
BTRFS_DELAYED_DELETION_ITEM) + root = &delayed_node->del_root; + else + BUG(); + p = &root->rb_node; + node = &ins->rb_node; + + while (*p) { + parent_node = *p; + item = rb_entry(parent_node, struct btrfs_delayed_item, + rb_node); + + cmp = btrfs_comp_cpu_keys(&item->key, &ins->key); + if (cmp < 0) + p = &(*p)->rb_right; + else if (cmp > 0) + p = &(*p)->rb_left; + else + return -EEXIST; + } + + rb_link_node(node, parent_node, p); + rb_insert_color(node, root); + ins->delayed_node = delayed_node; + ins->ins_or_del = action; + + if (ins->key.type == BTRFS_DIR_INDEX_KEY && + action == BTRFS_DELAYED_INSERTION_ITEM && + ins->key.offset >= delayed_node->index_cnt) + delayed_node->index_cnt = ins->key.offset + 1; + + delayed_node->count++; + atomic_inc(&delayed_node->root->fs_info->delayed_root->items); + return 0; +} + +static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node, + struct btrfs_delayed_item *item) +{ + return __btrfs_add_delayed_item(node, item, + BTRFS_DELAYED_INSERTION_ITEM); +} + +static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node, + struct btrfs_delayed_item *item) +{ + return __btrfs_add_delayed_item(node, item, + BTRFS_DELAYED_DELETION_ITEM); +} + +static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) +{ + struct rb_root *root; + struct btrfs_delayed_root *delayed_root; + + delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root; + + BUG_ON(!delayed_root); + BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM && + delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM); + + if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM) + root = &delayed_item->delayed_node->ins_root; + else + root = &delayed_item->delayed_node->del_root; + + rb_erase(&delayed_item->rb_node, root); + delayed_item->delayed_node->count--; + atomic_dec(&delayed_root->items); + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && + waitqueue_active(&delayed_root->wait)) + wake_up(&delayed_root->wait); +} + +static void btrfs_release_delayed_item(struct btrfs_delayed_item *item) +{ + if (item) { + __btrfs_remove_delayed_item(item); + if (atomic_dec_and_test(&item->refs)) + kfree(item); + } +} + +struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node) +{ + struct rb_node *p; + struct btrfs_delayed_item *item = NULL; + + p = rb_first(&delayed_node->ins_root); + if (p) + item = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return item; +} + +struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node) +{ + struct rb_node *p; + struct btrfs_delayed_item *item = NULL; + + p = rb_first(&delayed_node->del_root); + if (p) + item = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return item; +} + +struct btrfs_delayed_item *__btrfs_next_delayed_item( + struct btrfs_delayed_item *item) +{ + struct rb_node *p; + struct btrfs_delayed_item *next = NULL; + + p = rb_next(&item->rb_node); + if (p) + next = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return next; +} + +static inline struct btrfs_delayed_node *btrfs_get_delayed_node( + struct inode *inode) +{ + struct btrfs_inode *btrfs_inode = BTRFS_I(inode); + struct btrfs_delayed_node *delayed_node; + + delayed_node = btrfs_inode->delayed_node; + if (delayed_node) + atomic_inc(&delayed_node->refs); + + return delayed_node; +} + +static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root, + u64 root_id) +{ + struct 
btrfs_key root_key; + + if (root->objectid == root_id) + return root; + + root_key.objectid = root_id; + root_key.type = BTRFS_ROOT_ITEM_KEY; + root_key.offset = (u64)-1; + return btrfs_read_fs_root_no_name(root->fs_info, &root_key); +} + +static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_delayed_item *item) +{ + struct btrfs_block_rsv *src_rsv; + struct btrfs_block_rsv *dst_rsv; + u64 num_bytes; + int ret; + + if (!trans->bytes_reserved) + return 0; + + src_rsv = trans->block_rsv; + dst_rsv = &root->fs_info->global_block_rsv; + + num_bytes = btrfs_calc_trans_metadata_size(root, 1); + ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); + if (!ret) { + item->bytes_reserved = num_bytes; + item->block_rsv = dst_rsv; + } + + return ret; +} + +static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, + struct btrfs_delayed_item *item) +{ + if (!item->bytes_reserved) + return; + + btrfs_block_rsv_release(root, item->block_rsv, + item->bytes_reserved); +} + +static int btrfs_delayed_inode_reserve_metadata( + struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_block_rsv *src_rsv; + struct btrfs_block_rsv *dst_rsv; + u64 num_bytes; + int ret; + + if (!trans->bytes_reserved) + return 0; + + src_rsv = trans->block_rsv; + dst_rsv = &root->fs_info->global_block_rsv; + + num_bytes = btrfs_calc_trans_metadata_size(root, 1); + ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); + if (!ret) + node->bytes_reserved = num_bytes; + + return ret; +} + +static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_block_rsv *rsv; + + if (!node->bytes_reserved) + return; + + rsv = &root->fs_info->global_block_rsv; + btrfs_block_rsv_release(root, rsv, + node->bytes_reserved); + node->bytes_reserved = 0; +} + +/* + * This helper will insert some continuous items into the same leaf according + * to the free space of the leaf. + */ +static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *item) +{ + struct btrfs_delayed_item *curr, *next; + int free_space; + int total_data_size = 0, total_size = 0; + struct extent_buffer *leaf; + char *data_ptr; + struct btrfs_key *keys; + u32 *data_size; + struct list_head head; + int slot; + int nitems; + int i; + int ret = 0; + + BUG_ON(!path->nodes[0]); + + leaf = path->nodes[0]; + free_space = btrfs_leaf_free_space(root, leaf); + INIT_LIST_HEAD(&head); + + next = item; + nitems = 0; + + /* + * count the number of the continuous items that we can insert in batch + */ + while (total_size + next->data_len + sizeof(struct btrfs_item) <= + free_space) { + total_data_size += next->data_len; + total_size += next->data_len + sizeof(struct btrfs_item); + list_add_tail(&next->tree_list, &head); + nitems++; + + curr = next; + next = __btrfs_next_delayed_item(curr); + if (!next) + break; + + if (!btrfs_is_continuous_delayed_item(curr, next)) + break; + } + + if (!nitems) { + ret = 0; + goto out; + } + + /* + * we need to allocate some memory space, but it might cause the task + * to sleep, so we set all locked nodes in the path to blocking locks + * first. 
+ */ + btrfs_set_path_blocking(path); + + keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS); + if (!keys) { + ret = -ENOMEM; + goto out; + } + + data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS); + if (!data_size) { + ret = -ENOMEM; + goto error; + } + + /* get keys of all the delayed items */ + i = 0; + list_for_each_entry(next, &head, tree_list) { + keys[i] = next->key; + data_size[i] = next->data_len; + i++; + } + + /* reset all the locked nodes in the path to spinning locks. */ + btrfs_clear_path_blocking(path, NULL); + + /* insert the keys of the items */ + ret = setup_items_for_insert(trans, root, path, keys, data_size, + total_data_size, total_size, nitems); + if (ret) + goto error; + + /* insert the dir index items */ + slot = path->slots[0]; + list_for_each_entry_safe(curr, next, &head, tree_list) { + data_ptr = btrfs_item_ptr(leaf, slot, char); + write_extent_buffer(leaf, &curr->data, + (unsigned long)data_ptr, + curr->data_len); + slot++; + + btrfs_delayed_item_release_metadata(root, curr); + + list_del(&curr->tree_list); + btrfs_release_delayed_item(curr); + } + +error: + kfree(data_size); + kfree(keys); +out: + return ret; +} + +/* + * This helper can just do simple insertion that needn't extend item for new + * data, such as directory name index insertion, inode insertion. + */ +static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *delayed_item) +{ + struct extent_buffer *leaf; + struct btrfs_item *item; + char *ptr; + int ret; + + ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key, + delayed_item->data_len); + if (ret < 0 && ret != -EEXIST) + return ret; + + leaf = path->nodes[0]; + + item = btrfs_item_nr(leaf, path->slots[0]); + ptr = btrfs_item_ptr(leaf, path->slots[0], char); + + write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr, + delayed_item->data_len); + btrfs_mark_buffer_dirty(leaf); + + btrfs_delayed_item_release_metadata(root, delayed_item); + return 0; +} + +/* + * we insert an item first, then if there are some continuous items, we try + * to insert those items into the same leaf. 
+ */ +static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_item *curr, *prev; + int ret = 0; + +do_again: + mutex_lock(&node->mutex); + curr = __btrfs_first_delayed_insertion_item(node); + if (!curr) + goto insert_end; + + ret = btrfs_insert_delayed_item(trans, root, path, curr); + if (ret < 0) { + btrfs_release_path(path); + goto insert_end; + } + + prev = curr; + curr = __btrfs_next_delayed_item(prev); + if (curr && btrfs_is_continuous_delayed_item(prev, curr)) { + /* insert the continuous items into the same leaf */ + path->slots[0]++; + btrfs_batch_insert_items(trans, root, path, curr); + } + btrfs_release_delayed_item(prev); + btrfs_mark_buffer_dirty(path->nodes[0]); + + btrfs_release_path(path); + mutex_unlock(&node->mutex); + goto do_again; + +insert_end: + mutex_unlock(&node->mutex); + return ret; +} + +static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *item) +{ + struct btrfs_delayed_item *curr, *next; + struct extent_buffer *leaf; + struct btrfs_key key; + struct list_head head; + int nitems, i, last_item; + int ret = 0; + + BUG_ON(!path->nodes[0]); + + leaf = path->nodes[0]; + + i = path->slots[0]; + last_item = btrfs_header_nritems(leaf) - 1; + if (i > last_item) + return -ENOENT; /* FIXME: Is errno suitable? */ + + next = item; + INIT_LIST_HEAD(&head); + btrfs_item_key_to_cpu(leaf, &key, i); + nitems = 0; + /* + * count the number of the dir index items that we can delete in batch + */ + while (btrfs_comp_cpu_keys(&next->key, &key) == 0) { + list_add_tail(&next->tree_list, &head); + nitems++; + + curr = next; + next = __btrfs_next_delayed_item(curr); + if (!next) + break; + + if (!btrfs_is_continuous_delayed_item(curr, next)) + break; + + i++; + if (i > last_item) + break; + btrfs_item_key_to_cpu(leaf, &key, i); + } + + if (!nitems) + return 0; + + ret = btrfs_del_items(trans, root, path, path->slots[0], nitems); + if (ret) + goto out; + + list_for_each_entry_safe(curr, next, &head, tree_list) { + btrfs_delayed_item_release_metadata(root, curr); + list_del(&curr->tree_list); + btrfs_release_delayed_item(curr); + } + +out: + return ret; +} + +static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_item *curr, *prev; + int ret = 0; + +do_again: + mutex_lock(&node->mutex); + curr = __btrfs_first_delayed_deletion_item(node); + if (!curr) + goto delete_fail; + + ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1); + if (ret < 0) + goto delete_fail; + else if (ret > 0) { + /* + * can't find the item which the node points to, so this node + * is invalid, just drop it. 
+ */ + prev = curr; + curr = __btrfs_next_delayed_item(prev); + btrfs_release_delayed_item(prev); + ret = 0; + btrfs_release_path(path); + if (curr) + goto do_again; + else + goto delete_fail; + } + + btrfs_batch_delete_items(trans, root, path, curr); + btrfs_release_path(path); + mutex_unlock(&node->mutex); + goto do_again; + +delete_fail: + btrfs_release_path(path); + mutex_unlock(&node->mutex); + return ret; +} + +static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node) +{ + struct btrfs_delayed_root *delayed_root; + + if (delayed_node && delayed_node->inode_dirty) { + BUG_ON(!delayed_node->root); + delayed_node->inode_dirty = 0; + delayed_node->count--; + + delayed_root = delayed_node->root->fs_info->delayed_root; + atomic_dec(&delayed_root->items); + if (atomic_read(&delayed_root->items) < + BTRFS_DELAYED_BACKGROUND && + waitqueue_active(&delayed_root->wait)) + wake_up(&delayed_root->wait); + } +} + +static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_node *node) +{ + struct btrfs_key key; + struct btrfs_inode_item *inode_item; + struct extent_buffer *leaf; + int ret; + + mutex_lock(&node->mutex); + if (!node->inode_dirty) { + mutex_unlock(&node->mutex); + return 0; + } + + key.objectid = node->inode_id; + btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); + key.offset = 0; + ret = btrfs_lookup_inode(trans, root, path, &key, 1); + if (ret > 0) { + btrfs_release_path(path); + mutex_unlock(&node->mutex); + return -ENOENT; + } else if (ret < 0) { + mutex_unlock(&node->mutex); + return ret; + } + + btrfs_unlock_up_safe(path, 1); + leaf = path->nodes[0]; + inode_item = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_inode_item); + write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item, + sizeof(struct btrfs_inode_item)); + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(path); + + btrfs_delayed_inode_release_metadata(root, node); + btrfs_release_delayed_inode(node); + mutex_unlock(&node->mutex); + + return 0; +} + +/* Called when committing the transaction. 
*/ +int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + struct btrfs_delayed_root *delayed_root; + struct btrfs_delayed_node *curr_node, *prev_node; + struct btrfs_path *path; + int ret = 0; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; + + delayed_root = btrfs_get_delayed_root(root); + + curr_node = btrfs_first_delayed_node(delayed_root); + while (curr_node) { + root = curr_node->root; + ret = btrfs_insert_delayed_items(trans, path, root, + curr_node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, root, + curr_node); + if (!ret) + ret = btrfs_update_delayed_inode(trans, root, path, + curr_node); + if (ret) { + btrfs_release_delayed_node(curr_node); + break; + } + + prev_node = curr_node; + curr_node = btrfs_next_delayed_node(curr_node); + btrfs_release_delayed_node(prev_node); + } + + btrfs_free_path(path); + return ret; +} + +static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_delayed_node *node) +{ + struct btrfs_path *path; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; + + ret = btrfs_insert_delayed_items(trans, path, node->root, node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, node->root, node); + if (!ret) + ret = btrfs_update_delayed_inode(trans, node->root, path, node); + btrfs_free_path(path); + + return ret; +} + +int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); + int ret; + + if (!delayed_node) + return 0; + + mutex_lock(&delayed_node->mutex); + if (!delayed_node->count) { + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return 0; + } + mutex_unlock(&delayed_node->mutex); + + ret = __btrfs_commit_inode_delayed_items(trans, delayed_node); + btrfs_release_delayed_node(delayed_node); + return ret; +} + +void btrfs_remove_delayed_node(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + + delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node); + if (!delayed_node) + return; + + BTRFS_I(inode)->delayed_node = NULL; + btrfs_release_delayed_node(delayed_node); +} + +struct btrfs_async_delayed_node { + struct btrfs_root *root; + struct btrfs_delayed_node *delayed_node; + struct btrfs_work work; +}; + +static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) +{ + struct btrfs_async_delayed_node *async_node; + struct btrfs_trans_handle *trans; + struct btrfs_path *path; + struct btrfs_delayed_node *delayed_node = NULL; + struct btrfs_root *root; + unsigned long nr = 0; + int need_requeue = 0; + int ret; + + async_node = container_of(work, struct btrfs_async_delayed_node, work); + + path = btrfs_alloc_path(); + if (!path) + goto out; + path->leave_spinning = 1; + + delayed_node = async_node->delayed_node; + root = delayed_node->root; + + trans = btrfs_join_transaction(root, 0); + if (IS_ERR(trans)) + goto free_path; + + ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, root, + delayed_node); + + if (!ret) + btrfs_update_delayed_inode(trans, root, path, delayed_node); + + /* + * Maybe new delayed items have been inserted, so we need requeue + * the work. Besides that, we must dequeue the empty delayed nodes + * to avoid the race between delayed items balance and the worker. 
+ * The race like this: + * Task1 Worker thread + * count == 0, needn't requeue + * also needn't insert the + * delayed node into prepare + * list again. + * add lots of delayed items + * queue the delayed node + * already in the list, + * and not in the prepare + * list, it means the delayed + * node is being dealt with + * by the worker. + * do delayed items balance + * the delayed node is being + * dealt with by the worker + * now, just wait. + * the worker goto idle. + * Task1 will sleep until the transaction is committed. + */ + mutex_lock(&delayed_node->mutex); + if (delayed_node->count) + need_requeue = 1; + else + btrfs_dequeue_delayed_node(root->fs_info->delayed_root, + delayed_node); + mutex_unlock(&delayed_node->mutex); + + nr = trans->blocks_used; + + btrfs_end_transaction_dmeta(trans, root); + __btrfs_btree_balance_dirty(root, nr); +free_path: + btrfs_free_path(path); +out: + if (need_requeue) + btrfs_requeue_work(&async_node->work); + else { + btrfs_release_prepared_delayed_node(delayed_node); + kfree(async_node); + } +} + +static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, + struct btrfs_root *root, int all) +{ + struct btrfs_async_delayed_node *async_node; + struct btrfs_delayed_node *curr; + int count = 0; + +again: + curr = btrfs_first_prepared_delayed_node(delayed_root); + if (!curr) + return 0; + + async_node = kmalloc(sizeof(*async_node), GFP_NOFS); + if (!async_node) { + btrfs_release_prepared_delayed_node(curr); + return -ENOMEM; + } + + async_node->root = root; + async_node->delayed_node = curr; + + async_node->work.func = btrfs_async_run_delayed_node_done; + async_node->work.flags = 0; + + btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work); + count++; + + if (all || count < 4) + goto again; + + return 0; +} + +void btrfs_balance_delayed_items(struct btrfs_root *root) +{ + struct btrfs_delayed_root *delayed_root; + + delayed_root = btrfs_get_delayed_root(root); + + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) + return; + + if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) { + int ret; + ret = btrfs_wq_run_delayed_node(delayed_root, root, 1); + if (ret) + return; + + wait_event_interruptible_timeout( + delayed_root->wait, + (atomic_read(&delayed_root->items) < + BTRFS_DELAYED_BACKGROUND), + HZ); + return; + } + + btrfs_wq_run_delayed_node(delayed_root, root, 0); +} + +int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, const char *name, + int name_len, struct inode *dir, + struct btrfs_disk_key *disk_key, u8 type, + u64 index) +{ + struct btrfs_delayed_node *delayed_node; + struct btrfs_delayed_item *delayed_item; + struct btrfs_dir_item *dir_item; + int ret; + + delayed_node = btrfs_get_or_create_delayed_node(dir); + if (IS_ERR(delayed_node)) + return PTR_ERR(delayed_node); + + delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len); + if (!delayed_item) { + ret = -ENOMEM; + goto release_node; + } + + ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item); + /* + * we have reserved enough space when we start a new transaction, + * so reserving metadata failure is impossible + */ + BUG_ON(ret); + + delayed_item->key.objectid = btrfs_ino(dir); + btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY); + delayed_item->key.offset = index; + + dir_item = (struct btrfs_dir_item *)delayed_item->data; + dir_item->location = *disk_key; + dir_item->transid = cpu_to_le64(trans->transid); + dir_item->data_len = 0; + 
dir_item->name_len = cpu_to_le16(name_len); + dir_item->type = type; + memcpy((char *)(dir_item + 1), name, name_len); + + mutex_lock(&delayed_node->mutex); + ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item); + if (unlikely(ret)) { + printk(KERN_ERR "err add delayed dir index item(name: %s) into " + "the insertion tree of the delayed node" + "(root id: %llu, inode id: %llu, errno: %d)\n", + name, + (unsigned long long)delayed_node->root->objectid, + (unsigned long long)delayed_node->inode_id, + ret); + BUG(); + } + mutex_unlock(&delayed_node->mutex); + +release_node: + btrfs_release_delayed_node(delayed_node); + return ret; +} + +static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root, + struct btrfs_delayed_node *node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + mutex_lock(&node->mutex); + item = __btrfs_lookup_delayed_insertion_item(node, key); + if (!item) { + mutex_unlock(&node->mutex); + return 1; + } + + btrfs_delayed_item_release_metadata(root, item); + btrfs_release_delayed_item(item); + mutex_unlock(&node->mutex); + return 0; +} + +int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *dir, + u64 index) +{ + struct btrfs_delayed_node *node; + struct btrfs_delayed_item *item; + struct btrfs_key item_key; + int ret; + + node = btrfs_get_or_create_delayed_node(dir); + if (IS_ERR(node)) + return PTR_ERR(node); + + item_key.objectid = btrfs_ino(dir); + btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY); + item_key.offset = index; + + ret = btrfs_delete_delayed_insertion_item(root, node, &item_key); + if (!ret) + goto end; + + item = btrfs_alloc_delayed_item(0); + if (!item) { + ret = -ENOMEM; + goto end; + } + + item->key = item_key; + + ret = btrfs_delayed_item_reserve_metadata(trans, root, item); + /* + * we have reserved enough space when we start a new transaction, + * so reserving metadata failure is impossible. + */ + BUG_ON(ret); + + mutex_lock(&node->mutex); + ret = __btrfs_add_delayed_deletion_item(node, item); + if (unlikely(ret)) { + printk(KERN_ERR "err add delayed dir index item(index: %llu) " + "into the deletion tree of the delayed node" + "(root id: %llu, inode id: %llu, errno: %d)\n", + (unsigned long long)index, + (unsigned long long)node->root->objectid, + (unsigned long long)node->inode_id, + ret); + BUG(); + } + mutex_unlock(&node->mutex); +end: + btrfs_release_delayed_node(node); + return ret; +} + +int btrfs_inode_delayed_dir_index_count(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node; + int ret = 0; + + if (!delayed_node) + return -ENOENT; + + /* + * Since we have held i_mutex of this directory, it is impossible that + * a new directory index is added into the delayed node and index_cnt + * is updated now. So we needn't lock the delayed node. 
+ */ + if (!delayed_node->index_cnt) + return -EINVAL; + + BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; + return ret; +} + +void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, + struct list_head *del_list) +{ + struct btrfs_delayed_node *delayed_node; + struct btrfs_delayed_item *item; + + delayed_node = btrfs_get_delayed_node(inode); + if (!delayed_node) + return; + + mutex_lock(&delayed_node->mutex); + item = __btrfs_first_delayed_insertion_item(delayed_node); + while (item) { + atomic_inc(&item->refs); + list_add_tail(&item->readdir_list, ins_list); + item = __btrfs_next_delayed_item(item); + } + + item = __btrfs_first_delayed_deletion_item(delayed_node); + while (item) { + atomic_inc(&item->refs); + list_add_tail(&item->readdir_list, del_list); + item = __btrfs_next_delayed_item(item); + } + mutex_unlock(&delayed_node->mutex); + /* + * This delayed node is still cached in the btrfs inode, so refs + * must be > 1 now, and we needn't check it is going to be freed + * or not. + * + * Besides that, this function is used to read dir, we do not + * insert/delete delayed items in this period. So we also needn't + * requeue or dequeue this delayed node. + */ + atomic_dec(&delayed_node->refs); +} + +void btrfs_put_delayed_items(struct list_head *ins_list, + struct list_head *del_list) +{ + struct btrfs_delayed_item *curr, *next; + + list_for_each_entry_safe(curr, next, ins_list, readdir_list) { + list_del(&curr->readdir_list); + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + } + + list_for_each_entry_safe(curr, next, del_list, readdir_list) { + list_del(&curr->readdir_list); + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + } +} + +int btrfs_should_delete_dir_index(struct list_head *del_list, + u64 index) +{ + struct btrfs_delayed_item *curr, *next; + int ret; + + if (list_empty(del_list)) + return 0; + + list_for_each_entry_safe(curr, next, del_list, readdir_list) { + if (curr->key.offset > index) + break; + + list_del(&curr->readdir_list); + ret = (curr->key.offset == index); + + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + + if (ret) + return 1; + else + continue; + } + return 0; +} + +/* + * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree + * + */ +int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, + filldir_t filldir, + struct list_head *ins_list) +{ + struct btrfs_dir_item *di; + struct btrfs_delayed_item *curr, *next; + struct btrfs_key location; + char *name; + int name_len; + int over = 0; + unsigned char d_type; + + if (list_empty(ins_list)) + return 0; + + /* + * Changing the data of the delayed item is impossible. So + * we needn't lock them. And we have held i_mutex of the + * directory, nobody can delete any directory indexes now. 
+ */ + list_for_each_entry_safe(curr, next, ins_list, readdir_list) { + list_del(&curr->readdir_list); + + if (curr->key.offset < filp->f_pos) { + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + continue; + } + + filp->f_pos = curr->key.offset; + + di = (struct btrfs_dir_item *)curr->data; + name = (char *)(di + 1); + name_len = le16_to_cpu(di->name_len); + + d_type = btrfs_filetype_table[di->type]; + btrfs_disk_key_to_cpu(&location, &di->location); + + over = filldir(dirent, name, name_len, curr->key.offset, + location.objectid, d_type); + + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + + if (over) + return 1; + } + return 0; +} + +BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, + generation, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, + sequence, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, + transid, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, + nbytes, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, + block_group, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); + +BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); +BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); + +static void fill_stack_inode_item(struct btrfs_trans_handle *trans, + struct btrfs_inode_item *inode_item, + struct inode *inode) +{ + btrfs_set_stack_inode_uid(inode_item, inode->i_uid); + btrfs_set_stack_inode_gid(inode_item, inode->i_gid); + btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size); + btrfs_set_stack_inode_mode(inode_item, inode->i_mode); + btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink); + btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode)); + btrfs_set_stack_inode_generation(inode_item, + BTRFS_I(inode)->generation); + btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence); + btrfs_set_stack_inode_transid(inode_item, trans->transid); + btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); + btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); + btrfs_set_stack_inode_block_group(inode_item, + BTRFS_I(inode)->block_group); + + btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), + inode->i_atime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item), + inode->i_atime.tv_nsec); + + btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item), + inode->i_mtime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item), + inode->i_mtime.tv_nsec); + + btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item), + inode->i_ctime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item), + inode->i_ctime.tv_nsec); +} + +int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + int ret; + + delayed_node = btrfs_get_or_create_delayed_node(inode); + if (IS_ERR(delayed_node)) + 
return PTR_ERR(delayed_node); + + mutex_lock(&delayed_node->mutex); + if (delayed_node->inode_dirty) { + fill_stack_inode_item(trans, &delayed_node->inode_item, inode); + goto release_node; + } + + ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node); + /* + * we must reserve enough space when we start a new transaction, + * so reserving metadata failure is impossible + */ + BUG_ON(ret); + + fill_stack_inode_item(trans, &delayed_node->inode_item, inode); + delayed_node->inode_dirty = 1; + delayed_node->count++; + atomic_inc(&root->fs_info->delayed_root->items); +release_node: + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return ret; +} + +static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node) +{ + struct btrfs_root *root = delayed_node->root; + struct btrfs_delayed_item *curr_item, *prev_item; + + mutex_lock(&delayed_node->mutex); + curr_item = __btrfs_first_delayed_insertion_item(delayed_node); + while (curr_item) { + btrfs_delayed_item_release_metadata(root, curr_item); + prev_item = curr_item; + curr_item = __btrfs_next_delayed_item(prev_item); + btrfs_release_delayed_item(prev_item); + } + + curr_item = __btrfs_first_delayed_deletion_item(delayed_node); + while (curr_item) { + btrfs_delayed_item_release_metadata(root, curr_item); + prev_item = curr_item; + curr_item = __btrfs_next_delayed_item(prev_item); + btrfs_release_delayed_item(prev_item); + } + + if (delayed_node->inode_dirty) { + btrfs_delayed_inode_release_metadata(root, delayed_node); + btrfs_release_delayed_inode(delayed_node); + } + mutex_unlock(&delayed_node->mutex); +} + +void btrfs_kill_delayed_inode_items(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + + delayed_node = btrfs_get_delayed_node(inode); + if (!delayed_node) + return; + + __btrfs_kill_delayed_node(delayed_node); + btrfs_release_delayed_node(delayed_node); +} + +void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) +{ + u64 inode_id = 0; + struct btrfs_delayed_node *delayed_nodes[8]; + int i, n; + + while (1) { + spin_lock(&root->inode_lock); + n = radix_tree_gang_lookup(&root->delayed_nodes_tree, + (void **)delayed_nodes, inode_id, + ARRAY_SIZE(delayed_nodes)); + if (!n) { + spin_unlock(&root->inode_lock); + break; + } + + inode_id = delayed_nodes[n - 1]->inode_id + 1; + + for (i = 0; i < n; i++) + atomic_inc(&delayed_nodes[i]->refs); + spin_unlock(&root->inode_lock); + + for (i = 0; i < n; i++) { + __btrfs_kill_delayed_node(delayed_nodes[i]); + btrfs_release_delayed_node(delayed_nodes[i]); + } + } +} diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h new file mode 100644 index 000000000000..eb7d240aa648 --- /dev/null +++ b/fs/btrfs/delayed-inode.h @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2011 Fujitsu. All rights reserved. + * Written by Miao Xie <miaox@cn.fujitsu.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef __DELAYED_TREE_OPERATION_H +#define __DELAYED_TREE_OPERATION_H + +#include <linux/rbtree.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <asm/atomic.h> + +#include "ctree.h" + +/* types of the delayed item */ +#define BTRFS_DELAYED_INSERTION_ITEM 1 +#define BTRFS_DELAYED_DELETION_ITEM 2 + +struct btrfs_delayed_root { + spinlock_t lock; + struct list_head node_list; + /* + * Used for delayed nodes which are waiting to be dealt with by the + * worker. If the delayed node is inserted into the work queue, we + * drop it from this list. + */ + struct list_head prepare_list; + atomic_t items; /* for delayed items */ + int nodes; /* for delayed nodes */ + wait_queue_head_t wait; +}; + +struct btrfs_delayed_node { + u64 inode_id; + u64 bytes_reserved; + struct btrfs_root *root; + /* Used to add the node into the delayed root's node list. */ + struct list_head n_list; + /* + * Used to add the node into the prepare list, the nodes in this list + * are waiting to be dealt with by the async worker. + */ + struct list_head p_list; + struct rb_root ins_root; + struct rb_root del_root; + struct mutex mutex; + struct btrfs_inode_item inode_item; + atomic_t refs; + u64 index_cnt; + bool in_list; + bool inode_dirty; + int count; +}; + +struct btrfs_delayed_item { + struct rb_node rb_node; + struct btrfs_key key; + struct list_head tree_list; /* used for batch insert/delete items */ + struct list_head readdir_list; /* used for readdir items */ + u64 bytes_reserved; + struct btrfs_block_rsv *block_rsv; + struct btrfs_delayed_node *delayed_node; + atomic_t refs; + int ins_or_del; + u32 data_len; + char data[0]; +}; + +static inline void btrfs_init_delayed_root( + struct btrfs_delayed_root *delayed_root) +{ + atomic_set(&delayed_root->items, 0); + delayed_root->nodes = 0; + spin_lock_init(&delayed_root->lock); + init_waitqueue_head(&delayed_root->wait); + INIT_LIST_HEAD(&delayed_root->node_list); + INIT_LIST_HEAD(&delayed_root->prepare_list); +} + +int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, const char *name, + int name_len, struct inode *dir, + struct btrfs_disk_key *disk_key, u8 type, + u64 index); + +int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *dir, + u64 index); + +int btrfs_inode_delayed_dir_index_count(struct inode *inode); + +int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root); + +void btrfs_balance_delayed_items(struct btrfs_root *root); + +int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct inode *inode); +/* Used for evicting the inode. 
*/ +void btrfs_remove_delayed_node(struct inode *inode); +void btrfs_kill_delayed_inode_items(struct inode *inode); + + +int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode); + +/* Used for dropping dead roots */ +void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); + +/* Used for readdir() */ +void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, + struct list_head *del_list); +void btrfs_put_delayed_items(struct list_head *ins_list, + struct list_head *del_list); +int btrfs_should_delete_dir_index(struct list_head *del_list, + u64 index); +int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, + filldir_t filldir, + struct list_head *ins_list); + +/* for init */ +int __init btrfs_delayed_inode_init(void); +void btrfs_delayed_inode_exit(void); +#endif diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index ab8afed671a0..1ddfca78e910 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -124,8 +124,9 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, * to use for the second index (if one is created). */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root - *root, const char *name, int name_len, u64 dir, - struct btrfs_key *location, u8 type, u64 index) + *root, const char *name, int name_len, + struct inode *dir, struct btrfs_key *location, + u8 type, u64 index) { int ret = 0; int ret2 = 0; @@ -137,13 +138,17 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root struct btrfs_disk_key disk_key; u32 data_size; - key.objectid = dir; + key.objectid = btrfs_ino(dir); btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; path->leave_spinning = 1; + btrfs_cpu_key_to_disk(&disk_key, location); + data_size = sizeof(*dir_item) + name_len; dir_item = insert_with_overflow(trans, root, path, &key, data_size, name, name_len); @@ -155,7 +160,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root } leaf = path->nodes[0]; - btrfs_cpu_key_to_disk(&disk_key, location); btrfs_set_dir_item_key(leaf, dir_item, &disk_key); btrfs_set_dir_type(leaf, dir_item, type); btrfs_set_dir_data_len(leaf, dir_item, 0); @@ -174,27 +178,9 @@ second_insert: } btrfs_release_path(path); - btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); - key.offset = index; - dir_item = insert_with_overflow(trans, root, path, &key, data_size, - name, name_len); - if (IS_ERR(dir_item)) { - ret2 = PTR_ERR(dir_item); - goto out_free; - } - leaf = path->nodes[0]; - btrfs_cpu_key_to_disk(&disk_key, location); - btrfs_set_dir_item_key(leaf, dir_item, &disk_key); - btrfs_set_dir_type(leaf, dir_item, type); - btrfs_set_dir_data_len(leaf, dir_item, 0); - btrfs_set_dir_name_len(leaf, dir_item, name_len); - btrfs_set_dir_transid(leaf, dir_item, trans->transid); - name_ptr = (unsigned long)(dir_item + 1); - write_extent_buffer(leaf, name, name_ptr, name_len); - btrfs_mark_buffer_dirty(leaf); - + ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir, + &disk_key, type, index); out_free: - btrfs_free_path(path); if (ret) return ret; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index cb9d1b8bfe74..a2eb3a3755db 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -42,6 +42,7 @@ #include "locking.h" #include "tree-log.h" #include "free-space-cache.h" +#include "inode-map.h" static struct extent_io_ops btree_extent_io_ops; static void 
end_workqueue_fn(struct btrfs_work *work); @@ -1045,6 +1046,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->name = NULL; root->in_sysfs = 0; root->inode_tree = RB_ROOT; + INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); root->block_rsv = NULL; root->orphan_block_rsv = NULL; @@ -1298,6 +1300,19 @@ again: if (IS_ERR(root)) return root; + root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); + if (!root->free_ino_ctl) + goto fail; + root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), + GFP_NOFS); + if (!root->free_ino_pinned) + goto fail; + + btrfs_init_free_ino_ctl(root); + mutex_init(&root->fs_commit_mutex); + spin_lock_init(&root->cache_lock); + init_waitqueue_head(&root->cache_wait); + set_anon_super(&root->anon_super, NULL); if (btrfs_root_refs(&root->root_item) == 0) { @@ -1631,6 +1646,13 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->ordered_extents); spin_lock_init(&fs_info->ordered_extent_lock); + fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), + GFP_NOFS); + if (!fs_info->delayed_root) { + err = -ENOMEM; + goto fail_iput; + } + btrfs_init_delayed_root(fs_info->delayed_root); sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); @@ -1696,7 +1718,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, bh = btrfs_read_dev_super(fs_devices->latest_bdev); if (!bh) { err = -EINVAL; - goto fail_iput; + goto fail_alloc; } memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); @@ -1708,7 +1730,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, disk_super = &fs_info->super_copy; if (!btrfs_super_root(disk_super)) - goto fail_iput; + goto fail_alloc; /* check FS state, whether FS is broken. */ fs_info->fs_state |= btrfs_super_flags(disk_super); @@ -1724,7 +1746,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, ret = btrfs_parse_options(tree_root, options); if (ret) { err = ret; - goto fail_iput; + goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super) & @@ -1734,7 +1756,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, "unsupported optional features (%Lx).\n", (unsigned long long)features); err = -EINVAL; - goto fail_iput; + goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super); @@ -1750,7 +1772,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, "unsupported option features (%Lx).\n", (unsigned long long)features); err = -EINVAL; - goto fail_iput; + goto fail_alloc; } btrfs_init_workers(&fs_info->generic_worker, @@ -1797,6 +1819,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", 1, &fs_info->generic_worker); + btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta", + fs_info->thread_pool_size, + &fs_info->generic_worker); /* * endios are largely parallel and should have a very @@ -1818,6 +1843,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); btrfs_start_workers(&fs_info->endio_write_workers, 1); btrfs_start_workers(&fs_info->endio_freespace_worker, 1); + btrfs_start_workers(&fs_info->delayed_workers, 1); fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, @@ -2074,6 +2100,9 @@ fail_sb_buffer: btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); + 
btrfs_stop_workers(&fs_info->delayed_workers); +fail_alloc: + kfree(fs_info->delayed_root); fail_iput: invalidate_inode_pages2(fs_info->btree_inode->i_mapping); iput(fs_info->btree_inode); @@ -2338,12 +2367,15 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) if (btrfs_root_refs(&root->root_item) == 0) synchronize_srcu(&fs_info->subvol_srcu); + __btrfs_remove_free_space_cache(root->free_ino_pinned); + __btrfs_remove_free_space_cache(root->free_ino_ctl); free_fs_root(root); return 0; } static void free_fs_root(struct btrfs_root *root) { + iput(root->cache_inode); WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); if (root->anon_super.s_dev) { down_write(&root->anon_super.s_umount); @@ -2351,6 +2383,8 @@ static void free_fs_root(struct btrfs_root *root) } free_extent_buffer(root->node); free_extent_buffer(root->commit_root); + kfree(root->free_ino_ctl); + kfree(root->free_ino_pinned); kfree(root->name); kfree(root); } @@ -2512,6 +2546,7 @@ int close_ctree(struct btrfs_root *root) del_fs_roots(fs_info); iput(fs_info->btree_inode); + kfree(fs_info->delayed_root); btrfs_stop_workers(&fs_info->generic_worker); btrfs_stop_workers(&fs_info->fixup_workers); @@ -2523,6 +2558,7 @@ int close_ctree(struct btrfs_root *root) btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); + btrfs_stop_workers(&fs_info->delayed_workers); btrfs_close_devices(fs_info->fs_devices); btrfs_mapping_tree_free(&fs_info->mapping_tree); @@ -2599,6 +2635,29 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) if (current->flags & PF_MEMALLOC) return; + btrfs_balance_delayed_items(root); + + num_dirty = root->fs_info->dirty_metadata_bytes; + + if (num_dirty > thresh) { + balance_dirty_pages_ratelimited_nr( + root->fs_info->btree_inode->i_mapping, 1); + } + return; +} + +void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) +{ + /* + * looks as though older kernels can get into trouble with + * this code, they end up stuck in balance_dirty_pages forever + */ + u64 num_dirty; + unsigned long thresh = 32 * 1024 * 1024; + + if (current->flags & PF_MEMALLOC) + return; + num_dirty = root->fs_info->dirty_metadata_bytes; if (num_dirty > thresh) { diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 2d75f9e896f6..a0b610a67aae 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -61,6 +61,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, struct btrfs_key *location); int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); +void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); void btrfs_mark_buffer_dirty(struct extent_buffer *buf); int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid); diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index b4ffad859adb..1b8dc33778f9 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -32,7 +32,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, len = BTRFS_FID_SIZE_NON_CONNECTABLE; type = FILEID_BTRFS_WITHOUT_PARENT; - fid->objectid = inode->i_ino; + fid->objectid = btrfs_ino(inode); fid->root_objectid = BTRFS_I(inode)->root->objectid; fid->gen = inode->i_generation; @@ -178,13 +178,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child) if (!path) return 
ERR_PTR(-ENOMEM); - if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = root->root_key.objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = (u64)-1; root = root->fs_info->tree_root; } else { - key.objectid = dir->i_ino; + key.objectid = btrfs_ino(dir); key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; } @@ -244,6 +244,7 @@ static int btrfs_get_name(struct dentry *parent, char *name, struct btrfs_key key; int name_len; int ret; + u64 ino; if (!dir || !inode) return -EINVAL; @@ -251,19 +252,21 @@ static int btrfs_get_name(struct dentry *parent, char *name, if (!S_ISDIR(dir->i_mode)) return -EINVAL; + ino = btrfs_ino(inode); + path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->leave_spinning = 1; - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + if (ino == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = BTRFS_I(inode)->root->root_key.objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = (u64)-1; root = root->fs_info->tree_root; } else { - key.objectid = inode->i_ino; - key.offset = dir->i_ino; + key.objectid = ino; + key.offset = btrfs_ino(dir); key.type = BTRFS_INODE_REF_KEY; } @@ -272,7 +275,7 @@ static int btrfs_get_name(struct dentry *parent, char *name, btrfs_free_path(path); return ret; } else if (ret > 0) { - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + if (ino == BTRFS_FIRST_FREE_OBJECTID) { path->slots[0]--; } else { btrfs_free_path(path); @@ -281,11 +284,11 @@ static int btrfs_get_name(struct dentry *parent, char *name, } leaf = path->nodes[0]; - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { - rref = btrfs_item_ptr(leaf, path->slots[0], + if (ino == BTRFS_FIRST_FREE_OBJECTID) { + rref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); - name_ptr = (unsigned long)(rref + 1); - name_len = btrfs_root_ref_name_len(leaf, rref); + name_ptr = (unsigned long)(rref + 1); + name_len = btrfs_root_ref_name_len(leaf, rref); } else { iref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_ref); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b457f195636e..98ca149bdbc8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache) WARN_ON(cache->pinned > 0); WARN_ON(cache->reserved > 0); WARN_ON(cache->reserved_pinned > 0); + kfree(cache->free_space_ctl); kfree(cache); } } @@ -3036,7 +3037,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes) /* make sure bytes are sectorsize aligned */ bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); - if (root == root->fs_info->tree_root) { + if (root == root->fs_info->tree_root || + BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) { alloc_chunk = 0; committed = 1; } @@ -3834,12 +3836,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info) WARN_ON(fs_info->chunk_block_rsv.reserved > 0); } -static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items) -{ - return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * - 3 * num_items; -} - int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_root *root, int num_items) @@ -3850,7 +3846,7 @@ int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, if (num_items == 0 || root->fs_info->chunk_root == root) return 0; - num_bytes = calc_trans_metadata_size(root, num_items); + num_bytes = btrfs_calc_trans_metadata_size(root, num_items); ret = btrfs_block_rsv_add(trans, root, 
&root->fs_info->trans_block_rsv, num_bytes); if (!ret) { @@ -3889,14 +3885,14 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, * If all of the metadata space is used, we can commit * transaction and use space it freed. */ - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } void btrfs_orphan_release_metadata(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); } @@ -3910,7 +3906,7 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans, * two for root back/forward refs, two for directory entries * and one for root of the snapshot. */ - u64 num_bytes = calc_trans_metadata_size(root, 5); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); dst_rsv->space_info = src_rsv->space_info; return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } @@ -3939,7 +3935,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) if (nr_extents > reserved_extents) { nr_extents -= reserved_extents; - to_reserve = calc_trans_metadata_size(root, nr_extents); + to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); } else { nr_extents = 0; to_reserve = 0; @@ -3993,7 +3989,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) to_free = calc_csum_metadata_size(inode, num_bytes); if (nr_extents > 0) - to_free += calc_trans_metadata_size(root, nr_extents); + to_free += btrfs_calc_trans_metadata_size(root, nr_extents); btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, to_free); @@ -4754,7 +4750,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, return 0; wait_event(caching_ctl->wait, block_group_cache_done(cache) || - (cache->free_space >= num_bytes)); + (cache->free_space_ctl->free_space >= num_bytes)); put_caching_control(caching_ctl); return 0; @@ -6908,10 +6904,16 @@ int btrfs_read_block_groups(struct btrfs_root *root) ret = -ENOMEM; goto error; } + cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), + GFP_NOFS); + if (!cache->free_space_ctl) { + kfree(cache); + ret = -ENOMEM; + goto error; + } atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); - spin_lock_init(&cache->tree_lock); cache->fs_info = info; INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); @@ -6919,14 +6921,6 @@ int btrfs_read_block_groups(struct btrfs_root *root) if (need_clear) cache->disk_cache_state = BTRFS_DC_CLEAR; - /* - * we only want to have 32k of ram per block group for keeping - * track of free space, and if we pass 1/2 of that we want to - * start converting things over to using bitmaps - */ - cache->extents_thresh = ((1024 * 32) / 2) / - sizeof(struct btrfs_free_space); - read_extent_buffer(leaf, &cache->item, btrfs_item_ptr_offset(leaf, path->slots[0]), sizeof(cache->item)); @@ -6937,6 +6931,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) cache->flags = btrfs_block_group_flags(&cache->item); cache->sectorsize = root->sectorsize; + btrfs_init_free_space_ctl(cache); + /* * We need to exclude the super stripes now so that the space * info has super bytes accounted for, otherwise we'll think @@ -7023,6 +7019,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache = kzalloc(sizeof(*cache), GFP_NOFS); if (!cache) return -ENOMEM; + 
cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), + GFP_NOFS); + if (!cache->free_space_ctl) { + kfree(cache); + return -ENOMEM; + } cache->key.objectid = chunk_offset; cache->key.offset = size; @@ -7030,19 +7032,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->sectorsize = root->sectorsize; cache->fs_info = root->fs_info; - /* - * we only want to have 32k of ram per block group for keeping track - * of free space, and if we pass 1/2 of that we want to start - * converting things over to using bitmaps - */ - cache->extents_thresh = ((1024 * 32) / 2) / - sizeof(struct btrfs_free_space); atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); - spin_lock_init(&cache->tree_lock); INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); + btrfs_init_free_space_ctl(cache); + btrfs_set_block_group_used(&cache->item, bytes_used); btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); cache->flags = type; @@ -7209,23 +7205,38 @@ out: int btrfs_init_space_info(struct btrfs_fs_info *fs_info) { struct btrfs_space_info *space_info; + struct btrfs_super_block *disk_super; + u64 features; + u64 flags; + int mixed = 0; int ret; - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0, - &space_info); - if (ret) - return ret; + disk_super = &fs_info->super_copy; + if (!btrfs_super_root(disk_super)) + return 1; - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0, - &space_info); - if (ret) - return ret; + features = btrfs_super_incompat_flags(disk_super); + if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) + mixed = 1; - ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0, - &space_info); + flags = BTRFS_BLOCK_GROUP_SYSTEM; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); if (ret) - return ret; + goto out; + if (mixed) { + flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); + } else { + flags = BTRFS_BLOCK_GROUP_METADATA; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); + if (ret) + goto out; + + flags = BTRFS_BLOCK_GROUP_DATA; + ret = update_space_info(fs_info, flags, 0, 0, &space_info); + } +out: return ret; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 91208296ff2b..64c8b361b539 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2810,7 +2810,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, * because there might be preallocation past i_size */ ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, - path, inode->i_ino, -1, 0); + path, btrfs_ino(inode), -1, 0); if (ret < 0) { btrfs_free_path(path); return ret; @@ -2823,7 +2823,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, found_type = btrfs_key_type(&found_key); /* No extents, but there might be delalloc bits */ - if (found_key.objectid != inode->i_ino || + if (found_key.objectid != btrfs_ino(inode) || found_type != BTRFS_EXTENT_DATA_KEY) { /* have to trust i_size as the end */ last = (u64)-1; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index f47e43d855aa..29e014984f60 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -208,8 +208,9 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, EXTENT_NODATASUM, GFP_NOFS); } else { printk(KERN_INFO "btrfs no csum found " - "for inode %lu start %llu\n", - inode->i_ino, + "for inode %llu start %llu\n", + (unsigned long long) + btrfs_ino(inode), (unsigned long long)offset); } item = 
NULL; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 566bdf298ea8..58ddc4442159 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -298,6 +298,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, struct btrfs_path *path; struct btrfs_key key; struct btrfs_key new_key; + u64 ino = btrfs_ino(inode); u64 search_start = start; u64 disk_bytenr = 0; u64 num_bytes = 0; @@ -318,14 +319,14 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, while (1) { recow = 0; - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, ino, search_start, -1); if (ret < 0) break; if (ret > 0 && path->slots[0] > 0 && search_start == start) { leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); - if (key.objectid == inode->i_ino && + if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY) path->slots[0]--; } @@ -346,7 +347,7 @@ next_slot: } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - if (key.objectid > inode->i_ino || + if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end) break; @@ -592,6 +593,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, int del_slot = 0; int recow; int ret; + u64 ino = btrfs_ino(inode); btrfs_drop_extent_cache(inode, start, end - 1, 0); @@ -600,7 +602,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, again: recow = 0; split = start; - key.objectid = inode->i_ino; + key.objectid = ino; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = split; @@ -612,8 +614,7 @@ again: leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - BUG_ON(key.objectid != inode->i_ino || - key.type != BTRFS_EXTENT_DATA_KEY); + BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY); fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); BUG_ON(btrfs_file_extent_type(leaf, fi) != @@ -630,7 +631,7 @@ again: other_start = 0; other_end = start; if (extent_mergeable(leaf, path->slots[0] - 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { new_key.offset = end; btrfs_set_item_key_safe(trans, root, path, &new_key); @@ -653,7 +654,7 @@ again: other_start = end; other_end = 0; if (extent_mergeable(leaf, path->slots[0] + 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); @@ -702,7 +703,7 @@ again: ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); if (split == start) { @@ -718,7 +719,7 @@ again: other_start = end; other_end = 0; if (extent_mergeable(leaf, path->slots[0] + 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { btrfs_release_path(path); @@ -729,13 +730,13 @@ again: del_nr++; ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); } other_start = 0; other_end = start; if (extent_mergeable(leaf, path->slots[0] - 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { btrfs_release_path(path); @@ -746,7 +747,7 @@ again: del_nr++; ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); } if (del_nr 
== 0) { diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 0290b0c7b003..70d45795d758 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -25,18 +25,17 @@ #include "transaction.h" #include "disk-io.h" #include "extent_io.h" +#include "inode-map.h" #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) -static void recalculate_thresholds(struct btrfs_block_group_cache - *block_group); -static int link_free_space(struct btrfs_block_group_cache *block_group, +static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info); -struct inode *lookup_free_space_inode(struct btrfs_root *root, - struct btrfs_block_group_cache - *block_group, struct btrfs_path *path) +static struct inode *__lookup_free_space_inode(struct btrfs_root *root, + struct btrfs_path *path, + u64 offset) { struct btrfs_key key; struct btrfs_key location; @@ -46,15 +45,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, struct inode *inode = NULL; int ret; - spin_lock(&block_group->lock); - if (block_group->inode) - inode = igrab(block_group->inode); - spin_unlock(&block_group->lock); - if (inode) - return inode; - key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -84,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, inode->i_mapping->flags &= ~__GFP_FS; + return inode; +} + +struct inode *lookup_free_space_inode(struct btrfs_root *root, + struct btrfs_block_group_cache + *block_group, struct btrfs_path *path) +{ + struct inode *inode = NULL; + + spin_lock(&block_group->lock); + if (block_group->inode) + inode = igrab(block_group->inode); + spin_unlock(&block_group->lock); + if (inode) + return inode; + + inode = __lookup_free_space_inode(root, path, + block_group->key.objectid); + if (IS_ERR(inode)) + return inode; + spin_lock(&block_group->lock); if (!root->fs_info->closing) { block_group->inode = igrab(inode); @@ -94,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, return inode; } -int create_free_space_inode(struct btrfs_root *root, - struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, - struct btrfs_path *path) +int __create_free_space_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path, u64 ino, u64 offset) { struct btrfs_key key; struct btrfs_disk_key disk_key; struct btrfs_free_space_header *header; struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; - u64 objectid; int ret; - ret = btrfs_find_free_objectid(trans, root, 0, &objectid); - if (ret < 0) - return ret; - - ret = btrfs_insert_empty_inode(trans, root, path, objectid); + ret = btrfs_insert_empty_inode(trans, root, path, ino); if (ret) return ret; @@ -131,13 +138,12 @@ int create_free_space_inode(struct btrfs_root *root, BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM); btrfs_set_inode_nlink(leaf, inode_item, 1); btrfs_set_inode_transid(leaf, inode_item, trans->transid); - btrfs_set_inode_block_group(leaf, inode_item, - block_group->key.objectid); + btrfs_set_inode_block_group(leaf, inode_item, offset); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_insert_empty_item(trans, root, path, &key, @@ -157,6 +163,22 @@ int 
create_free_space_inode(struct btrfs_root *root, return 0; } +int create_free_space_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache *block_group, + struct btrfs_path *path) +{ + int ret; + u64 ino; + + ret = btrfs_find_free_objectid(root, &ino); + if (ret < 0) + return ret; + + return __create_free_space_inode(root, trans, path, ino, + block_group->key.objectid); +} + int btrfs_truncate_free_space_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_path *path, @@ -187,7 +209,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, return ret; } - return btrfs_update_inode(trans, root, inode); + ret = btrfs_update_inode(trans, root, inode); + return ret; } static int readahead_cache(struct inode *inode) @@ -209,15 +232,13 @@ static int readahead_cache(struct inode *inode) return 0; } -int load_free_space_cache(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *block_group) +int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, + struct btrfs_free_space_ctl *ctl, + struct btrfs_path *path, u64 offset) { - struct btrfs_root *root = fs_info->tree_root; - struct inode *inode; struct btrfs_free_space_header *header; struct extent_buffer *leaf; struct page *page; - struct btrfs_path *path; u32 *checksums = NULL, *crc; char *disk_crcs = NULL; struct btrfs_key key; @@ -225,76 +246,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, u64 num_entries; u64 num_bitmaps; u64 generation; - u64 used = btrfs_block_group_used(&block_group->item); u32 cur_crc = ~(u32)0; pgoff_t index = 0; unsigned long first_page_offset; int num_checksums; - int ret = 0; - - /* - * If we're unmounting then just return, since this does a search on the - * normal root and not the commit root and we could deadlock. - */ - smp_mb(); - if (fs_info->closing) - return 0; - - /* - * If this block group has been marked to be cleared for one reason or - * another then we can't trust the on disk cache, so just return. 
- */ - spin_lock(&block_group->lock); - if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { - spin_unlock(&block_group->lock); - return 0; - } - spin_unlock(&block_group->lock); + int ret = 0, ret2; INIT_LIST_HEAD(&bitmaps); - path = btrfs_alloc_path(); - if (!path) - return 0; - - inode = lookup_free_space_inode(root, block_group, path); - if (IS_ERR(inode)) { - btrfs_free_path(path); - return 0; - } - /* Nothing in the space cache, goodbye */ - if (!i_size_read(inode)) { - btrfs_free_path(path); + if (!i_size_read(inode)) goto out; - } key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret) { - btrfs_free_path(path); + if (ret < 0) + goto out; + else if (ret > 0) { + btrfs_release_path(path); + ret = 0; goto out; } + ret = -1; + leaf = path->nodes[0]; header = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_free_space_header); num_entries = btrfs_free_space_entries(leaf, header); num_bitmaps = btrfs_free_space_bitmaps(leaf, header); generation = btrfs_free_space_generation(leaf, header); - btrfs_free_path(path); + btrfs_release_path(path); if (BTRFS_I(inode)->generation != generation) { printk(KERN_ERR "btrfs: free space inode generation (%llu) did" - " not match free space cache generation (%llu) for " - "block group %llu\n", + " not match free space cache generation (%llu)\n", (unsigned long long)BTRFS_I(inode)->generation, - (unsigned long long)generation, - (unsigned long long)block_group->key.objectid); - goto free_cache; + (unsigned long long)generation); + goto out; } if (!num_entries) @@ -311,10 +303,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, goto out; ret = readahead_cache(inode); - if (ret) { - ret = 0; + if (ret) goto out; - } while (1) { struct btrfs_free_space_entry *entry; @@ -333,10 +323,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, } page = grab_cache_page(inode->i_mapping, index); - if (!page) { - ret = 0; + if (!page) goto free_cache; - } if (!PageUptodate(page)) { btrfs_readpage(NULL, page); @@ -345,9 +333,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, unlock_page(page); page_cache_release(page); printk(KERN_ERR "btrfs: error reading free " - "space cache: %llu\n", - (unsigned long long) - block_group->key.objectid); + "space cache\n"); goto free_cache; } } @@ -360,13 +346,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, gen = addr + (sizeof(u32) * num_checksums); if (*gen != BTRFS_I(inode)->generation) { printk(KERN_ERR "btrfs: space cache generation" - " (%llu) does not match inode (%llu) " - "for block group %llu\n", + " (%llu) does not match inode (%llu)\n", (unsigned long long)*gen, (unsigned long long) - BTRFS_I(inode)->generation, - (unsigned long long) - block_group->key.objectid); + BTRFS_I(inode)->generation); kunmap(page); unlock_page(page); page_cache_release(page); @@ -382,9 +365,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, PAGE_CACHE_SIZE - start_offset); btrfs_csum_final(cur_crc, (char *)&cur_crc); if (cur_crc != *crc) { - printk(KERN_ERR "btrfs: crc mismatch for page %lu in " - "block group %llu\n", index, - (unsigned long long)block_group->key.objectid); + printk(KERN_ERR "btrfs: crc mismatch for page %lu\n", + index); kunmap(page); unlock_page(page); page_cache_release(page); @@ -417,9 +399,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, } if (entry->type == BTRFS_FREE_SPACE_EXTENT) { - spin_lock(&block_group->tree_lock); 
- ret = link_free_space(block_group, e); - spin_unlock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); + ret = link_free_space(ctl, e); + spin_unlock(&ctl->tree_lock); BUG_ON(ret); } else { e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); @@ -431,11 +413,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, page_cache_release(page); goto free_cache; } - spin_lock(&block_group->tree_lock); - ret = link_free_space(block_group, e); - block_group->total_bitmaps++; - recalculate_thresholds(block_group); - spin_unlock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); + ret2 = link_free_space(ctl, e); + ctl->total_bitmaps++; + ctl->op->recalc_thresholds(ctl); + spin_unlock(&ctl->tree_lock); list_add_tail(&e->list, &bitmaps); } @@ -471,41 +453,97 @@ next: index++; } - spin_lock(&block_group->tree_lock); - if (block_group->free_space != (block_group->key.offset - used - - block_group->bytes_super)) { - spin_unlock(&block_group->tree_lock); - printk(KERN_ERR "block group %llu has an wrong amount of free " - "space\n", block_group->key.objectid); - ret = 0; - goto free_cache; - } - spin_unlock(&block_group->tree_lock); - ret = 1; out: kfree(checksums); kfree(disk_crcs); - iput(inode); return ret; - free_cache: - /* This cache is bogus, make sure it gets cleared */ + __btrfs_remove_free_space_cache(ctl); + goto out; +} + +int load_free_space_cache(struct btrfs_fs_info *fs_info, + struct btrfs_block_group_cache *block_group) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct btrfs_root *root = fs_info->tree_root; + struct inode *inode; + struct btrfs_path *path; + int ret; + bool matched; + u64 used = btrfs_block_group_used(&block_group->item); + + /* + * If we're unmounting then just return, since this does a search on the + * normal root and not the commit root and we could deadlock. + */ + smp_mb(); + if (fs_info->closing) + return 0; + + /* + * If this block group has been marked to be cleared for one reason or + * another then we can't trust the on disk cache, so just return. 
+ */ spin_lock(&block_group->lock); - block_group->disk_cache_state = BTRFS_DC_CLEAR; + if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { + spin_unlock(&block_group->lock); + return 0; + } spin_unlock(&block_group->lock); - btrfs_remove_free_space_cache(block_group); - goto out; + + path = btrfs_alloc_path(); + if (!path) + return 0; + + inode = lookup_free_space_inode(root, block_group, path); + if (IS_ERR(inode)) { + btrfs_free_path(path); + return 0; + } + + ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, + path, block_group->key.objectid); + btrfs_free_path(path); + if (ret <= 0) + goto out; + + spin_lock(&ctl->tree_lock); + matched = (ctl->free_space == (block_group->key.offset - used - + block_group->bytes_super)); + spin_unlock(&ctl->tree_lock); + + if (!matched) { + __btrfs_remove_free_space_cache(ctl); + printk(KERN_ERR "block group %llu has a wrong amount of free " + "space\n", block_group->key.objectid); + ret = -1; + } +out: + if (ret < 0) { + /* This cache is bogus, make sure it gets cleared */ + spin_lock(&block_group->lock); + block_group->disk_cache_state = BTRFS_DC_CLEAR; + spin_unlock(&block_group->lock); + ret = 0; + + printk(KERN_ERR "btrfs: failed to load free space cache " + "for block group %llu\n", block_group->key.objectid); + } + + iput(inode); + return ret; } -int btrfs_write_out_cache(struct btrfs_root *root, - struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, - struct btrfs_path *path) +int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, + struct btrfs_free_space_ctl *ctl, + struct btrfs_block_group_cache *block_group, + struct btrfs_trans_handle *trans, + struct btrfs_path *path, u64 offset) { struct btrfs_free_space_header *header; struct extent_buffer *leaf; - struct inode *inode; struct rb_node *node; struct list_head *pos, *n; struct page **pages; @@ -522,35 +560,18 @@ int btrfs_write_out_cache(struct btrfs_root *root, int index = 0, num_pages = 0; int entries = 0; int bitmaps = 0; - int ret = 0; + int ret = -1; bool next_page = false; bool out_of_space = false; - root = root->fs_info->tree_root; - INIT_LIST_HEAD(&bitmap_list); - spin_lock(&block_group->lock); - if (block_group->disk_cache_state < BTRFS_DC_SETUP) { - spin_unlock(&block_group->lock); - return 0; - } - spin_unlock(&block_group->lock); - - inode = lookup_free_space_inode(root, block_group, path); - if (IS_ERR(inode)) - return 0; - - if (!i_size_read(inode)) { - iput(inode); + node = rb_first(&ctl->free_space_offset); + if (!node) return 0; - } - node = rb_first(&block_group->free_space_offset); - if (!node) { - iput(inode); - return 0; - } + if (!i_size_read(inode)) + return -1; num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; @@ -560,16 +581,13 @@ int btrfs_write_out_cache(struct btrfs_root *root, /* We need a checksum per page. 
*/ crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); - if (!crc) { - iput(inode); - return 0; - } + if (!crc) + return -1; pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); if (!pages) { kfree(crc); - iput(inode); - return 0; + return -1; } /* Since the first page has all of our checksums and our generation we @@ -579,7 +597,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); /* Get the cluster for this block_group if it exists */ - if (!list_empty(&block_group->cluster_list)) + if (block_group && !list_empty(&block_group->cluster_list)) cluster = list_entry(block_group->cluster_list.next, struct btrfs_free_cluster, block_group_list); @@ -621,7 +639,8 @@ int btrfs_write_out_cache(struct btrfs_root *root, * When searching for pinned extents, we need to start at our start * offset. */ - start = block_group->key.objectid; + if (block_group) + start = block_group->key.objectid; /* Write out the extent entries */ do { @@ -679,8 +698,9 @@ int btrfs_write_out_cache(struct btrfs_root *root, * We want to add any pinned extents to our free space cache * so we don't leak the space */ - while (!next_page && (start < block_group->key.objectid + - block_group->key.offset)) { + while (block_group && !next_page && + (start < block_group->key.objectid + + block_group->key.offset)) { ret = find_first_extent_bit(unpin, start, &start, &end, EXTENT_DIRTY); if (ret) { @@ -798,12 +818,12 @@ int btrfs_write_out_cache(struct btrfs_root *root, filemap_write_and_wait(inode->i_mapping); key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(trans, root, &key, path, 1, 1); if (ret < 0) { - ret = 0; + ret = -1; clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); @@ -816,8 +836,8 @@ int btrfs_write_out_cache(struct btrfs_root *root, path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || - found_key.offset != block_group->key.objectid) { - ret = 0; + found_key.offset != offset) { + ret = -1; clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, @@ -837,44 +857,78 @@ int btrfs_write_out_cache(struct btrfs_root *root, ret = 1; out_free: - if (ret == 0) { + if (ret != 1) { invalidate_inode_pages2_range(inode->i_mapping, 0, index); - spin_lock(&block_group->lock); - block_group->disk_cache_state = BTRFS_DC_ERROR; - spin_unlock(&block_group->lock); BTRFS_I(inode)->generation = 0; } kfree(checksums); kfree(pages); btrfs_update_inode(trans, root, inode); + return ret; +} + +int btrfs_write_out_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache *block_group, + struct btrfs_path *path) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct inode *inode; + int ret = 0; + + root = root->fs_info->tree_root; + + spin_lock(&block_group->lock); + if (block_group->disk_cache_state < BTRFS_DC_SETUP) { + spin_unlock(&block_group->lock); + return 0; + } + spin_unlock(&block_group->lock); + + inode = lookup_free_space_inode(root, block_group, path); + if (IS_ERR(inode)) + return 0; + + ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, + path, block_group->key.objectid); + if (ret < 0) { + spin_lock(&block_group->lock); + block_group->disk_cache_state = 
BTRFS_DC_ERROR; + spin_unlock(&block_group->lock); + ret = 0; + + printk(KERN_ERR "btrfs: failed to write free space cache " + "for block group %llu\n", block_group->key.objectid); + } + + iput(inode); return ret; } -static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, +static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit, u64 offset) { BUG_ON(offset < bitmap_start); offset -= bitmap_start; - return (unsigned long)(div64_u64(offset, sectorsize)); + return (unsigned long)(div_u64(offset, unit)); } -static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) +static inline unsigned long bytes_to_bits(u64 bytes, u32 unit) { - return (unsigned long)(div64_u64(bytes, sectorsize)); + return (unsigned long)(div_u64(bytes, unit)); } -static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, +static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset) { u64 bitmap_start; u64 bytes_per_bitmap; - bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; - bitmap_start = offset - block_group->key.objectid; + bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; + bitmap_start = offset - ctl->start; bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); bitmap_start *= bytes_per_bitmap; - bitmap_start += block_group->key.objectid; + bitmap_start += ctl->start; return bitmap_start; } @@ -932,10 +986,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, * offset. */ static struct btrfs_free_space * -tree_search_offset(struct btrfs_block_group_cache *block_group, +tree_search_offset(struct btrfs_free_space_ctl *ctl, u64 offset, int bitmap_only, int fuzzy) { - struct rb_node *n = block_group->free_space_offset.rb_node; + struct rb_node *n = ctl->free_space_offset.rb_node; struct btrfs_free_space *entry, *prev = NULL; /* find entry that is closest to the 'offset' */ @@ -1031,8 +1085,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, break; } } - if (entry->offset + BITS_PER_BITMAP * - block_group->sectorsize > offset) + if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) return entry; } else if (entry->offset + entry->bytes > offset) return entry; @@ -1043,7 +1096,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, while (1) { if (entry->bitmap) { if (entry->offset + BITS_PER_BITMAP * - block_group->sectorsize > offset) + ctl->unit > offset) break; } else { if (entry->offset + entry->bytes > offset) @@ -1059,42 +1112,47 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, } static inline void -__unlink_free_space(struct btrfs_block_group_cache *block_group, +__unlink_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { - rb_erase(&info->offset_index, &block_group->free_space_offset); - block_group->free_extents--; + rb_erase(&info->offset_index, &ctl->free_space_offset); + ctl->free_extents--; } -static void unlink_free_space(struct btrfs_block_group_cache *block_group, +static void unlink_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { - __unlink_free_space(block_group, info); - block_group->free_space -= info->bytes; + __unlink_free_space(ctl, info); + ctl->free_space -= info->bytes; } -static int link_free_space(struct btrfs_block_group_cache *block_group, +static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { int ret = 0; BUG_ON(!info->bitmap && !info->bytes); - ret = tree_insert_offset(&block_group->free_space_offset, info->offset, + ret = 
tree_insert_offset(&ctl->free_space_offset, info->offset, &info->offset_index, (info->bitmap != NULL)); if (ret) return ret; - block_group->free_space += info->bytes; - block_group->free_extents++; + ctl->free_space += info->bytes; + ctl->free_extents++; return ret; } -static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) +static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) { + struct btrfs_block_group_cache *block_group = ctl->private; u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; u64 size = block_group->key.offset; + u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; + int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); + + BUG_ON(ctl->total_bitmaps > max_bitmaps); /* * The goal is to keep the total amount of memory used per 1gb of space @@ -1112,10 +1170,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as * we add more bitmaps. */ - bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE; + bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; if (bitmap_bytes >= max_bytes) { - block_group->extents_thresh = 0; + ctl->extents_thresh = 0; return; } @@ -1126,47 +1184,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) extent_bytes = max_bytes - bitmap_bytes; extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2)); - block_group->extents_thresh = + ctl->extents_thresh = div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); } -static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, +static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset, u64 bytes) { - unsigned long start, end; - unsigned long i; + unsigned long start, count; - start = offset_to_bit(info->offset, block_group->sectorsize, offset); - end = start + bytes_to_bits(bytes, block_group->sectorsize); - BUG_ON(end > BITS_PER_BITMAP); + start = offset_to_bit(info->offset, ctl->unit, offset); + count = bytes_to_bits(bytes, ctl->unit); + BUG_ON(start + count > BITS_PER_BITMAP); - for (i = start; i < end; i++) - clear_bit(i, info->bitmap); + bitmap_clear(info->bitmap, start, count); info->bytes -= bytes; - block_group->free_space -= bytes; + ctl->free_space -= bytes; } -static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, +static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset, u64 bytes) { - unsigned long start, end; - unsigned long i; + unsigned long start, count; - start = offset_to_bit(info->offset, block_group->sectorsize, offset); - end = start + bytes_to_bits(bytes, block_group->sectorsize); - BUG_ON(end > BITS_PER_BITMAP); + start = offset_to_bit(info->offset, ctl->unit, offset); + count = bytes_to_bits(bytes, ctl->unit); + BUG_ON(start + count > BITS_PER_BITMAP); - for (i = start; i < end; i++) - set_bit(i, info->bitmap); + bitmap_set(info->bitmap, start, count); info->bytes += bytes; - block_group->free_space += bytes; + ctl->free_space += bytes; } -static int search_bitmap(struct btrfs_block_group_cache *block_group, +static int search_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) { @@ -1174,9 +1228,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, unsigned long bits, i; unsigned long next_zero; - i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, + i = 
offset_to_bit(bitmap_info->offset, ctl->unit, max_t(u64, *offset, bitmap_info->offset)); - bits = bytes_to_bits(*bytes, block_group->sectorsize); + bits = bytes_to_bits(*bytes, ctl->unit); for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); i < BITS_PER_BITMAP; @@ -1191,29 +1245,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, } if (found_bits) { - *offset = (u64)(i * block_group->sectorsize) + - bitmap_info->offset; - *bytes = (u64)(found_bits) * block_group->sectorsize; + *offset = (u64)(i * ctl->unit) + bitmap_info->offset; + *bytes = (u64)(found_bits) * ctl->unit; return 0; } return -1; } -static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache - *block_group, u64 *offset, - u64 *bytes, int debug) +static struct btrfs_free_space * +find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes) { struct btrfs_free_space *entry; struct rb_node *node; int ret; - if (!block_group->free_space_offset.rb_node) + if (!ctl->free_space_offset.rb_node) return NULL; - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, *offset), - 0, 1); + entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); if (!entry) return NULL; @@ -1223,7 +1273,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache continue; if (entry->bitmap) { - ret = search_bitmap(block_group, entry, offset, bytes); + ret = search_bitmap(ctl, entry, offset, bytes); if (!ret) return entry; continue; @@ -1237,33 +1287,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache return NULL; } -static void add_new_bitmap(struct btrfs_block_group_cache *block_group, +static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset) { - u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; - int max_bitmaps = (int)div64_u64(block_group->key.offset + - bytes_per_bg - 1, bytes_per_bg); - BUG_ON(block_group->total_bitmaps >= max_bitmaps); - - info->offset = offset_to_bitmap(block_group, offset); + info->offset = offset_to_bitmap(ctl, offset); info->bytes = 0; - link_free_space(block_group, info); - block_group->total_bitmaps++; + link_free_space(ctl, info); + ctl->total_bitmaps++; - recalculate_thresholds(block_group); + ctl->op->recalc_thresholds(ctl); } -static void free_bitmap(struct btrfs_block_group_cache *block_group, +static void free_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info) { - unlink_free_space(block_group, bitmap_info); + unlink_free_space(ctl, bitmap_info); kfree(bitmap_info->bitmap); kmem_cache_free(btrfs_free_space_cachep, bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); + ctl->total_bitmaps--; + ctl->op->recalc_thresholds(ctl); } -static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, +static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) { @@ -1272,8 +1317,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro int ret; again: - end = bitmap_info->offset + - (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; + end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; /* * XXX - this can go away after a few releases. 
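The hunks above convert the bitmap helpers from hard-coded block-group geometry (sectorsize, key.objectid) to the generic ctl->unit and ctl->start, which is what lets the same rb-tree and bitmap code back both block groups and the new inode-number cache. Below is a minimal userspace sketch of that unit-based arithmetic, assuming a 4K page (so 32768 bits per bitmap) and a 4K unit; the helper names mirror the kernel's, but the values and the program itself are illustrative only, not kernel code.

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BITMAP (4096 * 8)	/* assumes PAGE_CACHE_SIZE == 4096 */

/* which bit inside a bitmap covers 'offset' */
static unsigned long offset_to_bit(uint64_t bitmap_start, uint32_t unit,
				   uint64_t offset)
{
	return (unsigned long)((offset - bitmap_start) / unit);
}

/* how many bits a byte length spans */
static unsigned long bytes_to_bits(uint64_t bytes, uint32_t unit)
{
	return (unsigned long)(bytes / unit);
}

/* round 'offset' down to the start of the bitmap that covers it */
static uint64_t offset_to_bitmap(uint64_t start, uint32_t unit, uint64_t offset)
{
	uint64_t bytes_per_bitmap = (uint64_t)BITS_PER_BITMAP * unit;

	return start + ((offset - start) / bytes_per_bitmap) * bytes_per_bitmap;
}

int main(void)
{
	uint32_t unit = 4096;			/* ctl->unit: sectorsize for a block group */
	uint64_t start = 1024ULL << 20;		/* ctl->start: hypothetical block group objectid */
	uint64_t offset = start + 300 * 4096;	/* free extent to record */
	uint64_t bytes = 8 * 4096;

	uint64_t bm = offset_to_bitmap(start, unit, offset);

	/* prints: bitmap 1073741824, bit 300, 8 bits */
	printf("bitmap %llu, bit %lu, %lu bits\n",
	       (unsigned long long)bm,
	       offset_to_bit(bm, unit, offset),
	       bytes_to_bits(bytes, unit));
	return 0;
}

With one bitmap covering BITS_PER_BITMAP * unit bytes, every (offset, bytes) pair maps to a (bitmap, first bit, bit count) triple, which is exactly what bitmap_set_bits, bitmap_clear_bits, and search_bitmap above operate on.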
@@ -1288,24 +1332,22 @@ again: search_start = *offset; search_bytes = *bytes; search_bytes = min(search_bytes, end - search_start + 1); - ret = search_bitmap(block_group, bitmap_info, &search_start, - &search_bytes); + ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); BUG_ON(ret < 0 || search_start != *offset); if (*offset > bitmap_info->offset && *offset + *bytes > end) { - bitmap_clear_bits(block_group, bitmap_info, *offset, - end - *offset + 1); + bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1); *bytes -= end - *offset + 1; *offset = end + 1; } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { - bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); + bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes); *bytes = 0; } if (*bytes) { struct rb_node *next = rb_next(&bitmap_info->offset_index); if (!bitmap_info->bytes) - free_bitmap(block_group, bitmap_info); + free_bitmap(ctl, bitmap_info); /* * no entry after this bitmap, but we still have bytes to @@ -1332,31 +1374,28 @@ again: */ search_start = *offset; search_bytes = *bytes; - ret = search_bitmap(block_group, bitmap_info, &search_start, + ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); if (ret < 0 || search_start != *offset) return -EAGAIN; goto again; } else if (!bitmap_info->bytes) - free_bitmap(block_group, bitmap_info); + free_bitmap(ctl, bitmap_info); return 0; } -static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) +static bool use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) { - struct btrfs_free_space *bitmap_info; - int added = 0; - u64 bytes, offset, end; - int ret; + struct btrfs_block_group_cache *block_group = ctl->private; /* * If we are below the extents threshold then we can add this as an * extent, and don't have to deal with the bitmap */ - if (block_group->free_extents < block_group->extents_thresh) { + if (ctl->free_extents < ctl->extents_thresh) { /* * If this block group has some small extents we don't want to * use up all of our free slots in the cache with them, we want @@ -1365,11 +1404,10 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, * the overhead of a bitmap if we don't have to. 
*/ if (info->bytes <= block_group->sectorsize * 4) { - if (block_group->free_extents * 2 <= - block_group->extents_thresh) - return 0; + if (ctl->free_extents * 2 <= ctl->extents_thresh) + return false; } else { - return 0; + return false; } } @@ -1379,31 +1417,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, */ if (BITS_PER_BITMAP * block_group->sectorsize > block_group->key.offset) - return 0; + return false; + + return true; +} + +static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + struct btrfs_free_space *bitmap_info; + int added = 0; + u64 bytes, offset, end; + int ret; bytes = info->bytes; offset = info->offset; + if (!ctl->op->use_bitmap(ctl, info)) + return 0; + again: - bitmap_info = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), + bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); if (!bitmap_info) { BUG_ON(added); goto new_bitmap; } - end = bitmap_info->offset + - (u64)(BITS_PER_BITMAP * block_group->sectorsize); + end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); if (offset >= bitmap_info->offset && offset + bytes > end) { - bitmap_set_bits(block_group, bitmap_info, offset, - end - offset); + bitmap_set_bits(ctl, bitmap_info, offset, end - offset); bytes -= end - offset; offset = end; added = 0; } else if (offset >= bitmap_info->offset && offset + bytes <= end) { - bitmap_set_bits(block_group, bitmap_info, offset, bytes); + bitmap_set_bits(ctl, bitmap_info, offset, bytes); bytes = 0; } else { BUG(); @@ -1417,19 +1466,19 @@ again: new_bitmap: if (info && info->bitmap) { - add_new_bitmap(block_group, info, offset); + add_new_bitmap(ctl, info, offset); added = 1; info = NULL; goto again; } else { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* no pre-allocated info, allocate a new one */ if (!info) { info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); if (!info) { - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); ret = -ENOMEM; goto out; } @@ -1437,7 +1486,7 @@ new_bitmap: /* allocate the bitmap */ info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); if (!info->bitmap) { ret = -ENOMEM; goto out; @@ -1455,8 +1504,8 @@ out: return ret; } -static bool try_merge_free_space(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info, bool update_stat) +static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info, bool update_stat) { struct btrfs_free_space *left_info; struct btrfs_free_space *right_info; @@ -1469,18 +1518,18 @@ static bool try_merge_free_space(struct btrfs_block_group_cache *block_group, * are adding, if there is remove that struct and add a new one to * cover the entire range */ - right_info = tree_search_offset(block_group, offset + bytes, 0, 0); + right_info = tree_search_offset(ctl, offset + bytes, 0, 0); if (right_info && rb_prev(&right_info->offset_index)) left_info = rb_entry(rb_prev(&right_info->offset_index), struct btrfs_free_space, offset_index); else - left_info = tree_search_offset(block_group, offset - 1, 0, 0); + left_info = tree_search_offset(ctl, offset - 1, 0, 0); if (right_info && !right_info->bitmap) { if (update_stat) - unlink_free_space(block_group, right_info); + unlink_free_space(ctl, right_info); else - __unlink_free_space(block_group, right_info); + __unlink_free_space(ctl, right_info); info->bytes += right_info->bytes; 
kmem_cache_free(btrfs_free_space_cachep, right_info); merged = true; @@ -1489,9 +1538,9 @@ static bool try_merge_free_space(struct btrfs_block_group_cache *block_group, if (left_info && !left_info->bitmap && left_info->offset + left_info->bytes == offset) { if (update_stat) - unlink_free_space(block_group, left_info); + unlink_free_space(ctl, left_info); else - __unlink_free_space(block_group, left_info); + __unlink_free_space(ctl, left_info); info->offset = left_info->offset; info->bytes += left_info->bytes; kmem_cache_free(btrfs_free_space_cachep, left_info); @@ -1501,8 +1550,8 @@ static bool try_merge_free_space(struct btrfs_block_group_cache *block_group, return merged; } -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 offset, u64 bytes) +int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, + u64 offset, u64 bytes) { struct btrfs_free_space *info; int ret = 0; @@ -1514,9 +1563,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, info->offset = offset; info->bytes = bytes; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); - if (try_merge_free_space(block_group, info, true)) + if (try_merge_free_space(ctl, info, true)) goto link; /* @@ -1524,7 +1573,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, * extent then we know we're going to have to allocate a new extent, so * before we do that see if we need to drop this into a bitmap */ - ret = insert_into_bitmap(block_group, info); + ret = insert_into_bitmap(ctl, info); if (ret < 0) { goto out; } else if (ret) { @@ -1532,11 +1581,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, goto out; } link: - ret = link_free_space(block_group, info); + ret = link_free_space(ctl, info); if (ret) kmem_cache_free(btrfs_free_space_cachep, info); out: - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); if (ret) { printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); @@ -1549,21 +1598,21 @@ out: int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct btrfs_free_space *next_info = NULL; int ret = 0; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); again: - info = tree_search_offset(block_group, offset, 0, 0); + info = tree_search_offset(ctl, offset, 0, 0); if (!info) { /* * oops didn't find an extent that matched the space we wanted * to remove, look for a bitmap instead */ - info = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), + info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); if (!info) { WARN_ON(1); @@ -1578,8 +1627,8 @@ again: offset_index); if (next_info->bitmap) - end = next_info->offset + BITS_PER_BITMAP * - block_group->sectorsize - 1; + end = next_info->offset + + BITS_PER_BITMAP * ctl->unit - 1; else end = next_info->offset + next_info->bytes; @@ -1599,20 +1648,20 @@ again: } if (info->bytes == bytes) { - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); if (info->bitmap) { kfree(info->bitmap); - block_group->total_bitmaps--; + ctl->total_bitmaps--; } kmem_cache_free(btrfs_free_space_cachep, info); goto out_lock; } if (!info->bitmap && info->offset == offset) { - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); info->offset += bytes; info->bytes -= bytes; - link_free_space(block_group, info); + link_free_space(ctl, info); goto out_lock; } @@ 
-1626,13 +1675,13 @@ again: * first unlink the old info and then * insert it again after the hole we're creating */ - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); if (offset + bytes < info->offset + info->bytes) { u64 old_end = info->offset + info->bytes; info->offset = offset + bytes; info->bytes = old_end - info->offset; - ret = link_free_space(block_group, info); + ret = link_free_space(ctl, info); WARN_ON(ret); if (ret) goto out_lock; @@ -1642,7 +1691,7 @@ again: */ kmem_cache_free(btrfs_free_space_cachep, info); } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* step two, insert a new info struct to cover * anything before the hole @@ -1653,12 +1702,12 @@ again: goto out; } - ret = remove_from_bitmap(block_group, info, &offset, &bytes); + ret = remove_from_bitmap(ctl, info, &offset, &bytes); if (ret == -EAGAIN) goto again; BUG_ON(ret); out_lock: - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); out: return ret; } @@ -1666,11 +1715,12 @@ out: void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct rb_node *n; int count = 0; - for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) { + for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { info = rb_entry(n, struct btrfs_free_space, offset_index); if (info->bytes >= bytes) count++; @@ -1685,6 +1735,30 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "\n", count); } +static struct btrfs_free_space_op free_space_op = { + .recalc_thresholds = recalculate_thresholds, + .use_bitmap = use_bitmap, +}; + +void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + + spin_lock_init(&ctl->tree_lock); + ctl->unit = block_group->sectorsize; + ctl->start = block_group->key.objectid; + ctl->private = block_group; + ctl->op = &free_space_op; + + /* + * we only want to have 32k of ram per block group for keeping + * track of free space, and if we pass 1/2 of that we want to + * start converting things over to using bitmaps + */ + ctl->extents_thresh = ((1024 * 32) / 2) / + sizeof(struct btrfs_free_space); +} + /* * for a given cluster, put all of its extents back into the free * space cache. 
If the block group passed doesn't match the block group @@ -1696,6 +1770,7 @@ __btrfs_return_cluster_to_free_space( struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; struct rb_node *node; @@ -1717,8 +1792,8 @@ __btrfs_return_cluster_to_free_space( bitmap = (entry->bitmap != NULL); if (!bitmap) - try_merge_free_space(block_group, entry, false); - tree_insert_offset(&block_group->free_space_offset, + try_merge_free_space(ctl, entry, false); + tree_insert_offset(&ctl->free_space_offset, entry->offset, &entry->offset_index, bitmap); } cluster->root = RB_ROOT; @@ -1729,14 +1804,38 @@ out: return 0; } -void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) +void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl) { struct btrfs_free_space *info; struct rb_node *node; + + while ((node = rb_last(&ctl->free_space_offset)) != NULL) { + info = rb_entry(node, struct btrfs_free_space, offset_index); + unlink_free_space(ctl, info); + kfree(info->bitmap); + kmem_cache_free(btrfs_free_space_cachep, info); + if (need_resched()) { + spin_unlock(&ctl->tree_lock); + cond_resched(); + spin_lock(&ctl->tree_lock); + } + } +} + +void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) +{ + spin_lock(&ctl->tree_lock); + __btrfs_remove_free_space_cache_locked(ctl); + spin_unlock(&ctl->tree_lock); +} + +void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_cluster *cluster; struct list_head *head; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); while ((head = block_group->cluster_list.next) != &block_group->cluster_list) { cluster = list_entry(head, struct btrfs_free_cluster, @@ -1745,60 +1844,46 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) WARN_ON(cluster->block_group != block_group); __btrfs_return_cluster_to_free_space(block_group, cluster); if (need_resched()) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); cond_resched(); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); } } + __btrfs_remove_free_space_cache_locked(ctl); + spin_unlock(&ctl->tree_lock); - while ((node = rb_last(&block_group->free_space_offset)) != NULL) { - info = rb_entry(node, struct btrfs_free_space, offset_index); - if (!info->bitmap) { - unlink_free_space(block_group, info); - kmem_cache_free(btrfs_free_space_cachep, info); - } else { - free_bitmap(block_group, info); - } - - if (need_resched()) { - spin_unlock(&block_group->tree_lock); - cond_resched(); - spin_lock(&block_group->tree_lock); - } - } - - spin_unlock(&block_group->tree_lock); } u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; u64 bytes_search = bytes + empty_size; u64 ret = 0; - spin_lock(&block_group->tree_lock); - entry = find_free_space(block_group, &offset, &bytes_search, 0); + spin_lock(&ctl->tree_lock); + entry = find_free_space(ctl, &offset, &bytes_search); if (!entry) goto out; ret = offset; if (entry->bitmap) { - bitmap_clear_bits(block_group, entry, offset, bytes); + bitmap_clear_bits(ctl, entry, offset, bytes); if (!entry->bytes) - free_bitmap(block_group, entry); + free_bitmap(ctl, entry); } else 
{ - unlink_free_space(block_group, entry); + unlink_free_space(ctl, entry); entry->offset += bytes; entry->bytes -= bytes; if (!entry->bytes) kmem_cache_free(btrfs_free_space_cachep, entry); else - link_free_space(block_group, entry); + link_free_space(ctl, entry); } out: - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -1815,6 +1900,7 @@ int btrfs_return_cluster_to_free_space( struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster) { + struct btrfs_free_space_ctl *ctl; int ret; /* first, get a safe pointer to the block group */ @@ -1833,10 +1919,12 @@ int btrfs_return_cluster_to_free_space( atomic_inc(&block_group->count); spin_unlock(&cluster->lock); + ctl = block_group->free_space_ctl; + /* now return any extents the cluster had on it */ - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); ret = __btrfs_return_cluster_to_free_space(block_group, cluster); - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* finally drop our ref */ btrfs_put_block_group(block_group); @@ -1848,6 +1936,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *entry, u64 bytes, u64 min_start) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; int err; u64 search_start = cluster->window_start; u64 search_bytes = bytes; @@ -1856,13 +1945,12 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, search_start = min_start; search_bytes = bytes; - err = search_bitmap(block_group, entry, &search_start, - &search_bytes); + err = search_bitmap(ctl, entry, &search_start, &search_bytes); if (err) return 0; ret = search_start; - bitmap_clear_bits(block_group, entry, ret, bytes); + bitmap_clear_bits(ctl, entry, ret, bytes); return ret; } @@ -1876,6 +1964,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 bytes, u64 min_start) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; struct rb_node *node; u64 ret = 0; @@ -1933,20 +2022,20 @@ out: if (!ret) return 0; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); - block_group->free_space -= bytes; + ctl->free_space -= bytes; if (entry->bytes == 0) { - block_group->free_extents--; + ctl->free_extents--; if (entry->bitmap) { kfree(entry->bitmap); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); + ctl->total_bitmaps--; + ctl->op->recalc_thresholds(ctl); } kmem_cache_free(btrfs_free_space_cachep, entry); } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -1956,6 +2045,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 min_bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; unsigned long next_zero; unsigned long i; unsigned long search_bits; @@ -2010,7 +2100,7 @@ again: cluster->window_start = start * block_group->sectorsize + entry->offset; - rb_erase(&entry->offset_index, &block_group->free_space_offset); + rb_erase(&entry->offset_index, &ctl->free_space_offset); ret = tree_insert_offset(&cluster->root, entry->offset, &entry->offset_index, 1); BUG_ON(ret); @@ -2025,6 +2115,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 min_bytes) { + struct btrfs_free_space_ctl *ctl = 
block_group->free_space_ctl; struct btrfs_free_space *first = NULL; struct btrfs_free_space *entry = NULL; struct btrfs_free_space *prev = NULL; @@ -2035,7 +2126,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, u64 max_extent; u64 max_gap = 128 * 1024; - entry = tree_search_offset(block_group, offset, 0, 1); + entry = tree_search_offset(ctl, offset, 0, 1); if (!entry) return -ENOSPC; @@ -2101,7 +2192,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, if (entry->bitmap) continue; - rb_erase(&entry->offset_index, &block_group->free_space_offset); + rb_erase(&entry->offset_index, &ctl->free_space_offset); ret = tree_insert_offset(&cluster->root, entry->offset, &entry->offset_index, 0); BUG_ON(ret); @@ -2120,16 +2211,15 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 min_bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; struct rb_node *node; int ret = -ENOSPC; - if (block_group->total_bitmaps == 0) + if (ctl->total_bitmaps == 0) return -ENOSPC; - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), - 0, 1); + entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); if (!entry) return -ENOSPC; @@ -2162,6 +2252,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 empty_size) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; u64 min_bytes; int ret; @@ -2181,14 +2272,14 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, } else min_bytes = max(bytes, (bytes + empty_size) >> 2); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); /* * If we know we don't have enough space to make a cluster don't even * bother doing all the work to try and find one. 
*/ - if (block_group->free_space < min_bytes) { - spin_unlock(&block_group->tree_lock); + if (ctl->free_space < min_bytes) { + spin_unlock(&ctl->tree_lock); return -ENOSPC; } @@ -2214,7 +2305,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, } out: spin_unlock(&cluster->lock); - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -2235,6 +2326,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; struct btrfs_fs_info *fs_info = block_group->fs_info; u64 bytes = 0; @@ -2244,52 +2336,50 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, *trimmed = 0; while (start < end) { - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); - if (block_group->free_space < minlen) { - spin_unlock(&block_group->tree_lock); + if (ctl->free_space < minlen) { + spin_unlock(&ctl->tree_lock); break; } - entry = tree_search_offset(block_group, start, 0, 1); + entry = tree_search_offset(ctl, start, 0, 1); if (!entry) - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, - start), + entry = tree_search_offset(ctl, + offset_to_bitmap(ctl, start), 1, 1); if (!entry || entry->offset >= end) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); break; } if (entry->bitmap) { - ret = search_bitmap(block_group, entry, &start, &bytes); + ret = search_bitmap(ctl, entry, &start, &bytes); if (!ret) { if (start >= end) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); break; } bytes = min(bytes, end - start); - bitmap_clear_bits(block_group, entry, - start, bytes); + bitmap_clear_bits(ctl, entry, start, bytes); if (entry->bytes == 0) - free_bitmap(block_group, entry); + free_bitmap(ctl, entry); } else { start = entry->offset + BITS_PER_BITMAP * block_group->sectorsize; - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); ret = 0; continue; } } else { start = entry->offset; bytes = min(entry->bytes, end - start); - unlink_free_space(block_group, entry); + unlink_free_space(ctl, entry); kmem_cache_free(btrfs_free_space_cachep, entry); } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); if (bytes >= minlen) { int update_ret; @@ -2301,8 +2391,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, bytes, &actually_trimmed); - btrfs_add_free_space(block_group, - start, bytes); + btrfs_add_free_space(block_group, start, bytes); if (!update_ret) btrfs_update_reserved_bytes(block_group, bytes, 0, 1); @@ -2324,3 +2413,145 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, return ret; } + +/* + * Find the left-most item in the cache tree, and then return the + * smallest inode number in the item. + * + * Note: the returned inode number may not be the smallest one in + * the tree, if the left-most item is a bitmap. 
+ */ +u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root) +{ + struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; + struct btrfs_free_space *entry = NULL; + u64 ino = 0; + + spin_lock(&ctl->tree_lock); + + if (RB_EMPTY_ROOT(&ctl->free_space_offset)) + goto out; + + entry = rb_entry(rb_first(&ctl->free_space_offset), + struct btrfs_free_space, offset_index); + + if (!entry->bitmap) { + ino = entry->offset; + + unlink_free_space(ctl, entry); + entry->offset++; + entry->bytes--; + if (!entry->bytes) + kmem_cache_free(btrfs_free_space_cachep, entry); + else + link_free_space(ctl, entry); + } else { + u64 offset = 0; + u64 count = 1; + int ret; + + ret = search_bitmap(ctl, entry, &offset, &count); + BUG_ON(ret); + + ino = offset; + bitmap_clear_bits(ctl, entry, offset, 1); + if (entry->bytes == 0) + free_bitmap(ctl, entry); + } +out: + spin_unlock(&ctl->tree_lock); + + return ino; +} + +struct inode *lookup_free_ino_inode(struct btrfs_root *root, + struct btrfs_path *path) +{ + struct inode *inode = NULL; + + spin_lock(&root->cache_lock); + if (root->cache_inode) + inode = igrab(root->cache_inode); + spin_unlock(&root->cache_lock); + if (inode) + return inode; + + inode = __lookup_free_space_inode(root, path, 0); + if (IS_ERR(inode)) + return inode; + + spin_lock(&root->cache_lock); + if (!root->fs_info->closing) + root->cache_inode = igrab(inode); + spin_unlock(&root->cache_lock); + + return inode; +} + +int create_free_ino_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path) +{ + return __create_free_space_inode(root, trans, path, + BTRFS_FREE_INO_OBJECTID, 0); +} + +int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_path *path; + struct inode *inode; + int ret = 0; + u64 root_gen = btrfs_root_generation(&root->root_item); + + /* + * If we're unmounting then just return, since this does a search on the + * normal root and not the commit root and we could deadlock. 
+ */ + smp_mb(); + if (fs_info->closing) + return 0; + + path = btrfs_alloc_path(); + if (!path) + return 0; + + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode)) + goto out; + + if (root_gen != BTRFS_I(inode)->generation) + goto out_put; + + ret = __load_free_space_cache(root, inode, ctl, path, 0); + + if (ret < 0) + printk(KERN_ERR "btrfs: failed to load free ino cache for " + "root %llu\n", root->root_key.objectid); +out_put: + iput(inode); +out: + btrfs_free_path(path); + return ret; +} + +int btrfs_write_out_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct inode *inode; + int ret; + + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode)) + return 0; + + ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0); + if (ret < 0) + printk(KERN_ERR "btrfs: failed to write free ino cache " + "for root %llu\n", root->root_key.objectid); + + iput(inode); + return ret; +} diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 12b2b5165f8a..8f2613f779ed 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -27,6 +27,25 @@ struct btrfs_free_space { struct list_head list; }; +struct btrfs_free_space_ctl { + spinlock_t tree_lock; + struct rb_root free_space_offset; + u64 free_space; + int extents_thresh; + int free_extents; + int total_bitmaps; + int unit; + u64 start; + struct btrfs_free_space_op *op; + void *private; +}; + +struct btrfs_free_space_op { + void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl); + bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info); +}; + struct inode *lookup_free_space_inode(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); @@ -45,14 +64,36 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 bytenr, u64 size); + +struct inode *lookup_free_ino_inode(struct btrfs_root *root, + struct btrfs_path *path); +int create_free_ino_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path); +int load_free_ino_cache(struct btrfs_fs_info *fs_info, + struct btrfs_root *root); +int btrfs_write_out_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path); + +void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); +int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, + u64 bytenr, u64 size); +static inline int +btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 bytenr, u64 size) +{ + return __btrfs_add_free_space(block_group->free_space_ctl, + bytenr, size); +} int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); +void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl); void btrfs_remove_free_space_cache(struct btrfs_block_group_cache - *block_group); + *block_group); u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size); +u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root); void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes); int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c 
index c05a08f4c411..000970512624 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -16,11 +16,430 @@ * Boston, MA 021110-1307, USA. */ +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/pagemap.h> + #include "ctree.h" #include "disk-io.h" +#include "free-space-cache.h" +#include "inode-map.h" #include "transaction.h" -int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid) +static int caching_kthread(void *data) +{ + struct btrfs_root *root = data; + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_key key; + struct btrfs_path *path; + struct extent_buffer *leaf; + u64 last = (u64)-1; + int slot; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + /* Since the commit root is read-only, we can safely skip locking. */ + path->skip_locking = 1; + path->search_commit_root = 1; + path->reada = 2; + + key.objectid = BTRFS_FIRST_FREE_OBJECTID; + key.offset = 0; + key.type = BTRFS_INODE_ITEM_KEY; +again: + /* need to make sure the commit_root doesn't disappear */ + mutex_lock(&root->fs_commit_mutex); + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + while (1) { + smp_mb(); + if (fs_info->closing > 1) + goto out; + + leaf = path->nodes[0]; + slot = path->slots[0]; + if (path->slots[0] >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto out; + else if (ret > 0) + break; + + if (need_resched() || + btrfs_transaction_in_commit(fs_info)) { + leaf = path->nodes[0]; + + if (btrfs_header_nritems(leaf) == 0) { + WARN_ON(1); + break; + } + + /* + * Save the key so we can advance forward + * in the next search. + */ + btrfs_item_key_to_cpu(leaf, &key, 0); + btrfs_release_path(path); + root->cache_progress = last; + mutex_unlock(&root->fs_commit_mutex); + schedule_timeout(1); + goto again; + } else + continue; + } + + btrfs_item_key_to_cpu(leaf, &key, slot); + + if (key.type != BTRFS_INODE_ITEM_KEY) + goto next; + + if (key.objectid >= BTRFS_LAST_FREE_OBJECTID) + break; + + if (last != (u64)-1 && last + 1 != key.objectid) { + __btrfs_add_free_space(ctl, last + 1, + key.objectid - last - 1); + wake_up(&root->cache_wait); + } + + last = key.objectid; +next: + path->slots[0]++; + } + + if (last < BTRFS_LAST_FREE_OBJECTID - 1) { + __btrfs_add_free_space(ctl, last + 1, + BTRFS_LAST_FREE_OBJECTID - last - 1); + } + + spin_lock(&root->cache_lock); + root->cached = BTRFS_CACHE_FINISHED; + spin_unlock(&root->cache_lock); + + root->cache_progress = (u64)-1; + btrfs_unpin_free_ino(root); +out: + wake_up(&root->cache_wait); + mutex_unlock(&root->fs_commit_mutex); + + btrfs_free_path(path); + + return ret; +} + +static void start_caching(struct btrfs_root *root) +{ + struct task_struct *tsk; + int ret; + + spin_lock(&root->cache_lock); + if (root->cached != BTRFS_CACHE_NO) { + spin_unlock(&root->cache_lock); + return; + } + + root->cached = BTRFS_CACHE_STARTED; + spin_unlock(&root->cache_lock); + + ret = load_free_ino_cache(root->fs_info, root); + if (ret == 1) { + spin_lock(&root->cache_lock); + root->cached = BTRFS_CACHE_FINISHED; + spin_unlock(&root->cache_lock); + return; + } + + tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", + root->root_key.objectid); + BUG_ON(IS_ERR(tsk)); +}
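A minimal caller-side sketch of the allocation API introduced above (btrfs_find_free_ino() and btrfs_return_ino() come from this patch; the create_some_inode() helper is hypothetical, so this is illustrative context rather than part of the change):

static int alloc_ino_example(struct btrfs_root *root)
{
	u64 objectid;
	int err;

	/* may kick off the caching kthread above and wait on it */
	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		return err;	/* -ENOSPC: no free inode number left */

	err = create_some_inode(root, objectid);	/* hypothetical */
	if (err)
		/* hand the unused number back so it can be recycled */
		btrfs_return_ino(root, objectid);
	return err;
}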
+ +int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) +{ +again: + *objectid = btrfs_find_ino_for_alloc(root); + + if (*objectid != 0) + return 0; + + start_caching(root); + + wait_event(root->cache_wait, + root->cached == BTRFS_CACHE_FINISHED || + root->free_ino_ctl->free_space > 0); + + if (root->cached == BTRFS_CACHE_FINISHED && + root->free_ino_ctl->free_space == 0) + return -ENOSPC; + else + goto again; +} + +void btrfs_return_ino(struct btrfs_root *root, u64 objectid) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; +again: + if (root->cached == BTRFS_CACHE_FINISHED) { + __btrfs_add_free_space(ctl, objectid, 1); + } else { + /* + * If we are in the process of caching free ino chunks, + * to avoid adding the same inode number to the free_ino + * tree twice across transactions, we'll leave it + * in the pinned tree until a transaction is committed + * or the caching work is done. + */ + + mutex_lock(&root->fs_commit_mutex); + spin_lock(&root->cache_lock); + if (root->cached == BTRFS_CACHE_FINISHED) { + spin_unlock(&root->cache_lock); + mutex_unlock(&root->fs_commit_mutex); + goto again; + } + spin_unlock(&root->cache_lock); + + start_caching(root); + + if (objectid <= root->cache_progress) + __btrfs_add_free_space(ctl, objectid, 1); + else + __btrfs_add_free_space(pinned, objectid, 1); + + mutex_unlock(&root->fs_commit_mutex); + } +} + +/* + * When a transaction is committed, we'll move those inode numbers which + * are smaller than root->cache_progress from the pinned tree to the + * free_ino tree, and the others will just be dropped, because the commit + * root we were searching has changed. + * + * Must be called with root->fs_commit_mutex held. + */ +void btrfs_unpin_free_ino(struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset; + struct btrfs_free_space *info; + struct rb_node *n; + u64 count; + + while (1) { + n = rb_first(rbroot); + if (!n) + break; + + info = rb_entry(n, struct btrfs_free_space, offset_index); + BUG_ON(info->bitmap); + + if (info->offset > root->cache_progress) + goto free; + else if (info->offset + info->bytes > root->cache_progress) + count = root->cache_progress - info->offset + 1; + else + count = info->bytes; + + __btrfs_add_free_space(ctl, info->offset, count); +free: + rb_erase(&info->offset_index, rbroot); + kfree(info); + } +} + +#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space)) +#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) + +/* + * The goal is to keep the memory used by the free_ino tree from exceeding + * the memory we would use if we stored everything in bitmaps only. + */ +static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) +{ + struct btrfs_free_space *info; + struct rb_node *n; + int max_ino; + int max_bitmaps; + + n = rb_last(&ctl->free_space_offset); + if (!n) { + ctl->extents_thresh = INIT_THRESHOLD; + return; + } + info = rb_entry(n, struct btrfs_free_space, offset_index); + + /* + * Find the maximum inode number in the filesystem. Note we + * ignore the fact that this can be a bitmap, because we are + * not doing a precise calculation. + */ + max_ino = info->bytes - 1; + + max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP; + if (max_bitmaps <= ctl->total_bitmaps) { + ctl->extents_thresh = 0; + return; + } + + ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) * + PAGE_CACHE_SIZE / sizeof(*info); +}
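A worked example of the threshold arithmetic above, assuming 4K pages and a 64-byte struct btrfs_free_space (both are build-dependent assumptions, not guaranteed by the patch). Under the same assumptions, INIT_THRESHOLD comes out to (32768 / 2) / 64 = 256 extent entries:

/* INODES_PER_BITMAP = 4096 * 8 = 32768 inodes tracked per bitmap page */
static int extents_thresh_example(u64 max_ino, int total_bitmaps)
{
	int max_bitmaps = DIV_ROUND_UP(max_ino, 32768);

	if (max_bitmaps <= total_bitmaps)
		return 0;

	/*
	 * e.g. max_ino = 1048576 gives max_bitmaps = 32; with 2 bitmaps
	 * already in use: (32 - 2) * 4096 / 64 = 1920 extent entries.
	 */
	return (max_bitmaps - total_bitmaps) * 4096 / 64;
}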
+ +/* + * We don't fall back to a bitmap if we are below the extents threshold + * or if this chunk of inode numbers is a big one. + */ +static bool use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + if (ctl->free_extents < ctl->extents_thresh || + info->bytes > INODES_PER_BITMAP / 10) + return false; + + return true; +} + +static struct btrfs_free_space_op free_ino_op = { + .recalc_thresholds = recalculate_thresholds, + .use_bitmap = use_bitmap, +}; + +static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl) +{ +} + +static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + /* + * We always use extents for two reasons: + * + * - The pinned tree is only used during the caching work. + * - It keeps the code simpler. See btrfs_unpin_free_ino(). + */ + return false; +} + +static struct btrfs_free_space_op pinned_free_ino_op = { + .recalc_thresholds = pinned_recalc_thresholds, + .use_bitmap = pinned_use_bitmap, +}; + +void btrfs_init_free_ino_ctl(struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; + + spin_lock_init(&ctl->tree_lock); + ctl->unit = 1; + ctl->start = 0; + ctl->private = NULL; + ctl->op = &free_ino_op; + + /* + * Initially we allow using 16K of RAM to cache chunks of + * inode numbers before we resort to bitmaps. This is somewhat + * arbitrary, but it will be adjusted at runtime. + */ + ctl->extents_thresh = INIT_THRESHOLD; + + spin_lock_init(&pinned->tree_lock); + pinned->unit = 1; + pinned->start = 0; + pinned->private = NULL; + pinned->extents_thresh = 0; + pinned->op = &pinned_free_ino_op; +} + +int btrfs_save_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_path *path; + struct inode *inode; + u64 alloc_hint = 0; + int ret; + int prealloc; + bool retry = false; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; +again: + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { + ret = PTR_ERR(inode); + goto out; + } + + if (IS_ERR(inode)) { + BUG_ON(retry); + retry = true; + + ret = create_free_ino_inode(root, trans, path); + if (ret) + goto out; + goto again; + } + + BTRFS_I(inode)->generation = 0; + ret = btrfs_update_inode(trans, root, inode); + WARN_ON(ret); + + if (i_size_read(inode) > 0) { + ret = btrfs_truncate_free_space_cache(root, trans, path, inode); + if (ret) + goto out_put; + } + + spin_lock(&root->cache_lock); + if (root->cached != BTRFS_CACHE_FINISHED) { + ret = -1; + spin_unlock(&root->cache_lock); + goto out_put; + } + spin_unlock(&root->cache_lock); + + spin_lock(&ctl->tree_lock); + prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents; + prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE); + prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE; + spin_unlock(&ctl->tree_lock); + + /* Just to make sure we have enough space */ + prealloc += 8 * PAGE_CACHE_SIZE; + + ret = btrfs_check_data_free_space(inode, prealloc); + if (ret) + goto out_put; + + ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, + prealloc, prealloc, &alloc_hint); + if (ret) + goto out_put; + btrfs_free_reserved_data_space(inode, prealloc); + +out_put: + iput(inode); +out: + if (ret == 0) + ret = btrfs_write_out_ino_cache(root, trans, path); + + btrfs_free_path(path); + return ret; +} + +static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) { struct btrfs_path *path; int ret; @@ -55,15 +474,14 @@ error: return ret; } -int 
btrfs_find_free_objectid(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 dirid, u64 *objectid) +int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid) { int ret; mutex_lock(&root->objectid_mutex); if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) { - ret = btrfs_find_highest_inode(root, &root->highest_objectid); + ret = btrfs_find_highest_objectid(root, + &root->highest_objectid); if (ret) goto out; } diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h new file mode 100644 index 000000000000..ddb347bfee23 --- /dev/null +++ b/fs/btrfs/inode-map.h @@ -0,0 +1,13 @@ +#ifndef __BTRFS_INODE_MAP +#define __BTRFS_INODE_MAP + +void btrfs_init_free_ino_ctl(struct btrfs_root *root); +void btrfs_unpin_free_ino(struct btrfs_root *root); +void btrfs_return_ino(struct btrfs_root *root, u64 objectid); +int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid); +int btrfs_save_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans); + +int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid); + +#endif diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 1d1017f91558..8ae72c3eedb1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -52,6 +52,7 @@ #include "compression.h" #include "locking.h" #include "free-space-cache.h" +#include "inode-map.h" struct btrfs_iget_args { u64 ino; @@ -139,7 +140,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, path->leave_spinning = 1; btrfs_set_trans_block_group(trans, inode); - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); key.offset = start; btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); datasize = btrfs_file_extent_calc_inline_size(cur_size); @@ -746,6 +747,15 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start, return alloc_hint; } +static inline bool is_free_space_inode(struct btrfs_root *root, + struct inode *inode) +{ + if (root == root->fs_info->tree_root || + BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) + return true; + return false; +} + /* * when extent_io.c finds a delayed allocation range in the file, * the call backs end up in this code. 
The basic idea is to @@ -778,7 +788,7 @@ static noinline int cow_file_range(struct inode *inode, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 0; - BUG_ON(root == root->fs_info->tree_root); + BUG_ON(is_free_space_inode(root, inode)); trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); @@ -1050,29 +1060,31 @@ static noinline int run_delalloc_nocow(struct inode *inode, int type; int nocow; int check_prev = 1; - bool nolock = false; + bool nolock; + u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); BUG_ON(!path); - if (root == root->fs_info->tree_root) { - nolock = true; + + nolock = is_free_space_inode(root, inode); + + if (nolock) trans = btrfs_join_transaction_nolock(root, 1); - } else { + else trans = btrfs_join_transaction(root, 1); - } BUG_ON(IS_ERR(trans)); cow_start = (u64)-1; cur_offset = start; while (1) { - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, ino, cur_offset, 0); BUG_ON(ret < 0); if (ret > 0 && path->slots[0] > 0 && check_prev) { leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); - if (found_key.objectid == inode->i_ino && + if (found_key.objectid == ino && found_key.type == BTRFS_EXTENT_DATA_KEY) path->slots[0]--; } @@ -1093,7 +1105,7 @@ next_slot: num_bytes = 0; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid > inode->i_ino || + if (found_key.objectid > ino || found_key.type > BTRFS_EXTENT_DATA_KEY || found_key.offset > end) break; @@ -1128,7 +1140,7 @@ next_slot: goto out_check; if (btrfs_extent_readonly(root, disk_bytenr)) goto out_check; - if (btrfs_cross_ref_exist(trans, root, inode->i_ino, + if (btrfs_cross_ref_exist(trans, root, ino, found_key.offset - extent_offset, disk_bytenr)) goto out_check; @@ -1317,8 +1329,7 @@ static int btrfs_set_bit_hook(struct inode *inode, if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; - int do_list = (root->root_key.objectid != - BTRFS_ROOT_TREE_OBJECTID); + bool do_list = !is_free_space_inode(root, inode); if (*bits & EXTENT_FIRST_DELALLOC) *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1351,8 +1362,7 @@ static int btrfs_clear_bit_hook(struct inode *inode, if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; - int do_list = (root->root_key.objectid != - BTRFS_ROOT_TREE_OBJECTID); + bool do_list = !is_free_space_inode(root, inode); if (*bits & EXTENT_FIRST_DELALLOC) *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1459,7 +1469,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - if (root == root->fs_info->tree_root) + if (is_free_space_inode(root, inode)) ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); else ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); @@ -1645,7 +1655,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, &hint, 0); BUG_ON(ret); - ins.objectid = inode->i_ino; + ins.objectid = btrfs_ino(inode); ins.offset = file_pos; ins.type = BTRFS_EXTENT_DATA_KEY; ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); @@ -1676,7 +1686,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, ins.type = BTRFS_EXTENT_ITEM_KEY; ret = 
btrfs_alloc_reserved_file_extent(trans, root, root->root_key.objectid, - inode->i_ino, file_pos, &ins); + btrfs_ino(inode), file_pos, &ins); BUG_ON(ret); btrfs_free_path(path); @@ -1702,7 +1712,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct extent_state *cached_state = NULL; int compress_type = 0; int ret; - bool nolock = false; + bool nolock; ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, end - start + 1); @@ -1710,7 +1720,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) return 0; BUG_ON(!ordered_extent); - nolock = (root == root->fs_info->tree_root); + nolock = is_free_space_inode(root, inode); if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { BUG_ON(!list_empty(&ordered_extent->list)); @@ -2005,8 +2015,9 @@ good: return 0; zeroit: - printk_ratelimited(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " - "private %llu\n", page->mapping->host->i_ino, + printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u " + "private %llu\n", + (unsigned long long)btrfs_ino(page->mapping->host), (unsigned long long)start, csum, (unsigned long long)private); memset(kaddr + offset, 1, end - start + 1); @@ -2243,7 +2254,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) /* insert an orphan item to track this unlinked/truncated file */ if (insert >= 1) { - ret = btrfs_insert_orphan_item(trans, root, inode->i_ino); + ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); BUG_ON(ret); } @@ -2280,7 +2291,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) spin_unlock(&root->orphan_lock); if (trans && delete_item) { - ret = btrfs_del_orphan_item(trans, root, inode->i_ino); + ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); BUG_ON(ret); } @@ -2542,7 +2553,8 @@ static void btrfs_read_locked_inode(struct inode *inode) * try to precache a NULL acl entry for files that don't have * any xattrs or acls */ - maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino); + maybe_acls = acls_after_inode_item(leaf, path->slots[0], + btrfs_ino(inode)); if (!maybe_acls) cache_no_acl(inode); @@ -2646,11 +2658,26 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; int ret; + /* + * If the root is the tree root, it means this inode is used to + * store free space information. These inodes are updated when + * committing the transaction, so their updates must not be + * delayed, or a deadlock will occur. + */ + if (!is_free_space_inode(root, inode)) { + ret = btrfs_delayed_update_inode(trans, root, inode); + if (!ret) + btrfs_set_inode_last_trans(trans, inode); + return ret; + } + path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; - ret = btrfs_lookup_inode(trans, root, path, - &BTRFS_I(inode)->location, 1); + ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, + 1); if (ret) { if (ret > 0) ret = -ENOENT; @@ -2660,7 +2687,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, btrfs_unlock_up_safe(path, 1); leaf = path->nodes[0]; inode_item = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_inode_item); + struct btrfs_inode_item); fill_inode_item(trans, leaf, inode_item, inode); btrfs_mark_buffer_dirty(leaf); @@ -2671,7 +2698,6 @@ failed: return ret; }
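Condensed, the btrfs_update_inode() change above is the following dispatch (a sketch reusing the patch's helpers; the direct b-tree path and error handling are elided):

static int update_inode_sketch(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	int ret;

	if (!is_free_space_inode(root, inode)) {
		/* normal inodes: batch the update as a delayed item */
		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	/*
	 * Free-space inodes are written while the transaction commits,
	 * so they must take the direct b-tree update path instead;
	 * deferring them could deadlock against the commit.
	 */
	return 0;	/* stands in for the lookup + fill_inode_item() path */
}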
- /* * unlink helper that gets used here in inode.c and in the tree logging * recovery code. It removes a link in a directory with a given name, and @@ -2688,6 +2714,8 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, struct btrfs_dir_item *di; struct btrfs_key key; u64 index; + u64 ino = btrfs_ino(inode); + u64 dir_ino = btrfs_ino(dir); path = btrfs_alloc_path(); if (!path) { @@ -2696,7 +2724,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, } path->leave_spinning = 1; - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); @@ -2713,31 +2741,21 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, goto err; btrfs_release_path(path); - ret = btrfs_del_inode_ref(trans, root, name, name_len, - inode->i_ino, - dir->i_ino, &index); + ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, + dir_ino, &index); if (ret) { printk(KERN_INFO "btrfs failed to delete reference to %.*s, " - "inode %lu parent %lu\n", name_len, name, - inode->i_ino, dir->i_ino); + "inode %llu parent %llu\n", name_len, name, + (unsigned long long)ino, (unsigned long long)dir_ino); goto err; } - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, - index, name, name_len, -1); - if (IS_ERR(di)) { - ret = PTR_ERR(di); - goto err; - } - if (!di) { - ret = -ENOENT; + ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); + if (ret) goto err; - } - ret = btrfs_delete_one_dir_name(trans, root, path, di); - btrfs_release_path(path); ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, - inode, dir->i_ino); + inode, dir_ino); BUG_ON(ret != 0 && ret != -ENOENT); ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, @@ -2815,12 +2833,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, int check_link = 1; int err = -ENOSPC; int ret; + u64 ino = btrfs_ino(inode); + u64 dir_ino = btrfs_ino(dir); trans = btrfs_start_transaction(root, 10); if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) return trans; - if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return ERR_PTR(-ENOSPC); /* check if someone else holds a reference */ @@ -2879,7 +2899,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, if (ret == 0 && S_ISREG(inode->i_mode)) { ret = btrfs_lookup_file_extent(trans, root, path, - inode->i_ino, (u64)-1, 0); + ino, (u64)-1, 0); if (ret < 0) { err = ret; goto out; @@ -2895,7 +2915,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, goto out; } - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, dentry->d_name.name, dentry->d_name.len, 0); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2912,7 +2932,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, ref = btrfs_lookup_inode_ref(trans, root, path, dentry->d_name.name, dentry->d_name.len, - inode->i_ino, dir->i_ino, 0); + ino, dir_ino, 0); if (IS_ERR(ref)) { err = PTR_ERR(ref); goto out; @@ -2923,7 +2943,15 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, index = btrfs_inode_ref_index(path->nodes[0], ref); btrfs_release_path(path); - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, + /* + * This is a commit root search; if we can look up the inode item and + * other related items in the commit root, it means the transaction of + * the dir/file creation has been committed, and the dir index item that we + * 
delay to insert has also been inserted into the commit root. So + * we needn't worry about the delayed insertion of the dir index item + * here. + */ + di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, dentry->d_name.name, dentry->d_name.len, 0); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2998,12 +3026,13 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, struct btrfs_key key; u64 index; int ret; + u64 dir_ino = btrfs_ino(dir); path = btrfs_alloc_path(); if (!path) return -ENOMEM; - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, name_len, -1); BUG_ON(IS_ERR_OR_NULL(di)); @@ -3016,10 +3045,10 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, objectid, root->root_key.objectid, - dir->i_ino, &index, name, name_len); + dir_ino, &index, name, name_len); if (ret < 0) { BUG_ON(ret != -ENOENT); - di = btrfs_search_dir_index_item(root, path, dir->i_ino, + di = btrfs_search_dir_index_item(root, path, dir_ino, name, name_len); BUG_ON(IS_ERR_OR_NULL(di)); @@ -3028,24 +3057,16 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, btrfs_release_path(path); index = key.offset; } + btrfs_release_path(path); - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, - index, name, name_len, -1); - BUG_ON(IS_ERR_OR_NULL(di)); - - leaf = path->nodes[0]; - btrfs_dir_item_key_to_cpu(leaf, di, &key); - WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); - ret = btrfs_delete_one_dir_name(trans, root, path, di); + ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); BUG_ON(ret); - btrfs_release_path(path); btrfs_i_size_write(dir, dir->i_size - name_len * 2); dir->i_mtime = dir->i_ctime = CURRENT_TIME; ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); - btrfs_free_path(path); return 0; } @@ -3058,7 +3079,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) unsigned long nr = 0; if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || - inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) + btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) return -ENOTEMPTY; trans = __unlink_start_trans(dir, dentry); @@ -3067,7 +3088,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) btrfs_set_trans_block_group(trans, dir); - if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { + if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { err = btrfs_unlink_subvol(trans, root, dir, BTRFS_I(inode)->location.objectid, dentry->d_name.name, @@ -3127,17 +3148,27 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, int encoding; int ret; int err = 0; + u64 ino = btrfs_ino(inode); BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); if (root->ref_cows || root == root->fs_info->tree_root) btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); + /* + * This function is also used to drop the items in the log tree before + * we relog the inode, so if root != BTRFS_I(inode)->root, it means + * it is used to drop the logged items. So we shouldn't kill the delayed + * items.
+ */ + if (min_type == 0 && root == BTRFS_I(inode)->root) + btrfs_kill_delayed_inode_items(inode); + path = btrfs_alloc_path(); BUG_ON(!path); path->reada = -1; - key.objectid = inode->i_ino; + key.objectid = ino; key.offset = (u64)-1; key.type = (u8)-1; @@ -3165,7 +3196,7 @@ search_again: found_type = btrfs_key_type(&found_key); encoding = 0; - if (found_key.objectid != inode->i_ino) + if (found_key.objectid != ino) break; if (found_type < min_type) @@ -3284,7 +3315,7 @@ delete: ret = btrfs_free_extent(trans, root, extent_start, extent_num_bytes, 0, btrfs_header_owner(leaf), - inode->i_ino, extent_offset); + ino, extent_offset); BUG_ON(ret); } @@ -3293,7 +3324,9 @@ delete: if (path->slots[0] == 0 || path->slots[0] != pending_del_slot) { - if (root->ref_cows) { + if (root->ref_cows && + BTRFS_I(inode)->location.objectid != + BTRFS_FREE_INO_OBJECTID) { err = -EAGAIN; goto out; } @@ -3483,7 +3516,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) break; err = btrfs_insert_file_extent(trans, root, - inode->i_ino, cur_offset, 0, + btrfs_ino(inode), cur_offset, 0, 0, hole_size, 0, hole_size, 0, 0, 0); if (err) @@ -3585,7 +3618,7 @@ void btrfs_evict_inode(struct inode *inode) truncate_inode_pages(&inode->i_data, 0); if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || - root == root->fs_info->tree_root)) + is_free_space_inode(root, inode))) goto no_delete; if (is_bad_inode(inode)) { @@ -3638,6 +3671,10 @@ void btrfs_evict_inode(struct inode *inode) BUG_ON(ret); } + if (!(root == root->fs_info->tree_root || + root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) + btrfs_return_ino(root, btrfs_ino(inode)); + nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); @@ -3663,7 +3700,7 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, path = btrfs_alloc_path(); BUG_ON(!path); - di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name, + di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, namelen, 0); if (IS_ERR(di)) ret = PTR_ERR(di); @@ -3716,7 +3753,7 @@ static int fixup_tree_root_location(struct btrfs_root *root, leaf = path->nodes[0]; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); - if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino || + if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) goto out; @@ -3755,6 +3792,7 @@ static void inode_tree_add(struct inode *inode) struct btrfs_inode *entry; struct rb_node **p; struct rb_node *parent; + u64 ino = btrfs_ino(inode); again: p = &root->inode_tree.rb_node; parent = NULL; @@ -3767,9 +3805,9 @@ again: parent = *p; entry = rb_entry(parent, struct btrfs_inode, rb_node); - if (inode->i_ino < entry->vfs_inode.i_ino) + if (ino < btrfs_ino(&entry->vfs_inode)) p = &parent->rb_left; - else if (inode->i_ino > entry->vfs_inode.i_ino) + else if (ino > btrfs_ino(&entry->vfs_inode)) p = &parent->rb_right; else { WARN_ON(!(entry->vfs_inode.i_state & @@ -3833,9 +3871,9 @@ again: prev = node; entry = rb_entry(node, struct btrfs_inode, rb_node); - if (objectid < entry->vfs_inode.i_ino) + if (objectid < btrfs_ino(&entry->vfs_inode)) node = node->rb_left; - else if (objectid > entry->vfs_inode.i_ino) + else if (objectid > btrfs_ino(&entry->vfs_inode)) node = node->rb_right; else break; @@ -3843,7 +3881,7 @@ again: if (!node) { while (prev) { entry = rb_entry(prev, struct btrfs_inode, rb_node); - if (objectid <= entry->vfs_inode.i_ino) { + if (objectid <= 
btrfs_ino(&entry->vfs_inode)) { node = prev; break; } @@ -3852,7 +3890,7 @@ again: } while (node) { entry = rb_entry(node, struct btrfs_inode, rb_node); - objectid = entry->vfs_inode.i_ino + 1; + objectid = btrfs_ino(&entry->vfs_inode) + 1; inode = igrab(&entry->vfs_inode); if (inode) { spin_unlock(&root->inode_lock); @@ -3890,7 +3928,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) static int btrfs_find_actor(struct inode *inode, void *opaque) { struct btrfs_iget_args *args = opaque; - return args->ino == inode->i_ino && + return args->ino == btrfs_ino(inode) && args->root == BTRFS_I(inode)->root; } @@ -4035,7 +4073,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, return d_splice_alias(inode, dentry); } -static unsigned char btrfs_filetype_table[] = { +unsigned char btrfs_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; @@ -4049,6 +4087,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, struct btrfs_key key; struct btrfs_key found_key; struct btrfs_path *path; + struct list_head ins_list; + struct list_head del_list; int ret; struct extent_buffer *leaf; int slot; @@ -4061,6 +4101,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, char tmp_name[32]; char *name_ptr; int name_len; + int is_curr = 0; /* filp->f_pos points to the current index? */ /* FIXME, use a real flag for deciding about the key type */ if (root->fs_info->tree_root == root) @@ -4068,9 +4109,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, /* special case for "." */ if (filp->f_pos == 0) { - over = filldir(dirent, ".", 1, - 1, inode->i_ino, - DT_DIR); + over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR); if (over) return 0; filp->f_pos = 1; @@ -4085,11 +4124,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, filp->f_pos = 2; } path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; path->reada = 2; + if (key_type == BTRFS_DIR_INDEX_KEY) { + INIT_LIST_HEAD(&ins_list); + INIT_LIST_HEAD(&del_list); + btrfs_get_delayed_items(inode, &ins_list, &del_list); + } + btrfs_set_key_type(&key, key_type); key.offset = filp->f_pos; - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) @@ -4116,8 +4163,13 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, break; if (found_key.offset < filp->f_pos) goto next; + if (key_type == BTRFS_DIR_INDEX_KEY && + btrfs_should_delete_dir_index(&del_list, + found_key.offset)) + goto next; filp->f_pos = found_key.offset; + is_curr = 1; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); di_cur = 0; @@ -4172,6 +4224,15 @@ next: path->slots[0]++; } + if (key_type == BTRFS_DIR_INDEX_KEY) { + if (is_curr) + filp->f_pos++; + ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, + &ins_list); + if (ret) + goto nopos; + } + /* Reached end of directory/root. Bump pos past the last item. 
*/ if (key_type == BTRFS_DIR_INDEX_KEY) /* @@ -4184,6 +4245,8 @@ next: nopos: ret = 0; err: + if (key_type == BTRFS_DIR_INDEX_KEY) + btrfs_put_delayed_items(&ins_list, &del_list); btrfs_free_path(path); return ret; } @@ -4199,7 +4262,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) return 0; smp_mb(); - nolock = (root->fs_info->closing && root == root->fs_info->tree_root); + if (root->fs_info->closing && is_free_space_inode(root, inode)) + nolock = true; if (wbc->sync_mode == WB_SYNC_ALL) { if (nolock) @@ -4243,8 +4307,9 @@ void btrfs_dirty_inode(struct inode *inode) trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { printk_ratelimited(KERN_ERR "btrfs: fail to " - "dirty inode %lu error %ld\n", - inode->i_ino, PTR_ERR(trans)); + "dirty inode %llu error %ld\n", + (unsigned long long)btrfs_ino(inode), + PTR_ERR(trans)); return; } btrfs_set_trans_block_group(trans, inode); @@ -4252,11 +4317,14 @@ void btrfs_dirty_inode(struct inode *inode) ret = btrfs_update_inode(trans, root, inode); if (ret) { printk_ratelimited(KERN_ERR "btrfs: fail to " - "dirty inode %lu error %d\n", - inode->i_ino, ret); + "dirty inode %llu error %d\n", + (unsigned long long)btrfs_ino(inode), + ret); } } btrfs_end_transaction(trans, root); + if (BTRFS_I(inode)->delayed_node) + btrfs_balance_delayed_items(root); } /* @@ -4272,7 +4340,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) struct extent_buffer *leaf; int ret; - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); key.offset = (u64)-1; @@ -4304,7 +4372,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid != inode->i_ino || + if (found_key.objectid != btrfs_ino(inode) || btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { BTRFS_I(inode)->index_cnt = 2; goto out; @@ -4325,9 +4393,12 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index) int ret = 0; if (BTRFS_I(dir)->index_cnt == (u64)-1) { - ret = btrfs_set_inode_index_count(dir); - if (ret) - return ret; + ret = btrfs_inode_delayed_dir_index_count(dir); + if (ret) { + ret = btrfs_set_inode_index_count(dir); + if (ret) + return ret; + } } *index = BTRFS_I(dir)->index_cnt; @@ -4363,6 +4434,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, return ERR_PTR(-ENOMEM); } + /* + * we have to initialize this early, so we can reclaim the inode + * number if we fail afterwards in this function. 
+ */ + inode->i_ino = objectid; + if (dir) { trace_btrfs_inode_request(dir); @@ -4408,7 +4485,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, goto fail; inode_init_owner(inode, dir, mode); - inode->i_ino = objectid; inode_set_bytes(inode, 0); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], @@ -4472,29 +4548,29 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, int ret = 0; struct btrfs_key key; struct btrfs_root *root = BTRFS_I(parent_inode)->root; + u64 ino = btrfs_ino(inode); + u64 parent_ino = btrfs_ino(parent_inode); - if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); } else { - key.objectid = inode->i_ino; + key.objectid = ino; btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); key.offset = 0; } - if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, key.objectid, root->root_key.objectid, - parent_inode->i_ino, - index, name, name_len); + parent_ino, index, name, name_len); } else if (add_backref) { - ret = btrfs_insert_inode_ref(trans, root, - name, name_len, inode->i_ino, - parent_inode->i_ino, index); + ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, + parent_ino, index); } if (ret == 0) { ret = btrfs_insert_dir_item(trans, root, name, name_len, - parent_inode->i_ino, &key, + parent_inode, &key, btrfs_inode_type(inode), index); BUG_ON(ret); @@ -4537,10 +4613,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (!new_valid_dev(rdev)) return -EINVAL; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; - /* * 2 for inode item and ref * 2 for dir items @@ -4552,8 +4624,12 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); @@ -4600,9 +4676,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, u64 objectid; u64 index = 0; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; /* * 2 for inode item and ref * 2 for dir items @@ -4614,8 +4687,12 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); @@ -4726,10 +4803,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) u64 index = 0; unsigned long nr = 1; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; - /* * 2 items for inode and ref * 2 items for dir items @@ -4740,8 +4813,12 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) return PTR_ERR(trans); btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if 
(err) + goto out_fail; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, S_IFDIR | mode, &index); if (IS_ERR(inode)) { @@ -4864,7 +4941,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, u64 bytenr; u64 extent_start = 0; u64 extent_end = 0; - u64 objectid = inode->i_ino; + u64 objectid = btrfs_ino(inode); u32 found_type; struct btrfs_path *path = NULL; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -5372,7 +5449,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), offset, 0); if (ret < 0) goto out; @@ -5389,7 +5466,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, ret = 0; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, slot); - if (key.objectid != inode->i_ino || + if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) { /* not our file or wrong item type, must cow */ goto out; @@ -5423,7 +5500,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, * look for other files referencing this extent, if we * find any we must cow */ - if (btrfs_cross_ref_exist(trans, root, inode->i_ino, + if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode), key.offset - backref_offset, disk_bytenr)) goto out; @@ -5613,9 +5690,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) flush_dcache_page(bvec->bv_page); if (csum != *private) { - printk(KERN_ERR "btrfs csum failed ino %lu off" + printk(KERN_ERR "btrfs csum failed ino %llu off" " %llu csum %u private %u\n", - inode->i_ino, (unsigned long long)start, + (unsigned long long)btrfs_ino(inode), + (unsigned long long)start, csum, *private); err = -EIO; } @@ -5762,9 +5840,9 @@ static void btrfs_end_dio_bio(struct bio *bio, int err) struct btrfs_dio_private *dip = bio->bi_private; if (err) { - printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu " + printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " "sector %#Lx len %u err no %d\n", - dip->inode->i_ino, bio->bi_rw, + (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw, (unsigned long long)bio->bi_sector, bio->bi_size, err); dip->errors = 1; @@ -6607,6 +6685,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->dummy_inode = 0; ei->force_compress = BTRFS_COMPRESS_NONE; + ei->delayed_node = NULL; + inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree); extent_io_tree_init(&ei->io_tree, &inode->i_data); @@ -6674,8 +6754,8 @@ void btrfs_destroy_inode(struct inode *inode) spin_lock(&root->orphan_lock); if (!list_empty(&BTRFS_I(inode)->i_orphan)) { - printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", - inode->i_ino); + printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", + (unsigned long long)btrfs_ino(inode)); list_del_init(&BTRFS_I(inode)->i_orphan); } spin_unlock(&root->orphan_lock); @@ -6697,6 +6777,7 @@ void btrfs_destroy_inode(struct inode *inode) inode_tree_del(inode); btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); free: + btrfs_remove_delayed_node(inode); call_rcu(&inode->i_rcu, btrfs_i_callback); } @@ -6705,7 +6786,7 @@ int btrfs_drop_inode(struct inode *inode) struct btrfs_root *root = BTRFS_I(inode)->root; if (btrfs_root_refs(&root->root_item) == 0 && - root != root->fs_info->tree_root) + 
!is_free_space_inode(root, inode)) return 1; else return generic_drop_inode(inode); @@ -6808,38 +6889,39 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(old_dir)->root; struct btrfs_root *dest = BTRFS_I(new_dir)->root; - struct inode *newinode = new_dentry->d_inode; + struct inode *new_inode = new_dentry->d_inode; struct inode *old_inode = old_dentry->d_inode; struct timespec ctime = CURRENT_TIME; u64 index = 0; u64 root_objectid; int ret; + u64 old_ino = btrfs_ino(old_inode); - if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return -EPERM; /* we only allow rename subvolume link between subvolumes */ - if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) + if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) return -EXDEV; - if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || - (newinode && newinode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) + if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || + (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID)) return -ENOTEMPTY; - if (S_ISDIR(old_inode->i_mode) && newinode && - newinode->i_size > BTRFS_EMPTY_DIR_SIZE) + if (S_ISDIR(old_inode->i_mode) && new_inode && + new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) return -ENOTEMPTY; /* * we're using rename to replace one file with another. * and the replacement file is large. Start IO on it now so * we don't add too much work to the end of the transaction */ - if (newinode && S_ISREG(old_inode->i_mode) && newinode->i_size && + if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size && old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) filemap_flush(old_inode->i_mapping); /* close the racy window with snapshot create/destroy ioctl */ - if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) down_read(&root->fs_info->subvol_sem); /* * We want to reserve the absolute worst case amount of items. So if @@ -6864,15 +6946,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (ret) goto out_fail; - if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { /* force full log commit if subvolume involved. */ root->fs_info->last_trans_log_full_commit = trans->transid; } else { ret = btrfs_insert_inode_ref(trans, dest, new_dentry->d_name.name, new_dentry->d_name.len, - old_inode->i_ino, - new_dir->i_ino, index); + old_ino, + btrfs_ino(new_dir), index); if (ret) goto out_fail; /* @@ -6888,10 +6970,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, * make sure the inode gets flushed if it is replacing * something. 
*/ - if (newinode && newinode->i_size && - old_inode && S_ISREG(old_inode->i_mode)) { + if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode)) btrfs_add_ordered_operation(trans, root, old_inode); - } old_dir->i_ctime = old_dir->i_mtime = ctime; new_dir->i_ctime = new_dir->i_mtime = ctime; @@ -6900,7 +6980,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (old_dentry->d_parent != new_dentry->d_parent) btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); - if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, old_dentry->d_name.name, @@ -6915,16 +6995,16 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, } BUG_ON(ret); - if (newinode) { - newinode->i_ctime = CURRENT_TIME; - if (unlikely(newinode->i_ino == + if (new_inode) { + new_inode->i_ctime = CURRENT_TIME; + if (unlikely(btrfs_ino(new_inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { - root_objectid = BTRFS_I(newinode)->location.objectid; + root_objectid = BTRFS_I(new_inode)->location.objectid; ret = btrfs_unlink_subvol(trans, dest, new_dir, root_objectid, new_dentry->d_name.name, new_dentry->d_name.len); - BUG_ON(newinode->i_nlink == 0); + BUG_ON(new_inode->i_nlink == 0); } else { ret = btrfs_unlink_inode(trans, dest, new_dir, new_dentry->d_inode, @@ -6932,7 +7012,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.len); } BUG_ON(ret); - if (newinode->i_nlink == 0) { + if (new_inode->i_nlink == 0) { ret = btrfs_orphan_add(trans, new_dentry->d_inode); BUG_ON(ret); } @@ -6945,7 +7025,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.len, 0, index); BUG_ON(ret); - if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { + if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { struct dentry *parent = dget_parent(new_dentry); btrfs_log_new_name(trans, old_inode, old_dir, parent); dput(parent); @@ -6954,7 +7034,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, out_fail: btrfs_end_transaction_throttle(trans, root); out_notrans: - if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&root->fs_info->subvol_sem); return ret; @@ -7031,9 +7111,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) return -ENAMETOOLONG; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; /* * 2 items for inode item and ref * 2 items for dir items @@ -7045,8 +7122,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, &index); if (IS_ERR(inode)) { @@ -7078,7 +7159,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, path = btrfs_alloc_path(); BUG_ON(!path); - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); key.offset = 0; btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); datasize = btrfs_file_extent_calc_inline_size(name_len); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 
d11fc6548e15..ed8c055ab70f 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -50,6 +50,7 @@ #include "print-tree.h" #include "volumes.h" #include "locking.h" +#include "inode-map.h" /* Mask out flags that are inappropriate for the given type of inode. */ static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) @@ -81,6 +82,13 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags) iflags |= FS_NOATIME_FL; if (flags & BTRFS_INODE_DIRSYNC) iflags |= FS_DIRSYNC_FL; + if (flags & BTRFS_INODE_NODATACOW) + iflags |= FS_NOCOW_FL; + + if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS)) + iflags |= FS_COMPR_FL; + else if (flags & BTRFS_INODE_NOCOMPRESS) + iflags |= FS_NOCOMP_FL; return iflags; } @@ -144,16 +152,13 @@ static int check_flags(unsigned int flags) if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ FS_NOATIME_FL | FS_NODUMP_FL | \ FS_SYNC_FL | FS_DIRSYNC_FL | \ - FS_NOCOMP_FL | FS_COMPR_FL | \ - FS_NOCOW_FL | FS_COW_FL)) + FS_NOCOMP_FL | FS_COMPR_FL | + FS_NOCOW_FL)) return -EOPNOTSUPP; if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) return -EINVAL; - if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL)) - return -EINVAL; - return 0; } @@ -218,6 +223,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) ip->flags |= BTRFS_INODE_DIRSYNC; else ip->flags &= ~BTRFS_INODE_DIRSYNC; + if (flags & FS_NOCOW_FL) + ip->flags |= BTRFS_INODE_NODATACOW; + else + ip->flags &= ~BTRFS_INODE_NODATACOW; /* * The COMPRESS flag can only be changed by users, while the NOCOMPRESS @@ -230,11 +239,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) } else if (flags & FS_COMPR_FL) { ip->flags |= BTRFS_INODE_COMPRESS; ip->flags &= ~BTRFS_INODE_NOCOMPRESS; + } else { + ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); } - if (flags & FS_NOCOW_FL) - ip->flags |= BTRFS_INODE_NODATACOW; - else if (flags & FS_COW_FL) - ip->flags &= ~BTRFS_INODE_NODATACOW; trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); @@ -323,8 +330,7 @@ static noinline int create_subvol(struct btrfs_root *root, u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; u64 index = 0; - ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root, - 0, &objectid); + ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid); if (ret) { dput(parent); return ret; @@ -416,7 +422,7 @@ static noinline int create_subvol(struct btrfs_root *root, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, root, - name, namelen, dir->i_ino, &key, + name, namelen, dir, &key, BTRFS_FT_DIR, index); if (ret) goto fail; @@ -427,7 +433,7 @@ static noinline int create_subvol(struct btrfs_root *root, ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, objectid, root->root_key.objectid, - dir->i_ino, index, name, namelen); + btrfs_ino(dir), index, name, namelen); BUG_ON(ret); @@ -1123,7 +1129,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file, int ret = 0; u64 flags = 0; - if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) + if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) return -EINVAL; down_read(&root->fs_info->subvol_sem); @@ -1150,7 +1156,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, if (root->fs_info->sb->s_flags & MS_RDONLY) return -EROFS; - if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) + if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) return -EINVAL; if (copy_from_user(&flags, arg, sizeof(flags))) @@ -1633,7 +1639,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, goto out_dput; } - if 
(inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { + if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { err = -EINVAL; goto out_dput; } @@ -1919,7 +1925,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, } /* clone data */ - key.objectid = src->i_ino; + key.objectid = btrfs_ino(src); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = 0; @@ -1946,7 +1952,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_item_key_to_cpu(leaf, &key, slot); if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY || - key.objectid != src->i_ino) + key.objectid != btrfs_ino(src)) break; if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) { @@ -1989,7 +1995,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, goto next; memcpy(&new_key, &key, sizeof(new_key)); - new_key.objectid = inode->i_ino; + new_key.objectid = btrfs_ino(inode); if (off <= key.offset) new_key.offset = key.offset + destoff - off; else @@ -2043,7 +2049,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ret = btrfs_inc_extent_ref(trans, root, disko, diskl, 0, root->root_key.objectid, - inode->i_ino, + btrfs_ino(inode), new_key.offset - datao); BUG_ON(ret); } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f726e72dd362..051992c7fcc9 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -30,6 +30,7 @@ #include "btrfs_inode.h" #include "async-thread.h" #include "free-space-cache.h" +#include "inode-map.h" /* * backref_node, mapping_node and tree_block start with this @@ -1409,9 +1410,9 @@ again: prev = node; entry = rb_entry(node, struct btrfs_inode, rb_node); - if (objectid < entry->vfs_inode.i_ino) + if (objectid < btrfs_ino(&entry->vfs_inode)) node = node->rb_left; - else if (objectid > entry->vfs_inode.i_ino) + else if (objectid > btrfs_ino(&entry->vfs_inode)) node = node->rb_right; else break; @@ -1419,7 +1420,7 @@ again: if (!node) { while (prev) { entry = rb_entry(prev, struct btrfs_inode, rb_node); - if (objectid <= entry->vfs_inode.i_ino) { + if (objectid <= btrfs_ino(&entry->vfs_inode)) { node = prev; break; } @@ -1434,7 +1435,7 @@ again: return inode; } - objectid = entry->vfs_inode.i_ino + 1; + objectid = btrfs_ino(&entry->vfs_inode) + 1; if (cond_resched_lock(&root->inode_lock)) goto again; @@ -1470,7 +1471,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr, return -ENOMEM; bytenr -= BTRFS_I(reloc_inode)->index_cnt; - ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, + ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode), bytenr, 0); if (ret < 0) goto out; @@ -1558,11 +1559,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans, if (first) { inode = find_next_inode(root, key.objectid); first = 0; - } else if (inode && inode->i_ino < key.objectid) { + } else if (inode && btrfs_ino(inode) < key.objectid) { btrfs_add_delayed_iput(inode); inode = find_next_inode(root, key.objectid); } - if (inode && inode->i_ino == key.objectid) { + if (inode && btrfs_ino(inode) == key.objectid) { end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); WARN_ON(!IS_ALIGNED(key.offset, @@ -1893,6 +1894,7 @@ static int invalidate_extent_cache(struct btrfs_root *root, struct inode *inode = NULL; u64 objectid; u64 start, end; + u64 ino; objectid = min_key->objectid; while (1) { @@ -1905,17 +1907,18 @@ static int invalidate_extent_cache(struct btrfs_root *root, inode = find_next_inode(root, objectid); if (!inode) break; + ino = 
btrfs_ino(inode); - if (inode->i_ino > max_key->objectid) { + if (ino > max_key->objectid) { iput(inode); break; } - objectid = inode->i_ino + 1; + objectid = ino + 1; if (!S_ISREG(inode->i_mode)) continue; - if (unlikely(min_key->objectid == inode->i_ino)) { + if (unlikely(min_key->objectid == ino)) { if (min_key->type > BTRFS_EXTENT_DATA_KEY) continue; if (min_key->type < BTRFS_EXTENT_DATA_KEY) @@ -1928,7 +1931,7 @@ static int invalidate_extent_cache(struct btrfs_root *root, start = 0; } - if (unlikely(max_key->objectid == inode->i_ino)) { + if (unlikely(max_key->objectid == ino)) { if (max_key->type < BTRFS_EXTENT_DATA_KEY) continue; if (max_key->type > BTRFS_EXTENT_DATA_KEY) { @@ -3897,7 +3900,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, if (IS_ERR(trans)) return ERR_CAST(trans); - err = btrfs_find_free_objectid(trans, root, objectid, &objectid); + err = btrfs_find_free_objectid(root, &objectid); if (err) goto out; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 3e28521643fb..fb72e2bea882 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -40,6 +40,7 @@ #include <linux/magic.h> #include <linux/slab.h> #include "compat.h" +#include "delayed-inode.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -1206,10 +1207,14 @@ static int __init init_btrfs_fs(void) if (err) goto free_extent_io; - err = btrfs_interface_init(); + err = btrfs_delayed_inode_init(); if (err) goto free_extent_map; + err = btrfs_interface_init(); + if (err) + goto free_delayed_inode; + err = register_filesystem(&btrfs_fs_type); if (err) goto unregister_ioctl; @@ -1219,6 +1224,8 @@ static int __init init_btrfs_fs(void) unregister_ioctl: btrfs_interface_exit(); +free_delayed_inode: + btrfs_delayed_inode_exit(); free_extent_map: extent_map_exit(); free_extent_io: @@ -1235,6 +1242,7 @@ free_sysfs: static void __exit exit_btrfs_fs(void) { btrfs_destroy_cachep(); + btrfs_delayed_inode_exit(); extent_map_exit(); extent_io_exit(); btrfs_interface_exit(); diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index ab9633fd72a4..c3c223ae6691 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -174,18 +174,6 @@ static const struct sysfs_ops btrfs_root_attr_ops = { .store = btrfs_root_attr_store, }; -static struct kobj_type btrfs_root_ktype = { - .default_attrs = btrfs_root_attrs, - .sysfs_ops = &btrfs_root_attr_ops, - .release = btrfs_root_release, -}; - -static struct kobj_type btrfs_super_ktype = { - .default_attrs = btrfs_super_attrs, - .sysfs_ops = &btrfs_super_attr_ops, - .release = btrfs_super_release, -}; - /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 211aceeb9ea0..33679fc710c6 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -27,6 +27,7 @@ #include "transaction.h" #include "locking.h" #include "tree-log.h" +#include "inode-map.h" #define BTRFS_ROOT_TRANS_TAG 0 @@ -443,19 +444,40 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, int btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return __btrfs_end_transaction(trans, root, 0, 1); + int ret; + + ret = __btrfs_end_transaction(trans, root, 0, 1); + if (ret) + return ret; + return 0; } int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return __btrfs_end_transaction(trans, root, 1, 1); + int ret; + + ret = __btrfs_end_transaction(trans, root, 1, 1); + if (ret) + return ret; + return 0; } int 
btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return __btrfs_end_transaction(trans, root, 0, 0); + int ret; + + ret = __btrfs_end_transaction(trans, root, 0, 0); + if (ret) + return ret; + return 0; +} + +int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + return __btrfs_end_transaction(trans, root, 1, 1); } /* @@ -716,8 +738,14 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, btrfs_update_reloc_root(trans, root); btrfs_orphan_commit_root(trans, root); + btrfs_save_ino_cache(root, trans); + if (root->commit_root != root->node) { + mutex_lock(&root->fs_commit_mutex); switch_commit_root(root); + btrfs_unpin_free_ino(root); + mutex_unlock(&root->fs_commit_mutex); + btrfs_set_root_node(&root->root_item, root->node); } @@ -795,7 +823,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, goto fail; } - ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid); + ret = btrfs_find_free_objectid(tree_root, &objectid); if (ret) { pending->error = ret; goto fail; @@ -832,7 +860,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, parent_root, dentry->d_name.name, dentry->d_name.len, - parent_inode->i_ino, &key, + parent_inode, &key, BTRFS_FT_DIR, index); BUG_ON(ret); @@ -874,7 +902,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, */ ret = btrfs_add_root_ref(trans, tree_root, objectid, parent_root->root_key.objectid, - parent_inode->i_ino, index, + btrfs_ino(parent_inode), index, dentry->d_name.name, dentry->d_name.len); BUG_ON(ret); dput(parent); @@ -902,6 +930,14 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, int ret; list_for_each_entry(pending, head, list) { + /* + * We must deal with the delayed items before creating + * snapshots, or we will create a snapshot with inconsistent + * information.
+ */ + ret = btrfs_run_delayed_items(trans, fs_info->fs_root); + BUG_ON(ret); + ret = create_pending_snapshot(trans, fs_info, pending); BUG_ON(ret); } @@ -1155,6 +1191,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, BUG_ON(ret); } + ret = btrfs_run_delayed_items(trans, root); + BUG_ON(ret); + /* * rename don't use btrfs_join_transaction, so, once we * set the transaction to blocked above, we aren't going @@ -1181,6 +1220,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); + ret = btrfs_run_delayed_items(trans, root); + BUG_ON(ret); + ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); BUG_ON(ret); @@ -1297,6 +1339,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root) root = list_entry(list.next, struct btrfs_root, root_list); list_del(&root->root_list); + btrfs_kill_all_delayed_nodes(root); + if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV) btrfs_drop_snapshot(root, NULL, 0); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 000a41008c3b..804c88639e5d 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -112,6 +112,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, int wait_for_unblock); int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root); +int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, + struct btrfs_root *root); int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); void btrfs_throttle(struct btrfs_root *root); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c599e8c2a53c..a794b9f60138 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -519,7 +519,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, * file. This must be done before the btrfs_drop_extents run * so we don't try to drop this extent. 
*/ - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), start, 0); if (ret == 0 && @@ -832,7 +832,7 @@ again: read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen); /* if we already have a perfect match, we're done */ - if (inode_in_dir(root, path, dir->i_ino, inode->i_ino, + if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), btrfs_inode_ref_index(eb, ref), name, namelen)) { goto out; @@ -960,8 +960,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, unsigned long ptr; unsigned long ptr_end; int name_len; + u64 ino = btrfs_ino(inode); - key.objectid = inode->i_ino; + key.objectid = ino; key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; @@ -980,7 +981,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, } btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); - if (key.objectid != inode->i_ino || + if (key.objectid != ino || key.type != BTRFS_INODE_REF_KEY) break; ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); @@ -1011,10 +1012,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, if (inode->i_nlink == 0) { if (S_ISDIR(inode->i_mode)) { ret = replay_dir_deletes(trans, root, NULL, path, - inode->i_ino, 1); + ino, 1); BUG_ON(ret); } - ret = insert_orphan_item(trans, root, inode->i_ino); + ret = insert_orphan_item(trans, root, ino); BUG_ON(ret); } btrfs_free_path(path); @@ -2197,6 +2198,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, int ret; int err = 0; int bytes_del = 0; + u64 dir_ino = btrfs_ino(dir); if (BTRFS_I(dir)->logged_trans < trans->transid) return 0; @@ -2214,7 +2216,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, goto out_unlock; } - di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, log, path, dir_ino, name, name_len, -1); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2226,7 +2228,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, BUG_ON(ret); } btrfs_release_path(path); - di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino, + di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, index, name, name_len, -1); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2244,7 +2246,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, if (bytes_del) { struct btrfs_key key; - key.objectid = dir->i_ino; + key.objectid = dir_ino; key.offset = 0; key.type = BTRFS_INODE_ITEM_KEY; btrfs_release_path(path); @@ -2303,7 +2305,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, log = root->log_root; mutex_lock(&BTRFS_I(inode)->log_mutex); - ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino, + ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), dirid, &index); mutex_unlock(&BTRFS_I(inode)->log_mutex); if (ret == -ENOSPC) { @@ -2369,13 +2371,14 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, int nritems; u64 first_offset = min_offset; u64 last_offset = (u64)-1; + u64 ino = btrfs_ino(inode); log = root->log_root; - max_key.objectid = inode->i_ino; + max_key.objectid = ino; max_key.offset = (u64)-1; max_key.type = key_type; - min_key.objectid = inode->i_ino; + min_key.objectid = ino; min_key.type = key_type; min_key.offset = min_offset; @@ -2388,9 +2391,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, * we didn't find anything from this transaction, see if there * 
is anything at all */ - if (ret != 0 || min_key.objectid != inode->i_ino || - min_key.type != key_type) { - min_key.objectid = inode->i_ino; + if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { + min_key.objectid = ino; min_key.type = key_type; min_key.offset = (u64)-1; btrfs_release_path(path); @@ -2399,7 +2401,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, btrfs_release_path(path); return ret; } - ret = btrfs_previous_item(root, path, inode->i_ino, key_type); + ret = btrfs_previous_item(root, path, ino, key_type); /* if ret == 0 there are items for this type, * create a range to tell us the last key of this type. @@ -2417,7 +2419,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, } /* go backward to find any previous key */ - ret = btrfs_previous_item(root, path, inode->i_ino, key_type); + ret = btrfs_previous_item(root, path, ino, key_type); if (ret == 0) { struct btrfs_key tmp; btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); @@ -2452,8 +2454,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, for (i = path->slots[0]; i < nritems; i++) { btrfs_item_key_to_cpu(src, &min_key, i); - if (min_key.objectid != inode->i_ino || - min_key.type != key_type) + if (min_key.objectid != ino || min_key.type != key_type) goto done; ret = overwrite_item(trans, log, dst_path, src, i, &min_key); @@ -2474,7 +2475,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, goto done; } btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); - if (tmp.objectid != inode->i_ino || tmp.type != key_type) { + if (tmp.objectid != ino || tmp.type != key_type) { last_offset = (u64)-1; goto done; } @@ -2500,8 +2501,7 @@ done: * is valid */ ret = insert_dir_log_key(trans, log, path, key_type, - inode->i_ino, first_offset, - last_offset); + ino, first_offset, last_offset); if (ret) err = ret; } @@ -2745,6 +2745,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, int nritems; int ins_start_slot = 0; int ins_nr; + u64 ino = btrfs_ino(inode); log = root->log_root; @@ -2757,11 +2758,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, return -ENOMEM; } - min_key.objectid = inode->i_ino; + min_key.objectid = ino; min_key.type = BTRFS_INODE_ITEM_KEY; min_key.offset = 0; - max_key.objectid = inode->i_ino; + max_key.objectid = ino; /* today the code can only do partial logging of directories */ if (!S_ISDIR(inode->i_mode)) @@ -2773,6 +2774,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, max_key.type = (u8)-1; max_key.offset = (u64)-1; + ret = btrfs_commit_inode_delayed_items(trans, inode); + if (ret) { + btrfs_free_path(path); + btrfs_free_path(dst_path); + return ret; + } + mutex_lock(&BTRFS_I(inode)->log_mutex); /* @@ -2784,8 +2792,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, if (inode_only == LOG_INODE_EXISTS) max_key_type = BTRFS_XATTR_ITEM_KEY; - ret = drop_objectid_items(trans, log, path, - inode->i_ino, max_key_type); + ret = drop_objectid_items(trans, log, path, ino, max_key_type); } else { ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); } @@ -2803,7 +2810,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, break; again: /* note, ins_nr might be > 0 here, cleanup outside the loop */ - if (min_key.objectid != inode->i_ino) + if (min_key.objectid != ino) break; if (min_key.type > max_key.type) break; diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 4ca88d1e18e2..f3107e4b4d56 100644 --- a/fs/btrfs/xattr.c 
+++ b/fs/btrfs/xattr.c @@ -44,7 +44,7 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name, return -ENOMEM; /* lookup the xattr by name */ - di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name, + di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, strlen(name), 0); if (!di) { ret = -ENODATA; @@ -103,7 +103,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans, return -ENOMEM; /* first lets see if we already have this xattr */ - di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name, + di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, strlen(name), -1); if (IS_ERR(di)) { ret = PTR_ERR(di); @@ -136,7 +136,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans, } /* ok we have to create a completely new xattr */ - ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino, + ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), name, name_len, value, size); BUG_ON(ret); out: @@ -190,7 +190,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) * NOTE: we set key.offset = 0; because we want to start with the * first xattr that we find and walk forward */ - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); key.offset = 0;
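The btrfs_real_readdir() hunks above merge two sources of directory entries: dir-index items already committed to the tree, and delayed items that so far exist only in memory (ins_list for pending insertions, del_list for pending deletions). Committed entries that are queued for deletion are skipped via btrfs_should_delete_dir_index(), and the pending insertions are emitted at the end by btrfs_readdir_delayed_dir_index(). The short userspace model below sketches that merge; every type, name, and value in it is invented for illustration and none of it is the kernel API.

/*
 * Toy model of a readdir pass that merges committed dir-index items
 * with delayed (in-memory) insertions and deletions.  Illustrative
 * only; not the btrfs implementation.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dir_index {
	unsigned long long offset;	/* dir-index key offset */
	const char *name;
};

/* committed items, sorted by index offset */
static const struct dir_index on_disk[] = {
	{ 2, "a" }, { 3, "b" }, { 5, "c" },
};
/* delayed deletions and insertions, also sorted by offset */
static const unsigned long long del_list[] = { 3 };
static const struct dir_index ins_list[] = { { 7, "d" } };

/* analogous in spirit to btrfs_should_delete_dir_index() */
static bool should_delete(unsigned long long offset)
{
	for (size_t i = 0; i < sizeof(del_list) / sizeof(del_list[0]); i++)
		if (del_list[i] == offset)
			return true;
	return false;
}

int main(void)
{
	/* walk committed items, skipping ones queued for deletion */
	for (size_t i = 0; i < sizeof(on_disk) / sizeof(on_disk[0]); i++) {
		if (should_delete(on_disk[i].offset))
			continue;
		printf("%llu %s\n", on_disk[i].offset, on_disk[i].name);
	}
	/* then emit entries that only exist in memory so far */
	for (size_t i = 0; i < sizeof(ins_list) / sizeof(ins_list[0]); i++)
		printf("%llu %s\n", ins_list[i].offset, ins_list[i].name);
	return 0;
}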
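In the same spirit, the creation paths (btrfs_mknod(), btrfs_create(), btrfs_mkdir(), btrfs_symlink()) now obtain the object id with btrfs_find_free_ino() only after the transaction has started, and btrfs_new_inode() assigns inode->i_ino = objectid early so the number can be reclaimed if the function fails later. The toy allocator below sketches that reserve-then-reclaim idea; it is a guess at the shape of the scheme, not the btrfs inode-map code, and all names and constants are illustrative.

/*
 * Toy inode-number allocator with a small recycle cache.  A failed
 * create hands its number back instead of leaking it.  Illustrative
 * only.
 */
#include <stdio.h>

#define CACHE_SIZE 8

static unsigned long long cache[CACHE_SIZE];
static int cached;				/* numbers available for reuse */
static unsigned long long next_ino = 256;	/* stand-in for BTRFS_FIRST_FREE_OBJECTID */

static unsigned long long find_free_ino(void)
{
	if (cached > 0)
		return cache[--cached];		/* prefer a recycled number */
	return next_ino++;
}

static void return_ino(unsigned long long ino)
{
	if (cached < CACHE_SIZE)
		cache[cached++] = ino;		/* reclaimed by a failed create */
}

int main(void)
{
	unsigned long long ino = find_free_ino();

	printf("allocated %llu\n", ino);
	return_ino(ino);			/* pretend inode creation failed */
	printf("reused    %llu\n", find_free_ino());
	return 0;
}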
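The ioctl.c hunks make the NOCOW and compression attributes visible through FS_IOC_GETFLAGS/FS_IOC_SETFLAGS: BTRFS_INODE_NODATACOW maps to FS_NOCOW_FL, the COMPRESS/NOCOMPRESS pair maps to FS_COMPR_FL/FS_NOCOMP_FL, and contradictory combinations are rejected by check_flags(). A condensed model of the get-side mapping follows; the flag constants are defined locally with made-up values so the sketch stands alone.

#include <stdio.h>

/* illustrative stand-ins for the real flag bits */
#define BTRFS_INODE_NODATACOW	(1u << 0)
#define BTRFS_INODE_COMPRESS	(1u << 1)
#define BTRFS_INODE_NOCOMPRESS	(1u << 2)

#define FS_NOCOW_FL	(1u << 0)
#define FS_COMPR_FL	(1u << 1)
#define FS_NOCOMP_FL	(1u << 2)

/* same shape as the btrfs_flags_to_ioctl() additions above */
static unsigned int flags_to_ioctl(unsigned int flags)
{
	unsigned int iflags = 0;

	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;
	if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS))
		iflags |= FS_COMPR_FL;
	else if (flags & BTRFS_INODE_NOCOMPRESS)
		iflags |= FS_NOCOMP_FL;
	return iflags;
}

int main(void)
{
	unsigned int got = flags_to_ioctl(BTRFS_INODE_NODATACOW |
					  BTRFS_INODE_COMPRESS);
	printf("iflags = %#x\n", got);	/* FS_NOCOW_FL | FS_COMPR_FL */
	return 0;
}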
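Finally, the transaction.c comment explains why btrfs_run_delayed_items() is called before create_pending_snapshot(): a snapshot copies only committed tree state, so any inode or dir-index items still sitting on the delayed lists would be missing from it. The toy commit sequence below makes that visible; it is a deliberately naive model, not the btrfs transaction code.

/*
 * Toy illustration: snapshotting before flushing delayed items loses
 * the in-memory entries; flushing first makes the snapshot complete.
 */
#include <stdio.h>
#include <string.h>

#define MAX 8

static char committed[MAX][16];
static int n_committed;
static char delayed[MAX][16];
static int n_delayed;

/* fold every pending in-memory item into committed state */
static void run_delayed_items(void)
{
	while (n_delayed > 0)
		strcpy(committed[n_committed++], delayed[--n_delayed]);
}

/* a "snapshot" sees only committed state */
static void snapshot(const char *label)
{
	printf("%s:", label);
	for (int i = 0; i < n_committed; i++)
		printf(" %s", committed[i]);
	printf("\n");
}

int main(void)
{
	strcpy(committed[n_committed++], "a");
	strcpy(delayed[n_delayed++], "b");	/* created, but still delayed */

	snapshot("without flush");		/* misses "b" */
	run_delayed_items();
	snapshot("with flush   ");		/* sees both entries */
	return 0;
}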