author     Dave Chinner <dchinner@redhat.com>    2013-08-28 10:18:00 +1000
committer  Al Viro <viro@zeniv.linux.org.uk>     2013-09-10 18:56:30 -0400
commit     f604156751db77e08afe47ce29fe8f3d51ad9b04
tree       e0a109be920e4db54ac6384bebb2460aa1e309a9 /fs
parent     d38fa6986e9124f827aa6ea4a9dde01e67a37be7
dcache: convert to use new lru list infrastructure
[glommer@openvz.org: don't reintroduce double decrement of nr_unused_dentries, adapted for new LRU return codes]
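
The patch replaces dcache's open-coded s_dentry_lru handling with the generic list_lru calls (list_lru_add, list_lru_del, list_lru_walk, list_lru_dispose_all), where a per-object isolate callback returns LRU_SKIP, LRU_ROTATE or LRU_REMOVED and reclaimable entries are collected on a caller-supplied dispose list. What follows is only a rough, self-contained userspace sketch of that walk/isolate pattern: the list helpers and the names node_isolate and lru_walk are invented for illustration, locking is omitted entirely, and none of this is the kernel API.

#include <stdio.h>

/* Stand-ins for the kernel's enum lru_status return codes. */
enum lru_status { LRU_REMOVED, LRU_ROTATE, LRU_SKIP };

struct node {
	struct node *prev, *next;	/* doubly linked, list_head style */
	int refcount;			/* non-zero: object still in use */
	int referenced;			/* touched since the last scan */
};

static void list_del_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail_node(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Rough analogue of dentry_lru_isolate(): classify one LRU entry. */
static enum lru_status node_isolate(struct node *n, struct node *dispose)
{
	if (n->refcount) {		/* in use: just drop it from the LRU */
		list_del_node(n);
		return LRU_REMOVED;
	}
	if (n->referenced) {		/* recently used: ask for another pass */
		n->referenced = 0;
		return LRU_ROTATE;
	}
	list_del_node(n);		/* idle: isolate onto the dispose list */
	list_add_tail_node(n, dispose);
	return LRU_REMOVED;
}

/*
 * Rough analogue of list_lru_walk(): scan from the cold end, let the
 * isolate callback decide, and perform the rotation on its behalf.
 * Returns how many entries were removed from the LRU.
 */
static long lru_walk(struct node *lru, struct node *dispose, long nr_to_scan)
{
	struct node *n = lru->next;
	long removed = 0;

	while (nr_to_scan-- > 0 && n != lru) {
		struct node *next = n->next;

		switch (node_isolate(n, dispose)) {
		case LRU_REMOVED:
			removed++;
			break;
		case LRU_ROTATE:	/* the walker, not the callback, moves it */
			list_del_node(n);
			list_add_tail_node(n, lru);
			break;
		case LRU_SKIP:		/* e.g. a lock could not be taken */
			break;
		}
		n = next;
	}
	return removed;
}

int main(void)
{
	struct node lru = { &lru, &lru }, dispose = { &dispose, &dispose };
	struct node items[4] = { { .refcount = 1 }, { .referenced = 1 } };
	int i;

	for (i = 0; i < 4; i++)
		list_add_tail_node(&items[i], &lru);

	/* one in use, one rotated, two moved to the dispose list */
	printf("removed %ld entries from the LRU\n", lru_walk(&lru, &dispose, 4));
	return 0;
}

As in dentry_lru_isolate() below, the callback only classifies an entry; rotating a referenced entry back to the tail is left to the common walker, and only genuinely idle entries leave the LRU for the dispose list.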
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs')
-rw-r--r--  fs/dcache.c  170
-rw-r--r--  fs/super.c    11
2 files changed, 83 insertions(+), 98 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 77d466b13fef..38a4a03499a2 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -37,6 +37,7 @@
 #include <linux/rculist_bl.h>
 #include <linux/prefetch.h>
 #include <linux/ratelimit.h>
+#include <linux/list_lru.h>
 #include "internal.h"
 #include "mount.h"
 
@@ -356,28 +357,17 @@ static void dentry_unlink_inode(struct dentry * dentry)
 }
 
 /*
- * dentry_lru_(add|del|move_list) must be called with d_lock held.
+ * dentry_lru_(add|del)_list) must be called with d_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
 	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
-		spin_lock(&dentry->d_sb->s_dentry_lru_lock);
+		if (list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
+			this_cpu_inc(nr_dentry_unused);
 		dentry->d_flags |= DCACHE_LRU_LIST;
-		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-		dentry->d_sb->s_nr_dentry_unused++;
-		this_cpu_inc(nr_dentry_unused);
-		spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
 	}
 }
 
-static void __dentry_lru_del(struct dentry *dentry)
-{
-	list_del_init(&dentry->d_lru);
-	dentry->d_flags &= ~DCACHE_LRU_LIST;
-	dentry->d_sb->s_nr_dentry_unused--;
-	this_cpu_dec(nr_dentry_unused);
-}
-
 /*
  * Remove a dentry with references from the LRU.
  *
@@ -393,27 +383,9 @@ static void dentry_lru_del(struct dentry *dentry)
 		return;
 	}
 
-	if (!list_empty(&dentry->d_lru)) {
-		spin_lock(&dentry->d_sb->s_dentry_lru_lock);
-		__dentry_lru_del(dentry);
-		spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
-	}
-}
-
-static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
-{
-	BUG_ON(dentry->d_flags & DCACHE_SHRINK_LIST);
-
-	spin_lock(&dentry->d_sb->s_dentry_lru_lock);
-	if (list_empty(&dentry->d_lru)) {
-		dentry->d_flags |= DCACHE_LRU_LIST;
-		list_add_tail(&dentry->d_lru, list);
-	} else {
-		list_move_tail(&dentry->d_lru, list);
-		dentry->d_sb->s_nr_dentry_unused--;
+	if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
 		this_cpu_dec(nr_dentry_unused);
-	}
-	spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
+	dentry->d_flags &= ~DCACHE_LRU_LIST;
 }
 
 /**
@@ -901,12 +873,72 @@ static void shrink_dentry_list(struct list_head *list)
 	rcu_read_unlock();
 }
 
+static enum lru_status
+dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+{
+	struct list_head *freeable = arg;
+	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
+
+
+	/*
+	 * we are inverting the lru lock/dentry->d_lock here,
+	 * so use a trylock. If we fail to get the lock, just skip
+	 * it
+	 */
+	if (!spin_trylock(&dentry->d_lock))
+		return LRU_SKIP;
+
+	/*
+	 * Referenced dentries are still in use. If they have active
+	 * counts, just remove them from the LRU. Otherwise give them
+	 * another pass through the LRU.
+	 */
+	if (dentry->d_lockref.count) {
+		list_del_init(&dentry->d_lru);
+		spin_unlock(&dentry->d_lock);
+		return LRU_REMOVED;
+	}
+
+	if (dentry->d_flags & DCACHE_REFERENCED) {
+		dentry->d_flags &= ~DCACHE_REFERENCED;
+		spin_unlock(&dentry->d_lock);
+
+		/*
+		 * The list move itself will be made by the common LRU code. At
+		 * this point, we've dropped the dentry->d_lock but keep the
+		 * lru lock. This is safe to do, since every list movement is
+		 * protected by the lru lock even if both locks are held.
+		 *
+		 * This is guaranteed by the fact that all LRU management
+		 * functions are intermediated by the LRU API calls like
+		 * list_lru_add and list_lru_del. List movement in this file
+		 * only ever occur through this functions or through callbacks
+		 * like this one, that are called from the LRU API.
+		 *
+		 * The only exceptions to this are functions like
+		 * shrink_dentry_list, and code that first checks for the
+		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
+		 * operating only with stack provided lists after they are
+		 * properly isolated from the main list.  It is thus, always a
+		 * local access.
		 */
+		return LRU_ROTATE;
+	}
+
+	dentry->d_flags |= DCACHE_SHRINK_LIST;
+	list_move_tail(&dentry->d_lru, freeable);
+	this_cpu_dec(nr_dentry_unused);
+	spin_unlock(&dentry->d_lock);
+
+	return LRU_REMOVED;
+}
+
 /**
  * prune_dcache_sb - shrink the dcache
  * @sb: superblock
- * @count: number of entries to try to free
+ * @nr_to_scan : number of entries to try to free
  *
- * Attempt to shrink the superblock dcache LRU by @count entries. This is
+ * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
  * done when we need more memory an called from the superblock shrinker
  * function.
  *
@@ -915,45 +947,12 @@ static void shrink_dentry_list(struct list_head *list)
  */
 long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan)
 {
-	struct dentry *dentry;
-	LIST_HEAD(referenced);
-	LIST_HEAD(tmp);
-	long freed = 0;
+	LIST_HEAD(dispose);
+	long freed;
 
-relock:
-	spin_lock(&sb->s_dentry_lru_lock);
-	while (!list_empty(&sb->s_dentry_lru)) {
-		dentry = list_entry(sb->s_dentry_lru.prev,
-				struct dentry, d_lru);
-		BUG_ON(dentry->d_sb != sb);
-
-		if (!spin_trylock(&dentry->d_lock)) {
-			spin_unlock(&sb->s_dentry_lru_lock);
-			cpu_relax();
-			goto relock;
-		}
-
-		if (dentry->d_flags & DCACHE_REFERENCED) {
-			dentry->d_flags &= ~DCACHE_REFERENCED;
-			list_move(&dentry->d_lru, &referenced);
-			spin_unlock(&dentry->d_lock);
-		} else {
-			list_move(&dentry->d_lru, &tmp);
-			dentry->d_flags |= DCACHE_SHRINK_LIST;
-			this_cpu_dec(nr_dentry_unused);
-			sb->s_nr_dentry_unused--;
-			spin_unlock(&dentry->d_lock);
-			freed++;
-			if (!--nr_to_scan)
-				break;
-		}
-		cond_resched_lock(&sb->s_dentry_lru_lock);
-	}
-	if (!list_empty(&referenced))
-		list_splice(&referenced, &sb->s_dentry_lru);
-	spin_unlock(&sb->s_dentry_lru_lock);
-
-	shrink_dentry_list(&tmp);
+	freed = list_lru_walk(&sb->s_dentry_lru, dentry_lru_isolate,
+			      &dispose, nr_to_scan);
+	shrink_dentry_list(&dispose);
 	return freed;
 }
 
@@ -987,24 +986,10 @@ shrink_dcache_list(
  */
 void shrink_dcache_sb(struct super_block *sb)
 {
-	LIST_HEAD(tmp);
-
-	spin_lock(&sb->s_dentry_lru_lock);
-	while (!list_empty(&sb->s_dentry_lru)) {
-		/*
-		 * account for removal here so we don't need to handle it later
-		 * even though the dentry is no longer on the lru list.
-		 */
-		list_splice_init(&sb->s_dentry_lru, &tmp);
-		this_cpu_sub(nr_dentry_unused, sb->s_nr_dentry_unused);
-		sb->s_nr_dentry_unused = 0;
-		spin_unlock(&sb->s_dentry_lru_lock);
+	long disposed;
 
-		shrink_dcache_list(&tmp);
-
-		spin_lock(&sb->s_dentry_lru_lock);
-	}
-	spin_unlock(&sb->s_dentry_lru_lock);
+	disposed = list_lru_dispose_all(&sb->s_dentry_lru, shrink_dcache_list);
+	this_cpu_sub(nr_dentry_unused, disposed);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
 
@@ -1366,7 +1351,8 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
 	if (dentry->d_lockref.count) {
 		dentry_lru_del(dentry);
 	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-		dentry_lru_move_list(dentry, &data->dispose);
+		dentry_lru_del(dentry);
+		list_add_tail(&dentry->d_lru, &data->dispose);
 		dentry->d_flags |= DCACHE_SHRINK_LIST;
 		data->found++;
 		ret = D_WALK_NORETRY;
diff --git a/fs/super.c b/fs/super.c
index aa7995d73bcc..cd3c2cd9144d 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -79,11 +79,11 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 		fs_objects = sb->s_op->nr_cached_objects(sb);
 
 	inodes = list_lru_count(&sb->s_inode_lru);
-	total_objects = sb->s_nr_dentry_unused + inodes + fs_objects + 1;
+	dentries = list_lru_count(&sb->s_dentry_lru);
+	total_objects = dentries + inodes + fs_objects + 1;
 
 	/* proportion the scan between the caches */
-	dentries = mult_frac(sc->nr_to_scan, sb->s_nr_dentry_unused,
-							total_objects);
+	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
 	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
 
 	/*
@@ -117,7 +117,7 @@ static unsigned long super_cache_count(struct shrinker *shrink,
 	if (sb->s_op && sb->s_op->nr_cached_objects)
 		total_objects = sb->s_op->nr_cached_objects(sb);
 
-	total_objects += sb->s_nr_dentry_unused;
+	total_objects += list_lru_count(&sb->s_dentry_lru);
 	total_objects += list_lru_count(&sb->s_inode_lru);
 
 	total_objects = vfs_pressure_ratio(total_objects);
@@ -191,8 +191,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	INIT_HLIST_NODE(&s->s_instances);
 	INIT_HLIST_BL_HEAD(&s->s_anon);
 	INIT_LIST_HEAD(&s->s_inodes);
-	INIT_LIST_HEAD(&s->s_dentry_lru);
-	spin_lock_init(&s->s_dentry_lru_lock);
+	list_lru_init(&s->s_dentry_lru);
 	list_lru_init(&s->s_inode_lru);
 	INIT_LIST_HEAD(&s->s_mounts);
 	init_rwsem(&s->s_umount);
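
On the fs/super.c side, super_cache_scan() now reads the unused-dentry count with list_lru_count() and splits sc->nr_to_scan between the dentry and inode caches in proportion to their object counts via mult_frac(). The following is a small standalone illustration of that split; mult_frac() is re-created here as x * n / d computed in quotient/remainder form to avoid overflowing the intermediate product, and the counts are made-up example values rather than anything measured.

#include <stdio.h>

/* Userspace stand-in for the kernel's mult_frac(x, n, d): x * n / d
 * evaluated as quotient/remainder so the product cannot overflow. */
static unsigned long mult_frac(unsigned long x, unsigned long n, unsigned long d)
{
	unsigned long q = x / d;
	unsigned long r = x % d;

	return q * n + r * n / d;
}

int main(void)
{
	unsigned long nr_to_scan = 128;	/* batch handed in by the shrinker core */
	unsigned long dentries  = 9000;	/* list_lru_count(&sb->s_dentry_lru) */
	unsigned long inodes    = 3000;	/* list_lru_count(&sb->s_inode_lru) */
	unsigned long fs_objects = 0;	/* sb->s_op->nr_cached_objects(sb), if any */
	unsigned long total = dentries + inodes + fs_objects + 1;

	/* proportion the scan between the caches, as super_cache_scan() does */
	printf("dentry share: %lu\n", mult_frac(nr_to_scan, dentries, total));
	printf("inode share:  %lu\n", mult_frac(nr_to_scan, inodes, total));
	return 0;
}

With proportional sizing, the cache holding most of the objects also absorbs most of each scan batch, which is the behaviour the shrinker had before and keeps after the list_lru conversion.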