author | Nick Piggin <npiggin@kernel.dk> | 2011-01-07 17:49:33 +1100
---|---|---
committer | Nick Piggin <npiggin@kernel.dk> | 2011-01-07 17:50:21 +1100
commit | da5029563a0a026c64821b09e8e7b4fd81d3fe1b | (patch)
tree | 5d5618e0cb382390073377b1be7d0aa76879ac54 | /fs/dcache.c
parent | b7ab39f631f505edc2bbdb86620d5493f995c9da | (diff)
fs: dcache scale d_unhashed
Protect the d_unhashed(dentry) condition with d_lock. This means keeping the
DCACHE_UNHASHED bit in sync with hash manipulations.
Signed-off-by: Nick Piggin <npiggin@kernel.dk>
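For context, here is a minimal, illustrative sketch of the pattern this patch enforces: d_unhashed() reads the DCACHE_UNHASHED bit from d_flags, so both the test and any hashing/unhashing of the dentry must happen under dentry->d_lock. This is not code from the patch; it assumes the 2.6.38-era dcache API (dcache_lock still present), and the helper name is invented for illustration.

```c
/*
 * Illustrative sketch only (kernel-style C, not from the patch):
 * check d_unhashed() with dentry->d_lock held, so the test cannot
 * race against __d_drop()/d_rehash() flipping DCACHE_UNHASHED.
 */
static int example_dentry_is_dropped(struct dentry *dentry)
{
	int unhashed;

	spin_lock(&dentry->d_lock);
	unhashed = d_unhashed(dentry);	/* DCACHE_UNHASHED read under d_lock */
	spin_unlock(&dentry->d_lock);

	return unhashed;
}
```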
Diffstat (limited to 'fs/dcache.c')
-rw-r--r-- | fs/dcache.c | 74 |
1 file changed, 50 insertions, 24 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 81e91502b294..ee127f9ab274 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -46,6 +46,7 @@
  * - d_name
  * - d_lru
  * - d_count
+ * - d_unhashed()
  *
  * Ordering:
  * dcache_lock
@@ -53,6 +54,13 @@
  * dcache_lru_lock
  * dcache_hash_lock
  *
+ * If there is an ancestor relationship:
+ * dentry->d_parent->...->d_parent->d_lock
+ *   ...
+ *   dentry->d_parent->d_lock
+ *   dentry->d_lock
+ *
+ * If no ancestor relationship:
  * if (dentry1 < dentry2)
  *   dentry1->d_lock
  *   dentry2->d_lock
@@ -379,7 +387,9 @@ int d_invalidate(struct dentry * dentry)
 	 * If it's already been dropped, return OK.
 	 */
 	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
 	if (d_unhashed(dentry)) {
+		spin_unlock(&dentry->d_lock);
 		spin_unlock(&dcache_lock);
 		return 0;
 	}
@@ -388,9 +398,11 @@ int d_invalidate(struct dentry * dentry)
 	 * to get rid of unused child entries.
 	 */
 	if (!list_empty(&dentry->d_subdirs)) {
+		spin_unlock(&dentry->d_lock);
 		spin_unlock(&dcache_lock);
 		shrink_dcache_parent(dentry);
 		spin_lock(&dcache_lock);
+		spin_lock(&dentry->d_lock);
 	}
 
 	/*
@@ -403,7 +415,6 @@ int d_invalidate(struct dentry * dentry)
 	 * we might still populate it if it was a
 	 * working directory or similar).
 	 */
-	spin_lock(&dentry->d_lock);
 	if (dentry->d_count > 1) {
 		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
 			spin_unlock(&dentry->d_lock);
@@ -490,35 +501,44 @@ EXPORT_SYMBOL(dget_parent);
  * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
  */
-
-static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
+static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
 {
-	struct list_head *head, *next, *tmp;
-	struct dentry *alias, *discon_alias=NULL;
+	struct dentry *alias, *discon_alias;
 
-	head = &inode->i_dentry;
-	next = inode->i_dentry.next;
-	while (next != head) {
-		tmp = next;
-		next = tmp->next;
-		prefetch(next);
-		alias = list_entry(tmp, struct dentry, d_alias);
+again:
+	discon_alias = NULL;
+	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
+		spin_lock(&alias->d_lock);
 		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 			if (IS_ROOT(alias) &&
-			    (alias->d_flags & DCACHE_DISCONNECTED))
+			    (alias->d_flags & DCACHE_DISCONNECTED)) {
 				discon_alias = alias;
-			else if (!want_discon) {
-				__dget_locked(alias);
+			} else if (!want_discon) {
+				__dget_locked_dlock(alias);
+				spin_unlock(&alias->d_lock);
+				return alias;
+			}
+		}
+		spin_unlock(&alias->d_lock);
+	}
+	if (discon_alias) {
+		alias = discon_alias;
+		spin_lock(&alias->d_lock);
+		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
+			if (IS_ROOT(alias) &&
+			    (alias->d_flags & DCACHE_DISCONNECTED)) {
+				__dget_locked_dlock(alias);
+				spin_unlock(&alias->d_lock);
 				return alias;
 			}
 		}
+		spin_unlock(&alias->d_lock);
+		goto again;
 	}
-	if (discon_alias)
-		__dget_locked(discon_alias);
-	return discon_alias;
+	return NULL;
 }
 
-struct dentry * d_find_alias(struct inode *inode)
+struct dentry *d_find_alias(struct inode *inode)
 {
 	struct dentry *de = NULL;
 
@@ -801,8 +821,8 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 	spin_lock(&dcache_lock);
 	spin_lock(&dentry->d_lock);
 	dentry_lru_del(dentry);
-	spin_unlock(&dentry->d_lock);
 	__d_drop(dentry);
+	spin_unlock(&dentry->d_lock);
 	spin_unlock(&dcache_lock);
 
 	for (;;) {
@@ -817,8 +837,8 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 						d_u.d_child) {
 			spin_lock(&loop->d_lock);
 			dentry_lru_del(loop);
-			spin_unlock(&loop->d_lock);
 			__d_drop(loop);
+			spin_unlock(&loop->d_lock);
 			cond_resched_lock(&dcache_lock);
 		}
 		spin_unlock(&dcache_lock);
@@ -1863,7 +1883,10 @@ static void d_move_locked(struct dentry * dentry, struct dentry * target)
 	/*
 	 * XXXX: do we really need to take target->d_lock?
 	 */
-	if (target < dentry) {
+	if (d_ancestor(dentry, target)) {
+		spin_lock(&dentry->d_lock);
+		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
+	} else if (d_ancestor(target, dentry) || target < dentry) {
 		spin_lock(&target->d_lock);
 		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 	} else {
@@ -2542,13 +2565,16 @@ resume:
 		struct list_head *tmp = next;
 		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
 		next = tmp->next;
-		if (d_unhashed(dentry)||!dentry->d_inode)
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+		if (d_unhashed(dentry) || !dentry->d_inode) {
+			spin_unlock(&dentry->d_lock);
 			continue;
+		}
 		if (!list_empty(&dentry->d_subdirs)) {
+			spin_unlock(&dentry->d_lock);
 			this_parent = dentry;
 			goto repeat;
 		}
-		spin_lock(&dentry->d_lock);
 		dentry->d_count--;
 		spin_unlock(&dentry->d_lock);
 	}
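As a companion to the d_move_locked() hunk above, the sketch below illustrates the lock-ordering rule the patch documents: when one dentry is an ancestor of the other, the ancestor's d_lock is taken first; unrelated dentries fall back to address order. The helper name example_lock_two_dentries is invented for illustration and is not part of the patch; in the patch this runs under dcache_lock, which is what keeps the ancestor relationship stable while the locks are taken.

```c
/*
 * Illustrative sketch (helper name invented): acquire d_lock on two
 * dentries in the order the patch uses in d_move_locked(). An
 * ancestor's d_lock is always taken before a descendant's; otherwise
 * the lower address is locked first, with the second lock taken as
 * DENTRY_D_LOCK_NESTED to satisfy lockdep.
 */
static void example_lock_two_dentries(struct dentry *dentry, struct dentry *target)
{
	if (d_ancestor(dentry, target)) {
		/* dentry is an ancestor of target: lock dentry first */
		spin_lock(&dentry->d_lock);
		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
	} else if (d_ancestor(target, dentry) || target < dentry) {
		/* target is an ancestor of dentry, or wins the address tie-break */
		spin_lock(&target->d_lock);
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&dentry->d_lock);
		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
	}
}
```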