| author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-07-05 13:17:12 -0400 |
|---|---|---|
| committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-07-05 13:17:12 -0400 |
| commit | 83715ad54fad5a7ed330110f83e31ae92630e9d9 (patch) | |
| tree | 3e8717ef9d168cbefd950bac544cb38542e6ee90 /fs/nfs | |
| parent | 5e66dd6d66ffe758b39b6dcadf2330753ee1159b (diff) | |
NFS: Fix NFS page_state usage
The code path introduced with the FLUSH_INVALIDATE argument to nfs_sync_inode_wait()
fails to decrement the NR_UNSTABLE_NFS page state counter for pages that are being
released.
Also fix a longstanding similar bug when nfs_commit_list() fails.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs')
-rw-r--r-- | fs/nfs/write.c | 20 |
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index bca5734ca9fb..86bac6a5008e 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -578,7 +578,7 @@ static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, un
 	return ret;
 }
 
-static void nfs_cancel_requests(struct list_head *head)
+static void nfs_cancel_dirty_list(struct list_head *head)
 {
 	struct nfs_page *req;
 	while(!list_empty(head)) {
@@ -589,6 +589,19 @@ static void nfs_cancel_requests(struct list_head *head)
 	}
 }
 
+static void nfs_cancel_commit_list(struct list_head *head)
+{
+	struct nfs_page *req;
+
+	while(!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_inode_remove_request(req);
+		nfs_clear_page_writeback(req);
+		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+	}
+}
+
 /*
  * nfs_scan_dirty - Scan an inode for dirty requests
  * @inode: NFS inode to scan
@@ -1381,6 +1394,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req);
 		nfs_clear_page_writeback(req);
+		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 	}
 	return -ENOMEM;
 }
@@ -1499,7 +1513,7 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
 		if (pages != 0) {
 			spin_unlock(&nfsi->req_lock);
 			if (how & FLUSH_INVALIDATE)
-				nfs_cancel_requests(&head);
+				nfs_cancel_dirty_list(&head);
 			else
 				ret = nfs_flush_list(inode, &head, pages, how);
 			spin_lock(&nfsi->req_lock);
@@ -1512,7 +1526,7 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
 			break;
 		if (how & FLUSH_INVALIDATE) {
 			spin_unlock(&nfsi->req_lock);
-			nfs_cancel_requests(&head);
+			nfs_cancel_commit_list(&head);
 			spin_lock(&nfsi->req_lock);
 			continue;
 		}
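For context, the sketch below is a minimal userspace model (not kernel code) of the accounting invariant this patch restores: every page counted as unstable when a request is queued for COMMIT must be uncounted on every exit path, whether the commit completes, the commit list is cancelled under FLUSH_INVALIDATE, or nfs_commit_list() fails and releases the requests. All identifiers in the sketch (unstable_pages, mark_request_commit(), cancel_commit(), req_t) are illustrative stand-ins, not kernel symbols.

```c
#include <assert.h>
#include <stdio.h>

/* Stand-in for the per-zone NR_UNSTABLE_NFS page state counter. */
static long unstable_pages;

typedef struct { int page; } req_t;	/* toy request, not struct nfs_page */

/* Queue a request for COMMIT: its page becomes "unstable". */
static void mark_request_commit(req_t *req)
{
	(void)req;
	unstable_pages++;
}

/* Commit reply received: the page is now stable on the server. */
static void commit_done(req_t *req)
{
	(void)req;
	unstable_pages--;
}

/* Request dropped without a commit (FLUSH_INVALIDATE or commit failure). */
static void cancel_commit(req_t *req)
{
	(void)req;
	/* Omitting this decrement models the leak the patch fixes. */
	unstable_pages--;
}

int main(void)
{
	req_t a = { 1 }, b = { 2 };

	mark_request_commit(&a);
	mark_request_commit(&b);
	commit_done(&a);	/* normal completion */
	cancel_commit(&b);	/* cancelled or failed commit */

	assert(unstable_pages == 0);	/* counter must balance */
	printf("unstable_pages = %ld\n", unstable_pages);
	return 0;
}
```

If the decrement were missing from the cancel path, unstable_pages would stay at 1 after every cancelled or failed commit, mirroring the counter leak described in the commit message.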