author     Steven Whitehouse <swhiteho@redhat.com>   2006-07-10 15:47:01 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>   2006-07-10 15:47:01 -0400
commit     ffeb874b2b893aea7d10b0b088e06a7b1ded2a3e (patch)
tree       e670758c0ed4347fde74411603cf252978c966f6 /fs/gfs2
parent     dc3e130a08996e2b56381365a5ac7bb1ce2a9f47 (diff)
[GFS2] Bug fix to gfs2_readpages()
This fixes a bug where we sometimes released a page incorrectly
when reading a stuffed file; this is the bug that Kevin reported
when using Xen.
Cc: Kevin Anderson <kanderso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/ops_address.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 27ce30148e69..2c4ec5cf21ff 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -249,8 +249,6 @@ out_unlock:
         goto out;
 }
 
-#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
-
 /**
  * gfs2_readpages - Read a bunch of pages at once
  *
@@ -290,7 +288,8 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
         struct pagevec lru_pvec;
         pagevec_init(&lru_pvec, 0);
         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-                struct page *page = list_to_page(pages);
+                struct page *page = list_entry(pages->prev, struct page, lru);
+                prefetchw(&page->flags);
                 list_del(&page->lru);
                 if (!add_to_page_cache(page, mapping,
                                 page->index, GFP_KERNEL)) {
@@ -298,8 +297,9 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
                         unlock_page(page);
                         if (!pagevec_add(&lru_pvec, page))
                                 __pagevec_lru_add(&lru_pvec);
+                } else {
+                        page_cache_release(page);
                 }
-                page_cache_release(page);
         }
         pagevec_lru_add(&lru_pvec);
         ret = 0;
@@ -321,7 +321,7 @@ out_noerror:
 out_unlock:
         /* unlock all pages, we can't do any I/O right now */
         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-                struct page *page = list_to_page(pages);
+                struct page *page = list_entry(pages->prev, struct page, lru);
                 list_del(&page->lru);
                 unlock_page(page);
                 page_cache_release(page);
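A note on why the new else branch is the right shape: each page on the readahead list is handed to ->readpages() with one reference held by the caller. When add_to_page_cache() succeeds, the page cache takes its own reference and the pagevec/LRU code the page is then fed to drops the caller's reference once the page reaches the LRU, so the unconditional page_cache_release() in the old code dropped one reference too many for every page that was added successfully. Only when add_to_page_cache() fails does the caller still own its reference and need to release it. The following is a minimal, self-contained userspace model of that ownership rule, not kernel code; struct toy_page, mock_add_to_page_cache() and readpages_iteration() are hypothetical stand-ins for illustration.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct page; only the reference count matters here. */
struct toy_page {
	int refcount;
};

/*
 * Mock of add_to_page_cache(): on success the page cache takes its own
 * reference; on failure the page is left untouched.  Returns 0 on
 * success and non-zero on failure, mirroring the kernel convention.
 */
static int mock_add_to_page_cache(struct toy_page *p, bool succeed)
{
	if (!succeed)
		return -1;	/* e.g. the page is already in the cache */
	p->refcount++;		/* the page cache's own reference */
	return 0;
}

/*
 * One iteration of a ->readpages()-style loop.  'old_behaviour' models the
 * code before this patch (unconditional release); passing false models the
 * fixed code, which drops the caller's reference only when the add fails.
 */
static void readpages_iteration(struct toy_page *p, bool added,
				bool old_behaviour)
{
	if (!mock_add_to_page_cache(p, added)) {
		p->refcount--;		/* LRU pagevec consumes the caller's ref */
		if (old_behaviour)
			p->refcount--;	/* extra page_cache_release(): one too many */
	} else {
		p->refcount--;		/* failure: caller drops its own reference */
	}
}

int main(void)
{
	/* A readahead page starts out with one reference held by the caller. */
	struct toy_page before_fix = { .refcount = 1 };
	struct toy_page after_fix  = { .refcount = 1 };

	readpages_iteration(&before_fix, true, true);
	readpages_iteration(&after_fix, true, false);

	printf("before the fix: refcount = %d (page gone while still cached)\n",
	       before_fix.refcount);
	printf("after the fix:  refcount = %d (the page cache's reference)\n",
	       after_fix.refcount);
	assert(after_fix.refcount == 1);
	return 0;
}
```

Run, the "before" case ends with a reference count of 0 while the page is notionally still in the cache, which models the kind of premature free this patch guards against; the "after" case leaves exactly the page cache's reference, matching the pattern the fixed loop follows.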