| author | Nick Piggin <npiggin@suse.de> | 2009-09-16 11:50:12 +0200 |
|---|---|---|
| committer | Andi Kleen <ak@linux.intel.com> | 2009-09-16 11:50:12 +0200 |
| commit | 750b4987b0cd4d408e54cb83a80a067cbe690feb (patch) | |
| tree | 1372d0f64da3575eb93bac284c73e548e23b9ea6 | |
| parent | 2a7684a23e9c263c2a1e8b2c0027ad1836a0f9df (diff) | |
HWPOISON: Refactor truncate to allow direct truncating of page v2
Extract truncate_inode_page() out of the truncate path so that
it can be used by memory-failure.c.
[AK: description, headers, fix typos]
v2: Some whitespace changes from Fengguang Wu
Signed-off-by: Andi Kleen <ak@linux.intel.com>
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | include/linux/mm.h | 2 |
| -rw-r--r-- | mm/truncate.c | 29 |

2 files changed, 17 insertions, 14 deletions
```diff
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 082b68cb5ffe..8cbc0aafd5bd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -794,6 +794,8 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 extern int vmtruncate(struct inode * inode, loff_t offset);
 extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 
+int truncate_inode_page(struct address_space *mapping, struct page *page);
+
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         unsigned long address, unsigned int flags);
diff --git a/mm/truncate.c b/mm/truncate.c
index ccc3ecf7cb98..2519a7c92873 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -93,11 +93,11 @@ EXPORT_SYMBOL(cancel_dirty_page);
  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
  */
-static void
+static int
 truncate_complete_page(struct address_space *mapping, struct page *page)
 {
         if (page->mapping != mapping)
-                return;
+                return -EIO;
 
         if (page_has_private(page))
                 do_invalidatepage(page, 0);
@@ -108,6 +108,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
         remove_from_page_cache(page);
         ClearPageMappedToDisk(page);
         page_cache_release(page);       /* pagecache ref */
+        return 0;
 }
 
 /*
@@ -135,6 +136,16 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
         return ret;
 }
 
+int truncate_inode_page(struct address_space *mapping, struct page *page)
+{
+        if (page_mapped(page)) {
+                unmap_mapping_range(mapping,
+                                   (loff_t)page->index << PAGE_CACHE_SHIFT,
+                                   PAGE_CACHE_SIZE, 0);
+        }
+        return truncate_complete_page(mapping, page);
+}
+
 /**
  * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
@@ -196,12 +207,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                 unlock_page(page);
                                 continue;
                         }
-                        if (page_mapped(page)) {
-                                unmap_mapping_range(mapping,
-                                  (loff_t)page_index<<PAGE_CACHE_SHIFT,
-                                  PAGE_CACHE_SIZE, 0);
-                        }
-                        truncate_complete_page(mapping, page);
+                        truncate_inode_page(mapping, page);
                         unlock_page(page);
                 }
                 pagevec_release(&pvec);
@@ -238,15 +244,10 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                 break;
                         lock_page(page);
                         wait_on_page_writeback(page);
-                        if (page_mapped(page)) {
-                                unmap_mapping_range(mapping,
-                                  (loff_t)page->index<<PAGE_CACHE_SHIFT,
-                                  PAGE_CACHE_SIZE, 0);
-                        }
+                        truncate_inode_page(mapping, page);
                         if (page->index > next)
                                 next = page->index;
                         next++;
-                        truncate_complete_page(mapping, page);
                         unlock_page(page);
                 }
                 pagevec_release(&pvec);
```
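The practical effect of the refactor is that a single, already-locked pagecache page can now be truncated directly, and the new int return value (0 on success, -EIO if the page no longer belongs to the mapping) tells the caller whether anything was actually dropped. Below is a minimal sketch of how a hypothetical caller, such as the hwpoison handler in memory-failure.c that this series adds, might use the helper; the function name and surrounding logic are illustrative assumptions, not code from this patch:

```c
/*
 * Illustrative sketch only -- not part of this patch. Assumes the caller
 * already holds the page lock and has looked up the page's mapping, the
 * same conditions truncate_inode_pages_range() establishes before it
 * calls truncate_inode_page().
 */
static int discard_pagecache_page(struct address_space *mapping,
                                  struct page *page)
{
        int ret;

        /* Don't drop the page while it is still under writeback. */
        wait_on_page_writeback(page);

        /*
         * truncate_inode_page() unmaps the page from any user mappings
         * and removes it from the pagecache; it returns -EIO if the page
         * was concurrently truncated away (page->mapping != mapping).
         */
        ret = truncate_inode_page(mapping, page);
        if (ret)
                return ret;     /* nothing was truncated */

        return 0;               /* page unmapped and released */
}
```

The -EIO case matters because a racing truncate can detach the page from the mapping between lookup and lock; a caller can then treat the page as already gone rather than report a failure.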