Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--	include/linux/pagemap.h	90
1 file changed, 64 insertions(+), 26 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a39b38ccdc97..5da31c12101c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -143,6 +143,29 @@ static inline int page_cache_get_speculative(struct page *page)
 	return 1;
 }
 
+/*
+ * Same as above, but add instead of inc (could just be merged)
+ */
+static inline int page_cache_add_speculative(struct page *page, int count)
+{
+	VM_BUG_ON(in_interrupt());
+
+#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+# ifdef CONFIG_PREEMPT
+	VM_BUG_ON(!in_atomic());
+# endif
+	VM_BUG_ON(page_count(page) == 0);
+	atomic_add(count, &page->_count);
+
+#else
+	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
+		return 0;
+#endif
+	VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+
+	return 1;
+}
+
 static inline int page_freeze_refs(struct page *page, int count)
 {
 	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
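
(Annotation, not part of the patch.) A minimal sketch of how the new helper might be used, assuming the lockless-pagecache convention: under rcu_read_lock(), a caller that wants several references on a page found by a lockless lookup takes them in a single atomic step, and must cope with failure when the page is concurrently being freed. The function name and the bare radix_tree_lookup() are illustrative only; a real caller must also recheck that the page is still at 'index' once the references are held.

	/* Hypothetical caller, for illustration only. */
	static struct page *grab_page_refs(struct address_space *mapping,
					   pgoff_t index, int count)
	{
		struct page *page;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, index);
		if (page && !page_cache_add_speculative(page, count))
			page = NULL;	/* being freed: caller should retry */
		rcu_read_unlock();
		return page;
	}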
@@ -227,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, filler, data);
 }
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
-
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		ClearPageLocked(page);
-	return error;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
@@ -271,13 +271,28 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
+static inline void set_page_locked(struct page *page)
+{
+	set_bit(PG_locked, &page->flags);
+}
+
+static inline void clear_page_locked(struct page *page)
+{
+	clear_bit(PG_locked, &page->flags);
+}
+
+static inline int trylock_page(struct page *page)
+{
+	return !test_and_set_bit(PG_locked, &page->flags);
+}
+
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
 static inline void lock_page(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page(page);
 }
 
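
(Annotation, not part of the patch.) trylock_page() returns 1 when it acquires PG_locked and 0 when the page is already locked, so a caller that must not sleep can skip contended pages instead of blocking. A sketch with an invented function name:

	/* Hypothetical non-blocking caller, for illustration only. */
	static int try_process_page(struct page *page)
	{
		if (!trylock_page(page))
			return 0;	/* contended: skip rather than sleep */
		/* ... work on the locked page ... */
		unlock_page(page);
		return 1;
	}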
@@ -289,7 +304,7 @@ static inline void lock_page(struct page *page)
 static inline int lock_page_killable(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		return __lock_page_killable(page);
 	return 0;
 }
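
(Annotation, not part of the patch.) lock_page_killable() returns 0 once the lock is held, or a negative error if a fatal signal ends the wait, so the result must be propagated to the caller. A sketch with an invented function name:

	/* Hypothetical caller, for illustration only. */
	static int lock_and_touch_page(struct page *page)
	{
		int err = lock_page_killable(page);

		if (err)
			return err;	/* fatal signal while waiting */
		/* ... page is locked here ... */
		unlock_page(page);
		return 0;
	}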
@@ -301,7 +316,7 @@ static inline int lock_page_killable(struct page *page)
 static inline void lock_page_nosync(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page_nosync(page);
 }
 
@@ -386,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 	return ret;
 }
 
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+	int error;
+
+	set_page_locked(page);
+	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+	if (unlikely(error))
+		clear_page_locked(page);
+	return error;
+}
+
 #endif /* _LINUX_PAGEMAP_H */
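
(Annotation, not part of the patch.) A sketch of the calling convention for add_to_page_cache() after this change, under stated assumptions: the page is newly allocated and otherwise unreferenced; on success it is returned locked and in the page cache, and on failure (for example -EEXIST from a racing insert) it has been unlocked again and only the caller's reference remains. The helper name below is hypothetical:

	/* Hypothetical caller, for illustration only. */
	static struct page *alloc_and_insert_page(struct address_space *mapping,
						  pgoff_t index)
	{
		struct page *page = page_cache_alloc(mapping);

		if (!page)
			return NULL;
		if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
			page_cache_release(page);
			return NULL;
		}
		return page;	/* locked and in the page cache */
	}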