Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--  include/linux/memcontrol.h | 105
1 file changed, 21 insertions(+), 84 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 60e95872da29..b3e7a667e03c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
unsigned int generation;
};
-enum mem_cgroup_filter_t {
- VISIT, /* visit current node */
- SKIP, /* skip the current node and continue traversal */
- SKIP_TREE, /* skip the whole subtree and continue traversal */
-};
-
-/*
- * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
- * iterate through the hierarchy tree. Each tree element is checked by the
- * predicate before it is returned by the iterator. If a filter returns
- * SKIP or SKIP_TREE then the iterator code continues traversal (with the
- * next node down the hierarchy or the next node that doesn't belong under the
- * memcg's subtree).
- */
-typedef enum mem_cgroup_filter_t
-(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
-
#ifdef CONFIG_MEMCG
/*
* All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok);
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
- struct mem_cgroup *prev,
- struct mem_cgroup_reclaim_cookie *reclaim,
- mem_cgroup_iter_filter cond);
-
-static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
- struct mem_cgroup *prev,
- struct mem_cgroup_reclaim_cookie *reclaim)
-{
- return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
-}
-
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+ struct mem_cgroup *,
+ struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
/*
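
[With the conditional iterator gone, callers are back to the plain pre-order
hierarchy walk. A minimal sketch of the canonical loop over root's subtree,
assuming <linux/memcontrol.h>; should_stop() and do_something() are
illustrative placeholders, not kernel functions:

	struct mem_cgroup *memcg;

	/* Start the walk with prev == NULL; the iterator itself drops
	 * the reference on each prev it is handed back. */
	for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
		if (should_stop(memcg)) {
			/* An early exit must release the last node. */
			mem_cgroup_iter_break(root, memcg);
			break;
		}
		do_something(memcg);
	}
]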
@@ -163,47 +137,24 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage);
-/**
- * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
- * @new: true to enable, false to disable
- *
- * Toggle whether a failed memcg charge should invoke the OOM killer
- * or just return -ENOMEM. Returns the previous toggle state.
- *
- * NOTE: Any path that enables the OOM killer before charging must
- * call mem_cgroup_oom_synchronize() afterward to finalize the
- * OOM handling and clean up.
- */
-static inline bool mem_cgroup_toggle_oom(bool new)
+static inline void mem_cgroup_oom_enable(void)
{
- bool old;
-
- old = current->memcg_oom.may_oom;
- current->memcg_oom.may_oom = new;
-
- return old;
+ WARN_ON(current->memcg_oom.may_oom);
+ current->memcg_oom.may_oom = 1;
}
-static inline void mem_cgroup_enable_oom(void)
+static inline void mem_cgroup_oom_disable(void)
{
- bool old = mem_cgroup_toggle_oom(true);
-
- WARN_ON(old == true);
-}
-
-static inline void mem_cgroup_disable_oom(void)
-{
- bool old = mem_cgroup_toggle_oom(false);
-
- WARN_ON(old == false);
+ WARN_ON(!current->memcg_oom.may_oom);
+ current->memcg_oom.may_oom = 0;
}
static inline bool task_in_memcg_oom(struct task_struct *p)
{
- return p->memcg_oom.in_memcg_oom;
+ return p->memcg_oom.memcg;
}
-bool mem_cgroup_oom_synchronize(void);
+bool mem_cgroup_oom_synchronize(bool wait);
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
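
[The toggle is replaced by a strict arm/disarm pair around the charge
attempt, with the actual OOM handling deferred to
mem_cgroup_oom_synchronize(). A hedged sketch of how a fault path might pair
them, assuming <linux/memcontrol.h> and <linux/mm.h>; do_charge_and_fault()
is a hypothetical stand-in for the real fault/charge work:

	static int fault_with_memcg_oom(struct mm_struct *mm)
	{
		int ret;

		mem_cgroup_oom_enable();	/* arm for current */
		ret = do_charge_and_fault(mm);	/* may hit the memcg limit */
		mem_cgroup_oom_disable();	/* disarm again */

		/*
		 * If a charge failed under OOM but the fault still
		 * completed, clean up the pending OOM state without
		 * waiting; on VM_FAULT_OOM the error path would instead
		 * call mem_cgroup_oom_synchronize(true) to wait for the
		 * OOM handling to finish.
		 */
		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
			mem_cgroup_oom_synchronize(false);

		return ret;
	}
]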
@@ -260,9 +211,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
mem_cgroup_update_page_stat(page, idx, -1);
}
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
- struct mem_cgroup *root);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned);
void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
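
[The restored entry point is driven from zone reclaim rather than through a
filtered iterator. A sketch of the kswapd-style call shape, assuming
<linux/memcontrol.h>; the sc-> accounting fields stand in for the caller's
struct scan_control and are illustrative here:

	unsigned long nr_soft_scanned = 0;
	unsigned long nr_soft_reclaimed;

	/* Reclaim from groups over their soft limit in this zone; the
	 * number of pages scanned is reported back through the
	 * out-parameter. */
	nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, order,
							  GFP_KERNEL,
							  &nr_soft_scanned);
	sc->nr_reclaimed += nr_soft_reclaimed;
	sc->nr_scanned += nr_soft_scanned;
]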
@@ -376,15 +327,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok)
{
}
-static inline struct mem_cgroup *
-mem_cgroup_iter_cond(struct mem_cgroup *root,
- struct mem_cgroup *prev,
- struct mem_cgroup_reclaim_cookie *reclaim,
- mem_cgroup_iter_filter cond)
-{
- /* first call must return non-NULL, second return NULL */
- return (struct mem_cgroup *)(unsigned long)!prev;
-}
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
@@ -437,16 +379,11 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
{
}
-static inline bool mem_cgroup_toggle_oom(bool new)
+static inline void mem_cgroup_oom_enable(void)
{
- return false;
}
-static inline void mem_cgroup_enable_oom(void)
-{
-}
-
-static inline void mem_cgroup_disable_oom(void)
+static inline void mem_cgroup_oom_disable(void)
{
}
@@ -455,7 +392,7 @@ static inline bool task_in_memcg_oom(struct task_struct *p)
return false;
}
-static inline bool mem_cgroup_oom_synchronize(void)
+static inline bool mem_cgroup_oom_synchronize(bool wait)
{
return false;
}
@@ -471,11 +408,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
}
static inline
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
- struct mem_cgroup *root)
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned)
{
- return VISIT;
+ return 0;
}
static inline void mem_cgroup_split_huge_fixup(struct page *head)