Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r-- | include/linux/memcontrol.h | 281
1 file changed, 169 insertions, 112 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index cd0e2413c358..f0c4bec6565b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -28,6 +28,7 @@
 #include <linux/eventfd.h>
 #include <linux/mmzone.h>
 #include <linux/writeback.h>
+#include <linux/page-flags.h>
 
 struct mem_cgroup;
 struct page;
@@ -50,6 +51,9 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
 	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
 	MEM_CGROUP_STAT_NSTATS,
+	/* default hierarchy stats */
+	MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
+	MEMCG_NR_STAT,
 };
 
 struct mem_cgroup_reclaim_cookie {
@@ -85,37 +89,13 @@ enum mem_cgroup_events_target {
 	MEM_CGROUP_NTARGETS,
 };
 
-/*
- * Bits in struct cg_proto.flags
- */
-enum cg_proto_flags {
-	/* Currently active and new sockets should be assigned to cgroups */
-	MEMCG_SOCK_ACTIVE,
-	/* It was ever activated; we must disarm static keys on destruction */
-	MEMCG_SOCK_ACTIVATED,
-};
+#ifdef CONFIG_MEMCG
 
-struct cg_proto {
-	struct page_counter	memory_allocated;	/* Current allocated memory. */
-	struct percpu_counter	sockets_allocated;	/* Current number of sockets. */
-	int			memory_pressure;
-	long			sysctl_mem[3];
-	unsigned long		flags;
-	/*
-	 * memcg field is used to find which memcg we belong directly
-	 * Each memcg struct can hold more than one cg_proto, so container_of
-	 * won't really cut.
-	 *
-	 * The elegant solution would be having an inverse function to
-	 * proto_cgroup in struct proto, but that means polluting the structure
-	 * for everybody, instead of just for memcg users.
-	 */
-	struct mem_cgroup	*memcg;
-};
+#define MEM_CGROUP_ID_SHIFT	16
+#define MEM_CGROUP_ID_MAX	USHRT_MAX
 
-#ifdef CONFIG_MEMCG
 struct mem_cgroup_stat_cpu {
-	long count[MEM_CGROUP_STAT_NSTATS];
+	long count[MEMCG_NR_STAT];
 	unsigned long events[MEMCG_NR_EVENTS];
 	unsigned long nr_page_events;
 	unsigned long targets[MEM_CGROUP_NTARGETS];
@@ -174,6 +154,12 @@ struct mem_cgroup_thresholds {
 	struct mem_cgroup_threshold_ary *spare;
 };
 
+enum memcg_kmem_state {
+	KMEM_NONE,
+	KMEM_ALLOCATED,
+	KMEM_ONLINE,
+};
+
 /*
  * The memory controller data structure. The memory controller controls both
  * page cache and RSS per cgroup. We would eventually like to provide
@@ -185,21 +171,25 @@ struct mem_cgroup {
 	/* Accounted resources */
 	struct page_counter memory;
+	struct page_counter swap;
+
+	/* Legacy consumer-oriented counters */
 	struct page_counter memsw;
 	struct page_counter kmem;
+	struct page_counter tcpmem;
 
 	/* Normal memory consumption range */
 	unsigned long low;
 	unsigned long high;
 
+	/* Range enforcement for interrupt charges */
+	struct work_struct high_work;
+
 	unsigned long soft_limit;
 
 	/* vmpressure notifications */
 	struct vmpressure vmpressure;
 
-	/* css_online() has been completed */
-	int initialized;
-
 	/*
 	 * Should the accounting and control be hierarchical, per subtree?
 	 */
@@ -246,14 +236,16 @@ struct mem_cgroup {
 	 */
 	struct mem_cgroup_stat_cpu __percpu *stat;
 
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
-	struct cg_proto tcp_mem;
-#endif
-#if defined(CONFIG_MEMCG_KMEM)
+	unsigned long		socket_pressure;
+
+	/* Legacy tcp memory accounting */
+	bool			tcpmem_active;
+	int			tcpmem_pressure;
+
+#ifndef CONFIG_SLOB
         /* Index in the kmem_cache->memcg_params.memcg_caches array */
 	int kmemcg_id;
-	bool kmem_acct_activated;
-	bool kmem_acct_active;
+	enum memcg_kmem_state kmem_state;
 #endif
 
 	int last_scanned_node;
@@ -275,7 +267,13 @@ struct mem_cgroup {
 	struct mem_cgroup_per_node *nodeinfo[0];
 	/* WARNING: nodeinfo must be the last member here */
 };
-extern struct cgroup_subsys_state *mem_cgroup_root_css;
+
+extern struct mem_cgroup *root_mem_cgroup;
+
+static inline bool mem_cgroup_disabled(void)
+{
+	return !cgroup_subsys_enabled(memory_cgrp_subsys);
+}
 
 /**
  * mem_cgroup_events - count memory events against a cgroup
@@ -294,32 +292,72 @@ static inline void mem_cgroup_events(struct mem_cgroup *memcg,
 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
 
 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
-			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
+			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
+			  bool compound);
 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
-			      bool lrucare);
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
+			      bool lrucare, bool compound);
+void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
+			      bool compound);
 void mem_cgroup_uncharge(struct page *page);
 void mem_cgroup_uncharge_list(struct list_head *page_list);
 
-void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage);
+void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
 
 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
 
 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
-struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
 
 static inline
 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
 }
 
+#define mem_cgroup_from_counter(counter, member)	\
+	container_of(counter, struct mem_cgroup, member)
+
 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
 				   struct mem_cgroup *,
 				   struct mem_cgroup_reclaim_cookie *);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 
+static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+{
+	if (mem_cgroup_disabled())
+		return 0;
+
+	return memcg->css.id;
+}
+
+/**
+ * mem_cgroup_from_id - look up a memcg from an id
+ * @id: the id to look up
+ *
+ * Caller must hold rcu_read_lock() and use css_tryget() as necessary.
+ */
+static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+	struct cgroup_subsys_state *css;
+
+	css = css_from_id(id, &memory_cgrp_subsys);
+	return mem_cgroup_from_css(css);
+}
+
+/**
+ * parent_mem_cgroup - find the accounting parent of a memcg
+ * @memcg: memcg whose parent to find
+ *
+ * Returns the parent memcg, or NULL if this is the root or the memory
+ * controller is in legacy no-hierarchy mode.
+ */
+static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+{
+	if (!memcg->memory.parent)
+		return NULL;
+	return mem_cgroup_from_counter(memcg->memory.parent, memory);
+}
+
 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
 					    struct mem_cgroup *root)
 {
@@ -347,9 +385,11 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
 ino_t page_cgroup_ino(struct page *page);
 
-static inline bool mem_cgroup_disabled(void)
+static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 {
-	return !cgroup_subsys_enabled(memory_cgrp_subsys);
+	if (mem_cgroup_disabled())
+		return true;
+	return !!(memcg->css.flags & CSS_ONLINE);
 }
 
 /*
@@ -360,20 +400,6 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
 		int nr_pages);
 
-static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
-{
-	struct mem_cgroup_per_zone *mz;
-	struct mem_cgroup *memcg;
-
-	if (mem_cgroup_disabled())
-		return true;
-
-	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
-	memcg = mz->memcg;
-
-	return !!(memcg->css.flags & CSS_ONLINE);
-}
-
 static inline
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
@@ -430,36 +456,43 @@ bool mem_cgroup_oom_synchronize(bool wait);
 extern int do_swap_account;
 #endif
 
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
+void lock_page_memcg(struct page *page);
+void unlock_page_memcg(struct page *page);
 
 /**
  * mem_cgroup_update_page_stat - update page state statistics
- * @memcg: memcg to account against
+ * @page: the page
  * @idx: page state item to account
  * @val: number of pages (positive or negative)
  *
- * See mem_cgroup_begin_page_stat() for locking requirements.
+ * The @page must be locked or the caller must use lock_page_memcg()
+ * to prevent double accounting when the page is concurrently being
+ * moved to another memcg:
+ *
+ *   lock_page(page) or lock_page_memcg(page)
+ *   if (TestClearPageState(page))
+ *     mem_cgroup_update_page_stat(page, state, -1);
+ *   unlock_page(page) or unlock_page_memcg(page)
  */
-static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_update_page_stat(struct page *page,
 				 enum mem_cgroup_stat_index idx, int val)
 {
-	VM_BUG_ON(!rcu_read_lock_held());
+	VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
 
-	if (memcg)
-		this_cpu_add(memcg->stat->count[idx], val);
+	if (page->mem_cgroup)
+		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
 }
 
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(memcg, idx, 1);
+	mem_cgroup_update_page_stat(page, idx, 1);
 }
 
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(memcg, idx, -1);
+	mem_cgroup_update_page_stat(page, idx, -1);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -497,8 +530,17 @@ void mem_cgroup_split_huge_fixup(struct page *head);
 #endif
 
 #else /* CONFIG_MEMCG */
+
+#define MEM_CGROUP_ID_SHIFT	0
+#define MEM_CGROUP_ID_MAX	0
+
 struct mem_cgroup;
 
+static inline bool mem_cgroup_disabled(void)
+{
+	return true;
+}
+
 static inline void mem_cgroup_events(struct mem_cgroup *memcg,
 				     enum mem_cgroup_events_index idx,
 				     unsigned int nr)
@@ -513,7 +555,8 @@ static inline bool mem_cgroup_low(struct mem_cgroup *root,
 
 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask,
-					struct mem_cgroup **memcgp)
+					struct mem_cgroup **memcgp,
+					bool compound)
 {
 	*memcgp = NULL;
 	return 0;
@@ -521,12 +564,13 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 
 static inline void mem_cgroup_commit_charge(struct page *page,
 					    struct mem_cgroup *memcg,
-					    bool lrucare)
+					    bool lrucare, bool compound)
 {
 }
 
 static inline void mem_cgroup_cancel_charge(struct page *page,
-					    struct mem_cgroup *memcg)
+					    struct mem_cgroup *memcg,
+					    bool compound)
 {
 }
 
@@ -538,7 +582,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
 {
 }
 
-static inline void mem_cgroup_replace_page(struct page *old, struct page *new)
+static inline void mem_cgroup_migrate(struct page *old, struct page *new)
 {
 }
 
@@ -579,18 +623,25 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
 {
 }
 
-static inline bool mem_cgroup_disabled(void)
+static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
 {
-	return true;
+	return 0;
 }
 
-static inline bool
-mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
+static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+	WARN_ON_ONCE(id);
+	/* XXX: This should always return root_mem_cgroup */
+	return NULL;
+}
+
+static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 {
 	return true;
 }
 
-static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+static inline bool
+mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
 {
 	return true;
 }
@@ -612,12 +663,11 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
+static inline void lock_page_memcg(struct page *page)
 {
-	return NULL;
 }
 
-static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
+static inline void unlock_page_memcg(struct page *page)
 {
 }
 
@@ -643,12 +693,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
 	return false;
 }
 
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
 }
 
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
 }
@@ -671,12 +721,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 }
 #endif /* CONFIG_MEMCG */
 
-enum {
-	UNDER_LIMIT,
-	SOFT_LIMIT,
-	OVER_LIMIT,
-};
-
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
@@ -703,20 +747,33 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
 struct sock;
-#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
 void sock_update_memcg(struct sock *sk);
 void sock_release_memcg(struct sock *sk);
-#else
-static inline void sock_update_memcg(struct sock *sk)
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+#ifdef CONFIG_MEMCG
+extern struct static_key_false memcg_sockets_enabled_key;
+#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
+static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
+	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
+		return true;
+	do {
+		if (time_before(jiffies, memcg->socket_pressure))
+			return true;
+	} while ((memcg = parent_mem_cgroup(memcg)));
+	return false;
 }
-static inline void sock_release_memcg(struct sock *sk)
+#else
+#define mem_cgroup_sockets_enabled 0
+static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
+	return false;
 }
-#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
+#endif
 
-#ifdef CONFIG_MEMCG_KMEM
-extern struct static_key memcg_kmem_enabled_key;
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+extern struct static_key_false memcg_kmem_enabled_key;
 
 extern int memcg_nr_cache_ids;
 void memcg_get_cache_ids(void);
@@ -732,12 +789,12 @@ void memcg_put_cache_ids(void);
 
 static inline bool memcg_kmem_enabled(void)
 {
-	return static_key_false(&memcg_kmem_enabled_key);
+	return static_branch_unlikely(&memcg_kmem_enabled_key);
 }
 
-static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
 {
-	return memcg->kmem_acct_active;
+	return memcg->kmem_state == KMEM_ONLINE;
 }
 
 /*
@@ -757,7 +814,7 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
 void __memcg_kmem_uncharge(struct page *page, int order);
 
 /*
- * helper for acessing a memcg's index. It will be used as an index in the
+ * helper for accessing a memcg's index. It will be used as an index in the
  * child cache array in kmem_cache, and also to derive its name. This function
  * will return -1 when this is not a kmem-limited memcg.
  */
@@ -766,15 +823,13 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return memcg ? memcg->kmemcg_id : -1;
 }
 
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
 void __memcg_kmem_put_cache(struct kmem_cache *cachep);
 
-static inline bool __memcg_kmem_bypass(gfp_t gfp)
+static inline bool __memcg_kmem_bypass(void)
 {
 	if (!memcg_kmem_enabled())
 		return true;
-	if (gfp & __GFP_NOACCOUNT)
-		return true;
 	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
 		return true;
 	return false;
@@ -791,7 +846,9 @@ static inline bool __memcg_kmem_bypass(gfp_t gfp)
 static __always_inline int memcg_kmem_charge(struct page *page,
 					     gfp_t gfp, int order)
 {
-	if (__memcg_kmem_bypass(gfp))
+	if (__memcg_kmem_bypass())
+		return 0;
+	if (!(gfp & __GFP_ACCOUNT))
 		return 0;
 	return __memcg_kmem_charge(page, gfp, order);
 }
@@ -810,16 +867,15 @@ static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
 /**
  * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
  * @cachep: the original global kmem cache
- * @gfp: allocation flags.
  *
  * All memory allocated from a per-memcg cache is charged to the owner memcg.
  */
 static __always_inline struct kmem_cache *
 memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (__memcg_kmem_bypass(gfp))
+	if (__memcg_kmem_bypass())
 		return cachep;
-	return __memcg_kmem_get_cache(cachep);
+	return __memcg_kmem_get_cache(cachep, gfp);
 }
 
 static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
@@ -836,7 +892,7 @@ static inline bool memcg_kmem_enabled(void)
 	return false;
 }
 
-static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
 {
 	return false;
 }
@@ -872,5 +928,6 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
 }
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
+
 #endif /* _LINUX_MEMCONTROL_H */
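
For context on using the reworked interface: the kernel-doc comment added to mem_cgroup_update_page_stat() above spells out the new locking rule, the caller must hold the page lock or take lock_page_memcg() so a statistic cannot be accounted against two cgroups while the page is being moved. The sketch below shows a caller following that rule; only lock_page_memcg(), unlock_page_memcg(), mem_cgroup_dec_page_stat() and MEM_CGROUP_STAT_WRITEBACK come from this header, the wrapper function itself is hypothetical.

#include <linux/memcontrol.h>
#include <linux/page-flags.h>

/*
 * Hypothetical example: clear a page's writeback state and keep the
 * owning memcg's MEM_CGROUP_STAT_WRITEBACK counter in sync, following
 * the locking rule documented for mem_cgroup_update_page_stat().
 */
static void example_end_writeback_accounting(struct page *page)
{
	lock_page_memcg(page);		/* keep page->mem_cgroup stable */
	if (TestClearPageWriteback(page))
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
	unlock_page_memcg(page);
}

Note also the change of default for kernel-memory charging visible in memcg_kmem_charge(): it now returns early unless the caller passes __GFP_ACCOUNT, replacing the removed __GFP_NOACCOUNT opt-out, so kmem pages are charged only when explicitly requested.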