-rw-r--r--	include/linux/oom.h	5
-rw-r--r--	mm/memcontrol.c	9
-rw-r--r--	mm/page_alloc.c	2
3 files changed, 7 insertions, 9 deletions
diff --git a/include/linux/oom.h b/include/linux/oom.h
index da60007075b5..4cd62677feb9 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -82,6 +82,11 @@ static inline void oom_killer_enable(void)
 	oom_killer_disabled = false;
 }
 
+static inline bool oom_gfp_allowed(gfp_t gfp_mask)
+{
+	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
+}
+
 extern struct task_struct *find_lock_task_mm(struct task_struct *p);
 
 /* sysctls */
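
The new predicate is self-contained enough to exercise outside the kernel. Below is a minimal standalone sketch, not part of the patch: the flag values are copied from include/linux/gfp.h of this kernel generation, and the main() driver is purely illustrative.

/* Illustrative only -- demonstrates the oom_gfp_allowed() truth table. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_WAIT	((gfp_t)0x10u)	/* can sleep */
#define __GFP_IO	((gfp_t)0x40u)	/* can start physical I/O */
#define __GFP_FS	((gfp_t)0x80u)	/* can call down into filesystems */
#define __GFP_NORETRY	((gfp_t)0x1000u)	/* fail fast, do not retry */

#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)

/* The helper introduced by this patch, copied verbatim. */
static inline bool oom_gfp_allowed(gfp_t gfp_mask)
{
	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
}

int main(void)
{
	printf("GFP_KERNEL                 -> %d\n", oom_gfp_allowed(GFP_KERNEL));	/* 1 */
	printf("GFP_NOFS                   -> %d\n", oom_gfp_allowed(GFP_NOFS));	/* 0 */
	printf("GFP_KERNEL | __GFP_NORETRY -> %d\n",
	       oom_gfp_allowed(GFP_KERNEL | __GFP_NORETRY));			/* 0 */
	return 0;
}

GFP_KERNEL allocations may block on filesystem reclaim and retry indefinitely, so they are allowed to fall back to the OOM killer; GFP_NOFS and fail-fast (__GFP_NORETRY) allocations are not.
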
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 13b9d0f221b8..3427de9897a5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2984,21 +2984,14 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 	struct res_counter *fail_res;
 	struct mem_cgroup *_memcg;
 	int ret = 0;
-	bool may_oom;
 
 	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
 	if (ret)
 		return ret;
 
-	/*
-	 * Conditions under which we can wait for the oom_killer. Those are
-	 * the same conditions tested by the core page allocator
-	 */
-	may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
-
 	_memcg = memcg;
 	ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
-				      &_memcg, may_oom);
+				      &_memcg, oom_gfp_allowed(gfp));
 
 	if (ret == -EINTR) {
 		/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3d1d75a6629f..e0412c026e0d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2593,7 +2593,7 @@ rebalance:
 	 * running out of options and have to consider going OOM
 	 */
 	if (!did_some_progress) {
-		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+		if (oom_gfp_allowed(gfp_mask)) {
 			if (oom_killer_disabled)
 				goto nopage;
 			/* Coredumps can quickly deplete all memory reserves */
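
Taken together with the hunk above, the slow path only considers the OOM killer when direct reclaim made no progress, the mask passes oom_gfp_allowed(), and the killer has not been disabled. A minimal sketch condensing those three gates, assuming stand-in types and a local oom_killer_disabled flag (the real logic lives in __alloc_pages_slowpath() and __alloc_pages_may_oom()):

/* Illustrative only -- condenses the slow-path OOM gates shown above. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_FS	((gfp_t)0x80u)
#define __GFP_NORETRY	((gfp_t)0x1000u)

static bool oom_killer_disabled;	/* stand-in for the kernel global */

static bool oom_gfp_allowed(gfp_t gfp_mask)
{
	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
}

/*
 * True when the allocator would be permitted to fall back to the OOM
 * killer: reclaim made no progress, the mask permits an OOM kill, and
 * the killer is not administratively disabled.
 */
static bool should_try_oom(unsigned long did_some_progress, gfp_t gfp_mask)
{
	return !did_some_progress && oom_gfp_allowed(gfp_mask) &&
	       !oom_killer_disabled;
}

int main(void)
{
	printf("no progress, __GFP_FS      -> %d\n",
	       should_try_oom(0, __GFP_FS));			/* 1 */
	printf("no progress, NORETRY       -> %d\n",
	       should_try_oom(0, __GFP_FS | __GFP_NORETRY));	/* 0 */
	printf("made progress, __GFP_FS    -> %d\n",
	       should_try_oom(1, __GFP_FS));			/* 0 */
	return 0;
}
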