author    Michal Hocko <mhocko@suse.com>                      2016-05-27 14:27:35 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>      2016-05-27 14:49:37 -0700
commit    e2fe14564d3316d1625ed20bf1083995f4960893 (patch)
tree      6839d455f54a74288b13b1d95c3f39594a6e1638 /mm
parent    f65e91df25aa426289cbcb580ca3183e24979fb1 (diff)
download  talos-obmc-linux-e2fe14564d3316d1625ed20bf1083995f4960893.tar.gz
          talos-obmc-linux-e2fe14564d3316d1625ed20bf1083995f4960893.zip
oom_reaper: close race with exiting task
Tetsuo has reported:
Out of memory: Kill process 443 (oleg's-test) score 855 or sacrifice child
Killed process 443 (oleg's-test) total-vm:493248kB, anon-rss:423880kB, file-rss:4kB, shmem-rss:0kB
sh invoked oom-killer: gfp_mask=0x24201ca(GFP_HIGHUSER_MOVABLE|__GFP_COLD), order=0, oom_score_adj=0
sh cpuset=/ mems_allowed=0
CPU: 2 PID: 1 Comm: sh Not tainted 4.6.0-rc7+ #51
Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/31/2013
Call Trace:
 dump_stack+0x85/0xc8
 dump_header+0x5b/0x394
oom_reaper: reaped process 443 (oleg's-test), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB

In other words:
  __oom_reap_task                 exit_mm
    atomic_inc_not_zero
                                    tsk->mm = NULL
                                    mmput
                                      atomic_dec_and_test # > 0
                                    exit_oom_victim # New victim will be
                                                    # selected
                                  <OOM killer invoked>
                                    # no TIF_MEMDIE task so we can
                                    # select a new one
    unmap_page_range # to release the memory

The race exists even without the oom_reaper, because anybody who pins the
address space and gets preempted might race with exit_mm, but the oom_reaper
made this race more probable.

We can address the oom_reaper part by taking oom_lock in __oom_reap_task,
because that guarantees a new oom victim will not be selected while the oom
reaper might be racing with the exit path.

This doesn't solve the original issue, though, because somebody else might
still be pinning mm_users and so __mmput won't be called to release the
memory, but that is not really reliably solvable: the task gets out of the
OOM killer's sight as soon as it is unhashed from the task_list, so we
cannot guarantee that a new victim won't be selected.

[akpm@linux-foundation.org: fix use of unused `mm', Per Stephen]
[akpm@linux-foundation.org: coding-style fixes]
Fixes: aac453635549 ("mm, oom: introduce oom reaper")
Link: http://lkml.kernel.org/r/1464271493-20008-1-git-send-email-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
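The race and the effect of taking oom_lock can be modelled outside the
kernel. The following is a minimal userspace sketch, not kernel code: the
names (victim_exit, reaper, oom_killer, mm_users, victim_alive) and the
pthread/C11-atomics machinery are stand-ins for exit_mm/mmput,
__oom_reap_task, out_of_memory, mm->mm_users and TIF_MEMDIE. It only shows
the shape of the fix: the reaper pins the address space and reaps while
holding the lock, and the killer must take the same lock before it may
select a new victim.

/* Userspace model only; build with: cc -pthread model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t oom_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int mm_users = 1;          /* the victim's last reference  */
static atomic_bool victim_alive = true;  /* models the TIF_MEMDIE victim */

/* Exit path of the victim: drop mm_users, then clear the victim mark. */
static void *victim_exit(void *arg)
{
	(void)arg;
	if (atomic_fetch_sub(&mm_users, 1) == 1)
		puts("exit path: last mmput, memory released");
	atomic_store(&victim_alive, false);   /* exit_oom_victim() */
	return NULL;
}

/* Reaper: pin the mm and reap it while holding oom_lock. */
static void *reaper(void *arg)
{
	int old;

	(void)arg;
	pthread_mutex_lock(&oom_lock);
	old = atomic_load(&mm_users);
	/* inc-not-zero: only pin the mm if it still has users. */
	while (old && !atomic_compare_exchange_weak(&mm_users, &old, old + 1))
		;
	if (old)
		puts("reaper: pinned mm, unmapping victim's memory");
	else
		puts("reaper: mm already gone, nothing to do");
	pthread_mutex_unlock(&oom_lock);
	if (old)
		atomic_fetch_sub(&mm_users, 1);   /* drop our pin, like mmput_async() */
	return NULL;
}

/* OOM killer: must take oom_lock, so it cannot slip between the reaper's
 * pin and the victim clearing its mark and pick a second victim. */
static void oom_killer(void)
{
	pthread_mutex_lock(&oom_lock);
	if (!atomic_load(&victim_alive))
		puts("oom killer: no victim left, would select a new one");
	else
		puts("oom killer: victim still dying, back off");
	pthread_mutex_unlock(&oom_lock);
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, reaper, NULL);
	pthread_create(&t2, NULL, victim_exit, NULL);
	oom_killer();
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Without the lock, the killer's check could run in the window between the
victim's exit_oom_victim and the reaper's unmap, which is exactly the
window described in the report above.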
Diffstat (limited to 'mm')
-rw-r--r--  mm/oom_kill.c  30
1 file changed, 24 insertions(+), 6 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 326dd14938f0..dfb1ab61fb23 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -443,13 +443,29 @@ static bool __oom_reap_task(struct task_struct *tsk)
 {
 	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
-	struct mm_struct *mm;
+	struct mm_struct *mm = NULL;
 	struct task_struct *p;
 	struct zap_details details = {.check_swap_entries = true,
 				      .ignore_dirty = true};
 	bool ret = true;
 
 	/*
+	 * We have to make sure to not race with the victim exit path
+	 * and cause premature new oom victim selection:
+	 * __oom_reap_task		exit_mm
+	 *   atomic_inc_not_zero
+	 *				  mmput
+	 *				    atomic_dec_and_test
+	 *				  exit_oom_victim
+	 *				[...]
+	 *				out_of_memory
+	 *				  select_bad_process
+	 *				    # no TIF_MEMDIE task selects new victim
+	 *  unmap_page_range # frees some memory
+	 */
+	mutex_lock(&oom_lock);
+
+	/*
 	 * Make sure we find the associated mm_struct even when the particular
 	 * thread has already terminated and cleared its mm.
 	 * We might have race with exit path so consider our work done if there
@@ -457,19 +473,19 @@ static bool __oom_reap_task(struct task_struct *tsk)
 	 */
 	p = find_lock_task_mm(tsk);
 	if (!p)
-		return true;
+		goto unlock_oom;
 
 	mm = p->mm;
 	if (!atomic_inc_not_zero(&mm->mm_users)) {
 		task_unlock(p);
-		return true;
+		goto unlock_oom;
 	}
 
 	task_unlock(p);
 
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		ret = false;
-		goto out;
+		goto unlock_oom;
 	}
 
 	tlb_gather_mmu(&tlb, mm, 0, -1);
@@ -511,13 +527,15 @@ static bool __oom_reap_task(struct task_struct *tsk)
 	 * to release its memory.
 	 */
 	set_bit(MMF_OOM_REAPED, &mm->flags);
-out:
+unlock_oom:
+	mutex_unlock(&oom_lock);
 	/*
 	 * Drop our reference but make sure the mmput slow path is called from a
 	 * different context because we shouldn't risk we get stuck there and
 	 * put the oom_reaper out of the way.
 	 */
-	mmput_async(mm);
+	if (mm)
+		mmput_async(mm);
 	return ret;
 }
 
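The cleanup structure the patch introduces (mm initialized to NULL, every
early return funnelled through the single unlock_oom label, and
mmput_async() called only when a reference was actually taken) is a common
error-handling shape. A hedged userspace sketch of the same shape, with
made-up names (fake_lock, grab_resource, put_resource, reap_one) that are
not part of this commit, might look like this:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_mutex_t fake_lock = PTHREAD_MUTEX_INITIALIZER;

struct resource { int refs; };

static struct resource *grab_resource(void)   /* may fail and return NULL */
{
	return calloc(1, sizeof(struct resource));
}

static void put_resource(struct resource *r)  /* drop the reference */
{
	free(r);
}

static bool reap_one(bool can_lock_target)
{
	struct resource *res = NULL;   /* like "struct mm_struct *mm = NULL" */
	bool ret = true;

	pthread_mutex_lock(&fake_lock);        /* like mutex_lock(&oom_lock) */

	res = grab_resource();
	if (!res)
		goto unlock;                   /* nothing pinned, just unlock */

	if (!can_lock_target) {
		ret = false;                   /* caller should retry later */
		goto unlock;
	}

	/* ... the actual work would happen here, under the lock ... */

unlock:
	pthread_mutex_unlock(&fake_lock);
	if (res)                               /* like "if (mm) mmput_async(mm)" */
		put_resource(res);
	return ret;
}

int main(void)
{
	return reap_one(true) ? 0 : 1;
}

The NULL initialization is what makes the single exit label safe: paths
that bail out before the reference is taken still reach the final "put",
but the put is skipped because the pointer is still NULL.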