author    | Olof Johansson <olof@lixom.net> | 2012-11-05 10:09:12 -0800
committer | Olof Johansson <olof@lixom.net> | 2012-11-05 10:09:12 -0800
commit    | 6d06721570aa0c38d886e3036796d59983963a27 (patch)
tree      | d674cf54e26837bee89095c211ffa362c8546f03 /fs/proc/task_mmu.c
parent    | 148a8698763130c96004ef419b5f0d44a93d413c (diff)
parent    | 54ec52b6dd3b0ba4bc4eb97e7e1b2534705b326c (diff)
Merge branch 'depends/tty' into next/headers
Merging in Greg's tty tree, including a cleanup patch needed by the OMAP serial
header cleanups.
* depends/tty: (305 commits)
tty/serial/8250: Make omap hardware workarounds local to 8250.h
serial/8250/8250_early: Prevent rounding error in uartclk
serial: samsung: use clk_prepare_enable and clk_disable_unprepare
TTY: Report warning when low_latency flag is wrongly used
console: use might_sleep in console_lock
TTY: move tty buffers to tty_port
TTY: add port -> tty link
TTY: tty_buffer, cache pointer to tty->buf
TTY: move TTY_FLUSH* flags to tty_port
TTY: n_tty, propagate n_tty_data
TTY: move ldisc data from tty_struct: locks
TTY: move ldisc data from tty_struct: read_* and echo_* and canon_* stuff
TTY: move ldisc data from tty_struct: bitmaps
TTY: move ldisc data from tty_struct: simple members
TTY: n_tty, add ldisc data to n_tty
TTY: audit, stop accessing tty->icount
TTY: n_tty, remove bogus checks
TTY: n_tty, simplify read_buf+echo_buf allocation
TTY: hci_ldisc, remove invalid check in open
TTY: ldisc, wait for idle ldisc in release
...
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r-- | fs/proc/task_mmu.c | 53
1 file changed, 50 insertions, 3 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 79827ce03e3b..90c63f9392a5 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -90,10 +90,55 @@ static void pad_len_spaces(struct seq_file *m, int len)
 	seq_printf(m, "%*c", len, ' ');
 }
 
+#ifdef CONFIG_NUMA
+/*
+ * These functions are for numa_maps but called in generic **maps seq_file
+ * ->start(), ->stop() ops.
+ *
+ * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
+ * Each mempolicy object is controlled by reference counting. The problem here
+ * is how to avoid accessing a dead mempolicy object.
+ *
+ * Because we're holding mmap_sem while reading seq_file, it's safe to access
+ * each vma's mempolicy; no vma will drop its reference to a mempolicy here.
+ *
+ * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
+ * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
+ * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
+ * guarantee the task never exits under us. But taking task_lock() around
+ * get_vma_policy() causes a lock order problem.
+ *
+ * To access task->mempolicy without a lock, we hold a reference count on the
+ * object pointed to by task->mempolicy and remember it. This guarantees
+ * that task->mempolicy points to a live object or NULL in numa_maps accesses.
+ */
+static void hold_task_mempolicy(struct proc_maps_private *priv)
+{
+	struct task_struct *task = priv->task;
+
+	task_lock(task);
+	priv->task_mempolicy = task->mempolicy;
+	mpol_get(priv->task_mempolicy);
+	task_unlock(task);
+}
+static void release_task_mempolicy(struct proc_maps_private *priv)
+{
+	mpol_put(priv->task_mempolicy);
+}
+#else
+static void hold_task_mempolicy(struct proc_maps_private *priv)
+{
+}
+static void release_task_mempolicy(struct proc_maps_private *priv)
+{
+}
+#endif
+
 static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
 {
 	if (vma && vma != priv->tail_vma) {
 		struct mm_struct *mm = vma->vm_mm;
+		release_task_mempolicy(priv);
 		up_read(&mm->mmap_sem);
 		mmput(mm);
 	}
@@ -132,7 +177,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 
 	tail_vma = get_gate_vma(priv->task->mm);
 	priv->tail_vma = tail_vma;
-
+	hold_task_mempolicy(priv);
 	/* Start with last addr hint */
 	vma = find_vma(mm, last_addr);
 	if (last_addr && vma) {
@@ -159,6 +204,7 @@ out:
 	if (vma)
 		return vma;
 
+	release_task_mempolicy(priv);
 	/* End of vmas has been reached */
 	m->version = (tail_vma != NULL)? 0: -1UL;
 	up_read(&mm->mmap_sem);
@@ -1158,6 +1204,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 	struct vm_area_struct *vma = v;
 	struct numa_maps *md = &numa_priv->md;
 	struct file *file = vma->vm_file;
+	struct task_struct *task = proc_priv->task;
 	struct mm_struct *mm = vma->vm_mm;
 	struct mm_walk walk = {};
 	struct mempolicy *pol;
@@ -1177,7 +1224,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 	walk.private = md;
 	walk.mm = mm;
 
-	pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
+	pol = get_vma_policy(task, vma, vma->vm_start);
 	mpol_to_str(buffer, sizeof(buffer), pol, 0);
 	mpol_cond_put(pol);
 
@@ -1189,7 +1236,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
 		seq_printf(m, " heap");
 	} else {
-		pid_t tid = vm_is_stack(proc_priv->task, vma, is_pid);
+		pid_t tid = vm_is_stack(task, vma, is_pid);
 		if (tid != 0) {
 			/*
 			 * Thread stack in /proc/PID/task/TID/maps or
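
The comment in the hunk above describes the classic get/put lifetime idiom: take your own reference on a shared, refcounted object before a long-running walk so it stays valid even if its owner drops it in the meantime. As a rough illustration only, here is a standalone userspace sketch of that idea; the `struct policy`, `policy_get()`/`policy_put()`, and `struct task` names are hypothetical stand-ins for the kernel's `struct mempolicy`, `mpol_get()`/`mpol_put()`, and `task_struct`, and the real code additionally serializes the snapshot against task exit with `task_lock()`.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a refcounted struct mempolicy. */
struct policy {
	int refcount;
	int mode;
};

static struct policy *policy_get(struct policy *p)
{
	if (p)
		p->refcount++;		/* mirrors mpol_get() */
	return p;
}

static void policy_put(struct policy *p)
{
	if (p && --p->refcount == 0) {	/* mirrors mpol_put() */
		printf("policy freed\n");
		free(p);
	}
}

/* Stand-in for the task whose ->mempolicy may be cleared under us. */
struct task {
	struct policy *mempolicy;
};

int main(void)
{
	struct task task = { .mempolicy = malloc(sizeof(struct policy)) };
	task.mempolicy->refcount = 1;
	task.mempolicy->mode = 42;

	/* Like hold_task_mempolicy(): take our own reference before the walk. */
	struct policy *held = policy_get(task.mempolicy);

	/* The task exits and drops its reference... */
	policy_put(task.mempolicy);
	task.mempolicy = NULL;

	/* ...but our held reference keeps the object valid for the scan. */
	printf("mode is still %d\n", held->mode);

	/* Like release_task_mempolicy(): drop our reference when the walk ends. */
	policy_put(held);
	return 0;
}
```

The pairing in the patch follows the same shape: hold_task_mempolicy() takes the reference in m_start(), and release_task_mempolicy() drops it in vma_stop() or when the end of the vma list is reached.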