-rw-r--r-- | include/linux/ptrace.h | 35 |
-rw-r--r-- | kernel/signal.c        | 33 |
2 files changed, 67 insertions(+), 1 deletion(-)
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 515bff053de8..6ab80714a916 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -204,6 +204,41 @@ static inline void user_enable_block_step(struct task_struct *task)
 }
 #endif	/* arch_has_block_step */
 
+#ifndef arch_ptrace_stop_needed
+/**
+ * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
+ * @code:	current->exit_code value ptrace will stop with
+ * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
+ *
+ * This is called with the siglock held, to decide whether or not it's
+ * necessary to release the siglock and call arch_ptrace_stop() with the
+ * same @code and @info arguments. It can be defined to a constant if
+ * arch_ptrace_stop() is never required, or always is. On machines where
+ * this makes sense, it should be defined to a quick test to optimize out
+ * calling arch_ptrace_stop() when it would be superfluous. For example,
+ * if the thread has not been back to user mode since the last stop, the
+ * thread state might indicate that nothing needs to be done.
+ */
+#define arch_ptrace_stop_needed(code, info)	(0)
+#endif
+
+#ifndef arch_ptrace_stop
+/**
+ * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
+ * @code:	current->exit_code value ptrace will stop with
+ * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
+ *
+ * This is called with no locks held when arch_ptrace_stop_needed() has
+ * just returned nonzero. It is allowed to block, e.g. for user memory
+ * access. The arch can have machine-specific work to be done before
+ * ptrace stops. On ia64, register backing store gets written back to user
+ * memory here. Since this can be costly (requires dropping the siglock),
+ * we only do it when the arch requires it for this particular stop, as
+ * indicated by arch_ptrace_stop_needed().
+ */
+#define arch_ptrace_stop(code, info)		do { } while (0)
+#endif
+
 #endif
 
 #endif
diff --git a/kernel/signal.c b/kernel/signal.c
index e46971560fcb..5d30ff561847 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1578,6 +1578,17 @@ static inline int may_ptrace_stop(void)
 }
 
 /*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static int sigkill_pending(struct task_struct *tsk)
+{
+	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
+		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
+		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
+}
+
+/*
  * This must be called with current->sighand->siglock held.
  *
  * This should be the path for all ptrace stops.
@@ -1590,6 +1601,26 @@ static inline int may_ptrace_stop(void)
  */
 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 {
+	int killed = 0;
+
+	if (arch_ptrace_stop_needed(exit_code, info)) {
+		/*
+		 * The arch code has something special to do before a
+		 * ptrace stop. This is allowed to block, e.g. for faults
+		 * on user stack pages. We can't keep the siglock while
+		 * calling arch_ptrace_stop, so we must release it now.
+		 * To preserve proper semantics, we must do this before
+		 * any signal bookkeeping like checking group_stop_count.
+		 * Meanwhile, a SIGKILL could come in before we retake the
+		 * siglock. That must prevent us from sleeping in TASK_TRACED.
+		 * So after regaining the lock, we must check for SIGKILL.
+		 */
+		spin_unlock_irq(&current->sighand->siglock);
+		arch_ptrace_stop(exit_code, info);
+		spin_lock_irq(&current->sighand->siglock);
+		killed = sigkill_pending(current);
+	}
+
 	/*
 	 * If there is a group stop in progress,
 	 * we must participate in the bookkeeping.
@@ -1605,7 +1636,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 	spin_unlock_irq(&current->sighand->siglock);
 	try_to_freeze();
 	read_lock(&tasklist_lock);
-	if (may_ptrace_stop()) {
+	if (!unlikely(killed) && may_ptrace_stop()) {
 		do_notify_parent_cldstop(current, CLD_TRAPPED);
 		read_unlock(&tasklist_lock);
 		schedule();
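For illustration, here is a minimal sketch of how an architecture might plug into the two new hooks from its asm/ptrace.h. The helper foo_sync_user_state(), the TIF_SHADOW_STATE_DIRTY flag, and the arch/foo path are hypothetical stand-ins invented for this example, not part of this patch or of any real port; a port such as ia64 would substitute its own "write per-thread state back to user memory" primitive and its own cheap dirtiness test, as the kerneldoc above suggests.

/*
 * Hypothetical arch/foo/include/asm/ptrace.h fragment (example only).
 * Assumes the usual thread_info/current definitions are already visible
 * to this header, as they are for the real per-arch ptrace.h headers.
 */
struct task_struct;

/*
 * Hypothetical helper: writes this thread's stale user-visible state back
 * to user memory. Runs with no locks held and may fault on user pages.
 */
extern void foo_sync_user_state(struct task_struct *tsk);

/*
 * Quick test made under the siglock: only ask ptrace_stop() to drop the
 * lock and call arch_ptrace_stop() when this thread actually has work to
 * do. TIF_SHADOW_STATE_DIRTY is a made-up flag the arch would set when
 * the thread's user-visible state goes stale.
 */
#define arch_ptrace_stop_needed(code, info) \
	test_thread_flag(TIF_SHADOW_STATE_DIRTY)

/* The costly part, done with no locks held, as documented above. */
#define arch_ptrace_stop(code, info) \
	foo_sync_user_state(current)

Architectures that define nothing get the generic defaults, where arch_ptrace_stop_needed() is a constant 0, so the unlock/relock dance and the sigkill_pending() recheck in ptrace_stop() are compiled out entirely where they are not needed.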