-rw-r--r--  fs/exec.c               30
-rw-r--r--  include/linux/ptrace.h   1
-rw-r--r--  kernel/ptrace.c          3
-rw-r--r--  kernel/signal.c         35
4 files changed, 37 insertions, 32 deletions
diff --git a/fs/exec.c b/fs/exec.c
index a5c51646d1ad..b58ba7d127e0 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1368,12 +1368,14 @@ static void format_corename(char *corename, const char *pattern, long signr)
 	*out_ptr = 0;
 }
 
-static void zap_process(struct task_struct *start, int *ptraced)
+static void zap_process(struct task_struct *start)
 {
 	struct task_struct *t;
 	unsigned long flags;
 
 	spin_lock_irqsave(&start->sighand->siglock, flags);
+	start->signal->flags = SIGNAL_GROUP_EXIT;
+	start->signal->group_stop_count = 0;
 
 	t = start;
 	do {
@@ -1381,22 +1383,17 @@ static void zap_process(struct task_struct *start, int *ptraced)
 			t->mm->core_waiters++;
 			sigaddset(&t->pending.signal, SIGKILL);
 			signal_wake_up(t, 1);
-
-			if (unlikely(t->ptrace) &&
-			    unlikely(t->parent->mm == t->mm))
-				*ptraced = 1;
 		}
 	} while ((t = next_thread(t)) != start);
 
 	spin_unlock_irqrestore(&start->sighand->siglock, flags);
 }
 
-static void zap_threads (struct mm_struct *mm)
+static void zap_threads(struct mm_struct *mm)
 {
 	struct task_struct *g, *p;
 	struct task_struct *tsk = current;
 	struct completion *vfork_done = tsk->vfork_done;
-	int traced = 0;
 
 	/*
 	 * Make sure nobody is waiting for us to release the VM,
@@ -1413,29 +1410,12 @@ static void zap_threads (struct mm_struct *mm)
 		do {
 			if (p->mm) {
 				if (p->mm == mm)
-					zap_process(p, &traced);
+					zap_process(p);
 				break;
 			}
 		} while ((p = next_thread(p)) != g);
 	}
 	read_unlock(&tasklist_lock);
-
-	if (unlikely(traced)) {
-		/*
-		 * We are zapping a thread and the thread it ptraces.
-		 * If the tracee went into a ptrace stop for exit tracing,
-		 * we could deadlock since the tracer is waiting for this
-		 * coredump to finish. Detach them so they can both die.
-		 */
-		write_lock_irq(&tasklist_lock);
-		do_each_thread(g,p) {
-			if (mm == p->mm && p != tsk &&
-			    p->ptrace && p->parent->mm == mm) {
-				__ptrace_detach(p, 0);
-			}
-		} while_each_thread(g,p);
-		write_unlock_irq(&tasklist_lock);
-	}
 }
 
 static void coredump_wait(struct mm_struct *mm)
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index ee918bc6e18c..8b2749a259dc 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -88,7 +88,6 @@ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __us
 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
 extern int ptrace_attach(struct task_struct *tsk);
 extern int ptrace_detach(struct task_struct *, unsigned int);
-extern void __ptrace_detach(struct task_struct *, unsigned int);
 extern void ptrace_disable(struct task_struct *);
 extern int ptrace_check_attach(struct task_struct *task, int kill);
 extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 6252d2fa2bf3..335c5b932e14 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -214,7 +214,7 @@ out:
 	return retval;
 }
 
-void __ptrace_detach(struct task_struct *child, unsigned int data)
+static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
 {
 	child->exit_code = data;
 	/* .. re-parent .. */
@@ -233,6 +233,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 	ptrace_disable(child);
 
 	write_lock_irq(&tasklist_lock);
+	/* protect against de_thread()->release_task() */
 	if (child->ptrace)
 		__ptrace_detach(child, data);
 	write_unlock_irq(&tasklist_lock);
diff --git a/kernel/signal.c b/kernel/signal.c
index 1b3c921737e2..52adf53929f6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1531,6 +1531,35 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 	spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
+static inline int may_ptrace_stop(void)
+{
+	if (!likely(current->ptrace & PT_PTRACED))
+		return 0;
+
+	if (unlikely(current->parent == current->real_parent &&
+		    (current->ptrace & PT_ATTACHED)))
+		return 0;
+
+	if (unlikely(current->signal == current->parent->signal) &&
+	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
+		return 0;
+
+	/*
+	 * Are we in the middle of do_coredump?
+	 * If so and our tracer is also part of the coredump, stopping
+	 * is a deadlock situation, and pointless because our tracer
+	 * is dead, so don't allow us to stop.
+	 * If SIGKILL was already sent before the caller unlocked
+	 * ->siglock, we must see ->core_waiters != 0. Otherwise it
+	 * is safe to enter schedule().
+	 */
+	if (unlikely(current->mm->core_waiters) &&
+	    unlikely(current->mm == current->parent->mm))
+		return 0;
+
+	return 1;
+}
+
 /*
  * This must be called with current->sighand->siglock held.
  *
@@ -1559,11 +1588,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 	spin_unlock_irq(&current->sighand->siglock);
 	try_to_freeze();
 	read_lock(&tasklist_lock);
-	if (likely(current->ptrace & PT_PTRACED) &&
-	    likely(current->parent != current->real_parent ||
-		   !(current->ptrace & PT_ATTACHED)) &&
-	    (likely(current->parent->signal != current->signal) ||
-	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
+	if (may_ptrace_stop()) {
 		do_notify_parent_cldstop(current, CLD_TRAPPED);
 		read_unlock(&tasklist_lock);
 		schedule();
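
The scenario behind both the removed zap_threads() workaround and the new may_ptrace_stop() check is a tracer that shares its mm with the tracee, so one coredump must take down both tasks. Below is a minimal userspace sketch of that topology, assuming clone(CLONE_VM) to share the address space; the names (tracer_fn, tracer_stack) and the SIGSEGV trigger are illustrative, a reproducer outline rather than a test from this commit.

/*
 * Illustrative reproducer outline (not part of the commit): build the
 * p->parent->mm == p->mm case by letting a clone(CLONE_VM) child
 * ptrace-attach its parent, then dump core while the parent sits in
 * ptrace_stop().
 */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static char tracer_stack[64 * 1024];

static int tracer_fn(void *arg)
{
	pid_t tracee = *(pid_t *)arg;

	/* Attach to the mm-sharing parent; once it stops, its ->parent
	 * is us and its ->parent->mm equals its own ->mm. */
	if (ptrace(PTRACE_ATTACH, tracee, NULL, NULL) == -1) {
		perror("PTRACE_ATTACH");
		return 1;
	}
	waitpid(tracee, NULL, 0);

	/* Crash while the tracee is ptrace-stopped: do_coredump() must
	 * zap every user of this mm, the stopped tracee included. */
	raise(SIGSEGV);
	return 0;
}

int main(void)
{
	static pid_t self;

	self = getpid();
	/* CLONE_VM without CLONE_THREAD: a separate process that shares
	 * our mm, so tracer and tracee fall into the same coredump. */
	if (clone(tracer_fn, tracer_stack + sizeof(tracer_stack),
		  CLONE_VM | SIGCHLD, &self) == -1) {
		perror("clone");
		exit(1);
	}
	for (;;)
		pause();	/* stay alive to be attached and zapped */
}

With the pre-patch code, zap_threads() had to detect this pairing and __ptrace_detach() the tracee so both could die; with the patch, ptrace_stop() simply refuses to stop (may_ptrace_stop() returns 0) once ->core_waiters is set and the tracee shares its mm with its tracer. Whether this sketch actually reproduces the old deadlock depends on timing and on a kernel of that era; on current kernels, Yama's ptrace_scope may additionally block the PTRACE_ATTACH.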