author    Oleg Nesterov <oleg@tv-sign.ru>        2006-03-28 16:11:13 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-03-28 18:36:42 -0800
commit    f63ee72e0fb82e504a0489490babc7612c7cd6c2
tree      85167f8016d1f746135bf1777646a58c44376af1
parent    aa1757f90bea3f598b6e5d04d922a6a60200f1da
[PATCH] introduce lock_task_sighand() helper
Add a lock_task_sighand() helper and convert group_send_sig_info() to use it. Hopefully we will have more users soon.

This patch also removes the '!sighand->count' and '!p->usage' checks; I think they are both bogus, racy and unneeded (but it probably makes sense to restore them as BUG_ON()s).

->sighand is cleared and its ->count is decremented in release_task() with sighand->siglock held, so it is a bug to see '!p->usage || !->count' after we have already locked the sighand and verified it is still the same. On the other hand, an already dead task without ->sighand can have a non-zero ->usage due to ptrace, for example.

If we read a stale value of ->sighand we must see the change after spin_lock(), because that change was made while holding that same old ->sighand.siglock.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
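For illustration, here is a minimal sketch of how a future caller might use the new pair. The function my_task_has_pending() and its body are invented for this example; only lock_task_sighand()/unlock_task_sighand() come from this patch, and the rcu_read_lock() around the call reflects the comment above the helper in kernel/signal.c ("must be called under rcu_read_lock() or with tasklist_lock read-held").

/* Hypothetical caller sketch, not part of this patch. */
static int my_task_has_pending(struct task_struct *tsk)
{
        unsigned long flags;
        int ret = -ESRCH;

        rcu_read_lock();        /* lock_task_sighand() needs RCU or tasklist_lock */
        if (lock_task_sighand(tsk, &flags)) {
                /* tsk->sighand is pinned here: ->siglock held, interrupts off */
                ret = signal_pending(tsk);
                unlock_task_sighand(tsk, &flags);
        }
        rcu_read_unlock();

        return ret;             /* -ESRCH if the sighand was already released */
}

The pattern mirrors the converted group_send_sig_info() below: preset an error code, try to take the sighand lock, and only touch signal state inside the locked region.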
-rw-r--r--  include/linux/sched.h |  9
-rw-r--r--  kernel/signal.c       | 38
2 files changed, 33 insertions(+), 14 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bbcfc873bd98..ca1fd31aae97 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1225,6 +1225,15 @@ static inline void task_unlock(struct task_struct *p)
         spin_unlock(&p->alloc_lock);
 }
 
+extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+                unsigned long *flags);
+
+static inline void unlock_task_sighand(struct task_struct *tsk,
+                unsigned long *flags)
+{
+        spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
+}
+
 #ifndef __HAVE_THREAD_FUNCTIONS
 
 #define task_thread_info(task) (task)->thread_info
diff --git a/kernel/signal.c b/kernel/signal.c
index b0b1ca9daa33..819fa49aa70a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1120,27 +1120,37 @@ void zap_other_threads(struct task_struct *p)
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
+struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+{
+        struct sighand_struct *sighand;
+
+        for (;;) {
+                sighand = rcu_dereference(tsk->sighand);
+                if (unlikely(sighand == NULL))
+                        break;
+
+                spin_lock_irqsave(&sighand->siglock, *flags);
+                if (likely(sighand == tsk->sighand))
+                        break;
+                spin_unlock_irqrestore(&sighand->siglock, *flags);
+        }
+
+        return sighand;
+}
+
 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
         unsigned long flags;
-        struct sighand_struct *sp;
         int ret;
 
-retry:
         ret = check_kill_permission(sig, info, p);
-        if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
-                spin_lock_irqsave(&sp->siglock, flags);
-                if (p->sighand != sp) {
-                        spin_unlock_irqrestore(&sp->siglock, flags);
-                        goto retry;
-                }
-                if ((atomic_read(&sp->count) == 0) ||
-                    (atomic_read(&p->usage) == 0)) {
-                        spin_unlock_irqrestore(&sp->siglock, flags);
-                        return -ESRCH;
+
+        if (!ret && sig) {
+                ret = -ESRCH;
+                if (lock_task_sighand(p, &flags)) {
+                        ret = __group_send_sig_info(sig, info, p);
+                        unlock_task_sighand(p, &flags);
                 }
-                ret = __group_send_sig_info(sig, info, p);
-                spin_unlock_irqrestore(&sp->siglock, flags);
         }
 
         return ret;
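To make the race argument in the changelog concrete, below is a simplified exit-side sketch. It is not the actual release_task()/__exit_signal() code; it only models the property the changelog relies on, namely that ->sighand is cleared while the old sighand's siglock is held.

/*
 * Simplified illustration only -- not the real release_task() path.
 * The old pointer is cleared under its own siglock, so a racing
 * lock_task_sighand() that dereferenced the stale pointer serializes
 * on that same lock and then notices the change when it re-checks.
 */
static void exit_side_sketch(struct task_struct *tsk)
{
        struct sighand_struct *sighand = tsk->sighand;

        spin_lock(&sighand->siglock);
        tsk->sighand = NULL;            /* change made under the old siglock */
        spin_unlock(&sighand->siglock);
}

After the unlock, any lock_task_sighand() caller that grabbed the stale pointer re-reads tsk->sighand under that same lock, sees the mismatch, drops the lock and loops; on the next iteration rcu_dereference() returns NULL and the helper reports the task's sighand as gone.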