author     Oleg Nesterov <oleg@tv-sign.ru>            2006-03-28 16:11:18 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>      2006-03-28 18:36:43 -0800
commit     6a14c5c9da0b4c34b5be783403c54f0396fcfe77 (patch)
tree       63e40e5761eb5327ae4f713b284c67128cb4d261 /kernel
parent     c81addc9d3a0ebff2155e0cd86f90820ab97147e (diff)
download   blackbird-op-linux-6a14c5c9da0b4c34b5be783403c54f0396fcfe77.tar.gz
           blackbird-op-linux-6a14c5c9da0b4c34b5be783403c54f0396fcfe77.zip
[PATCH] move __exit_signal() to kernel/exit.c
__exit_signal() is private to release_task() now. I think it is better to
make it static in kernel/exit.c and export flush_sigqueue() instead - this
function is much simpler and more straightforward.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c    | 63
-rw-r--r--  kernel/signal.c  | 65
2 files changed, 64 insertions(+), 64 deletions(-)
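
Note: because flush_sigqueue() stops being static in kernel/signal.c (second file below), the complete commit also has to export its prototype from a header so that kernel/exit.c can call it; that hunk is not shown here because the diffstat above is limited to kernel/. A minimal sketch of the declaration, assuming it lives in include/linux/signal.h (the exact header is an assumption, not confirmed by this view):

/* Sketch of the exported prototype; the header location is an assumption,
 * since the header hunk falls outside the kernel/ diffstat shown above. */
struct sigpending;
extern void flush_sigqueue(struct sigpending *queue);
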
diff --git a/kernel/exit.c b/kernel/exit.c
index 77c35efad88c..3823ec89d7b8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -29,6 +29,7 @@
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
+#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
@@ -62,6 +63,68 @@ static void __unhash_process(struct task_struct *p)
remove_parent(p);
}
+/*
+ * This function expects the tasklist_lock write-locked.
+ */
+static void __exit_signal(struct task_struct *tsk)
+{
+ struct signal_struct *sig = tsk->signal;
+ struct sighand_struct *sighand;
+
+ BUG_ON(!sig);
+ BUG_ON(!atomic_read(&sig->count));
+
+ rcu_read_lock();
+ sighand = rcu_dereference(tsk->sighand);
+ spin_lock(&sighand->siglock);
+
+ posix_cpu_timers_exit(tsk);
+ if (atomic_dec_and_test(&sig->count))
+ posix_cpu_timers_exit_group(tsk);
+ else {
+ /*
+ * If there is any task waiting for the group exit
+ * then notify it:
+ */
+ if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+ wake_up_process(sig->group_exit_task);
+ sig->group_exit_task = NULL;
+ }
+ if (tsk == sig->curr_target)
+ sig->curr_target = next_thread(tsk);
+ /*
+ * Accumulate here the counters for all threads but the
+ * group leader as they die, so they can be added into
+ * the process-wide totals when those are taken.
+ * The group leader stays around as a zombie as long
+ * as there are other threads. When it gets reaped,
+ * the exit.c code will add its counts into these totals.
+ * We won't ever get here for the group leader, since it
+ * will have been the last reference on the signal_struct.
+ */
+ sig->utime = cputime_add(sig->utime, tsk->utime);
+ sig->stime = cputime_add(sig->stime, tsk->stime);
+ sig->min_flt += tsk->min_flt;
+ sig->maj_flt += tsk->maj_flt;
+ sig->nvcsw += tsk->nvcsw;
+ sig->nivcsw += tsk->nivcsw;
+ sig->sched_time += tsk->sched_time;
+ sig = NULL; /* Marker for below. */
+ }
+
+ tsk->signal = NULL;
+ cleanup_sighand(tsk);
+ spin_unlock(&sighand->siglock);
+ rcu_read_unlock();
+
+ clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
+ flush_sigqueue(&tsk->pending);
+ if (sig) {
+ flush_sigqueue(&sig->shared_pending);
+ __cleanup_signal(sig);
+ }
+}
+
void release_task(struct task_struct * p)
{
int zap_leader;
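
The comment above the new static __exit_signal() states that it expects tasklist_lock to be write-locked; that is the responsibility of its single caller, release_task(). A simplified sketch of that calling pattern (not the real release_task() body, which among other things also unlinks ptrace state, notifies the parent and reaps a zombie group leader):

/*
 * Sketch only: illustrates the locking contract documented above
 * __exit_signal(), not the actual release_task() implementation.
 */
static void release_task_locking_sketch(struct task_struct *p)
{
	write_lock_irq(&tasklist_lock);   /* __exit_signal() expects this held */
	__exit_signal(p);                 /* resolves to the static copy above */
	write_unlock_irq(&tasklist_lock);
}
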
diff --git a/kernel/signal.c b/kernel/signal.c
index b29c868bd5ee..6ea49f742a2f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,7 +22,6 @@
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
-#include <linux/posix-timers.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/capability.h>
@@ -295,7 +294,7 @@ static void __sigqueue_free(struct sigqueue *q)
kmem_cache_free(sigqueue_cachep, q);
}
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
@@ -322,68 +321,6 @@ void flush_signals(struct task_struct *t)
}
/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
- struct signal_struct *sig = tsk->signal;
- struct sighand_struct *sighand;
-
- BUG_ON(!sig);
- BUG_ON(!atomic_read(&sig->count));
-
- rcu_read_lock();
- sighand = rcu_dereference(tsk->sighand);
- spin_lock(&sighand->siglock);
-
- posix_cpu_timers_exit(tsk);
- if (atomic_dec_and_test(&sig->count))
- posix_cpu_timers_exit_group(tsk);
- else {
- /*
- * If there is any task waiting for the group exit
- * then notify it:
- */
- if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
- wake_up_process(sig->group_exit_task);
- sig->group_exit_task = NULL;
- }
- if (tsk == sig->curr_target)
- sig->curr_target = next_thread(tsk);
- /*
- * Accumulate here the counters for all threads but the
- * group leader as they die, so they can be added into
- * the process-wide totals when those are taken.
- * The group leader stays around as a zombie as long
- * as there are other threads. When it gets reaped,
- * the exit.c code will add its counts into these totals.
- * We won't ever get here for the group leader, since it
- * will have been the last reference on the signal_struct.
- */
- sig->utime = cputime_add(sig->utime, tsk->utime);
- sig->stime = cputime_add(sig->stime, tsk->stime);
- sig->min_flt += tsk->min_flt;
- sig->maj_flt += tsk->maj_flt;
- sig->nvcsw += tsk->nvcsw;
- sig->nivcsw += tsk->nivcsw;
- sig->sched_time += tsk->sched_time;
- sig = NULL; /* Marker for below. */
- }
-
- tsk->signal = NULL;
- cleanup_sighand(tsk);
- spin_unlock(&sighand->siglock);
- rcu_read_unlock();
-
- clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
- flush_sigqueue(&tsk->pending);
- if (sig) {
- flush_sigqueue(&sig->shared_pending);
- __cleanup_signal(sig);
- }
-}
-
-/*
* Flush all handlers for a task.
*/