author     Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:18 +0200
committer  Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:18 +0200
commit     178be793485d70d871a0fd46b29e9e3e7da636ad (patch)
tree       d7542c2e06e649197d4914e7bfe0ad31e072d58c /kernel/sched.c
parent     1666703af948ae87c87c2bc7121aa34271cc52ab (diff)
sched: do not normalize kernel threads via SysRq-N
Do not normalize kernel threads via SysRq-N: the migration threads,
softlockup threads, etc. might be essential for the system to
function properly. So only zap user tasks.

Pointed out by Andi Kleen.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
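The user-task test the patch adds relies on the convention that kernel threads run without a user address space, i.e. their task_struct carries a NULL mm pointer. A minimal sketch of that idea follows; the helper name is illustrative only and is not part of the patch:

#include <linux/sched.h>

/*
 * Illustrative sketch, not from the patch: a task without an mm is a
 * kernel thread (migration/N, softlockup watchdog threads, etc.),
 * which is exactly what normalize_rt_tasks() now skips.
 */
static inline int task_is_kernel_thread(struct task_struct *p)
{
	return p->mm == NULL;
}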
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index fc61b1fc67d5..791dd08c692f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -365,15 +365,6 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
-static inline int is_migration_thread(struct task_struct *p, struct rq *rq)
-{
-#ifdef CONFIG_SMP
-	return p == rq->migration_thread;
-#else
-	return 0;
-#endif
-}
-
 /*
  * Update the per-runqueue clock, as finegrained as the platform can give
  * us, but without assuming monotonicity, etc.:
@@ -6563,6 +6554,12 @@ void normalize_rt_tasks(void)
 
 	read_lock_irq(&tasklist_lock);
 	do_each_thread(g, p) {
+		/*
+		 * Only normalize user tasks:
+		 */
+		if (!p->mm)
+			continue;
+
 		p->se.exec_start		= 0;
 #ifdef CONFIG_SCHEDSTATS
 		p->se.wait_start		= 0;
@@ -6584,8 +6581,7 @@ void normalize_rt_tasks(void)
 
 		spin_lock_irqsave(&p->pi_lock, flags);
 		rq = __task_rq_lock(p);
-		if (!is_migration_thread(p, rq))
-			normalize_task(rq, p);
+		normalize_task(rq, p);
 
 		__task_rq_unlock(rq);
 		spin_unlock_irqrestore(&p->pi_lock, flags);
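For readability, here is roughly how the normalize_rt_tasks() loop reads after the patch, pieced together from the hunks above. The elided parts (marked with comments) and the while_each_thread()/read_unlock_irq() closing are not visible in the diff and are only approximated:

void normalize_rt_tasks(void)
{
	/* ... declarations and setup elided ... */

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (!p->mm)
			continue;

		/* ... per-task schedstats resets elided ... */

		spin_lock_irqsave(&p->pi_lock, flags);
		rq = __task_rq_lock(p);

		/* Unconditional now: kernel threads were skipped above. */
		normalize_task(rq, p);

		__task_rq_unlock(rq);
		spin_unlock_irqrestore(&p->pi_lock, flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

The effect is that the old is_migration_thread() special case becomes unnecessary: skipping every mm-less task at the top of the loop already covers the migration threads, along with all other kernel threads.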