| field     | value                                    | date                      |
|-----------|------------------------------------------|---------------------------|
| author    | Ingo Molnar <mingo@elte.hu>              | 2008-08-12 00:11:49 +0200 |
| committer | Ingo Molnar <mingo@elte.hu>              | 2008-08-12 00:11:49 +0200 |
| commit    | 23a0ee908cbfba3264d19729c67c22b20fa73886 |                           |
| tree      | 541103f6283cbac6b82cff88a7b91128acfce046 /kernel/sched.c |           |
| parent    | cc7a486cac78f6fc1a24e8cd63036bae8d2ab431 |                           |
| parent    | 0f2bc27be27ca1dcc66b96131e44bf7648b959c6 |                           |
Merge branch 'core/locking' into core/urgent
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 21
1 file changed, 13 insertions(+), 8 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 04160d277e7a..ace566bdfc68 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -600,7 +600,6 @@ struct rq {
 	/* BKL stats */
 	unsigned int bkl_count;
 #endif
-	struct lock_class_key rq_lock_key;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	} else {
 		if (rq1 < rq2) {
 			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
+			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
 			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
+			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
@@ -2805,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
-			spin_lock(&this_rq->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock(&busiest->lock);
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3637,7 +3643,7 @@ redo:
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3752,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -8000,7 +8006,6 @@ void __init sched_init(void)
 
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
-		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
```
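The idiom being annotated here is the ordered double-lock: whenever two runqueue locks must be held at once, the lock at the lower address is taken first, and the second acquisition is marked with `spin_lock_nested(..., SINGLE_DEPTH_NESTING)` so that lockdep (which, with the per-rq `rq_lock_key` removed, sees all rq locks as a single class) does not report the second lock as same-class recursion. Below is a minimal userspace sketch of the same address-ordering discipline using POSIX mutexes; `struct rq_like`, `double_lock()`, and `double_unlock()` are illustrative names, not part of the patch, and pthreads has no equivalent of the lockdep annotations.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct rq_like {
	pthread_mutex_t lock;
	int nr_running;
};

/*
 * Take both locks in address order, so every caller agrees on the
 * ordering and no two threads can deadlock on the pair. The kernel
 * patch uses the same address comparison; its second acquisition
 * additionally carries SINGLE_DEPTH_NESTING for lockdep, which has
 * no pthreads counterpart, so only the ordering rule is shown here.
 */
static void double_lock(struct rq_like *a, struct rq_like *b)
{
	if (a == b) {
		pthread_mutex_lock(&a->lock);
		return;
	}
	if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void double_unlock(struct rq_like *a, struct rq_like *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct rq_like rq1 = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct rq_like rq2 = { PTHREAD_MUTEX_INITIALIZER, 0 };

	/* "Migrate" a task between the two queues under both locks. */
	double_lock(&rq1, &rq2);
	rq1.nr_running--;
	rq2.nr_running++;
	double_unlock(&rq1, &rq2);

	printf("rq1=%d rq2=%d\n", rq1.nr_running, rq2.nr_running);
	return 0;
}
```

The new `double_unlock_balance()` in the patch also calls `lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_)` after dropping the partner lock: the retained lock may have been acquired with the nesting subclass, and resetting it to subclass 0 keeps lockdep's view consistent once it is the only rq lock still held.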