author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-25 12:47:56 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-25 12:47:56 -0700
commit | 0b79dada976198cb1a4c043068e3b44d5cab2a5a (patch)
tree | fde938c698545a7c8764a2eac4304dee002bb281
parent | 50be4917ee70218f59e04dec029121b97fb9cb3d (diff)
parent | 3f5087a2bae5d1ce10a3d698dec8f879a96f5419 (diff)
download | talos-op-linux-0b79dada976198cb1a4c043068e3b44d5cab2a5a.tar.gz, talos-op-linux-0b79dada976198cb1a4c043068e3b44d5cab2a5a.zip
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-fixes
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-fixes:
sched: fix share (re)distribution
softlockup: fix NOHZ wakeup
seqlock: livelock fix
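
Of the three fixes listed above, the seqlock change is the most subtle. After this merge, read_seqbegin() spins (with cpu_relax()) until the sequence count is even, i.e. until no writer is in flight, so the read-side critical section never runs against a half-completed update; read_seqretry() then only has to check that the sequence is unchanged, instead of the old xor expression that also folded in the odd-start case. Below is a minimal user-space sketch of the new reader semantics, using C11 atomics in place of the kernel's smp_rmb(); all mock_* names are invented for this illustration and are not the kernel API.

#include <stdatomic.h>
#include <stdio.h>

struct mock_seqlock {
	atomic_uint sequence;		/* odd while a writer is active */
};

/* Writer side: bump to odd on entry, back to even on exit. */
static void mock_write_begin(struct mock_seqlock *sl)
{
	atomic_fetch_add_explicit(&sl->sequence, 1, memory_order_acq_rel);
}

static void mock_write_end(struct mock_seqlock *sl)
{
	atomic_fetch_add_explicit(&sl->sequence, 1, memory_order_acq_rel);
}

/* Reader side after the fix: wait until no writer is in progress. */
static unsigned mock_read_seqbegin(const struct mock_seqlock *sl)
{
	unsigned ret;

	for (;;) {
		ret = atomic_load_explicit(&sl->sequence, memory_order_acquire);
		if (!(ret & 1))		/* even: no writer active */
			return ret;
		/* writer active: spin, as cpu_relax() does in the kernel */
	}
}

/* Retry only if a writer completed while the read section ran. */
static int mock_read_seqretry(const struct mock_seqlock *sl, unsigned start)
{
	return atomic_load_explicit(&sl->sequence, memory_order_acquire) != start;
}

int main(void)
{
	struct mock_seqlock sl = { .sequence = 0 };
	int data = 42;
	unsigned seq;
	int snapshot;

	/* A writer updates the shared data under the mock seqlock. */
	mock_write_begin(&sl);
	data = 43;
	mock_write_end(&sl);

	/* The canonical reader loop: snapshot, then retry if raced. */
	do {
		seq = mock_read_seqbegin(&sl);
		snapshot = data;	/* the read-side critical section */
	} while (mock_read_seqretry(&sl, seq));

	printf("read %d at sequence %u\n", snapshot, seq);
	return 0;
}

With the old primitives a reader that entered during a write would execute the whole section on possibly inconsistent data before read_seqretry() flagged it; spinning in read_seqbegin() keeps the section from starting at all while a write is in flight, which is the behavior the livelock fix introduces.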
-rw-r--r-- | include/linux/seqlock.h | 46
-rw-r--r-- | kernel/sched.c | 47
-rw-r--r-- | kernel/time/tick-sched.c | 1
3 files changed, 32 insertions, 62 deletions
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 26e4925bc35b..632205ccc25d 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -85,23 +85,29 @@ static inline int write_tryseqlock(seqlock_t *sl)
 /* Start of read calculation -- fetch last complete writer token */
 static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
 {
-	unsigned ret = sl->sequence;
+	unsigned ret;
+
+repeat:
+	ret = sl->sequence;
 	smp_rmb();
+	if (unlikely(ret & 1)) {
+		cpu_relax();
+		goto repeat;
+	}
+
 	return ret;
 }
 
-/* Test if reader processed invalid data.
- * If initial values is odd,
- * then writer had already started when section was entered
- * If sequence value changed
- * then writer changed data while in section
- *
- * Using xor saves one conditional branch.
+/*
+ * Test if reader processed invalid data.
+ *
+ * If sequence value changed then writer changed data while in section.
  */
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned iv)
+static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
 {
 	smp_rmb();
-	return (iv & 1) | (sl->sequence ^ iv);
+
+	return (sl->sequence != start);
 }
 
@@ -122,20 +128,26 @@ typedef struct seqcount {
 /* Start of read using pointer to a sequence counter only. */
 static inline unsigned read_seqcount_begin(const seqcount_t *s)
 {
-	unsigned ret = s->sequence;
+	unsigned ret;
+
+repeat:
+	ret = s->sequence;
 	smp_rmb();
+	if (unlikely(ret & 1)) {
+		cpu_relax();
+		goto repeat;
+	}
 	return ret;
 }
 
-/* Test if reader processed invalid data.
- * Equivalent to: iv is odd or sequence number has changed.
- *                (iv & 1) || (*s != iv)
- * Using xor saves one conditional branch.
+/*
+ * Test if reader processed invalid data because sequence number has changed.
  */
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
+static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
-	return (iv & 1) | (s->sequence ^ iv);
+
+	return s->sequence != start;
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 09ca69b2c17d..9e19287f3359 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1657,42 +1657,6 @@ void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
 }
 
 /*
- * Redistribute tg->shares amongst all tg->cfs_rq[]s.
- */
-static void __aggregate_redistribute_shares(struct task_group *tg)
-{
-	int i, max_cpu = smp_processor_id();
-	unsigned long rq_weight = 0;
-	unsigned long shares, max_shares = 0, shares_rem = tg->shares;
-
-	for_each_possible_cpu(i)
-		rq_weight += tg->cfs_rq[i]->load.weight;
-
-	for_each_possible_cpu(i) {
-		/*
-		 * divide shares proportional to the rq_weights.
-		 */
-		shares = tg->shares * tg->cfs_rq[i]->load.weight;
-		shares /= rq_weight + 1;
-
-		tg->cfs_rq[i]->shares = shares;
-
-		if (shares > max_shares) {
-			max_shares = shares;
-			max_cpu = i;
-		}
-		shares_rem -= shares;
-	}
-
-	/*
-	 * Ensure it all adds up to tg->shares; we can loose a few
-	 * due to rounding down when computing the per-cpu shares.
-	 */
-	if (shares_rem)
-		tg->cfs_rq[max_cpu]->shares += shares_rem;
-}
-
-/*
  * Compute the weight of this group on the given cpus.
  */
 static
@@ -1701,18 +1665,11 @@ void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
 {
 	unsigned long shares = 0;
 	int i;
 
-again:
 	for_each_cpu_mask(i, sd->span)
 		shares += tg->cfs_rq[i]->shares;
-	/*
-	 * When the span doesn't have any shares assigned, but does have
-	 * tasks to run do a machine wide rebalance (should be rare).
-	 */
-	if (unlikely(!shares && aggregate(tg, sd)->rq_weight)) {
-		__aggregate_redistribute_shares(tg);
-		goto again;
-	}
+	if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
+		shares = tg->shares;
 
 	aggregate(tg, sd)->shares = shares;
 }
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d358d4e3a958..b854a895591e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -393,6 +393,7 @@ void tick_nohz_restart_sched_tick(void)
 		sub_preempt_count(HARDIRQ_OFFSET);
 	}
 
+	touch_softlockup_watchdog();
 	/*
 	 * Cancel the scheduled timer and restore the tick
 	 */
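
On the scheduler side, the merge removes the machine-wide __aggregate_redistribute_shares() rebalance pass and instead clamps the aggregated value directly in aggregate_group_shares(): when the span has runnable weight but no shares assigned yet, or when the per-cpu shares sum overshoots the group total, the sum is replaced by tg->shares. A hypothetical stand-alone rendering of that rule is sketched below; the function and parameter names are invented for this sketch, and the kernel code walks sd->span and tg->cfs_rq[] rather than plain arrays.

#include <stdio.h>

/*
 * Illustrative rendering of the new clamping rule from
 * aggregate_group_shares(); not kernel code.
 */
unsigned long aggregate_shares(const unsigned long *cfs_shares, int ncpus,
			       unsigned long rq_weight, unsigned long tg_shares)
{
	unsigned long shares = 0;
	int i;

	/* Sum the shares currently assigned across the span. */
	for (i = 0; i < ncpus; i++)
		shares += cfs_shares[i];

	/*
	 * No shares assigned despite runnable weight, or rounding
	 * pushed the sum past the group total: fall back to the
	 * group total instead of redistributing machine-wide.
	 */
	if ((!shares && rq_weight) || shares > tg_shares)
		shares = tg_shares;

	return shares;
}

int main(void)
{
	unsigned long per_cpu[4] = { 300, 300, 300, 200 };	/* sums to 1100 */

	/* 1100 overshoots a group total of 1024 and gets clamped. */
	printf("%lu\n", aggregate_shares(per_cpu, 4, 1100, 1024));
	return 0;
}

The removed redistribution loop enforced the same bound the hard way, splitting tg->shares proportionally to each runqueue's load.weight and handing any rounding remainder to the largest recipient; the clamp makes the bound explicit and drops the extra machine-wide pass.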