author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-08-23 21:38:39 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-08-23 21:38:39 -0700
commit    d0797b39dcd70fe366b114515cb898ac6fecdd99 (patch)
tree      1716f05d10cfe5a52646eda23275a5d773054e81 /kernel
parent    0542170dec523d50e8bed5515e2f7314e738c8d8 (diff)
parent    505c0efd58031923ae01deac16d896607cafa70e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: tweak the sched_runtime_limit tunable
  sched: skip updating rq's next_balance under null SD
  sched: fix broken SMT/MC optimizations
  sched: accounting regression since rc1
  sched: fix sysctl directory permissions
  sched: sched_clock_idle_[sleep|wakeup]_event()
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c        68
-rw-r--r--  kernel/sched_debug.c   3
2 files changed, 53 insertions(+), 18 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 45e17b83b7f1..96e9b82246d2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -262,7 +262,8 @@ struct rq {
s64 clock_max_delta;
unsigned int clock_warps, clock_overflows;
- unsigned int clock_unstable_events;
+ u64 idle_clock;
+ unsigned int clock_deep_idle_events;
u64 tick_timestamp;
atomic_t nr_iowait;
@@ -556,18 +557,40 @@ static inline struct rq *this_rq_lock(void)
}
/*
- * CPU frequency is/was unstable - start new by setting prev_clock_raw:
+ * We are going deep-idle (irqs are disabled):
*/
-void sched_clock_unstable_event(void)
+void sched_clock_idle_sleep_event(void)
{
- unsigned long flags;
- struct rq *rq;
+ struct rq *rq = cpu_rq(smp_processor_id());
- rq = task_rq_lock(current, &flags);
- rq->prev_clock_raw = sched_clock();
- rq->clock_unstable_events++;
- task_rq_unlock(rq, &flags);
+ spin_lock(&rq->lock);
+ __update_rq_clock(rq);
+ spin_unlock(&rq->lock);
+ rq->clock_deep_idle_events++;
}
+EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
+
+/*
+ * We just idled delta nanoseconds (called with irqs disabled):
+ */
+void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+ struct rq *rq = cpu_rq(smp_processor_id());
+ u64 now = sched_clock();
+
+ rq->idle_clock += delta_ns;
+ /*
+ * Override the previous timestamp and ignore all
+ * sched_clock() deltas that occurred while we idled,
+ * and use the PM-provided delta_ns to advance the
+ * rq clock:
+ */
+ spin_lock(&rq->lock);
+ rq->prev_clock_raw = now;
+ rq->clock += delta_ns;
+ spin_unlock(&rq->lock);
+}
+EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
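The intended caller of these two hooks is a platform idle driver whose
time-stamp counter stops in deep C-states. A minimal sketch of such a
caller, assuming hypothetical read_pm_timer() / pm_ticks_to_ns() helpers
and an illustrative do_deep_idle() entry point (none of which are part
of this patch):

	/* irqs are disabled on the idle path, as both hooks require */
	static void example_enter_deep_idle(void)
	{
		u64 t1, t2;

		/* raw sched_clock() may stop now: sync the rq clock first */
		sched_clock_idle_sleep_event();

		t1 = read_pm_timer();	/* hypothetical always-running PM timer */
		do_deep_idle();		/* hypothetical deep C-state entry */
		t2 = read_pm_timer();

		/*
		 * Advance the rq clock by the externally measured sleep
		 * time, overriding whatever sched_clock() did (or did
		 * not do) while we were idle.
		 */
		sched_clock_idle_wakeup_event(pm_ticks_to_ns(t2 - t1));
	}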
/*
* resched_task - mark a task 'to be rescheduled now'.
@@ -2494,7 +2517,7 @@ group_next:
* a think about bumping its value to force at least one task to be
* moved
*/
- if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task/2) {
+ if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
unsigned long tmp, pwr_now, pwr_move;
unsigned int imbn;
@@ -3020,6 +3043,7 @@ static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
struct sched_domain *sd;
/* Earliest time when we have to do rebalance again */
unsigned long next_balance = jiffies + 60*HZ;
+ int update_next_balance = 0;
for_each_domain(cpu, sd) {
if (!(sd->flags & SD_LOAD_BALANCE))
@@ -3056,8 +3080,10 @@ static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
if (sd->flags & SD_SERIALIZE)
spin_unlock(&balancing);
out:
- if (time_after(next_balance, sd->last_balance + interval))
+ if (time_after(next_balance, sd->last_balance + interval)) {
next_balance = sd->last_balance + interval;
+ update_next_balance = 1;
+ }
/*
* Stop the load balance at this level. There is another
@@ -3067,7 +3093,14 @@ out:
if (!balance)
break;
}
- rq->next_balance = next_balance;
+
+ /*
+ * next_balance will be updated only when there is a need.
+ * When the cpu is attached to a null domain, for example, it
+ * will not be updated.
+ */
+ if (likely(update_next_balance))
+ rq->next_balance = next_balance;
}
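The fix is a plain guard-flag pattern: commit the freshly computed
minimum only if the loop actually produced one. Reduced to a
self-contained user-space sketch (all names illustrative, not kernel
API):

	#include <stdbool.h>

	static unsigned long next_event;	/* plays the role of rq->next_balance */

	static void recompute_next_event(const unsigned long *deadline, int n)
	{
		unsigned long next = (unsigned long)-1;	/* "far future" default */
		bool update_next = false;

		for (int i = 0; i < n; i++) {
			if (deadline[i] < next) {	/* cf. time_after() */
				next = deadline[i];
				update_next = true;
			}
		}

		/*
		 * With n == 0 (the "null domain" case) the default would
		 * clobber a perfectly good next_event; keep the old value.
		 */
		if (update_next)
			next_event = next;
	}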
/*
@@ -4890,7 +4923,7 @@ static inline void sched_init_granularity(void)
if (sysctl_sched_granularity > gran_limit)
sysctl_sched_granularity = gran_limit;
- sysctl_sched_runtime_limit = sysctl_sched_granularity * 4;
+ sysctl_sched_runtime_limit = sysctl_sched_granularity * 8;
sysctl_sched_wakeup_granularity = sysctl_sched_granularity / 2;
}
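The effect on the derived tunables, taking a granularity of 10 ms purely
for illustration (the real value is scaled by CPU count and clamped by
gran_limit above):

	sysctl_sched_granularity        = 10000000;	/* 10 ms, assumed */
	sysctl_sched_runtime_limit      = 10000000 * 8;	/* 80 ms, was 40 ms */
	sysctl_sched_wakeup_granularity = 10000000 / 2;	/*  5 ms, unchanged */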
@@ -5234,15 +5267,16 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
static struct ctl_table sd_ctl_dir[] = {
{
.procname = "sched_domain",
- .mode = 0755,
+ .mode = 0555,
},
{0,},
};
static struct ctl_table sd_ctl_root[] = {
{
+ .ctl_name = CTL_KERN,
.procname = "kernel",
- .mode = 0755,
+ .mode = 0555,
.child = sd_ctl_dir,
},
{0,},
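For reference, the convention being restored here: sysctl directory
nodes carry mode 0555 (read and search only), and only leaf entries
meant to be written get write bits. A minimal sketch with made-up names,
in the style of this era's ctl_table API (not the scheduler's actual
tables):

	static int example_value;

	static struct ctl_table example_files[] = {
		{
			.procname	= "value",
			.data		= &example_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,	/* leaf file: world-readable, owner-writable */
			.proc_handler	= proc_dointvec,
		},
		{0,},
	};

	static struct ctl_table example_dir[] = {
		{
			.procname	= "example",
			.mode		= 0555,	/* directory: r-xr-xr-x, never writable */
			.child		= example_files,
		},
		{0,},
	};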
@@ -5318,7 +5352,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
for_each_domain(cpu, sd) {
snprintf(buf, 32, "domain%d", i);
entry->procname = kstrdup(buf, GFP_KERNEL);
- entry->mode = 0755;
+ entry->mode = 0555;
entry->child = sd_alloc_ctl_domain_table(sd);
entry++;
i++;
@@ -5338,7 +5372,7 @@ static void init_sched_domain_sysctl(void)
for (i = 0; i < cpu_num; i++, entry++) {
snprintf(buf, 32, "cpu%d", i);
entry->procname = kstrdup(buf, GFP_KERNEL);
- entry->mode = 0755;
+ entry->mode = 0555;
entry->child = sd_alloc_ctl_cpu_table(i);
}
sd_sysctl_header = register_sysctl_table(sd_ctl_root);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 87e524762b85..ab18f45f2ab2 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -154,10 +154,11 @@ static void print_cpu(struct seq_file *m, int cpu)
P(next_balance);
P(curr->pid);
P(clock);
+ P(idle_clock);
P(prev_clock_raw);
P(clock_warps);
P(clock_overflows);
- P(clock_unstable_events);
+ P(clock_deep_idle_events);
P(clock_max_delta);
P(cpu_load[0]);
P(cpu_load[1]);
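For context, P() in print_cpu() is a stringify-and-print helper; it
expands roughly as follows (paraphrased from sched_debug.c of this era,
field width approximate):

	#define P(x) \
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))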