authorVenki Pallipadi <venkatesh.pallipadi@intel.com>2008-01-30 13:30:04 +0100
committerIngo Molnar <mingo@elte.hu>2008-01-30 13:30:04 +0100
commit6378ddb592158db4b42197f1bc8666228800e379 (patch)
treef9b1e671dfd12fb221f6140dd231ccb14cd9f27e
parentbbe4d18ac2e058c56adb0cd71f49d9ed3216a405 (diff)
downloadblackbird-op-linux-6378ddb592158db4b42197f1bc8666228800e379.tar.gz
blackbird-op-linux-6378ddb592158db4b42197f1bc8666228800e379.zip
time: track accurate idle time with tick_sched.idle_sleeptime
Current idle time in kstat is based on jiffies and is coarse grained. tick_sched.idle_sleeptime makes some attempt to keep track of idle time in a fine grained manner, but it does not fully handle the time spent in interrupts.

Make tick_sched.idle_sleeptime accurate with respect to time spent handling interrupts, and also add tick_sched.idle_lastupdate, which keeps track of the last time idle_sleeptime was updated. These statistics will be crucial for the cpufreq-ondemand governor, which can then shed some of the conservative guard band it uses today when setting the frequency. The ondemand changes that use the exact idle time are coming soon.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
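The interface this patch exports, get_cpu_idle_time_us(), is what a governor would poll. Below is a minimal sketch of such a caller; the function and its two outputs (cumulative idle time and the last-update timestamp, both in microseconds) come from this patch, while the helper name, the saved-state arguments, and the sampling scheme are illustrative, not the actual ondemand code:

	#include <linux/tick.h>	/* get_cpu_idle_time_us(), added by this patch */

	/*
	 * Hypothetical consumer of the new interface. Treats the
	 * last-update timestamp as the wall reference, which is only
	 * approximate since it advances when idle accounting runs.
	 */
	static u64 cpu_busy_us(int cpu, u64 *prev_idle, u64 *prev_wall)
	{
		u64 cur_wall;
		u64 cur_idle = ge	t_cpu_idle_time_us(cpu, &cur_wall);
		u64 idle_us = cur_idle - *prev_idle;	/* us spent idle    */
		u64 wall_us = cur_wall - *prev_wall;	/* us elapsed total */

		*prev_idle = cur_idle;
		*prev_wall = cur_wall;

		return wall_us > idle_us ? wall_us - idle_us : 0;
	}

Sampled periodically, the returned busy time divided by the wall delta gives the load fraction a governor can scale frequency against, without the jiffies-granularity error of the kstat-based estimate the commit message mentions.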
-rw-r--r--  include/linux/tick.h       6
-rw-r--r--  kernel/softirq.c           7
-rw-r--r--  kernel/time/tick-sched.c  70
3 files changed, 60 insertions, 23 deletions
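Before reading the hunks themselves, the bookkeeping they implement can be viewed as one small state machine over four tick_sched fields. This is a simplified userspace model, assuming a monotonic nanosecond clock; the field names mirror struct tick_sched, but it is a sketch of the protocol, not kernel code:

	#include <stdint.h>

	struct idle_acct {
		int      idle_active;     /* currently inside an idle stretch */
		uint64_t idle_entrytime;  /* when the current stretch began */
		uint64_t idle_sleeptime;  /* accumulated idle time */
		uint64_t idle_lastupdate; /* last time sleeptime was folded */
	};

	/* Models tick_nohz_start_idle(): runs on the way into idle. */
	static void idle_start(struct idle_acct *a, uint64_t now)
	{
		if (a->idle_active) {	/* fold a still-open stretch first */
			a->idle_sleeptime += now - a->idle_entrytime;
			a->idle_lastupdate = now;
		}
		a->idle_entrytime = now;
		a->idle_active = 1;
	}

	/* Models tick_nohz_stop_idle(): runs from irq_enter() and on idle exit. */
	static void idle_stop(struct idle_acct *a, uint64_t now)
	{
		if (a->idle_active) {
			a->idle_sleeptime += now - a->idle_entrytime;
			a->idle_lastupdate = now;
			a->idle_active = 0;
		}
	}

Because irq_enter() runs the stop path before __irq_enter(), interrupt handling time never lands in idle_sleeptime; the call to tick_nohz_start_idle() added at the top of tick_nohz_stop_sched_tick() below restarts the stretch on the way back into idle.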
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f4a1395e05ff..0fadf95debe1 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -51,8 +51,10 @@ struct tick_sched {
unsigned long idle_jiffies;
unsigned long idle_calls;
unsigned long idle_sleeps;
+ int idle_active;
ktime_t idle_entrytime;
ktime_t idle_sleeptime;
+ ktime_t idle_lastupdate;
ktime_t sleep_length;
unsigned long last_jiffies;
unsigned long next_jiffies;
@@ -103,6 +105,8 @@ extern void tick_nohz_stop_sched_tick(void);
extern void tick_nohz_restart_sched_tick(void);
extern void tick_nohz_update_jiffies(void);
extern ktime_t tick_nohz_get_sleep_length(void);
+extern void tick_nohz_stop_idle(int cpu);
+extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
# else
static inline void tick_nohz_stop_sched_tick(void) { }
static inline void tick_nohz_restart_sched_tick(void) { }
@@ -113,6 +117,8 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
return len;
}
+static inline void tick_nohz_stop_idle(int cpu) { }
+static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return 0; }
# endif /* !NO_HZ */
#endif
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8fe1ff40102d..d7837d45419e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -280,9 +280,14 @@ asmlinkage void do_softirq(void)
*/
void irq_enter(void)
{
+#ifdef CONFIG_NO_HZ
+ int cpu = smp_processor_id();
+ if (idle_cpu(cpu) && !in_interrupt())
+ tick_nohz_stop_idle(cpu);
+#endif
__irq_enter();
#ifdef CONFIG_NO_HZ
- if (idle_cpu(smp_processor_id()))
+ if (idle_cpu(cpu))
tick_nohz_update_jiffies();
#endif
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 49e12f6a4bab..63f24b550695 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -143,6 +143,44 @@ void tick_nohz_update_jiffies(void)
local_irq_restore(flags);
}
+void tick_nohz_stop_idle(int cpu)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+ if (ts->idle_active) {
+ ktime_t now, delta;
+ now = ktime_get();
+ delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_lastupdate = now;
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+ ts->idle_active = 0;
+ }
+}
+
+static ktime_t tick_nohz_start_idle(int cpu)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t now, delta;
+
+ now = ktime_get();
+ if (ts->idle_active) {
+ delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_lastupdate = now;
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+ }
+ ts->idle_entrytime = now;
+ ts->idle_active = 1;
+ return now;
+}
+
+u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+ *last_update_time = ktime_to_us(ts->idle_lastupdate);
+ return ktime_to_us(ts->idle_sleeptime);
+}
+
/**
* tick_nohz_stop_sched_tick - stop the idle tick from the idle task
*
@@ -155,13 +193,14 @@ void tick_nohz_stop_sched_tick(void)
unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
unsigned long rt_jiffies;
struct tick_sched *ts;
- ktime_t last_update, expires, now, delta;
+ ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
int cpu;
local_irq_save(flags);
cpu = smp_processor_id();
+ now = tick_nohz_start_idle(cpu);
ts = &per_cpu(tick_cpu_sched, cpu);
/*
@@ -193,19 +232,7 @@ void tick_nohz_stop_sched_tick(void)
}
}
- now = ktime_get();
- /*
- * When called from irq_exit we need to account the idle sleep time
- * correctly.
- */
- if (ts->tick_stopped) {
- delta = ktime_sub(now, ts->idle_entrytime);
- ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
- }
-
- ts->idle_entrytime = now;
ts->idle_calls++;
-
/* Read jiffies and the time when jiffies were updated last */
do {
seq = read_seqbegin(&xtime_lock);
@@ -337,23 +364,22 @@ void tick_nohz_restart_sched_tick(void)
int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
unsigned long ticks;
- ktime_t now, delta;
+ ktime_t now;
- if (!ts->tick_stopped)
+ local_irq_disable();
+ tick_nohz_stop_idle(cpu);
+
+ if (!ts->tick_stopped) {
+ local_irq_enable();
return;
+ }
/* Update jiffies first */
- now = ktime_get();
-
- local_irq_disable();
select_nohz_load_balancer(0);
+ now = ktime_get();
tick_do_update_jiffies64(now);
cpu_clear(cpu, nohz_cpu_mask);
- /* Account the idle time */
- delta = ktime_sub(now, ts->idle_entrytime);
- ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-
/*
* We stopped the tick in idle. Update process times would miss the
* time we slept as update_process_times does only a 1 tick