author    Frederic Weisbecker <fweisbec@gmail.com>  2012-07-25 07:56:04 +0200
committer Frederic Weisbecker <fweisbec@gmail.com>  2013-01-27 19:23:27 +0100
commit    abf917cd91cbb73952758f9741e2fa65002a48ee (patch)
tree      5f975b87615dcaed9c98bc74b4548d568b92dcbc /kernel
parent    ae8dda5c473bf1a85913942adcaac449e5754bf3 (diff)
cputime: Generic on-demand virtual cputime accounting
If we want to stop the tick beyond idle, we need to be able to account
the cputime without using the tick. Virtual-based cputime accounting
solves that problem by hooking into kernel/user boundaries.

However, implementing CONFIG_VIRT_CPU_ACCOUNTING requires low level
hooks and involves more overhead. But we already have a generic
context tracking subsystem that is required for RCU by archs which
plan to shut down the tick outside idle.

This patch implements a generic virtual-based cputime accounting that
relies on these generic kernel/user hooks.

There are some upsides of doing this:

- This requires no arch code to implement CONFIG_VIRT_CPU_ACCOUNTING
  if context tracking is already built (already necessary for RCU in
  full tickless mode).

- We can rely on the generic context tracking subsystem to dynamically
  (de)activate the hooks, so that we can switch anytime between
  virtual and tick based accounting. This way we don't have the
  overhead of the virtual accounting when the tick is running
  periodically.

And one downside:

- There is probably more overhead than a native virtual-based cputime
  accounting. But this relies on hooks that are already set anyway.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Namhyung Kim <namhyung.kim@lge.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
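[Illustration, not part of the commit: the core idea — charge elapsed
time to whichever context is being left at each kernel/user boundary,
instead of sampling on every tick — can be sketched as a small
standalone C model. All *_model names and the hand-advanced fake clock
below are hypothetical stand-ins, not kernel APIs.]

    /*
     * Minimal userspace model of boundary-based cputime accounting:
     * snapshot a clock at every transition and charge the delta to the
     * context we are leaving.
     */
    #include <stdio.h>

    static unsigned long long fake_clock;  /* stands in for sched_clock() */
    static unsigned long long snap;        /* per-CPU snapshot in the kernel */
    static unsigned long long user_ns, system_ns;

    static unsigned long long delta_model(void)
    {
            unsigned long long d = fake_clock - snap;
            snap = fake_clock;
            return d;
    }

    /* kernel -> user: the elapsed delta was system time */
    static void user_enter_model(void)  { system_ns += delta_model(); }
    /* user -> kernel: the elapsed delta was user time */
    static void user_exit_model(void)   { user_ns   += delta_model(); }

    int main(void)
    {
            fake_clock = 100; user_enter_model();   /* 100 ns of kernel work */
            fake_clock = 350; user_exit_model();    /* 250 ns of user work */
            printf("user=%llu ns system=%llu ns\n", user_ns, system_ns);
            return 0;                               /* user=250 system=100 */
    }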
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/context_tracking.c |  6
-rw-r--r--  kernel/sched/cputime.c    | 61
2 files changed, 61 insertions(+), 6 deletions(-)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 54f471e536dc..9002e92e6372 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -30,8 +30,9 @@ void user_enter(void)
local_irq_save(flags);
if (__this_cpu_read(context_tracking.active) &&
__this_cpu_read(context_tracking.state) != IN_USER) {
- __this_cpu_write(context_tracking.state, IN_USER);
+ vtime_user_enter(current);
rcu_user_enter();
+ __this_cpu_write(context_tracking.state, IN_USER);
}
local_irq_restore(flags);
}
@@ -53,8 +54,9 @@ void user_exit(void)
local_irq_save(flags);
if (__this_cpu_read(context_tracking.state) == IN_USER) {
- __this_cpu_write(context_tracking.state, IN_KERNEL);
rcu_user_exit();
+ vtime_user_exit(current);
+ __this_cpu_write(context_tracking.state, IN_KERNEL);
}
local_irq_restore(flags);
}
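[The reordering in these two hunks is deliberate: the vtime hooks run
while RCU is still watching the CPU (vtime_user_enter() before
rcu_user_enter(), and rcu_user_exit() before vtime_user_exit()), and
the context-tracking state is published last. A hypothetical
standalone model of that invariant — the *_model names are
illustrative, not kernel API:]

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool rcu_watching = true;   /* models RCU's view of this CPU */
    static bool in_user;               /* models context_tracking.state */

    static void vtime_hook_model(void)
    {
            /* The accounting path may rely on RCU, so it must run
             * while RCU is still watching this CPU. */
            assert(rcu_watching);
    }

    static void user_enter_model(void)
    {
            vtime_hook_model();       /* account system time first... */
            rcu_watching = false;     /* ...then let RCU enter user mode */
            in_user = true;           /* publish the new state last */
    }

    static void user_exit_model(void)
    {
            rcu_watching = true;      /* RCU exits user mode first... */
            vtime_hook_model();       /* ...so accounting can run safely */
            in_user = false;
    }

    int main(void)
    {
            user_enter_model();
            user_exit_model();
            printf("in_user=%d\n", in_user);
            return 0;
    }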
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 5849448b981e..1c964eced92c 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -3,6 +3,7 @@
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
+#include <linux/context_tracking.h>
#include "sched.h"
@@ -479,7 +480,9 @@ void vtime_task_switch(struct task_struct *prev)
else
vtime_account_system(prev);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
vtime_account_user(prev);
+#endif
arch_vtime_task_switch(prev);
}
#endif
@@ -495,10 +498,24 @@ void vtime_task_switch(struct task_struct *prev)
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account(struct task_struct *tsk)
{
- if (in_interrupt() || !is_idle_task(tsk))
- vtime_account_system(tsk);
- else
- vtime_account_idle(tsk);
+ if (!in_interrupt()) {
+ /*
+ * If we interrupted user, context_tracking_in_user()
+ * is 1 because context tracking doesn't hook
+ * on irq entry/exit. This way we know if
+ * we need to flush user time on kernel entry.
+ */
+ if (context_tracking_in_user()) {
+ vtime_account_user(tsk);
+ return;
+ }
+
+ if (is_idle_task(tsk)) {
+ vtime_account_idle(tsk);
+ return;
+ }
+ }
+ vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
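[The new vtime_account() dispatch reduces to a small decision
function: user time is flushed if we entered the kernel directly from
user context, idle time if the idle task was running, and system time
otherwise (including any irq/softirq). A sketch mirroring that logic,
with hypothetical names:]

    #include <stdio.h>
    #include <stdbool.h>

    static const char *vtime_bucket_model(bool in_interrupt, bool in_user,
                                          bool idle)
    {
            if (!in_interrupt) {
                    if (in_user)
                            return "user";    /* flush user time on kernel entry */
                    if (idle)
                            return "idle";
            }
            return "system";                  /* irq/softirq or kernel work */
    }

    int main(void)
    {
            printf("%s\n", vtime_bucket_model(false, true,  false)); /* user */
            printf("%s\n", vtime_bucket_model(false, false, true));  /* idle */
            printf("%s\n", vtime_bucket_model(true,  false, false)); /* system */
            return 0;
    }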
@@ -583,3 +600,39 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+static DEFINE_PER_CPU(unsigned long long, cputime_snap);
+
+static cputime_t get_vtime_delta(void)
+{
+ unsigned long long delta;
+
+ delta = sched_clock() - __this_cpu_read(cputime_snap);
+ __this_cpu_add(cputime_snap, delta);
+
+ /* CHECKME: always safe to convert nsecs to cputime? */
+ return nsecs_to_cputime(delta);
+}
+
+void vtime_account_system(struct task_struct *tsk)
+{
+ cputime_t delta_cpu = get_vtime_delta();
+
+ account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
+}
+
+void vtime_account_user(struct task_struct *tsk)
+{
+ cputime_t delta_cpu = get_vtime_delta();
+
+ account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
+}
+
+void vtime_account_idle(struct task_struct *tsk)
+{
+ cputime_t delta_cpu = get_vtime_delta();
+
+ account_idle_time(delta_cpu);
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
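[All three accounting helpers above share get_vtime_delta()'s
snapshot/delta pattern; note that adding the delta back is equivalent
to storing the timestamp just read, since snap + (now - snap) == now.
A hypothetical model (illustrative *_model names) showing that
successive deltas partition the elapsed clock with no time
double-counted or lost between buckets:]

    #include <assert.h>

    static unsigned long long clock_model;    /* stands in for sched_clock() */
    static unsigned long long snap_model;     /* per-CPU cputime_snap */

    static unsigned long long get_vtime_delta_model(void)
    {
            unsigned long long delta = clock_model - snap_model;

            snap_model += delta;  /* mirrors __this_cpu_add(cputime_snap, delta) */
            return delta;
    }

    int main(void)
    {
            unsigned long long total = 0;

            clock_model = 120; total += get_vtime_delta_model();
            clock_model = 300; total += get_vtime_delta_model();
            clock_model = 470; total += get_vtime_delta_model();
            assert(total == 470);  /* deltas sum to the full elapsed time */
            return 0;
    }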