path: root/kernel/sched.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2009-09-21 09:15:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-21 09:15:07 -0700
commit     43c1266ce4dc06bfd236cec31e11e9ecd69c0bef (patch)
tree       40a86739ca4c36200f447f655b01c57cfe646e26 /kernel/sched.c
parent     b8c7f1dc5ca4e0d10709182233cdab932cef593d (diff)
parent     57c0c15b5244320065374ad2c54f4fbec77a6428 (diff)
Merge branch 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Tidy up after the big rename
  perf: Do the big rename: Performance Counters -> Performance Events
  perf_counter: Rename 'event' to event_id/hw_event
  perf_counter: Rename list_entry -> group_entry, counter_list -> group_list

Manually resolved some fairly trivial conflicts with the tracing tree in
include/trace/ftrace.h and kernel/trace/trace_syscalls.c.
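In kernel/sched.c the change is a straight rename of the scheduler's perf hooks from the old perf_counter names to the new perf_event names. As a quick reference, the snippet below simply collects the renamed call sites as they appear in the hunks further down (arguments copied from the diff); it is an illustrative summary, not a standalone compilable unit:

    #include <linux/perf_event.h>                 /* was <linux/perf_counter.h> */

    /* was perf_swcounter_event() */
    perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);

    /* was perf_counter_task_sched_in() / perf_counter_task_sched_out() */
    perf_event_task_sched_in(current, cpu_of(rq));
    perf_event_task_sched_out(prev, next, cpu);

    /* was perf_counter_task_tick() */
    perf_event_task_tick(curr, cpu);

    /* was perf_counter_init(), called once from sched_init() */
    perf_event_init();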
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 830967e18285..91843ba7f237 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
@@ -2053,7 +2053,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
if (task_hot(p, old_rq->clock, NULL))
schedstat_inc(p, se.nr_forced2_migrations);
#endif
- perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+ perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
1, 1, NULL, 0);
}
p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2718,7 +2718,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
*/
prev_state = prev->state;
finish_arch_switch(prev);
- perf_counter_task_sched_in(current, cpu_of(rq));
+ perf_event_task_sched_in(current, cpu_of(rq));
finish_lock_switch(rq, prev);
fire_sched_in_preempt_notifiers(current);
@@ -5193,7 +5193,7 @@ void scheduler_tick(void)
curr->sched_class->task_tick(rq, curr, 0);
spin_unlock(&rq->lock);
- perf_counter_task_tick(curr, cpu);
+ perf_event_task_tick(curr, cpu);
#ifdef CONFIG_SMP
rq->idle_at_tick = idle_cpu(cpu);
@@ -5409,7 +5409,7 @@ need_resched_nonpreemptible:
if (likely(prev != next)) {
sched_info_switch(prev, next);
- perf_counter_task_sched_out(prev, next, cpu);
+ perf_event_task_sched_out(prev, next, cpu);
rq->nr_switches++;
rq->curr = next;
@@ -7671,7 +7671,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/*
* Register at high priority so that task migration (migrate_all_tasks)
* happens before everything else. This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
+ * the notifier in the perf_event subsystem, though.
*/
static struct notifier_block __cpuinitdata migration_notifier = {
.notifier_call = migration_call,
@@ -9528,7 +9528,7 @@ void __init sched_init(void)
alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* SMP */
- perf_counter_init();
+ perf_event_init();
scheduler_running = 1;
}