-rw-r--r--  include/linux/sched.h |  7 +++++++
-rw-r--r--  kernel/sched.c        | 17 +++++++++++++++++
2 files changed, 24 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 94f624aef017..33b9b4841ee7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1348,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
diff --git a/kernel/sched.c b/kernel/sched.c
index a35a92ff38fd..93cf241cfbe9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
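
For reference, a minimal sketch of how a kernel-internal caller might use the new interface. This is not part of the patch: the function name sample_latency() and the printk wording are hypothetical, chosen only to illustrate reading cpu_clock() twice on the same CPU (values are in nanoseconds, since rq_clock() is built on sched_clock()).

/*
 * Illustrative only (not part of the patch): time a short section of
 * kernel code using cpu_clock().
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>

static void sample_latency(void)
{
	/* pin to one CPU so both reads come from the same runqueue clock */
	int cpu = get_cpu();
	unsigned long long t0, t1;

	t0 = cpu_clock(cpu);
	/* ... code being measured ... */
	t1 = cpu_clock(cpu);
	put_cpu();

	printk(KERN_DEBUG "section took %llu ns on cpu %d\n", t1 - t0, cpu);
}

Reading both timestamps from the same CPU's runqueue keeps the measurement monotonic; comparing cpu_clock() values taken on different CPUs can be slightly off, which is the "slightly incorrect" trade-off the comment in the patch refers to.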