From d15bcfdbe1818478891d714343f037cfe60875f0 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:57 +0200
Subject: sched: rename idle_type/SCHED_IDLE

enum idle_type (used by the load-balancer) clashes with the SCHED_IDLE
name that we want to introduce. 'CPU_IDLE' instead of 'SCHED_IDLE' is
more descriptive as well.

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 693f0e6c54d4..2acfb23f3681 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -639,12 +639,11 @@ static inline int sched_info_on(void)
 #endif
 }
 
-enum idle_type
-{
-	SCHED_IDLE,
-	NOT_IDLE,
-	NEWLY_IDLE,
-	MAX_IDLE_TYPES
+enum cpu_idle_type {
+	CPU_IDLE,
+	CPU_NOT_IDLE,
+	CPU_NEWLY_IDLE,
+	CPU_MAX_IDLE_TYPES
 };
 
 /*
@@ -719,14 +718,14 @@ struct sched_domain {
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_cnt[MAX_IDLE_TYPES];
-	unsigned long lb_failed[MAX_IDLE_TYPES];
-	unsigned long lb_balanced[MAX_IDLE_TYPES];
-	unsigned long lb_imbalance[MAX_IDLE_TYPES];
-	unsigned long lb_gained[MAX_IDLE_TYPES];
-	unsigned long lb_hot_gained[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyg[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyq[MAX_IDLE_TYPES];
+	unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
 	/* Active load balancing */
 	unsigned long alb_cnt;
-- cgit v1.2.1


From 0e6aca43e08a62a48d6770e9a159dbec167bf4c6 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:57 +0200
Subject: sched: add SCHED_IDLE policy

this patch adds the SCHED_IDLE policy to sched.h.

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2acfb23f3681..7e74262f98e1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,8 @@
 #define SCHED_FIFO 1
 #define SCHED_RR 2
 #define SCHED_BATCH 3
+/* SCHED_ISO: reserved but not implemented yet */
+#define SCHED_IDLE 5
 
 #ifdef __KERNEL__
-- cgit v1.2.1


From 0437e109e1841607f2988891eaa36c531c6aa6ac Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:57 +0200
Subject: sched: zap the migration init / cache-hot balancing code

the SMP load-balancer uses the boot-time migration-cost estimation
code to attempt to improve the quality of balancing. The reason for
this code is that the discrete priority queues do not preserve the
order of scheduling accurately, so the load-balancer skips tasks that
were running on a CPU 'recently'.

this code is fundamentally fragile: the boot-time migration cost
detector doesn't really work on systems that have large L3 caches, it
caused boot delays on large systems and the whole cache-hot concept
made the balancing code pretty non-deterministic as well.

(and hey, I wrote most of it, so I can say it out loud that it sucks ;-)

under CFS the same purpose of cache affinity can be achieved without
any cache-hot special-case: tasks are sorted in the 'timeline' tree
and the SMP balancer picks tasks from the left side of the tree, thus
the most cache-cold task is balanced automatically.

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7e74262f98e1..8764cda0feca 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -754,12 +754,6 @@ struct sched_domain {
 
 extern int partition_sched_domains(cpumask_t *partition1,
 				    cpumask_t *partition2);
 
-/*
- * Maximum cache size the migration-costs auto-tuning code will
- * search from:
- */
-extern unsigned int max_cache_size;
-
 #endif /* CONFIG_SMP */
-- cgit v1.2.1


From c65cc8705256ad7524c97564b4fe3ca9782bf6d1 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:58 +0200
Subject: sched: uninline set_task_cpu()

uninline set_task_cpu(): CFS will add more code to it.

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8764cda0feca..4b912e753ca0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1633,10 +1633,7 @@ static inline unsigned int task_cpu(const struct task_struct *p)
 	return task_thread_info(p)->cpu;
 }
 
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	task_thread_info(p)->cpu = cpu;
-}
+extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
 
 #else
 
-- cgit v1.2.1


From 1df21055e34b6a68d62cf0c524b9e52deebd7ead Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:58 +0200
Subject: sched: add init_idle_bootup_task()

add the init_idle_bootup_task() callback to the bootup thread,
unused at the moment. (CFS will use it to switch the scheduling
class of the boot thread to the idle class)

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4b912e753ca0..61a111fe2b7a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -195,6 +195,7 @@ struct task_struct;
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
-- cgit v1.2.1


From 9aa7b369819940cb1f3c74ba210516739a32ad95 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:58 +0200
Subject: sched: increase the resolution of smpnice

increase SMP-nice's resolution. This is needed by CFS to implement
SCHED_IDLE and cleaned-up nice-level support. No behavioral changes.
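
For illustration only (an editorial sketch, not part of this commit): nice
levels end up being expressed as per-task load weights in units of
SCHED_LOAD_SCALE, so more bits of resolution means adjacent nice levels do
not collapse to the same weight. The userspace sketch below assumes a
weight ratio of roughly 1.25x per nice level (about a 10% CPU-share step);
the formula and the resulting table are illustrative, not the kernel's
actual lookup table.

/* sketch: approximate nice -> load-weight mapping at 1024-unit resolution */
#include <math.h>
#include <stdio.h>

#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

int main(void)
{
	int nice;

	/* assumption: ~1.25x weight ratio per nice level */
	for (nice = -20; nice <= 19; nice++) {
		long weight = lround(SCHED_LOAD_SCALE / pow(1.25, nice));
		printf("nice %3d -> weight %6ld\n", nice, weight);
	}
	return 0;
}

(build with: cc -O2 sketch.c -lm)
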
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 61a111fe2b7a..d5084e7c48cf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -652,7 +652,14 @@ enum cpu_idle_type {
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
-#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */
+
+/*
+ * Increase resolution of nice-level calculations:
+ */
+#define SCHED_LOAD_SHIFT 10
+#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
+
+#define SCHED_LOAD_SCALE_FUZZ (SCHED_LOAD_SCALE >> 5)
 
 #ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
-- cgit v1.2.1


From bf0f6f24a1ece8988b243aefe84ee613099a9245 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:58 +0200
Subject: sched: cfs core, kernel/sched_fair.c

add kernel/sched_fair.c - which implements the bulk of CFS's
behavioral changes for SCHED_OTHER tasks.

see Documentation/sched-design-CFS.txt for details.

Authors:

 Ingo Molnar
 Dmitry Adamushko
 Srivatsa Vaddagiri
 Mike Galbraith

Signed-off-by: Ingo Molnar
Signed-off-by: Mike Galbraith
Signed-off-by: Dmitry Adamushko
Signed-off-by: Srivatsa Vaddagiri
---
 include/linux/sched.h | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d5084e7c48cf..90420321994f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1243,6 +1243,14 @@ static inline void idle_task_exit(void) {}
 
 extern void sched_idle_next(void);
 
+extern unsigned int sysctl_sched_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_batch_wakeup_granularity;
+extern unsigned int sysctl_sched_stat_granularity;
+extern unsigned int sysctl_sched_runtime_limit;
+extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_features;
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
-- cgit v1.2.1


From 20b8a59f2461e1be911dce2cfafefab9d22e4eee Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:58 +0200
Subject: sched: cfs, core data types

add the CFS data types to sched.h. (the old scheduler is still fully
intact.)
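
For orientation (an editorial illustration, not commit text): the
sched_entity added in the diff below is kept in a time-ordered red-black
tree via its run_node, keyed by fair_key, and the scheduler always runs the
leftmost (smallest-key) entity. A toy userspace model of that pick-next
rule follows; it uses a plain array instead of an rbtree and made-up task
names, and only the fair_key field corresponds to the real structure.

#include <stdio.h>
#include <stdint.h>

/* toy stand-in for sched_entity: only the ordering key and a name */
struct toy_entity {
	int64_t fair_key;	/* smaller key == has run less == runs next */
	const char *comm;
};

/* "leftmost node of the timeline tree" == minimum fair_key */
static struct toy_entity *pick_next(struct toy_entity *rq, int nr)
{
	struct toy_entity *best = &rq[0];
	int i;

	for (i = 1; i < nr; i++)
		if (rq[i].fair_key < best->fair_key)
			best = &rq[i];
	return best;
}

int main(void)
{
	struct toy_entity rq[] = {
		{ .fair_key = 300, .comm = "kthread" },
		{ .fair_key = -50, .comm = "cc1"     },
		{ .fair_key = 120, .comm = "bash"    },
	};

	printf("next task: %s\n", pick_next(rq, 3)->comm);	/* -> cc1 */
	return 0;
}
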
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 82 insertions(+)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 90420321994f..995eb407c234 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -820,6 +820,86 @@ enum sleep_type {
 };
 
 struct prio_array;
+struct rq;
+struct sched_domain;
+
+struct sched_class {
+	struct sched_class *next;
+
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p,
+			      int wakeup, u64 now);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p,
+			      int sleep, u64 now);
+	void (*yield_task) (struct rq *rq, struct task_struct *p);
+
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+
+	struct task_struct * (*pick_next_task) (struct rq *rq, u64 now);
+	void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now);
+
+	int (*load_balance) (struct rq *this_rq, int this_cpu,
+			struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *total_load_moved);
+
+	void (*set_curr_task) (struct rq *rq);
+	void (*task_tick) (struct rq *rq, struct task_struct *p);
+	void (*task_new) (struct rq *rq, struct task_struct *p);
+};
+
+struct load_weight {
+	unsigned long weight, inv_weight;
+};
+
+/*
+ * CFS stats for a schedulable entity (task, task-group etc)
+ *
+ * Current field usage histogram:
+ *
+ *     4 se->block_start
+ *     4 se->run_node
+ *     4 se->sleep_start
+ *     4 se->sleep_start_fair
+ *     6 se->load.weight
+ *     7 se->delta_fair
+ *    15 se->wait_runtime
+ */
+struct sched_entity {
+	long wait_runtime;
+	unsigned long delta_fair_run;
+	unsigned long delta_fair_sleep;
+	unsigned long delta_exec;
+	s64 fair_key;
+	struct load_weight load;	/* for load-balancing */
+	struct rb_node run_node;
+	unsigned int on_rq;
+
+	u64 wait_start_fair;
+	u64 wait_start;
+	u64 exec_start;
+	u64 sleep_start;
+	u64 sleep_start_fair;
+	u64 block_start;
+	u64 sleep_max;
+	u64 block_max;
+	u64 exec_max;
+	u64 wait_max;
+	u64 last_ran;
+
+	u64 sum_exec_runtime;
+	s64 sum_wait_runtime;
+	s64 sum_sleep_runtime;
+	unsigned long wait_runtime_overruns;
+	unsigned long wait_runtime_underruns;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_entity *parent;
+	/* rq on which this entity is (to be) queued: */
+	struct cfs_rq *cfs_rq;
+	/* rq "owned" by this entity/group: */
+	struct cfs_rq *my_q;
+#endif
+};
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -839,6 +919,8 @@ struct task_struct {
 	int prio, static_prio, normal_prio;
 	struct list_head run_list;
 	struct prio_array *array;
+	struct sched_class *sched_class;
+	struct sched_entity se;
 
 	unsigned short ioprio;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
 #endif
-- cgit v1.2.1


From 41b86e9c510ae66639bf29d3201e1d2384a7fde6 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:58 +0200
Subject: sched: make posix-cpu-timers use CFS's accounting information

update the posix-cpu-timers code to use CFS's CPU accounting
information.
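
(Editorial illustration, not part of the commit: this plumbing is what
ultimately backs the POSIX per-thread CPU clocks, so from user space the
nanosecond-granularity accounting is visible through the standard
clock_gettime() interface. The snippet below only uses portable POSIX
calls and assumes nothing about the kernel-internal helpers changed here.)

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* CPU time consumed by this thread, as accounted by the scheduler */
	if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
		printf("thread cpu time: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
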
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 995eb407c234..3e7f1890e55d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -482,7 +482,8 @@ struct signal_struct {
 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
 	 * other than jiffies.)
 	 */
-	unsigned long long sched_time;
+	unsigned long sched_time;
+	unsigned long long sum_sched_runtime;
 
 	/*
 	 * We don't bother to synchronize most readers of this at all,
@@ -1308,7 +1309,7 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 extern unsigned long long
-current_sched_time(const struct task_struct *current_task);
+task_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
-- cgit v1.2.1


From e05606d3301525aa67b081ad9fccade2b31ab35a Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:59 +0200
Subject: sched: clean up the rt priority macros

clean up the rt priority macros, pointed out by Andrew Morton.

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 61 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 36 insertions(+), 25 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3e7f1890e55d..4dcc61cca00a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -525,31 +525,6 @@ struct signal_struct {
 #define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */
 #define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */
 
-
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO 100
-#define MAX_RT_PRIO MAX_USER_RT_PRIO
-
-#define MAX_PRIO (MAX_RT_PRIO + 40)
-
-#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
-#define rt_task(p) rt_prio((p)->prio)
-#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
-#define is_rt_policy(p) ((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
-#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
-
 /*
  * Some day this will be a full-fledged user tracking system..
  */
@@ -1164,6 +1139,42 @@ struct task_struct {
 #endif
 };
 
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space. This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO 100
+#define MAX_RT_PRIO MAX_USER_RT_PRIO
+
+#define MAX_PRIO (MAX_RT_PRIO + 40)
+#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
+
+static inline int rt_prio(int prio)
+{
+	if (unlikely(prio < MAX_RT_PRIO))
+		return 1;
+	return 0;
+}
+
+static inline int rt_task(struct task_struct *p)
+{
+	return rt_prio(p->prio);
+}
+
+static inline int batch_task(struct task_struct *p)
+{
+	return p->policy == SCHED_BATCH;
+}
+
 static inline pid_t process_group(struct task_struct *tsk)
 {
 	return tsk->signal->pgrp;
-- cgit v1.2.1


From f2ac58ee617fd9f6cd9922fbcd291b661d7c9954 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:59 +0200
Subject: sched: remove sleep_type

remove the sleep_type heuristics from the core scheduler - scheduling
policy is implemented in the scheduling-policy modules. (and CFS does
not use sleep-type heuristics)

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 8 --------
 1 file changed, 8 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4dcc61cca00a..be2460e6f55b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -788,13 +788,6 @@ struct mempolicy;
 struct pipe_inode_info;
 struct uts_namespace;
 
-enum sleep_type {
-	SLEEP_NORMAL,
-	SLEEP_NONINTERACTIVE,
-	SLEEP_INTERACTIVE,
-	SLEEP_INTERRUPTED,
-};
-
 struct prio_array;
@@ -905,7 +898,6 @@ struct task_struct {
 	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time;	/* sched_clock time spent running */
-	enum sleep_type sleep_type;
 
 	unsigned int policy;
 	cpumask_t cpus_allowed;
-- cgit v1.2.1


From bb29ab26863c022743143f27956cc0ca362f258c Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:51:59 +0200
Subject: sched: x86, track TSC-unstable events

track TSC-unstable events and propagate them to the scheduler code.
Also allow sched_clock() to be used when the TSC is unstable; the
rq_clock() wrapper creates a reliable clock out of it.

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index be2460e6f55b..fa895b309da0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1321,6 +1321,8 @@ extern void sched_exec(void);
 #define sched_exec() {}
 #endif
 
+extern void sched_clock_unstable_event(void);
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void idle_task_exit(void);
 #else
-- cgit v1.2.1


From 172ba844a8851c3edd13c0a979cdf46bd5e3cc1a Mon Sep 17 00:00:00 2001
From: Balbir Singh
Date: Mon, 9 Jul 2007 18:52:00 +0200
Subject: sched: update delay-accounting to use CFS's precise stats

update delay-accounting to use CFS's precise stats.
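
(Editorial illustration, not commit text: widening these counters to
unsigned long long matters because the precise stats are kept in
nanoseconds, and on 32-bit kernels unsigned long is only 32 bits, so a few
seconds of accumulated delay would already wrap. The example below is a
standalone userspace demonstration of that overflow, using a 32-bit
unsigned int to stand in for a 32-bit kernel's unsigned long; the values
are made up.)

#include <stdio.h>

int main(void)
{
	/* ~5 seconds of runqueue delay, in nanoseconds */
	unsigned long long precise = 5ULL * 1000000000ULL;

	/* a 32-bit counter cannot hold this value and wraps around */
	unsigned int truncated = (unsigned int)precise;

	printf("64-bit counter: %llu ns\n", precise);
	printf("32-bit counter: %u ns (wrapped)\n", truncated);
	return 0;
}
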
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index fa895b309da0..e64dbd4cd829 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -562,13 +562,13 @@ struct reclaim_state;
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
 	/* cumulative counters */
-	unsigned long cpu_time,		/* time spent on the cpu */
-		      run_delay,	/* time spent waiting on a runqueue */
-		      pcnt;		/* # of timeslices run on this cpu */
+	unsigned long pcnt;		/* # of times run on this cpu */
+	unsigned long long cpu_time,	/* time spent on the cpu */
+			   run_delay;	/* time spent waiting on a runqueue */
 
 	/* timestamps */
-	unsigned long last_arrival,	/* when we last ran on a cpu */
-		      last_queued;	/* when we were last queued to run */
+	unsigned long long last_arrival,/* when we last ran on a cpu */
+			   last_queued;	/* when we were last queued to run */
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
-- cgit v1.2.1


From ad46c2c4ebcead75cd364a79b63b134393094fb9 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:52:00 +0200
Subject: sched: clean up fastcall uses of sched_fork()/sched_exit()

sched_fork()/sched_exit() do not need to specify fastcall anymore, as
the x86 kernel defaults to regparm3, and no assembly code calls these
functions.

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index e64dbd4cd829..ce0c5adc9eb0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1416,8 +1416,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
-extern void FASTCALL(sched_exit(struct task_struct * p));
+extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_dead(struct task_struct *p);
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
-- cgit v1.2.1


From 50e645a8a1a91f57dd5d8454620be5f1cb0fc089 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:52:00 +0200
Subject: sched: remove interactivity types from sched.h

remove now-unused types/fields used by the old scheduler.
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index ce0c5adc9eb0..efa3beb007ff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -788,7 +788,6 @@ struct mempolicy;
 struct pipe_inode_info;
 struct uts_namespace;
 
-struct prio_array;
 struct rq;
 struct sched_domain;
 
@@ -884,10 +883,9 @@ struct task_struct {
 	int oncpu;
 #endif
 #endif
-	int load_weight;	/* for niceness load balancing purposes */
+	int prio, static_prio, normal_prio;
 	struct list_head run_list;
-	struct prio_array *array;
 	struct sched_class *sched_class;
 	struct sched_entity se;
 
@@ -895,13 +893,10 @@ struct task_struct {
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
 #endif
-	unsigned long sleep_avg;
-	unsigned long long timestamp, last_ran;
-	unsigned long long sched_time;	/* sched_clock time spent running */
 
 	unsigned int policy;
 	cpumask_t cpus_allowed;
-	unsigned int time_slice, first_time_slice;
+	unsigned int time_slice;
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
-- cgit v1.2.1


From 0c57d5893e4a9857ff22ec9e379f6bdbdad50850 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:52:00 +0200
Subject: sched: remove batch_task()

batch_task() in sched.h is now unused - remove it.

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index efa3beb007ff..aa582be8cafa 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1157,11 +1157,6 @@ static inline int rt_task(struct task_struct *p)
 	return rt_prio(p->prio);
 }
 
-static inline int batch_task(struct task_struct *p)
-{
-	return p->policy == SCHED_BATCH;
-}
-
 static inline pid_t process_group(struct task_struct *tsk)
 {
 	return tsk->signal->pgrp;
-- cgit v1.2.1


From 7dd593608df3f9d4e4531cfe29f28c3a3766a0ee Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:52:00 +0200
Subject: sched: remove old cpu accounting field

remove the old cpu-accounting field from signal_struct, now that the
code is using CFS's stats.

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index aa582be8cafa..c9d65738bb7a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -482,7 +482,6 @@ struct signal_struct {
 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
 	 * other than jiffies.)
 	 */
-	unsigned long sched_time;
 	unsigned long long sum_sched_runtime;
 
 	/*
-- cgit v1.2.1


From 43ae34cb4cd650d1eb4460a8253a8e747ba052ac Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:52:00 +0200
Subject: sched: scheduler debugging, core

scheduler debugging core: implement /proc/sched_debug and
/proc/<pid>/sched files for scheduler debugging.
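
(Usage illustration added in editing, not commit text: both files are
ordinary procfs text files, so a task can dump its own scheduling
statistics with nothing more than stdio. The snippet assumes it runs on a
kernel that actually provides /proc/<pid>/sched, i.e. one built with
CONFIG_SCHED_DEBUG.)

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/sched", "r");

	if (!f) {
		perror("fopen /proc/self/sched");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
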
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index c9d65738bb7a..785ec8465bd3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -132,6 +132,26 @@ extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 extern unsigned long weighted_cpuload(const int cpu);
+struct seq_file;
+struct cfs_rq;
+#ifdef CONFIG_SCHED_DEBUG
+extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_set_task(struct task_struct *p);
+extern void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now);
+#else
+static inline void
+proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{
+}
+static inline void proc_sched_set_task(struct task_struct *p)
+{
+}
+static inline void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now)
+{
+}
+#endif
 
 /*
  * Task state bitmask. NOTE! These bits are also
-- cgit v1.2.1


From 6fb43d7b50e49a36f8be3199141bec473e5ecb00 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 9 Jul 2007 18:52:01 +0200
Subject: sched: micro-optimize mmdrop()

micro-optimize mmdrop(). Improves schedule()'s assembly a bit.

Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux/sched.h')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 785ec8465bd3..cfb680585ab8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1514,7 +1514,7 @@ extern struct mm_struct * mm_alloc(void);
 extern void FASTCALL(__mmdrop(struct mm_struct *));
 static inline void mmdrop(struct mm_struct * mm)
 {
-	if (atomic_dec_and_test(&mm->mm_count))
+	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
 		__mmdrop(mm);
 }
-- cgit v1.2.1
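
(Closing editorial illustration: the unlikely() annotation in the final
hunk is a branch-layout hint; on GCC the kernel macro expands to
__builtin_expect(). A standalone sketch of the same pattern, with a
made-up reference counter instead of mm_count, looks like this.)

#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

static int refcount = 3;

static void slow_free_path(void)
{
	puts("last reference dropped, freeing");
}

static void put_ref(void)
{
	/* dropping the last reference is the rare case -> keep it off the hot path */
	if (unlikely(--refcount == 0))
		slow_free_path();
}

int main(void)
{
	put_ref();
	put_ref();
	put_ref();
	return 0;
}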