author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-04-19 19:45:00 +0200
committer  Ingo Molnar <mingo@elte.hu>                2008-04-19 19:45:00 +0200
commit     eff766a65c60237bfa865160c3129de31fab591b (patch)
tree       c003be272be4bd614485606c77893295f3b175a2 /kernel
parent     ec7dc8ac73e4a56ed03b673f026f08c0d547f597 (diff)
sched: fix the task_group hierarchy for UID grouping
UID grouping doesn't actually have a task_group representing the root of
the task_group tree. Add one.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
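For orientation, a minimal comment-diagram of the hierarchy this patch
establishes (the non-zero UIDs shown are hypothetical examples, not taken
from the patch):

/*
 * task_group tree under CONFIG_USER_SCHED after this patch:
 *
 *   root_task_group                    (new; root of the whole tree)
 *     +-- init_task_group              (UID 0, i.e. the root user)
 *     +-- task_group for UID 1000      (sched_create_group(&root_task_group))
 *     +-- task_group for UID 1001
 *     ...
 *
 * Without CONFIG_USER_SCHED no separate root exists and the patch simply
 * aliases the two names:
 *
 *   #define root_task_group init_task_group
 */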
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  43
-rw-r--r--  kernel/user.c    2

2 files changed, 42 insertions(+), 3 deletions(-)
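A note on the sched_init() hunks below: the single bootmem allocation is
carved into consecutive nr_cpu_ids-sized pointer arrays, one pair per group
per scheduling class, which is why alloc_size is doubled under
CONFIG_USER_SCHED: root_task_group needs the same arrays as
init_task_group. A standalone sketch of that carving pattern (plain C with
malloc() and hypothetical variable names; the kernel itself uses
alloc_bootmem()):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int nr_cpu_ids = 4;	/* example CPU count */
	/* one se[] and one cfs_rq[] pointer array per group */
	size_t alloc_size = 2 * nr_cpu_ids * sizeof(void **);

	alloc_size *= 2;	/* double it: root_task_group gets its own pair */

	char *ptr = malloc(alloc_size);
	if (!ptr)
		return 1;

	void **init_se     = (void **)ptr;  ptr += nr_cpu_ids * sizeof(void **);
	void **init_cfs_rq = (void **)ptr;  ptr += nr_cpu_ids * sizeof(void **);
	void **root_se     = (void **)ptr;  ptr += nr_cpu_ids * sizeof(void **);
	void **root_cfs_rq = (void **)ptr;

	printf("init_se=%p init_cfs_rq=%p root_se=%p root_cfs_rq=%p\n",
	       (void *)init_se, (void *)init_cfs_rq,
	       (void *)root_se, (void *)root_cfs_rq);
	return 0;
}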
diff --git a/kernel/sched.c b/kernel/sched.c
index f9c8da798bbf..e03b45ccf789 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -274,6 +274,14 @@ struct task_group {
};
#ifdef CONFIG_USER_SCHED
+
+/*
+ * Root task group.
+ * Every UID task group (including init_task_group aka UID-0) will
+ * be a child to this group.
+ */
+struct task_group root_task_group;
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/* Default task group's sched entity on each cpu */
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
@@ -285,6 +293,8 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
#endif
+#else
+#define root_task_group init_task_group
#endif
/* task_group_lock serializes add/remove of task groups and also changes to
@@ -7508,6 +7518,9 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
+#ifdef CONFIG_USER_SCHED
+ alloc_size *= 2;
+#endif
/*
* As sched_init() is called before page_alloc is setup,
* we use alloc_bootmem().
@@ -7521,12 +7534,29 @@ void __init sched_init(void)
init_task_group.cfs_rq = (struct cfs_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
+
+#ifdef CONFIG_USER_SCHED
+ root_task_group.se = (struct sched_entity **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+
+ root_task_group.cfs_rq = (struct cfs_rq **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+#endif
#endif
#ifdef CONFIG_RT_GROUP_SCHED
init_task_group.rt_se = (struct sched_rt_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
init_task_group.rt_rq = (struct rt_rq **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+
+#ifdef CONFIG_USER_SCHED
+ root_task_group.rt_se = (struct sched_rt_entity **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+
+ root_task_group.rt_rq = (struct rt_rq **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+#endif
#endif
}
@@ -7540,6 +7570,10 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
init_rt_bandwidth(&init_task_group.rt_bandwidth,
global_rt_period(), global_rt_runtime());
+#ifdef CONFIG_USER_SCHED
+ init_rt_bandwidth(&root_task_group.rt_bandwidth,
+ global_rt_period(), RUNTIME_INF);
+#endif
#endif
#ifdef CONFIG_GROUP_SCHED
@@ -7582,6 +7616,8 @@ void __init sched_init(void)
*/
init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
#elif defined CONFIG_USER_SCHED
+ root_task_group.shares = NICE_0_LOAD;
+ init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
/*
* In case of task-groups formed thr' the user id of tasks,
* init_task_group represents tasks belonging to root user.
@@ -7595,7 +7631,8 @@ void __init sched_init(void)
*/
init_tg_cfs_entry(&init_task_group,
&per_cpu(init_cfs_rq, i),
- &per_cpu(init_sched_entity, i), i, 1, NULL);
+ &per_cpu(init_sched_entity, i), i, 1,
+ root_task_group.se[i]);
#endif
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -7606,9 +7643,11 @@ void __init sched_init(void)
#ifdef CONFIG_CGROUP_SCHED
init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
#elif defined CONFIG_USER_SCHED
+ init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
init_tg_rt_entry(&init_task_group,
&per_cpu(init_rt_rq, i),
- &per_cpu(init_sched_rt_entity, i), i, 1, NULL);
+ &per_cpu(init_sched_rt_entity, i), i, 1,
+ root_task_group.rt_se[i]);
#endif
#endif
diff --git a/kernel/user.c b/kernel/user.c
index a28d9f992468..debce602bfdd 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -101,7 +101,7 @@ static int sched_create_user(struct user_struct *up)
{
int rc = 0;
- up->tg = sched_create_group(NULL);
+ up->tg = sched_create_group(&root_task_group);
if (IS_ERR(up->tg))
rc = -ENOMEM;
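The user.c hunk above is the visible effect of the new root: per-user
groups are now created as children of root_task_group instead of having no
parent. A minimal sketch of the same call pattern in isolation (error
handling mirrors sched_create_user(); the tg variable is local to the
sketch):

	int rc = 0;
	struct task_group *tg;

	tg = sched_create_group(&root_task_group);	/* child of the new root */
	if (IS_ERR(tg))
		rc = -ENOMEM;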