path: root/kernel
author    David S. Miller <davem@davemloft.net>  2012-09-28 14:40:49 -0400
committer David S. Miller <davem@davemloft.net>  2012-09-28 14:40:49 -0400
commit    6a06e5e1bb217be077e1f8ee2745b4c5b1aa02db (patch)
tree      8faea23112a11f52524eb413f71b7b02712d8b53 /kernel
parent    d9f72f359e00a45a6cd7cc2d5121b04b9dc927e1 (diff)
parent    6672d90fe779dc0dfffe027c3ede12609df091c2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/team/team.c
	drivers/net/usb/qmi_wwan.c
	net/batman-adv/bat_iv_ogm.c
	net/ipv4/fib_frontend.c
	net/ipv4/route.c
	net/l2tp/l2tp_netlink.c
The team, fib_frontend, route, and l2tp_netlink conflicts were simply overlapping changes. qmi_wwan and bat_iv_ogm were of the "use HEAD" variety.
With help from Antonio Quartulli.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/pid_namespace.c    |  6
-rw-r--r--  kernel/sched/core.c       | 39
-rw-r--r--  kernel/sched/fair.c       | 28
-rw-r--r--  kernel/time/timekeeping.c | 19
-rw-r--r--  kernel/workqueue.c        | 37
5 files changed, 57 insertions(+), 72 deletions(-)
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index baa528d7dfbd..478bad2745e3 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -234,15 +234,19 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
*/
tmp.data = &current->nsproxy->pid_ns->last_pid;
- return proc_dointvec(&tmp, write, buffer, lenp, ppos);
+ return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}
+extern int pid_max;
+static int zero = 0;
static struct ctl_table pid_ns_ctl_table[] = {
{
.procname = "ns_last_pid",
.maxlen = sizeof(int),
.mode = 0666, /* permissions are checked in the handler */
.proc_handler = pid_ns_ctl_handler,
+ .extra1 = &zero,
+ .extra2 = &pid_max,
},
{ }
};
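The handler change above only has an effect because proc_dointvec_minmax() takes its clamping bounds from .extra1/.extra2, so writes to ns_last_pid outside [0, pid_max] are now rejected. For orientation, a minimal sketch of a bounded integer sysctl entry, using a hypothetical "example_val" knob (names are illustrative, not part of the patch):
static int example_val;
static int example_min;		/* lower bound: 0 */
static int example_max = 100;	/* upper bound */
static struct ctl_table example_table[] = {
	{
		.procname	= "example_val",
		.data		= &example_val,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &example_min,	/* writes below this are rejected */
		.extra2		= &example_max,	/* writes above this are rejected */
	},
	{ }
};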
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a4ea245f3d85..649c9f876cb1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6014,11 +6014,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
* SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
* allows us to avoid some pointer chasing select_idle_sibling().
*
- * Iterate domains and sched_groups downward, assigning CPUs to be
- * select_idle_sibling() hw buddy. Cross-wiring hw makes bouncing
- * due to random perturbation self canceling, ie sw buddies pull
- * their counterpart to their CPU's hw counterpart.
- *
* Also keep a unique ID per domain (we use the first cpu number in
* the cpumask of the domain), this allows us to quickly tell if
* two cpus are in the same cache domain, see cpus_share_cache().
@@ -6032,40 +6027,8 @@ static void update_top_cache_domain(int cpu)
int id = cpu;
sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
- if (sd) {
- struct sched_domain *tmp = sd;
- struct sched_group *sg, *prev;
- bool right;
-
- /*
- * Traverse to first CPU in group, and count hops
- * to cpu from there, switching direction on each
- * hop, never ever pointing the last CPU rightward.
- */
- do {
- id = cpumask_first(sched_domain_span(tmp));
- prev = sg = tmp->groups;
- right = 1;
-
- while (cpumask_first(sched_group_cpus(sg)) != id)
- sg = sg->next;
-
- while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
- prev = sg;
- sg = sg->next;
- right = !right;
- }
-
- /* A CPU went down, never point back to domain start. */
- if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
- right = false;
-
- sg = right ? sg->next : prev;
- tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
- } while ((tmp = tmp->child));
-
+ if (sd)
id = cpumask_first(sched_domain_span(sd));
- }
rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
per_cpu(sd_llc_id, cpu) = id;
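The per-cpu sd_llc_id cached here is what makes the cache-domain test mentioned in the comment cheap; roughly, and as a sketch from memory rather than part of this diff, cpus_share_cache() reduces to comparing the two ids assigned above:
/* Sketch: two CPUs share a last-level cache iff their cached LLC ids match. */
bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}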
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 42d9df6a5ca4..96e2b18b6283 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2637,6 +2637,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
int cpu = smp_processor_id();
int prev_cpu = task_cpu(p);
struct sched_domain *sd;
+ struct sched_group *sg;
+ int i;
/*
* If the task is going to be woken-up on this cpu and if it is
@@ -2653,17 +2655,29 @@ static int select_idle_sibling(struct task_struct *p, int target)
return prev_cpu;
/*
- * Otherwise, check assigned siblings to find an elegible idle cpu.
+ * Otherwise, iterate the domains and find an elegible idle cpu.
*/
sd = rcu_dereference(per_cpu(sd_llc, target));
-
for_each_lower_domain(sd) {
- if (!cpumask_test_cpu(sd->idle_buddy, tsk_cpus_allowed(p)))
- continue;
- if (idle_cpu(sd->idle_buddy))
- return sd->idle_buddy;
- }
+ sg = sd->groups;
+ do {
+ if (!cpumask_intersects(sched_group_cpus(sg),
+ tsk_cpus_allowed(p)))
+ goto next;
+ for_each_cpu(i, sched_group_cpus(sg)) {
+ if (!idle_cpu(i))
+ goto next;
+ }
+
+ target = cpumask_first_and(sched_group_cpus(sg),
+ tsk_cpus_allowed(p));
+ goto done;
+next:
+ sg = sg->next;
+ } while (sg != sd->groups);
+ }
+done:
return target;
}
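The rewritten loop walks from the LLC domain downward via for_each_lower_domain(); assuming the sched.h definition of that era, this is just a child-pointer walk (shown for orientation, not part of the patch):
/* Assumed helper: visit a domain and then each of its children in turn. */
#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
So the scan looks for a fully idle, affinity-compatible group at the widest cache-sharing level first and only descends to smaller domains when none is found.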
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 34e5eac81424..d3b91e75cecd 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -303,10 +303,11 @@ void getnstimeofday(struct timespec *ts)
seq = read_seqbegin(&tk->lock);
ts->tv_sec = tk->xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(tk);
+ nsecs = timekeeping_get_ns(tk);
} while (read_seqretry(&tk->lock, seq));
+ ts->tv_nsec = 0;
timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);
@@ -345,6 +346,7 @@ void ktime_get_ts(struct timespec *ts)
{
struct timekeeper *tk = &timekeeper;
struct timespec tomono;
+ s64 nsec;
unsigned int seq;
WARN_ON(timekeeping_suspended);
@@ -352,13 +354,14 @@ void ktime_get_ts(struct timespec *ts)
do {
seq = read_seqbegin(&tk->lock);
ts->tv_sec = tk->xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(tk);
+ nsec = timekeeping_get_ns(tk);
tomono = tk->wall_to_monotonic;
} while (read_seqretry(&tk->lock, seq));
- set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
- ts->tv_nsec + tomono.tv_nsec);
+ ts->tv_sec += tomono.tv_sec;
+ ts->tv_nsec = 0;
+ timespec_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
@@ -1244,6 +1247,7 @@ void get_monotonic_boottime(struct timespec *ts)
{
struct timekeeper *tk = &timekeeper;
struct timespec tomono, sleep;
+ s64 nsec;
unsigned int seq;
WARN_ON(timekeeping_suspended);
@@ -1251,14 +1255,15 @@ void get_monotonic_boottime(struct timespec *ts)
do {
seq = read_seqbegin(&tk->lock);
ts->tv_sec = tk->xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(tk);
+ nsec = timekeeping_get_ns(tk);
tomono = tk->wall_to_monotonic;
sleep = tk->total_sleep_time;
} while (read_seqretry(&tk->lock, seq));
- set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
- ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
+ ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
+ ts->tv_nsec = 0;
+ timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);
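All three hunks follow the same pattern: sample the raw nanosecond count inside the seqlock retry loop, then normalize once outside it with timespec_add_ns(), which folds whole seconds out of the nanosecond field. A rough, simplified sketch of what that helper does (hypothetical name, paraphrasing include/linux/time.h):
/* Approximate behaviour of timespec_add_ns(): add ns, then renormalize. */
static inline void timespec_add_ns_sketch(struct timespec *a, u64 ns)
{
	ns += a->tv_nsec;
	while (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		a->tv_sec++;
	}
	a->tv_nsec = ns;
}
This is why each caller zeroes ts->tv_nsec before the call: the possibly greater-than-one-second sum of the sampled nanoseconds plus tomono (and sleep) nanoseconds is normalized in one place instead of being stored unnormalized into tv_nsec.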
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1e1373bcb3e3..3c5a79e2134c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1349,8 +1349,16 @@ static void busy_worker_rebind_fn(struct work_struct *work)
struct worker *worker = container_of(work, struct worker, rebind_work);
struct global_cwq *gcwq = worker->pool->gcwq;
- if (worker_maybe_bind_and_lock(worker))
- worker_clr_flags(worker, WORKER_REBIND);
+ worker_maybe_bind_and_lock(worker);
+
+ /*
+ * %WORKER_REBIND must be cleared even if the above binding failed;
+ * otherwise, we may confuse the next CPU_UP cycle or oops / get
+ * stuck by calling idle_worker_rebind() prematurely. If CPU went
+ * down again inbetween, %WORKER_UNBOUND would be set, so clearing
+ * %WORKER_REBIND is always safe.
+ */
+ worker_clr_flags(worker, WORKER_REBIND);
spin_unlock_irq(&gcwq->lock);
}
@@ -3568,18 +3576,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
#ifdef CONFIG_SMP
struct work_for_cpu {
- struct completion completion;
+ struct work_struct work;
long (*fn)(void *);
void *arg;
long ret;
};
-static int do_work_for_cpu(void *_wfc)
+static void work_for_cpu_fn(struct work_struct *work)
{
- struct work_for_cpu *wfc = _wfc;
+ struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
+
wfc->ret = wfc->fn(wfc->arg);
- complete(&wfc->completion);
- return 0;
}
/**
@@ -3594,19 +3601,11 @@ static int do_work_for_cpu(void *_wfc)
*/
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
- struct task_struct *sub_thread;
- struct work_for_cpu wfc = {
- .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
- .fn = fn,
- .arg = arg,
- };
+ struct work_for_cpu wfc = { .fn = fn, .arg = arg };
- sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
- if (IS_ERR(sub_thread))
- return PTR_ERR(sub_thread);
- kthread_bind(sub_thread, cpu);
- wake_up_process(sub_thread);
- wait_for_completion(&wfc.completion);
+ INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
+ schedule_work_on(cpu, &wfc.work);
+ flush_work(&wfc.work);
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
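After the conversion, work_on_cpu() is just an on-stack work item queued on the target CPU and flushed, so callers keep the same synchronous semantics without spawning a throwaway kthread. A minimal, hypothetical caller (names are illustrative only, not from the patch):
/* Hypothetical usage: run a function on CPU 2 and wait for its result. */
static long double_it(void *arg)
{
	int *in = arg;
	return 2L * *in;	/* executes in process context on the chosen CPU */
}
static void example_caller(void)
{
	int input = 21;
	long ret = work_on_cpu(2, double_it, &input);
	pr_info("work_on_cpu returned %ld\n", ret);
}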