author     Jeff Garzik <jgarzik@pobox.com>    2005-11-29 03:55:47 -0500
committer  Jeff Garzik <jgarzik@pobox.com>    2005-11-29 03:55:47 -0500
commit     b71d4da092801634d04190693a38ca03bdbe2505 (patch)
tree       e4adc88ade5832b844768c114b15d4d97253c4d0 /kernel
parent     656563e32c3f1dfdc35b3944305ece1c5dfeade5 (diff)
parent     624f54be206adf970cd8eece16446b027913e533 (diff)
Merge branch 'master'
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c              | 83
-rw-r--r--  kernel/fork.c             |  7
-rw-r--r--  kernel/futex.c            | 15
-rw-r--r--  kernel/irq/manage.c       | 15
-rw-r--r--  kernel/posix-cpu-timers.c |  2
-rw-r--r--  kernel/printk.c           |  2
-rw-r--r--  kernel/workqueue.c        | 12
7 files changed, 75 insertions, 61 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d61ba88f34e5..e882c6babf41 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,47 +16,76 @@
 #include <asm/semaphore.h>
 
 /* This protects CPUs going up and down... */
-DECLARE_MUTEX(cpucontrol);
-EXPORT_SYMBOL_GPL(cpucontrol);
+static DECLARE_MUTEX(cpucontrol);
 
 static struct notifier_block *cpu_chain;
 
-/*
- * Used to check by callers if they need to acquire the cpucontrol
- * or not to protect a cpu from being removed. Its sometimes required to
- * call these functions both for normal operations, and in response to
- * a cpu being added/removed. If the context of the call is in the same
- * thread context as a CPU hotplug thread, we dont need to take the lock
- * since its already protected
- * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
- */
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *lock_cpu_hotplug_owner;
+static int lock_cpu_hotplug_depth;
 
-int current_in_cpu_hotplug(void)
+static int __lock_cpu_hotplug(int interruptible)
 {
-	return (current->flags & PF_HOTPLUG_CPU);
+	int ret = 0;
+
+	if (lock_cpu_hotplug_owner != current) {
+		if (interruptible)
+			ret = down_interruptible(&cpucontrol);
+		else
+			down(&cpucontrol);
+	}
+
+	/*
+	 * Set only if we succeed in locking
+	 */
+	if (!ret) {
+		lock_cpu_hotplug_depth++;
+		lock_cpu_hotplug_owner = current;
+	}
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
 
+void lock_cpu_hotplug(void)
+{
+	__lock_cpu_hotplug(0);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
+
+void unlock_cpu_hotplug(void)
+{
+	if (--lock_cpu_hotplug_depth == 0) {
+		lock_cpu_hotplug_owner = NULL;
+		up(&cpucontrol);
+	}
+}
+EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+
+int lock_cpu_hotplug_interruptible(void)
+{
+	return __lock_cpu_hotplug(1);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
+#endif	/* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
 	int ret;
 
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;
 	ret = notifier_chain_register(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-	down(&cpucontrol);
+	lock_cpu_hotplug();
 	notifier_chain_unregister(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu)
 		goto out;
 	}
 
-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug. Callers can check if cpucontrol
-	 * is already acquired by current thread, and if so not cause
-	 * a dead lock by not acquiring the lock
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
@@ -171,7 +193,6 @@ out_thread:
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
 	unlock_cpu_hotplug();
 	return err;
 }
@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
 	int ret;
 	void *hcpu = (void *)(long)cpu;
 
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;
 
 	if (cpu_online(cpu) || !cpu_present(cpu)) {
@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu)
 		goto out;
 	}
 
-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug.
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
@@ -217,7 +233,6 @@ out_notify:
 	if (ret != 0)
 		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index e0d0b77343f8..fb8572a42297 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -263,7 +263,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		rb_parent = &tmp->vm_rb;
 
 		mm->map_count++;
-		retval = copy_page_range(mm, oldmm, tmp);
+		retval = copy_page_range(mm, oldmm, mpnt);
 
 		if (tmp->vm_ops && tmp->vm_ops->open)
 			tmp->vm_ops->open(tmp);
@@ -1124,8 +1124,6 @@ static task_t *copy_process(unsigned long clone_flags,
 		if (unlikely(p->ptrace & PT_PTRACED))
 			__ptrace_link(p, current->parent);
 
-		cpuset_fork(p);
-
 		attach_pid(p, PIDTYPE_PID, p->pid);
 		attach_pid(p, PIDTYPE_TGID, p->tgid);
 		if (thread_group_leader(p)) {
@@ -1135,13 +1133,14 @@ static task_t *copy_process(unsigned long clone_flags,
 			__get_cpu_var(process_counts)++;
 		}
 
-	proc_fork_connector(p);
 	if (!current->signal->tty && p->signal->tty)
 		p->signal->tty = NULL;
 
 	nr_threads++;
 	total_forks++;
 	write_unlock_irq(&tasklist_lock);
+	proc_fork_connector(p);
+	cpuset_fork(p);
 	retval = 0;
 
 fork_out:
diff --git a/kernel/futex.c b/kernel/futex.c
index aca8d10704f6..5872e3507f35 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -201,21 +201,6 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
 	 * from swap. But that's a lot of code to duplicate here
	 * for a rare case, so we simply fetch the page.
 	 */
-
-	/*
-	 * Do a quick atomic lookup first - this is the fastpath.
-	 */
-	page = follow_page(mm, uaddr, FOLL_TOUCH|FOLL_GET);
-	if (likely(page != NULL)) {
-		key->shared.pgoff =
-			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-		put_page(page);
-		return 0;
-	}
-
-	/*
-	 * Do it the general way.
-	 */
 	err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
 	if (err >= 0) {
 		key->shared.pgoff =
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3bd7226d15fa..81c49a4d679e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -36,6 +36,9 @@ void synchronize_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_desc + irq;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	while (desc->status & IRQ_INPROGRESS)
 		cpu_relax();
 }
@@ -60,6 +63,9 @@ void disable_irq_nosync(unsigned int irq)
 	irq_desc_t *desc = irq_desc + irq;
 	unsigned long flags;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	spin_lock_irqsave(&desc->lock, flags);
 	if (!desc->depth++) {
 		desc->status |= IRQ_DISABLED;
@@ -86,6 +92,9 @@ void disable_irq(unsigned int irq)
 {
 	irq_desc_t *desc = irq_desc + irq;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	disable_irq_nosync(irq);
 	if (desc->action)
 		synchronize_irq(irq);
@@ -108,6 +117,9 @@ void enable_irq(unsigned int irq)
 	irq_desc_t *desc = irq_desc + irq;
 	unsigned long flags;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	spin_lock_irqsave(&desc->lock, flags);
 	switch (desc->depth) {
 	case 0:
@@ -163,6 +175,9 @@ int setup_irq(unsigned int irq, struct irqaction * new)
 	unsigned long flags;
 	int shared = 0;
 
+	if (irq >= NR_IRQS)
+		return -EINVAL;
+
 	if (desc->handler == &no_irq_type)
 		return -ENOSYS;
 	/*
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 84af54c39e1b..cae4f5728997 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -36,7 +36,7 @@ timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
 	union cpu_time_count ret;
 	ret.sched = 0;		/* high half always zero when .cpu used */
 	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
+		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
 	} else {
 		ret.cpu = timespec_to_cputime(tp);
 	}
diff --git a/kernel/printk.c b/kernel/printk.c
index ac8a08f36207..5287be83e3e7 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -956,7 +956,7 @@ int unregister_console(struct console *console)
 	if (console_drivers == console) {
 		console_drivers=console->next;
 		res = 0;
-	} else {
+	} else if (console_drivers) {
 		for (a=console_drivers->next, b=console_drivers ;
 		     a; b=a, a=b->next) {
 			if (a == console) {
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 42df83d7fad2..2bd5aee1c736 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -102,7 +102,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 
 	if (!test_and_set_bit(0, &work->pending)) {
 		if (unlikely(is_single_threaded(wq)))
-			cpu = 0;
+			cpu = any_online_cpu(cpu_online_map);
 		BUG_ON(!list_empty(&work->entry));
 		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 		ret = 1;
@@ -118,7 +118,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
-		cpu = 0;
+		cpu = any_online_cpu(cpu_online_map);
 
 	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
@@ -266,8 +266,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 
 	if (is_single_threaded(wq)) {
-		/* Always use cpu 0's area. */
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0));
+		/* Always use first cpu's area. */
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
 	} else {
 		int cpu;
 
@@ -320,7 +320,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	lock_cpu_hotplug();
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, 0);
+		p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
 		if (!p)
 			destroy = 1;
 		else
@@ -374,7 +374,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	/* We don't need the distraction of CPUs appearing and vanishing. */
 	lock_cpu_hotplug();
 	if (is_single_threaded(wq))
-		cleanup_workqueue_thread(wq, 0);
+		cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
 	else {
 		for_each_online_cpu(cpu)
 			cleanup_workqueue_thread(wq, cpu);
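
The kernel/cpu.c hunks above replace the exported cpucontrol semaphore and the PF_HOTPLUG_CPU task flag with a recursion-aware lock_cpu_hotplug()/unlock_cpu_hotplug() pair: __lock_cpu_hotplug() records the owning task and a depth count, so a task that already holds the lock (for example a notifier invoked from cpu_up() or cpu_down()) can take it again without deadlocking on down(). A minimal caller sketch follows; it is not part of this commit, and the function name is hypothetical, but it uses only the interfaces the diff introduces.

#include <linux/cpu.h>	/* lock_cpu_hotplug(), unlock_cpu_hotplug() under CONFIG_HOTPLUG_CPU */

/*
 * Hypothetical example (not from this commit): code that must see a stable
 * set of online CPUs.  It can be called both from ordinary process context
 * and from a CPU hotplug notifier that already runs with cpucontrol held in
 * the same task; in the latter case the nested lock_cpu_hotplug() only bumps
 * lock_cpu_hotplug_depth instead of sleeping on the semaphore again.
 */
static int example_update_per_cpu_state(void)
{
	lock_cpu_hotplug();
	/* ... read or modify state that a concurrent cpu_up()/cpu_down() could invalidate ... */
	unlock_cpu_hotplug();
	return 0;
}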