Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c                  15
-rw-r--r--  kernel/gcov/Kconfig            2
-rw-r--r--  kernel/module.c                6
-rw-r--r--  kernel/perf_counter.c          7
-rw-r--r--  kernel/sched.c                 4
-rw-r--r--  kernel/smp.c                  40
-rw-r--r--  kernel/trace/trace_events.c    6
7 files changed, 62 insertions(+), 18 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8ce10043e4ac..6ba0f1ecb212 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -401,6 +401,7 @@ int disable_nonboot_cpus(void)
break;
}
}
+
if (!error) {
BUG_ON(num_online_cpus() > 1);
/* Make sure the CPUs won't be enabled by someone else */
@@ -413,6 +414,14 @@ int disable_nonboot_cpus(void)
return error;
}
+void __weak arch_enable_nonboot_cpus_begin(void)
+{
+}
+
+void __weak arch_enable_nonboot_cpus_end(void)
+{
+}
+
void __ref enable_nonboot_cpus(void)
{
int cpu, error;
@@ -424,6 +433,9 @@ void __ref enable_nonboot_cpus(void)
goto out;
printk("Enabling non-boot CPUs ...\n");
+
+ arch_enable_nonboot_cpus_begin();
+
for_each_cpu(cpu, frozen_cpus) {
error = _cpu_up(cpu, 1);
if (!error) {
@@ -432,6 +444,9 @@ void __ref enable_nonboot_cpus(void)
}
printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
}
+
+ arch_enable_nonboot_cpus_end();
+
cpumask_clear(frozen_cpus);
out:
cpu_maps_update_done();
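
The two __weak stubs added above are intentionally empty so that an architecture can supply real versions. A minimal, hedged sketch of what such an override could look like (the bodies are assumptions, e.g. deferring per-CPU hardware reprogramming until all frozen CPUs are back online):

void arch_enable_nonboot_cpus_begin(void)
{
        /* assumed example: note that CPUs are about to be onlined in bulk,
         * so per-CPU hardware state updates can be deferred and batched */
}

void arch_enable_nonboot_cpus_end(void)
{
        /* assumed example: apply the deferred updates now that every
         * non-boot CPU has been brought back online */
}
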
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 22e9dcfaa3d3..654efd09f6a9 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -34,7 +34,7 @@ config GCOV_KERNEL
config GCOV_PROFILE_ALL
bool "Profile entire Kernel"
depends on GCOV_KERNEL
- depends on S390 || X86
+ depends on S390 || X86 || (PPC && EXPERIMENTAL)
default n
---help---
This option activates profiling for the entire kernel.
diff --git a/kernel/module.c b/kernel/module.c
index 46580edff0cb..05ce49ced8f6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
static void *percpu_modalloc(unsigned long size, unsigned long align,
const char *name)
@@ -394,7 +394,7 @@ static void percpu_modfree(void *freeme)
free_percpu(freeme);
}
-#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
/* Number of blocks used and allocated. */
static unsigned int pcpu_num_used, pcpu_num_allocated;
@@ -540,7 +540,7 @@ static int percpu_modinit(void)
}
__initcall(percpu_modinit);
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
static unsigned int find_pcpusec(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
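
For context, the allocator selected by this #ifdef backs per-CPU variables defined inside modules; the loader reserves space for them through percpu_modalloc() above. A small, hypothetical module-side sketch (my_hit_count and my_count_hit are illustrative names only):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_hit_count);    /* hypothetical counter */

static void my_count_hit(void)
{
        /* get_cpu_var() disables preemption around the per-CPU access */
        get_cpu_var(my_hit_count)++;
        put_cpu_var(my_hit_count);
}
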
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e0d91fdf0c3c..8cb94a52d1bb 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -106,16 +106,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,
void __weak perf_counter_print_debug(void) { }
-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);
void __perf_disable(void)
{
- __get_cpu_var(disable_count)++;
+ __get_cpu_var(perf_disable_count)++;
}
bool __perf_enable(void)
{
- return !--__get_cpu_var(disable_count);
+ return !--__get_cpu_var(perf_disable_count);
}
void perf_disable(void)
@@ -4215,6 +4215,7 @@ static int perf_copy_attr(struct perf_counter_attr __user *uattr,
if (val)
goto err_size;
}
+ size = sizeof(*attr);
}
ret = copy_from_user(attr, uattr, size);
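
The added size = sizeof(*attr); is the interesting part of the perf_counter.c hunk: userspace may pass a perf_counter_attr larger than the kernel's, and once the extra tail bytes have been verified to be zero the copy must be clamped to the size the kernel understands, or copy_from_user() would overrun *attr. A plain-C sketch of that compatibility pattern, using hypothetical names (copy_attr_compat, my_attr), not the kernel API:

#include <stddef.h>
#include <string.h>

struct my_attr {
        unsigned int       flags;
        unsigned long long config;
};

static int copy_attr_compat(struct my_attr *dst, const void *src, size_t user_size)
{
        const unsigned char *tail = (const unsigned char *)src + sizeof(*dst);
        size_t i;

        if (user_size > sizeof(*dst)) {
                /* reject callers that set fields we do not know about */
                for (i = 0; i < user_size - sizeof(*dst); i++)
                        if (tail[i])
                                return -1;
                /* the clamp the patch adds: never copy more than we hold */
                user_size = sizeof(*dst);
        }
        memset(dst, 0, sizeof(*dst));
        memcpy(dst, src, user_size);
        return 0;
}
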
diff --git a/kernel/sched.c b/kernel/sched.c
index e27a53685ed9..d9db3fb17573 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -295,12 +295,12 @@ struct task_group root_task_group;
/* Default task group's sched entity on each cpu */
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
/* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
#endif /* CONFIG_RT_GROUP_SCHED */
#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
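
DEFINE_PER_CPU_SHARED_ALIGNED, used in the two hunks above, combines cacheline alignment with placement in the dedicated shared-aligned per-CPU section, so the open-coded ____cacheline_aligned_in_smp attribute is no longer needed. A short illustrative sketch (struct my_rq and both variable names are assumptions):

#include <linux/percpu.h>
#include <linux/cache.h>

struct my_rq {                          /* hypothetical per-CPU structure */
        unsigned long nr_running;
};

/* old style: plain per-CPU definition with an explicit alignment attribute */
static DEFINE_PER_CPU(struct my_rq, my_rq_old) ____cacheline_aligned_in_smp;

/* new style, as above: alignment and section placement come from the macro */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct my_rq, my_rq_new);
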
diff --git a/kernel/smp.c b/kernel/smp.c
index 94188b8ecc33..8e218500ab14 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -177,6 +177,11 @@ void generic_smp_call_function_interrupt(void)
int cpu = get_cpu();
/*
+ * Shouldn't receive this interrupt on a cpu that is not yet online.
+ */
+ WARN_ON_ONCE(!cpu_online(cpu));
+
+ /*
* Ensure entry is visible on call_function_queue after we have
* entered the IPI. See comment in smp_call_function_many.
* If we don't have this, then we may miss an entry on the list
@@ -230,6 +235,11 @@ void generic_smp_call_function_single_interrupt(void)
unsigned int data_flags;
LIST_HEAD(list);
+ /*
+ * Shouldn't receive this interrupt on a cpu that is not yet online.
+ */
+ WARN_ON_ONCE(!cpu_online(smp_processor_id()));
+
spin_lock(&q->lock);
list_replace_init(&q->list, &list);
spin_unlock(&q->lock);
@@ -285,8 +295,14 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
*/
this_cpu = get_cpu();
- /* Can deadlock when called with interrupts disabled */
- WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+ /*
+ * Can deadlock when called with interrupts disabled.
+ * We allow cpus that are not yet online though, as no one else can
+ * send an smp call function interrupt to this cpu and as such deadlocks
+ * can't happen.
+ */
+ WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+ && !oops_in_progress);
if (cpu == this_cpu) {
local_irq_save(flags);
@@ -329,8 +345,14 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
{
csd_lock(data);
- /* Can deadlock when called with interrupts disabled */
- WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress);
+ /*
+ * Can deadlock when called with interrupts disabled.
+ * We allow cpus that are not yet online though, as no one else can
+ * send an smp call function interrupt to this cpu and as such deadlocks
+ * can't happen.
+ */
+ WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
+ && !oops_in_progress);
generic_exec_single(cpu, data, wait);
}
@@ -365,8 +387,14 @@ void smp_call_function_many(const struct cpumask *mask,
unsigned long flags;
int cpu, next_cpu, this_cpu = smp_processor_id();
- /* Can deadlock when called with interrupts disabled */
- WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+ /*
+ * Can deadlock when called with interrupts disabled.
+ * We allow cpus that are not yet online though, as no one else can
+ * send an smp call function interrupt to this cpu and as such deadlocks
+ * can't happen.
+ */
+ WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+ && !oops_in_progress);
/* So, what's a CPU they want? Ignoring this one. */
cpu = cpumask_first_and(mask, cpu_online_mask);
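
The relaxed WARN_ON_ONCE() conditions in smp.c exist so that a CPU which is still being brought up, and therefore necessarily runs with interrupts disabled and is not yet marked online, can use the smp_call_function_*() helpers without tripping the warning; nothing can send an IPI to a not-yet-online CPU, so the usual irqs-off deadlock cannot occur. A hypothetical caller sketch (my_cpu_starting and my_early_sync are illustrative names, not existing kernel functions):

#include <linux/smp.h>

static void my_early_sync(void *info)
{
        /* runs on the boot CPU on behalf of the CPU being brought up */
}

void my_cpu_starting(int boot_cpu)
{
        /*
         * Interrupts are still disabled on the incoming CPU here and it is
         * not yet online, so the warning above is skipped and the wait
         * cannot deadlock.
         */
        smp_call_function_single(boot_cpu, my_early_sync, NULL, 1);
}
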
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 78b1ed230177..97e2c4d2e9eb 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1432,7 +1432,7 @@ static __init void event_trace_self_tests(void)
#ifdef CONFIG_FUNCTION_TRACER
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1449,7 +1449,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
pc = preempt_count();
resched = ftrace_preempt_disable();
cpu = raw_smp_processor_id();
- disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+ disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
if (disabled != 1)
goto out;
@@ -1468,7 +1468,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
out:
- atomic_dec(&per_cpu(test_event_disable, cpu));
+ atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
ftrace_preempt_enable(resched);
}
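
The renamed atomic per-CPU variable implements a per-CPU recursion guard: only the first, non-nested entry on a given CPU records an event, as the disabled != 1 check above shows. A condensed sketch of the same pattern with hypothetical names (my_trace_disable, my_trace_hook); it assumes preemption is already disabled by the caller:

#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/atomic.h>

static DEFINE_PER_CPU(atomic_t, my_trace_disable);

static void my_trace_hook(void)
{
        int cpu = raw_smp_processor_id();

        if (atomic_inc_return(&per_cpu(my_trace_disable, cpu)) != 1)
                goto out;               /* nested entry on this CPU: skip */

        /* ... record the event ... */
out:
        atomic_dec(&per_cpu(my_trace_disable, cpu));
}
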