author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-02-03 08:52:05 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-02-03 08:52:05 -0800 |
commit | eb487ab4d5af0caee81bfaaa5d87b55844f60145 (patch) |
tree | 5dc5470b73aeb1f4bfff9e77d52ff3387c18e7df /kernel | |
parent | 0b0abeaf3d30cec03ac6497fe978b8f7edecc5ae (diff) | |
parent | 542e72fc90f5ed9eecb574f80f70868c7f296093 (diff) | |
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
perf: Fix reading in perf_event_read()
watchdog: Don't change watchdog state on read of sysctl
watchdog: Fix sysctl consistency
watchdog: Fix broken nowatchdog logic
perf: Fix Pentium4 raw event validation
perf: Fix alloc_callchain_buffers()
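The two watchdog sysctl fixes in the list above come down to one rule: a read of the sysctl must only report the current state, and only a write may re-run the enable/disable paths. Below is a minimal userspace sketch of that pattern (hypothetical names; it stands in for the kernel's proc_dowatchdog_enabled() handler and the watchdog_enable_all_cpus()/watchdog_disable_all_cpus() helpers, not a copy of them):

```c
#include <stdbool.h>
#include <stdio.h>

static int watchdog_enabled = 1;   /* mirrors the kernel flag, default on */

/* Hypothetical stand-ins for watchdog_enable_all_cpus()/_disable_all_cpus(). */
static void enable_all(void)  { puts("watchdog: enabled on all cpus");  }
static void disable_all(void) { puts("watchdog: disabled on all cpus"); }

/*
 * Sketch of the fixed handler logic: the enable/disable side effects run
 * only when the caller is writing; a read leaves the state untouched.
 */
static int dowatchdog_enabled(bool write, int new_value)
{
	if (write) {
		watchdog_enabled = new_value;
		if (watchdog_enabled)
			enable_all();
		else
			disable_all();
	}
	return watchdog_enabled;   /* a read just reports the current value */
}

int main(void)
{
	dowatchdog_enabled(false, 0);  /* read: nothing is toggled */
	dowatchdog_enabled(true, 0);   /* write 0: watchdog disabled */
	dowatchdog_enabled(true, 1);   /* write 1: watchdog re-enabled */
	return 0;
}
```

The same separation is what the proc_dowatchdog_enabled() hunk in the diff below adds with its `if (write)` guard.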
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/perf_event.c | 10 |
-rw-r--r-- | kernel/watchdog.c | 43 |
2 files changed, 24 insertions, 29 deletions
```diff
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 126a302c481c..999835b6112b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1901,11 +1901,12 @@ static void __perf_event_read(void *info)
 		return;
 
 	raw_spin_lock(&ctx->lock);
-	update_context_time(ctx);
+	if (ctx->is_active)
+		update_context_time(ctx);
 	update_event_times(event);
+	if (event->state == PERF_EVENT_STATE_ACTIVE)
+		event->pmu->read(event);
 	raw_spin_unlock(&ctx->lock);
-
-	event->pmu->read(event);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
@@ -1999,8 +2000,7 @@ static int alloc_callchain_buffers(void)
 	 * accessed from NMI. Use a temporary manual per cpu allocation
 	 * until that gets sorted out.
 	 */
-	size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-		num_possible_cpus();
+	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 
 	entries = kzalloc(size, GFP_KERNEL);
 	if (!entries)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d7ebdf4cea98..f37f974aa81b 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -27,7 +27,7 @@
 #include <asm/irq_regs.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled;
+int watchdog_enabled = 1;
 int __read_mostly softlockup_thresh = 60;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -43,9 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int no_watchdog;
-
-
 /* boot commands */
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
@@ -58,7 +55,7 @@ static int __init hardlockup_panic_setup(char *str)
 	if (!strncmp(str, "panic", 5))
 		hardlockup_panic = 1;
 	else if (!strncmp(str, "0", 1))
-		no_watchdog = 1;
+		watchdog_enabled = 0;
 	return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -77,7 +74,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-	no_watchdog = 1;
+	watchdog_enabled = 0;
 	return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -85,7 +82,7 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-	no_watchdog = 1;
+	watchdog_enabled = 0;
 	return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -432,9 +429,6 @@ static int watchdog_enable(int cpu)
 		wake_up_process(p);
 	}
 
-	/* if any cpu succeeds, watchdog is considered enabled for the system */
-	watchdog_enabled = 1;
-
 	return 0;
 }
 
@@ -462,12 +456,16 @@ static void watchdog_disable(int cpu)
 static void watchdog_enable_all_cpus(void)
 {
 	int cpu;
-	int result = 0;
+
+	watchdog_enabled = 0;
 
 	for_each_online_cpu(cpu)
-		result += watchdog_enable(cpu);
+		if (!watchdog_enable(cpu))
+			/* if any cpu succeeds, watchdog is considered
+			   enabled for the system */
+			watchdog_enabled = 1;
 
-	if (result)
+	if (!watchdog_enabled)
 		printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
 
 }
@@ -476,9 +474,6 @@ static void watchdog_disable_all_cpus(void)
 {
 	int cpu;
 
-	if (no_watchdog)
-		return;
-
 	for_each_online_cpu(cpu)
 		watchdog_disable(cpu);
 
@@ -498,10 +493,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write,
 {
 	proc_dointvec(table, write, buffer, length, ppos);
 
-	if (watchdog_enabled)
-		watchdog_enable_all_cpus();
-	else
-		watchdog_disable_all_cpus();
+	if (write) {
+		if (watchdog_enabled)
+			watchdog_enable_all_cpus();
+		else
+			watchdog_disable_all_cpus();
+	}
 
 	return 0;
 }
@@ -530,7 +527,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		err = watchdog_enable(hotcpu);
+		if (watchdog_enabled)
+			err = watchdog_enable(hotcpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
@@ -555,9 +553,6 @@ void __init lockup_detector_init(void)
 	void *cpu = (void *)(long)smp_processor_id();
 	int err;
 
-	if (no_watchdog)
-		return;
-
 	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
 	WARN_ON(notifier_to_errno(err));
 
```
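The alloc_callchain_buffers() hunk above replaces a hand-written size sum with offsetof() over the flexible array member, and switches the bound from num_possible_cpus() to nr_cpu_ids, since the array is indexed by CPU id and possible CPU ids need not be contiguous. A minimal userspace sketch of the sizing idiom, with a hypothetical struct standing in for the kernel's callchain_cpus_entries:

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPU_IDS 8   /* assumed CPU-id bound for the example */

/* Hypothetical stand-in for the kernel's callchain_cpus_entries layout. */
struct cpus_entries {
	unsigned int generation;
	void *cpu_entries[];   /* flexible array member, one slot per CPU id */
};

int main(void)
{
	/*
	 * offsetof(type, array[n]) is the size of everything before the
	 * array, including padding, plus exactly n slots -- the quantity
	 * the old sizeof()-based sum tried to compute by hand.
	 */
	size_t size = offsetof(struct cpus_entries, cpu_entries[NR_CPU_IDS]);
	struct cpus_entries *entries = calloc(1, size);

	if (!entries)
		return 1;

	printf("allocated %zu bytes for %d CPU slots\n", size, NR_CPU_IDS);
	free(entries);
	return 0;
}
```

The appeal of the idiom is that the header size, padding, and element size all come from the struct definition itself, so the computation cannot drift if the layout changes.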