author    Steven Rostedt <rostedt@goodmis.org>    2008-05-22 00:22:16 -0400
committer Thomas Gleixner <tglx@linutronix.de>    2008-05-26 22:39:45 +0200
commit    19384c0314342222b18d4c7f09cdce1ca74dfd2a
tree      9312acd07471cb89f2090f01e3259eab40de3eea /kernel/trace/trace.c
parent    c6531cce6e6e4b99bcda46b6268d6f2d9e30aea4
ftrace: limit use of check pages
The check_pages function is called often enough that it can cause problems with trace outputs or even bring the system to a halt. This patch limits check_pages to the places most likely to have problems: the flip between the global array and the max save array, changes to the buffer size, and the self tests.

This patch also removes the BUG_ON from check_pages and replaces it with a WARN_ON plus disabling of the tracer.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: pq@iki.fi
Cc: proski@gnu.org
Cc: sandmann@redhat.com
Cc: a.p.zijlstra@chello.nl
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
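Below is a minimal sketch, reusing only identifiers that appear in this patch (check_pages, for_each_tracing_cpu, tracing_disabled, struct trace_array), of how a caller can act on the new int return value instead of relying on a BUG_ON crash; the helper name is hypothetical:

/*
 * Hypothetical helper, not part of this patch: verify every per-CPU
 * trace buffer and back out gracefully on corruption.  check_pages()
 * now sets tracing_disabled, fires WARN_ON(1) and returns -1 instead
 * of calling BUG_ON(), so the caller can simply propagate an error.
 */
static int verify_trace_buffers(struct trace_array *tr)
{
	int cpu;

	for_each_tracing_cpu(cpu) {
		if (check_pages(tr->data[cpu]) < 0)
			return -ENOMEM;	/* tracer is already disabled */
	}

	return 0;
}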
Diffstat (limited to 'kernel/trace/trace.c')
 kernel/trace/trace.c | 32 +++++++++++++++++++++++---------
 1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3271916ff033..0567f51bbea4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -249,24 +249,32 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	tracing_record_cmdline(current);
 }
 
+#define CHECK_COND(cond)			\
+	if (unlikely(cond)) {			\
+		tracing_disabled = 1;		\
+		WARN_ON(1);			\
+		return -1;			\
+	}
+
 /**
  * check_pages - integrity check of trace buffers
  *
  * As a safty measure we check to make sure the data pages have not
- * been corrupted. TODO: configure to disable this because it adds
- * a bit of overhead.
+ * been corrupted.
  */
-void check_pages(struct trace_array_cpu *data)
+int check_pages(struct trace_array_cpu *data)
 {
 	struct page *page, *tmp;
 
-	BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
-	BUG_ON(data->trace_pages.prev->next != &data->trace_pages);
+	CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
+	CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);
 
 	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
-		BUG_ON(page->lru.next->prev != &page->lru);
-		BUG_ON(page->lru.prev->next != &page->lru);
+		CHECK_COND(page->lru.next->prev != &page->lru);
+		CHECK_COND(page->lru.prev->next != &page->lru);
 	}
+
+	return 0;
 }
 
 /**
@@ -280,7 +288,6 @@ void *head_page(struct trace_array_cpu *data)
 {
 	struct page *page;
 
-	check_pages(data);
 	if (list_empty(&data->trace_pages))
 		return NULL;
 
@@ -2566,7 +2573,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 {
 	unsigned long val;
 	char buf[64];
-	int ret;
+	int i, ret;
 
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
@@ -2635,8 +2642,15 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 			trace_free_page();
 	}
 
+	/* check integrity */
+	for_each_tracing_cpu(i)
+		check_pages(global_trace.data[i]);
+
 	filp->f_pos += cnt;
 
+	/* If check pages failed, return ENOMEM */
+	if (tracing_disabled)
+		cnt = -ENOMEM;
+
  out:
 	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
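For completeness, a small user-space sketch of how the new failure path becomes visible after this patch: resizing the trace buffers through the tracing debugfs file returns -ENOMEM (and tracing stays disabled) if the post-resize integrity check trips. The debugfs mount point and the trace_entries file name below are assumptions, not taken from the patch itself.

/* Hypothetical user-space example; the debugfs path is an assumption. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/trace_entries", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Request a new buffer size; a failed check_pages() turns the
	 * write into -ENOMEM and leaves tracing disabled. */
	if (write(fd, "65536\n", 6) < 0 && errno == ENOMEM)
		fprintf(stderr, "trace buffer integrity check failed\n");

	close(fd);
	return 0;
}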