Date: 22 May 2008
From: Steven Rostedt
Subject: [PATCH 1/5] ftrace: limit use of check pages
The check_pages function is called often enough that it can cause problems
with trace outputs or even bring the system to a halt.

This patch limits check_pages to the places that are most likely to have
problems. The check is made at the flip between the global array and the
max save array, when the size of the buffers changes, and in the self tests.

This patch also removes the BUG_ONs from check_pages, replacing them with
a WARN_ON and disabling of the tracer.
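
Since check_pages() now returns int, a caller that is able to propagate
errors does not have to rely on the tracing_disabled flag the way the
resize path below does. A minimal, hypothetical sketch of such a caller
(example_validate_buffers() is illustrative only and is not part of this
series):

	/*
	 * Illustrative only: check one CPU's buffer and report failure.
	 * check_pages() returns 0 on success; on corruption CHECK_COND
	 * has already issued a WARN_ON and disabled tracing.
	 */
	static int example_validate_buffers(struct trace_array *tr, int cpu)
	{
		if (check_pages(tr->data[cpu]) < 0)
			return -ENOMEM;	/* mirrors tracing_entries_write() below */

		return 0;
	}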

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
---
 kernel/trace/trace.c          |   32 +++++++++++++++++++++++---------
 kernel/trace/trace_selftest.c |    1 +
 2 files changed, 24 insertions(+), 9 deletions(-)

Index: linux-tip.git/kernel/trace/trace.c
===================================================================
--- linux-tip.git.orig/kernel/trace/trace.c 2008-05-21 13:25:34.000000000 -0400
+++ linux-tip.git/kernel/trace/trace.c 2008-05-21 21:32:23.000000000 -0400
@@ -249,24 +249,32 @@ __update_max_tr(struct trace_array *tr,
 	tracing_record_cmdline(current);
 }
 
+#define CHECK_COND(cond)			\
+	if (unlikely(cond)) {			\
+		tracing_disabled = 1;		\
+		WARN_ON(1);			\
+		return -1;			\
+	}
+
 /**
  * check_pages - integrity check of trace buffers
  *
  * As a safty measure we check to make sure the data pages have not
- * been corrupted. TODO: configure to disable this because it adds
- * a bit of overhead.
+ * been corrupted.
  */
-void check_pages(struct trace_array_cpu *data)
+int check_pages(struct trace_array_cpu *data)
 {
 	struct page *page, *tmp;
 
-	BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
-	BUG_ON(data->trace_pages.prev->next != &data->trace_pages);
+	CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
+	CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);
 
 	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
-		BUG_ON(page->lru.next->prev != &page->lru);
-		BUG_ON(page->lru.prev->next != &page->lru);
+		CHECK_COND(page->lru.next->prev != &page->lru);
+		CHECK_COND(page->lru.prev->next != &page->lru);
 	}
+
+	return 0;
 }
 
 /**
@@ -280,7 +288,6 @@ void *head_page(struct trace_array_cpu *
 {
 	struct page *page;
 
-	check_pages(data);
 	if (list_empty(&data->trace_pages))
 		return NULL;
 
@@ -2608,7 +2615,7 @@ tracing_entries_write(struct file *filp,
 {
 	unsigned long val;
 	char buf[64];
-	int ret;
+	int i, ret;
 
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
@@ -2677,8 +2684,15 @@ tracing_entries_write(struct file *filp,
 			trace_free_page();
 	}
 
+	/* check integrity */
+	for_each_tracing_cpu(i)
+		check_pages(global_trace.data[i]);
+
 	filp->f_pos += cnt;
 
+	/* If check pages failed, return ENOMEM */
+	if (tracing_disabled)
+		cnt = -ENOMEM;
  out:
 	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
Index: linux-tip.git/kernel/trace/trace_selftest.c
===================================================================
--- linux-tip.git.orig/kernel/trace/trace_selftest.c 2008-05-21 19:06:11.000000000 -0400
+++ linux-tip.git/kernel/trace/trace_selftest.c 2008-05-21 20:25:05.000000000 -0400
@@ -28,6 +28,7 @@ trace_test_buffer_cpu(struct trace_array
 	page = list_entry(data->trace_pages.next, struct page, lru);
 	entries = page_address(page);
 
+	check_pages(data);
 	if (head_page(data) != entries)
 		goto failed;
 
--

