author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2009-03-13 22:44:51 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-03-13 22:44:51 +0100
commit		97fb44eb6bc01f4ffed4300e475aa15e44877375 (patch)
tree		481ed6efd0babe7185cae04f2fd295426b36411d /kernel/trace/trace_selftest.c
parent		[ARM] 5422/1: ARM: MMU: add a Non-cacheable Normal executable memory type (diff)
parent		qong: basic support for Dave/DENX QongEVB-LITE board (diff)
Merge branch 'for-rmk' of git://git.pengutronix.de/git/imx/linux-2.6 into devel
Conflicts:
	arch/arm/mach-at91/gpio.c
Diffstat (limited to 'kernel/trace/trace_selftest.c')
-rw-r--r--	kernel/trace/trace_selftest.c	19
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 88c8eb70f54a..bc8e80a86bca 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -23,10 +23,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
+	unsigned int loops = 0;
 
 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
 		entry = ring_buffer_event_data(event);
 
+		/*
+		 * The ring buffer holds at most trace_buf_size entries;
+		 * if we loop more times than that, something is wrong
+		 * with the ring buffer.
+		 */
+		if (loops++ > trace_buf_size) {
+			printk(KERN_CONT ".. bad ring buffer ");
+			goto failed;
+		}
 		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
 				entry->type);
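
The check added above bounds the consume loop by the buffer's capacity: a healthy ring can never yield more entries in one drain than it can hold. A minimal stand-alone C sketch of the same pattern, using a fake ring in place of the kernel API (pop_entry(), RING_CAPACITY, and the preloaded pending counter are illustrative stand-ins for ring_buffer_consume() and trace_buf_size, not kernel code):

/*
 * Stand-alone sketch of the bounded-drain check. The "ring" is a
 * fake: pop_entry() just counts down a preloaded number of entries,
 * and RING_CAPACITY stands in for trace_buf_size.
 */
#include <stdio.h>

#define RING_CAPACITY 1024

static unsigned int pending = 100;	/* entries waiting in the fake ring */

static int pop_entry(void)		/* 1 = got an entry, 0 = ring empty */
{
	if (pending == 0)
		return 0;
	pending--;
	return 1;
}

static int drain_ring(void)
{
	unsigned int loops = 0;

	while (pop_entry()) {
		/*
		 * A healthy ring holds at most RING_CAPACITY entries,
		 * so consuming more than that in one drain means the
		 * ring is corrupt or the producer is still filling it.
		 */
		if (loops++ > RING_CAPACITY) {
			fprintf(stderr, ".. bad ring buffer\n");
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return drain_ring() ? 1 : 0;
}
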
@@ -57,11 +67,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	cnt = ring_buffer_entries(tr->buffer);
 
+	/*
+	 * trace_test_buffer_cpu() runs a while loop to consume all data.
+	 * If the calling tracer is broken and is constantly filling the
+	 * buffer, this will run forever and hard lock the box.
+	 * We disable the ring buffer while we do this test to prevent
+	 * a hard lockup.
+	 */
+	tracing_off();
 	for_each_possible_cpu(cpu) {
 		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
+	tracing_on();
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
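
The tracing_off()/tracing_on() pair quiesces the producer before the drain, which is what guarantees the consume loop in trace_test_buffer_cpu() terminates even when the tracer under test misbehaves. A stand-alone sketch of this quiesce-then-drain idea, with an illustrative recording flag and counters in place of the kernel's ring buffer (a real multi-producer version would need the kernel's synchronization, omitted here):

/*
 * Stand-alone sketch of the quiesce-then-drain pattern: stop the
 * producer before walking the buffer so a runaway producer cannot
 * keep the consumer looping forever. The recording flag and the
 * counters are illustrative, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

static bool recording = true;
static unsigned int pending = 50;

static void produce(void)
{
	if (recording)		/* producers honor the flag ... */
		pending++;
}

static int consume(void)	/* 1 = got an entry, 0 = empty */
{
	if (pending == 0)
		return 0;
	pending--;
	return 1;
}

static void drain_all(void)
{
	recording = false;	/* ... so clearing it quiesces them */
	while (consume())
		;		/* bounded: nothing new can arrive */
	recording = true;	/* re-enable recording afterwards */
}

int main(void)
{
	produce();
	drain_all();
	printf("drained, %u entries left\n", pending);
	return 0;
}
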