author		Ingo Molnar <mingo@elte.hu>	2009-09-19 12:05:25 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-09-19 12:05:25 +0200
commit		be4bdbfbae6b303c21ebe446648f617908a794b5 (patch)
tree		4ea485b2fa95cb17aded35b0ac83d6bdf9a0396d /include
parent		tracing: Remove markers (diff)
parent		tracing: Allocate the ftrace event profile buffer dynamically (diff)
Merge branch 'tracing/core-v3' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into tracing/urgent
Diffstat (limited to 'include')
-rw-r--r--	include/linux/ftrace_event.h	 10
-rw-r--r--	include/linux/syscalls.h	 24
-rw-r--r--	include/trace/ftrace.h		111
3 files changed, 79 insertions, 66 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index bd099ba82ccc..4ec5e67e18cf 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -4,6 +4,7 @@
 #include <linux/ring_buffer.h>
 #include <linux/trace_seq.h>
 #include <linux/percpu.h>
+#include <linux/hardirq.h>
 
 struct trace_array;
 struct tracer;
@@ -130,10 +131,15 @@ struct ftrace_event_call {
 	void			*data;
 
 	atomic_t		profile_count;
-	int			(*profile_enable)(struct ftrace_event_call *);
-	void			(*profile_disable)(struct ftrace_event_call *);
+	int			(*profile_enable)(void);
+	void			(*profile_disable)(void);
 };
 
+#define FTRACE_MAX_PROFILE_SIZE	2048
+
+extern char *trace_profile_buf;
+extern char *trace_profile_buf_nmi;
+
 #define MAX_FILTER_PRED		32
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index a8e37821cc60..7d9803cbb20f 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -100,33 +100,25 @@ struct perf_counter_attr;
 
 #ifdef CONFIG_EVENT_PROFILE
 #define TRACE_SYS_ENTER_PROFILE(sname)					\
-static int prof_sysenter_enable_##sname(struct ftrace_event_call *event_call) \
+static int prof_sysenter_enable_##sname(void)				\
 {									\
-	int ret = 0;							\
-	if (!atomic_inc_return(&event_enter_##sname.profile_count))	\
-		ret = reg_prof_syscall_enter("sys"#sname);		\
-	return ret;							\
+	return reg_prof_syscall_enter("sys"#sname);			\
 }									\
 									\
-static void prof_sysenter_disable_##sname(struct ftrace_event_call *event_call)\
+static void prof_sysenter_disable_##sname(void)			\
 {									\
-	if (atomic_add_negative(-1, &event_enter_##sname.profile_count)) \
-		unreg_prof_syscall_enter("sys"#sname);			\
+	unreg_prof_syscall_enter("sys"#sname);				\
 }
 
 #define TRACE_SYS_EXIT_PROFILE(sname)					\
-static int prof_sysexit_enable_##sname(struct ftrace_event_call *event_call) \
+static int prof_sysexit_enable_##sname(void)				\
 {									\
-	int ret = 0;							\
-	if (!atomic_inc_return(&event_exit_##sname.profile_count))	\
-		ret = reg_prof_syscall_exit("sys"#sname);		\
-	return ret;							\
+	return reg_prof_syscall_exit("sys"#sname);			\
 }									\
 									\
-static void prof_sysexit_disable_##sname(struct ftrace_event_call *event_call) \
+static void prof_sysexit_disable_##sname(void)				\
 {									\
-	if (atomic_add_negative(-1, &event_exit_##sname.profile_count)) \
-		unreg_prof_syscall_exit("sys"#sname);			\
+	unreg_prof_syscall_exit("sys"#sname);				\
 }
 
 #define TRACE_SYS_ENTER_PROFILE_INIT(sname)				\
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 72a3b437b829..a0361cb69769 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -382,20 +382,14 @@ static inline int ftrace_get_offsets_##call(			\
  *
  * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
  *
- * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * static int ftrace_profile_enable_<call>(void)
  * {
- * 	int ret = 0;
- *
- * 	if (!atomic_inc_return(&event_call->profile_count))
- * 		ret = register_trace_<call>(ftrace_profile_<call>);
- *
- * 	return ret;
+ * 	return register_trace_<call>(ftrace_profile_<call>);
  * }
  *
- * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * static void ftrace_profile_disable_<call>(void)
  * {
- * 	if (atomic_add_negative(-1, &event->call->profile_count))
- * 		unregister_trace_<call>(ftrace_profile_<call>);
+ * 	unregister_trace_<call>(ftrace_profile_<call>);
  * }
  *
  */
@@ -405,20 +399,14 @@ static inline int ftrace_get_offsets_##call(			\
 									\
 static void ftrace_profile_##call(proto);				\
 									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+static int ftrace_profile_enable_##call(void)				\
 {									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
+	return register_trace_##call(ftrace_profile_##call);		\
 }									\
 									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+static void ftrace_profile_disable_##call(void)			\
 {									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
+	unregister_trace_##call(ftrace_profile_##call);			\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -660,11 +648,12 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *	struct ftrace_raw_##call *entry;
  *	u64 __addr = 0, __count = 1;
  *	unsigned long irq_flags;
+ *	struct trace_entry *ent;
  *	int __entry_size;
  *	int __data_size;
+ *	int __cpu
  *	int pc;
  *
- *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
  *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
@@ -675,25 +664,34 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *			     sizeof(u64));
  *	__entry_size -= sizeof(u32);
  *
- *	do {
- *		char raw_data[__entry_size]; <- allocate our sample in the stack
- *		struct trace_entry *ent;
+ *	// Protect the non nmi buffer
+ *	// This also protects the rcu read side
+ *	local_irq_save(irq_flags);
+ *	__cpu = smp_processor_id();
+ *
+ *	if (in_nmi())
+ *		raw_data = rcu_dereference(trace_profile_buf_nmi);
+ *	else
+ *		raw_data = rcu_dereference(trace_profile_buf);
+ *
+ *	if (!raw_data)
+ *		goto end;
  *
- *		zero dead bytes from alignment to avoid stack leak to userspace:
+ *	raw_data = per_cpu_ptr(raw_data, __cpu);
  *
- *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *		entry = (struct ftrace_raw_<call> *)raw_data;
- *		ent = &entry->ent;
- *		tracing_generic_entry_update(ent, irq_flags, pc);
- *		ent->type = event_call->id;
+ *	//zero dead bytes from alignment to avoid stack leak to userspace:
+ *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *	entry = (struct ftrace_raw_<call> *)raw_data;
+ *	ent = &entry->ent;
+ *	tracing_generic_entry_update(ent, irq_flags, pc);
+ *	ent->type = event_call->id;
  *
- *		<tstruct> <- do some jobs with dynamic arrays
+ *	<tstruct> <- do some jobs with dynamic arrays
  *
- *		<assign>  <- affect our values
+ *	<assign> <- affect our values
  *
- *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
- *			     __entry_size);  <- submit them to perf counter
- *	} while (0);
+ *	perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *		     __entry_size);  <- submit them to perf counter
  *
  * }
  */
@@ -716,11 +714,13 @@ static void ftrace_profile_##call(proto)		\
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
+	struct trace_entry *ent;					\
 	int __entry_size;						\
 	int __data_size;						\
+	char *raw_data;							\
+	int __cpu;							\
 	int pc;								\
 									\
-	local_save_flags(irq_flags);					\
 	pc = preempt_count();						\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
@@ -728,23 +728,38 @@ static void ftrace_profile_##call(proto)		\
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	do {								\
-		char raw_data[__entry_size];				\
-		struct trace_entry *ent;				\
+	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+		      "profile buffer not large enough"))		\
+		return;							\
+									\
+	local_irq_save(irq_flags);					\
+	__cpu = smp_processor_id();					\
 									\
-		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
-		entry = (struct ftrace_raw_##call *)raw_data;		\
-		ent = &entry->ent;					\
-		tracing_generic_entry_update(ent, irq_flags, pc);	\
-		ent->type = event_call->id;				\
+	if (in_nmi())							\
+		raw_data = rcu_dereference(trace_profile_buf_nmi);	\
+	else								\
+		raw_data = rcu_dereference(trace_profile_buf);		\
 									\
-		tstruct							\
+	if (!raw_data)							\
+		goto end;						\
 									\
-		{ assign; }						\
+	raw_data = per_cpu_ptr(raw_data, __cpu);			\
 									\
-		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
+	entry = (struct ftrace_raw_##call *)raw_data;			\
+	ent = &entry->ent;						\
+	tracing_generic_entry_update(ent, irq_flags, pc);		\
+	ent->type = event_call->id;					\
+									\
+	tstruct								\
+									\
+	{ assign; }							\
+									\
+	perf_tpcounter_event(event_call->id, __addr, __count, entry,	\
 			     __entry_size);				\
-	} while (0);							\
+									\
+end:									\
+	local_irq_restore(irq_flags);					\
 									\
 }
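The reference counting dropped from the generated per-event stubs above does not disappear; it moves into the core, which counts enablers once and calls the now-argumentless profile_enable()/profile_disable() hooks only on the first and last transition. A minimal sketch of that centralised pattern, assuming helper names ftrace_profile_enable_event()/ftrace_profile_disable_event() (the names and placement are illustrative, not shown in this diff), and assuming profile_count is initialised to -1 as the removed per-event code implies:

/* Sketch only: centralised refcounting for event profiling. */
#include <linux/ftrace_event.h>

static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	/*
	 * With profile_count starting at -1, atomic_inc_return()
	 * yields 0 only on the -1 -> 0 transition, so only the
	 * first enabler registers the tracepoint.
	 */
	if (atomic_inc_return(&event->profile_count))
		return 0;

	return event->profile_enable();
}

static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	/*
	 * atomic_add_negative() is true only on the 0 -> -1
	 * transition, so only the last disabler unregisters.
	 */
	if (!atomic_add_negative(-1, &event->profile_count))
		return;

	event->profile_disable();
}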
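The trace_profile_buf/trace_profile_buf_nmi pointers declared in ftrace_event.h are the other half of the change: instead of a variable-length array on the stack, each cpu gets one FTRACE_MAX_PROFILE_SIZE scratch slot, allocated only while at least one event is being profiled. A hedged sketch of how such buffers can be allocated lazily and published with RCU, so the fast path's rcu_dereference() sees either NULL or a fully initialised buffer (total_profile_count and the function name are assumptions for illustration):

#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/ftrace_event.h>

/* One scratch slot of FTRACE_MAX_PROFILE_SIZE bytes per cpu. */
typedef char profile_buf_t[FTRACE_MAX_PROFILE_SIZE];

char *trace_profile_buf;
char *trace_profile_buf_nmi;

/* Assumed bookkeeping: how many events currently have profiling on. */
static int total_profile_count;

static int trace_profile_alloc_buffers(void)
{
	char *buf;

	if (total_profile_count++)
		return 0;	/* buffers already allocated */

	buf = (char *)alloc_percpu(profile_buf_t);
	if (!buf)
		goto fail;
	/* Publish with RCU so readers see NULL or a complete buffer. */
	rcu_assign_pointer(trace_profile_buf, buf);

	/* A second buffer keeps NMIs from clobbering an in-flight sample. */
	buf = (char *)alloc_percpu(profile_buf_t);
	if (!buf)
		goto fail_nmi;
	rcu_assign_pointer(trace_profile_buf_nmi, buf);

	return 0;

fail_nmi:
	free_percpu(trace_profile_buf);
	trace_profile_buf = NULL;
fail:
	total_profile_count--;
	return -ENOMEM;
}

The two-buffer split mirrors the in_nmi() test in the generated fast path: an NMI arriving while a sample is being built in the regular per-cpu slot would otherwise overwrite it, since irq disabling cannot keep NMIs out.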
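The matching teardown has to wait for in-flight ftrace_profile_<call> invocations before freeing. Because the fast path dereferences the buffers with interrupts disabled, a sched-RCU grace period is sufficient on a kernel of this vintage. Again a sketch under the same assumed names:

static void trace_profile_free_buffers(void)
{
	char *buf, *nmi_buf;

	if (--total_profile_count)
		return;		/* another event is still profiling */

	buf = trace_profile_buf;
	rcu_assign_pointer(trace_profile_buf, NULL);

	nmi_buf = trace_profile_buf_nmi;
	rcu_assign_pointer(trace_profile_buf_nmi, NULL);

	/*
	 * Readers run with irqs off, so waiting for a sched-RCU
	 * grace period guarantees no cpu is still writing into
	 * the buffers when we free them.
	 */
	synchronize_sched();

	free_percpu(buf);
	free_percpu(nmi_buf);
}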