Diffstat (limited to 'include')
 include/linux/cpuset.h        |  16
 include/linux/ftrace.h        |  49
 include/linux/ftrace_event.h  |  81
 include/linux/kernel.h        |  11
 include/linux/module.h        |   6
 include/linux/rcutiny.h       |   2
 include/linux/rcutree.h       |   1
 include/linux/ring_buffer.h   |  10
 include/linux/sched.h         |  70
 include/linux/stop_machine.h  | 122
 include/linux/syscalls.h      |  57
 include/linux/tick.h          |   5
 include/linux/tracepoint.h    | 196
 include/linux/wait.h          |  35
 include/trace/define_trace.h  |   5
 include/trace/events/module.h |  18
 include/trace/events/napi.h   |  10
 include/trace/events/sched.h  |  32
 include/trace/events/signal.h |  52
 include/trace/ftrace.h        | 274
 include/trace/syscall.h       |  10
 21 files changed, 582 insertions(+), 480 deletions(-)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a5740fc4d04b..a73454aec333 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -21,8 +21,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
- struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -69,9 +68,6 @@ struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
extern int cpuset_mem_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
@@ -105,10 +101,11 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
{
cpumask_copy(mask, cpu_possible_mask);
}
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
- struct cpumask *mask)
+
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
- cpumask_copy(mask, cpu_possible_mask);
+ cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+ return cpumask_any(cpu_active_mask);
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -157,9 +154,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
{
}
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
static inline int cpuset_mem_spread_node(void)
{
return 0;
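
The locked variant and its cpuset_lock()/cpuset_unlock() pair are gone; the replacement is self-contained and also picks the destination CPU. A hedged sketch of the intended call pattern (illustrative, not the actual scheduler code; pick_fallback_cpu is a made-up name):

static int pick_fallback_cpu(struct task_struct *p)
{
	int dest_cpu;

	/* Prefer any online CPU the task is still allowed on. */
	dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
	if (dest_cpu < nr_cpu_ids)
		return dest_cpu;

	/*
	 * Every allowed CPU is offline: let cpuset widen
	 * p->cpus_allowed and hand back a usable CPU.
	 */
	return cpuset_cpus_allowed_fallback(p);
}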
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index cc12b3c556b3..41e46330d9be 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -82,9 +82,13 @@ void clear_ftrace_function(void);
extern void ftrace_stub(unsigned long a0, unsigned long a1);
#else /* !CONFIG_FUNCTION_TRACER */
-# define register_ftrace_function(ops) do { } while (0)
-# define unregister_ftrace_function(ops) do { } while (0)
-# define clear_ftrace_function(ops) do { } while (0)
+/*
+ * (un)register_ftrace_function must be a macro since the ops parameter
+ * must not be evaluated.
+ */
+#define register_ftrace_function(ops) ({ 0; })
+#define unregister_ftrace_function(ops) ({ 0; })
+static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
@@ -237,11 +241,13 @@ extern int skip_trace(unsigned long ip);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
-# define skip_trace(ip) ({ 0; })
-# define ftrace_force_update() ({ 0; })
-# define ftrace_set_filter(buf, len, reset) do { } while (0)
-# define ftrace_disable_daemon() do { } while (0)
-# define ftrace_enable_daemon() do { } while (0)
+static inline int skip_trace(unsigned long ip) { return 0; }
+static inline int ftrace_force_update(void) { return 0; }
+static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
+{
+}
+static inline void ftrace_disable_daemon(void) { }
+static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
@@ -314,16 +320,16 @@ static inline void __ftrace_enabled_restore(int enabled)
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
-# define time_hardirqs_on(a0, a1) do { } while (0)
-# define time_hardirqs_off(a0, a1) do { } while (0)
+ static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
+ static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif
#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
-# define trace_preempt_on(a0, a1) do { } while (0)
-# define trace_preempt_off(a0, a1) do { } while (0)
+ static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
+ static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
@@ -352,6 +358,10 @@ struct ftrace_graph_ret {
int depth;
};
+/* Type of the callback handlers for tracing function graph */
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* for init task */
@@ -400,10 +410,6 @@ extern char __irqentry_text_end[];
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of the callback handlers for tracing function graph*/
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
-
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc);
@@ -441,6 +447,13 @@ static inline void unpause_graph_tracing(void)
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc)
+{
+ return -1;
+}
+static inline void unregister_ftrace_graph(void) { }
+
static inline int task_curr_ret_stack(struct task_struct *tsk)
{
return -1;
@@ -492,7 +505,9 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
return tsk->trace & TSK_TRACE_FL_GRAPH;
}
-extern int ftrace_dump_on_oops;
+enum ftrace_dump_mode;
+
+extern enum ftrace_dump_mode ftrace_dump_on_oops;
#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION .trace_recursion = 0,
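
Note the pattern in the !CONFIG stubs above: functions whose arguments are always safe to evaluate become static inlines (gaining type checking), while register_ftrace_function()/unregister_ftrace_function() stay macros precisely so the ops argument is never evaluated. A contrived sketch of why that matters (my example; my_ops and my_trace_func are hypothetical):

#ifdef CONFIG_FUNCTION_TRACER
static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};
#endif

static int __init my_init(void)
{
	/*
	 * With CONFIG_FUNCTION_TRACER=n this call expands to ({ 0; }),
	 * so &my_ops is never referenced and my_ops may legitimately
	 * not exist.  A static inline stub would still evaluate the
	 * argument and fail to build.
	 */
	return register_ftrace_function(&my_ops);
}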
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c0f4b364c711..dc7fc646fa2e 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -58,6 +58,7 @@ struct trace_iterator {
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
struct trace_entry *ent;
+ unsigned long lost_events;
int leftover;
int cpu;
u64 ts;
@@ -69,18 +70,25 @@ struct trace_iterator {
};
+struct trace_event;
+
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
- int flags);
-struct trace_event {
- struct hlist_node node;
- struct list_head list;
- int type;
+ int flags, struct trace_event *event);
+
+struct trace_event_functions {
trace_print_func trace;
trace_print_func raw;
trace_print_func hex;
trace_print_func binary;
};
+struct trace_event {
+ struct hlist_node node;
+ struct list_head list;
+ int type;
+ struct trace_event_functions *funcs;
+};
+
extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);
@@ -112,28 +120,67 @@ void tracing_record_cmdline(struct task_struct *tsk);
struct event_filter;
+enum trace_reg {
+ TRACE_REG_REGISTER,
+ TRACE_REG_UNREGISTER,
+ TRACE_REG_PERF_REGISTER,
+ TRACE_REG_PERF_UNREGISTER,
+};
+
+struct ftrace_event_call;
+
+struct ftrace_event_class {
+ char *system;
+ void *probe;
+#ifdef CONFIG_PERF_EVENTS
+ void *perf_probe;
+#endif
+ int (*reg)(struct ftrace_event_call *event,
+ enum trace_reg type);
+ int (*define_fields)(struct ftrace_event_call *);
+ struct list_head *(*get_fields)(struct ftrace_event_call *);
+ struct list_head fields;
+ int (*raw_init)(struct ftrace_event_call *);
+};
+
+enum {
+ TRACE_EVENT_FL_ENABLED_BIT,
+ TRACE_EVENT_FL_FILTERED_BIT,
+};
+
+enum {
+ TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
+ TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
+};
+
struct ftrace_event_call {
struct list_head list;
+ struct ftrace_event_class *class;
char *name;
- char *system;
struct dentry *dir;
- struct trace_event *event;
- int enabled;
- int (*regfunc)(struct ftrace_event_call *);
- void (*unregfunc)(struct ftrace_event_call *);
- int id;
+ struct trace_event event;
const char *print_fmt;
- int (*raw_init)(struct ftrace_event_call *);
- int (*define_fields)(struct ftrace_event_call *);
- struct list_head fields;
- int filter_active;
struct event_filter *filter;
void *mod;
void *data;
+ /*
+ * 32 bit flags:
+ * bit 1: enabled
+ * bit 2: filter_active
+ *
+ * Changes to flags must hold the event_mutex.
+ *
+ * Note: Reads of flags do not hold the event_mutex since
+ * they occur in critical sections. But the way flags
+ * is currently used, these changes do not affect the code
+ * except that when a change is made, it may have a slight
+ * delay in propagating the changes to other CPUs due to
+ * caching and such.
+ */
+ unsigned int flags;
+
int perf_refcount;
- int (*perf_event_enable)(struct ftrace_event_call *);
- void (*perf_event_disable)(struct ftrace_event_call *);
};
#define PERF_MAX_TRACE_SIZE 2048
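
The separate enabled and filter_active ints collapse into one flags word: writers serialize on event_mutex, readers are deliberately lockless and tolerate brief staleness, as the new comment explains. A hedged sketch of the access pattern (assuming the tracing core's event_mutex):

static void event_set_enabled(struct ftrace_event_call *call, bool on)
{
	mutex_lock(&event_mutex);		/* all flag updates hold this */
	if (on)
		call->flags |= TRACE_EVENT_FL_ENABLED;
	else
		call->flags &= ~TRACE_EVENT_FL_ENABLED;
	mutex_unlock(&event_mutex);
}

static bool event_enabled(struct ftrace_event_call *call)
{
	/* lockless read; a slightly stale value is acceptable here */
	return call->flags & TRACE_EVENT_FL_ENABLED;
}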
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 9365227dbaf6..9fb1c1299032 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -490,6 +490,13 @@ static inline void tracing_off(void) { }
static inline void tracing_off_permanent(void) { }
static inline int tracing_is_on(void) { return 0; }
#endif
+
+enum ftrace_dump_mode {
+ DUMP_NONE,
+ DUMP_ALL,
+ DUMP_ORIG,
+};
+
#ifdef CONFIG_TRACING
extern void tracing_start(void);
extern void tracing_stop(void);
@@ -571,7 +578,7 @@ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
extern int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
-extern void ftrace_dump(void);
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
@@ -592,7 +599,7 @@ ftrace_vprintk(const char *fmt, va_list ap)
{
return 0;
}
-static inline void ftrace_dump(void) { }
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
/*
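
ftrace_dump() now takes the dump mode explicitly, and ftrace_dump_on_oops (declared in the ftrace.h hunk above) holds the same enum, so an oops path can forward it directly. Illustrative caller (the hook name is hypothetical):

void my_oops_notifier(void)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);	/* DUMP_ALL or DUMP_ORIG */
}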
diff --git a/include/linux/module.h b/include/linux/module.h
index 515d53ae6a79..6914fcad4673 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -465,8 +465,7 @@ static inline void __module_get(struct module *module)
if (module) {
preempt_disable();
__this_cpu_inc(module->refptr->incs);
- trace_module_get(module, _THIS_IP_,
- __this_cpu_read(module->refptr->incs));
+ trace_module_get(module, _THIS_IP_);
preempt_enable();
}
}
@@ -480,8 +479,7 @@ static inline int try_module_get(struct module *module)
if (likely(module_is_live(module))) {
__this_cpu_inc(module->refptr->incs);
- trace_module_get(module, _THIS_IP_,
- __this_cpu_read(module->refptr->incs));
+ trace_module_get(module, _THIS_IP_);
} else
ret = 0;
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index a5195875480a..0006b2df00e1 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -60,8 +60,6 @@ static inline long rcu_batches_completed_bh(void)
return 0;
}
-extern int rcu_expedited_torture_stats(char *page);
-
static inline void rcu_force_quiescent_state(void)
{
}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 42cc3a04779e..24e467e526b8 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -35,7 +35,6 @@ struct notifier_block;
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern int rcu_needs_cpu(int cpu);
-extern int rcu_expedited_torture_stats(char *page);
#ifdef CONFIG_TREE_PREEMPT_RCU
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 5fcc31ed5771..25b4f686d918 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -120,12 +120,16 @@ int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length, void *data);
struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ unsigned long *lost_events);
struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ unsigned long *lost_events);
struct ring_buffer_iter *
-ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+void ring_buffer_read_prepare_sync(void);
+void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
struct ring_buffer_event *
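
ring_buffer_peek()/ring_buffer_consume() gain a lost_events out-parameter so callers can report drops, and the non-consuming read setup is split into prepare/sync/start so the expensive synchronization happens once for all CPUs instead of once per CPU. A sketch of the new sequence (error handling elided):

static void read_all_cpus(struct ring_buffer *buffer,
			  struct ring_buffer_iter **iters)
{
	int cpu;

	for_each_online_cpu(cpu)
		iters[cpu] = ring_buffer_read_prepare(buffer, cpu);

	/* one synchronization pass covers every prepared iterator */
	ring_buffer_read_prepare_sync();

	for_each_online_cpu(cpu)
		ring_buffer_read_start(iters[cpu]);

	/* ... walk events, e.g. with ring_buffer_read() ... */

	for_each_online_cpu(cpu)
		ring_buffer_read_finish(iters[cpu]);
}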
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e0447c64af6a..2a5b146fbaf9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -274,11 +274,17 @@ extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
extern int get_nohz_load_balancer(void);
+extern int nohz_ratelimit(int cpu);
#else
static inline int select_nohz_load_balancer(int cpu)
{
return 0;
}
+
+static inline int nohz_ratelimit(int cpu)
+{
+ return 0;
+}
#endif
/*
@@ -953,6 +959,7 @@ struct sched_domain {
char *name;
#endif
+ unsigned int span_weight;
/*
* Span of all CPUs in this domain.
*
@@ -1025,12 +1032,17 @@ struct sched_domain;
#define WF_SYNC 0x01 /* waker goes to sleep after wakup */
#define WF_FORK 0x02 /* child wakeup after fork */
+#define ENQUEUE_WAKEUP 1
+#define ENQUEUE_WAKING 2
+#define ENQUEUE_HEAD 4
+
+#define DEQUEUE_SLEEP 1
+
struct sched_class {
const struct sched_class *next;
- void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
- bool head);
- void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+ void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
+ void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
void (*yield_task) (struct rq *rq);
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
@@ -1039,7 +1051,8 @@ struct sched_class {
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
- int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+ int (*select_task_rq)(struct rq *rq, struct task_struct *p,
+ int sd_flag, int flags);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
@@ -1076,36 +1089,8 @@ struct load_weight {
unsigned long weight, inv_weight;
};
-/*
- * CFS stats for a schedulable entity (task, task-group etc)
- *
- * Current field usage histogram:
- *
- * 4 se->block_start
- * 4 se->run_node
- * 4 se->sleep_start
- * 6 se->load.weight
- */
-struct sched_entity {
- struct load_weight load; /* for load-balancing */
- struct rb_node run_node;
- struct list_head group_node;
- unsigned int on_rq;
-
- u64 exec_start;
- u64 sum_exec_runtime;
- u64 vruntime;
- u64 prev_sum_exec_runtime;
-
- u64 last_wakeup;
- u64 avg_overlap;
-
- u64 nr_migrations;
-
- u64 start_runtime;
- u64 avg_wakeup;
-
#ifdef CONFIG_SCHEDSTATS
+struct sched_statistics {
u64 wait_start;
u64 wait_max;
u64 wait_count;
@@ -1137,6 +1122,24 @@ struct sched_entity {
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
+};
+#endif
+
+struct sched_entity {
+ struct load_weight load; /* for load-balancing */
+ struct rb_node run_node;
+ struct list_head group_node;
+ unsigned int on_rq;
+
+ u64 exec_start;
+ u64 sum_exec_runtime;
+ u64 vruntime;
+ u64 prev_sum_exec_runtime;
+
+ u64 nr_migrations;
+
+#ifdef CONFIG_SCHEDSTATS
+ struct sched_statistics statistics;
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1840,6 +1843,7 @@ extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#ifdef CONFIG_HOTPLUG_CPU
+extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
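
enqueue_task()/dequeue_task() swap their ad-hoc wakeup/head/sleep parameters for a flags word, so call sites compose behavior instead of threading booleans through every sched_class. A hedged sketch of a caller (the helper names are made up):

static void enqueue_wakeup(struct rq *rq, struct task_struct *p, bool head)
{
	int flags = ENQUEUE_WAKEUP;

	if (head)
		flags |= ENQUEUE_HEAD;	/* queue at the front of the runqueue */
	p->sched_class->enqueue_task(rq, p, flags);
}

static void dequeue_sleep(struct rq *rq, struct task_struct *p)
{
	p->sched_class->dequeue_task(rq, p, DEQUEUE_SLEEP);
}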
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index baba3a23a814..6b524a0d02e4 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -1,13 +1,101 @@
#ifndef _LINUX_STOP_MACHINE
#define _LINUX_STOP_MACHINE
-/* "Bogolock": stop the entire machine, disable interrupts. This is a
- very heavy lock, which is equivalent to grabbing every spinlock
- (and more). So the "read" side to such a lock is anything which
- disables preeempt. */
+
#include <linux/cpu.h>
#include <linux/cpumask.h>
+#include <linux/list.h>
#include <asm/system.h>
+/*
+ * stop_cpu[s]() is a simplistic per-cpu, maximum-priority cpu
+ * monopolization mechanism. The caller can specify a non-sleeping
+ * function to be executed on a single or multiple cpus preempting all
+ * other processes and monopolizing those cpus until it finishes.
+ *
+ * Resources for this mechanism are preallocated when a cpu is brought
+ * up and requests are guaranteed to be served as long as the target
+ * cpus are online.
+ */
+typedef int (*cpu_stop_fn_t)(void *arg);
+
+#ifdef CONFIG_SMP
+
+struct cpu_stop_work {
+ struct list_head list; /* cpu_stopper->works */
+ cpu_stop_fn_t fn;
+ void *arg;
+ struct cpu_stop_done *done;
+};
+
+int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+ struct cpu_stop_work *work_buf);
+int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+
+#else /* CONFIG_SMP */
+
+#include <linux/workqueue.h>
+
+struct cpu_stop_work {
+ struct work_struct work;
+ cpu_stop_fn_t fn;
+ void *arg;
+};
+
+static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
+{
+ int ret = -ENOENT;
+ preempt_disable();
+ if (cpu == smp_processor_id())
+ ret = fn(arg);
+ preempt_enable();
+ return ret;
+}
+
+static void stop_one_cpu_nowait_workfn(struct work_struct *work)
+{
+ struct cpu_stop_work *stwork =
+ container_of(work, struct cpu_stop_work, work);
+ preempt_disable();
+ stwork->fn(stwork->arg);
+ preempt_enable();
+}
+
+static inline void stop_one_cpu_nowait(unsigned int cpu,
+ cpu_stop_fn_t fn, void *arg,
+ struct cpu_stop_work *work_buf)
+{
+ if (cpu == smp_processor_id()) {
+ INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
+ work_buf->fn = fn;
+ work_buf->arg = arg;
+ schedule_work(&work_buf->work);
+ }
+}
+
+static inline int stop_cpus(const struct cpumask *cpumask,
+ cpu_stop_fn_t fn, void *arg)
+{
+ if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
+ return stop_one_cpu(raw_smp_processor_id(), fn, arg);
+ return -ENOENT;
+}
+
+static inline int try_stop_cpus(const struct cpumask *cpumask,
+ cpu_stop_fn_t fn, void *arg)
+{
+ return stop_cpus(cpumask, fn, arg);
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * stop_machine "Bogolock": stop the entire machine, disable
+ * interrupts. This is a very heavy lock, which is equivalent to
+ * grabbing every spinlock (and more). So the "read" side to such a
+ * lock is anything which disables preemption.
+ */
#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
/**
@@ -36,24 +124,7 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
*/
int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
-/**
- * stop_machine_create: create all stop_machine threads
- *
- * Description: This causes all stop_machine threads to be created before
- * stop_machine actually gets called. This can be used by subsystems that
- * need a non failing stop_machine infrastructure.
- */
-int stop_machine_create(void);
-
-/**
- * stop_machine_destroy: destroy all stop_machine threads
- *
- * Description: This causes all stop_machine threads which were created with
- * stop_machine_create to be destroyed again.
- */
-void stop_machine_destroy(void);
-
-#else
+#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
static inline int stop_machine(int (*fn)(void *), void *data,
const struct cpumask *cpus)
@@ -65,8 +136,5 @@ static inline int stop_machine(int (*fn)(void *), void *data,
return ret;
}
-static inline int stop_machine_create(void) { return 0; }
-static inline void stop_machine_destroy(void) { }
-
-#endif /* CONFIG_SMP */
-#endif /* _LINUX_STOP_MACHINE */
+#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif /* _LINUX_STOP_MACHINE */
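
The new cpu_stop interface is the primitive stop_machine() is now built on: the callback runs on the target CPU with everything else preempted, stop_one_cpu() waits for the result, and stop_one_cpu_nowait() queues preallocated work from atomic context. A usage sketch (apply_state and its payload are hypothetical):

static int apply_state(void *arg)
{
	int *value = arg;

	/* runs on the target CPU, which is monopolized for the duration */
	return *value ? 0 : -EINVAL;
}

static struct cpu_stop_work my_work;	/* must outlive the request */

static void example(int *value)
{
	int ret;

	/* synchronous: returns the callback's result, or -ENOENT */
	ret = stop_one_cpu(3, apply_state, value);
	if (ret)
		printk(KERN_WARNING "cpu_stop failed: %d\n", ret);

	/* asynchronous: callable from atomic context, result not reported */
	stop_one_cpu_nowait(3, apply_state, value, &my_work);
}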
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 057929b0a651..a1a86a53bc73 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -103,22 +103,6 @@ struct perf_event_attr;
#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
-#ifdef CONFIG_PERF_EVENTS
-
-#define TRACE_SYS_ENTER_PERF_INIT(sname) \
- .perf_event_enable = perf_sysenter_enable, \
- .perf_event_disable = perf_sysenter_disable,
-
-#define TRACE_SYS_EXIT_PERF_INIT(sname) \
- .perf_event_enable = perf_sysexit_enable, \
- .perf_event_disable = perf_sysexit_disable,
-#else
-#define TRACE_SYS_ENTER_PERF(sname)
-#define TRACE_SYS_ENTER_PERF_INIT(sname)
-#define TRACE_SYS_EXIT_PERF(sname)
-#define TRACE_SYS_EXIT_PERF_INIT(sname)
-#endif /* CONFIG_PERF_EVENTS */
-
#ifdef CONFIG_FTRACE_SYSCALLS
#define __SC_STR_ADECL1(t, a) #a
#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
@@ -134,54 +118,43 @@ struct perf_event_attr;
#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
+extern struct ftrace_event_class event_class_syscall_enter;
+extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_functions enter_syscall_print_funcs;
+extern struct trace_event_functions exit_syscall_print_funcs;
+
#define SYSCALL_TRACE_ENTER_EVENT(sname) \
- static const struct syscall_metadata __syscall_meta_##sname; \
+ static struct syscall_metadata __syscall_meta_##sname; \
static struct ftrace_event_call \
__attribute__((__aligned__(4))) event_enter_##sname; \
- static struct trace_event enter_syscall_print_##sname = { \
- .trace = print_syscall_enter, \
- }; \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) \
event_enter_##sname = { \
.name = "sys_enter"#sname, \
- .system = "syscalls", \
- .event = &enter_syscall_print_##sname, \
- .raw_init = init_syscall_trace, \
- .define_fields = syscall_enter_define_fields, \
- .regfunc = reg_event_syscall_enter, \
- .unregfunc = unreg_event_syscall_enter, \
+ .class = &event_class_syscall_enter, \
+ .event.funcs = &enter_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
- TRACE_SYS_ENTER_PERF_INIT(sname) \
}
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
- static const struct syscall_metadata __syscall_meta_##sname; \
+ static struct syscall_metadata __syscall_meta_##sname; \
static struct ftrace_event_call \
__attribute__((__aligned__(4))) event_exit_##sname; \
- static struct trace_event exit_syscall_print_##sname = { \
- .trace = print_syscall_exit, \
- }; \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) \
event_exit_##sname = { \
.name = "sys_exit"#sname, \
- .system = "syscalls", \
- .event = &exit_syscall_print_##sname, \
- .raw_init = init_syscall_trace, \
- .define_fields = syscall_exit_define_fields, \
- .regfunc = reg_event_syscall_exit, \
- .unregfunc = unreg_event_syscall_exit, \
+ .class = &event_class_syscall_exit, \
+ .event.funcs = &exit_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
- TRACE_SYS_EXIT_PERF_INIT(sname) \
}
#define SYSCALL_METADATA(sname, nb) \
SYSCALL_TRACE_ENTER_EVENT(sname); \
SYSCALL_TRACE_EXIT_EVENT(sname); \
- static const struct syscall_metadata __used \
+ static struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
__syscall_meta_##sname = { \
@@ -191,12 +164,14 @@ struct perf_event_attr;
.args = args_##sname, \
.enter_event = &event_enter_##sname, \
.exit_event = &event_exit_##sname, \
+ .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
+ .exit_fields = LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
};
#define SYSCALL_DEFINE0(sname) \
SYSCALL_TRACE_ENTER_EVENT(_##sname); \
SYSCALL_TRACE_EXIT_EVENT(_##sname); \
- static const struct syscall_metadata __used \
+ static struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
__syscall_meta__##sname = { \
@@ -204,6 +179,8 @@ struct perf_event_attr;
.nb_args = 0, \
.enter_event = &event_enter__##sname, \
.exit_event = &event_exit__##sname, \
+ .enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
+ .exit_fields = LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
}; \
asmlinkage long sys_##sname(void)
#else
diff --git a/include/linux/tick.h b/include/linux/tick.h
index d2ae79e21be3..b232ccc0ee29 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -42,6 +42,7 @@ enum tick_nohz_mode {
* @idle_waketime: Time when the idle was interrupted
* @idle_exittime: Time when the idle state was left
* @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
+ * @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding
* @sleep_length: Duration of the current idle sleep
* @do_timer_lst: CPU was the last one doing do_timer before going idle
*/
@@ -60,7 +61,7 @@ struct tick_sched {
ktime_t idle_waketime;
ktime_t idle_exittime;
ktime_t idle_sleeptime;
- ktime_t idle_lastupdate;
+ ktime_t iowait_sleeptime;
ktime_t sleep_length;
unsigned long last_jiffies;
unsigned long next_jiffies;
@@ -124,6 +125,7 @@ extern void tick_nohz_stop_sched_tick(int inidle);
extern void tick_nohz_restart_sched_tick(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
+extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
# else
static inline void tick_nohz_stop_sched_tick(int inidle) { }
static inline void tick_nohz_restart_sched_tick(void) { }
@@ -134,6 +136,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
return len;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
+static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !NO_HZ */
#endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 78b4bd3be496..9a59d1f98cd4 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -20,12 +20,17 @@
struct module;
struct tracepoint;
+struct tracepoint_func {
+ void *func;
+ void *data;
+};
+
struct tracepoint {
const char *name; /* Tracepoint name */
int state; /* State. */
void (*regfunc)(void);
void (*unregfunc)(void);
- void **funcs;
+ struct tracepoint_func *funcs;
} __attribute__((aligned(32))); /*
* Aligned on 32 bytes because it is
* globally visible and gcc happily
@@ -33,6 +38,68 @@ struct tracepoint {
* Keep in sync with vmlinux.lds.h.
*/
+/*
+ * Connect a probe to a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_register(const char *name, void *probe, void *data);
+
+/*
+ * Disconnect a probe from a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int
+tracepoint_probe_unregister(const char *name, void *probe, void *data);
+
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
+ void *data);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
+ void *data);
+extern void tracepoint_probe_update_all(void);
+
+struct tracepoint_iter {
+ struct module *module;
+ struct tracepoint *tracepoint;
+};
+
+extern void tracepoint_iter_start(struct tracepoint_iter *iter);
+extern void tracepoint_iter_next(struct tracepoint_iter *iter);
+extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
+extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
+extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+ struct tracepoint *begin, struct tracepoint *end);
+
+/*
+ * tracepoint_synchronize_unregister must be called between the last tracepoint
+ * probe unregistration and the end of module exit to make sure there is no
+ * caller executing a probe when it is freed.
+ */
+static inline void tracepoint_synchronize_unregister(void)
+{
+ synchronize_sched();
+}
+
+#define PARAMS(args...) args
+
+#ifdef CONFIG_TRACEPOINTS
+extern void tracepoint_update_probe_range(struct tracepoint *begin,
+ struct tracepoint *end);
+#else
+static inline void tracepoint_update_probe_range(struct tracepoint *begin,
+ struct tracepoint *end)
+{ }
+#endif /* CONFIG_TRACEPOINTS */
+
+#endif /* _LINUX_TRACEPOINT_H */
+
+/*
+ * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include
+ * file ifdef protection.
+ * This is due to the way trace events work. If a file includes two
+ * trace event headers under one "CREATE_TRACE_POINTS" the first include
+ * will override the TRACE_EVENT and break the second include.
+ */
+
#ifndef DECLARE_TRACE
#define TP_PROTO(args...) args
@@ -43,17 +110,27 @@ struct tracepoint {
/*
* it_func[0] is never NULL because there is at least one element in the array
* when the array itself is non NULL.
+ *
+ * Note: the proto and args passed in include "__data" as the first parameter.
+ * The reason for this is to handle the "void" prototype. If a tracepoint
+ * has a "void" prototype, then it is invalid to declare a function
+ * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
+ * "void *data", whereas the DECLARE_TRACE() will pass in "void *data, proto".
*/
#define __DO_TRACE(tp, proto, args) \
do { \
- void **it_func; \
+ struct tracepoint_func *it_func_ptr; \
+ void *it_func; \
+ void *__data; \
\
rcu_read_lock_sched_notrace(); \
- it_func = rcu_dereference_sched((tp)->funcs); \
- if (it_func) { \
+ it_func_ptr = rcu_dereference_sched((tp)->funcs); \
+ if (it_func_ptr) { \
do { \
- ((void(*)(proto))(*it_func))(args); \
- } while (*(++it_func)); \
+ it_func = (it_func_ptr)->func; \
+ __data = (it_func_ptr)->data; \
+ ((void(*)(proto))(it_func))(args); \
+ } while ((++it_func_ptr)->func); \
} \
rcu_read_unlock_sched_notrace(); \
} while (0)
@@ -63,24 +140,32 @@ struct tracepoint {
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
*/
-#define DECLARE_TRACE(name, proto, args) \
+#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
if (unlikely(__tracepoint_##name.state)) \
__DO_TRACE(&__tracepoint_##name, \
- TP_PROTO(proto), TP_ARGS(args)); \
+ TP_PROTO(data_proto), \
+ TP_ARGS(data_args)); \
} \
- static inline int register_trace_##name(void (*probe)(proto)) \
+ static inline int \
+ register_trace_##name(void (*probe)(data_proto), void *data) \
{ \
- return tracepoint_probe_register(#name, (void *)probe); \
+ return tracepoint_probe_register(#name, (void *)probe, \
+ data); \
} \
- static inline int unregister_trace_##name(void (*probe)(proto)) \
+ static inline int \
+ unregister_trace_##name(void (*probe)(data_proto), void *data) \
+ { \
+ return tracepoint_probe_unregister(#name, (void *)probe, \
+ data); \
+ } \
+ static inline void \
+ check_trace_callback_type_##name(void (*cb)(data_proto)) \
{ \
- return tracepoint_probe_unregister(#name, (void *)probe);\
}
-
#define DEFINE_TRACE_FN(name, reg, unreg) \
static const char __tpstrtab_##name[] \
__attribute__((section("__tracepoints_strings"))) = #name; \
@@ -96,22 +181,24 @@ struct tracepoint {
#define EXPORT_TRACEPOINT_SYMBOL(name) \
EXPORT_SYMBOL(__tracepoint_##name)
-extern void tracepoint_update_probe_range(struct tracepoint *begin,
- struct tracepoint *end);
-
#else /* !CONFIG_TRACEPOINTS */
-#define DECLARE_TRACE(name, proto, args) \
- static inline void _do_trace_##name(struct tracepoint *tp, proto) \
- { } \
+#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \
static inline void trace_##name(proto) \
{ } \
- static inline int register_trace_##name(void (*probe)(proto)) \
+ static inline int \
+ register_trace_##name(void (*probe)(data_proto), \
+ void *data) \
{ \
return -ENOSYS; \
} \
- static inline int unregister_trace_##name(void (*probe)(proto)) \
+ static inline int \
+ unregister_trace_##name(void (*probe)(data_proto), \
+ void *data) \
{ \
return -ENOSYS; \
+ } \
+ static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
+ { \
}
#define DEFINE_TRACE_FN(name, reg, unreg)
@@ -119,60 +206,31 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
-static inline void tracepoint_update_probe_range(struct tracepoint *begin,
- struct tracepoint *end)
-{ }
#endif /* CONFIG_TRACEPOINTS */
-#endif /* DECLARE_TRACE */
-
-/*
- * Connect a probe to a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_register(const char *name, void *probe);
-
-/*
- * Disconnect a probe from a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_unregister(const char *name, void *probe);
-
-extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
-extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
-extern void tracepoint_probe_update_all(void);
-
-struct tracepoint_iter {
- struct module *module;
- struct tracepoint *tracepoint;
-};
-
-extern void tracepoint_iter_start(struct tracepoint_iter *iter);
-extern void tracepoint_iter_next(struct tracepoint_iter *iter);
-extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
-extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
- struct tracepoint *begin, struct tracepoint *end);
/*
- * tracepoint_synchronize_unregister must be called between the last tracepoint
- * probe unregistration and the end of module exit to make sure there is no
- * caller executing a probe when it is freed.
+ * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype
+ * (void). "void" is a special value in a function prototype and
+ * cannot be combined with other arguments. Since the DECLARE_TRACE()
+ * macro adds a data element at the beginning of the prototype,
+ * we need a way to differentiate "(void *data, proto)" from
+ * "(void *data, void)". The second prototype is invalid.
+ *
+ * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype
+ * and "void *__data" as the callback prototype.
+ *
+ * DECLARE_TRACE() passes "proto" as the tracepoint prototype and
+ * "void *__data, proto" as the callback prototype.
*/
-static inline void tracepoint_synchronize_unregister(void)
-{
- synchronize_sched();
-}
+#define DECLARE_TRACE_NOARGS(name) \
+ __DECLARE_TRACE(name, void, , void *__data, __data)
-#define PARAMS(args...) args
-
-#endif /* _LINUX_TRACEPOINT_H */
+#define DECLARE_TRACE(name, proto, args) \
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ PARAMS(void *__data, proto), \
+ PARAMS(__data, args))
-/*
- * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
- * This is due to the way trace events work. If a file includes two
- * trace event headers under one "CREATE_TRACE_POINTS" the first include
- * will override the TRACE_EVENT and break the second include.
- */
+#endif /* DECLARE_TRACE */
#ifndef TRACE_EVENT
/*
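
This is the heart of the series: every tracepoint probe now takes a private void *data as its implicit first argument, registered and unregistered alongside the function. A sketch of the new probe shape against sched_wakeup as reshaped later in this patch (the counter bookkeeping is illustrative):

static atomic_t wakeup_count;

/* the pointer passed at registration arrives as the first argument */
static void probe_wakeup(void *data, struct task_struct *p, int success)
{
	atomic_t *count = data;

	if (success)
		atomic_inc(count);
}

static int __init hook_wakeup(void)
{
	return register_trace_sched_wakeup(probe_wakeup, &wakeup_count);
}

static void __exit unhook_wakeup(void)
{
	unregister_trace_sched_wakeup(probe_wakeup, &wakeup_count);
	/* ensure no probe is still executing before freeing its data */
	tracepoint_synchronize_unregister();
}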
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a48e16b77d5e..76d96d035ea0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -127,12 +127,26 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
/*
* Used for wake-one threads:
*/
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+ wait_queue_t *wait)
+{
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue(q, wait);
+}
+
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
- wait_queue_t *new)
+ wait_queue_t *new)
{
list_add_tail(&new->task_list, &head->task_list);
}
+static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
+ wait_queue_t *wait)
+{
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue_tail(q, wait);
+}
+
static inline void __remove_wait_queue(wait_queue_head_t *head,
wait_queue_t *old)
{
@@ -404,25 +418,6 @@ do { \
})
/*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
- wait_queue_t * wait)
-{
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue_tail(q, wait);
-}
-
-/*
- * Must be called with the spinlock in the wait_queue_head_t held.
- */
-static inline void remove_wait_queue_locked(wait_queue_head_t *q,
- wait_queue_t * wait)
-{
- __remove_wait_queue(q, wait);
-}
-
-/*
* These are the old interfaces to sleep waiting for an event.
* They are racy. DO NOT use them, use the wait_event* interfaces above.
* We plan to remove these interfaces.
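
The removed *_locked helpers return in double-underscore form, matching the existing convention that __-prefixed waitqueue operations require the caller to already hold q->lock. A minimal sketch:

static void queue_exclusive_waiter(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	/* wake-one waiters sit at the tail, behind non-exclusive ones */
	__add_wait_queue_tail_exclusive(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}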
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 5acfb1eb4df9..1dfab5401511 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -65,6 +65,10 @@
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+/* Make all open coded DECLARE_TRACE nops */
+#undef DECLARE_TRACE
+#define DECLARE_TRACE(name, proto, args)
+
#ifdef CONFIG_EVENT_TRACING
#include <trace/ftrace.h>
#endif
@@ -75,6 +79,7 @@
#undef DEFINE_EVENT
#undef DEFINE_EVENT_PRINT
#undef TRACE_HEADER_MULTI_READ
+#undef DECLARE_TRACE
/* Only undef what we defined in this file */
#ifdef UNDEF_TRACE_INCLUDE_FILE
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 4b0f48ba16a6..c7bb2f0482fe 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -51,11 +51,14 @@ TRACE_EVENT(module_free,
TP_printk("%s", __get_str(name))
);
+#ifdef CONFIG_MODULE_UNLOAD
+/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */
+
DECLARE_EVENT_CLASS(module_refcnt,
- TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+ TP_PROTO(struct module *mod, unsigned long ip),
- TP_ARGS(mod, ip, refcnt),
+ TP_ARGS(mod, ip),
TP_STRUCT__entry(
__field( unsigned long, ip )
@@ -65,7 +68,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
TP_fast_assign(
__entry->ip = ip;
- __entry->refcnt = refcnt;
+ __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
__assign_str(name, mod->name);
),
@@ -75,17 +78,18 @@ DECLARE_EVENT_CLASS(module_refcnt,
DEFINE_EVENT(module_refcnt, module_get,
- TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+ TP_PROTO(struct module *mod, unsigned long ip),
- TP_ARGS(mod, ip, refcnt)
+ TP_ARGS(mod, ip)
);
DEFINE_EVENT(module_refcnt, module_put,
- TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+ TP_PROTO(struct module *mod, unsigned long ip),
- TP_ARGS(mod, ip, refcnt)
+ TP_ARGS(mod, ip)
);
+#endif /* CONFIG_MODULE_UNLOAD */
TRACE_EVENT(module_request,
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index a8989c4547e7..188deca2f3c7 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -1,4 +1,7 @@
-#ifndef _TRACE_NAPI_H_
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM napi
+
+#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NAPI_H_
#include <linux/netdevice.h>
@@ -8,4 +11,7 @@ DECLARE_TRACE(napi_poll,
TP_PROTO(struct napi_struct *napi),
TP_ARGS(napi));
-#endif
+#endif /* _TRACE_NAPI_H_ */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index cfceb0b73e20..4f733ecea46e 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -51,15 +51,12 @@ TRACE_EVENT(sched_kthread_stop_ret,
/*
* Tracepoint for waiting on task to unschedule:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
TRACE_EVENT(sched_wait_task,
- TP_PROTO(struct rq *rq, struct task_struct *p),
+ TP_PROTO(struct task_struct *p),
- TP_ARGS(rq, p),
+ TP_ARGS(p),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -79,15 +76,12 @@ TRACE_EVENT(sched_wait_task,
/*
* Tracepoint for waking up a task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
DECLARE_EVENT_CLASS(sched_wakeup_template,
- TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+ TP_PROTO(struct task_struct *p, int success),
- TP_ARGS(rq, p, success),
+ TP_ARGS(p, success),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -111,31 +105,25 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
);
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
- TP_PROTO(struct rq *rq, struct task_struct *p, int success),
- TP_ARGS(rq, p, success));
+ TP_PROTO(struct task_struct *p, int success),
+ TP_ARGS(p, success));
/*
* Tracepoint for waking up a new task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
- TP_PROTO(struct rq *rq, struct task_struct *p, int success),
- TP_ARGS(rq, p, success));
+ TP_PROTO(struct task_struct *p, int success),
+ TP_ARGS(p, success));
/*
* Tracepoint for task switches, performed by the scheduler:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
TRACE_EVENT(sched_switch,
- TP_PROTO(struct rq *rq, struct task_struct *prev,
+ TP_PROTO(struct task_struct *prev,
struct task_struct *next),
- TP_ARGS(rq, prev, next),
+ TP_ARGS(prev, next),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index a510b75ac304..814566c99d29 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -100,18 +100,7 @@ TRACE_EVENT(signal_deliver,
__entry->sa_handler, __entry->sa_flags)
);
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-TRACE_EVENT(signal_overflow_fail,
+DECLARE_EVENT_CLASS(signal_queue_overflow,
TP_PROTO(int sig, int group, struct siginfo *info),
@@ -135,6 +124,24 @@ TRACE_EVENT(signal_overflow_fail,
);
/**
+ * signal_overflow_fail - called when the signal queue overflows
+ * @sig: signal number
+ * @group: signal to process group or not (bool)
+ * @info: pointer to struct siginfo
+ *
+ * The kernel fails to generate the 'sig' signal with 'info' siginfo because
+ * the siginfo queue has overflowed, and the signal is dropped.
+ * 'group' is not 0 if the signal will be sent to a process group.
+ * 'sig' is always one of RT signals.
+ */
+DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
+
+ TP_PROTO(int sig, int group, struct siginfo *info),
+
+ TP_ARGS(sig, group, info)
+);
+
+/**
* signal_lose_info - called when siginfo is lost
* @sig: signal number
* @group: signal to process group or not (bool)
@@ -145,28 +152,13 @@ TRACE_EVENT(signal_overflow_fail,
* 'group' is not 0 if the signal will be sent to a process group.
* 'sig' is always one of non-RT signals.
*/
-TRACE_EVENT(signal_lose_info,
+DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
TP_PROTO(int sig, int group, struct siginfo *info),
- TP_ARGS(sig, group, info),
-
- TP_STRUCT__entry(
- __field( int, sig )
- __field( int, group )
- __field( int, errno )
- __field( int, code )
- ),
-
- TP_fast_assign(
- __entry->sig = sig;
- __entry->group = group;
- TP_STORE_SIGINFO(__entry, info);
- ),
-
- TP_printk("sig=%d group=%d errno=%d code=%d",
- __entry->sig, __entry->group, __entry->errno, __entry->code)
+ TP_ARGS(sig, group, info)
);
+
#endif /* _TRACE_SIGNAL_H */
/* This part must be outside protection */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 882c64832ffe..e0e8daa6767e 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -62,7 +62,10 @@
struct trace_entry ent; \
tstruct \
char __data[0]; \
- };
+ }; \
+ \
+ static struct ftrace_event_class event_class_##name;
+
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
static struct ftrace_event_call \
@@ -147,16 +150,18 @@
*
* entry = iter->ent;
*
- * if (entry->type != event_<call>.id) {
+ * if (entry->type != event_<call>->event.type) {
* WARN_ON_ONCE(1);
* return TRACE_TYPE_UNHANDLED;
* }
*
* field = (typeof(field))entry;
*
- * p = get_cpu_var(ftrace_event_seq);
+ * p = &get_cpu_var(ftrace_event_seq);
* trace_seq_init(p);
- * ret = trace_seq_printf(s, <TP_printk> "\n");
+ * ret = trace_seq_printf(s, "%s: ", <call>);
+ * if (ret)
+ * ret = trace_seq_printf(s, <TP_printk> "\n");
* put_cpu();
* if (!ret)
* return TRACE_TYPE_PARTIAL_LINE;
@@ -201,18 +206,22 @@
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
-ftrace_raw_output_id_##call(int event_id, const char *name, \
- struct trace_iterator *iter, int flags) \
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
+ struct trace_event *trace_event) \
{ \
+ struct ftrace_event_call *event; \
struct trace_seq *s = &iter->seq; \
struct ftrace_raw_##call *field; \
struct trace_entry *entry; \
struct trace_seq *p; \
int ret; \
\
+ event = container_of(trace_event, struct ftrace_event_call, \
+ event); \
+ \
entry = iter->ent; \
\
- if (entry->type != event_id) { \
+ if (entry->type != event->event.type) { \
WARN_ON_ONCE(1); \
return TRACE_TYPE_UNHANDLED; \
} \
@@ -221,7 +230,7 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
\
p = &get_cpu_var(ftrace_event_seq); \
trace_seq_init(p); \
- ret = trace_seq_printf(s, "%s: ", name); \
+ ret = trace_seq_printf(s, "%s: ", event->name); \
if (ret) \
ret = trace_seq_printf(s, print); \
put_cpu(); \
@@ -229,21 +238,16 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
return TRACE_TYPE_PARTIAL_LINE; \
\
return TRACE_TYPE_HANDLED; \
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args) \
-static notrace enum print_line_t \
-ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
-{ \
- return ftrace_raw_output_id_##template(event_##name.id, \
- #name, iter, flags); \
-}
+} \
+static struct trace_event_functions ftrace_event_type_funcs_##call = { \
+ .trace = ftrace_raw_output_##call, \
+};
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
+ struct trace_event *event) \
{ \
struct trace_seq *s = &iter->seq; \
struct ftrace_raw_##template *field; \
@@ -253,7 +257,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
\
entry = iter->ent; \
\
- if (entry->type != event_##call.id) { \
+ if (entry->type != event_##call.event.type) { \
WARN_ON_ONCE(1); \
return TRACE_TYPE_UNHANDLED; \
} \
@@ -270,7 +274,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
return TRACE_TYPE_PARTIAL_LINE; \
\
return TRACE_TYPE_HANDLED; \
-}
+} \
+static struct trace_event_functions ftrace_event_type_funcs_##call = { \
+ .trace = ftrace_raw_output_##call, \
+};
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -376,142 +383,83 @@ static inline notrace int ftrace_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#ifdef CONFIG_PERF_EVENTS
-
-/*
- * Generate the functions needed for tracepoint perf_event support.
- *
- * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
- *
- * static int ftrace_profile_enable_<call>(void)
- * {
- * return register_trace_<call>(ftrace_profile_<call>);
- * }
- *
- * static void ftrace_profile_disable_<call>(void)
- * {
- * unregister_trace_<call>(ftrace_profile_<call>);
- * }
- *
- */
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args) \
- \
-static void perf_trace_##name(proto); \
- \
-static notrace int \
-perf_trace_enable_##name(struct ftrace_event_call *unused) \
-{ \
- return register_trace_##name(perf_trace_##name); \
-} \
- \
-static notrace void \
-perf_trace_disable_##name(struct ftrace_event_call *unused) \
-{ \
- unregister_trace_##name(perf_trace_##name); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#endif /* CONFIG_PERF_EVENTS */
-
/*
* Stage 4 of the trace events.
*
* Override the macros in <trace/trace_events.h> to include the following:
*
- * static void ftrace_event_<call>(proto)
- * {
- * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
- * }
- *
- * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- * return register_trace_<call>(ftrace_event_<call>);
- * }
- *
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- * unregister_trace_<call>(ftrace_event_<call>);
- * }
- *
- *
* For those macros defined with TRACE_EVENT:
*
* static struct ftrace_event_call event_<call>;
*
- * static void ftrace_raw_event_<call>(proto)
+ * static void ftrace_raw_event_<call>(void *__data, proto)
* {
+ * struct ftrace_event_call *event_call = __data;
+ * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
* struct ring_buffer_event *event;
* struct ftrace_raw_<call> *entry; <-- defined in stage 1
* struct ring_buffer *buffer;
* unsigned long irq_flags;
+ * int __data_size;
* int pc;
*
* local_save_flags(irq_flags);
* pc = preempt_count();
*
+ * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
* event = trace_current_buffer_lock_reserve(&buffer,
- * event_<call>.id,
- * sizeof(struct ftrace_raw_<call>),
+ * event_<call>->event.type,
+ * sizeof(*entry) + __data_size,
* irq_flags, pc);
* if (!event)
* return;
* entry = ring_buffer_event_data(event);
*
- * <assign>; <-- Here we assign the entries by the __field and
- * __array macros.
- *
- * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
- * }
- *
- * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- * int ret;
- *
- * ret = register_trace_<call>(ftrace_raw_event_<call>);
- * if (!ret)
- * pr_info("event trace: Could not activate trace point "
- * "probe to <call>");
- * return ret;
- * }
+ * { <assign>; } <-- Here we assign the entries by the __field and
+ * __array macros.
*
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- * unregister_trace_<call>(ftrace_raw_event_<call>);
+ * if (!filter_current_check_discard(buffer, event_call, entry, event))
+ * trace_current_buffer_unlock_commit(buffer,
+ * event, irq_flags, pc);
* }
*
* static struct trace_event ftrace_event_type_<call> = {
* .trace = ftrace_raw_output_<call>, <-- stage 2
* };
*
+ * static const char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct ftrace_event_class __used event_class_<template> = {
+ * .system = "<system>",
+ * .define_fields = ftrace_define_fields_<call>,
+ * .fields = LIST_HEAD_INIT(event_class_##call.fields),
+ * .raw_init = trace_event_raw_init,
+ * .probe = ftrace_raw_event_##call,
+ * };
+ *
* static struct ftrace_event_call __used
* __attribute__((__aligned__(4)))
* __attribute__((section("_ftrace_events"))) event_<call> = {
* .name = "<call>",
- * .system = "<system>",
- * .raw_init = trace_event_raw_init,
- * .regfunc = ftrace_reg_event_<call>,
- * .unregfunc = ftrace_unreg_event_<call>,
- * }
+ * .class = event_class_<template>,
+ * .event = &ftrace_event_type_<call>,
+ * .print_fmt = print_fmt_<call>,
+ * };
*
*/
#ifdef CONFIG_PERF_EVENTS
+#define _TRACE_PERF_PROTO(call, proto) \
+ static notrace void \
+ perf_trace_##call(void *__data, proto);
+
#define _TRACE_PERF_INIT(call) \
- .perf_event_enable = perf_trace_enable_##call, \
- .perf_event_disable = perf_trace_disable_##call,
+ .perf_probe = perf_trace_##call,
#else
+#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
@@ -545,9 +493,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
\
static notrace void \
-ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
- proto) \
+ftrace_raw_event_##call(void *__data, proto) \
{ \
+ struct ftrace_event_call *event_call = __data; \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ring_buffer_event *event; \
struct ftrace_raw_##call *entry; \
@@ -562,14 +510,13 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
event = trace_current_buffer_lock_reserve(&buffer, \
- event_call->id, \
+ event_call->event.type, \
sizeof(*entry) + __data_size, \
irq_flags, pc); \
if (!event) \
return; \
entry = ring_buffer_event_data(event); \
\
- \
tstruct \
\
{ assign; } \
@@ -578,34 +525,21 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
trace_nowake_buffer_unlock_commit(buffer, \
event, irq_flags, pc); \
}
+/*
+ * The ftrace_test_probe is compiled out; it is only here as a build-time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
- \
-static notrace void ftrace_raw_event_##call(proto) \
-{ \
- ftrace_raw_event_id_##template(&event_##call, args); \
-} \
- \
-static notrace int \
-ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
-{ \
- return register_trace_##call(ftrace_raw_event_##call); \
-} \
- \
-static notrace void \
-ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
+static inline void ftrace_test_probe_##call(void) \
{ \
- unregister_trace_##call(ftrace_raw_event_##call); \
-} \
- \
-static struct trace_event ftrace_event_type_##call = { \
- .trace = ftrace_raw_output_##call, \
-};
+ check_trace_callback_type_##call(ftrace_raw_event_##template); \
+}
#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -622,7 +556,16 @@ static struct trace_event ftrace_event_type_##call = { \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static const char print_fmt_##call[] = print;
+_TRACE_PERF_PROTO(call, PARAMS(proto)); \
+static const char print_fmt_##call[] = print; \
+static struct ftrace_event_class __used event_class_##call = { \
+ .system = __stringify(TRACE_SYSTEM), \
+ .define_fields = ftrace_define_fields_##call, \
+ .fields = LIST_HEAD_INIT(event_class_##call.fields),\
+ .raw_init = trace_event_raw_init, \
+ .probe = ftrace_raw_event_##call, \
+ _TRACE_PERF_INIT(call) \
+};
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
@@ -631,15 +574,10 @@ static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
- .system = __stringify(TRACE_SYSTEM), \
- .event = &ftrace_event_type_##call, \
- .raw_init = trace_event_raw_init, \
- .regfunc = ftrace_raw_reg_event_##call, \
- .unregfunc = ftrace_raw_unreg_event_##call, \
+ .class = &event_class_##template, \
+ .event.funcs = &ftrace_event_type_funcs_##template, \
.print_fmt = print_fmt_##template, \
- .define_fields = ftrace_define_fields_##template, \
- _TRACE_PERF_INIT(call) \
-}
+};
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
@@ -650,14 +588,9 @@ static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
- .system = __stringify(TRACE_SYSTEM), \
- .event = &ftrace_event_type_##call, \
- .raw_init = trace_event_raw_init, \
- .regfunc = ftrace_raw_reg_event_##call, \
- .unregfunc = ftrace_raw_unreg_event_##call, \
+ .class = &event_class_##template, \
+ .event.funcs = &ftrace_event_type_funcs_##call, \
.print_fmt = print_fmt_##call, \
- .define_fields = ftrace_define_fields_##template, \
- _TRACE_PERF_INIT(call) \
}
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -757,17 +690,20 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
-perf_trace_templ_##call(struct ftrace_event_call *event_call, \
- struct pt_regs *__regs, proto) \
+perf_trace_##call(void *__data, proto) \
{ \
+ struct ftrace_event_call *event_call = __data; \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ftrace_raw_##call *entry; \
+ struct pt_regs *__regs = &get_cpu_var(perf_trace_regs); \
u64 __addr = 0, __count = 1; \
unsigned long irq_flags; \
int __entry_size; \
int __data_size; \
int rctx; \
\
+ perf_fetch_caller_regs(__regs, 1); \
+ \
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
sizeof(u64)); \
@@ -775,33 +711,35 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
\
if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
"profile buffer not large enough")) \
- return; \
+ goto out; \
entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
- __entry_size, event_call->id, &rctx, &irq_flags); \
+ __entry_size, event_call->event.type, &rctx, &irq_flags); \
if (!entry) \
- return; \
+ goto out; \
tstruct \
\
{ assign; } \
\
perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
__count, irq_flags, __regs); \
+ out: \
+ put_cpu_var(perf_trace_regs); \
}
+/*
+ * This part is compiled out; it is only here as a build-time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
-static notrace void perf_trace_##call(proto) \
+static inline void perf_test_probe_##call(void) \
{ \
- struct ftrace_event_call *event_call = &event_##call; \
- struct pt_regs *__regs = &get_cpu_var(perf_trace_regs); \
- \
- perf_fetch_caller_regs(__regs, 1); \
+ check_trace_callback_type_##call(perf_trace_##template); \
\
- perf_trace_templ_##template(event_call, __regs, args); \
- \
- put_cpu_var(perf_trace_regs); \
}
+
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
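
With registration handled once per class, each DEFINE_EVENT now only emits a never-called inline whose sole job is to make the build fail if the generated probe and the tracepoint's callback type drift apart. For a concrete event such as sched_wakeup (class sched_wakeup_template, per the events/sched.h hunk above), the macro expands to roughly (illustrative expansion):

static inline void ftrace_test_probe_sched_wakeup(void)
{
	/* compiled out; only forces the compiler to type-check the probe */
	check_trace_callback_type_sched_wakeup(
			ftrace_raw_event_sched_wakeup_template);
}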
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index e5e5f48dbfb3..257e08960d7b 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -25,6 +25,8 @@ struct syscall_metadata {
int nb_args;
const char **types;
const char **args;
+ struct list_head enter_fields;
+ struct list_head exit_fields;
struct ftrace_event_call *enter_event;
struct ftrace_event_call *exit_event;
@@ -34,16 +36,16 @@ struct syscall_metadata {
extern unsigned long arch_syscall_addr(int nr);
extern int init_syscall_trace(struct ftrace_event_call *call);
-extern int syscall_enter_define_fields(struct ftrace_event_call *call);
-extern int syscall_exit_define_fields(struct ftrace_event_call *call);
extern int reg_event_syscall_enter(struct ftrace_event_call *call);
extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
extern int reg_event_syscall_exit(struct ftrace_event_call *call);
extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
extern int
ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
-enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
-enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
+enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
+ struct trace_event *event);
+enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
+ struct trace_event *event);
#endif
#ifdef CONFIG_PERF_EVENTS