author		Paul Mundt <lethal@linux-sh.org>	2011-01-13 07:06:28 +0100
committer	Paul Mundt <lethal@linux-sh.org>	2011-01-13 07:06:28 +0100
commit		f43dc23d5ea91fca257be02138a255f02d98e806 (patch)
tree		b29722f6e965316e90ac97abf79923ced250dc21 /include/trace
parent		serial: sh-sci: Kill off more unused defines. (diff)
parent		Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into common/serial-rework
Conflicts:
arch/sh/kernel/cpu/sh2/setup-sh7619.c
arch/sh/kernel/cpu/sh2a/setup-mxg.c
arch/sh/kernel/cpu/sh2a/setup-sh7201.c
arch/sh/kernel/cpu/sh2a/setup-sh7203.c
arch/sh/kernel/cpu/sh2a/setup-sh7206.c
arch/sh/kernel/cpu/sh3/setup-sh7705.c
arch/sh/kernel/cpu/sh3/setup-sh770x.c
arch/sh/kernel/cpu/sh3/setup-sh7710.c
arch/sh/kernel/cpu/sh3/setup-sh7720.c
arch/sh/kernel/cpu/sh4/setup-sh4-202.c
arch/sh/kernel/cpu/sh4/setup-sh7750.c
arch/sh/kernel/cpu/sh4/setup-sh7760.c
arch/sh/kernel/cpu/sh4a/setup-sh7343.c
arch/sh/kernel/cpu/sh4a/setup-sh7366.c
arch/sh/kernel/cpu/sh4a/setup-sh7722.c
arch/sh/kernel/cpu/sh4a/setup-sh7723.c
arch/sh/kernel/cpu/sh4a/setup-sh7724.c
arch/sh/kernel/cpu/sh4a/setup-sh7763.c
arch/sh/kernel/cpu/sh4a/setup-sh7770.c
arch/sh/kernel/cpu/sh4a/setup-sh7780.c
arch/sh/kernel/cpu/sh4a/setup-sh7785.c
arch/sh/kernel/cpu/sh4a/setup-sh7786.c
arch/sh/kernel/cpu/sh4a/setup-shx3.c
arch/sh/kernel/cpu/sh5/setup-sh5.c
drivers/serial/sh-sci.c
drivers/serial/sh-sci.h
include/linux/serial_sci.h
Diffstat (limited to 'include/trace')
29 files changed, 4521 insertions, 1322 deletions
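Most of the churn in the diff below comes from one refactor: tracepoints that previously each carried a full TRACE_EVENT() body (entry layout, assignment, and print format) are folded into a shared DECLARE_EVENT_CLASS(), with each concrete event reduced to a two-line DEFINE_EVENT(). A minimal sketch of that pattern, modeled on the block.h changes below — the header guard, TRACE_SYSTEM, and event names here are hypothetical, not from this commit:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/blkdev.h>
#include <linux/tracepoint.h>

/* One event class carries the entry layout, assignment, and output
 * format that several events would otherwise duplicate verbatim. */
DECLARE_EVENT_CLASS(sample_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t,		dev	)
		__field( sector_t,	sector	)
	),

	TP_fast_assign(
		__entry->dev	= rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector	= blk_rq_pos(rq);
	),

	TP_printk("%d,%d sector %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->sector)
);

/* Each tracepoint sharing that layout now costs only its prototype,
 * instead of a full TRACE_EVENT() body. */
DEFINE_EVENT(sample_rq, sample_rq_start,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

DEFINE_EVENT(sample_rq, sample_rq_done,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

#endif /* _TRACE_SAMPLE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

Such a header is consumed the usual tracepoint way: exactly one compilation unit defines CREATE_TRACE_POINTS before including it, so that define_trace.h (patched below to handle DEFINE_EVENT and friends) emits the tracepoint definitions; every other user simply includes the header for the declarations.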
diff --git a/include/trace/boot.h b/include/trace/boot.h deleted file mode 100644 index 088ea089e31d..000000000000 --- a/include/trace/boot.h +++ /dev/null @@ -1,60 +0,0 @@ -#ifndef _LINUX_TRACE_BOOT_H -#define _LINUX_TRACE_BOOT_H - -#include <linux/module.h> -#include <linux/kallsyms.h> -#include <linux/init.h> - -/* - * Structure which defines the trace of an initcall - * while it is called. - * You don't have to fill the func field since it is - * only used internally by the tracer. - */ -struct boot_trace_call { - pid_t caller; - char func[KSYM_SYMBOL_LEN]; -}; - -/* - * Structure which defines the trace of an initcall - * while it returns. - */ -struct boot_trace_ret { - char func[KSYM_SYMBOL_LEN]; - int result; - unsigned long long duration; /* nsecs */ -}; - -#ifdef CONFIG_BOOT_TRACER -/* Append the traces on the ring-buffer */ -extern void trace_boot_call(struct boot_trace_call *bt, initcall_t fn); -extern void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn); - -/* Tells the tracer that smp_pre_initcall is finished. - * So we can start the tracing - */ -extern void start_boot_trace(void); - -/* Resume the tracing of other necessary events - * such as sched switches - */ -extern void enable_boot_trace(void); - -/* Suspend this tracing. Actually, only sched_switches tracing have - * to be suspended. Initcalls doesn't need it.) - */ -extern void disable_boot_trace(void); -#else -static inline -void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) { } - -static inline -void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) { } - -static inline void start_boot_trace(void) { } -static inline void enable_boot_trace(void) { } -static inline void disable_boot_trace(void) { } -#endif /* CONFIG_BOOT_TRACER */ - -#endif /* __LINUX_TRACE_BOOT_H */ diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index f7a7ae1e8f90..da39b22636f7 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h @@ -21,11 +21,47 @@ #undef CREATE_TRACE_POINTS #include <linux/stringify.h> +/* + * module.h includes tracepoints, and because ftrace.h + * pulls in module.h: + * trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h -> + * linux/ftrace.h -> linux/module.h + * we must include module.h here before we play with any of + * the TRACE_EVENT() macros, otherwise the tracepoints included + * by module.h may break the build. 
+ */ +#include <linux/module.h> #undef TRACE_EVENT #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ DEFINE_TRACE(name) +#undef TRACE_EVENT_CONDITION +#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \ + TRACE_EVENT(name, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(tstruct), \ + PARAMS(assign), \ + PARAMS(print)) + +#undef TRACE_EVENT_FN +#define TRACE_EVENT_FN(name, proto, args, tstruct, \ + assign, print, reg, unreg) \ + DEFINE_TRACE_FN(name, reg, unreg) + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) \ + DEFINE_TRACE(name) + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_TRACE(name) + +#undef DEFINE_EVENT_CONDITION +#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + #undef DECLARE_TRACE #define DECLARE_TRACE(name, proto, args) \ DEFINE_TRACE(name) @@ -52,11 +88,23 @@ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +/* Make all open coded DECLARE_TRACE nops */ +#undef DECLARE_TRACE +#define DECLARE_TRACE(name, proto, args) + #ifdef CONFIG_EVENT_TRACING #include <trace/ftrace.h> #endif +#undef TRACE_EVENT +#undef TRACE_EVENT_FN +#undef TRACE_EVENT_CONDITION +#undef DECLARE_EVENT_CLASS +#undef DEFINE_EVENT +#undef DEFINE_EVENT_PRINT +#undef DEFINE_EVENT_CONDITION #undef TRACE_HEADER_MULTI_READ +#undef DECLARE_TRACE /* Only undef what we defined in this file */ #ifdef UNDEF_TRACE_INCLUDE_FILE diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h new file mode 100644 index 000000000000..1af72dc24278 --- /dev/null +++ b/include/trace/events/bkl.h @@ -0,0 +1,61 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM bkl + +#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_BKL_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(lock_kernel, + + TP_PROTO(const char *func, const char *file, int line), + + TP_ARGS(func, file, line), + + TP_STRUCT__entry( + __field( int, depth ) + __field_ext( const char *, func, FILTER_PTR_STRING ) + __field_ext( const char *, file, FILTER_PTR_STRING ) + __field( int, line ) + ), + + TP_fast_assign( + /* We want to record the lock_depth after lock is acquired */ + __entry->depth = current->lock_depth + 1; + __entry->func = func; + __entry->file = file; + __entry->line = line; + ), + + TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, + __entry->file, __entry->line, __entry->func) +); + +TRACE_EVENT(unlock_kernel, + + TP_PROTO(const char *func, const char *file, int line), + + TP_ARGS(func, file, line), + + TP_STRUCT__entry( + __field(int, depth ) + __field(const char *, func ) + __field(const char *, file ) + __field(int, line ) + ), + + TP_fast_assign( + __entry->depth = current->lock_depth; + __entry->func = func; + __entry->file = file; + __entry->line = line; + ), + + TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, + __entry->file, __entry->line, __entry->func) +); + +#endif /* _TRACE_BKL_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/block.h b/include/trace/events/block.h index d6b05f42dd44..d8ce278515c3 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -1,3 +1,6 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM block + #if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_BLOCK_H @@ -5,10 +8,7 @@ #include <linux/blkdev.h> #include <linux/tracepoint.h> -#undef TRACE_SYSTEM 
-#define TRACE_SYSTEM block - -TRACE_EVENT(block_rq_abort, +DECLARE_EVENT_CLASS(block_rq_with_error, TP_PROTO(struct request_queue *q, struct request *rq), @@ -25,8 +25,10 @@ TRACE_EVENT(block_rq_abort, TP_fast_assign( __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); + __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_pos(rq); + __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_sectors(rq); __entry->errors = rq->errors; blk_fill_rwbs_rq(__entry->rwbs, rq); @@ -40,41 +42,58 @@ TRACE_EVENT(block_rq_abort, __entry->nr_sector, __entry->errors) ); -TRACE_EVENT(block_rq_insert, +/** + * block_rq_abort - abort block operation request + * @q: queue containing the block operation request + * @rq: block IO operation request + * + * Called immediately after pending block IO operation request @rq in + * queue @q is aborted. The fields in the operation request @rq + * can be examined to determine which device and sectors the pending + * operation would access. + */ +DEFINE_EVENT(block_rq_with_error, block_rq_abort, TP_PROTO(struct request_queue *q, struct request *rq), - TP_ARGS(q, rq), + TP_ARGS(q, rq) +); - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned int, nr_sector ) - __field( unsigned int, bytes ) - __array( char, rwbs, 6 ) - __array( char, comm, TASK_COMM_LEN ) - __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) - ), +/** + * block_rq_requeue - place block IO request back on a queue + * @q: queue holding operation + * @rq: block IO operation request + * + * The block operation request @rq is being placed back into queue + * @q. For some reason the request was not completed and needs to be + * put back in the queue. + */ +DEFINE_EVENT(block_rq_with_error, block_rq_requeue, - TP_fast_assign( - __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); - __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; + TP_PROTO(struct request_queue *q, struct request *rq), - blk_fill_rwbs_rq(__entry->rwbs, rq); - blk_dump_cmd(__get_str(cmd), rq); - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); - ), + TP_ARGS(q, rq) +); - TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->rwbs, __entry->bytes, __get_str(cmd), - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm) +/** + * block_rq_complete - block IO operation completed by device driver + * @q: queue containing the block operation request + * @rq: block operations request + * + * The block_rq_complete tracepoint event indicates that some portion + * of operation request has been completed by the device driver. If + * the @rq->bio is %NULL, then there is absolutely no additional work to + * do for the request. If @rq->bio is non-NULL then there is + * additional work required to complete the request. 
+ */ +DEFINE_EVENT(block_rq_with_error, block_rq_complete, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq) ); -TRACE_EVENT(block_rq_issue, +DECLARE_EVENT_CLASS(block_rq, TP_PROTO(struct request_queue *q, struct request *rq), @@ -86,15 +105,18 @@ TRACE_EVENT(block_rq_issue, __field( unsigned int, nr_sector ) __field( unsigned int, bytes ) __array( char, rwbs, 6 ) - __array( char, comm, TASK_COMM_LEN ) + __array( char, comm, TASK_COMM_LEN ) __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) ), TP_fast_assign( __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); - __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; + __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_pos(rq); + __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_sectors(rq); + __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_bytes(rq) : 0; blk_fill_rwbs_rq(__entry->rwbs, rq); blk_dump_cmd(__get_str(cmd), rq); @@ -108,69 +130,49 @@ TRACE_EVENT(block_rq_issue, __entry->nr_sector, __entry->comm) ); -TRACE_EVENT(block_rq_requeue, +/** + * block_rq_insert - insert block operation request into queue + * @q: target queue + * @rq: block IO operation request + * + * Called immediately before block operation request @rq is inserted + * into queue @q. The fields in the operation request @rq struct can + * be examined to determine which device and sectors the pending + * operation would access. + */ +DEFINE_EVENT(block_rq, block_rq_insert, TP_PROTO(struct request_queue *q, struct request *rq), - TP_ARGS(q, rq), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned int, nr_sector ) - __field( int, errors ) - __array( char, rwbs, 6 ) - __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) - ), - - TP_fast_assign( - __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); - __entry->errors = rq->errors; - - blk_fill_rwbs_rq(__entry->rwbs, rq); - blk_dump_cmd(__get_str(cmd), rq); - ), - - TP_printk("%d,%d %s (%s) %llu + %u [%d]", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->rwbs, __get_str(cmd), - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->errors) + TP_ARGS(q, rq) ); -TRACE_EVENT(block_rq_complete, +/** + * block_rq_issue - issue pending block IO request operation to device driver + * @q: queue holding operation + * @rq: block IO operation operation request + * + * Called when block operation request @rq from queue @q is sent to a + * device driver for processing. + */ +DEFINE_EVENT(block_rq, block_rq_issue, TP_PROTO(struct request_queue *q, struct request *rq), - TP_ARGS(q, rq), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned int, nr_sector ) - __field( int, errors ) - __array( char, rwbs, 6 ) - __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) - ), - - TP_fast_assign( - __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); - __entry->nr_sector = blk_pc_request(rq) ? 
0 : blk_rq_sectors(rq); - __entry->errors = rq->errors; - - blk_fill_rwbs_rq(__entry->rwbs, rq); - blk_dump_cmd(__get_str(cmd), rq); - ), - - TP_printk("%d,%d %s (%s) %llu + %u [%d]", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->rwbs, __get_str(cmd), - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->errors) + TP_ARGS(q, rq) ); + +/** + * block_bio_bounce - used bounce buffer when processing block operation + * @q: queue holding the block operation + * @bio: block operation + * + * A bounce buffer was used to handle the block operation @bio in @q. + * This occurs when hardware limitations prevent a direct transfer of + * data between the @bio data memory area and the IO device. Use of a + * bounce buffer requires extra copying of data and decreases + * performance. + */ TRACE_EVENT(block_bio_bounce, TP_PROTO(struct request_queue *q, struct bio *bio), @@ -186,7 +188,8 @@ TRACE_EVENT(block_bio_bounce, ), TP_fast_assign( - __entry->dev = bio->bi_bdev->bd_dev; + __entry->dev = bio->bi_bdev ? + bio->bi_bdev->bd_dev : 0; __entry->sector = bio->bi_sector; __entry->nr_sector = bio->bi_size >> 9; blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); @@ -199,6 +202,14 @@ TRACE_EVENT(block_bio_bounce, __entry->nr_sector, __entry->comm) ); +/** + * block_bio_complete - completed all work on the block operation + * @q: queue holding the block operation + * @bio: block operation completed + * + * This tracepoint indicates there is no further work to do on this + * block IO operation @bio. + */ TRACE_EVENT(block_bio_complete, TP_PROTO(struct request_queue *q, struct bio *bio), @@ -226,7 +237,7 @@ TRACE_EVENT(block_bio_complete, __entry->nr_sector, __entry->error) ); -TRACE_EVENT(block_bio_backmerge, +DECLARE_EVENT_CLASS(block_bio, TP_PROTO(struct request_queue *q, struct bio *bio), @@ -254,63 +265,51 @@ TRACE_EVENT(block_bio_backmerge, __entry->nr_sector, __entry->comm) ); -TRACE_EVENT(block_bio_frontmerge, +/** + * block_bio_backmerge - merging block operation to the end of an existing operation + * @q: queue holding operation + * @bio: new block operation to merge + * + * Merging block request @bio to the end of an existing block request + * in queue @q. + */ +DEFINE_EVENT(block_bio, block_bio_backmerge, TP_PROTO(struct request_queue *q, struct bio *bio), - TP_ARGS(q, bio), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned, nr_sector ) - __array( char, rwbs, 6 ) - __array( char, comm, TASK_COMM_LEN ) - ), - - TP_fast_assign( - __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; - __entry->nr_sector = bio->bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); - ), - - TP_printk("%d,%d %s %llu + %u [%s]", - MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm) + TP_ARGS(q, bio) ); -TRACE_EVENT(block_bio_queue, +/** + * block_bio_frontmerge - merging block operation to the beginning of an existing operation + * @q: queue holding operation + * @bio: new block operation to merge + * + * Merging block IO operation @bio to the beginning of an existing block + * operation in queue @q. 
+ */ +DEFINE_EVENT(block_bio, block_bio_frontmerge, TP_PROTO(struct request_queue *q, struct bio *bio), - TP_ARGS(q, bio), + TP_ARGS(q, bio) +); - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned int, nr_sector ) - __array( char, rwbs, 6 ) - __array( char, comm, TASK_COMM_LEN ) - ), +/** + * block_bio_queue - putting new block IO operation in queue + * @q: queue holding operation + * @bio: new block operation + * + * About to place the block IO operation @bio into queue @q. + */ +DEFINE_EVENT(block_bio, block_bio_queue, - TP_fast_assign( - __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; - __entry->nr_sector = bio->bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); - ), + TP_PROTO(struct request_queue *q, struct bio *bio), - TP_printk("%d,%d %s %llu + %u [%s]", - MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm) + TP_ARGS(q, bio) ); -TRACE_EVENT(block_getrq, +DECLARE_EVENT_CLASS(block_get_rq, TP_PROTO(struct request_queue *q, struct bio *bio, int rw), @@ -339,35 +338,48 @@ TRACE_EVENT(block_getrq, __entry->nr_sector, __entry->comm) ); -TRACE_EVENT(block_sleeprq, +/** + * block_getrq - get a free request entry in queue for block IO operations + * @q: queue for operations + * @bio: pending block IO operation + * @rw: low bit indicates a read (%0) or a write (%1) + * + * A request struct for queue @q has been allocated to handle the + * block IO operation @bio. + */ +DEFINE_EVENT(block_get_rq, block_getrq, TP_PROTO(struct request_queue *q, struct bio *bio, int rw), - TP_ARGS(q, bio, rw), + TP_ARGS(q, bio, rw) +); - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned int, nr_sector ) - __array( char, rwbs, 6 ) - __array( char, comm, TASK_COMM_LEN ) - ), +/** + * block_sleeprq - waiting to get a free request entry in queue for block IO operation + * @q: queue for operation + * @bio: pending block IO operation + * @rw: low bit indicates a read (%0) or a write (%1) + * + * In the case where a request struct cannot be provided for queue @q + * the process needs to wait for an request struct to become + * available. This tracepoint event is generated each time the + * process goes to sleep waiting for request struct become available. + */ +DEFINE_EVENT(block_get_rq, block_sleeprq, - TP_fast_assign( - __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; - __entry->sector = bio ? bio->bi_sector : 0; - __entry->nr_sector = bio ? bio->bi_size >> 9 : 0; - blk_fill_rwbs(__entry->rwbs, - bio ? bio->bi_rw : 0, __entry->nr_sector); - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); - ), + TP_PROTO(struct request_queue *q, struct bio *bio, int rw), - TP_printk("%d,%d %s %llu + %u [%s]", - MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm) + TP_ARGS(q, bio, rw) ); +/** + * block_plug - keep operations requests in request queue + * @q: request queue to plug + * + * Plug the request queue @q. Do not allow block operation requests + * to be sent to the device driver. Instead, accumulate requests in + * the queue to improve throughput performance of the block device. 
+ */ TRACE_EVENT(block_plug, TP_PROTO(struct request_queue *q), @@ -385,7 +397,7 @@ TRACE_EVENT(block_plug, TP_printk("[%s]", __entry->comm) ); -TRACE_EVENT(block_unplug_timer, +DECLARE_EVENT_CLASS(block_unplug, TP_PROTO(struct request_queue *q), @@ -404,25 +416,45 @@ TRACE_EVENT(block_unplug_timer, TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) ); -TRACE_EVENT(block_unplug_io, +/** + * block_unplug_timer - timed release of operations requests in queue to device driver + * @q: request queue to unplug + * + * Unplug the request queue @q because a timer expired and allow block + * operation requests to be sent to the device driver. + */ +DEFINE_EVENT(block_unplug, block_unplug_timer, TP_PROTO(struct request_queue *q), - TP_ARGS(q), + TP_ARGS(q) +); - TP_STRUCT__entry( - __field( int, nr_rq ) - __array( char, comm, TASK_COMM_LEN ) - ), +/** + * block_unplug_io - release of operations requests in request queue + * @q: request queue to unplug + * + * Unplug request queue @q because device driver is scheduled to work + * on elements in the request queue. + */ +DEFINE_EVENT(block_unplug, block_unplug_io, - TP_fast_assign( - __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); - ), + TP_PROTO(struct request_queue *q), - TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) + TP_ARGS(q) ); +/** + * block_split - split a single bio struct into two bio structs + * @q: queue containing the bio + * @bio: block operation being split + * @new_sector: The starting sector for the new bio + * + * The bio request @bio in request queue @q needs to be split into two + * bio requests. The newly created @bio request starts at + * @new_sector. This split may be required due to hardware limitation + * such as operation crossing device boundaries in a RAID system. + */ TRACE_EVENT(block_split, TP_PROTO(struct request_queue *q, struct bio *bio, @@ -453,6 +485,16 @@ TRACE_EVENT(block_split, __entry->comm) ); +/** + * block_remap - map request for a partition to the raw device + * @q: queue holding the operation + * @bio: revised operation + * @dev: device for the operation + * @from: original sector for the operation + * + * An operation for a partition on a block device has been mapped to the + * raw block device. + */ TRACE_EVENT(block_remap, TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, @@ -486,6 +528,50 @@ TRACE_EVENT(block_remap, (unsigned long long)__entry->old_sector) ); +/** + * block_rq_remap - map request for a block operation request + * @q: queue holding the operation + * @rq: block IO operation request + * @dev: device for the operation + * @from: original sector for the operation + * + * The block operation request @rq in @q has been remapped. The block + * operation request @rq holds the current information and @from hold + * the original sector. 
+ */ +TRACE_EVENT(block_rq_remap, + + TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, + sector_t from), + + TP_ARGS(q, rq, dev, from), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __field( dev_t, old_dev ) + __field( sector_t, old_sector ) + __array( char, rwbs, 6 ) + ), + + TP_fast_assign( + __entry->dev = disk_devt(rq->rq_disk); + __entry->sector = blk_rq_pos(rq); + __entry->nr_sector = blk_rq_sectors(rq); + __entry->old_dev = dev; + __entry->old_sector = from; + blk_fill_rwbs_rq(__entry->rwbs, rq); + ), + + TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, + __entry->nr_sector, + MAJOR(__entry->old_dev), MINOR(__entry->old_dev), + (unsigned long long)__entry->old_sector) +); + #endif /* _TRACE_BLOCK_H */ /* This part must be outside protection */ diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index acf4cc9cd36d..e5e345fb2a5c 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -1,21 +1,28 @@ -#if !defined(_TRACE_EXT4_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_EXT4_H - #undef TRACE_SYSTEM #define TRACE_SYSTEM ext4 +#if !defined(_TRACE_EXT4_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EXT4_H + #include <linux/writeback.h> -#include "../../../fs/ext4/ext4.h" -#include "../../../fs/ext4/mballoc.h" #include <linux/tracepoint.h> +struct ext4_allocation_context; +struct ext4_allocation_request; +struct ext4_prealloc_space; +struct ext4_inode_info; +struct mpage_da_data; + +#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode)) + TRACE_EVENT(ext4_free_inode, TP_PROTO(struct inode *inode), TP_ARGS(inode), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( umode_t, mode ) __field( uid_t, uid ) @@ -24,7 +31,8 @@ TRACE_EVENT(ext4_free_inode, ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); __entry->ino = inode->i_ino; __entry->mode = inode->i_mode; __entry->uid = inode->i_uid; @@ -32,9 +40,11 @@ TRACE_EVENT(ext4_free_inode, __entry->blocks = inode->i_blocks; ), - TP_printk("dev %s ino %lu mode %d uid %u gid %u blocks %llu", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->mode, - __entry->uid, __entry->gid, __entry->blocks) + TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->mode, + __entry->uid, __entry->gid, + (unsigned long long) __entry->blocks) ); TRACE_EVENT(ext4_request_inode, @@ -43,19 +53,22 @@ TRACE_EVENT(ext4_request_inode, TP_ARGS(dir, mode), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, dir ) __field( umode_t, mode ) ), TP_fast_assign( - __entry->dev = dir->i_sb->s_dev; + __entry->dev_major = MAJOR(dir->i_sb->s_dev); + __entry->dev_minor = MINOR(dir->i_sb->s_dev); __entry->dir = dir->i_ino; __entry->mode = mode; ), - TP_printk("dev %s dir %lu mode %d", - jbd2_dev_to_name(__entry->dev), __entry->dir, __entry->mode) + TP_printk("dev %d,%d dir %lu mode 0%o", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->dir, __entry->mode) ); TRACE_EVENT(ext4_allocate_inode, @@ -64,112 +77,180 @@ TRACE_EVENT(ext4_allocate_inode, TP_ARGS(inode, dir, mode), 
TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( ino_t, dir ) __field( umode_t, mode ) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); __entry->ino = inode->i_ino; __entry->dir = dir->i_ino; __entry->mode = mode; ), - TP_printk("dev %s ino %lu dir %lu mode %d", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->dir, __entry->mode) + TP_printk("dev %d,%d ino %lu dir %lu mode 0%o", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + (unsigned long) __entry->dir, __entry->mode) ); -TRACE_EVENT(ext4_write_begin, +TRACE_EVENT(ext4_evict_inode, + TP_PROTO(struct inode *inode), - TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int flags), + TP_ARGS(inode), - TP_ARGS(inode, pos, len, flags), + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) + __field( int, nlink ) + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); + __entry->ino = inode->i_ino; + __entry->nlink = inode->i_nlink; + ), + + TP_printk("dev %d,%d ino %lu nlink %d", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->nlink) +); + +TRACE_EVENT(ext4_drop_inode, + TP_PROTO(struct inode *inode, int drop), + + TP_ARGS(inode, drop), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) - __field( loff_t, pos ) - __field( unsigned int, len ) - __field( unsigned int, flags ) + __field( int, drop ) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); __entry->ino = inode->i_ino; - __entry->pos = pos; - __entry->len = len; - __entry->flags = flags; + __entry->drop = drop; ), - TP_printk("dev %s ino %lu pos %llu len %u flags %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->flags) + TP_printk("dev %d,%d ino %lu drop %d", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->drop) ); -TRACE_EVENT(ext4_ordered_write_end, - TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int copied), +TRACE_EVENT(ext4_mark_inode_dirty, + TP_PROTO(struct inode *inode, unsigned long IP), - TP_ARGS(inode, pos, len, copied), + TP_ARGS(inode, IP), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) - __field( loff_t, pos ) - __field( unsigned int, len ) - __field( unsigned int, copied ) + __field(unsigned long, ip ) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); __entry->ino = inode->i_ino; - __entry->pos = pos; - __entry->len = len; - __entry->copied = copied; + __entry->ip = IP; + ), + + TP_printk("dev %d,%d ino %lu caller %pF", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, (void *)__entry->ip) +); + +TRACE_EVENT(ext4_begin_ordered_truncate, + TP_PROTO(struct inode *inode, loff_t new_size), + + TP_ARGS(inode, new_size), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) + __field( loff_t, new_size ) ), - TP_printk("dev %s ino %lu pos %llu len %u copied %u", - 
jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->copied) + TP_fast_assign( + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); + __entry->ino = inode->i_ino; + __entry->new_size = new_size; + ), + + TP_printk("dev %d,%d ino %lu new_size %lld", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + (long long) __entry->new_size) ); -TRACE_EVENT(ext4_writeback_write_end, +DECLARE_EVENT_CLASS(ext4__write_begin, + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int copied), + unsigned int flags), - TP_ARGS(inode, pos, len, copied), + TP_ARGS(inode, pos, len, flags), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( loff_t, pos ) __field( unsigned int, len ) - __field( unsigned int, copied ) + __field( unsigned int, flags ) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); __entry->ino = inode->i_ino; __entry->pos = pos; __entry->len = len; - __entry->copied = copied; + __entry->flags = flags; ), - TP_printk("dev %s ino %lu pos %llu len %u copied %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->copied) + TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + __entry->pos, __entry->len, __entry->flags) ); -TRACE_EVENT(ext4_journalled_write_end, +DEFINE_EVENT(ext4__write_begin, ext4_write_begin, + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int copied), + unsigned int flags), + + TP_ARGS(inode, pos, len, flags) +); + +DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int flags), + + TP_ARGS(inode, pos, len, flags) +); + +DECLARE_EVENT_CLASS(ext4__write_end, + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + TP_ARGS(inode, pos, len, copied), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( loff_t, pos ) __field( unsigned int, len ) @@ -177,38 +258,75 @@ TRACE_EVENT(ext4_journalled_write_end, ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); __entry->ino = inode->i_ino; __entry->pos = pos; __entry->len = len; __entry->copied = copied; ), - TP_printk("dev %s ino %lu pos %llu len %u copied %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->copied) + TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->pos, + __entry->len, __entry->copied) +); + +DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +DEFINE_EVENT(ext4__write_end, ext4_writeback_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) ); -TRACE_EVENT(ext4_da_writepage, +DEFINE_EVENT(ext4__write_end, ext4_journalled_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + 
+DEFINE_EVENT(ext4__write_end, ext4_da_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +TRACE_EVENT(ext4_writepage, TP_PROTO(struct inode *inode, struct page *page), TP_ARGS(inode, page), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( pgoff_t, index ) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); __entry->ino = inode->i_ino; __entry->index = page->index; ), - TP_printk("dev %s ino %lu page_index %lu", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index) + TP_printk("dev %d,%d ino %lu page_index %lu", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->index) ); TRACE_EVENT(ext4_da_writepages, @@ -217,39 +335,81 @@ TRACE_EVENT(ext4_da_writepages, TP_ARGS(inode, wbc), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( long, nr_to_write ) __field( long, pages_skipped ) __field( loff_t, range_start ) __field( loff_t, range_end ) - __field( char, nonblocking ) __field( char, for_kupdate ) __field( char, for_reclaim ) - __field( char, for_writepages ) __field( char, range_cyclic ) + __field( pgoff_t, writeback_index ) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); __entry->ino = inode->i_ino; __entry->nr_to_write = wbc->nr_to_write; __entry->pages_skipped = wbc->pages_skipped; __entry->range_start = wbc->range_start; __entry->range_end = wbc->range_end; - __entry->nonblocking = wbc->nonblocking; __entry->for_kupdate = wbc->for_kupdate; __entry->for_reclaim = wbc->for_reclaim; - __entry->for_writepages = wbc->for_writepages; __entry->range_cyclic = wbc->range_cyclic; + __entry->writeback_index = inode->i_mapping->writeback_index; ), - TP_printk("dev %s ino %lu nr_t_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d for_writepages %d range_cyclic %d", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->nr_to_write, + TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld " + "range_start %llu range_end %llu " + "for_kupdate %d for_reclaim %d " + "range_cyclic %d writeback_index %lu", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->nr_to_write, __entry->pages_skipped, __entry->range_start, - __entry->range_end, __entry->nonblocking, + __entry->range_end, __entry->for_kupdate, __entry->for_reclaim, - __entry->for_writepages, __entry->range_cyclic) + __entry->range_cyclic, + (unsigned long) __entry->writeback_index) +); + +TRACE_EVENT(ext4_da_write_pages, + TP_PROTO(struct inode *inode, struct mpage_da_data *mpd), + + TP_ARGS(inode, mpd), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) + __field( __u64, b_blocknr ) + __field( __u32, b_size ) + __field( __u32, b_state ) + __field( unsigned long, first_page ) + __field( int, io_done ) + __field( int, pages_written ) + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); + __entry->ino = inode->i_ino; + __entry->b_blocknr = mpd->b_blocknr; + __entry->b_size = mpd->b_size; + __entry->b_state = mpd->b_state; + 
__entry->first_page = mpd->first_page; + __entry->io_done = mpd->io_done; + __entry->pages_written = mpd->pages_written; + ), + + TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + __entry->b_blocknr, __entry->b_size, + __entry->b_state, __entry->first_page, + __entry->io_done, __entry->pages_written) ); TRACE_EVENT(ext4_da_writepages_result, @@ -259,129 +419,33 @@ TRACE_EVENT(ext4_da_writepages_result, TP_ARGS(inode, wbc, ret, pages_written), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( int, ret ) __field( int, pages_written ) __field( long, pages_skipped ) - __field( char, encountered_congestion ) __field( char, more_io ) - __field( char, no_nrwrite_index_update ) + __field( pgoff_t, writeback_index ) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); __entry->ino = inode->i_ino; __entry->ret = ret; __entry->pages_written = pages_written; __entry->pages_skipped = wbc->pages_skipped; - __entry->encountered_congestion = wbc->encountered_congestion; __entry->more_io = wbc->more_io; - __entry->no_nrwrite_index_update = wbc->no_nrwrite_index_update; + __entry->writeback_index = inode->i_mapping->writeback_index; ), - TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->ret, + TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld more_io %d writeback_index %lu", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->ret, __entry->pages_written, __entry->pages_skipped, - __entry->encountered_congestion, __entry->more_io, - __entry->no_nrwrite_index_update) -); - -TRACE_EVENT(ext4_da_write_begin, - TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int flags), - - TP_ARGS(inode, pos, len, flags), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( loff_t, pos ) - __field( unsigned int, len ) - __field( unsigned int, flags ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->pos = pos; - __entry->len = len; - __entry->flags = flags; - ), - - TP_printk("dev %s ino %lu pos %llu len %u flags %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->flags) -); - -TRACE_EVENT(ext4_da_write_end, - TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int copied), - - TP_ARGS(inode, pos, len, copied), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( loff_t, pos ) - __field( unsigned int, len ) - __field( unsigned int, copied ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->pos = pos; - __entry->len = len; - __entry->copied = copied; - ), - - TP_printk("dev %s ino %lu pos %llu len %u copied %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len, - __entry->copied) -); - -TRACE_EVENT(ext4_normal_writepage, - TP_PROTO(struct inode *inode, struct page *page), - - TP_ARGS(inode, page), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( pgoff_t, index ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino 
= inode->i_ino; - __entry->index = page->index; - ), - - TP_printk("dev %s ino %lu page_index %lu", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index) -); - -TRACE_EVENT(ext4_journalled_writepage, - TP_PROTO(struct inode *inode, struct page *page), - - TP_ARGS(inode, page), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( pgoff_t, index ) - - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->index = page->index; - ), - - TP_printk("dev %s ino %lu page_index %lu", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index) + __entry->more_io, + (unsigned long) __entry->writeback_index) ); TRACE_EVENT(ext4_discard_blocks, @@ -391,30 +455,34 @@ TRACE_EVENT(ext4_discard_blocks, TP_ARGS(sb, blk, count), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( __u64, blk ) __field( __u64, count ) ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev_major = MAJOR(sb->s_dev); + __entry->dev_minor = MINOR(sb->s_dev); __entry->blk = blk; __entry->count = count; ), - TP_printk("dev %s blk %llu count %llu", - jbd2_dev_to_name(__entry->dev), __entry->blk, __entry->count) + TP_printk("dev %d,%d blk %llu count %llu", + __entry->dev_major, __entry->dev_minor, + __entry->blk, __entry->count) ); -TRACE_EVENT(ext4_mb_new_inode_pa, +DECLARE_EVENT_CLASS(ext4__mb_new_pa, TP_PROTO(struct ext4_allocation_context *ac, struct ext4_prealloc_space *pa), TP_ARGS(ac, pa), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( __u64, pa_pstart ) __field( __u32, pa_len ) @@ -423,55 +491,47 @@ TRACE_EVENT(ext4_mb_new_inode_pa, ), TP_fast_assign( - __entry->dev = ac->ac_sb->s_dev; + __entry->dev_major = MAJOR(ac->ac_sb->s_dev); + __entry->dev_minor = MINOR(ac->ac_sb->s_dev); __entry->ino = ac->ac_inode->i_ino; __entry->pa_pstart = pa->pa_pstart; __entry->pa_len = pa->pa_len; __entry->pa_lstart = pa->pa_lstart; ), - TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pa_pstart, + TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart) ); -TRACE_EVENT(ext4_mb_new_group_pa, +DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa, + TP_PROTO(struct ext4_allocation_context *ac, struct ext4_prealloc_space *pa), - TP_ARGS(ac, pa), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( __u64, pa_pstart ) - __field( __u32, pa_len ) - __field( __u64, pa_lstart ) + TP_ARGS(ac, pa) +); - ), +DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa, - TP_fast_assign( - __entry->dev = ac->ac_sb->s_dev; - __entry->ino = ac->ac_inode->i_ino; - __entry->pa_pstart = pa->pa_pstart; - __entry->pa_len = pa->pa_len; - __entry->pa_lstart = pa->pa_lstart; - ), + TP_PROTO(struct ext4_allocation_context *ac, + struct ext4_prealloc_space *pa), - TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pa_pstart, - __entry->pa_len, __entry->pa_lstart) + TP_ARGS(ac, pa) ); TRACE_EVENT(ext4_mb_release_inode_pa, - TP_PROTO(struct ext4_allocation_context *ac, + TP_PROTO(struct super_block *sb, + struct inode *inode, struct ext4_prealloc_space *pa, unsigned long long block, unsigned int count), - TP_ARGS(ac, pa, block, count), + TP_ARGS(sb, inode, pa, 
block, count), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( __u64, block ) __field( __u32, count ) @@ -479,40 +539,42 @@ TRACE_EVENT(ext4_mb_release_inode_pa, ), TP_fast_assign( - __entry->dev = ac->ac_sb->s_dev; - __entry->ino = ac->ac_inode->i_ino; + __entry->dev_major = MAJOR(sb->s_dev); + __entry->dev_minor = MINOR(sb->s_dev); + __entry->ino = inode->i_ino; __entry->block = block; __entry->count = count; ), - TP_printk("dev %s ino %lu block %llu count %u", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->block, - __entry->count) + TP_printk("dev %d,%d ino %lu block %llu count %u", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->block, __entry->count) ); TRACE_EVENT(ext4_mb_release_group_pa, - TP_PROTO(struct ext4_allocation_context *ac, + TP_PROTO(struct super_block *sb, struct ext4_prealloc_space *pa), - TP_ARGS(ac, pa), + TP_ARGS(sb, pa), TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( __u64, pa_pstart ) __field( __u32, pa_len ) ), TP_fast_assign( - __entry->dev = ac->ac_sb->s_dev; - __entry->ino = ac->ac_inode->i_ino; + __entry->dev_major = MAJOR(sb->s_dev); + __entry->dev_minor = MINOR(sb->s_dev); __entry->pa_pstart = pa->pa_pstart; __entry->pa_len = pa->pa_len; ), - TP_printk("dev %s pstart %llu len %u", - jbd2_dev_to_name(__entry->dev), __entry->pa_pstart, __entry->pa_len) + TP_printk("dev %d,%d pstart %llu len %u", + __entry->dev_major, __entry->dev_minor, + __entry->pa_pstart, __entry->pa_len) ); TRACE_EVENT(ext4_discard_preallocations, @@ -521,18 +583,21 @@ TRACE_EVENT(ext4_discard_preallocations, TP_ARGS(inode), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); __entry->ino = inode->i_ino; ), - TP_printk("dev %s ino %lu", - jbd2_dev_to_name(__entry->dev), __entry->ino) + TP_printk("dev %d,%d ino %lu", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino) ); TRACE_EVENT(ext4_mb_discard_preallocations, @@ -541,18 +606,20 @@ TRACE_EVENT(ext4_mb_discard_preallocations, TP_ARGS(sb, needed), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( int, needed ) ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev_major = MAJOR(sb->s_dev); + __entry->dev_minor = MINOR(sb->s_dev); __entry->needed = needed; ), - TP_printk("dev %s needed %d", - jbd2_dev_to_name(__entry->dev), __entry->needed) + TP_printk("dev %d,%d needed %d", + __entry->dev_major, __entry->dev_minor, __entry->needed) ); TRACE_EVENT(ext4_request_blocks, @@ -561,7 +628,8 @@ TRACE_EVENT(ext4_request_blocks, TP_ARGS(ar), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( unsigned int, flags ) __field( unsigned int, len ) @@ -574,7 +642,8 @@ TRACE_EVENT(ext4_request_blocks, ), TP_fast_assign( - __entry->dev = ar->inode->i_sb->s_dev; + __entry->dev_major = MAJOR(ar->inode->i_sb->s_dev); + __entry->dev_minor = MINOR(ar->inode->i_sb->s_dev); __entry->ino = ar->inode->i_ino; __entry->flags = ar->flags; __entry->len = ar->len; @@ -586,9 +655,10 @@ TRACE_EVENT(ext4_request_blocks, __entry->pright = ar->pright; ), - TP_printk("dev %s ino %lu flags 
%u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->flags, - __entry->len, + TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + __entry->flags, __entry->len, (unsigned long long) __entry->logical, (unsigned long long) __entry->goal, (unsigned long long) __entry->lleft, @@ -603,7 +673,8 @@ TRACE_EVENT(ext4_allocate_blocks, TP_ARGS(ar, block), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( __u64, block ) __field( unsigned int, flags ) @@ -617,7 +688,8 @@ TRACE_EVENT(ext4_allocate_blocks, ), TP_fast_assign( - __entry->dev = ar->inode->i_sb->s_dev; + __entry->dev_major = MAJOR(ar->inode->i_sb->s_dev); + __entry->dev_minor = MINOR(ar->inode->i_sb->s_dev); __entry->ino = ar->inode->i_ino; __entry->block = block; __entry->flags = ar->flags; @@ -630,8 +702,9 @@ TRACE_EVENT(ext4_allocate_blocks, __entry->pright = ar->pright; ), - TP_printk("dev %s ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->flags, + TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->flags, __entry->len, __entry->block, (unsigned long long) __entry->logical, (unsigned long long) __entry->goal, @@ -643,54 +716,64 @@ TRACE_EVENT(ext4_allocate_blocks, TRACE_EVENT(ext4_free_blocks, TP_PROTO(struct inode *inode, __u64 block, unsigned long count, - int metadata), + int flags), - TP_ARGS(inode, block, count, metadata), + TP_ARGS(inode, block, count, flags), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) + __field( umode_t, mode ) __field( __u64, block ) __field( unsigned long, count ) - __field( int, metadata ) - + __field( int, flags ) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; __entry->block = block; __entry->count = count; - __entry->metadata = metadata; + __entry->flags = flags; ), - TP_printk("dev %s ino %lu block %llu count %lu metadata %d", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->block, - __entry->count, __entry->metadata) + TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + __entry->mode, __entry->block, __entry->count, + __entry->flags) ); TRACE_EVENT(ext4_sync_file, - TP_PROTO(struct file *file, struct dentry *dentry, int datasync), + TP_PROTO(struct file *file, int datasync), - TP_ARGS(file, dentry, datasync), + TP_ARGS(file, datasync), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( ino_t, ino ) __field( ino_t, parent ) __field( int, datasync ) ), TP_fast_assign( - __entry->dev = dentry->d_inode->i_sb->s_dev; + struct dentry *dentry = file->f_path.dentry; + + __entry->dev_major = MAJOR(dentry->d_inode->i_sb->s_dev); + __entry->dev_minor = MINOR(dentry->d_inode->i_sb->s_dev); __entry->ino = dentry->d_inode->i_ino; __entry->datasync = datasync; 
__entry->parent = dentry->d_parent->d_inode->i_ino; ), - TP_printk("dev %s ino %ld parent %ld datasync %d ", - jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->parent, - __entry->datasync) + TP_printk("dev %d,%d ino %ld parent %ld datasync %d ", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + (unsigned long) __entry->parent, __entry->datasync) ); TRACE_EVENT(ext4_sync_fs, @@ -699,18 +782,388 @@ TRACE_EVENT(ext4_sync_fs, TP_ARGS(sb, wait), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( int, wait ) ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev_major = MAJOR(sb->s_dev); + __entry->dev_minor = MINOR(sb->s_dev); __entry->wait = wait; ), - TP_printk("dev %s wait %d", jbd2_dev_to_name(__entry->dev), - __entry->wait) + TP_printk("dev %d,%d wait %d", __entry->dev_major, + __entry->dev_minor, __entry->wait) +); + +TRACE_EVENT(ext4_alloc_da_blocks, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) + __field( unsigned int, data_blocks ) + __field( unsigned int, meta_blocks ) + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); + __entry->ino = inode->i_ino; + __entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks; + __entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; + ), + + TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + __entry->data_blocks, __entry->meta_blocks) +); + +TRACE_EVENT(ext4_mballoc_alloc, + TP_PROTO(struct ext4_allocation_context *ac), + + TP_ARGS(ac), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) + __field( __u16, found ) + __field( __u16, groups ) + __field( __u16, buddy ) + __field( __u16, flags ) + __field( __u16, tail ) + __field( __u8, cr ) + __field( __u32, orig_logical ) + __field( int, orig_start ) + __field( __u32, orig_group ) + __field( int, orig_len ) + __field( __u32, goal_logical ) + __field( int, goal_start ) + __field( __u32, goal_group ) + __field( int, goal_len ) + __field( __u32, result_logical ) + __field( int, result_start ) + __field( __u32, result_group ) + __field( int, result_len ) + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(ac->ac_inode->i_sb->s_dev); + __entry->dev_minor = MINOR(ac->ac_inode->i_sb->s_dev); + __entry->ino = ac->ac_inode->i_ino; + __entry->found = ac->ac_found; + __entry->flags = ac->ac_flags; + __entry->groups = ac->ac_groups_scanned; + __entry->buddy = ac->ac_buddy; + __entry->tail = ac->ac_tail; + __entry->cr = ac->ac_criteria; + __entry->orig_logical = ac->ac_o_ex.fe_logical; + __entry->orig_start = ac->ac_o_ex.fe_start; + __entry->orig_group = ac->ac_o_ex.fe_group; + __entry->orig_len = ac->ac_o_ex.fe_len; + __entry->goal_logical = ac->ac_g_ex.fe_logical; + __entry->goal_start = ac->ac_g_ex.fe_start; + __entry->goal_group = ac->ac_g_ex.fe_group; + __entry->goal_len = ac->ac_g_ex.fe_len; + __entry->result_logical = ac->ac_f_ex.fe_logical; + __entry->result_start = ac->ac_f_ex.fe_start; + __entry->result_group = ac->ac_f_ex.fe_group; + __entry->result_len = ac->ac_f_ex.fe_len; + ), + + TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u " + "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x " + "tail %u broken %u", + __entry->dev_major, __entry->dev_minor, + (unsigned long) 
__entry->ino, + __entry->orig_group, __entry->orig_start, + __entry->orig_len, __entry->orig_logical, + __entry->goal_group, __entry->goal_start, + __entry->goal_len, __entry->goal_logical, + __entry->result_group, __entry->result_start, + __entry->result_len, __entry->result_logical, + __entry->found, __entry->groups, __entry->cr, + __entry->flags, __entry->tail, + __entry->buddy ? 1 << __entry->buddy : 0) +); + +TRACE_EVENT(ext4_mballoc_prealloc, + TP_PROTO(struct ext4_allocation_context *ac), + + TP_ARGS(ac), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) + __field( __u32, orig_logical ) + __field( int, orig_start ) + __field( __u32, orig_group ) + __field( int, orig_len ) + __field( __u32, result_logical ) + __field( int, result_start ) + __field( __u32, result_group ) + __field( int, result_len ) + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(ac->ac_inode->i_sb->s_dev); + __entry->dev_minor = MINOR(ac->ac_inode->i_sb->s_dev); + __entry->ino = ac->ac_inode->i_ino; + __entry->orig_logical = ac->ac_o_ex.fe_logical; + __entry->orig_start = ac->ac_o_ex.fe_start; + __entry->orig_group = ac->ac_o_ex.fe_group; + __entry->orig_len = ac->ac_o_ex.fe_len; + __entry->result_logical = ac->ac_b_ex.fe_logical; + __entry->result_start = ac->ac_b_ex.fe_start; + __entry->result_group = ac->ac_b_ex.fe_group; + __entry->result_len = ac->ac_b_ex.fe_len; + ), + + TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + __entry->orig_group, __entry->orig_start, + __entry->orig_len, __entry->orig_logical, + __entry->result_group, __entry->result_start, + __entry->result_len, __entry->result_logical) +); + +DECLARE_EVENT_CLASS(ext4__mballoc, + TP_PROTO(struct super_block *sb, + struct inode *inode, + ext4_group_t group, + ext4_grpblk_t start, + ext4_grpblk_t len), + + TP_ARGS(sb, inode, group, start, len), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) + __field( int, result_start ) + __field( __u32, result_group ) + __field( int, result_len ) + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(sb->s_dev); + __entry->dev_minor = MINOR(sb->s_dev); + __entry->ino = inode ? 
inode->i_ino : 0; + __entry->result_start = start; + __entry->result_group = group; + __entry->result_len = len; + ), + + TP_printk("dev %d,%d inode %lu extent %u/%d/%u ", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + __entry->result_group, __entry->result_start, + __entry->result_len) +); + +DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard, + + TP_PROTO(struct super_block *sb, + struct inode *inode, + ext4_group_t group, + ext4_grpblk_t start, + ext4_grpblk_t len), + + TP_ARGS(sb, inode, group, start, len) +); + +DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free, + + TP_PROTO(struct super_block *sb, + struct inode *inode, + ext4_group_t group, + ext4_grpblk_t start, + ext4_grpblk_t len), + + TP_ARGS(sb, inode, group, start, len) +); + +TRACE_EVENT(ext4_forget, + TP_PROTO(struct inode *inode, int is_metadata, __u64 block), + + TP_ARGS(inode, is_metadata, block), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) + __field( umode_t, mode ) + __field( int, is_metadata ) + __field( __u64, block ) + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); + __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; + __entry->is_metadata = is_metadata; + __entry->block = block; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->mode, + __entry->is_metadata, __entry->block) +); + +TRACE_EVENT(ext4_da_update_reserve_space, + TP_PROTO(struct inode *inode, int used_blocks), + + TP_ARGS(inode, used_blocks), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) + __field( umode_t, mode ) + __field( __u64, i_blocks ) + __field( int, used_blocks ) + __field( int, reserved_data_blocks ) + __field( int, reserved_meta_blocks ) + __field( int, allocated_meta_blocks ) + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); + __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; + __entry->i_blocks = inode->i_blocks; + __entry->used_blocks = used_blocks; + __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks; + __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; + __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, __entry->mode, + (unsigned long long) __entry->i_blocks, + __entry->used_blocks, __entry->reserved_data_blocks, + __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) +); + +TRACE_EVENT(ext4_da_reserve_space, + TP_PROTO(struct inode *inode, int md_needed), + + TP_ARGS(inode, md_needed), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) + __field( umode_t, mode ) + __field( __u64, i_blocks ) + __field( int, md_needed ) + __field( int, reserved_data_blocks ) + __field( int, reserved_meta_blocks ) + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); + __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; + __entry->i_blocks = inode->i_blocks; + __entry->md_needed = md_needed; + __entry->reserved_data_blocks = 
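The ext4__mballoc class above shows the DECLARE_EVENT_CLASS()/DEFINE_EVENT() split this patch uses throughout: the class defines the record layout, assignment, and format string once, and each DEFINE_EVENT() stamps out a named tracepoint that reuses them. A hypothetical minimal pair, with invented widget names purely for illustration:

	/* hypothetical events, not part of this patch */
	DECLARE_EVENT_CLASS(widget_template,
		TP_PROTO(int id),
		TP_ARGS(id),
		TP_STRUCT__entry(
			__field(	int,	id	)
		),
		TP_fast_assign(
			__entry->id = id;
		),
		TP_printk("id=%d", __entry->id)
	);

	/* each DEFINE_EVENT costs far less source text (and generated
	 * code) than a full TRACE_EVENT would */
	DEFINE_EVENT(widget_template, widget_add,
		TP_PROTO(int id),
		TP_ARGS(id)
	);

	DEFINE_EVENT(widget_template, widget_remove,
		TP_PROTO(int id),
		TP_ARGS(id)
	);
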
EXT4_I(inode)->i_reserved_data_blocks; + __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + __entry->mode, (unsigned long long) __entry->i_blocks, + __entry->md_needed, __entry->reserved_data_blocks, + __entry->reserved_meta_blocks) +); + +TRACE_EVENT(ext4_da_release_space, + TP_PROTO(struct inode *inode, int freed_blocks), + + TP_ARGS(inode, freed_blocks), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) + __field( umode_t, mode ) + __field( __u64, i_blocks ) + __field( int, freed_blocks ) + __field( int, reserved_data_blocks ) + __field( int, reserved_meta_blocks ) + __field( int, allocated_meta_blocks ) + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); + __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; + __entry->i_blocks = inode->i_blocks; + __entry->freed_blocks = freed_blocks; + __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks; + __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; + __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino, + __entry->mode, (unsigned long long) __entry->i_blocks, + __entry->freed_blocks, __entry->reserved_data_blocks, + __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) +); + +DECLARE_EVENT_CLASS(ext4__bitmap_load, + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group), + + TP_STRUCT__entry( + __field( int, dev_major ) + __field( int, dev_minor ) + __field( __u32, group ) + + ), + + TP_fast_assign( + __entry->dev_major = MAJOR(sb->s_dev); + __entry->dev_minor = MINOR(sb->s_dev); + __entry->group = group; + ), + + TP_printk("dev %d,%d group %u", + __entry->dev_major, __entry->dev_minor, __entry->group) +); + +DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load, + + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group) +); + +DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load, + + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group) ); #endif /* _TRACE_EXT4_H */ diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h new file mode 100644 index 000000000000..e3615c093741 --- /dev/null +++ b/include/trace/events/gfpflags.h @@ -0,0 +1,37 @@ +/* + * The order of these masks is important. Matching masks will be seen + * first and the left over flags will end up showing by themselves. + * + * For example, if we have GFP_KERNEL before GFP_USER we wil get: + * + * GFP_KERNEL|GFP_HARDWALL + * + * Thus most bits set go first. + */ +#define show_gfp_flags(flags) \ + (flags) ? 
__print_flags(flags, "|", \ + {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ + {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ + {(unsigned long)GFP_USER, "GFP_USER"}, \ + {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \ + {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \ + {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \ + {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \ + {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \ + {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \ + {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \ + {(unsigned long)__GFP_IO, "GFP_IO"}, \ + {(unsigned long)__GFP_COLD, "GFP_COLD"}, \ + {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \ + {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \ + {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \ + {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \ + {(unsigned long)__GFP_COMP, "GFP_COMP"}, \ + {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \ + {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \ + {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ + {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ + {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ + {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ + ) : "GFP_NOWAIT" + diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index b0c7ede55eb1..1c09820df585 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h @@ -1,23 +1,26 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM irq + #if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_IRQ_H #include <linux/tracepoint.h> -#include <linux/interrupt.h> -#undef TRACE_SYSTEM -#define TRACE_SYSTEM irq +struct irqaction; +struct softirq_action; #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq } -#define show_softirq_name(val) \ - __print_symbolic(val, \ - softirq_name(HI), \ - softirq_name(TIMER), \ - softirq_name(NET_TX), \ - softirq_name(NET_RX), \ - softirq_name(BLOCK), \ - softirq_name(TASKLET), \ - softirq_name(SCHED), \ - softirq_name(HRTIMER), \ +#define show_softirq_name(val) \ + __print_symbolic(val, \ + softirq_name(HI), \ + softirq_name(TIMER), \ + softirq_name(NET_TX), \ + softirq_name(NET_RX), \ + softirq_name(BLOCK), \ + softirq_name(BLOCK_IOPOLL), \ + softirq_name(TASKLET), \ + softirq_name(SCHED), \ + softirq_name(HRTIMER), \ softirq_name(RCU)) /** @@ -47,7 +50,7 @@ TRACE_EVENT(irq_handler_entry, __assign_str(name, action->name); ), - TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name)) + TP_printk("irq=%d name=%s", __entry->irq, __get_str(name)) ); /** @@ -77,66 +80,68 @@ TRACE_EVENT(irq_handler_exit, __entry->ret = ret; ), - TP_printk("irq=%d return=%s", + TP_printk("irq=%d ret=%s", __entry->irq, __entry->ret ? "handled" : "unhandled") ); -/** - * softirq_entry - called immediately before the softirq handler - * @h: pointer to struct softirq_action - * @vec: pointer to first struct softirq_action in softirq_vec array - * - * The @h parameter, contains a pointer to the struct softirq_action - * which has a pointer to the action handler that is called. By subtracting - * the @vec pointer from the @h pointer, we can determine the softirq - * number. Also, when used in combination with the softirq_exit tracepoint - * we can determine the softirq latency. 
- */ -TRACE_EVENT(softirq_entry, +DECLARE_EVENT_CLASS(softirq, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_PROTO(unsigned int vec_nr), - TP_ARGS(h, vec), + TP_ARGS(vec_nr), TP_STRUCT__entry( - __field( int, vec ) + __field( unsigned int, vec ) ), TP_fast_assign( - __entry->vec = (int)(h - vec); + __entry->vec = vec_nr; ), - TP_printk("softirq=%d action=%s", __entry->vec, + TP_printk("vec=%u [action=%s]", __entry->vec, show_softirq_name(__entry->vec)) ); /** + * softirq_entry - called immediately before the softirq handler + * @vec_nr: softirq vector number + * + * When used in combination with the softirq_exit tracepoint + * we can determine the softirq handler runtime. + */ +DEFINE_EVENT(softirq, softirq_entry, + + TP_PROTO(unsigned int vec_nr), + + TP_ARGS(vec_nr) +); + +/** * softirq_exit - called immediately after the softirq handler returns - * @h: pointer to struct softirq_action - * @vec: pointer to first struct softirq_action in softirq_vec array + * @vec_nr: softirq vector number * - * The @h parameter contains a pointer to the struct softirq_action - * that has handled the softirq. By subtracting the @vec pointer from - * the @h pointer, we can determine the softirq number. Also, when used in - * combination with the softirq_entry tracepoint we can determine the softirq - * latency. + * When used in combination with the softirq_entry tracepoint + * we can determine the softirq handler runtime. */ -TRACE_EVENT(softirq_exit, +DEFINE_EVENT(softirq, softirq_exit, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_PROTO(unsigned int vec_nr), - TP_ARGS(h, vec), + TP_ARGS(vec_nr) +); - TP_STRUCT__entry( - __field( int, vec ) - ), +/** + * softirq_raise - called immediately when a softirq is raised + * @vec_nr: softirq vector number + * + * When used in combination with the softirq_entry tracepoint + * we can determine the softirq raise to run latency.
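The softirq class above also changes what callers pass in: the old events took two softirq_action pointers and recovered the vector number by pointer subtraction in TP_fast_assign(); the new ones take vec_nr directly, which is cheaper and keeps struct softirq_action out of the header. The arithmetic the old code relied on, as a standalone sketch:

	#include <stdio.h>

	struct softirq_action { void (*action)(void *); };

	int main(void)
	{
		struct softirq_action vec[10];
		struct softirq_action *h = &vec[3];

		/* pointer subtraction yields an element count, not a byte
		 * offset -- this is what the old (int)(h - vec) computed */
		printf("vec_nr = %d\n", (int)(h - vec));	/* prints 3 */
		return 0;
	}
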
+ */ +DEFINE_EVENT(softirq, softirq_raise, - TP_fast_assign( - __entry->vec = (int)(h - vec); - ), + TP_PROTO(unsigned int vec_nr), - TP_printk("softirq=%d action=%s", __entry->vec, - show_softirq_name(__entry->vec)) + TP_ARGS(vec_nr) ); #endif /* _TRACE_IRQ_H */ diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index 845b0b4b48fd..7447ea9305b5 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -1,11 +1,14 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM jbd2 + #if !defined(_TRACE_JBD2_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_JBD2_H #include <linux/jbd2.h> #include <linux/tracepoint.h> -#undef TRACE_SYSTEM -#define TRACE_SYSTEM jbd2 +struct transaction_chp_stats_s; +struct transaction_run_stats_s; TRACE_EVENT(jbd2_checkpoint, @@ -14,152 +17,230 @@ TRACE_EVENT(jbd2_checkpoint, TP_ARGS(journal, result), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( int, result ) ), TP_fast_assign( - __entry->dev = journal->j_fs_dev->bd_dev; + __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); + __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev); __entry->result = result; ), - TP_printk("dev %s result %d", - jbd2_dev_to_name(__entry->dev), __entry->result) + TP_printk("dev %d,%d result %d", + __entry->dev_major, __entry->dev_minor, __entry->result) ); -TRACE_EVENT(jbd2_start_commit, +DECLARE_EVENT_CLASS(jbd2_commit, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( char, sync_commit ) __field( int, transaction ) ), TP_fast_assign( - __entry->dev = journal->j_fs_dev->bd_dev; + __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); + __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev); __entry->sync_commit = commit_transaction->t_synchronous_commit; __entry->transaction = commit_transaction->t_tid; ), - TP_printk("dev %s transaction %d sync %d", - jbd2_dev_to_name(__entry->dev), __entry->transaction, - __entry->sync_commit) + TP_printk("dev %d,%d transaction %d sync %d", + __entry->dev_major, __entry->dev_minor, + __entry->transaction, __entry->sync_commit) +); + +DEFINE_EVENT(jbd2_commit, jbd2_start_commit, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd2_commit, jbd2_commit_locking, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) ); -TRACE_EVENT(jbd2_commit_locking, +DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + TP_ARGS(journal, commit_transaction) +); + +DEFINE_EVENT(jbd2_commit, jbd2_commit_logging, + + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + + TP_ARGS(journal, commit_transaction) +); + +TRACE_EVENT(jbd2_end_commit, + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), + TP_ARGS(journal, commit_transaction), TP_STRUCT__entry( - __field( dev_t, dev ) + __field( int, dev_major ) + __field( int, dev_minor ) __field( char, sync_commit ) __field( int, transaction ) + __field( int, head ) ), TP_fast_assign( - __entry->dev = journal->j_fs_dev->bd_dev; + __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); + __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev); __entry->sync_commit = commit_transaction->t_synchronous_commit; __entry->transaction = commit_transaction->t_tid; + 
__entry->head = journal->j_tail_sequence; ), - TP_printk("dev %s transaction %d sync %d", - jbd2_dev_to_name(__entry->dev), __entry->transaction, - __entry->sync_commit) + TP_printk("dev %d,%d transaction %d sync %d head %d", + __entry->dev_major, __entry->dev_minor, + __entry->transaction, __entry->sync_commit, __entry->head) ); -TRACE_EVENT(jbd2_commit_flushing, - - TP_PROTO(journal_t *journal, transaction_t *commit_transaction), +TRACE_EVENT(jbd2_submit_inode_data, + TP_PROTO(struct inode *inode), - TP_ARGS(journal, commit_transaction), + TP_ARGS(inode), TP_STRUCT__entry( - __field( dev_t, dev ) - __field( char, sync_commit ) - __field( int, transaction ) + __field( int, dev_major ) + __field( int, dev_minor ) + __field( ino_t, ino ) ), TP_fast_assign( - __entry->dev = journal->j_fs_dev->bd_dev; - __entry->sync_commit = commit_transaction->t_synchronous_commit; - __entry->transaction = commit_transaction->t_tid; + __entry->dev_major = MAJOR(inode->i_sb->s_dev); + __entry->dev_minor = MINOR(inode->i_sb->s_dev); + __entry->ino = inode->i_ino; ), - TP_printk("dev %s transaction %d sync %d", - jbd2_dev_to_name(__entry->dev), __entry->transaction, - __entry->sync_commit) + TP_printk("dev %d,%d ino %lu", + __entry->dev_major, __entry->dev_minor, + (unsigned long) __entry->ino) ); -TRACE_EVENT(jbd2_commit_logging, +TRACE_EVENT(jbd2_run_stats, + TP_PROTO(dev_t dev, unsigned long tid, + struct transaction_run_stats_s *stats), - TP_PROTO(journal_t *journal, transaction_t *commit_transaction), - - TP_ARGS(journal, commit_transaction), + TP_ARGS(dev, tid, stats), TP_STRUCT__entry( - __field( dev_t, dev ) - __field( char, sync_commit ) - __field( int, transaction ) + __field( int, dev_major ) + __field( int, dev_minor ) + __field( unsigned long, tid ) + __field( unsigned long, wait ) + __field( unsigned long, running ) + __field( unsigned long, locked ) + __field( unsigned long, flushing ) + __field( unsigned long, logging ) + __field( __u32, handle_count ) + __field( __u32, blocks ) + __field( __u32, blocks_logged ) ), TP_fast_assign( - __entry->dev = journal->j_fs_dev->bd_dev; - __entry->sync_commit = commit_transaction->t_synchronous_commit; - __entry->transaction = commit_transaction->t_tid; + __entry->dev_major = MAJOR(dev); + __entry->dev_minor = MINOR(dev); + __entry->tid = tid; + __entry->wait = stats->rs_wait; + __entry->running = stats->rs_running; + __entry->locked = stats->rs_locked; + __entry->flushing = stats->rs_flushing; + __entry->logging = stats->rs_logging; + __entry->handle_count = stats->rs_handle_count; + __entry->blocks = stats->rs_blocks; + __entry->blocks_logged = stats->rs_blocks_logged; ), - TP_printk("dev %s transaction %d sync %d", - jbd2_dev_to_name(__entry->dev), __entry->transaction, - __entry->sync_commit) + TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u " + "logging %u handle_count %u blocks %u blocks_logged %u", + __entry->dev_major, __entry->dev_minor, __entry->tid, + jiffies_to_msecs(__entry->wait), + jiffies_to_msecs(__entry->running), + jiffies_to_msecs(__entry->locked), + jiffies_to_msecs(__entry->flushing), + jiffies_to_msecs(__entry->logging), + __entry->handle_count, __entry->blocks, + __entry->blocks_logged) ); -TRACE_EVENT(jbd2_end_commit, - TP_PROTO(journal_t *journal, transaction_t *commit_transaction), +TRACE_EVENT(jbd2_checkpoint_stats, + TP_PROTO(dev_t dev, unsigned long tid, + struct transaction_chp_stats_s *stats), - TP_ARGS(journal, commit_transaction), + TP_ARGS(dev, tid, stats), TP_STRUCT__entry( - __field( dev_t, dev ) - 
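jbd2_run_stats above records the raw jiffies counts and converts with jiffies_to_msecs() only inside TP_printk(); keeping TP_fast_assign() to plain stores keeps the traced path cheap, since the format string runs only when someone actually reads the buffer. The conversion itself, sketched in userspace with an assumed tick rate:

	#include <stdio.h>

	#define HZ 250	/* assumed tick rate for this sketch */

	static unsigned int jiffies_to_msecs_demo(unsigned long j)
	{
		return (unsigned int)(j * 1000 / HZ);
	}

	int main(void)
	{
		/* 50 ticks at HZ=250 is 200 ms -- the kind of value the
		 * jbd2_run_stats output shows for its "running" field */
		printf("%u ms\n", jiffies_to_msecs_demo(50));
		return 0;
	}
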
__field( char, sync_commit ) - __field( int, transaction ) - __field( int, head ) + __field( int, dev_major ) + __field( int, dev_minor ) + __field( unsigned long, tid ) + __field( unsigned long, chp_time ) + __field( __u32, forced_to_close ) + __field( __u32, written ) + __field( __u32, dropped ) ), TP_fast_assign( - __entry->dev = journal->j_fs_dev->bd_dev; - __entry->sync_commit = commit_transaction->t_synchronous_commit; - __entry->transaction = commit_transaction->t_tid; - __entry->head = journal->j_tail_sequence; + __entry->dev_major = MAJOR(dev); + __entry->dev_minor = MINOR(dev); + __entry->tid = tid; + __entry->chp_time = stats->cs_chp_time; + __entry->forced_to_close= stats->cs_forced_to_close; + __entry->written = stats->cs_written; + __entry->dropped = stats->cs_dropped; ), - TP_printk("dev %s transaction %d sync %d head %d", - jbd2_dev_to_name(__entry->dev), __entry->transaction, - __entry->sync_commit, __entry->head) + TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u " + "written %u dropped %u", + __entry->dev_major, __entry->dev_minor, __entry->tid, + jiffies_to_msecs(__entry->chp_time), + __entry->forced_to_close, __entry->written, __entry->dropped) ); -TRACE_EVENT(jbd2_submit_inode_data, - TP_PROTO(struct inode *inode), +TRACE_EVENT(jbd2_cleanup_journal_tail, - TP_ARGS(inode), + TP_PROTO(journal_t *journal, tid_t first_tid, + unsigned long block_nr, unsigned long freed), + + TP_ARGS(journal, first_tid, block_nr, freed), TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) + __field( int, dev_major ) + __field( int, dev_minor ) + __field( tid_t, tail_sequence ) + __field( tid_t, first_tid ) + __field(unsigned long, block_nr ) + __field(unsigned long, freed ) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; + __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); + __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev); + __entry->tail_sequence = journal->j_tail_sequence; + __entry->first_tid = first_tid; + __entry->block_nr = block_nr; + __entry->freed = freed; ), - TP_printk("dev %s ino %lu", - jbd2_dev_to_name(__entry->dev), __entry->ino) + TP_printk("dev %d,%d from %u to %u offset %lu freed %lu", + __entry->dev_major, __entry->dev_minor, + __entry->tail_sequence, __entry->first_tid, + __entry->block_nr, __entry->freed) ); #endif /* _TRACE_JBD2_H */ diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 9baba50d6512..a9c87ad8331c 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -1,50 +1,14 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kmem + #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_KMEM_H #include <linux/types.h> #include <linux/tracepoint.h> +#include "gfpflags.h" -#undef TRACE_SYSTEM -#define TRACE_SYSTEM kmem - -/* - * The order of these masks is important. Matching masks will be seen - * first and the left over flags will end up showing by themselves. - * - * For example, if we have GFP_KERNEL before GFP_USER we wil get: - * - * GFP_KERNEL|GFP_HARDWALL - * - * Thus most bits set go first. - */ -#define show_gfp_flags(flags) \ - (flags) ? 
__print_flags(flags, "|", \ - {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ - {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ - {(unsigned long)GFP_USER, "GFP_USER"}, \ - {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \ - {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \ - {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \ - {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \ - {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \ - {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \ - {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \ - {(unsigned long)__GFP_IO, "GFP_IO"}, \ - {(unsigned long)__GFP_COLD, "GFP_COLD"}, \ - {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \ - {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \ - {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \ - {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \ - {(unsigned long)__GFP_COMP, "GFP_COMP"}, \ - {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \ - {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \ - {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ - {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ - {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ - {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ - ) : "GFP_NOWAIT" - -TRACE_EVENT(kmalloc, +DECLARE_EVENT_CLASS(kmem_alloc, TP_PROTO(unsigned long call_site, const void *ptr, @@ -78,41 +42,23 @@ TRACE_EVENT(kmalloc, show_gfp_flags(__entry->gfp_flags)) ); -TRACE_EVENT(kmem_cache_alloc, +DEFINE_EVENT(kmem_alloc, kmalloc, - TP_PROTO(unsigned long call_site, - const void *ptr, - size_t bytes_req, - size_t bytes_alloc, - gfp_t gfp_flags), + TP_PROTO(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) +); - TP_STRUCT__entry( - __field( unsigned long, call_site ) - __field( const void *, ptr ) - __field( size_t, bytes_req ) - __field( size_t, bytes_alloc ) - __field( gfp_t, gfp_flags ) - ), +DEFINE_EVENT(kmem_alloc, kmem_cache_alloc, - TP_fast_assign( - __entry->call_site = call_site; - __entry->ptr = ptr; - __entry->bytes_req = bytes_req; - __entry->bytes_alloc = bytes_alloc; - __entry->gfp_flags = gfp_flags; - ), + TP_PROTO(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), - TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", - __entry->call_site, - __entry->ptr, - __entry->bytes_req, - __entry->bytes_alloc, - show_gfp_flags(__entry->gfp_flags)) + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) ); -TRACE_EVENT(kmalloc_node, +DECLARE_EVENT_CLASS(kmem_alloc_node, TP_PROTO(unsigned long call_site, const void *ptr, @@ -150,81 +96,212 @@ TRACE_EVENT(kmalloc_node, __entry->node) ); -TRACE_EVENT(kmem_cache_alloc_node, +DEFINE_EVENT(kmem_alloc_node, kmalloc_node, - TP_PROTO(unsigned long call_site, - const void *ptr, - size_t bytes_req, - size_t bytes_alloc, - gfp_t gfp_flags, - int node), + TP_PROTO(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, + gfp_t gfp_flags, int node), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node) +); + +DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node, + + TP_PROTO(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, + gfp_t gfp_flags, int node), + + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node) +); + +DECLARE_EVENT_CLASS(kmem_free, + + TP_PROTO(unsigned long 
call_site, const void *ptr), + + TP_ARGS(call_site, ptr), TP_STRUCT__entry( __field( unsigned long, call_site ) __field( const void *, ptr ) - __field( size_t, bytes_req ) - __field( size_t, bytes_alloc ) - __field( gfp_t, gfp_flags ) - __field( int, node ) ), TP_fast_assign( __entry->call_site = call_site; __entry->ptr = ptr; - __entry->bytes_req = bytes_req; - __entry->bytes_alloc = bytes_alloc; - __entry->gfp_flags = gfp_flags; - __entry->node = node; ), - TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d", - __entry->call_site, - __entry->ptr, - __entry->bytes_req, - __entry->bytes_alloc, - show_gfp_flags(__entry->gfp_flags), - __entry->node) + TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) ); -TRACE_EVENT(kfree, +DEFINE_EVENT(kmem_free, kfree, TP_PROTO(unsigned long call_site, const void *ptr), - TP_ARGS(call_site, ptr), + TP_ARGS(call_site, ptr) +); + +DEFINE_EVENT(kmem_free, kmem_cache_free, + + TP_PROTO(unsigned long call_site, const void *ptr), + + TP_ARGS(call_site, ptr) +); + +TRACE_EVENT(mm_page_free_direct, + + TP_PROTO(struct page *page, unsigned int order), + + TP_ARGS(page, order), TP_STRUCT__entry( - __field( unsigned long, call_site ) - __field( const void *, ptr ) + __field( struct page *, page ) + __field( unsigned int, order ) ), TP_fast_assign( - __entry->call_site = call_site; - __entry->ptr = ptr; + __entry->page = page; + __entry->order = order; ), - TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) + TP_printk("page=%p pfn=%lu order=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->order) ); -TRACE_EVENT(kmem_cache_free, +TRACE_EVENT(mm_pagevec_free, - TP_PROTO(unsigned long call_site, const void *ptr), + TP_PROTO(struct page *page, int cold), - TP_ARGS(call_site, ptr), + TP_ARGS(page, cold), TP_STRUCT__entry( - __field( unsigned long, call_site ) - __field( const void *, ptr ) + __field( struct page *, page ) + __field( int, cold ) ), TP_fast_assign( - __entry->call_site = call_site; - __entry->ptr = ptr; + __entry->page = page; + __entry->cold = cold; ), - TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) + TP_printk("page=%p pfn=%lu order=0 cold=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->cold) ); + +TRACE_EVENT(mm_page_alloc, + + TP_PROTO(struct page *page, unsigned int order, + gfp_t gfp_flags, int migratetype), + + TP_ARGS(page, order, gfp_flags, migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( unsigned int, order ) + __field( gfp_t, gfp_flags ) + __field( int, migratetype ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->order = order; + __entry->gfp_flags = gfp_flags; + __entry->migratetype = migratetype; + ), + + TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s", + __entry->page, + page_to_pfn(__entry->page), + __entry->order, + __entry->migratetype, + show_gfp_flags(__entry->gfp_flags)) +); + +DECLARE_EVENT_CLASS(mm_page, + + TP_PROTO(struct page *page, unsigned int order, int migratetype), + + TP_ARGS(page, order, migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( unsigned int, order ) + __field( int, migratetype ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->order = order; + __entry->migratetype = migratetype; + ), + + TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->order, + __entry->migratetype, + __entry->order == 0) +); + +DEFINE_EVENT(mm_page, 
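Once kfree and kmem_cache_free share the kmem_free class, both remain ordinary tracepoints a module can attach to. A sketch of a probe module, assuming the two-argument register_trace_*() API of this kernel generation, where the probe receives the registration cookie as its first argument:

	#include <linux/module.h>
	#include <trace/events/kmem.h>

	static void kfree_probe(void *ignore, unsigned long call_site,
				const void *ptr)
	{
		pr_info("kfree from %pS ptr=%p\n", (void *)call_site, ptr);
	}

	static int __init kfree_probe_init(void)
	{
		return register_trace_kfree(kfree_probe, NULL);
	}

	static void __exit kfree_probe_exit(void)
	{
		unregister_trace_kfree(kfree_probe, NULL);
		tracepoint_synchronize_unregister();
	}

	module_init(kfree_probe_init);
	module_exit(kfree_probe_exit);
	MODULE_LICENSE("GPL");
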
mm_page_alloc_zone_locked, + + TP_PROTO(struct page *page, unsigned int order, int migratetype), + + TP_ARGS(page, order, migratetype) +); + +DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain, + + TP_PROTO(struct page *page, unsigned int order, int migratetype), + + TP_ARGS(page, order, migratetype), + + TP_printk("page=%p pfn=%lu order=%d migratetype=%d", + __entry->page, page_to_pfn(__entry->page), + __entry->order, __entry->migratetype) +); + +TRACE_EVENT(mm_page_alloc_extfrag, + + TP_PROTO(struct page *page, + int alloc_order, int fallback_order, + int alloc_migratetype, int fallback_migratetype), + + TP_ARGS(page, + alloc_order, fallback_order, + alloc_migratetype, fallback_migratetype), + + TP_STRUCT__entry( + __field( struct page *, page ) + __field( int, alloc_order ) + __field( int, fallback_order ) + __field( int, alloc_migratetype ) + __field( int, fallback_migratetype ) + ), + + TP_fast_assign( + __entry->page = page; + __entry->alloc_order = alloc_order; + __entry->fallback_order = fallback_order; + __entry->alloc_migratetype = alloc_migratetype; + __entry->fallback_migratetype = fallback_migratetype; + ), + + TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d", + __entry->page, + page_to_pfn(__entry->page), + __entry->alloc_order, + __entry->fallback_order, + pageblock_order, + __entry->alloc_migratetype, + __entry->fallback_migratetype, + __entry->fallback_order < pageblock_order, + __entry->alloc_migratetype == __entry->fallback_migratetype) +); + #endif /* _TRACE_KMEM_H */ /* This part must be outside protection */ diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h new file mode 100644 index 000000000000..6dd3a51ab1cb --- /dev/null +++ b/include/trace/events/kvm.h @@ -0,0 +1,191 @@ +#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_MAIN_H + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +#if defined(__KVM_HAVE_IOAPIC) +TRACE_EVENT(kvm_set_irq, + TP_PROTO(unsigned int gsi, int level, int irq_source_id), + TP_ARGS(gsi, level, irq_source_id), + + TP_STRUCT__entry( + __field( unsigned int, gsi ) + __field( int, level ) + __field( int, irq_source_id ) + ), + + TP_fast_assign( + __entry->gsi = gsi; + __entry->level = level; + __entry->irq_source_id = irq_source_id; + ), + + TP_printk("gsi %u level %d source %d", + __entry->gsi, __entry->level, __entry->irq_source_id) +); + +#define kvm_deliver_mode \ + {0x0, "Fixed"}, \ + {0x1, "LowPrio"}, \ + {0x2, "SMI"}, \ + {0x3, "Res3"}, \ + {0x4, "NMI"}, \ + {0x5, "INIT"}, \ + {0x6, "SIPI"}, \ + {0x7, "ExtINT"} + +TRACE_EVENT(kvm_ioapic_set_irq, + TP_PROTO(__u64 e, int pin, bool coalesced), + TP_ARGS(e, pin, coalesced), + + TP_STRUCT__entry( + __field( __u64, e ) + __field( int, pin ) + __field( bool, coalesced ) + ), + + TP_fast_assign( + __entry->e = e; + __entry->pin = pin; + __entry->coalesced = coalesced; + ), + + TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s", + __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e, + __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode), + (__entry->e & (1<<11)) ? "logical" : "physical", + (__entry->e & (1<<15)) ? "level" : "edge", + (__entry->e & (1<<16)) ? "|masked" : "", + __entry->coalesced ? 
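kvm_ioapic_set_irq above decodes the delivery-mode bits through __print_symbolic() and the kvm_deliver_mode table; like __print_flags(), the lookup happens at output time, so only the raw 64-bit redirection entry is stored. The same value-to-name idea in plain C:

	#include <stdio.h>

	/* mirrors the kvm_deliver_mode table above */
	static const char *deliver_mode(unsigned int m)
	{
		static const char *names[] = {
			"Fixed", "LowPrio", "SMI", "Res3",
			"NMI", "INIT", "SIPI", "ExtINT",
		};
		return m < 8 ? names[m] : "?";
	}

	int main(void)
	{
		unsigned long long e = 0x0000000000000400ULL;	/* example entry */

		/* bits 8-10 of an IOAPIC redirection entry hold the mode */
		printf("%s\n", deliver_mode((e >> 8) & 0x7));	/* "NMI" */
		return 0;
	}
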
" (coalesced)" : "") +); + +TRACE_EVENT(kvm_msi_set_irq, + TP_PROTO(__u64 address, __u64 data), + TP_ARGS(address, data), + + TP_STRUCT__entry( + __field( __u64, address ) + __field( __u64, data ) + ), + + TP_fast_assign( + __entry->address = address; + __entry->data = data; + ), + + TP_printk("dst %u vec %x (%s|%s|%s%s)", + (u8)(__entry->address >> 12), (u8)__entry->data, + __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode), + (__entry->address & (1<<2)) ? "logical" : "physical", + (__entry->data & (1<<15)) ? "level" : "edge", + (__entry->address & (1<<3)) ? "|rh" : "") +); + +#define kvm_irqchips \ + {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \ + {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \ + {KVM_IRQCHIP_IOAPIC, "IOAPIC"} + +TRACE_EVENT(kvm_ack_irq, + TP_PROTO(unsigned int irqchip, unsigned int pin), + TP_ARGS(irqchip, pin), + + TP_STRUCT__entry( + __field( unsigned int, irqchip ) + __field( unsigned int, pin ) + ), + + TP_fast_assign( + __entry->irqchip = irqchip; + __entry->pin = pin; + ), + + TP_printk("irqchip %s pin %u", + __print_symbolic(__entry->irqchip, kvm_irqchips), + __entry->pin) +); + + + +#endif /* defined(__KVM_HAVE_IOAPIC) */ + +#define KVM_TRACE_MMIO_READ_UNSATISFIED 0 +#define KVM_TRACE_MMIO_READ 1 +#define KVM_TRACE_MMIO_WRITE 2 + +#define kvm_trace_symbol_mmio \ + { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \ + { KVM_TRACE_MMIO_READ, "read" }, \ + { KVM_TRACE_MMIO_WRITE, "write" } + +TRACE_EVENT(kvm_mmio, + TP_PROTO(int type, int len, u64 gpa, u64 val), + TP_ARGS(type, len, gpa, val), + + TP_STRUCT__entry( + __field( u32, type ) + __field( u32, len ) + __field( u64, gpa ) + __field( u64, val ) + ), + + TP_fast_assign( + __entry->type = type; + __entry->len = len; + __entry->gpa = gpa; + __entry->val = val; + ), + + TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx", + __print_symbolic(__entry->type, kvm_trace_symbol_mmio), + __entry->len, __entry->gpa, __entry->val) +); + +#define kvm_fpu_load_symbol \ + {0, "unload"}, \ + {1, "load"} + +TRACE_EVENT(kvm_fpu, + TP_PROTO(int load), + TP_ARGS(load), + + TP_STRUCT__entry( + __field( u32, load ) + ), + + TP_fast_assign( + __entry->load = load; + ), + + TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol)) +); + +TRACE_EVENT(kvm_age_page, + TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref), + TP_ARGS(hva, slot, ref), + + TP_STRUCT__entry( + __field( u64, hva ) + __field( u64, gfn ) + __field( u8, referenced ) + ), + + TP_fast_assign( + __entry->hva = hva; + __entry->gfn = + slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT); + __entry->referenced = ref; + ), + + TP_printk("hva %llx gfn %llx %s", + __entry->hva, __entry->gfn, + __entry->referenced ? 
"YOUNG" : "OLD") +); + +#endif /* _TRACE_KVM_MAIN_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h new file mode 100644 index 000000000000..2821b86de63b --- /dev/null +++ b/include/trace/events/lock.h @@ -0,0 +1,86 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM lock + +#if !defined(_TRACE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_LOCK_H + +#include <linux/lockdep.h> +#include <linux/tracepoint.h> + +#ifdef CONFIG_LOCKDEP + +TRACE_EVENT(lock_acquire, + + TP_PROTO(struct lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, + struct lockdep_map *next_lock, unsigned long ip), + + TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip), + + TP_STRUCT__entry( + __field(unsigned int, flags) + __string(name, lock->name) + __field(void *, lockdep_addr) + ), + + TP_fast_assign( + __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0); + __assign_str(name, lock->name); + __entry->lockdep_addr = lock; + ), + + TP_printk("%p %s%s%s", __entry->lockdep_addr, + (__entry->flags & 1) ? "try " : "", + (__entry->flags & 2) ? "read " : "", + __get_str(name)) +); + +DECLARE_EVENT_CLASS(lock, + + TP_PROTO(struct lockdep_map *lock, unsigned long ip), + + TP_ARGS(lock, ip), + + TP_STRUCT__entry( + __string( name, lock->name ) + __field( void *, lockdep_addr ) + ), + + TP_fast_assign( + __assign_str(name, lock->name); + __entry->lockdep_addr = lock; + ), + + TP_printk("%p %s", __entry->lockdep_addr, __get_str(name)) +); + +DEFINE_EVENT(lock, lock_release, + + TP_PROTO(struct lockdep_map *lock, unsigned long ip), + + TP_ARGS(lock, ip) +); + +#ifdef CONFIG_LOCK_STAT + +DEFINE_EVENT(lock, lock_contended, + + TP_PROTO(struct lockdep_map *lock, unsigned long ip), + + TP_ARGS(lock, ip) +); + +DEFINE_EVENT(lock, lock_acquired, + + TP_PROTO(struct lockdep_map *lock, unsigned long ip), + + TP_ARGS(lock, ip) +); + +#endif +#endif + +#endif /* _TRACE_LOCK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/lockdep.h b/include/trace/events/lockdep.h deleted file mode 100644 index 0e956c9dfd7e..000000000000 --- a/include/trace/events/lockdep.h +++ /dev/null @@ -1,96 +0,0 @@ -#if !defined(_TRACE_LOCKDEP_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_LOCKDEP_H - -#include <linux/lockdep.h> -#include <linux/tracepoint.h> - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM lockdep - -#ifdef CONFIG_LOCKDEP - -TRACE_EVENT(lock_acquire, - - TP_PROTO(struct lockdep_map *lock, unsigned int subclass, - int trylock, int read, int check, - struct lockdep_map *next_lock, unsigned long ip), - - TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip), - - TP_STRUCT__entry( - __field(unsigned int, flags) - __string(name, lock->name) - ), - - TP_fast_assign( - __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0); - __assign_str(name, lock->name); - ), - - TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "", - (__entry->flags & 2) ? 
"read " : "", - __get_str(name)) -); - -TRACE_EVENT(lock_release, - - TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip), - - TP_ARGS(lock, nested, ip), - - TP_STRUCT__entry( - __string(name, lock->name) - ), - - TP_fast_assign( - __assign_str(name, lock->name); - ), - - TP_printk("%s", __get_str(name)) -); - -#ifdef CONFIG_LOCK_STAT - -TRACE_EVENT(lock_contended, - - TP_PROTO(struct lockdep_map *lock, unsigned long ip), - - TP_ARGS(lock, ip), - - TP_STRUCT__entry( - __string(name, lock->name) - ), - - TP_fast_assign( - __assign_str(name, lock->name); - ), - - TP_printk("%s", __get_str(name)) -); - -TRACE_EVENT(lock_acquired, - TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime), - - TP_ARGS(lock, ip, waittime), - - TP_STRUCT__entry( - __string(name, lock->name) - __field(unsigned long, wait_usec) - __field(unsigned long, wait_nsec_rem) - ), - TP_fast_assign( - __assign_str(name, lock->name); - __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC); - __entry->wait_usec = (unsigned long) waittime; - ), - TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec, - __entry->wait_nsec_rem) -); - -#endif -#endif - -#endif /* _TRACE_LOCKDEP_H */ - -/* This part must be outside protection */ -#include <trace/define_trace.h> diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h new file mode 100644 index 000000000000..7eee77895cb3 --- /dev/null +++ b/include/trace/events/mce.h @@ -0,0 +1,69 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mce + +#if !defined(_TRACE_MCE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MCE_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> +#include <asm/mce.h> + +TRACE_EVENT(mce_record, + + TP_PROTO(struct mce *m), + + TP_ARGS(m), + + TP_STRUCT__entry( + __field( u64, mcgcap ) + __field( u64, mcgstatus ) + __field( u8, bank ) + __field( u64, status ) + __field( u64, addr ) + __field( u64, misc ) + __field( u64, ip ) + __field( u8, cs ) + __field( u64, tsc ) + __field( u64, walltime ) + __field( u32, cpu ) + __field( u32, cpuid ) + __field( u32, apicid ) + __field( u32, socketid ) + __field( u8, cpuvendor ) + ), + + TP_fast_assign( + __entry->mcgcap = m->mcgcap; + __entry->mcgstatus = m->mcgstatus; + __entry->bank = m->bank; + __entry->status = m->status; + __entry->addr = m->addr; + __entry->misc = m->misc; + __entry->ip = m->ip; + __entry->cs = m->cs; + __entry->tsc = m->tsc; + __entry->walltime = m->time; + __entry->cpu = m->extcpu; + __entry->cpuid = m->cpuid; + __entry->apicid = m->apicid; + __entry->socketid = m->socketid; + __entry->cpuvendor = m->cpuvendor; + ), + + TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, ADDR/MISC: %016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x", + __entry->cpu, + __entry->mcgcap, __entry->mcgstatus, + __entry->bank, __entry->status, + __entry->addr, __entry->misc, + __entry->cs, __entry->ip, + __entry->tsc, + __entry->cpuvendor, __entry->cpuid, + __entry->walltime, + __entry->socketid, + __entry->apicid) +); + +#endif /* _TRACE_MCE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/module.h b/include/trace/events/module.h new file mode 100644 index 000000000000..c7bb2f0482fe --- /dev/null +++ b/include/trace/events/module.h @@ -0,0 +1,122 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM module + +#if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MODULE_H + +#include <linux/tracepoint.h> + +#ifdef CONFIG_MODULES + 
+struct module; + +#define show_module_flags(flags) __print_flags(flags, "", \ + { (1UL << TAINT_PROPRIETARY_MODULE), "P" }, \ + { (1UL << TAINT_FORCED_MODULE), "F" }, \ + { (1UL << TAINT_CRAP), "C" }) + +TRACE_EVENT(module_load, + + TP_PROTO(struct module *mod), + + TP_ARGS(mod), + + TP_STRUCT__entry( + __field( unsigned int, taints ) + __string( name, mod->name ) + ), + + TP_fast_assign( + __entry->taints = mod->taints; + __assign_str(name, mod->name); + ), + + TP_printk("%s %s", __get_str(name), show_module_flags(__entry->taints)) +); + +TRACE_EVENT(module_free, + + TP_PROTO(struct module *mod), + + TP_ARGS(mod), + + TP_STRUCT__entry( + __string( name, mod->name ) + ), + + TP_fast_assign( + __assign_str(name, mod->name); + ), + + TP_printk("%s", __get_str(name)) +); + +#ifdef CONFIG_MODULE_UNLOAD +/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */ + +DECLARE_EVENT_CLASS(module_refcnt, + + TP_PROTO(struct module *mod, unsigned long ip), + + TP_ARGS(mod, ip), + + TP_STRUCT__entry( + __field( unsigned long, ip ) + __field( int, refcnt ) + __string( name, mod->name ) + ), + + TP_fast_assign( + __entry->ip = ip; + __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs); + __assign_str(name, mod->name); + ), + + TP_printk("%s call_site=%pf refcnt=%d", + __get_str(name), (void *)__entry->ip, __entry->refcnt) +); + +DEFINE_EVENT(module_refcnt, module_get, + + TP_PROTO(struct module *mod, unsigned long ip), + + TP_ARGS(mod, ip) +); + +DEFINE_EVENT(module_refcnt, module_put, + + TP_PROTO(struct module *mod, unsigned long ip), + + TP_ARGS(mod, ip) +); +#endif /* CONFIG_MODULE_UNLOAD */ + +TRACE_EVENT(module_request, + + TP_PROTO(char *name, bool wait, unsigned long ip), + + TP_ARGS(name, wait, ip), + + TP_STRUCT__entry( + __field( bool, wait ) + __field( unsigned long, ip ) + __string( name, name ) + ), + + TP_fast_assign( + __entry->wait = wait; + __entry->ip = ip; + __assign_str(name, name); + ), + + TP_printk("%s wait=%d call_site=%pf", + __get_str(name), (int)__entry->wait, (void *)__entry->ip) +); + +#endif /* CONFIG_MODULES */ + +#endif /* _TRACE_MODULE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> + diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h index a8989c4547e7..8fe1e93f531d 100644 --- a/include/trace/events/napi.h +++ b/include/trace/events/napi.h @@ -1,11 +1,38 @@ -#ifndef _TRACE_NAPI_H_ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM napi + +#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_NAPI_H_ #include <linux/netdevice.h> #include <linux/tracepoint.h> +#include <linux/ftrace.h> + +#define NO_DEV "(no_device)" + +TRACE_EVENT(napi_poll, -DECLARE_TRACE(napi_poll, TP_PROTO(struct napi_struct *napi), - TP_ARGS(napi)); -#endif + TP_ARGS(napi), + + TP_STRUCT__entry( + __field( struct napi_struct *, napi) + __string( dev_name, napi->dev ? napi->dev->name : NO_DEV) + ), + + TP_fast_assign( + __entry->napi = napi; + __assign_str(dev_name, napi->dev ? 
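Several of the module and napi events above use __string()/__assign_str() rather than a fixed char array: the string storage is sized per event at record time, so a short module name does not pay for a worst-case buffer. A hypothetical event showing the pattern (foo_open is invented for illustration):

	/* hypothetical event, for illustration only */
	TRACE_EVENT(foo_open,

		TP_PROTO(const char *name),

		TP_ARGS(name),

		TP_STRUCT__entry(
			__string(	name,	name	)
		),

		TP_fast_assign(
			__assign_str(name, name);
		),

		TP_printk("name=%s", __get_str(name))
	);
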
napi->dev->name : NO_DEV); + ), + + TP_printk("napi poll on napi struct %p for device %s", + __entry->napi, __get_str(dev_name)) +); + +#undef NO_DEV + +#endif /* _TRACE_NAPI_H_ */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/net.h b/include/trace/events/net.h new file mode 100644 index 000000000000..5f247f5ffc56 --- /dev/null +++ b/include/trace/events/net.h @@ -0,0 +1,82 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM net + +#if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NET_H + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/ip.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(net_dev_xmit, + + TP_PROTO(struct sk_buff *skb, + int rc), + + TP_ARGS(skb, rc), + + TP_STRUCT__entry( + __field( void *, skbaddr ) + __field( unsigned int, len ) + __field( int, rc ) + __string( name, skb->dev->name ) + ), + + TP_fast_assign( + __entry->skbaddr = skb; + __entry->len = skb->len; + __entry->rc = rc; + __assign_str(name, skb->dev->name); + ), + + TP_printk("dev=%s skbaddr=%p len=%u rc=%d", + __get_str(name), __entry->skbaddr, __entry->len, __entry->rc) +); + +DECLARE_EVENT_CLASS(net_dev_template, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb), + + TP_STRUCT__entry( + __field( void *, skbaddr ) + __field( unsigned int, len ) + __string( name, skb->dev->name ) + ), + + TP_fast_assign( + __entry->skbaddr = skb; + __entry->len = skb->len; + __assign_str(name, skb->dev->name); + ), + + TP_printk("dev=%s skbaddr=%p len=%u", + __get_str(name), __entry->skbaddr, __entry->len) +) + +DEFINE_EVENT(net_dev_template, net_dev_queue, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_template, netif_receive_skb, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb) +); + +DEFINE_EVENT(net_dev_template, netif_rx, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb) +); +#endif /* _TRACE_NET_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/power.h b/include/trace/events/power.h new file mode 100644 index 000000000000..1bcc2a8c00e2 --- /dev/null +++ b/include/trace/events/power.h @@ -0,0 +1,240 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM power + +#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_POWER_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(cpu, + + TP_PROTO(unsigned int state, unsigned int cpu_id), + + TP_ARGS(state, cpu_id), + + TP_STRUCT__entry( + __field( u32, state ) + __field( u32, cpu_id ) + ), + + TP_fast_assign( + __entry->state = state; + __entry->cpu_id = cpu_id; + ), + + TP_printk("state=%lu cpu_id=%lu", (unsigned long)__entry->state, + (unsigned long)__entry->cpu_id) +); + +DEFINE_EVENT(cpu, cpu_idle, + + TP_PROTO(unsigned int state, unsigned int cpu_id), + + TP_ARGS(state, cpu_id) +); + +/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */ +#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING +#define _PWR_EVENT_AVOID_DOUBLE_DEFINING + +#define PWR_EVENT_EXIT -1 +#endif + +DEFINE_EVENT(cpu, cpu_frequency, + + TP_PROTO(unsigned int frequency, unsigned int cpu_id), + + TP_ARGS(frequency, cpu_id) +); + +TRACE_EVENT(machine_suspend, + + TP_PROTO(unsigned int state), + + TP_ARGS(state), + + TP_STRUCT__entry( + __field( u32, state ) + ), + + TP_fast_assign( + __entry->state = state; + ), + + TP_printk("state=%lu", (unsigned long)__entry->state) +); + +/* This code will be removed after deprecation 
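All of these events surface under the tracing debugfs tree once enabled; the net events above, for instance, appear in the trace_pipe stream with exactly their TP_printk() formatting. A minimal reader, assuming debugfs is mounted at the conventional path and an event such as net/netif_rx has been enabled:

	#include <stdio.h>

	int main(void)
	{
		/* enable first with:
		 * echo 1 > /sys/kernel/debug/tracing/events/net/netif_rx/enable */
		FILE *f = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
		char line[512];

		if (!f) {
			perror("trace_pipe");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "dev=eth0 skbaddr=... len=46" */
		fclose(f);
		return 0;
	}
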
time exceeded (2.6.41) */ +#ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED + +/* + * The power events are used for cpuidle & suspend (power_start, power_end) + * and for cpufreq (power_frequency) + */ +DECLARE_EVENT_CLASS(power, + + TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id), + + TP_ARGS(type, state, cpu_id), + + TP_STRUCT__entry( + __field( u64, type ) + __field( u64, state ) + __field( u64, cpu_id ) + ), + + TP_fast_assign( + __entry->type = type; + __entry->state = state; + __entry->cpu_id = cpu_id; + ), + + TP_printk("type=%lu state=%lu cpu_id=%lu", (unsigned long)__entry->type, + (unsigned long)__entry->state, (unsigned long)__entry->cpu_id) +); + +DEFINE_EVENT(power, power_start, + + TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id), + + TP_ARGS(type, state, cpu_id) +); + +DEFINE_EVENT(power, power_frequency, + + TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id), + + TP_ARGS(type, state, cpu_id) +); + +TRACE_EVENT(power_end, + + TP_PROTO(unsigned int cpu_id), + + TP_ARGS(cpu_id), + + TP_STRUCT__entry( + __field( u64, cpu_id ) + ), + + TP_fast_assign( + __entry->cpu_id = cpu_id; + ), + + TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id) + +); + +/* Deprecated dummy functions must be protected against multi-declartion */ +#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED +#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED + +enum { + POWER_NONE = 0, + POWER_CSTATE = 1, + POWER_PSTATE = 2, +}; +#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */ + +#else /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */ + +#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED +#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED +enum { + POWER_NONE = 0, + POWER_CSTATE = 1, + POWER_PSTATE = 2, +}; + +/* These dummy declaration have to be ripped out when the deprecated + events get removed */ +static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {}; +static inline void trace_power_end(u64 cpuid) {}; +static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {}; +#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */ + +#endif /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */ + +/* + * The clock events are used for clock enable/disable and for + * clock rate change + */ +DECLARE_EVENT_CLASS(clock, + + TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id), + + TP_STRUCT__entry( + __string( name, name ) + __field( u64, state ) + __field( u64, cpu_id ) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->state = state; + __entry->cpu_id = cpu_id; + ), + + TP_printk("%s state=%lu cpu_id=%lu", __get_str(name), + (unsigned long)__entry->state, (unsigned long)__entry->cpu_id) +); + +DEFINE_EVENT(clock, clock_enable, + + TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id) +); + +DEFINE_EVENT(clock, clock_disable, + + TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id) +); + +DEFINE_EVENT(clock, clock_set_rate, + + TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id) +); + +/* + * The power domain events are used for power domains transitions + */ +DECLARE_EVENT_CLASS(power_domain, + + TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id), + + TP_STRUCT__entry( + __string( name, name ) + __field( u64, state ) + __field( u64, cpu_id ) + ), + + TP_fast_assign( + 
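The new cpu_idle event above is paired: the same tracepoint fires on entry with the target C-state and again on exit with the PWR_EVENT_EXIT sentinel, replacing the deprecated power_start/power_end pair. A sketch of a call site, assuming a cpuidle driver with a local target_state variable:

	/* sketch of a cpuidle driver call site; target_state is assumed */
	trace_cpu_idle(target_state, smp_processor_id());
	/* ... architecture-specific idle entry ... */
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
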
__assign_str(name, name); + __entry->state = state; + __entry->cpu_id = cpu_id; +), + + TP_printk("%s state=%lu cpu_id=%lu", __get_str(name), + (unsigned long)__entry->state, (unsigned long)__entry->cpu_id) +); + +DEFINE_EVENT(power_domain, power_domain_target, + + TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), + + TP_ARGS(name, state, cpu_id) +); +#endif /* _TRACE_POWER_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 24ab5bcff7b2..f6334782a593 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -1,12 +1,12 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sched + #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SCHED_H #include <linux/sched.h> #include <linux/tracepoint.h> -#undef TRACE_SYSTEM -#define TRACE_SYSTEM sched - /* * Tracepoint for calling kthread_stop, performed to end a kthread: */ @@ -26,7 +26,7 @@ TRACE_EVENT(sched_kthread_stop, __entry->pid = t->pid; ), - TP_printk("task %s:%d", __entry->comm, __entry->pid) + TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid) ); /* @@ -46,54 +46,24 @@ TRACE_EVENT(sched_kthread_stop_ret, __entry->ret = ret; ), - TP_printk("ret %d", __entry->ret) -); - -/* - * Tracepoint for waiting on task to unschedule: - * - * (NOTE: the 'rq' argument is not used by generic trace events, - * but used by the latency tracer plugin. ) - */ -TRACE_EVENT(sched_wait_task, - - TP_PROTO(struct rq *rq, struct task_struct *p), - - TP_ARGS(rq, p), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, prio ) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->prio = p->prio; - ), - - TP_printk("task %s:%d [%d]", - __entry->comm, __entry->pid, __entry->prio) + TP_printk("ret=%d", __entry->ret) ); /* * Tracepoint for waking up a task: - * - * (NOTE: the 'rq' argument is not used by generic trace events, - * but used by the latency tracer plugin. ) */ -TRACE_EVENT(sched_wakeup, +DECLARE_EVENT_CLASS(sched_wakeup_template, - TP_PROTO(struct rq *rq, struct task_struct *p, int success), + TP_PROTO(struct task_struct *p, int success), - TP_ARGS(rq, p, success), + TP_ARGS(p, success), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( int, prio ) __field( int, success ) + __field( int, target_cpu ) ), TP_fast_assign( @@ -101,56 +71,51 @@ TRACE_EVENT(sched_wakeup, __entry->pid = p->pid; __entry->prio = p->prio; __entry->success = success; + __entry->target_cpu = task_cpu(p); ), - TP_printk("task %s:%d [%d] success=%d", + TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", __entry->comm, __entry->pid, __entry->prio, - __entry->success) + __entry->success, __entry->target_cpu) ); +DEFINE_EVENT(sched_wakeup_template, sched_wakeup, + TP_PROTO(struct task_struct *p, int success), + TP_ARGS(p, success)); + /* * Tracepoint for waking up a new task: - * - * (NOTE: the 'rq' argument is not used by generic trace events, - * but used by the latency tracer plugin. 
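Note the formatting convention the sched conversions below adopt: output like "task %s:%d" becomes "comm=%s pid=%d", so every field is self-describing and trivially machine-parseable. A sketch of a consumer relying on that, with an invented sample line:

	#include <stdio.h>

	int main(void)
	{
		const char *line = "comm=kworker/0:1 pid=42";	/* example output */
		char comm[17];
		int pid;

		/* key=value fields parse directly, no positional guessing */
		if (sscanf(line, "comm=%16s pid=%d", comm, &pid) == 2)
			printf("%s -> %d\n", comm, pid);
		return 0;
	}
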
) */ -TRACE_EVENT(sched_wakeup_new, - - TP_PROTO(struct rq *rq, struct task_struct *p, int success), - - TP_ARGS(rq, p, success), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, prio ) - __field( int, success ) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->prio = p->prio; - __entry->success = success; - ), - - TP_printk("task %s:%d [%d] success=%d", - __entry->comm, __entry->pid, __entry->prio, - __entry->success) -); +DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, + TP_PROTO(struct task_struct *p, int success), + TP_ARGS(p, success)); + +#ifdef CREATE_TRACE_POINTS +static inline long __trace_sched_switch_state(struct task_struct *p) +{ + long state = p->state; + +#ifdef CONFIG_PREEMPT + /* + * For all intents and purposes a preempted task is a running task. + */ + if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE) + state = TASK_RUNNING; +#endif + + return state; +} +#endif /* * Tracepoint for task switches, performed by the scheduler: - * - * (NOTE: the 'rq' argument is not used by generic trace events, - * but used by the latency tracer plugin. ) */ TRACE_EVENT(sched_switch, - TP_PROTO(struct rq *rq, struct task_struct *prev, + TP_PROTO(struct task_struct *prev, struct task_struct *next), - TP_ARGS(rq, prev, next), + TP_ARGS(prev, next), TP_STRUCT__entry( __array( char, prev_comm, TASK_COMM_LEN ) @@ -166,13 +131,13 @@ TRACE_EVENT(sched_switch, memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); __entry->prev_pid = prev->pid; __entry->prev_prio = prev->prio; - __entry->prev_state = prev->state; + __entry->prev_state = __trace_sched_switch_state(prev); memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); __entry->next_pid = next->pid; __entry->next_prio = next->prio; ), - TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]", + TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d", __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, __entry->prev_state ? 
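__trace_sched_switch_state() above normalizes the reported state: a task being preempted may already carry a sleep state in p->state, but for trace consumers it is effectively still running, so PREEMPT_ACTIVE in the preempt count overrides the state to TASK_RUNNING. (The helper is guarded by CREATE_TRACE_POINTS so it is emitted only once, in the .c file that instantiates the events.) The decision logic, reduced to a standalone sketch with an illustrative PREEMPT_ACTIVE value:

	#include <stdio.h>

	#define TASK_RUNNING		0
	#define TASK_INTERRUPTIBLE	1
	#define PREEMPT_ACTIVE		0x10000000	/* illustrative value */

	static long switch_state(long state, unsigned int preempt_count)
	{
		/* a preempted task is, for all intents, a running task */
		if (preempt_count & PREEMPT_ACTIVE)
			return TASK_RUNNING;
		return state;
	}

	int main(void)
	{
		printf("%ld\n", switch_state(TASK_INTERRUPTIBLE, 0));			/* 1 */
		printf("%ld\n", switch_state(TASK_INTERRUPTIBLE, PREEMPT_ACTIVE));	/* 0 */
		return 0;
	}
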
__print_flags(__entry->prev_state, "|", @@ -207,15 +172,12 @@ TRACE_EVENT(sched_migrate_task, __entry->dest_cpu = dest_cpu; ), - TP_printk("task %s:%d [%d] from: %d to: %d", + TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d", __entry->comm, __entry->pid, __entry->prio, __entry->orig_cpu, __entry->dest_cpu) ); -/* - * Tracepoint for freeing a task: - */ -TRACE_EVENT(sched_process_free, +DECLARE_EVENT_CLASS(sched_process_template, TP_PROTO(struct task_struct *p), @@ -233,34 +195,31 @@ TRACE_EVENT(sched_process_free, __entry->prio = p->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); /* + * Tracepoint for freeing a task: + */ +DEFINE_EVENT(sched_process_template, sched_process_free, + TP_PROTO(struct task_struct *p), + TP_ARGS(p)); + + +/* * Tracepoint for a task exiting: */ -TRACE_EVENT(sched_process_exit, +DEFINE_EVENT(sched_process_template, sched_process_exit, + TP_PROTO(struct task_struct *p), + TP_ARGS(p)); +/* + * Tracepoint for waiting on task to unschedule: + */ +DEFINE_EVENT(sched_process_template, sched_wait_task, TP_PROTO(struct task_struct *p), - - TP_ARGS(p), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, prio ) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->prio = p->prio; - ), - - TP_printk("task %s:%d [%d]", - __entry->comm, __entry->pid, __entry->prio) -); + TP_ARGS(p)); /* * Tracepoint for a waiting task: @@ -283,7 +242,7 @@ TRACE_EVENT(sched_process_wait, __entry->prio = current->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -310,34 +269,126 @@ TRACE_EVENT(sched_process_fork, __entry->child_pid = child->pid; ), - TP_printk("parent %s:%d child %s:%d", + TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d", __entry->parent_comm, __entry->parent_pid, __entry->child_comm, __entry->child_pid) ); /* - * Tracepoint for sending a signal: + * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE; + * adding sched_stat support to SCHED_FIFO/RR would be welcome. */ -TRACE_EVENT(sched_signal_send, +DECLARE_EVENT_CLASS(sched_stat_template, - TP_PROTO(int sig, struct task_struct *p), + TP_PROTO(struct task_struct *tsk, u64 delay), - TP_ARGS(sig, p), + TP_ARGS(tsk, delay), TP_STRUCT__entry( - __field( int, sig ) - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( u64, delay ) ), TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->sig = sig; + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->delay = delay; + ) + TP_perf_assign( + __perf_count(delay); + ), + + TP_printk("comm=%s pid=%d delay=%Lu [ns]", + __entry->comm, __entry->pid, + (unsigned long long)__entry->delay) +); + + +/* + * Tracepoint for accounting wait time (time the task is runnable + * but not actually running due to scheduler contention). + */ +DEFINE_EVENT(sched_stat_template, sched_stat_wait, + TP_PROTO(struct task_struct *tsk, u64 delay), + TP_ARGS(tsk, delay)); +
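A usage sketch, not part of the patch: every sched_stat_* event in this hunk is a three-line DEFINE_EVENT() over sched_stat_template, so one probe signature serves them all. A minimal module hooking sched_stat_wait might look like the following (module and function names are hypothetical; the probe's leading void * argument follows the tracepoint probe convention of this kernel generation):

#include <linux/module.h>
#include <linux/time.h>
#include <trace/events/sched.h>

/* Probe signature: void *data, then the class's TP_PROTO(tsk, delay). */
static void probe_stat_wait(void *ignore, struct task_struct *tsk, u64 delay)
{
	/* delay is in nanoseconds, as the class's TP_printk() documents */
	if (delay > 10 * NSEC_PER_MSEC)
		pr_info("%s/%d waited %llu ns for a CPU\n",
			tsk->comm, tsk->pid, (unsigned long long)delay);
}

static int __init stat_wait_init(void)
{
	return register_trace_sched_stat_wait(probe_stat_wait, NULL);
}

static void __exit stat_wait_exit(void)
{
	unregister_trace_sched_stat_wait(probe_stat_wait, NULL);
	tracepoint_synchronize_unregister();
}

module_init(stat_wait_init);
module_exit(stat_wait_exit);
MODULE_LICENSE("GPL");

The same probe could be registered unchanged on sched_stat_sleep and sched_stat_iowait below, which is exactly the duplication DECLARE_EVENT_CLASS() removes on the definition side.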
+/* + * Tracepoint for accounting sleep time (time the task is not runnable, + * including iowait, see below). + */ +DEFINE_EVENT(sched_stat_template, sched_stat_sleep, + TP_PROTO(struct task_struct *tsk, u64 delay), + TP_ARGS(tsk, delay)); + +/* + * Tracepoint for accounting iowait time (time the task is not runnable + * due to waiting on IO to complete). + */ +DEFINE_EVENT(sched_stat_template, sched_stat_iowait, + TP_PROTO(struct task_struct *tsk, u64 delay), + TP_ARGS(tsk, delay)); + +/* + * Tracepoint for accounting runtime (time the task is executing + * on a CPU). + */ +TRACE_EVENT(sched_stat_runtime, + + TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime), + + TP_ARGS(tsk, runtime, vruntime), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( u64, runtime ) + __field( u64, vruntime ) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->runtime = runtime; + __entry->vruntime = vruntime; + ) + TP_perf_assign( + __perf_count(runtime); + ), + + TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]", + __entry->comm, __entry->pid, + (unsigned long long)__entry->runtime, + (unsigned long long)__entry->vruntime) +); + +/* + * Tracepoint for showing priority inheritance modifying a task's + * priority. + */ +TRACE_EVENT(sched_pi_setprio, + + TP_PROTO(struct task_struct *tsk, int newprio), + + TP_ARGS(tsk, newprio), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, oldprio ) + __field( int, newprio ) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->oldprio = tsk->prio; + __entry->newprio = newprio; ), - TP_printk("sig: %d task %s:%d", - __entry->sig, __entry->comm, __entry->pid) + TP_printk("comm=%s pid=%d oldprio=%d newprio=%d", + __entry->comm, __entry->pid, + __entry->oldprio, __entry->newprio) ); #endif /* _TRACE_SCHED_H */ diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h new file mode 100644 index 000000000000..25fbefdf2f2e --- /dev/null +++ b/include/trace/events/scsi.h @@ -0,0 +1,345 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM scsi + +#if !defined(_TRACE_SCSI_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SCSI_H + +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_host.h> +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> + +#define scsi_opcode_name(opcode) { opcode, #opcode } +#define show_opcode_name(val) \ + __print_symbolic(val, \ + scsi_opcode_name(TEST_UNIT_READY), \ + scsi_opcode_name(REZERO_UNIT), \ + scsi_opcode_name(REQUEST_SENSE), \ + scsi_opcode_name(FORMAT_UNIT), \ + scsi_opcode_name(READ_BLOCK_LIMITS), \ + scsi_opcode_name(REASSIGN_BLOCKS), \ + scsi_opcode_name(INITIALIZE_ELEMENT_STATUS), \ + scsi_opcode_name(READ_6), \ + scsi_opcode_name(WRITE_6), \ + scsi_opcode_name(SEEK_6), \ + scsi_opcode_name(READ_REVERSE), \ + scsi_opcode_name(WRITE_FILEMARKS), \ + scsi_opcode_name(SPACE), \ + scsi_opcode_name(INQUIRY), \ + scsi_opcode_name(RECOVER_BUFFERED_DATA), \ + scsi_opcode_name(MODE_SELECT), \ + scsi_opcode_name(RESERVE), \ + scsi_opcode_name(RELEASE), \ + scsi_opcode_name(COPY), \ + scsi_opcode_name(ERASE), \ + scsi_opcode_name(MODE_SENSE), \ + scsi_opcode_name(START_STOP), \ + scsi_opcode_name(RECEIVE_DIAGNOSTIC), \ + scsi_opcode_name(SEND_DIAGNOSTIC), \ + scsi_opcode_name(ALLOW_MEDIUM_REMOVAL), \ + scsi_opcode_name(SET_WINDOW), \ + scsi_opcode_name(READ_CAPACITY), \ + scsi_opcode_name(READ_10), \ + scsi_opcode_name(WRITE_10), \ + scsi_opcode_name(SEEK_10), \ + scsi_opcode_name(POSITION_TO_ELEMENT), \ + 
scsi_opcode_name(WRITE_VERIFY), \ + scsi_opcode_name(VERIFY), \ + scsi_opcode_name(SEARCH_HIGH), \ + scsi_opcode_name(SEARCH_EQUAL), \ + scsi_opcode_name(SEARCH_LOW), \ + scsi_opcode_name(SET_LIMITS), \ + scsi_opcode_name(PRE_FETCH), \ + scsi_opcode_name(READ_POSITION), \ + scsi_opcode_name(SYNCHRONIZE_CACHE), \ + scsi_opcode_name(LOCK_UNLOCK_CACHE), \ + scsi_opcode_name(READ_DEFECT_DATA), \ + scsi_opcode_name(MEDIUM_SCAN), \ + scsi_opcode_name(COMPARE), \ + scsi_opcode_name(COPY_VERIFY), \ + scsi_opcode_name(WRITE_BUFFER), \ + scsi_opcode_name(READ_BUFFER), \ + scsi_opcode_name(UPDATE_BLOCK), \ + scsi_opcode_name(READ_LONG), \ + scsi_opcode_name(WRITE_LONG), \ + scsi_opcode_name(CHANGE_DEFINITION), \ + scsi_opcode_name(WRITE_SAME), \ + scsi_opcode_name(UNMAP), \ + scsi_opcode_name(READ_TOC), \ + scsi_opcode_name(LOG_SELECT), \ + scsi_opcode_name(LOG_SENSE), \ + scsi_opcode_name(XDWRITEREAD_10), \ + scsi_opcode_name(MODE_SELECT_10), \ + scsi_opcode_name(RESERVE_10), \ + scsi_opcode_name(RELEASE_10), \ + scsi_opcode_name(MODE_SENSE_10), \ + scsi_opcode_name(PERSISTENT_RESERVE_IN), \ + scsi_opcode_name(PERSISTENT_RESERVE_OUT), \ + scsi_opcode_name(VARIABLE_LENGTH_CMD), \ + scsi_opcode_name(REPORT_LUNS), \ + scsi_opcode_name(MAINTENANCE_IN), \ + scsi_opcode_name(MAINTENANCE_OUT), \ + scsi_opcode_name(MOVE_MEDIUM), \ + scsi_opcode_name(EXCHANGE_MEDIUM), \ + scsi_opcode_name(READ_12), \ + scsi_opcode_name(WRITE_12), \ + scsi_opcode_name(WRITE_VERIFY_12), \ + scsi_opcode_name(SEARCH_HIGH_12), \ + scsi_opcode_name(SEARCH_EQUAL_12), \ + scsi_opcode_name(SEARCH_LOW_12), \ + scsi_opcode_name(READ_ELEMENT_STATUS), \ + scsi_opcode_name(SEND_VOLUME_TAG), \ + scsi_opcode_name(WRITE_LONG_2), \ + scsi_opcode_name(READ_16), \ + scsi_opcode_name(WRITE_16), \ + scsi_opcode_name(VERIFY_16), \ + scsi_opcode_name(WRITE_SAME_16), \ + scsi_opcode_name(SERVICE_ACTION_IN), \ + scsi_opcode_name(SAI_READ_CAPACITY_16), \ + scsi_opcode_name(SAI_GET_LBA_STATUS), \ + scsi_opcode_name(MI_REPORT_TARGET_PGS), \ + scsi_opcode_name(MO_SET_TARGET_PGS), \ + scsi_opcode_name(READ_32), \ + scsi_opcode_name(WRITE_32), \ + scsi_opcode_name(WRITE_SAME_32), \ + scsi_opcode_name(ATA_16), \ + scsi_opcode_name(ATA_12)) + +#define scsi_hostbyte_name(result) { result, #result } +#define show_hostbyte_name(val) \ + __print_symbolic(val, \ + scsi_hostbyte_name(DID_OK), \ + scsi_hostbyte_name(DID_NO_CONNECT), \ + scsi_hostbyte_name(DID_BUS_BUSY), \ + scsi_hostbyte_name(DID_TIME_OUT), \ + scsi_hostbyte_name(DID_BAD_TARGET), \ + scsi_hostbyte_name(DID_ABORT), \ + scsi_hostbyte_name(DID_PARITY), \ + scsi_hostbyte_name(DID_ERROR), \ + scsi_hostbyte_name(DID_RESET), \ + scsi_hostbyte_name(DID_BAD_INTR), \ + scsi_hostbyte_name(DID_PASSTHROUGH), \ + scsi_hostbyte_name(DID_SOFT_ERROR), \ + scsi_hostbyte_name(DID_IMM_RETRY), \ + scsi_hostbyte_name(DID_REQUEUE), \ + scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \ + scsi_hostbyte_name(DID_TRANSPORT_FAILFAST)) + +#define scsi_driverbyte_name(result) { result, #result } +#define show_driverbyte_name(val) \ + __print_symbolic(val, \ + scsi_driverbyte_name(DRIVER_OK), \ + scsi_driverbyte_name(DRIVER_BUSY), \ + scsi_driverbyte_name(DRIVER_SOFT), \ + scsi_driverbyte_name(DRIVER_MEDIA), \ + scsi_driverbyte_name(DRIVER_ERROR), \ + scsi_driverbyte_name(DRIVER_INVALID), \ + scsi_driverbyte_name(DRIVER_TIMEOUT), \ + scsi_driverbyte_name(DRIVER_HARD), \ + scsi_driverbyte_name(DRIVER_SENSE)) + +#define scsi_msgbyte_name(result) { result, #result } +#define show_msgbyte_name(val) \ + __print_symbolic(val, \ + 
scsi_msgbyte_name(COMMAND_COMPLETE), \ + scsi_msgbyte_name(EXTENDED_MESSAGE), \ + scsi_msgbyte_name(SAVE_POINTERS), \ + scsi_msgbyte_name(RESTORE_POINTERS), \ + scsi_msgbyte_name(DISCONNECT), \ + scsi_msgbyte_name(INITIATOR_ERROR), \ + scsi_msgbyte_name(ABORT_TASK_SET), \ + scsi_msgbyte_name(MESSAGE_REJECT), \ + scsi_msgbyte_name(NOP), \ + scsi_msgbyte_name(MSG_PARITY_ERROR), \ + scsi_msgbyte_name(LINKED_CMD_COMPLETE), \ + scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \ + scsi_msgbyte_name(TARGET_RESET), \ + scsi_msgbyte_name(ABORT_TASK), \ + scsi_msgbyte_name(CLEAR_TASK_SET), \ + scsi_msgbyte_name(INITIATE_RECOVERY), \ + scsi_msgbyte_name(RELEASE_RECOVERY), \ + scsi_msgbyte_name(CLEAR_ACA), \ + scsi_msgbyte_name(LOGICAL_UNIT_RESET), \ + scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \ + scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \ + scsi_msgbyte_name(ORDERED_QUEUE_TAG), \ + scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \ + scsi_msgbyte_name(ACA), \ + scsi_msgbyte_name(QAS_REQUEST), \ + scsi_msgbyte_name(BUS_DEVICE_RESET), \ + scsi_msgbyte_name(ABORT)) + +#define scsi_statusbyte_name(result) { result, #result } +#define show_statusbyte_name(val) \ + __print_symbolic(val, \ + scsi_statusbyte_name(SAM_STAT_GOOD), \ + scsi_statusbyte_name(SAM_STAT_CHECK_CONDITION), \ + scsi_statusbyte_name(SAM_STAT_CONDITION_MET), \ + scsi_statusbyte_name(SAM_STAT_BUSY), \ + scsi_statusbyte_name(SAM_STAT_INTERMEDIATE), \ + scsi_statusbyte_name(SAM_STAT_INTERMEDIATE_CONDITION_MET), \ + scsi_statusbyte_name(SAM_STAT_RESERVATION_CONFLICT), \ + scsi_statusbyte_name(SAM_STAT_COMMAND_TERMINATED), \ + scsi_statusbyte_name(SAM_STAT_TASK_SET_FULL), \ + scsi_statusbyte_name(SAM_STAT_ACA_ACTIVE), \ + scsi_statusbyte_name(SAM_STAT_TASK_ABORTED)) + +const char *scsi_trace_parse_cdb(struct trace_seq*, unsigned char*, int); +#define __parse_cdb(cdb, len) scsi_trace_parse_cdb(p, cdb, len) + +TRACE_EVENT(scsi_dispatch_cmd_start, + + TP_PROTO(struct scsi_cmnd *cmd), + + TP_ARGS(cmd), + + TP_STRUCT__entry( + __field( unsigned int, host_no ) + __field( unsigned int, channel ) + __field( unsigned int, id ) + __field( unsigned int, lun ) + __field( unsigned int, opcode ) + __field( unsigned int, cmd_len ) + __field( unsigned int, data_sglen ) + __field( unsigned int, prot_sglen ) + __dynamic_array(unsigned char, cmnd, cmd->cmd_len) + ), + + TP_fast_assign( + __entry->host_no = cmd->device->host->host_no; + __entry->channel = cmd->device->channel; + __entry->id = cmd->device->id; + __entry->lun = cmd->device->lun; + __entry->opcode = cmd->cmnd[0]; + __entry->cmd_len = cmd->cmd_len; + __entry->data_sglen = scsi_sg_count(cmd); + __entry->prot_sglen = scsi_prot_sg_count(cmd); + memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len); + ), + + TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \ + " cmnd=(%s %s raw=%s)", + __entry->host_no, __entry->channel, __entry->id, + __entry->lun, __entry->data_sglen, __entry->prot_sglen, + show_opcode_name(__entry->opcode), + __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), + __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len)) +); + +TRACE_EVENT(scsi_dispatch_cmd_error, + + TP_PROTO(struct scsi_cmnd *cmd, int rtn), + + TP_ARGS(cmd, rtn), + + TP_STRUCT__entry( + __field( unsigned int, host_no ) + __field( unsigned int, channel ) + __field( unsigned int, id ) + __field( unsigned int, lun ) + __field( int, rtn ) + __field( unsigned int, opcode ) + __field( unsigned int, cmd_len ) + __field( unsigned int, data_sglen ) + __field( unsigned int, prot_sglen ) + __dynamic_array(unsigned 
char, cmnd, cmd->cmd_len) + ), + + TP_fast_assign( + __entry->host_no = cmd->device->host->host_no; + __entry->channel = cmd->device->channel; + __entry->id = cmd->device->id; + __entry->lun = cmd->device->lun; + __entry->rtn = rtn; + __entry->opcode = cmd->cmnd[0]; + __entry->cmd_len = cmd->cmd_len; + __entry->data_sglen = scsi_sg_count(cmd); + __entry->prot_sglen = scsi_prot_sg_count(cmd); + memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len); + ), + + TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \ + " cmnd=(%s %s raw=%s) rtn=%d", + __entry->host_no, __entry->channel, __entry->id, + __entry->lun, __entry->data_sglen, __entry->prot_sglen, + show_opcode_name(__entry->opcode), + __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), + __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), + __entry->rtn) +); + +DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template, + + TP_PROTO(struct scsi_cmnd *cmd), + + TP_ARGS(cmd), + + TP_STRUCT__entry( + __field( unsigned int, host_no ) + __field( unsigned int, channel ) + __field( unsigned int, id ) + __field( unsigned int, lun ) + __field( int, result ) + __field( unsigned int, opcode ) + __field( unsigned int, cmd_len ) + __field( unsigned int, data_sglen ) + __field( unsigned int, prot_sglen ) + __dynamic_array(unsigned char, cmnd, cmd->cmd_len) + ), + + TP_fast_assign( + __entry->host_no = cmd->device->host->host_no; + __entry->channel = cmd->device->channel; + __entry->id = cmd->device->id; + __entry->lun = cmd->device->lun; + __entry->result = cmd->result; + __entry->opcode = cmd->cmnd[0]; + __entry->cmd_len = cmd->cmd_len; + __entry->data_sglen = scsi_sg_count(cmd); + __entry->prot_sglen = scsi_prot_sg_count(cmd); + memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len); + ), + + TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \ + "prot_sgl=%u cmnd=(%s %s raw=%s) result=(driver=%s host=%s " \ + "message=%s status=%s)", + __entry->host_no, __entry->channel, __entry->id, + __entry->lun, __entry->data_sglen, __entry->prot_sglen, + show_opcode_name(__entry->opcode), + __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), + __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), + show_driverbyte_name(((__entry->result) >> 24) & 0xff), + show_hostbyte_name(((__entry->result) >> 16) & 0xff), + show_msgbyte_name(((__entry->result) >> 8) & 0xff), + show_statusbyte_name(__entry->result & 0xff)) +); + +DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_done, + TP_PROTO(struct scsi_cmnd *cmd), + TP_ARGS(cmd)); + +DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_timeout, + TP_PROTO(struct scsi_cmnd *cmd), + TP_ARGS(cmd)); + +TRACE_EVENT(scsi_eh_wakeup, + + TP_PROTO(struct Scsi_Host *shost), + + TP_ARGS(shost), + + TP_STRUCT__entry( + __field( unsigned int, host_no ) + ), + + TP_fast_assign( + __entry->host_no = shost->host_no; + ), + + TP_printk("host_no=%u", __entry->host_no) +); + +#endif /* _TRACE_SCSI_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h new file mode 100644 index 000000000000..17df43464df0 --- /dev/null +++ b/include/trace/events/signal.h @@ -0,0 +1,166 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM signal + +#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SIGNAL_H + +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/tracepoint.h> + +#define TP_STORE_SIGINFO(__entry, info) \ + do { \ + if (info == 
SEND_SIG_NOINFO || \ + info == SEND_SIG_FORCED) { \ + __entry->errno = 0; \ + __entry->code = SI_USER; \ + } else if (info == SEND_SIG_PRIV) { \ + __entry->errno = 0; \ + __entry->code = SI_KERNEL; \ + } else { \ + __entry->errno = info->si_errno; \ + __entry->code = info->si_code; \ + } \ + } while (0) + +/** + * signal_generate - called when a signal is generated + * @sig: signal number + * @info: pointer to struct siginfo + * @task: pointer to struct task_struct + * + * Current process sends a 'sig' signal to 'task' process with + * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV, + * 'info' is not a pointer and you can't access its fields. Instead, + * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV + * means that si_code is SI_KERNEL. + */ +TRACE_EVENT(signal_generate, + + TP_PROTO(int sig, struct siginfo *info, struct task_struct *task), + + TP_ARGS(sig, info, task), + + TP_STRUCT__entry( + __field( int, sig ) + __field( int, errno ) + __field( int, code ) + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + ), + + TP_fast_assign( + __entry->sig = sig; + TP_STORE_SIGINFO(__entry, info); + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + __entry->pid = task->pid; + ), + + TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d", + __entry->sig, __entry->errno, __entry->code, + __entry->comm, __entry->pid) +); + +/** + * signal_deliver - called when a signal is delivered + * @sig: signal number + * @info: pointer to struct siginfo + * @ka: pointer to struct k_sigaction + * + * A 'sig' signal is delivered to current process with 'info' siginfo, + * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or + * SIG_DFL. + * Note that some signals reported by the signal_generate tracepoint can be + * lost, ignored or modified (by a debugger) before hitting this tracepoint. + * This means it can show which signals are actually delivered, but + * matching generated signals to delivered signals may not be exact. + */ +TRACE_EVENT(signal_deliver, + + TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka), + + TP_ARGS(sig, info, ka), + + TP_STRUCT__entry( + __field( int, sig ) + __field( int, errno ) + __field( int, code ) + __field( unsigned long, sa_handler ) + __field( unsigned long, sa_flags ) + ), + + TP_fast_assign( + __entry->sig = sig; + TP_STORE_SIGINFO(__entry, info); + __entry->sa_handler = (unsigned long)ka->sa.sa_handler; + __entry->sa_flags = ka->sa.sa_flags; + ), + + TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx", + __entry->sig, __entry->errno, __entry->code, + __entry->sa_handler, __entry->sa_flags) +); + +DECLARE_EVENT_CLASS(signal_queue_overflow, + + TP_PROTO(int sig, int group, struct siginfo *info), + + TP_ARGS(sig, group, info), + + TP_STRUCT__entry( + __field( int, sig ) + __field( int, group ) + __field( int, errno ) + __field( int, code ) + ), + + TP_fast_assign( + __entry->sig = sig; + __entry->group = group; + TP_STORE_SIGINFO(__entry, info); + ), + + TP_printk("sig=%d group=%d errno=%d code=%d", + __entry->sig, __entry->group, __entry->errno, __entry->code) +); +
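A hedged illustration, not part of the patch: any consumer of these events must honour the same sentinel rule that TP_STORE_SIGINFO() above encodes, since 'info' may be one of the SEND_SIG_* cookies rather than a dereferenceable pointer. A helper for a hypothetical probe (names are illustrative) would mirror the macro exactly:

#include <linux/sched.h>
#include <linux/signal.h>

/*
 * Mirror of TP_STORE_SIGINFO(): classify 'info' before touching it.
 * SEND_SIG_NOINFO, SEND_SIG_FORCED and SEND_SIG_PRIV are sentinel
 * values, not real siginfo pointers, and must never be dereferenced.
 */
static int siginfo_code(struct siginfo *info)
{
	if (info == SEND_SIG_NOINFO || info == SEND_SIG_FORCED)
		return SI_USER;
	if (info == SEND_SIG_PRIV)
		return SI_KERNEL;
	return info->si_code;
}

This is also why TP_fast_assign() stores errno/code rather than the raw pointer: a pointer in the ring buffer could not be interpreted safely after the fact.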
+/** + * signal_overflow_fail - called when the signal queue overflows + * @sig: signal number + * @group: signal to process group or not (bool) + * @info: pointer to struct siginfo + * + * The kernel fails to generate a 'sig' signal with 'info' siginfo because + * the siginfo queue has overflowed, and the signal is dropped. + * 'group' is not 0 if the signal will be sent to a process group. + * 'sig' is always one of the RT signals. + */ +DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail, + + TP_PROTO(int sig, int group, struct siginfo *info), + + TP_ARGS(sig, group, info) +); + +/** + * signal_lose_info - called when siginfo is lost + * @sig: signal number + * @group: signal to process group or not (bool) + * @info: pointer to struct siginfo + * + * The kernel generates a 'sig' signal but loses the 'info' siginfo because + * the siginfo queue has overflowed. + * 'group' is not 0 if the signal will be sent to a process group. + * 'sig' is always one of the non-RT signals. + */ +DEFINE_EVENT(signal_queue_overflow, signal_lose_info, + + TP_PROTO(int sig, int group, struct siginfo *info), + + TP_ARGS(sig, group, info) +); + +#endif /* _TRACE_SIGNAL_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 1e8fabb57c06..f10293c41b1e 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -1,12 +1,13 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM skb + #if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SKB_H #include <linux/skbuff.h> +#include <linux/netdevice.h> #include <linux/tracepoint.h> -#undef TRACE_SYSTEM -#define TRACE_SYSTEM skb - /* * Tracepoint for freeing an sk_buff: */ @@ -24,9 +25,7 @@ TRACE_EVENT(kfree_skb, TP_fast_assign( __entry->skbaddr = skb; - if (skb) { - __entry->protocol = ntohs(skb->protocol); - } + __entry->protocol = ntohs(skb->protocol); __entry->location = location; ), @@ -34,6 +33,42 @@ TRACE_EVENT(kfree_skb, __entry->skbaddr, __entry->protocol, __entry->location) ); +TRACE_EVENT(consume_skb, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb), + + TP_STRUCT__entry( + __field( void *, skbaddr ) + ), + + TP_fast_assign( + __entry->skbaddr = skb; + ), + + TP_printk("skbaddr=%p", __entry->skbaddr) +); + +TRACE_EVENT(skb_copy_datagram_iovec, + + TP_PROTO(const struct sk_buff *skb, int len), + + TP_ARGS(skb, len), + + TP_STRUCT__entry( + __field( const void *, skbaddr ) + __field( int, len ) + ), + + TP_fast_assign( + __entry->skbaddr = skb; + __entry->len = len; + ), + + TP_printk("skbaddr=%p len=%d", __entry->skbaddr, __entry->len) +); + #endif /* _TRACE_SKB_H */ /* This part must be outside protection */ diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h new file mode 100644 index 000000000000..5a4c04a75b3d --- /dev/null +++ b/include/trace/events/syscalls.h @@ -0,0 +1,75 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM raw_syscalls +#define TRACE_INCLUDE_FILE syscalls + +#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EVENTS_SYSCALLS_H + +#include <linux/tracepoint.h> + +#include <asm/ptrace.h> +#include <asm/syscall.h> + + +#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS + +extern void syscall_regfunc(void); +extern void syscall_unregfunc(void); + +TRACE_EVENT_FN(sys_enter, + + TP_PROTO(struct pt_regs *regs, long id), + + TP_ARGS(regs, id), + + TP_STRUCT__entry( + __field( long, id ) + __array( unsigned long, args, 6 ) + ), + + TP_fast_assign( + __entry->id = id; + syscall_get_arguments(current, regs, 0, 6, __entry->args); + ), + + TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)", + __entry->id, + __entry->args[0], __entry->args[1], __entry->args[2], + __entry->args[3], __entry->args[4], __entry->args[5]), + + syscall_regfunc, syscall_unregfunc +); + +TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY) + +TRACE_EVENT_FN(sys_exit, + + TP_PROTO(struct pt_regs *regs, long ret), + + TP_ARGS(regs, ret), + + 
TP_STRUCT__entry( + __field( long, id ) + __field( long, ret ) + ), + + TP_fast_assign( + __entry->id = syscall_get_nr(current, regs); + __entry->ret = ret; + ), + + TP_printk("NR %ld = %ld", + __entry->id, __entry->ret), + + syscall_regfunc, syscall_unregfunc +); + +TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY) + +#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */ + +#endif /* _TRACE_EVENTS_SYSCALLS_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> + diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h new file mode 100644 index 000000000000..425bcfe56c62 --- /dev/null +++ b/include/trace/events/timer.h @@ -0,0 +1,329 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM timer + +#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_TIMER_H + +#include <linux/tracepoint.h> +#include <linux/hrtimer.h> +#include <linux/timer.h> + +DECLARE_EVENT_CLASS(timer_class, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("timer=%p", __entry->timer) +); + +/** + * timer_init - called when the timer is initialized + * @timer: pointer to struct timer_list + */ +DEFINE_EVENT(timer_class, timer_init, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer) +); + +/** + * timer_start - called when the timer is started + * @timer: pointer to struct timer_list + * @expires: the timer's expiry time + */ +TRACE_EVENT(timer_start, + + TP_PROTO(struct timer_list *timer, unsigned long expires), + + TP_ARGS(timer, expires), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( void *, function ) + __field( unsigned long, expires ) + __field( unsigned long, now ) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->function = timer->function; + __entry->expires = expires; + __entry->now = jiffies; + ), + + TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]", + __entry->timer, __entry->function, __entry->expires, + (long)__entry->expires - __entry->now) +); + +/** + * timer_expire_entry - called immediately before the timer callback + * @timer: pointer to struct timer_list + * + * Allows determining the timer latency. + */ +TRACE_EVENT(timer_expire_entry, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field( void *, timer ) + __field( unsigned long, now ) + __field( void *, function) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->now = jiffies; + __entry->function = timer->function; + ), + + TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function, __entry->now) +);
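An editor's sketch of the entry/exit pairing these comments describe, not part of the patch (probe names are hypothetical; it assumes the __this_cpu_*() helpers and sched_clock() available in this kernel generation). A timer callback runs to completion in softirq context on one CPU, so a per-CPU timestamp suffices to pair timer_expire_entry with timer_expire_exit and measure the callback runtime:

#include <linux/percpu.h>
#include <linux/sched.h>	/* sched_clock() */
#include <linux/time.h>		/* NSEC_PER_MSEC */
#include <trace/events/timer.h>

static DEFINE_PER_CPU(u64, expire_entry_ts);

static void probe_expire_entry(void *ignore, struct timer_list *timer)
{
	__this_cpu_write(expire_entry_ts, sched_clock());
}

static void probe_expire_exit(void *ignore, struct timer_list *timer)
{
	u64 delta = sched_clock() - __this_cpu_read(expire_entry_ts);

	/* per the NOTE below: log only the pointer, never dereference it */
	if (delta > NSEC_PER_MSEC)
		pr_info("timer %p ran for %llu ns\n",
			timer, (unsigned long long)delta);
}

/*
 * Registration, e.g. from a module init function:
 *	register_trace_timer_expire_entry(probe_expire_entry, NULL);
 *	register_trace_timer_expire_exit(probe_expire_exit, NULL);
 */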
+/** + * timer_expire_exit - called immediately after the timer callback returns + * @timer: pointer to struct timer_list + * + * When used in combination with the timer_expire_entry tracepoint we can + * determine the runtime of the timer callback function. + * + * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might + * be invalid. We solely track the pointer. + */ +DEFINE_EVENT(timer_class, timer_expire_exit, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer) +); + +/** + * timer_cancel - called when the timer is canceled + * @timer: pointer to struct timer_list + */ +DEFINE_EVENT(timer_class, timer_cancel, + + TP_PROTO(struct timer_list *timer), + + TP_ARGS(timer) +); + +/** + * hrtimer_init - called when the hrtimer is initialized + * @hrtimer: pointer to struct hrtimer + * @clockid: the hrtimer's clock + * @mode: the hrtimer's mode + */ +TRACE_EVENT(hrtimer_init, + + TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid, + enum hrtimer_mode mode), + + TP_ARGS(hrtimer, clockid, mode), + + TP_STRUCT__entry( + __field( void *, hrtimer ) + __field( clockid_t, clockid ) + __field( enum hrtimer_mode, mode ) + ), + + TP_fast_assign( + __entry->hrtimer = hrtimer; + __entry->clockid = clockid; + __entry->mode = mode; + ), + + TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer, + __entry->clockid == CLOCK_REALTIME ? + "CLOCK_REALTIME" : "CLOCK_MONOTONIC", + __entry->mode == HRTIMER_MODE_ABS ? + "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL") +); + +/** + * hrtimer_start - called when the hrtimer is started + * @hrtimer: pointer to struct hrtimer + */ +TRACE_EVENT(hrtimer_start, + + TP_PROTO(struct hrtimer *hrtimer), + + TP_ARGS(hrtimer), + + TP_STRUCT__entry( + __field( void *, hrtimer ) + __field( void *, function ) + __field( s64, expires ) + __field( s64, softexpires ) + ), + + TP_fast_assign( + __entry->hrtimer = hrtimer; + __entry->function = hrtimer->function; + __entry->expires = hrtimer_get_expires(hrtimer).tv64; + __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64; + ), + + TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu", + __entry->hrtimer, __entry->function, + (unsigned long long)ktime_to_ns((ktime_t) { + .tv64 = __entry->expires }), + (unsigned long long)ktime_to_ns((ktime_t) { + .tv64 = __entry->softexpires })) +); + +/** + * hrtimer_expire_entry - called immediately before the hrtimer callback + * @hrtimer: pointer to struct hrtimer + * @now: pointer to variable which contains current time of the + * timer's base. + * + * Allows determining the timer latency. + */ +TRACE_EVENT(hrtimer_expire_entry, + + TP_PROTO(struct hrtimer *hrtimer, ktime_t *now), + + TP_ARGS(hrtimer, now), + + TP_STRUCT__entry( + __field( void *, hrtimer ) + __field( s64, now ) + __field( void *, function) + ), + + TP_fast_assign( + __entry->hrtimer = hrtimer; + __entry->now = now->tv64; + __entry->function = hrtimer->function; + ), + + TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function, + (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) + ); + +DECLARE_EVENT_CLASS(hrtimer_class, + + TP_PROTO(struct hrtimer *hrtimer), + + TP_ARGS(hrtimer), + + TP_STRUCT__entry( + __field( void *, hrtimer ) + ), + + TP_fast_assign( + __entry->hrtimer = hrtimer; + ), + + TP_printk("hrtimer=%p", __entry->hrtimer) +); + +/** + * hrtimer_expire_exit - called immediately after the hrtimer callback returns + * @hrtimer: pointer to struct hrtimer + * + * When used in combination with the hrtimer_expire_entry tracepoint we can + * determine the runtime of the callback function.
+ */ +DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit, + + TP_PROTO(struct hrtimer *hrtimer), + + TP_ARGS(hrtimer) +); + +/** + * hrtimer_cancel - called when the hrtimer is canceled + * @hrtimer: pointer to struct hrtimer + */ +DEFINE_EVENT(hrtimer_class, hrtimer_cancel, + + TP_PROTO(struct hrtimer *hrtimer), + + TP_ARGS(hrtimer) +); + +/** + * itimer_state - called when itimer is started or canceled + * @which: name of the interval timer + * @value: the itimer's value, itimer is canceled if value->it_value is + * zero, otherwise it is started + * @expires: the itimer's expiry time + */ +TRACE_EVENT(itimer_state, + + TP_PROTO(int which, const struct itimerval *const value, + cputime_t expires), + + TP_ARGS(which, value, expires), + + TP_STRUCT__entry( + __field( int, which ) + __field( cputime_t, expires ) + __field( long, value_sec ) + __field( long, value_usec ) + __field( long, interval_sec ) + __field( long, interval_usec ) + ), + + TP_fast_assign( + __entry->which = which; + __entry->expires = expires; + __entry->value_sec = value->it_value.tv_sec; + __entry->value_usec = value->it_value.tv_usec; + __entry->interval_sec = value->it_interval.tv_sec; + __entry->interval_usec = value->it_interval.tv_usec; + ), + + TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld", + __entry->which, (unsigned long long)__entry->expires, + __entry->value_sec, __entry->value_usec, + __entry->interval_sec, __entry->interval_usec) +); + +/** + * itimer_expire - called when itimer expires + * @which: type of the interval timer + * @pid: pid of the process which owns the timer + * @now: current time, used to calculate the latency of itimer + */ +TRACE_EVENT(itimer_expire, + + TP_PROTO(int which, struct pid *pid, cputime_t now), + + TP_ARGS(which, pid, now), + + TP_STRUCT__entry( + __field( int , which ) + __field( pid_t, pid ) + __field( cputime_t, now ) + ), + + TP_fast_assign( + __entry->which = which; + __entry->now = now; + __entry->pid = pid_nr(pid); + ), + + TP_printk("which=%d pid=%d now=%llu", __entry->which, + (int) __entry->pid, (unsigned long long)__entry->now) +); + +#endif /* _TRACE_TIMER_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h new file mode 100644 index 000000000000..c255fcc587bf --- /dev/null +++ b/include/trace/events/vmscan.h @@ -0,0 +1,317 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM vmscan + +#if !defined(_TRACE_VMSCAN_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_VMSCAN_H + +#include <linux/types.h> +#include <linux/tracepoint.h> +#include "gfpflags.h" + +#define RECLAIM_WB_ANON 0x0001u +#define RECLAIM_WB_FILE 0x0002u +#define RECLAIM_WB_MIXED 0x0010u +#define RECLAIM_WB_SYNC 0x0004u +#define RECLAIM_WB_ASYNC 0x0008u + +#define show_reclaim_flags(flags) \ + (flags) ? __print_flags(flags, "|", \ + {RECLAIM_WB_ANON, "RECLAIM_WB_ANON"}, \ + {RECLAIM_WB_FILE, "RECLAIM_WB_FILE"}, \ + {RECLAIM_WB_MIXED, "RECLAIM_WB_MIXED"}, \ + {RECLAIM_WB_SYNC, "RECLAIM_WB_SYNC"}, \ + {RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \ + ) : "RECLAIM_WB_NONE" + +#define trace_reclaim_flags(page, sync) ( \ + (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ + (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ + ) + +#define trace_shrink_flags(file, sync) ( \ + (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \ + (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \ + (sync == LUMPY_MODE_SYNC ?
RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ + ) + +TRACE_EVENT(mm_vmscan_kswapd_sleep, + + TP_PROTO(int nid), + + TP_ARGS(nid), + + TP_STRUCT__entry( + __field( int, nid ) + ), + + TP_fast_assign( + __entry->nid = nid; + ), + + TP_printk("nid=%d", __entry->nid) +); + +TRACE_EVENT(mm_vmscan_kswapd_wake, + + TP_PROTO(int nid, int order), + + TP_ARGS(nid, order), + + TP_STRUCT__entry( + __field( int, nid ) + __field( int, order ) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->order = order; + ), + + TP_printk("nid=%d order=%d", __entry->nid, __entry->order) +); + +TRACE_EVENT(mm_vmscan_wakeup_kswapd, + + TP_PROTO(int nid, int zid, int order), + + TP_ARGS(nid, zid, order), + + TP_STRUCT__entry( + __field( int, nid ) + __field( int, zid ) + __field( int, order ) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->zid = zid; + __entry->order = order; + ), + + TP_printk("nid=%d zid=%d order=%d", + __entry->nid, + __entry->zid, + __entry->order) +); + +DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template, + + TP_PROTO(int order, int may_writepage, gfp_t gfp_flags), + + TP_ARGS(order, may_writepage, gfp_flags), + + TP_STRUCT__entry( + __field( int, order ) + __field( int, may_writepage ) + __field( gfp_t, gfp_flags ) + ), + + TP_fast_assign( + __entry->order = order; + __entry->may_writepage = may_writepage; + __entry->gfp_flags = gfp_flags; + ), + + TP_printk("order=%d may_writepage=%d gfp_flags=%s", + __entry->order, + __entry->may_writepage, + show_gfp_flags(__entry->gfp_flags)) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin, + + TP_PROTO(int order, int may_writepage, gfp_t gfp_flags), + + TP_ARGS(order, may_writepage, gfp_flags) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin, + + TP_PROTO(int order, int may_writepage, gfp_t gfp_flags), + + TP_ARGS(order, may_writepage, gfp_flags) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin, + + TP_PROTO(int order, int may_writepage, gfp_t gfp_flags), + + TP_ARGS(order, may_writepage, gfp_flags) +); + +DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template, + + TP_PROTO(unsigned long nr_reclaimed), + + TP_ARGS(nr_reclaimed), + + TP_STRUCT__entry( + __field( unsigned long, nr_reclaimed ) + ), + + TP_fast_assign( + __entry->nr_reclaimed = nr_reclaimed; + ), + + TP_printk("nr_reclaimed=%lu", __entry->nr_reclaimed) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end, + + TP_PROTO(unsigned long nr_reclaimed), + + TP_ARGS(nr_reclaimed) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end, + + TP_PROTO(unsigned long nr_reclaimed), + + TP_ARGS(nr_reclaimed) +); + +DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_reclaim_end, + + TP_PROTO(unsigned long nr_reclaimed), + + TP_ARGS(nr_reclaimed) +); + + +DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template, + + TP_PROTO(int order, + unsigned long nr_requested, + unsigned long nr_scanned, + unsigned long nr_taken, + unsigned long nr_lumpy_taken, + unsigned long nr_lumpy_dirty, + unsigned long nr_lumpy_failed, + int isolate_mode), + + TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode), + + TP_STRUCT__entry( + __field(int, order) + __field(unsigned long, nr_requested) + __field(unsigned long, nr_scanned) + __field(unsigned long, nr_taken) + __field(unsigned long, nr_lumpy_taken) + __field(unsigned long, 
nr_lumpy_dirty) + __field(unsigned long, nr_lumpy_failed) + __field(int, isolate_mode) + ), + + TP_fast_assign( + __entry->order = order; + __entry->nr_requested = nr_requested; + __entry->nr_scanned = nr_scanned; + __entry->nr_taken = nr_taken; + __entry->nr_lumpy_taken = nr_lumpy_taken; + __entry->nr_lumpy_dirty = nr_lumpy_dirty; + __entry->nr_lumpy_failed = nr_lumpy_failed; + __entry->isolate_mode = isolate_mode; + ), + + TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu", + __entry->isolate_mode, + __entry->order, + __entry->nr_requested, + __entry->nr_scanned, + __entry->nr_taken, + __entry->nr_lumpy_taken, + __entry->nr_lumpy_dirty, + __entry->nr_lumpy_failed) +); + +DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate, + + TP_PROTO(int order, + unsigned long nr_requested, + unsigned long nr_scanned, + unsigned long nr_taken, + unsigned long nr_lumpy_taken, + unsigned long nr_lumpy_dirty, + unsigned long nr_lumpy_failed, + int isolate_mode), + + TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode) + +); + +DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate, + + TP_PROTO(int order, + unsigned long nr_requested, + unsigned long nr_scanned, + unsigned long nr_taken, + unsigned long nr_lumpy_taken, + unsigned long nr_lumpy_dirty, + unsigned long nr_lumpy_failed, + int isolate_mode), + + TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode) + +); + +TRACE_EVENT(mm_vmscan_writepage, + + TP_PROTO(struct page *page, + int reclaim_flags), + + TP_ARGS(page, reclaim_flags), + + TP_STRUCT__entry( + __field(struct page *, page) + __field(int, reclaim_flags) + ), + + TP_fast_assign( + __entry->page = page; + __entry->reclaim_flags = reclaim_flags; + ), + + TP_printk("page=%p pfn=%lu flags=%s", + __entry->page, + page_to_pfn(__entry->page), + show_reclaim_flags(__entry->reclaim_flags)) +); + +TRACE_EVENT(mm_vmscan_lru_shrink_inactive, + + TP_PROTO(int nid, int zid, + unsigned long nr_scanned, unsigned long nr_reclaimed, + int priority, int reclaim_flags), + + TP_ARGS(nid, zid, nr_scanned, nr_reclaimed, priority, reclaim_flags), + + TP_STRUCT__entry( + __field(int, nid) + __field(int, zid) + __field(unsigned long, nr_scanned) + __field(unsigned long, nr_reclaimed) + __field(int, priority) + __field(int, reclaim_flags) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->zid = zid; + __entry->nr_scanned = nr_scanned; + __entry->nr_reclaimed = nr_reclaimed; + __entry->priority = priority; + __entry->reclaim_flags = reclaim_flags; + ), + + TP_printk("nid=%d zid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s", + __entry->nid, __entry->zid, + __entry->nr_scanned, __entry->nr_reclaimed, + __entry->priority, + show_reclaim_flags(__entry->reclaim_flags)) +); + + +#endif /* _TRACE_VMSCAN_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index 035f1bff288e..7d497291c85d 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -1,100 +1,121 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM workqueue + #if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_WORKQUEUE_H -#include <linux/workqueue.h> -#include <linux/sched.h> #include <linux/tracepoint.h> +#include <linux/workqueue.h> -#undef 
TRACE_SYSTEM -#define TRACE_SYSTEM workqueue - -TRACE_EVENT(workqueue_insertion, +DECLARE_EVENT_CLASS(workqueue_work, - TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), + TP_PROTO(struct work_struct *work), - TP_ARGS(wq_thread, work), + TP_ARGS(work), TP_STRUCT__entry( - __array(char, thread_comm, TASK_COMM_LEN) - __field(pid_t, thread_pid) - __field(work_func_t, func) + __field( void *, work ) ), TP_fast_assign( - memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); - __entry->thread_pid = wq_thread->pid; - __entry->func = work->func; + __entry->work = work; ), - TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, - __entry->thread_pid, __entry->func) + TP_printk("work struct %p", __entry->work) ); -TRACE_EVENT(workqueue_execution, +/** + * workqueue_queue_work - called when a work gets queued + * @req_cpu: the requested cpu + * @cwq: pointer to struct cpu_workqueue_struct + * @work: pointer to struct work_struct + * + * This event occurs when a work is queued immediately or once a + * delayed work is actually queued on a workqueue (i.e. once the delay + * has been reached). + */ +TRACE_EVENT(workqueue_queue_work, - TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), + TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq, + struct work_struct *work), - TP_ARGS(wq_thread, work), + TP_ARGS(req_cpu, cwq, work), TP_STRUCT__entry( - __array(char, thread_comm, TASK_COMM_LEN) - __field(pid_t, thread_pid) - __field(work_func_t, func) + __field( void *, work ) + __field( void *, function) + __field( void *, workqueue) + __field( unsigned int, req_cpu ) + __field( unsigned int, cpu ) ), TP_fast_assign( - memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); - __entry->thread_pid = wq_thread->pid; - __entry->func = work->func; + __entry->work = work; + __entry->function = work->func; + __entry->workqueue = cwq->wq; + __entry->req_cpu = req_cpu; + __entry->cpu = cwq->gcwq->cpu; ), - TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, - __entry->thread_pid, __entry->func) + TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u", + __entry->work, __entry->function, __entry->workqueue, + __entry->req_cpu, __entry->cpu) ); -/* Trace the creation of one workqueue thread on a cpu */ -TRACE_EVENT(workqueue_creation, +/** + * workqueue_activate_work - called when a work gets activated + * @work: pointer to struct work_struct + * + * This event occurs when a queued work is put on the active queue, + * which happens immediately after queueing unless @max_active limit + * is reached. + */ +DEFINE_EVENT(workqueue_work, workqueue_activate_work, - TP_PROTO(struct task_struct *wq_thread, int cpu), + TP_PROTO(struct work_struct *work), - TP_ARGS(wq_thread, cpu), - - TP_STRUCT__entry( - __array(char, thread_comm, TASK_COMM_LEN) - __field(pid_t, thread_pid) - __field(int, cpu) - ), - - TP_fast_assign( - memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); - __entry->thread_pid = wq_thread->pid; - __entry->cpu = cpu; - ), - - TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm, - __entry->thread_pid, __entry->cpu) + TP_ARGS(work) );
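A hedged aside on the pattern at work here, not part of the patch: once the workqueue_work class exists, any further event with the same prototype costs three lines. A purely hypothetical addition (workqueue_work_requeue is not an upstream event) would read:

/*
 * Hypothetical sketch of class reuse: the workqueue_work class above
 * supplies TP_STRUCT__entry, TP_fast_assign and TP_printk;
 * DEFINE_EVENT() only binds a new tracepoint name to that template.
 */
DEFINE_EVENT(workqueue_work, workqueue_work_requeue,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work)
);

Compared with the four full TRACE_EVENT() bodies this header used to carry, the class also means a single set of output and format helper functions is generated for all of its instances.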
-TRACE_EVENT(workqueue_destruction, +/** + * workqueue_execute_start - called immediately before the workqueue callback + * @work: pointer to struct work_struct + * + * Allows tracking of workqueue execution. + */ +TRACE_EVENT(workqueue_execute_start, - TP_PROTO(struct task_struct *wq_thread), + TP_PROTO(struct work_struct *work), - TP_ARGS(wq_thread), + TP_ARGS(work), TP_STRUCT__entry( - __array(char, thread_comm, TASK_COMM_LEN) - __field(pid_t, thread_pid) + __field( void *, work ) + __field( void *, function) ), TP_fast_assign( - memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); - __entry->thread_pid = wq_thread->pid; + __entry->work = work; + __entry->function = work->func; ), - TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid) + TP_printk("work struct %p: function %pf", __entry->work, __entry->function) +); + +/** + * workqueue_execute_end - called immediately after the workqueue callback returns + * @work: pointer to struct work_struct + * + * Allows tracking of workqueue execution. + */ +DEFINE_EVENT(workqueue_work, workqueue_execute_end, + + TP_PROTO(struct work_struct *work), + + TP_ARGS(work) +); -#endif /* _TRACE_WORKQUEUE_H */ +#endif /* _TRACE_WORKQUEUE_H */ /* This part must be outside protection */ #include <trace/define_trace.h> diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h new file mode 100644 index 000000000000..89a2b2db4375 --- /dev/null +++ b/include/trace/events/writeback.h @@ -0,0 +1,192 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM writeback + +#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_WRITEBACK_H + +#include <linux/backing-dev.h> +#include <linux/device.h> +#include <linux/writeback.h> + +struct wb_writeback_work; + +DECLARE_EVENT_CLASS(writeback_work_class, + TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), + TP_ARGS(bdi, work), + TP_STRUCT__entry( + __array(char, name, 32) + __field(long, nr_pages) + __field(dev_t, sb_dev) + __field(int, sync_mode) + __field(int, for_kupdate) + __field(int, range_cyclic) + __field(int, for_background) + ), + TP_fast_assign( + strncpy(__entry->name, dev_name(bdi->dev), 32); + __entry->nr_pages = work->nr_pages; + __entry->sb_dev = work->sb ?
work->sb->s_dev : 0; + __entry->sync_mode = work->sync_mode; + __entry->for_kupdate = work->for_kupdate; + __entry->range_cyclic = work->range_cyclic; + __entry->for_background = work->for_background; + ), + TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " + "kupdate=%d range_cyclic=%d background=%d", + __entry->name, + MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev), + __entry->nr_pages, + __entry->sync_mode, + __entry->for_kupdate, + __entry->range_cyclic, + __entry->for_background + ) +); +#define DEFINE_WRITEBACK_WORK_EVENT(name) \ +DEFINE_EVENT(writeback_work_class, name, \ + TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \ + TP_ARGS(bdi, work)) +DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread); +DEFINE_WRITEBACK_WORK_EVENT(writeback_queue); +DEFINE_WRITEBACK_WORK_EVENT(writeback_exec); + +TRACE_EVENT(writeback_pages_written, + TP_PROTO(long pages_written), + TP_ARGS(pages_written), + TP_STRUCT__entry( + __field(long, pages) + ), + TP_fast_assign( + __entry->pages = pages_written; + ), + TP_printk("%ld", __entry->pages) +); + +DECLARE_EVENT_CLASS(writeback_class, + TP_PROTO(struct backing_dev_info *bdi), + TP_ARGS(bdi), + TP_STRUCT__entry( + __array(char, name, 32) + ), + TP_fast_assign( + strncpy(__entry->name, dev_name(bdi->dev), 32); + ), + TP_printk("bdi %s", + __entry->name + ) +); +#define DEFINE_WRITEBACK_EVENT(name) \ +DEFINE_EVENT(writeback_class, name, \ + TP_PROTO(struct backing_dev_info *bdi), \ + TP_ARGS(bdi)) + +DEFINE_WRITEBACK_EVENT(writeback_nowork); +DEFINE_WRITEBACK_EVENT(writeback_wake_thread); +DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread); +DEFINE_WRITEBACK_EVENT(writeback_bdi_register); +DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister); +DEFINE_WRITEBACK_EVENT(writeback_thread_start); +DEFINE_WRITEBACK_EVENT(writeback_thread_stop); + +DECLARE_EVENT_CLASS(wbc_class, + TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), + TP_ARGS(wbc, bdi), + TP_STRUCT__entry( + __array(char, name, 32) + __field(long, nr_to_write) + __field(long, pages_skipped) + __field(int, sync_mode) + __field(int, for_kupdate) + __field(int, for_background) + __field(int, for_reclaim) + __field(int, range_cyclic) + __field(int, more_io) + __field(unsigned long, older_than_this) + __field(long, range_start) + __field(long, range_end) + ), + + TP_fast_assign( + strncpy(__entry->name, dev_name(bdi->dev), 32); + __entry->nr_to_write = wbc->nr_to_write; + __entry->pages_skipped = wbc->pages_skipped; + __entry->sync_mode = wbc->sync_mode; + __entry->for_kupdate = wbc->for_kupdate; + __entry->for_background = wbc->for_background; + __entry->for_reclaim = wbc->for_reclaim; + __entry->range_cyclic = wbc->range_cyclic; + __entry->more_io = wbc->more_io; + __entry->older_than_this = wbc->older_than_this ? 
+ *wbc->older_than_this : 0; + __entry->range_start = (long)wbc->range_start; + __entry->range_end = (long)wbc->range_end; + ), + + TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " + "bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx " + "start=0x%lx end=0x%lx", + __entry->name, + __entry->nr_to_write, + __entry->pages_skipped, + __entry->sync_mode, + __entry->for_kupdate, + __entry->for_background, + __entry->for_reclaim, + __entry->range_cyclic, + __entry->more_io, + __entry->older_than_this, + __entry->range_start, + __entry->range_end) +) + +#define DEFINE_WBC_EVENT(name) \ +DEFINE_EVENT(wbc_class, name, \ + TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \ + TP_ARGS(wbc, bdi)) +DEFINE_WBC_EVENT(wbc_writeback_start); +DEFINE_WBC_EVENT(wbc_writeback_written); +DEFINE_WBC_EVENT(wbc_writeback_wait); +DEFINE_WBC_EVENT(wbc_balance_dirty_start); +DEFINE_WBC_EVENT(wbc_balance_dirty_written); +DEFINE_WBC_EVENT(wbc_balance_dirty_wait); +DEFINE_WBC_EVENT(wbc_writepage); + +DECLARE_EVENT_CLASS(writeback_congest_waited_template, + + TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), + + TP_ARGS(usec_timeout, usec_delayed), + + TP_STRUCT__entry( + __field( unsigned int, usec_timeout ) + __field( unsigned int, usec_delayed ) + ), + + TP_fast_assign( + __entry->usec_timeout = usec_timeout; + __entry->usec_delayed = usec_delayed; + ), + + TP_printk("usec_timeout=%u usec_delayed=%u", + __entry->usec_timeout, + __entry->usec_delayed) +); + +DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait, + + TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), + + TP_ARGS(usec_timeout, usec_delayed) +); + +DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested, + + TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), + + TP_ARGS(usec_timeout, usec_delayed) +); + +#endif /* _TRACE_WRITEBACK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 1867553c61e5..e16610c208c9 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -18,14 +18,37 @@ #include <linux/ftrace_event.h> +/* + * DECLARE_EVENT_CLASS can be used to add a generic function + * handlers for events. That is, if all events have the same + * parameters and just have distinct trace points. + * Each tracepoint can be defined with DEFINE_EVENT and that + * will map the DECLARE_EVENT_CLASS to the tracepoint. + * + * TRACE_EVENT is a one to one mapping between tracepoint and template. + */ +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ + DECLARE_EVENT_CLASS(name, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(tstruct), \ + PARAMS(assign), \ + PARAMS(print)); \ + DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); + + #undef __field #define __field(type, item) type item; +#undef __field_ext +#define __field_ext(type, item, filter_type) type item; + #undef __array #define __array(type, item, len) type item[len]; #undef __dynamic_array -#define __dynamic_array(type, item, len) unsigned short __data_loc_##item; +#define __dynamic_array(type, item, len) u32 __data_loc_##item; #undef __string #define __string(item, src) __dynamic_array(char, item, -1) @@ -33,14 +56,35 @@ #undef TP_STRUCT__entry #define TP_STRUCT__entry(args...) 
args -#undef TRACE_EVENT -#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ - struct ftrace_raw_##name { \ - struct trace_entry ent; \ - tstruct \ - char __data[0]; \ - }; \ - static struct ftrace_event_call event_##name +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ + struct ftrace_raw_##name { \ + struct trace_entry ent; \ + tstruct \ + char __data[0]; \ + }; \ + \ + static struct ftrace_event_class event_class_##name; + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) \ + static struct ftrace_event_call __used \ + __attribute__((__aligned__(4))) event_##name + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + +/* Callbacks are meaningless to ftrace. */ +#undef TRACE_EVENT_FN +#define TRACE_EVENT_FN(name, proto, args, tstruct, \ + assign, print, reg, unreg) \ + TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ + PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ + +#undef TRACE_EVENT_FLAGS +#define TRACE_EVENT_FLAGS(name, value) \ + __TRACE_EVENT_FLAGS(name, value) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -51,113 +95,46 @@ * Include the following: * * struct ftrace_data_offsets_<call> { - * int <item1>; - * int <item2>; + * u32 <item1>; + * u32 <item2>; * [...] * }; * - * The __dynamic_array() macro will create each int <item>, this is + * The __dynamic_array() macro will create each u32 <item>, this is * to keep the offset of each array from the beginning of the event. + * The size of an array is also encoded, in the higher 16 bits of <item>. */ #undef __field -#define __field(type, item); +#define __field(type, item) + +#undef __field_ext +#define __field_ext(type, item, filter_type) #undef __array #define __array(type, item, len) #undef __dynamic_array -#define __dynamic_array(type, item, len) int item; +#define __dynamic_array(type, item, len) u32 item; #undef __string #define __string(item, src) __dynamic_array(char, item, -1) -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ struct ftrace_data_offsets_##call { \ tstruct; \ }; -#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) -/* - * Setup the showing format of trace point. - * - * int - * ftrace_format_##call(struct trace_seq *s) - * { - * struct ftrace_raw_##call field; - * int ret; - * - * ret = trace_seq_printf(s, #type " " #item ";" - * " offset:%u; size:%u;\n", - * offsetof(struct ftrace_raw_##call, item), - * sizeof(field.type)); - * - * } - */ +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) -#undef TP_STRUCT__entry -#define TP_STRUCT__entry(args...) 
args - -#undef __field -#define __field(type, item) \ - ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%u;\tsize:%u;\n", \ - (unsigned int)offsetof(typeof(field), item), \ - (unsigned int)sizeof(field.item)); \ - if (!ret) \ - return 0; - -#undef __array -#define __array(type, item, len) \ - ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ - "offset:%u;\tsize:%u;\n", \ - (unsigned int)offsetof(typeof(field), item), \ - (unsigned int)sizeof(field.item)); \ - if (!ret) \ - return 0; - -#undef __dynamic_array -#define __dynamic_array(type, item, len) \ - ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \ - "offset:%u;\tsize:%u;\n", \ - (unsigned int)offsetof(typeof(field), \ - __data_loc_##item), \ - (unsigned int)sizeof(field.__data_loc_##item)); \ - if (!ret) \ - return 0; - -#undef __string -#define __string(item, src) __dynamic_array(char, item, -1) - -#undef __entry -#define __entry REC - -#undef __print_symbolic -#undef __get_dynamic_array -#undef __get_str - -#undef TP_printk -#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) - -#undef TP_fast_assign -#define TP_fast_assign(args...) args - -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ -static int \ -ftrace_format_##call(struct trace_seq *s) \ -{ \ - struct ftrace_raw_##call field __attribute__((unused)); \ - int ret = 0; \ - \ - tstruct; \ - \ - trace_seq_printf(s, "\nprint fmt: " print); \ - \ - return ret; \ -} +#undef TRACE_EVENT_FLAGS +#define TRACE_EVENT_FLAGS(event, flag) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -172,22 +149,22 @@ ftrace_format_##call(struct trace_seq *s) \ * struct trace_seq *s = &iter->seq; * struct ftrace_raw_<call> *field; <-- defined in stage 1 * struct trace_entry *entry; - * struct trace_seq *p; + * struct trace_seq *p = &iter->tmp_seq; * int ret; * * entry = iter->ent; * - * if (entry->type != event_<call>.id) { + * if (entry->type != event_<call>->event.type) { * WARN_ON_ONCE(1); * return TRACE_TYPE_UNHANDLED; * } * * field = (typeof(field))entry; * - * p = get_cpu_var(ftrace_event_seq); * trace_seq_init(p); - * ret = trace_seq_printf(s, <TP_printk> "\n"); - * put_cpu(); + * ret = trace_seq_printf(s, "%s: ", <call>); + * if (ret) + * ret = trace_seq_printf(s, <TP_printk> "\n"); * if (!ret) * return TRACE_TYPE_PARTIAL_LINE; * @@ -207,7 +184,7 @@ ftrace_format_##call(struct trace_seq *s) \ #undef __get_dynamic_array #define __get_dynamic_array(field) \ - ((void *)__entry + __entry->__data_loc_##field) + ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) #undef __get_str #define __get_str(field) (char *)__get_dynamic_array(field) @@ -215,9 +192,9 @@ ftrace_format_##call(struct trace_seq *s) \ #undef __print_flags #define __print_flags(flag, delim, flag_array...) 
\ ({ \ - static const struct trace_print_flags flags[] = \ + static const struct trace_print_flags __flags[] = \ { flag_array, { -1, NULL }}; \ - ftrace_print_flags_seq(p, delim, flag, flags); \ + ftrace_print_flags_seq(p, delim, flag, __flags); \ }) #undef __print_symbolic @@ -228,84 +205,141 @@ ftrace_format_##call(struct trace_seq *s) \ ftrace_print_symbols_seq(p, value, symbols); \ }) -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ -enum print_line_t \ -ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ +#undef __print_hex +#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +static notrace enum print_line_t \ +ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ + struct trace_event *trace_event) \ { \ + struct ftrace_event_call *event; \ struct trace_seq *s = &iter->seq; \ struct ftrace_raw_##call *field; \ struct trace_entry *entry; \ - struct trace_seq *p; \ + struct trace_seq *p = &iter->tmp_seq; \ int ret; \ \ + event = container_of(trace_event, struct ftrace_event_call, \ + event); \ + \ entry = iter->ent; \ \ - if (entry->type != event_##call.id) { \ + if (entry->type != event->event.type) { \ WARN_ON_ONCE(1); \ return TRACE_TYPE_UNHANDLED; \ } \ \ field = (typeof(field))entry; \ \ - p = &get_cpu_var(ftrace_event_seq); \ trace_seq_init(p); \ - ret = trace_seq_printf(s, #call ": " print); \ - put_cpu(); \ + ret = trace_seq_printf(s, "%s: ", event->name); \ + if (ret) \ + ret = trace_seq_printf(s, print); \ if (!ret) \ return TRACE_TYPE_PARTIAL_LINE; \ \ return TRACE_TYPE_HANDLED; \ -} - +} \ +static struct trace_event_functions ftrace_event_type_funcs_##call = { \ + .trace = ftrace_raw_output_##call, \ +}; + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ +static notrace enum print_line_t \ +ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ + struct trace_event *event) \ +{ \ + struct trace_seq *s = &iter->seq; \ + struct ftrace_raw_##template *field; \ + struct trace_entry *entry; \ + struct trace_seq *p = &iter->tmp_seq; \ + int ret; \ + \ + entry = iter->ent; \ + \ + if (entry->type != event_##call.event.type) { \ + WARN_ON_ONCE(1); \ + return TRACE_TYPE_UNHANDLED; \ + } \ + \ + field = (typeof(field))entry; \ + \ + trace_seq_init(p); \ + ret = trace_seq_printf(s, "%s: ", #call); \ + if (ret) \ + ret = trace_seq_printf(s, print); \ + if (!ret) \ + return TRACE_TYPE_PARTIAL_LINE; \ + \ + return TRACE_TYPE_HANDLED; \ +} \ +static struct trace_event_functions ftrace_event_type_funcs_##call = { \ + .trace = ftrace_raw_output_##call, \ +}; + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#undef __field -#define __field(type, item) \ +#undef __field_ext +#define __field_ext(type, item, filter_type) \ ret = trace_define_field(event_call, #type, #item, \ offsetof(typeof(field), item), \ - sizeof(field.item), is_signed_type(type)); \ + sizeof(field.item), \ + is_signed_type(type), filter_type); \ if (ret) \ return ret; +#undef __field +#define __field(type, item) __field_ext(type, item, FILTER_OTHER) + #undef __array #define __array(type, item, len) \ - BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ - ret = trace_define_field(event_call, #type "[" #len "]", #item, \ + do { \ + mutex_lock(&event_storage_mutex); \ + BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ + snprintf(event_storage, sizeof(event_storage), \ + "%s[%d]", #type, len); \ + ret = 
trace_define_field(event_call, event_storage, #item, \ offsetof(typeof(field), item), \ - sizeof(field.item), 0); \ - if (ret) \ - return ret; + sizeof(field.item), \ + is_signed_type(type), FILTER_OTHER); \ + mutex_unlock(&event_storage_mutex); \ + if (ret) \ + return ret; \ + } while (0); #undef __dynamic_array #define __dynamic_array(type, item, len) \ - ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\ - offsetof(typeof(field), __data_loc_##item), \ - sizeof(field.__data_loc_##item), 0); + ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \ + offsetof(typeof(field), __data_loc_##item), \ + sizeof(field.__data_loc_##item), \ + is_signed_type(type), FILTER_OTHER); #undef __string #define __string(item, src) __dynamic_array(char, item, -1) -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ -int \ -ftrace_define_fields_##call(void) \ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ +static int notrace \ +ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ { \ struct ftrace_raw_##call field; \ - struct ftrace_event_call *event_call = &event_##call; \ int ret; \ \ - __common_field(int, type, 1); \ - __common_field(unsigned char, flags, 0); \ - __common_field(unsigned char, preempt_count, 0); \ - __common_field(int, pid, 1); \ - __common_field(int, tgid, 1); \ - \ tstruct; \ \ return ret; \ } +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* @@ -318,6 +352,9 @@ ftrace_define_fields_##call(void) \ #undef __field #define __field(type, item) +#undef __field_ext +#define __field_ext(type, item, filter_type) + #undef __array #define __array(type, item, len) @@ -325,14 +362,15 @@ ftrace_define_fields_##call(void) \ #define __dynamic_array(type, item, len) \ __data_offsets->item = __data_size + \ offsetof(typeof(*entry), __data); \ + __data_offsets->item |= (len * sizeof(type)) << 16; \ __data_size += (len) * sizeof(type); #undef __string -#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \ +#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ -static inline int ftrace_get_offsets_##call( \ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +static inline notrace int ftrace_get_offsets_##call( \ struct ftrace_data_offsets_##call *__data_offsets, proto) \ { \ int __data_size = 0; \ @@ -343,6 +381,13 @@ static inline int ftrace_get_offsets_##call( \ return __data_size; \ } +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* @@ -350,135 +395,81 @@ static inline int ftrace_get_offsets_##call( \ * * Override the macros in <trace/trace_events.h> to include the following: * - * static void ftrace_event_<call>(proto) - * { - * event_trace_printk(_RET_IP_, "<call>: " <fmt>); - * } - * - * static int ftrace_reg_event_<call>(void) - * { - * int ret; - * - * ret = register_trace_<call>(ftrace_event_<call>); - * if (!ret) - * pr_info("event trace: Could 
not activate trace point " - * "probe to <call>"); - * return ret; - * } - * - * static void ftrace_unreg_event_<call>(void) - * { - * unregister_trace_<call>(ftrace_event_<call>); - * } - * - * * For those macros defined with TRACE_EVENT: * * static struct ftrace_event_call event_<call>; * - * static void ftrace_raw_event_<call>(proto) + * static void ftrace_raw_event_<call>(void *__data, proto) * { + * struct ftrace_event_call *event_call = __data; + * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; * struct ring_buffer_event *event; * struct ftrace_raw_<call> *entry; <-- defined in stage 1 + * struct ring_buffer *buffer; * unsigned long irq_flags; + * int __data_size; * int pc; * * local_save_flags(irq_flags); * pc = preempt_count(); * - * event = trace_current_buffer_lock_reserve(event_<call>.id, - * sizeof(struct ftrace_raw_<call>), + * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); + * + * event = trace_current_buffer_lock_reserve(&buffer, + * event_<call>->event.type, + * sizeof(*entry) + __data_size, * irq_flags, pc); * if (!event) * return; * entry = ring_buffer_event_data(event); * - * <assign>; <-- Here we assign the entries by the __field and - * __array macros. - * - * trace_current_buffer_unlock_commit(event, irq_flags, pc); - * } - * - * static int ftrace_raw_reg_event_<call>(void) - * { - * int ret; - * - * ret = register_trace_<call>(ftrace_raw_event_<call>); - * if (!ret) - * pr_info("event trace: Could not activate trace point " - * "probe to <call>"); - * return ret; - * } + * { <assign>; } <-- Here we assign the entries by the __field and + * __array macros. * - * static void ftrace_unreg_event_<call>(void) - * { - * unregister_trace_<call>(ftrace_raw_event_<call>); + * if (!filter_current_check_discard(buffer, event_call, entry, event)) + * trace_current_buffer_unlock_commit(buffer, + * event, irq_flags, pc); * } * * static struct trace_event ftrace_event_type_<call> = { * .trace = ftrace_raw_output_<call>, <-- stage 2 * }; * - * static int ftrace_raw_init_event_<call>(void) - * { - * int id; + * static const char print_fmt_<call>[] = <TP_printk>; * - * id = register_ftrace_event(&ftrace_event_type_<call>); - * if (!id) - * return -ENODEV; - * event_<call>.id = id; - * return 0; - * } + * static struct ftrace_event_class __used event_class_<template> = { + * .system = "<system>", + * .define_fields = ftrace_define_fields_<call>, + * .fields = LIST_HEAD_INIT(event_class_##call.fields), + * .raw_init = trace_event_raw_init, + * .probe = ftrace_raw_event_##call, + * .reg = ftrace_event_reg, + * }; * * static struct ftrace_event_call __used * __attribute__((__aligned__(4))) * __attribute__((section("_ftrace_events"))) event_<call> = { * .name = "<call>", - * .system = "<system>", - * .raw_init = ftrace_raw_init_event_<call>, - * .regfunc = ftrace_reg_event_<call>, - * .unregfunc = ftrace_unreg_event_<call>, - * .show_format = ftrace_format_<call>, - * } + * .class = event_class_<template>, + * .event = &ftrace_event_type_<call>, + * .print_fmt = print_fmt_<call>, + * }; * */ -#undef TP_FMT -#define TP_FMT(fmt, args...) 
fmt "\n", ##args +#ifdef CONFIG_PERF_EVENTS -#ifdef CONFIG_EVENT_PROFILE -#define _TRACE_PROFILE(call, proto, args) \ -static void ftrace_profile_##call(proto) \ -{ \ - extern void perf_tpcounter_event(int); \ - perf_tpcounter_event(event_##call.id); \ -} \ - \ -static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \ -{ \ - int ret = 0; \ - \ - if (!atomic_inc_return(&event_call->profile_count)) \ - ret = register_trace_##call(ftrace_profile_##call); \ - \ - return ret; \ -} \ - \ -static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ -{ \ - if (atomic_add_negative(-1, &event_call->profile_count)) \ - unregister_trace_##call(ftrace_profile_##call); \ -} +#define _TRACE_PERF_PROTO(call, proto) \ + static notrace void \ + perf_trace_##call(void *__data, proto); -#define _TRACE_PROFILE_INIT(call) \ - .profile_count = ATOMIC_INIT(-1), \ - .profile_enable = ftrace_profile_enable_##call, \ - .profile_disable = ftrace_profile_disable_##call, +#define _TRACE_PERF_INIT(call) \ + .perf_probe = perf_trace_##call, #else -#define _TRACE_PROFILE(call, proto, args) -#define _TRACE_PROFILE_INIT(call) -#endif +#define _TRACE_PERF_PROTO(call, proto) +#define _TRACE_PERF_INIT(call) +#endif /* CONFIG_PERF_EVENTS */ #undef __entry #define __entry entry @@ -500,18 +491,23 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ #define __assign_str(dst, src) \ strcpy(__get_str(dst), src); -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ -_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \ - \ -static struct ftrace_event_call event_##call; \ +#undef TP_fast_assign +#define TP_fast_assign(args...) args + +#undef TP_perf_assign +#define TP_perf_assign(args...) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ \ -static void ftrace_raw_event_##call(proto) \ +static notrace void \ +ftrace_raw_event_##call(void *__data, proto) \ { \ + struct ftrace_event_call *event_call = __data; \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ - struct ftrace_event_call *event_call = &event_##call; \ struct ring_buffer_event *event; \ struct ftrace_raw_##call *entry; \ + struct ring_buffer *buffer; \ unsigned long irq_flags; \ int __data_size; \ int pc; \ @@ -521,71 +517,244 @@ static void ftrace_raw_event_##call(proto) \ \ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ \ - event = trace_current_buffer_lock_reserve(event_##call.id, \ + event = trace_current_buffer_lock_reserve(&buffer, \ + event_call->event.type, \ sizeof(*entry) + __data_size, \ irq_flags, pc); \ if (!event) \ return; \ entry = ring_buffer_event_data(event); \ \ - \ tstruct \ \ { assign; } \ \ - if (!filter_current_check_discard(event_call, entry, event)) \ - trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ -} \ - \ -static int ftrace_raw_reg_event_##call(void) \ -{ \ - int ret; \ - \ - ret = register_trace_##call(ftrace_raw_event_##call); \ - if (ret) \ - pr_info("event trace: Could not activate trace point " \ - "probe to " #call "\n"); \ - return ret; \ -} \ - \ -static void ftrace_raw_unreg_event_##call(void) \ + if (!filter_current_check_discard(buffer, event_call, entry, event)) \ + trace_nowake_buffer_unlock_commit(buffer, \ + event, irq_flags, pc); \ +} +/* + * The ftrace_test_probe is compiled out, it is only here as a build time check + * to make sure that if the tracepoint handling changes, the ftrace probe will + * fail to compile unless 
it too is updated. + */ + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, call, proto, args) \ +static inline void ftrace_test_probe_##call(void) \ { \ - unregister_trace_##call(ftrace_raw_event_##call); \ -} \ + check_trace_callback_type_##call(ftrace_raw_event_##template); \ +} + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +#undef __entry +#define __entry REC + +#undef __print_flags +#undef __print_symbolic +#undef __get_dynamic_array +#undef __get_str + +#undef TP_printk +#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +_TRACE_PERF_PROTO(call, PARAMS(proto)); \ +static const char print_fmt_##call[] = print; \ +static struct ftrace_event_class __used event_class_##call = { \ + .system = __stringify(TRACE_SYSTEM), \ + .define_fields = ftrace_define_fields_##call, \ + .fields = LIST_HEAD_INIT(event_class_##call.fields),\ + .raw_init = trace_event_raw_init, \ + .probe = ftrace_raw_event_##call, \ + .reg = ftrace_event_reg, \ + _TRACE_PERF_INIT(call) \ +}; + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, call, proto, args) \ \ -static struct trace_event ftrace_event_type_##call = { \ - .trace = ftrace_raw_output_##call, \ -}; \ +static struct ftrace_event_call __used \ +__attribute__((__aligned__(4))) \ +__attribute__((section("_ftrace_events"))) event_##call = { \ + .name = #call, \ + .class = &event_class_##template, \ + .event.funcs = &ftrace_event_type_funcs_##template, \ + .print_fmt = print_fmt_##template, \ +}; + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ \ -static int ftrace_raw_init_event_##call(void) \ -{ \ - int id; \ - \ - id = register_ftrace_event(&ftrace_event_type_##call); \ - if (!id) \ - return -ENODEV; \ - event_##call.id = id; \ - INIT_LIST_HEAD(&event_##call.fields); \ - init_preds(&event_##call); \ - return 0; \ -} \ +static const char print_fmt_##call[] = print; \ \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) event_##call = { \ .name = #call, \ - .system = __stringify(TRACE_SYSTEM), \ - .event = &ftrace_event_type_##call, \ - .raw_init = ftrace_raw_init_event_##call, \ - .regfunc = ftrace_raw_reg_event_##call, \ - .unregfunc = ftrace_raw_unreg_event_##call, \ - .show_format = ftrace_format_##call, \ - .define_fields = ftrace_define_fields_##call, \ - _TRACE_PROFILE_INIT(call) \ + .class = &event_class_##template, \ + .event.funcs = &ftrace_event_type_funcs_##call, \ + .print_fmt = print_fmt_##call, \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#undef _TRACE_PROFILE +/* + * Define the insertion callback to perf events + * + * The job is very similar to ftrace_raw_event_<call> except that we don't + * insert in the ring buffer but in a perf counter. 
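+ *
+ * The record layout itself is the same struct ftrace_raw_<call> that
+ * stage 1 defined; only where the finished entry ends up differs.  The
+ * pseudo-code below is a rough sketch of the probe, not its exact
+ * expansion: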
+ * + * static void ftrace_perf_<call>(proto) + * { + * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; + * struct ftrace_event_call *event_call = &event_<call>; + * extern void perf_tp_event(int, u64, u64, void *, int); + * struct ftrace_raw_##call *entry; + * struct perf_trace_buf *trace_buf; + * u64 __addr = 0, __count = 1; + * unsigned long irq_flags; + * struct trace_entry *ent; + * int __entry_size; + * int __data_size; + * int __cpu + * int pc; + * + * pc = preempt_count(); + * + * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); + * + * // Below we want to get the aligned size by taking into account + * // the u32 field that will later store the buffer size + * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), + * sizeof(u64)); + * __entry_size -= sizeof(u32); + * + * // Protect the non nmi buffer + * // This also protects the rcu read side + * local_irq_save(irq_flags); + * __cpu = smp_processor_id(); + * + * if (in_nmi()) + * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi); + * else + * trace_buf = rcu_dereference_sched(perf_trace_buf); + * + * if (!trace_buf) + * goto end; + * + * trace_buf = per_cpu_ptr(trace_buf, __cpu); + * + * // Avoid recursion from perf that could mess up the buffer + * if (trace_buf->recursion++) + * goto end_recursion; + * + * raw_data = trace_buf->buf; + * + * // Make recursion update visible before entering perf_tp_event + * // so that we protect from perf recursions. + * + * barrier(); + * + * //zero dead bytes from alignment to avoid stack leak to userspace: + * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; + * entry = (struct ftrace_raw_<call> *)raw_data; + * ent = &entry->ent; + * tracing_generic_entry_update(ent, irq_flags, pc); + * ent->type = event_call->id; + * + * <tstruct> <- do some jobs with dynamic arrays + * + * <assign> <- affect our values + * + * perf_tp_event(event_call->id, __addr, __count, entry, + * __entry_size); <- submit them to perf counter + * + * } + */ + +#ifdef CONFIG_PERF_EVENTS + +#undef __entry +#define __entry entry + +#undef __get_dynamic_array +#define __get_dynamic_array(field) \ + ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) + +#undef __get_str +#define __get_str(field) (char *)__get_dynamic_array(field) + +#undef __perf_addr +#define __perf_addr(a) __addr = (a) + +#undef __perf_count +#define __perf_count(c) __count = (c) + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +static notrace void \ +perf_trace_##call(void *__data, proto) \ +{ \ + struct ftrace_event_call *event_call = __data; \ + struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ + struct ftrace_raw_##call *entry; \ + struct pt_regs __regs; \ + u64 __addr = 0, __count = 1; \ + struct hlist_head *head; \ + int __entry_size; \ + int __data_size; \ + int rctx; \ + \ + perf_fetch_caller_regs(&__regs); \ + \ + __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ + __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ + sizeof(u64)); \ + __entry_size -= sizeof(u32); \ + \ + if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ + "profile buffer not large enough")) \ + return; \ + \ + entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ + __entry_size, event_call->event.type, &__regs, &rctx); \ + if (!entry) \ + return; \ + \ + tstruct \ + \ + { assign; } \ + \ + head = this_cpu_ptr(event_call->perf_events); \ + perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ + __count, 
&__regs, head); \ +} + +/* + * This part is compiled out, it is only here as a build time check + * to make sure that if the tracepoint handling changes, the + * perf probe will fail to compile unless it too is updated. + */ +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, call, proto, args) \ +static inline void perf_test_probe_##call(void) \ +{ \ + check_trace_callback_type_##call(perf_trace_##template); \ +} + + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#endif /* CONFIG_PERF_EVENTS */ + #undef _TRACE_PROFILE_INIT diff --git a/include/trace/power.h b/include/trace/power.h deleted file mode 100644 index ef204666e983..000000000000 --- a/include/trace/power.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef _TRACE_POWER_H -#define _TRACE_POWER_H - -#include <linux/ktime.h> -#include <linux/tracepoint.h> - -enum { - POWER_NONE = 0, - POWER_CSTATE = 1, - POWER_PSTATE = 2, -}; - -struct power_trace { - ktime_t stamp; - ktime_t end; - int type; - int state; -}; - -DECLARE_TRACE(power_start, - TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state), - TP_ARGS(it, type, state)); - -DECLARE_TRACE(power_mark, - TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state), - TP_ARGS(it, type, state)); - -DECLARE_TRACE(power_end, - TP_PROTO(struct power_trace *it), - TP_ARGS(it)); - -#endif /* _TRACE_POWER_H */ diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 8cfe515cbc47..31966a4fb8cc 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -1,35 +1,57 @@ #ifndef _TRACE_SYSCALL_H #define _TRACE_SYSCALL_H +#include <linux/tracepoint.h> +#include <linux/unistd.h> +#include <linux/ftrace_event.h> + #include <asm/ptrace.h> + /* * A syscall entry in the ftrace syscalls array. 
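+ * (For instance, with hypothetical but representative values: the
+ * entry describing sys_close would have name "sys_close", nb_args 1,
+ * types { "unsigned int" } and args { "fd" }, with enter_event and
+ * exit_event pointing at the sys_enter_close and sys_exit_close
+ * trace events.)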
* * @name: name of the syscall + * @syscall_nr: number of the syscall * @nb_args: number of parameters it takes * @types: list of types as strings * @args: list of args as strings (args[i] matches types[i]) + * @enter_event: associated syscall_enter trace event + * @exit_event: associated syscall_exit trace event */ struct syscall_metadata { const char *name; + int syscall_nr; int nb_args; const char **types; const char **args; + struct list_head enter_fields; + + struct ftrace_event_call *enter_event; + struct ftrace_event_call *exit_event; }; #ifdef CONFIG_FTRACE_SYSCALLS -extern void arch_init_ftrace_syscalls(void); -extern struct syscall_metadata *syscall_nr_to_meta(int nr); -extern void start_ftrace_syscalls(void); -extern void stop_ftrace_syscalls(void); -extern void ftrace_syscall_enter(struct pt_regs *regs); -extern void ftrace_syscall_exit(struct pt_regs *regs); -#else -static inline void start_ftrace_syscalls(void) { } -static inline void stop_ftrace_syscalls(void) { } -static inline void ftrace_syscall_enter(struct pt_regs *regs) { } -static inline void ftrace_syscall_exit(struct pt_regs *regs) { } +extern unsigned long arch_syscall_addr(int nr); +extern int init_syscall_trace(struct ftrace_event_call *call); + +extern int reg_event_syscall_enter(struct ftrace_event_call *call); +extern void unreg_event_syscall_enter(struct ftrace_event_call *call); +extern int reg_event_syscall_exit(struct ftrace_event_call *call); +extern void unreg_event_syscall_exit(struct ftrace_event_call *call); +extern int +ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s); +enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags, + struct trace_event *event); +enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags, + struct trace_event *event); +#endif + +#ifdef CONFIG_PERF_EVENTS +int perf_sysenter_enable(struct ftrace_event_call *call); +void perf_sysenter_disable(struct ftrace_event_call *call); +int perf_sysexit_enable(struct ftrace_event_call *call); +void perf_sysexit_disable(struct ftrace_event_call *call); #endif #endif /* _TRACE_SYSCALL_H */
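One detail above is worth a standalone illustration: the u32 __data_loc
words set up by __dynamic_array() pack a dynamic array's byte offset
into the low 16 bits and its byte length into the high 16 bits, which
is why __get_dynamic_array() masks with 0xffff.  A minimal userspace
sketch of that encoding, using made-up values (plain C, not kernel
code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical dynamic array: 24 bytes of payload that
		 * starts 64 bytes past the start of the event record. */
		uint32_t offset = 64;
		uint32_t bytes  = 24;

		/* Pack, mirroring
		 * "__data_offsets->item |= (len * sizeof(type)) << 16" */
		uint32_t data_loc = offset | (bytes << 16);

		/* Unpack, mirroring
		 * "__entry->__data_loc_##field & 0xffff" */
		printf("offset = %u, size = %u\n",
		       data_loc & 0xffff, data_loc >> 16);
		return 0;
	}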