Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/bpf_probe.h              |    6
-rw-r--r--  include/trace/define_custom_trace.h    |   77
-rw-r--r--  include/trace/events/block.h           |   49
-rw-r--r--  include/trace/events/btrfs.h           |    1
-rw-r--r--  include/trace/events/compaction.h      |   26
-rw-r--r--  include/trace/events/ext4.h            |  328
-rw-r--r--  include/trace/events/io_uring.h        |  333
-rw-r--r--  include/trace/events/random.h          |  233
-rw-r--r--  include/trace/events/rcu.h             |    9
-rw-r--r--  include/trace/events/sched.h           |   11
-rw-r--r--  include/trace/events/scmi.h            |   28
-rw-r--r--  include/trace/events/sunrpc.h          |  244
-rw-r--r--  include/trace/events/vmscan.h          |   10
-rw-r--r--  include/trace/events/writeback.h       |   28
-rw-r--r--  include/trace/perf.h                   |    6
-rw-r--r--  include/trace/stages/init.h            |   37
-rw-r--r--  include/trace/stages/stage1_defines.h  |   51
-rw-r--r--  include/trace/stages/stage2_defines.h  |   54
-rw-r--r--  include/trace/stages/stage3_defines.h  |  135
-rw-r--r--  include/trace/stages/stage4_defines.h  |   63
-rw-r--r--  include/trace/stages/stage5_defines.h  |   89
-rw-r--r--  include/trace/stages/stage6_defines.h  |  106
-rw-r--r--  include/trace/stages/stage7_defines.h  |   36
-rw-r--r--  include/trace/trace_custom_events.h    |  221
-rw-r--r--  include/trace/trace_events.h           |  499
25 files changed, 1498 insertions, 1182 deletions
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 7660a7846586..6a13220d2d27 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -21,6 +21,9 @@
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+#undef __get_sockaddr
+#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
+
#undef __get_rel_dynamic_array
#define __get_rel_dynamic_array(field) \
((void *)(&__entry->__rel_loc_##field) + \
@@ -37,6 +40,9 @@
#undef __get_rel_bitmask
#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
+#undef __get_rel_sockaddr
+#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+
#undef __perf_count
#define __perf_count(c) (c)
diff --git a/include/trace/define_custom_trace.h b/include/trace/define_custom_trace.h
new file mode 100644
index 000000000000..5827a4c92c74
--- /dev/null
+++ b/include/trace/define_custom_trace.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace files that want to automate creation of all tracepoints defined
+ * in their file should include this file. The following are macros that the
+ * trace file may define:
+ *
+ * TRACE_SYSTEM defines the system the tracepoint is for
+ *
+ * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h
+ * This macro may be defined to tell define_trace.h what file to include.
+ * Note, leave off the ".h".
+ *
+ * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace
+ * then this macro can define the path to use. Note, the path is relative to
+ * define_trace.h, not the file including it. Full path names for out of tree
+ * modules must be used.
+ */
+
+#ifdef CREATE_CUSTOM_TRACE_EVENTS
+
+/* Prevent recursion */
+#undef CREATE_CUSTOM_TRACE_EVENTS
+
+#include <linux/stringify.h>
+
+#undef TRACE_CUSTOM_EVENT
+#define TRACE_CUSTOM_EVENT(name, proto, args, tstruct, assign, print)
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
+
+#undef TRACE_INCLUDE
+#undef __TRACE_INCLUDE
+
+#ifndef TRACE_INCLUDE_FILE
+# define TRACE_INCLUDE_FILE TRACE_SYSTEM
+# define UNDEF_TRACE_INCLUDE_FILE
+#endif
+
+#ifndef TRACE_INCLUDE_PATH
+# define __TRACE_INCLUDE(system) <trace/events/system.h>
+# define UNDEF_TRACE_INCLUDE_PATH
+#else
+# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h)
+#endif
+
+# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system)
+
+/* Let the trace headers be reread */
+#define TRACE_CUSTOM_MULTI_READ
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#ifdef TRACEPOINTS_ENABLED
+#include <trace/trace_custom_events.h>
+#endif
+
+#undef TRACE_CUSTOM_EVENT
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#undef DEFINE_CUSTOM_EVENT
+#undef TRACE_CUSTOM_MULTI_READ
+
+/* Only undef what we defined in this file */
+#ifdef UNDEF_TRACE_INCLUDE_FILE
+# undef TRACE_INCLUDE_FILE
+# undef UNDEF_TRACE_INCLUDE_FILE
+#endif
+
+#ifdef UNDEF_TRACE_INCLUDE_PATH
+# undef TRACE_INCLUDE_PATH
+# undef UNDEF_TRACE_INCLUDE_PATH
+#endif
+
+/* We may be processing more files */
+#define CREATE_CUSTOM_TRACE_POINTS
+
+#endif /* CREATE_CUSTOM_TRACE_POINTS */
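
As a rough usage sketch (not part of the patch itself): based on the header
comment and macros above, a trace header defines its custom events with
TRACE_CUSTOM_EVENT() and then pulls in define_custom_trace.h from exactly one
compilation unit. The file name trace_custom_sched.h, the includes, and the
choice of the sched_waking tracepoint below are illustrative assumptions, not
something mandated by this patch.

/* trace_custom_sched.h -- illustrative sketch only */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM custom_sched

#if !defined(_TRACE_CUSTOM_SCHED_H) || defined(TRACE_CUSTOM_MULTI_READ)
#define _TRACE_CUSTOM_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>

/* Attach a custom event to an existing tracepoint (here assumed: sched_waking) */
TRACE_CUSTOM_EVENT(sched_waking,

	/* The prototype must match the tracepoint being attached to */
	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	/* Record only what this custom event cares about */
	TP_STRUCT__entry(
		__field(pid_t, pid)
	),

	TP_fast_assign(
		__entry->pid = p->pid;
	),

	TP_printk("pid=%d", __entry->pid)
);

#endif /* _TRACE_CUSTOM_SCHED_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_custom_sched
#include <trace/define_custom_trace.h>

A single .c file then defines CREATE_CUSTOM_TRACE_EVENTS (the guard tested at
the top of define_custom_trace.h) before including the header, so the event
definitions are expanded exactly once:

/* trace_custom_sched.c -- illustrative sketch only */
#define CREATE_CUSTOM_TRACE_EVENTS
#include "trace_custom_sched.h"
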
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 27170e40e8c9..7f4dfbdf12a6 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -100,19 +100,7 @@ TRACE_EVENT(block_rq_requeue,
__entry->nr_sector, 0)
);
-/**
- * block_rq_complete - block IO operation completed by device driver
- * @rq: block operations request
- * @error: status code
- * @nr_bytes: number of completed bytes
- *
- * The block_rq_complete tracepoint event indicates that some portion
- * of operation request has been completed by the device driver. If
- * the @rq->bio is %NULL, then there is absolutely no additional work to
- * do for the request. If @rq->bio is non-NULL then there is
- * additional work required to complete the request.
- */
-TRACE_EVENT(block_rq_complete,
+DECLARE_EVENT_CLASS(block_rq_completion,
TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
@@ -144,6 +132,41 @@ TRACE_EVENT(block_rq_complete,
__entry->nr_sector, __entry->error)
);
+/**
+ * block_rq_complete - block IO operation completed by device driver
+ * @rq: block operations request
+ * @error: status code
+ * @nr_bytes: number of completed bytes
+ *
+ * The block_rq_complete tracepoint event indicates that some portion
+ * of operation request has been completed by the device driver. If
+ * the @rq->bio is %NULL, then there is absolutely no additional work to
+ * do for the request. If @rq->bio is non-NULL then there is
+ * additional work required to complete the request.
+ */
+DEFINE_EVENT(block_rq_completion, block_rq_complete,
+
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
+
+ TP_ARGS(rq, error, nr_bytes)
+);
+
+/**
+ * block_rq_error - block IO operation error reported by device driver
+ * @rq: block operations request
+ * @error: status code
+ * @nr_bytes: number of completed bytes
+ *
+ * The block_rq_error tracepoint event indicates that some portion
+ * of operation request has failed as reported by the device driver.
+ */
+DEFINE_EVENT(block_rq_completion, block_rq_error,
+
+ TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
+
+ TP_ARGS(rq, error, nr_bytes)
+);
+
DECLARE_EVENT_CLASS(block_rq,
TP_PROTO(struct request *rq),
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 0d729664b4b4..f068ff30d654 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -53,6 +53,7 @@ struct btrfs_space_info;
{ BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
{ BTRFS_UUID_TREE_OBJECTID, "UUID_TREE" }, \
{ BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" }, \
+ { BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },\
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
#define show_root_type(obj) \
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index 7d48e7079e48..c6d5d70dc7a5 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -67,10 +67,10 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
#ifdef CONFIG_COMPACTION
TRACE_EVENT(mm_compaction_migratepages,
- TP_PROTO(unsigned long nr_all,
+ TP_PROTO(struct compact_control *cc,
unsigned int nr_succeeded),
- TP_ARGS(nr_all, nr_succeeded),
+ TP_ARGS(cc, nr_succeeded),
TP_STRUCT__entry(
__field(unsigned long, nr_migrated)
@@ -79,7 +79,7 @@ TRACE_EVENT(mm_compaction_migratepages,
TP_fast_assign(
__entry->nr_migrated = nr_succeeded;
- __entry->nr_failed = nr_all - nr_succeeded;
+ __entry->nr_failed = cc->nr_migratepages - nr_succeeded;
),
TP_printk("nr_migrated=%lu nr_failed=%lu",
@@ -88,10 +88,10 @@ TRACE_EVENT(mm_compaction_migratepages,
);
TRACE_EVENT(mm_compaction_begin,
- TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
- unsigned long free_pfn, unsigned long zone_end, bool sync),
+ TP_PROTO(struct compact_control *cc, unsigned long zone_start,
+ unsigned long zone_end, bool sync),
- TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync),
+ TP_ARGS(cc, zone_start, zone_end, sync),
TP_STRUCT__entry(
__field(unsigned long, zone_start)
@@ -103,8 +103,8 @@ TRACE_EVENT(mm_compaction_begin,
TP_fast_assign(
__entry->zone_start = zone_start;
- __entry->migrate_pfn = migrate_pfn;
- __entry->free_pfn = free_pfn;
+ __entry->migrate_pfn = cc->migrate_pfn;
+ __entry->free_pfn = cc->free_pfn;
__entry->zone_end = zone_end;
__entry->sync = sync;
),
@@ -118,11 +118,11 @@ TRACE_EVENT(mm_compaction_begin,
);
TRACE_EVENT(mm_compaction_end,
- TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
- unsigned long free_pfn, unsigned long zone_end, bool sync,
+ TP_PROTO(struct compact_control *cc, unsigned long zone_start,
+ unsigned long zone_end, bool sync,
int status),
- TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync, status),
+ TP_ARGS(cc, zone_start, zone_end, sync, status),
TP_STRUCT__entry(
__field(unsigned long, zone_start)
@@ -135,8 +135,8 @@ TRACE_EVENT(mm_compaction_end,
TP_fast_assign(
__entry->zone_start = zone_start;
- __entry->migrate_pfn = migrate_pfn;
- __entry->free_pfn = free_pfn;
+ __entry->migrate_pfn = cc->migrate_pfn;
+ __entry->free_pfn = cc->free_pfn;
__entry->zone_end = zone_end;
__entry->sync = sync;
__entry->status = status;
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 19e957b7f941..d06ffffad434 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -95,6 +95,17 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B);
{ FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \
{ FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"})
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_XATTR);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_CROSS_RENAME);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_NOMEM);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_SWAP_BOOT);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA);
+TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
+
#define show_fc_reason(reason) \
__print_symbolic(reason, \
{ EXT4_FC_REASON_XATTR, "XATTR"}, \
@@ -597,44 +608,44 @@ DEFINE_EVENT(ext4__page_op, ext4_releasepage,
TP_ARGS(page)
);
-DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+DECLARE_EVENT_CLASS(ext4_invalidate_folio_op,
+ TP_PROTO(struct folio *folio, size_t offset, size_t length),
- TP_ARGS(page, offset, length),
+ TP_ARGS(folio, offset, length),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( pgoff_t, index )
- __field( unsigned int, offset )
- __field( unsigned int, length )
+ __field( size_t, offset )
+ __field( size_t, length )
),
TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->ino = page->mapping->host->i_ino;
- __entry->index = page->index;
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->ino = folio->mapping->host->i_ino;
+ __entry->index = folio->index;
__entry->offset = offset;
__entry->length = length;
),
- TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
+ TP_printk("dev %d,%d ino %lu folio_index %lu offset %zu length %zu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long) __entry->index,
__entry->offset, __entry->length)
);
-DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+DEFINE_EVENT(ext4_invalidate_folio_op, ext4_invalidate_folio,
+ TP_PROTO(struct folio *folio, size_t offset, size_t length),
- TP_ARGS(page, offset, length)
+ TP_ARGS(folio, offset, length)
);
-DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage,
- TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
+DEFINE_EVENT(ext4_invalidate_folio_op, ext4_journalled_invalidate_folio,
+ TP_PROTO(struct folio *folio, size_t offset, size_t length),
- TP_ARGS(page, offset, length)
+ TP_ARGS(folio, offset, length)
);
TRACE_EVENT(ext4_discard_blocks,
@@ -2643,7 +2654,7 @@ TRACE_EVENT(ext4_fc_replay_scan,
__entry->off = off;
),
- TP_printk("FC scan pass on dev %d,%d: error %d, off %d",
+ TP_printk("dev %d,%d error %d, off %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->error, __entry->off)
);
@@ -2669,32 +2680,35 @@ TRACE_EVENT(ext4_fc_replay,
__entry->priv2 = priv2;
),
- TP_printk("FC Replay %d,%d: tag %d, ino %d, data1 %d, data2 %d",
+ TP_printk("dev %d,%d: tag %d, ino %d, data1 %d, data2 %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->tag, __entry->ino, __entry->priv1, __entry->priv2)
);
TRACE_EVENT(ext4_fc_commit_start,
- TP_PROTO(struct super_block *sb),
+ TP_PROTO(struct super_block *sb, tid_t commit_tid),
- TP_ARGS(sb),
+ TP_ARGS(sb, commit_tid),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(tid_t, tid)
),
TP_fast_assign(
__entry->dev = sb->s_dev;
+ __entry->tid = commit_tid;
),
- TP_printk("fast_commit started on dev %d,%d",
- MAJOR(__entry->dev), MINOR(__entry->dev))
+ TP_printk("dev %d,%d tid %u", MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->tid)
);
TRACE_EVENT(ext4_fc_commit_stop,
- TP_PROTO(struct super_block *sb, int nblks, int reason),
+ TP_PROTO(struct super_block *sb, int nblks, int reason,
+ tid_t commit_tid),
- TP_ARGS(sb, nblks, reason),
+ TP_ARGS(sb, nblks, reason, commit_tid),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -2703,6 +2717,7 @@ TRACE_EVENT(ext4_fc_commit_stop,
__field(int, num_fc)
__field(int, num_fc_ineligible)
__field(int, nblks_agg)
+ __field(tid_t, tid)
),
TP_fast_assign(
@@ -2713,128 +2728,193 @@ TRACE_EVENT(ext4_fc_commit_stop,
__entry->num_fc_ineligible =
EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits;
__entry->nblks_agg = EXT4_SB(sb)->s_fc_stats.fc_numblks;
+ __entry->tid = commit_tid;
),
- TP_printk("fc on [%d,%d] nblks %d, reason %d, fc = %d, ineligible = %d, agg_nblks %d",
+ TP_printk("dev %d,%d nblks %d, reason %d, fc = %d, ineligible = %d, agg_nblks %d, tid %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->nblks, __entry->reason, __entry->num_fc,
- __entry->num_fc_ineligible, __entry->nblks_agg)
+ __entry->num_fc_ineligible, __entry->nblks_agg, __entry->tid)
);
#define FC_REASON_NAME_STAT(reason) \
show_fc_reason(reason), \
- __entry->sbi->s_fc_stats.fc_ineligible_reason_count[reason]
+ __entry->fc_ineligible_rc[reason]
TRACE_EVENT(ext4_fc_stats,
- TP_PROTO(struct super_block *sb),
-
- TP_ARGS(sb),
-
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(struct ext4_sb_info *, sbi)
- __field(int, count)
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->sbi = EXT4_SB(sb);
- ),
-
- TP_printk("dev %d:%d fc ineligible reasons:\n"
- "%s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d; "
- "num_commits:%ld, ineligible: %ld, numblks: %ld",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR),
- FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME),
- FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE),
- FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM),
- FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT),
- FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE),
- FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR),
- FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE),
- FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA),
- __entry->sbi->s_fc_stats.fc_num_commits,
- __entry->sbi->s_fc_stats.fc_ineligible_commits,
- __entry->sbi->s_fc_stats.fc_numblks)
-
-);
-
-#define DEFINE_TRACE_DENTRY_EVENT(__type) \
- TRACE_EVENT(ext4_fc_track_##__type, \
- TP_PROTO(struct inode *inode, struct dentry *dentry, int ret), \
- \
- TP_ARGS(inode, dentry, ret), \
- \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(int, ino) \
- __field(int, error) \
- ), \
- \
- TP_fast_assign( \
- __entry->dev = inode->i_sb->s_dev; \
- __entry->ino = inode->i_ino; \
- __entry->error = ret; \
- ), \
- \
- TP_printk("dev %d:%d, inode %d, error %d, fc_%s", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, __entry->error, \
- #__type) \
+ TP_PROTO(struct super_block *sb),
+
+ TP_ARGS(sb),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __array(unsigned int, fc_ineligible_rc, EXT4_FC_REASON_MAX)
+ __field(unsigned long, fc_commits)
+ __field(unsigned long, fc_ineligible_commits)
+ __field(unsigned long, fc_numblks)
+ ),
+
+ TP_fast_assign(
+ int i;
+
+ __entry->dev = sb->s_dev;
+ for (i = 0; i < EXT4_FC_REASON_MAX; i++) {
+ __entry->fc_ineligible_rc[i] =
+ EXT4_SB(sb)->s_fc_stats.fc_ineligible_reason_count[i];
+ }
+ __entry->fc_commits = EXT4_SB(sb)->s_fc_stats.fc_num_commits;
+ __entry->fc_ineligible_commits =
+ EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits;
+ __entry->fc_numblks = EXT4_SB(sb)->s_fc_stats.fc_numblks;
+ ),
+
+ TP_printk("dev %d,%d fc ineligible reasons:\n"
+ "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u "
+ "num_commits:%lu, ineligible: %lu, numblks: %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE),
+ FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA),
+ __entry->fc_commits, __entry->fc_ineligible_commits,
+ __entry->fc_numblks)
+);
+
+DECLARE_EVENT_CLASS(ext4_fc_track_dentry,
+
+ TP_PROTO(handle_t *handle, struct inode *inode,
+ struct dentry *dentry, int ret),
+
+ TP_ARGS(handle, inode, dentry, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, t_tid)
+ __field(ino_t, i_ino)
+ __field(tid_t, i_sync_tid)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->t_tid = handle->h_transaction->t_tid;
+ __entry->i_ino = inode->i_ino;
+ __entry->i_sync_tid = ei->i_sync_tid;
+ __entry->error = ret;
+ ),
+
+ TP_printk("dev %d,%d, t_tid %u, ino %lu, i_sync_tid %u, error %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
+ __entry->error
)
+);
-DEFINE_TRACE_DENTRY_EVENT(create);
-DEFINE_TRACE_DENTRY_EVENT(link);
-DEFINE_TRACE_DENTRY_EVENT(unlink);
+#define DEFINE_EVENT_CLASS_DENTRY(__type) \
+DEFINE_EVENT(ext4_fc_track_dentry, ext4_fc_track_##__type, \
+ TP_PROTO(handle_t *handle, struct inode *inode, \
+ struct dentry *dentry, int ret), \
+ TP_ARGS(handle, inode, dentry, ret) \
+)
+
+DEFINE_EVENT_CLASS_DENTRY(create);
+DEFINE_EVENT_CLASS_DENTRY(link);
+DEFINE_EVENT_CLASS_DENTRY(unlink);
TRACE_EVENT(ext4_fc_track_inode,
- TP_PROTO(struct inode *inode, int ret),
+ TP_PROTO(handle_t *handle, struct inode *inode, int ret),
- TP_ARGS(inode, ret),
+ TP_ARGS(handle, inode, ret),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(int, ino)
- __field(int, error)
- ),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, t_tid)
+ __field(ino_t, i_ino)
+ __field(tid_t, i_sync_tid)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ struct ext4_inode_info *ei = EXT4_I(inode);
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->error = ret;
- ),
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->t_tid = handle->h_transaction->t_tid;
+ __entry->i_ino = inode->i_ino;
+ __entry->i_sync_tid = ei->i_sync_tid;
+ __entry->error = ret;
+ ),
- TP_printk("dev %d:%d, inode %d, error %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino, __entry->error)
+ TP_printk("dev %d:%d, t_tid %u, inode %lu, i_sync_tid %u, error %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
+ __entry->error)
);
TRACE_EVENT(ext4_fc_track_range,
- TP_PROTO(struct inode *inode, long start, long end, int ret),
-
- TP_ARGS(inode, start, end, ret),
-
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(int, ino)
- __field(long, start)
- __field(long, end)
- __field(int, error)
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->start = start;
- __entry->end = end;
- __entry->error = ret;
- ),
-
- TP_printk("dev %d:%d, inode %d, error %d, start %ld, end %ld",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino, __entry->error, __entry->start,
- __entry->end)
+ TP_PROTO(handle_t *handle, struct inode *inode,
+ long start, long end, int ret),
+
+ TP_ARGS(handle, inode, start, end, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(tid_t, t_tid)
+ __field(ino_t, i_ino)
+ __field(tid_t, i_sync_tid)
+ __field(long, start)
+ __field(long, end)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->t_tid = handle->h_transaction->t_tid;
+ __entry->i_ino = inode->i_ino;
+ __entry->i_sync_tid = ei->i_sync_tid;
+ __entry->start = start;
+ __entry->end = end;
+ __entry->error = ret;
+ ),
+
+ TP_printk("dev %d:%d, t_tid %u, inode %lu, i_sync_tid %u, error %d, start %ld, end %ld",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->t_tid, __entry->i_ino, __entry->i_sync_tid,
+ __entry->error, __entry->start, __entry->end)
+ );
+
+TRACE_EVENT(ext4_fc_cleanup,
+ TP_PROTO(journal_t *journal, int full, tid_t tid),
+
+ TP_ARGS(journal, full, tid),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, j_fc_off)
+ __field(int, full)
+ __field(tid_t, tid)
+ ),
+
+ TP_fast_assign(
+ struct super_block *sb = journal->j_private;
+
+ __entry->dev = sb->s_dev;
+ __entry->j_fc_off = journal->j_fc_off;
+ __entry->full = full;
+ __entry->tid = tid;
+ ),
+
+ TP_printk("dev %d,%d, j_fc_off %d, full %d, tid %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->j_fc_off, __entry->full, __entry->tid)
);
TRACE_EVENT(ext4_update_sb,
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 7346f0164cf4..cddf5b6fbeb4 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -29,22 +29,22 @@ TRACE_EVENT(io_uring_create,
TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),
TP_STRUCT__entry (
- __field( int, fd )
- __field( void *, ctx )
+ __field( int, fd )
+ __field( void *, ctx )
__field( u32, sq_entries )
__field( u32, cq_entries )
__field( u32, flags )
),
TP_fast_assign(
- __entry->fd = fd;
+ __entry->fd = fd;
__entry->ctx = ctx;
__entry->sq_entries = sq_entries;
__entry->cq_entries = cq_entries;
__entry->flags = flags;
),
- TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d",
+ TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x",
__entry->ctx, __entry->fd, __entry->sq_entries,
__entry->cq_entries, __entry->flags)
);
@@ -57,10 +57,9 @@ TRACE_EVENT(io_uring_create,
* @opcode: describes which operation to perform
* @nr_user_files: number of registered files
* @nr_user_bufs: number of registered buffers
- * @cq_ev_fd: whether eventfs registered or not
* @ret: return code
*
- * Allows to trace fixed files/buffers/eventfds, that could be registered to
+ * Allows to trace fixed files/buffers, that could be registered to
* avoid an overhead of getting references to them for every operation. This
* event, together with io_uring_file_get, can provide a full picture of how
* much overhead one can reduce via fixing.
@@ -68,17 +67,16 @@ TRACE_EVENT(io_uring_create,
TRACE_EVENT(io_uring_register,
TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
- unsigned nr_bufs, bool eventfd, long ret),
+ unsigned nr_bufs, long ret),
- TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),
+ TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( unsigned, opcode )
- __field( unsigned, nr_files )
- __field( unsigned, nr_bufs )
- __field( bool, eventfd )
- __field( long, ret )
+ __field( void *, ctx )
+ __field( unsigned, opcode )
+ __field( unsigned, nr_files)
+ __field( unsigned, nr_bufs )
+ __field( long, ret )
),
TP_fast_assign(
@@ -86,20 +84,21 @@ TRACE_EVENT(io_uring_register,
__entry->opcode = opcode;
__entry->nr_files = nr_files;
__entry->nr_bufs = nr_bufs;
- __entry->eventfd = eventfd;
__entry->ret = ret;
),
TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
- "eventfd %d, ret %ld",
+ "ret %ld",
__entry->ctx, __entry->opcode, __entry->nr_files,
- __entry->nr_bufs, __entry->eventfd, __entry->ret)
+ __entry->nr_bufs, __entry->ret)
);
/**
* io_uring_file_get - called before getting references to an SQE file
*
* @ctx: pointer to a ring context structure
+ * @req: pointer to a submitted request
+ * @user_data: user data associated with the request
* @fd: SQE file descriptor
*
* Allows to trace out how often an SQE file reference is obtained, which can
@@ -108,59 +107,71 @@ TRACE_EVENT(io_uring_register,
*/
TRACE_EVENT(io_uring_file_get,
- TP_PROTO(void *ctx, int fd),
+ TP_PROTO(void *ctx, void *req, unsigned long long user_data, int fd),
- TP_ARGS(ctx, fd),
+ TP_ARGS(ctx, req, user_data, fd),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( int, fd )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( u64, user_data )
+ __field( int, fd )
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = ctx;
+ __entry->req = req;
+ __entry->user_data = user_data;
__entry->fd = fd;
),
- TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
+ TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
+ __entry->ctx, __entry->req, __entry->user_data, __entry->fd)
);
/**
* io_uring_queue_async_work - called before submitting a new async work
*
* @ctx: pointer to a ring context structure
- * @hashed: type of workqueue, hashed or normal
* @req: pointer to a submitted request
+ * @user_data: user data associated with the request
+ * @opcode: opcode of request
+ * @flags request flags
* @work: pointer to a submitted io_wq_work
+ * @rw: type of workqueue, hashed or normal
*
* Allows to trace asynchronous work submission.
*/
TRACE_EVENT(io_uring_queue_async_work,
- TP_PROTO(void *ctx, int rw, void * req, struct io_wq_work *work,
- unsigned int flags),
+ TP_PROTO(void *ctx, void * req, unsigned long long user_data, u8 opcode,
+ unsigned int flags, struct io_wq_work *work, int rw),
- TP_ARGS(ctx, rw, req, work, flags),
+ TP_ARGS(ctx, req, user_data, flags, opcode, work, rw),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( int, rw )
- __field( void *, req )
- __field( struct io_wq_work *, work )
- __field( unsigned int, flags )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( u64, user_data )
+ __field( u8, opcode )
+ __field( unsigned int, flags )
+ __field( struct io_wq_work *, work )
+ __field( int, rw )
),
TP_fast_assign(
- __entry->ctx = ctx;
- __entry->rw = rw;
- __entry->req = req;
- __entry->work = work;
- __entry->flags = flags;
+ __entry->ctx = ctx;
+ __entry->req = req;
+ __entry->user_data = user_data;
+ __entry->flags = flags;
+ __entry->opcode = opcode;
+ __entry->work = work;
+ __entry->rw = rw;
),
- TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
- __entry->ctx, __entry->req, __entry->flags,
- __entry->rw ? "hashed" : "normal", __entry->work)
+ TP_printk("ring %p, request %p, user_data 0x%llx, opcode %d, flags 0x%x, %s queue, work %p",
+ __entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
+ __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
);
/**
@@ -169,30 +180,33 @@ TRACE_EVENT(io_uring_queue_async_work,
* @ctx: pointer to a ring context structure
* @req: pointer to a deferred request
* @user_data: user data associated with the request
+ * @opcode: opcode of request
*
* Allows to track deferred requests, to get an insight about what requests are
* not started immediately.
*/
TRACE_EVENT(io_uring_defer,
- TP_PROTO(void *ctx, void *req, unsigned long long user_data),
+ TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode),
- TP_ARGS(ctx, req, user_data),
+ TP_ARGS(ctx, req, user_data, opcode),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( void *, req )
- __field( unsigned long long, data )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, data )
+ __field( u8, opcode )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->req = req;
__entry->data = user_data;
+ __entry->opcode = opcode;
),
- TP_printk("ring %p, request %p user_data %llu", __entry->ctx,
- __entry->req, __entry->data)
+ TP_printk("ring %p, request %p, user_data 0x%llx, opcode %d",
+ __entry->ctx, __entry->req, __entry->data, __entry->opcode)
);
/**
@@ -250,7 +264,7 @@ TRACE_EVENT(io_uring_cqring_wait,
),
TP_fast_assign(
- __entry->ctx = ctx;
+ __entry->ctx = ctx;
__entry->min_events = min_events;
),
@@ -260,7 +274,10 @@ TRACE_EVENT(io_uring_cqring_wait,
/**
* io_uring_fail_link - called before failing a linked request
*
+ * @ctx: pointer to a ring context structure
* @req: request, which links were cancelled
+ * @user_data: user data associated with the request
+ * @opcode: opcode of request
* @link: cancelled link
*
* Allows to track linked requests cancellation, to see not only that some work
@@ -268,27 +285,36 @@ TRACE_EVENT(io_uring_cqring_wait,
*/
TRACE_EVENT(io_uring_fail_link,
- TP_PROTO(void *req, void *link),
+ TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, void *link),
- TP_ARGS(req, link),
+ TP_ARGS(ctx, req, user_data, opcode, link),
TP_STRUCT__entry (
- __field( void *, req )
- __field( void *, link )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( void *, link )
),
TP_fast_assign(
- __entry->req = req;
- __entry->link = link;
+ __entry->ctx = ctx;
+ __entry->req = req;
+ __entry->user_data = user_data;
+ __entry->opcode = opcode;
+ __entry->link = link;
),
- TP_printk("request %p, link %p", __entry->req, __entry->link)
+ TP_printk("ring %p, request %p, user_data 0x%llx, opcode %d, link %p",
+ __entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
+ __entry->link)
);
/**
* io_uring_complete - called when completing an SQE
*
* @ctx: pointer to a ring context structure
+ * @req: pointer to a submitted request
* @user_data: user data associated with the request
* @res: result of the request
* @cflags: completion flags
@@ -296,12 +322,13 @@ TRACE_EVENT(io_uring_fail_link,
*/
TRACE_EVENT(io_uring_complete,
- TP_PROTO(void *ctx, u64 user_data, int res, unsigned cflags),
+ TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags),
- TP_ARGS(ctx, user_data, res, cflags),
+ TP_ARGS(ctx, req, user_data, res, cflags),
TP_STRUCT__entry (
__field( void *, ctx )
+ __field( void *, req )
__field( u64, user_data )
__field( int, res )
__field( unsigned, cflags )
@@ -309,14 +336,16 @@ TRACE_EVENT(io_uring_complete,
TP_fast_assign(
__entry->ctx = ctx;
+ __entry->req = req;
__entry->user_data = user_data;
__entry->res = res;
__entry->cflags = cflags;
),
- TP_printk("ring %p, user_data 0x%llx, result %d, cflags %x",
- __entry->ctx, (unsigned long long)__entry->user_data,
- __entry->res, __entry->cflags)
+ TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x",
+ __entry->ctx, __entry->req,
+ __entry->user_data,
+ __entry->res, __entry->cflags)
);
/**
@@ -324,8 +353,8 @@ TRACE_EVENT(io_uring_complete,
*
* @ctx: pointer to a ring context structure
* @req: pointer to a submitted request
- * @opcode: opcode of request
* @user_data: user data associated with the request
+ * @opcode: opcode of request
* @flags request flags
* @force_nonblock: whether a context blocking or not
* @sq_thread: true if sq_thread has submitted this SQE
@@ -335,34 +364,34 @@ TRACE_EVENT(io_uring_complete,
*/
TRACE_EVENT(io_uring_submit_sqe,
- TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags,
+ TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, u32 flags,
bool force_nonblock, bool sq_thread),
- TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread),
+ TP_ARGS(ctx, req, user_data, opcode, flags, force_nonblock, sq_thread),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( void *, req )
- __field( u8, opcode )
- __field( u64, user_data )
- __field( u32, flags )
- __field( bool, force_nonblock )
- __field( bool, sq_thread )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( u32, flags )
+ __field( bool, force_nonblock )
+ __field( bool, sq_thread )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->req = req;
- __entry->opcode = opcode;
__entry->user_data = user_data;
+ __entry->opcode = opcode;
__entry->flags = flags;
__entry->force_nonblock = force_nonblock;
__entry->sq_thread = sq_thread;
),
- TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, "
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %d, flags 0x%x, "
"non block %d, sq_thread %d", __entry->ctx, __entry->req,
- __entry->opcode, (unsigned long long)__entry->user_data,
+ __entry->user_data, __entry->opcode,
__entry->flags, __entry->force_nonblock, __entry->sq_thread)
);
@@ -371,8 +400,8 @@ TRACE_EVENT(io_uring_submit_sqe,
*
* @ctx: pointer to a ring context structure
* @req: pointer to the armed request
- * @opcode: opcode of request
* @user_data: user data associated with the request
+ * @opcode: opcode of request
* @mask: request poll events mask
* @events: registered events of interest
*
@@ -381,155 +410,110 @@ TRACE_EVENT(io_uring_submit_sqe,
*/
TRACE_EVENT(io_uring_poll_arm,
- TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data,
+ TP_PROTO(void *ctx, void *req, u64 user_data, u8 opcode,
int mask, int events),
- TP_ARGS(ctx, req, opcode, user_data, mask, events),
+ TP_ARGS(ctx, req, user_data, opcode, mask, events),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( void *, req )
- __field( u8, opcode )
- __field( u64, user_data )
- __field( int, mask )
- __field( int, events )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( int, mask )
+ __field( int, events )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->req = req;
- __entry->opcode = opcode;
__entry->user_data = user_data;
+ __entry->opcode = opcode;
__entry->mask = mask;
__entry->events = events;
),
- TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
- __entry->ctx, __entry->req, __entry->opcode,
- (unsigned long long) __entry->user_data,
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %d, mask 0x%x, events 0x%x",
+ __entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
__entry->mask, __entry->events)
);
-TRACE_EVENT(io_uring_poll_wake,
-
- TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),
-
- TP_ARGS(ctx, opcode, user_data, mask),
-
- TP_STRUCT__entry (
- __field( void *, ctx )
- __field( u8, opcode )
- __field( u64, user_data )
- __field( int, mask )
- ),
-
- TP_fast_assign(
- __entry->ctx = ctx;
- __entry->opcode = opcode;
- __entry->user_data = user_data;
- __entry->mask = mask;
- ),
-
- TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
- __entry->ctx, __entry->opcode,
- (unsigned long long) __entry->user_data,
- __entry->mask)
-);
-
-TRACE_EVENT(io_uring_task_add,
-
- TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),
-
- TP_ARGS(ctx, opcode, user_data, mask),
-
- TP_STRUCT__entry (
- __field( void *, ctx )
- __field( u8, opcode )
- __field( u64, user_data )
- __field( int, mask )
- ),
-
- TP_fast_assign(
- __entry->ctx = ctx;
- __entry->opcode = opcode;
- __entry->user_data = user_data;
- __entry->mask = mask;
- ),
-
- TP_printk("ring %p, op %d, data 0x%llx, mask %x",
- __entry->ctx, __entry->opcode,
- (unsigned long long) __entry->user_data,
- __entry->mask)
-);
-
/*
- * io_uring_task_run - called when task_work_run() executes the poll events
- * notification callbacks
+ * io_uring_task_add - called after adding a task
*
* @ctx: pointer to a ring context structure
- * @req: pointer to the armed request
- * @opcode: opcode of request
+ * @req: pointer to request
* @user_data: user data associated with the request
+ * @opcode: opcode of request
+ * @mask: request poll events mask
*
- * Allows to track when notified poll events are processed
*/
-TRACE_EVENT(io_uring_task_run,
+TRACE_EVENT(io_uring_task_add,
- TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data),
+ TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, int mask),
- TP_ARGS(ctx, req, opcode, user_data),
+ TP_ARGS(ctx, req, user_data, opcode, mask),
TP_STRUCT__entry (
- __field( void *, ctx )
- __field( void *, req )
- __field( u8, opcode )
- __field( u64, user_data )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( int, mask )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->req = req;
- __entry->opcode = opcode;
__entry->user_data = user_data;
+ __entry->opcode = opcode;
+ __entry->mask = mask;
),
- TP_printk("ring %p, req %p, op %d, data 0x%llx",
- __entry->ctx, __entry->req, __entry->opcode,
- (unsigned long long) __entry->user_data)
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %d, mask %x",
+ __entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
+ __entry->mask)
);
/*
* io_uring_req_failed - called when an sqe is errored dring submission
*
* @sqe: pointer to the io_uring_sqe that failed
+ * @ctx: pointer to a ring context structure
+ * @req: pointer to request
* @error: error it failed with
*
* Allows easier diagnosing of malformed requests in production systems.
*/
TRACE_EVENT(io_uring_req_failed,
- TP_PROTO(const struct io_uring_sqe *sqe, int error),
+ TP_PROTO(const struct io_uring_sqe *sqe, void *ctx, void *req, int error),
- TP_ARGS(sqe, error),
+ TP_ARGS(sqe, ctx, req, error),
TP_STRUCT__entry (
- __field( u8, opcode )
- __field( u8, flags )
- __field( u8, ioprio )
- __field( u64, off )
- __field( u64, addr )
- __field( u32, len )
- __field( u32, op_flags )
- __field( u64, user_data )
- __field( u16, buf_index )
- __field( u16, personality )
- __field( u32, file_index )
- __field( u64, pad1 )
- __field( u64, pad2 )
- __field( int, error )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( u8, flags )
+ __field( u8, ioprio )
+ __field( u64, off )
+ __field( u64, addr )
+ __field( u32, len )
+ __field( u32, op_flags )
+ __field( u16, buf_index )
+ __field( u16, personality )
+ __field( u32, file_index )
+ __field( u64, pad1 )
+ __field( u64, pad2 )
+ __field( int, error )
),
TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->req = req;
+ __entry->user_data = sqe->user_data;
__entry->opcode = sqe->opcode;
__entry->flags = sqe->flags;
__entry->ioprio = sqe->ioprio;
@@ -537,7 +521,6 @@ TRACE_EVENT(io_uring_req_failed,
__entry->addr = sqe->addr;
__entry->len = sqe->len;
__entry->op_flags = sqe->rw_flags;
- __entry->user_data = sqe->user_data;
__entry->buf_index = sqe->buf_index;
__entry->personality = sqe->personality;
__entry->file_index = sqe->file_index;
@@ -546,13 +529,15 @@ TRACE_EVENT(io_uring_req_failed,
__entry->error = error;
),
- TP_printk("op %d, flags=0x%x, prio=%d, off=%llu, addr=%llu, "
- "len=%u, rw_flags=0x%x, user_data=0x%llx, buf_index=%d, "
+ TP_printk("ring %p, req %p, user_data 0x%llx, "
+ "op %d, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
+ "len=%u, rw_flags=0x%x, buf_index=%d, "
"personality=%d, file_index=%d, pad=0x%llx/%llx, error=%d",
+ __entry->ctx, __entry->req, __entry->user_data,
__entry->opcode, __entry->flags, __entry->ioprio,
(unsigned long long)__entry->off,
(unsigned long long) __entry->addr, __entry->len,
- __entry->op_flags, (unsigned long long) __entry->user_data,
+ __entry->op_flags,
__entry->buf_index, __entry->personality, __entry->file_index,
(unsigned long long) __entry->pad1,
(unsigned long long) __entry->pad2, __entry->error)
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
deleted file mode 100644
index a2d9aa16a5d7..000000000000
--- a/include/trace/events/random.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM random
-
-#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_RANDOM_H
-
-#include <linux/writeback.h>
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(add_device_randomness,
- TP_PROTO(int bytes, unsigned long IP),
-
- TP_ARGS(bytes, IP),
-
- TP_STRUCT__entry(
- __field( int, bytes )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->bytes = bytes;
- __entry->IP = IP;
- ),
-
- TP_printk("bytes %d caller %pS",
- __entry->bytes, (void *)__entry->IP)
-);
-
-DECLARE_EVENT_CLASS(random__mix_pool_bytes,
- TP_PROTO(int bytes, unsigned long IP),
-
- TP_ARGS(bytes, IP),
-
- TP_STRUCT__entry(
- __field( int, bytes )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->bytes = bytes;
- __entry->IP = IP;
- ),
-
- TP_printk("input pool: bytes %d caller %pS",
- __entry->bytes, (void *)__entry->IP)
-);
-
-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
- TP_PROTO(int bytes, unsigned long IP),
-
- TP_ARGS(bytes, IP)
-);
-
-DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
- TP_PROTO(int bytes, unsigned long IP),
-
- TP_ARGS(bytes, IP)
-);
-
-TRACE_EVENT(credit_entropy_bits,
- TP_PROTO(int bits, int entropy_count, unsigned long IP),
-
- TP_ARGS(bits, entropy_count, IP),
-
- TP_STRUCT__entry(
- __field( int, bits )
- __field( int, entropy_count )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->bits = bits;
- __entry->entropy_count = entropy_count;
- __entry->IP = IP;
- ),
-
- TP_printk("input pool: bits %d entropy_count %d caller %pS",
- __entry->bits, __entry->entropy_count, (void *)__entry->IP)
-);
-
-TRACE_EVENT(debit_entropy,
- TP_PROTO(int debit_bits),
-
- TP_ARGS( debit_bits),
-
- TP_STRUCT__entry(
- __field( int, debit_bits )
- ),
-
- TP_fast_assign(
- __entry->debit_bits = debit_bits;
- ),
-
- TP_printk("input pool: debit_bits %d", __entry->debit_bits)
-);
-
-TRACE_EVENT(add_input_randomness,
- TP_PROTO(int input_bits),
-
- TP_ARGS(input_bits),
-
- TP_STRUCT__entry(
- __field( int, input_bits )
- ),
-
- TP_fast_assign(
- __entry->input_bits = input_bits;
- ),
-
- TP_printk("input_pool_bits %d", __entry->input_bits)
-);
-
-TRACE_EVENT(add_disk_randomness,
- TP_PROTO(dev_t dev, int input_bits),
-
- TP_ARGS(dev, input_bits),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, input_bits )
- ),
-
- TP_fast_assign(
- __entry->dev = dev;
- __entry->input_bits = input_bits;
- ),
-
- TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev),
- MINOR(__entry->dev), __entry->input_bits)
-);
-
-DECLARE_EVENT_CLASS(random__get_random_bytes,
- TP_PROTO(int nbytes, unsigned long IP),
-
- TP_ARGS(nbytes, IP),
-
- TP_STRUCT__entry(
- __field( int, nbytes )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->nbytes = nbytes;
- __entry->IP = IP;
- ),
-
- TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP)
-);
-
-DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
- TP_PROTO(int nbytes, unsigned long IP),
-
- TP_ARGS(nbytes, IP)
-);
-
-DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
- TP_PROTO(int nbytes, unsigned long IP),
-
- TP_ARGS(nbytes, IP)
-);
-
-DECLARE_EVENT_CLASS(random__extract_entropy,
- TP_PROTO(int nbytes, int entropy_count, unsigned long IP),
-
- TP_ARGS(nbytes, entropy_count, IP),
-
- TP_STRUCT__entry(
- __field( int, nbytes )
- __field( int, entropy_count )
- __field(unsigned long, IP )
- ),
-
- TP_fast_assign(
- __entry->nbytes = nbytes;
- __entry->entropy_count = entropy_count;
- __entry->IP = IP;
- ),
-
- TP_printk("input pool: nbytes %d entropy_count %d caller %pS",
- __entry->nbytes, __entry->entropy_count, (void *)__entry->IP)
-);
-
-
-DEFINE_EVENT(random__extract_entropy, extract_entropy,
- TP_PROTO(int nbytes, int entropy_count, unsigned long IP),
-
- TP_ARGS(nbytes, entropy_count, IP)
-);
-
-TRACE_EVENT(urandom_read,
- TP_PROTO(int got_bits, int pool_left, int input_left),
-
- TP_ARGS(got_bits, pool_left, input_left),
-
- TP_STRUCT__entry(
- __field( int, got_bits )
- __field( int, pool_left )
- __field( int, input_left )
- ),
-
- TP_fast_assign(
- __entry->got_bits = got_bits;
- __entry->pool_left = pool_left;
- __entry->input_left = input_left;
- ),
-
- TP_printk("got_bits %d nonblocking_pool_entropy_left %d "
- "input_entropy_left %d", __entry->got_bits,
- __entry->pool_left, __entry->input_left)
-);
-
-TRACE_EVENT(prandom_u32,
-
- TP_PROTO(unsigned int ret),
-
- TP_ARGS(ret),
-
- TP_STRUCT__entry(
- __field( unsigned int, ret)
- ),
-
- TP_fast_assign(
- __entry->ret = ret;
- ),
-
- TP_printk("ret=%u" , __entry->ret)
-);
-
-#endif /* _TRACE_RANDOM_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 670e41783edd..90b2fb0292cb 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -794,16 +794,15 @@ TRACE_EVENT_RCU(rcu_torture_read,
* Tracepoint for rcu_barrier() execution. The string "s" describes
* the rcu_barrier phase:
* "Begin": rcu_barrier() started.
+ * "CB": An rcu_barrier_callback() invoked a callback, not the last.
* "EarlyExit": rcu_barrier() piggybacked, thus early exit.
* "Inc1": rcu_barrier() piggyback check counter incremented.
- * "OfflineNoCBQ": rcu_barrier() found offline no-CBs CPU with callbacks.
- * "OnlineQ": rcu_barrier() found online CPU with callbacks.
- * "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
+ * "Inc2": rcu_barrier() piggyback check counter incremented.
* "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
* "IRQNQ": An rcu_barrier_callback() callback found no callbacks.
- * "CB": An rcu_barrier_callback() invoked a callback, not the last.
* "LastCB": An rcu_barrier_callback() invoked the last callback.
- * "Inc2": rcu_barrier() piggyback check counter incremented.
+ * "NQ": rcu_barrier() found a CPU with no callbacks.
+ * "OnlineQ": rcu_barrier() found online CPU with callbacks.
* The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
* is the count of remaining callbacks, and "done" is the piggybacking count.
*/
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 94640482cfe7..65e786756321 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -187,7 +187,9 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
TP_ARGS(p));
#ifdef CREATE_TRACE_POINTS
-static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+static inline long __trace_sched_switch_state(bool preempt,
+ unsigned int prev_state,
+ struct task_struct *p)
{
unsigned int state;
@@ -208,7 +210,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
* it for left shift operation to get the correct task->state
* mapping.
*/
- state = task_state_index(p);
+ state = __task_state_index(prev_state, p->exit_state);
return state ? (1 << (state - 1)) : state;
}
@@ -220,10 +222,11 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
TRACE_EVENT(sched_switch,
TP_PROTO(bool preempt,
+ unsigned int prev_state,
struct task_struct *prev,
struct task_struct *next),
- TP_ARGS(preempt, prev, next),
+ TP_ARGS(preempt, prev_state, prev, next),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
@@ -239,7 +242,7 @@ TRACE_EVENT(sched_switch,
memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
__entry->prev_prio = prev->prio;
- __entry->prev_state = __trace_sched_switch_state(preempt, prev);
+ __entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev);
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index f3a4b4d60714..cee4b2b64ae4 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -33,6 +33,34 @@ TRACE_EVENT(scmi_xfer_begin,
__entry->seq, __entry->poll)
);
+TRACE_EVENT(scmi_xfer_response_wait,
+ TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+ u32 timeout, bool poll),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, timeout, poll),
+
+ TP_STRUCT__entry(
+ __field(int, transfer_id)
+ __field(u8, msg_id)
+ __field(u8, protocol_id)
+ __field(u16, seq)
+ __field(u32, timeout)
+ __field(bool, poll)
+ ),
+
+ TP_fast_assign(
+ __entry->transfer_id = transfer_id;
+ __entry->msg_id = msg_id;
+ __entry->protocol_id = protocol_id;
+ __entry->seq = seq;
+ __entry->timeout = timeout;
+ __entry->poll = poll;
+ ),
+
+ TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u tmo_ms=%u poll=%u",
+ __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
+ __entry->seq, __entry->timeout, __entry->poll)
+);
+
TRACE_EVENT(scmi_xfer_end,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
int status),
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 29982d60b68a..ab8ae1f6ba84 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1625,26 +1625,53 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE);
{ SVC_PENDING, "SVC_PENDING" }, \
{ SVC_COMPLETE, "SVC_COMPLETE" })
+#define SVC_RQST_ENDPOINT_FIELDS(r) \
+ __sockaddr(server, (r)->rq_xprt->xpt_locallen) \
+ __sockaddr(client, (r)->rq_xprt->xpt_remotelen) \
+ __field(unsigned int, netns_ino) \
+ __field(u32, xid)
+
+#define SVC_RQST_ENDPOINT_ASSIGNMENTS(r) \
+ do { \
+ struct svc_xprt *xprt = (r)->rq_xprt; \
+ __assign_sockaddr(server, &xprt->xpt_local, \
+ xprt->xpt_locallen); \
+ __assign_sockaddr(client, &xprt->xpt_remote, \
+ xprt->xpt_remotelen); \
+ __entry->netns_ino = xprt->xpt_net->ns.inum; \
+ __entry->xid = be32_to_cpu((r)->rq_xid); \
+ } while (0)
+
+#define SVC_RQST_ENDPOINT_FORMAT \
+ "xid=0x%08x server=%pISpc client=%pISpc"
+
+#define SVC_RQST_ENDPOINT_VARARGS \
+ __entry->xid, __get_sockaddr(server), __get_sockaddr(client)
+
TRACE_EVENT(svc_authenticate,
TP_PROTO(const struct svc_rqst *rqst, int auth_res),
TP_ARGS(rqst, auth_res),
TP_STRUCT__entry(
- __field(u32, xid)
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
__field(unsigned long, svc_status)
__field(unsigned long, auth_stat)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
__entry->svc_status = auth_res;
__entry->auth_stat = be32_to_cpu(rqst->rq_auth_stat);
),
- TP_printk("xid=0x%08x auth_res=%s auth_stat=%s",
- __entry->xid, svc_show_status(__entry->svc_status),
- rpc_show_auth_stat(__entry->auth_stat))
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT
+ " auth_res=%s auth_stat=%s",
+ SVC_RQST_ENDPOINT_VARARGS,
+ svc_show_status(__entry->svc_status),
+ rpc_show_auth_stat(__entry->auth_stat))
);
TRACE_EVENT(svc_process,
@@ -1680,7 +1707,6 @@ TRACE_EVENT(svc_process,
);
DECLARE_EVENT_CLASS(svc_rqst_event,
-
TP_PROTO(
const struct svc_rqst *rqst
),
@@ -1688,20 +1714,20 @@ DECLARE_EVENT_CLASS(svc_rqst_event,
TP_ARGS(rqst),
TP_STRUCT__entry(
- __field(u32, xid)
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
__field(unsigned long, flags)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
__entry->flags = rqst->rq_flags;
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%s xid=0x%08x flags=%s",
- __get_str(addr), __entry->xid,
- show_rqstp_flags(__entry->flags))
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT " flags=%s",
+ SVC_RQST_ENDPOINT_VARARGS,
+ show_rqstp_flags(__entry->flags))
);
#define DEFINE_SVC_RQST_EVENT(name) \
DEFINE_EVENT(svc_rqst_event, svc_##name, \
@@ -1714,34 +1740,63 @@ DEFINE_SVC_RQST_EVENT(defer);
DEFINE_SVC_RQST_EVENT(drop);
DECLARE_EVENT_CLASS(svc_rqst_status,
-
- TP_PROTO(struct svc_rqst *rqst, int status),
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ int status
+ ),
TP_ARGS(rqst, status),
TP_STRUCT__entry(
- __field(u32, xid)
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
__field(int, status)
__field(unsigned long, flags)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
__entry->status = status;
__entry->flags = rqst->rq_flags;
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("addr=%s xid=0x%08x status=%d flags=%s",
- __get_str(addr), __entry->xid,
- __entry->status, show_rqstp_flags(__entry->flags))
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT " status=%d flags=%s",
+ SVC_RQST_ENDPOINT_VARARGS,
+ __entry->status, show_rqstp_flags(__entry->flags))
);
DEFINE_EVENT(svc_rqst_status, svc_send,
- TP_PROTO(struct svc_rqst *rqst, int status),
+ TP_PROTO(const struct svc_rqst *rqst, int status),
TP_ARGS(rqst, status));
+TRACE_EVENT(svc_stats_latency,
+ TP_PROTO(
+ const struct svc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ SVC_RQST_ENDPOINT_FIELDS(rqst)
+
+ __field(unsigned long, execute)
+ __string(procedure, svc_proc_name(rqst))
+ ),
+
+ TP_fast_assign(
+ SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
+ __entry->execute = ktime_to_us(ktime_sub(ktime_get(),
+ rqst->rq_stime));
+ __assign_str(procedure, svc_proc_name(rqst));
+ ),
+
+ TP_printk(SVC_RQST_ENDPOINT_FORMAT " proc=%s execute-us=%lu",
+ SVC_RQST_ENDPOINT_VARARGS,
+ __get_str(procedure), __entry->execute)
+);
+
#define show_svc_xprt_flags(flags) \
__print_flags(flags, "|", \
{ (1UL << XPT_BUSY), "XPT_BUSY"}, \
@@ -1774,65 +1829,114 @@ TRACE_EVENT(svc_xprt_create_err,
__field(long, error)
__string(program, program)
__string(protocol, protocol)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __sockaddr(addr, salen)
),
TP_fast_assign(
__entry->error = PTR_ERR(xprt);
__assign_str(program, program);
__assign_str(protocol, protocol);
- memcpy(__entry->addr, sap, min(salen, sizeof(__entry->addr)));
+ __assign_sockaddr(addr, sap, salen);
),
TP_printk("addr=%pISpc program=%s protocol=%s error=%ld",
- __entry->addr, __get_str(program), __get_str(protocol),
+ __get_sockaddr(addr), __get_str(program), __get_str(protocol),
__entry->error)
);
+#define SVC_XPRT_ENDPOINT_FIELDS(x) \
+ __sockaddr(server, (x)->xpt_locallen) \
+ __sockaddr(client, (x)->xpt_remotelen) \
+ __field(unsigned long, flags) \
+ __field(unsigned int, netns_ino)
+
+#define SVC_XPRT_ENDPOINT_ASSIGNMENTS(x) \
+ do { \
+ __assign_sockaddr(server, &(x)->xpt_local, \
+ (x)->xpt_locallen); \
+ __assign_sockaddr(client, &(x)->xpt_remote, \
+ (x)->xpt_remotelen); \
+ __entry->flags = (x)->xpt_flags; \
+ __entry->netns_ino = (x)->xpt_net->ns.inum; \
+ } while (0)
+
+#define SVC_XPRT_ENDPOINT_FORMAT \
+ "server=%pISpc client=%pISpc flags=%s"
+
+#define SVC_XPRT_ENDPOINT_VARARGS \
+ __get_sockaddr(server), __get_sockaddr(client), \
+ show_svc_xprt_flags(__entry->flags)
+
TRACE_EVENT(svc_xprt_enqueue,
- TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ const struct svc_rqst *rqst
+ ),
TP_ARGS(xprt, rqst),
TP_STRUCT__entry(
+ SVC_XPRT_ENDPOINT_FIELDS(xprt)
+
__field(int, pid)
- __field(unsigned long, flags)
- __string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
+
__entry->pid = rqst? rqst->rq_task->pid : 0;
- __entry->flags = xprt->xpt_flags;
- __assign_str(addr, xprt->xpt_remotebuf);
),
- TP_printk("addr=%s pid=%d flags=%s", __get_str(addr),
- __entry->pid, show_svc_xprt_flags(__entry->flags))
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT " pid=%d",
+ SVC_XPRT_ENDPOINT_VARARGS, __entry->pid)
+);
+
+TRACE_EVENT(svc_xprt_dequeue,
+ TP_PROTO(
+ const struct svc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ SVC_XPRT_ENDPOINT_FIELDS(rqst->rq_xprt)
+
+ __field(unsigned long, wakeup)
+ ),
+
+ TP_fast_assign(
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(rqst->rq_xprt);
+
+ __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
+ rqst->rq_qtime));
+ ),
+
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT " wakeup-us=%lu",
+ SVC_XPRT_ENDPOINT_VARARGS, __entry->wakeup)
);
DECLARE_EVENT_CLASS(svc_xprt_event,
- TP_PROTO(struct svc_xprt *xprt),
+ TP_PROTO(
+ const struct svc_xprt *xprt
+ ),
TP_ARGS(xprt),
TP_STRUCT__entry(
- __field(unsigned long, flags)
- __string(addr, xprt->xpt_remotebuf)
+ SVC_XPRT_ENDPOINT_FIELDS(xprt)
),
TP_fast_assign(
- __entry->flags = xprt->xpt_flags;
- __assign_str(addr, xprt->xpt_remotebuf);
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
),
- TP_printk("addr=%s flags=%s", __get_str(addr),
- show_svc_xprt_flags(__entry->flags))
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT, SVC_XPRT_ENDPOINT_VARARGS)
);
#define DEFINE_SVC_XPRT_EVENT(name) \
DEFINE_EVENT(svc_xprt_event, svc_xprt_##name, \
TP_PROTO( \
- struct svc_xprt *xprt \
+ const struct svc_xprt *xprt \
), \
TP_ARGS(xprt))
@@ -1850,44 +1954,25 @@ TRACE_EVENT(svc_xprt_accept,
TP_ARGS(xprt, service),
TP_STRUCT__entry(
- __string(addr, xprt->xpt_remotebuf)
+ SVC_XPRT_ENDPOINT_FIELDS(xprt)
+
__string(protocol, xprt->xpt_class->xcl_name)
__string(service, service)
),
TP_fast_assign(
- __assign_str(addr, xprt->xpt_remotebuf);
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
+
__assign_str(protocol, xprt->xpt_class->xcl_name);
__assign_str(service, service);
),
- TP_printk("addr=%s protocol=%s service=%s",
- __get_str(addr), __get_str(protocol), __get_str(service)
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT " protocol=%s service=%s",
+ SVC_XPRT_ENDPOINT_VARARGS,
+ __get_str(protocol), __get_str(service)
)
);
-TRACE_EVENT(svc_xprt_dequeue,
- TP_PROTO(struct svc_rqst *rqst),
-
- TP_ARGS(rqst),
-
- TP_STRUCT__entry(
- __field(unsigned long, flags)
- __field(unsigned long, wakeup)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
- ),
-
- TP_fast_assign(
- __entry->flags = rqst->rq_xprt->xpt_flags;
- __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
- rqst->rq_qtime));
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
- ),
-
- TP_printk("addr=%s flags=%s wakeup-us=%lu", __get_str(addr),
- show_svc_xprt_flags(__entry->flags), __entry->wakeup)
-);
-
TRACE_EVENT(svc_wake_up,
TP_PROTO(int pid),
@@ -1922,31 +2007,6 @@ TRACE_EVENT(svc_alloc_arg_err,
TP_printk("pages=%u", __entry->pages)
);
-TRACE_EVENT(svc_stats_latency,
- TP_PROTO(const struct svc_rqst *rqst),
-
- TP_ARGS(rqst),
-
- TP_STRUCT__entry(
- __field(u32, xid)
- __field(unsigned long, execute)
- __string(procedure, svc_proc_name(rqst))
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
- ),
-
- TP_fast_assign(
- __entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->execute = ktime_to_us(ktime_sub(ktime_get(),
- rqst->rq_stime));
- __assign_str(procedure, svc_proc_name(rqst));
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
- ),
-
- TP_printk("addr=%s xid=0x%08x proc=%s execute-us=%lu",
- __get_str(addr), __entry->xid, __get_str(procedure),
- __entry->execute)
-);
-
DECLARE_EVENT_CLASS(svc_deferred_event,
TP_PROTO(
const struct svc_deferred_req *dr
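The sunrpc.h hunks above replace fixed-size address arrays and pre-formatted address strings with the __sockaddr()/__assign_sockaddr()/__get_sockaddr() helpers introduced by the stage headers further down. A minimal sketch of how the trio fits together in a tracepoint definition; the event name and prototype are hypothetical, only the helper macros and the %pISpc formatting come from this patch:

/* Hypothetical event; only the __sockaddr helpers are taken from this patch. */
TRACE_EVENT(example_connect,
	TP_PROTO(const struct sockaddr *sap, size_t salen),

	TP_ARGS(sap, salen),

	TP_STRUCT__entry(
		__sockaddr(addr, salen)		/* reserve salen bytes of dynamic data */
	),

	TP_fast_assign(
		__assign_sockaddr(addr, sap, salen);	/* copy the sockaddr into the entry */
	),

	TP_printk("addr=%pISpc", __get_sockaddr(addr))
);
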
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index ca2e9009a651..de136dbd623a 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -327,11 +327,11 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
__print_symbolic(__entry->lru, LRU_NAMES))
);
-TRACE_EVENT(mm_vmscan_writepage,
+TRACE_EVENT(mm_vmscan_write_folio,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(folio),
TP_STRUCT__entry(
__field(unsigned long, pfn)
@@ -339,9 +339,9 @@ TRACE_EVENT(mm_vmscan_writepage,
),
TP_fast_assign(
- __entry->pfn = page_to_pfn(page);
+ __entry->pfn = folio_pfn(folio);
__entry->reclaim_flags = trace_reclaim_flags(
- page_is_file_lru(page));
+ folio_is_file_lru(folio));
),
TP_printk("page=%p pfn=0x%lx flags=%s",
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index a345b1e12daf..86b2a82da546 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -735,34 +735,6 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
)
);
-DECLARE_EVENT_CLASS(writeback_congest_waited_template,
-
- TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
-
- TP_ARGS(usec_timeout, usec_delayed),
-
- TP_STRUCT__entry(
- __field( unsigned int, usec_timeout )
- __field( unsigned int, usec_delayed )
- ),
-
- TP_fast_assign(
- __entry->usec_timeout = usec_timeout;
- __entry->usec_delayed = usec_delayed;
- ),
-
- TP_printk("usec_timeout=%u usec_delayed=%u",
- __entry->usec_timeout,
- __entry->usec_delayed)
-);
-
-DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
-
- TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
-
- TP_ARGS(usec_timeout, usec_delayed)
-);
-
DECLARE_EVENT_CLASS(writeback_single_inode_template,
TP_PROTO(struct inode *inode,
diff --git a/include/trace/perf.h b/include/trace/perf.h
index 5d48c46a3008..5800d13146c3 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -21,6 +21,9 @@
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+#undef __get_sockaddr
+#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
+
#undef __get_rel_dynamic_array
#define __get_rel_dynamic_array(field) \
((void *)__entry + \
@@ -38,6 +41,9 @@
#undef __get_rel_bitmask
#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
+#undef __get_rel_sockaddr
+#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+
#undef __perf_count
#define __perf_count(c) (__count = (c))
diff --git a/include/trace/stages/init.h b/include/trace/stages/init.h
new file mode 100644
index 000000000000..000bcfc8dd2e
--- /dev/null
+++ b/include/trace/stages/init.h
@@ -0,0 +1,37 @@
+
+#define __app__(x, y) str__##x##y
+#define __app(x, y) __app__(x, y)
+
+#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)
+
+#define TRACE_MAKE_SYSTEM_STR() \
+ static const char TRACE_SYSTEM_STRING[] = \
+ __stringify(TRACE_SYSTEM)
+
+TRACE_MAKE_SYSTEM_STR();
+
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a) \
+ static struct trace_eval_map __used __initdata \
+ __##TRACE_SYSTEM##_##a = \
+ { \
+ .system = TRACE_SYSTEM_STRING, \
+ .eval_string = #a, \
+ .eval_value = a \
+ }; \
+ static struct trace_eval_map __used \
+ __section("_ftrace_eval_map") \
+ *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
+
+#undef TRACE_DEFINE_SIZEOF
+#define TRACE_DEFINE_SIZEOF(a) \
+ static struct trace_eval_map __used __initdata \
+ __##TRACE_SYSTEM##_##a = \
+ { \
+ .system = TRACE_SYSTEM_STRING, \
+ .eval_string = "sizeof(" #a ")", \
+ .eval_value = sizeof(a) \
+ }; \
+ static struct trace_eval_map __used \
+ __section("_ftrace_eval_map") \
+ *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
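stages/init.h now holds the system-string and eval-map boilerplate that trace_events.h used to define inline (see the deletions near the end of this diff): every TRACE_DEFINE_ENUM()/TRACE_DEFINE_SIZEOF() use drops a struct trace_eval_map into the _ftrace_eval_map section so the tracing core can translate the symbol into its numeric value when rendering print formats. A hedged sketch of typical use in a trace header; the enum and strings are illustrative:

enum example_state {
	EXAMPLE_IDLE,
	EXAMPLE_RUNNING,
};

/* Make the symbols resolvable in the event's print fmt. */
TRACE_DEFINE_ENUM(EXAMPLE_IDLE);
TRACE_DEFINE_ENUM(EXAMPLE_RUNNING);

/* ...then TP_printk() can use:
 *	__print_symbolic(__entry->state,
 *			 { EXAMPLE_IDLE, "idle" },
 *			 { EXAMPLE_RUNNING, "running" })
 */
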
diff --git a/include/trace/stages/stage1_defines.h b/include/trace/stages/stage1_defines.h
new file mode 100644
index 000000000000..a16783419687
--- /dev/null
+++ b/include/trace/stages/stage1_defines.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 1 definitions for creating trace events */
+
+#undef __field
+#define __field(type, item) type item;
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type) type item;
+
+#undef __field_struct
+#define __field_struct(type, item) type item;
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type) type item;
+
+#undef __array
+#define __array(type, item, len) type item[len];
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) u32 __data_loc_##item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) u32 __rel_loc_##item;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
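Stage 1 turns the TP_STRUCT__entry() body into the record layout stored in the ring buffer: fixed fields become plain members, and every dynamic item (string, bitmask, sockaddr, or rel_* variant) collapses into a single u32 location word. A rough expansion sketch for a field list like the sunrpc events above (the struct name follows the DECLARE_EVENT_CLASS() pattern seen later in trace_custom_events.h and trace_events.h):

/* Sketch of stage 1 output for:
 *	__field(unsigned long, flags)
 *	__string(program, program)
 *	__sockaddr(addr, salen)
 */
struct trace_event_raw_example {
	struct trace_entry	ent;
	unsigned long		flags;			/* __field() */
	u32			__data_loc_program;	/* __string(): offset | (len << 16) */
	u32			__data_loc_addr;	/* __sockaddr(): offset | (len << 16) */
	char			__data[];		/* dynamic payload follows */
};
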
diff --git a/include/trace/stages/stage2_defines.h b/include/trace/stages/stage2_defines.h
new file mode 100644
index 000000000000..42fd1e8813ec
--- /dev/null
+++ b/include/trace/stages/stage2_defines.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 2 definitions for creating trace events */
+
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a)
+
+#undef TRACE_DEFINE_SIZEOF
+#define TRACE_DEFINE_SIZEOF(a)
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) u32 item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) u32 item;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
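Stage 2 generates the companion data-offsets structure: each dynamic item becomes a bare u32 slot that stage 5 later fills with the packed offset and length. Continuing the same illustrative field list:

/* Sketch of stage 2 output for __string(program, ...) and __sockaddr(addr, salen). */
struct trace_event_data_offsets_example {
	u32	program;	/* filled by stage 5: offset | (item_length << 16) */
	u32	addr;
};
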
diff --git a/include/trace/stages/stage3_defines.h b/include/trace/stages/stage3_defines.h
new file mode 100644
index 000000000000..e3b183e9d18e
--- /dev/null
+++ b/include/trace/stages/stage3_defines.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 3 definitions for creating trace events */
+
+#undef __entry
+#define __entry field
+
+#undef TP_printk
+#define TP_printk(fmt, args...) fmt "\n", args
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+ ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field) \
+ ((__entry->__data_loc_##field >> 16) & 0xffff)
+
+#undef __get_str
+#define __get_str(field) ((char *)__get_dynamic_array(field))
+
+#undef __get_rel_dynamic_array
+#define __get_rel_dynamic_array(field) \
+ ((void *)__entry + \
+ offsetof(typeof(*__entry), __rel_loc_##field) + \
+ sizeof(__entry->__rel_loc_##field) + \
+ (__entry->__rel_loc_##field & 0xffff))
+
+#undef __get_rel_dynamic_array_len
+#define __get_rel_dynamic_array_len(field) \
+ ((__entry->__rel_loc_##field >> 16) & 0xffff)
+
+#undef __get_rel_str
+#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
+
+#undef __get_bitmask
+#define __get_bitmask(field) \
+ ({ \
+ void *__bitmask = __get_dynamic_array(field); \
+ unsigned int __bitmask_size; \
+ __bitmask_size = __get_dynamic_array_len(field); \
+ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+ })
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) \
+ ({ \
+ void *__bitmask = __get_rel_dynamic_array(field); \
+ unsigned int __bitmask_size; \
+ __bitmask_size = __get_rel_dynamic_array_len(field); \
+ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+ })
+
+#undef __get_sockaddr
+#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
+
+#undef __get_rel_sockaddr
+#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+
+#undef __print_flags
+#define __print_flags(flag, delim, flag_array...) \
+ ({ \
+ static const struct trace_print_flags __flags[] = \
+ { flag_array, { -1, NULL }}; \
+ trace_print_flags_seq(p, delim, flag, __flags); \
+ })
+
+#undef __print_symbolic
+#define __print_symbolic(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags symbols[] = \
+ { symbol_array, { -1, NULL }}; \
+ trace_print_symbols_seq(p, value, symbols); \
+ })
+
+#undef __print_flags_u64
+#undef __print_symbolic_u64
+#if BITS_PER_LONG == 32
+#define __print_flags_u64(flag, delim, flag_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 __flags[] = \
+ { flag_array, { -1, NULL } }; \
+ trace_print_flags_seq_u64(p, delim, flag, __flags); \
+ })
+
+#define __print_symbolic_u64(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 symbols[] = \
+ { symbol_array, { -1, NULL } }; \
+ trace_print_symbols_seq_u64(p, value, symbols); \
+ })
+#else
+#define __print_flags_u64(flag, delim, flag_array...) \
+ __print_flags(flag, delim, flag_array)
+
+#define __print_symbolic_u64(value, symbol_array...) \
+ __print_symbolic(value, symbol_array)
+#endif
+
+#undef __print_hex
+#define __print_hex(buf, buf_len) \
+ trace_print_hex_seq(p, buf, buf_len, false)
+
+#undef __print_hex_str
+#define __print_hex_str(buf, buf_len) \
+ trace_print_hex_seq(p, buf, buf_len, true)
+
+#undef __print_array
+#define __print_array(array, count, el_size) \
+ ({ \
+ BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
+ el_size != 4 && el_size != 8); \
+ trace_print_array_seq(p, array, count, el_size); \
+ })
+
+#undef __print_hex_dump
+#define __print_hex_dump(prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii) \
+ trace_print_hex_dump_seq(p, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii)
+
+#undef __print_ns_to_secs
+#define __print_ns_to_secs(value) \
+ ({ \
+ u64 ____val = (u64)(value); \
+ do_div(____val, NSEC_PER_SEC); \
+ ____val; \
+ })
+
+#undef __print_ns_without_secs
+#define __print_ns_without_secs(value) \
+ ({ \
+ u64 ____val = (u64)(value); \
+ (u32) do_div(____val, NSEC_PER_SEC); \
+ })
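Stage 3 is the decode side used by TP_printk(): __get_dynamic_array() adds the low 16 bits of the location word to the entry pointer, and __get_dynamic_array_len() reads the length from the high 16 bits. A small standalone C sketch of just that packing arithmetic (the kernel macros operate on __entry; the numbers here are made up):

#include <stdint.h>
#include <stdio.h>

/* Pack as stage 5 does (offset in the low 16 bits, length in the high 16),
 * unpack as __get_dynamic_array()/__get_dynamic_array_len() do in stage 3.
 */
static uint32_t pack_data_loc(uint16_t offset, uint16_t len)
{
	return (uint32_t)offset | ((uint32_t)len << 16);
}

int main(void)
{
	uint32_t loc = pack_data_loc(24, 16);	/* e.g. a 16-byte sockaddr at +24 */

	printf("offset=%u len=%u\n", loc & 0xffff, (loc >> 16) & 0xffff);
	return 0;
}
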
diff --git a/include/trace/stages/stage4_defines.h b/include/trace/stages/stage4_defines.h
new file mode 100644
index 000000000000..e80cdc397a43
--- /dev/null
+++ b/include/trace/stages/stage4_defines.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 4 definitions for creating trace events */
+
+#undef __field_ext
+#define __field_ext(_type, _item, _filter_type) { \
+ .type = #_type, .name = #_item, \
+ .size = sizeof(_type), .align = __alignof__(_type), \
+ .is_signed = is_signed_type(_type), .filter_type = _filter_type },
+
+#undef __field_struct_ext
+#define __field_struct_ext(_type, _item, _filter_type) { \
+ .type = #_type, .name = #_item, \
+ .size = sizeof(_type), .align = __alignof__(_type), \
+ 0, .filter_type = _filter_type },
+
+#undef __field
+#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
+
+#undef __field_struct
+#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
+
+#undef __array
+#define __array(_type, _item, _len) { \
+ .type = #_type"["__stringify(_len)"]", .name = #_item, \
+ .size = sizeof(_type[_len]), .align = __alignof__(_type), \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
+
+#undef __dynamic_array
+#define __dynamic_array(_type, _item, _len) { \
+ .type = "__data_loc " #_type "[]", .name = #_item, \
+ .size = 4, .align = 4, \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(_type, _item, _len) { \
+ .type = "__rel_loc " #_type "[]", .name = #_item, \
+ .size = 4, .align = 4, \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
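Stage 4 emits the struct trace_event_fields array that the format and filter code consumes (trace_event_fields_##call, as seen in the trace_events.h hunk below). A sketch of what two of the illustrative fields from above would expand to:

/* Sketch of stage 4 output for __field(unsigned long, flags) and __sockaddr(addr, salen). */
static struct trace_event_fields trace_event_fields_example[] = {
	{ .type = "unsigned long", .name = "flags",
	  .size = sizeof(unsigned long), .align = __alignof__(unsigned long),
	  .is_signed = is_signed_type(unsigned long), .filter_type = FILTER_OTHER },
	{ .type = "__data_loc u8[]", .name = "addr",
	  .size = 4, .align = 4,
	  .is_signed = is_signed_type(u8), .filter_type = FILTER_OTHER },
	{}
};
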
diff --git a/include/trace/stages/stage5_defines.h b/include/trace/stages/stage5_defines.h
new file mode 100644
index 000000000000..7ee5931300e6
--- /dev/null
+++ b/include/trace/stages/stage5_defines.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 5 definitions for creating trace events */
+
+/*
+ * remember the offset of each array from the beginning of the event.
+ */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __item_length = (len) * sizeof(type); \
+ __data_offsets->item = __data_size + \
+ offsetof(typeof(*entry), __data); \
+ __data_offsets->item |= __item_length << 16; \
+ __data_size += __item_length;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, \
+ strlen((src) ? (const char *)(src) : "(null)") + 1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) \
+ __item_length = (len) * sizeof(type); \
+ __data_offsets->item = __data_size + \
+ offsetof(typeof(*entry), __data) - \
+ offsetof(typeof(*entry), __rel_loc_##item) - \
+ sizeof(u32); \
+ __data_offsets->item |= __item_length << 16; \
+ __data_size += __item_length;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, \
+ strlen((src) ? (const char *)(src) : "(null)") + 1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1)
+/*
+ * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
+ * num_possible_cpus().
+ */
+#define __bitmask_size_in_bytes_raw(nr_bits) \
+ (((nr_bits) + 7) / 8)
+
+#define __bitmask_size_in_longs(nr_bits) \
+ ((__bitmask_size_in_bytes_raw(nr_bits) + \
+ ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
+
+/*
+ * __bitmask_size_in_bytes is the number of bytes needed to hold
+ * num_possible_cpus() padded out to the nearest long. This is what
+ * is saved in the buffer, just to be consistent.
+ */
+#define __bitmask_size_in_bytes(nr_bits) \
+ (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \
+ __bitmask_size_in_longs(nr_bits))
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, \
+ __bitmask_size_in_longs(nr_bits))
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
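Stage 5 computes the per-call dynamic data size; the bitmask helpers round the bitmap up to whole longs so the stored layout matches what __assign_bitmask() copies in stage 6. A tiny userspace evaluation of that rounding, with the helper definitions copied from above (BITS_PER_LONG fixed to 64 only for this sketch; the kernel takes it from the architecture):

#include <stdio.h>

#define BITS_PER_LONG 64
#define __bitmask_size_in_bytes_raw(nr_bits)	(((nr_bits) + 7) / 8)
#define __bitmask_size_in_longs(nr_bits)			\
	((__bitmask_size_in_bytes_raw(nr_bits) +		\
	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
#define __bitmask_size_in_bytes(nr_bits)			\
	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))

int main(void)
{
	/* 8 bits   -> 1 raw byte   -> 1 long  -> 8 bytes stored
	 * 128 bits -> 16 raw bytes -> 2 longs -> 16 bytes stored
	 */
	printf("%d %d\n", __bitmask_size_in_bytes(8), __bitmask_size_in_bytes(128));
	return 0;
}
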
diff --git a/include/trace/stages/stage6_defines.h b/include/trace/stages/stage6_defines.h
new file mode 100644
index 000000000000..e1724f73594b
--- /dev/null
+++ b/include/trace/stages/stage6_defines.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 6 definitions for creating trace events */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __string_len
+#define __string_len(item, src, len) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __assign_str_len
+#define __assign_str_len(dst, src, len) \
+ do { \
+ memcpy(__get_str(dst), (src), (len)); \
+ __get_str(dst)[len] = '\0'; \
+ } while(0)
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits) \
+ memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef __sockaddr
+#define __sockaddr(field, len) __dynamic_array(u8, field, len)
+
+#undef __get_sockaddr
+#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
+
+#undef __assign_sockaddr
+#define __assign_sockaddr(dest, src, len) \
+ memcpy(__get_dynamic_array(dest), src, len)
+
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) \
+ __entry->__rel_loc_##item = __data_offsets.item;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __assign_rel_str
+#define __assign_rel_str(dst, src) \
+ strcpy(__get_rel_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __assign_rel_str_len
+#define __assign_rel_str_len(dst, src, len) \
+ do { \
+ memcpy(__get_rel_str(dst), (src), (len)); \
+ __get_rel_str(dst)[len] = '\0'; \
+ } while (0)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
+
+#undef __assign_rel_bitmask
+#define __assign_rel_bitmask(dst, src, nr_bits) \
+ memcpy(__get_rel_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
+
+#undef __get_rel_sockaddr
+#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+
+#undef __assign_rel_sockaddr
+#define __assign_rel_sockaddr(dest, src, len) \
+ memcpy(__get_rel_dynamic_array(dest), src, len)
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
diff --git a/include/trace/stages/stage7_defines.h b/include/trace/stages/stage7_defines.h
new file mode 100644
index 000000000000..8a7ec24c246d
--- /dev/null
+++ b/include/trace/stages/stage7_defines.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 7 definitions for creating trace events */
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __print_hex_str
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __get_sockaddr
+#undef __get_rel_dynamic_array
+#undef __get_rel_dynamic_array_len
+#undef __get_rel_str
+#undef __get_rel_bitmask
+#undef __get_rel_sockaddr
+#undef __print_array
+#undef __print_hex_dump
+
+/*
+ * The below is not executed in the kernel. It is only what is
+ * displayed in the print format for userspace to parse.
+ */
+#undef __print_ns_to_secs
+#define __print_ns_to_secs(val) (val) / 1000000000UL
+
+#undef __print_ns_without_secs
+#define __print_ns_without_secs(val) (val) % 1000000000UL
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
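Stage 7 output never runs in the kernel: __entry is redefined to REC, the accessor helpers are #undef'd so they survive literally, and TP_printk() is stringified into the print_fmt string that userspace parsers read back from the event's format file. A rough sketch of the transformation for a simple printk, with field names modelled on the sunrpc events above:

/* Written in the trace header:
 *	TP_printk("pid=%d wakeup-us=%lu", __entry->pid, __entry->wakeup)
 *
 * After stage 7 (format quoted, arguments stringified, __entry -> REC),
 * roughly what ends up in print_fmt:
 *	"pid=%d wakeup-us=%lu", REC->pid, REC->wakeup
 */
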
diff --git a/include/trace/trace_custom_events.h b/include/trace/trace_custom_events.h
new file mode 100644
index 000000000000..b567c7202339
--- /dev/null
+++ b/include/trace/trace_custom_events.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This is similar to the trace_events.h file, but is to only
+ * create custom trace events to be attached to existing tracepoints.
+ * Whereas the TRACE_EVENT() macro (from trace_events.h) will create
+ * both the trace event and the tracepoint it will attach the event to,
+ * TRACE_CUSTOM_EVENT() is to create only a custom version of an existing
+ * trace event (created by TRACE_EVENT() or DEFINE_EVENT()), and will
+ * be placed in the "custom" system.
+ */
+
+#include <linux/trace_events.h>
+
+/* All custom events are placed in the custom group */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM custom
+
+#ifndef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR TRACE_SYSTEM
+#endif
+
+/* The init stage creates the system string and enum mappings */
+
+#include "stages/init.h"
+
+#undef TRACE_CUSTOM_EVENT
+#define TRACE_CUSTOM_EVENT(name, proto, args, tstruct, assign, print) \
+ DECLARE_CUSTOM_EVENT_CLASS(name, \
+ PARAMS(proto), \
+ PARAMS(args), \
+ PARAMS(tstruct), \
+ PARAMS(assign), \
+ PARAMS(print)); \
+ DEFINE_CUSTOM_EVENT(name, name, PARAMS(proto), PARAMS(args));
+
+/* Stage 1 creates the structure of the recorded event layout */
+
+#include "stages/stage1_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
+ struct trace_custom_event_raw_##name { \
+ struct trace_entry ent; \
+ tstruct \
+ char __data[]; \
+ }; \
+ \
+ static struct trace_event_class custom_event_class_##name;
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args) \
+ static struct trace_event_call __used \
+ __attribute__((__aligned__(4))) custom_event_##name
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 2 creates the custom class */
+
+#include "stages/stage2_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ struct trace_custom_event_data_offsets_##call { \
+ tstruct; \
+ };
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 3 creates the way to print the custom event */
+
+#include "stages/stage3_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace enum print_line_t \
+trace_custom_raw_output_##call(struct trace_iterator *iter, int flags, \
+ struct trace_event *trace_event) \
+{ \
+ struct trace_seq *s = &iter->seq; \
+ struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
+ struct trace_custom_event_raw_##call *field; \
+ int ret; \
+ \
+ field = (typeof(field))iter->ent; \
+ \
+ ret = trace_raw_output_prep(iter, trace_event); \
+ if (ret != TRACE_TYPE_HANDLED) \
+ return ret; \
+ \
+ trace_event_printf(iter, print); \
+ \
+ return trace_handle_return(s); \
+} \
+static struct trace_event_functions trace_custom_event_type_funcs_##call = { \
+ .trace = trace_custom_raw_output_##call, \
+};
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 4 creates the offset layout for the fields */
+
+#include "stages/stage4_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, func, print) \
+static struct trace_event_fields trace_custom_event_fields_##call[] = { \
+ tstruct \
+ {} };
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 5 creates the helper function for dynamic fields */
+
+#include "stages/stage5_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static inline notrace int trace_custom_event_get_offsets_##call( \
+ struct trace_custom_event_data_offsets_##call *__data_offsets, proto) \
+{ \
+ int __data_size = 0; \
+ int __maybe_unused __item_length; \
+ struct trace_custom_event_raw_##call __maybe_unused *entry; \
+ \
+ tstruct; \
+ \
+ return __data_size; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 6 creates the probe function that records the event */
+
+#include "stages/stage6_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ \
+static notrace void \
+trace_custom_event_raw_event_##call(void *__data, proto) \
+{ \
+ struct trace_event_file *trace_file = __data; \
+ struct trace_custom_event_data_offsets_##call __maybe_unused __data_offsets; \
+ struct trace_event_buffer fbuffer; \
+ struct trace_custom_event_raw_##call *entry; \
+ int __data_size; \
+ \
+ if (trace_trigger_soft_disabled(trace_file)) \
+ return; \
+ \
+ __data_size = trace_custom_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+ sizeof(*entry) + __data_size); \
+ \
+ if (!entry) \
+ return; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ trace_event_buffer_commit(&fbuffer); \
+}
+/*
+ * The ftrace_test_custom_probe is compiled out; it is only here as a build-time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, call, proto, args) \
+static inline void ftrace_test_custom_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(trace_custom_event_raw_event_##template); \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 7 creates the actual class and event structure for the custom event */
+
+#include "stages/stage7_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static char custom_print_fmt_##call[] = print; \
+static struct trace_event_class __used __refdata custom_event_class_##call = { \
+ .system = TRACE_SYSTEM_STRING, \
+ .fields_array = trace_custom_event_fields_##call, \
+ .fields = LIST_HEAD_INIT(custom_event_class_##call.fields),\
+ .raw_init = trace_event_raw_init, \
+ .probe = trace_custom_event_raw_event_##call, \
+ .reg = trace_event_reg, \
+};
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, call, proto, args) \
+ \
+static struct trace_event_call __used custom_event_##call = { \
+ .name = #call, \
+ .class = &custom_event_class_##template, \
+ .event.funcs = &trace_custom_event_type_funcs_##template, \
+ .print_fmt = custom_print_fmt_##template, \
+ .flags = TRACE_EVENT_FL_CUSTOM, \
+}; \
+static inline int trace_custom_event_##call##_update(struct tracepoint *tp) \
+{ \
+ if (tp->name && strcmp(tp->name, #call) == 0) { \
+ custom_event_##call.tp = tp; \
+ custom_event_##call.flags = TRACE_EVENT_FL_TRACEPOINT; \
+ return 1; \
+ } \
+ return 0; \
+} \
+static struct trace_event_call __used \
+__section("_ftrace_events") *__custom_event_##call = &custom_event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
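trace_custom_events.h runs the same seven stages but produces an event flagged TRACE_EVENT_FL_CUSTOM in the "custom" system; the generated trace_custom_event_<name>_update() binds it to an existing tracepoint whose name matches, instead of creating a new tracepoint. A hedged sketch of the header-side usage, following the TRACE_CUSTOM_EVENT() signature above; the tracepoint, its prototype, and the task argument are assumed for illustration and must match an event already defined elsewhere with TRACE_EVENT()/DEFINE_EVENT():

/* Sketch: assumes an existing tracepoint declared elsewhere as
 *	TRACE_EVENT(example_exec, TP_PROTO(struct task_struct *task), ...);
 * the custom event must use the identical prototype.
 */
TRACE_CUSTOM_EVENT(example_exec,

	TP_PROTO(struct task_struct *task),

	TP_ARGS(task),

	/* Record only what this custom view needs. */
	TP_STRUCT__entry(
		__field(pid_t, pid)
	),

	TP_fast_assign(
		__entry->pid = task->pid;
	),

	TP_printk("pid=%d", __entry->pid)
);

The custom event records only its own field set, so it can present a trimmed view of an existing tracepoint without modifying the original event.
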
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 3d29919045af..8a8cd66cc6d5 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -24,42 +24,7 @@
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
#endif
-#define __app__(x, y) str__##x##y
-#define __app(x, y) __app__(x, y)
-
-#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)
-
-#define TRACE_MAKE_SYSTEM_STR() \
- static const char TRACE_SYSTEM_STRING[] = \
- __stringify(TRACE_SYSTEM)
-
-TRACE_MAKE_SYSTEM_STR();
-
-#undef TRACE_DEFINE_ENUM
-#define TRACE_DEFINE_ENUM(a) \
- static struct trace_eval_map __used __initdata \
- __##TRACE_SYSTEM##_##a = \
- { \
- .system = TRACE_SYSTEM_STRING, \
- .eval_string = #a, \
- .eval_value = a \
- }; \
- static struct trace_eval_map __used \
- __section("_ftrace_eval_map") \
- *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
-
-#undef TRACE_DEFINE_SIZEOF
-#define TRACE_DEFINE_SIZEOF(a) \
- static struct trace_eval_map __used __initdata \
- __##TRACE_SYSTEM##_##a = \
- { \
- .system = TRACE_SYSTEM_STRING, \
- .eval_string = "sizeof(" #a ")", \
- .eval_value = sizeof(a) \
- }; \
- static struct trace_eval_map __used \
- __section("_ftrace_eval_map") \
- *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
+#include "stages/init.h"
/*
* DECLARE_EVENT_CLASS can be used to add a generic function
@@ -80,48 +45,7 @@ TRACE_MAKE_SYSTEM_STR();
PARAMS(print)); \
DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
-
-#undef __field
-#define __field(type, item) type item;
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type) type item;
-
-#undef __field_struct
-#define __field_struct(type, item) type item;
-
-#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type) type item;
-
-#undef __array
-#define __array(type, item, len) type item[len];
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) u32 __data_loc_##item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __string_len
-#define __string_len(item, src, len) __dynamic_array(char, item, -1)
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
-
-#undef __rel_dynamic_array
-#define __rel_dynamic_array(type, item, len) u32 __rel_loc_##item;
-
-#undef __rel_string
-#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
-
-#undef __rel_string_len
-#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
-
-#undef __rel_bitmask
-#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(char, item, -1)
-
-#undef TP_STRUCT__entry
-#define TP_STRUCT__entry(args...) args
+#include "stages/stage1_defines.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
@@ -185,50 +109,7 @@ TRACE_MAKE_SYSTEM_STR();
* The size of an array is also encoded, in the higher 16 bits of <item>.
*/
-#undef TRACE_DEFINE_ENUM
-#define TRACE_DEFINE_ENUM(a)
-
-#undef TRACE_DEFINE_SIZEOF
-#define TRACE_DEFINE_SIZEOF(a)
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) u32 item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __string_len
-#define __string_len(item, src, len) __dynamic_array(char, item, -1)
-
-#undef __rel_dynamic_array
-#define __rel_dynamic_array(type, item, len) u32 item;
-
-#undef __rel_string
-#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
-
-#undef __rel_string_len
-#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
-
-#undef __rel_bitmask
-#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+#include "stages/stage2_defines.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -300,131 +181,7 @@ TRACE_MAKE_SYSTEM_STR();
* in binary.
*/
-#undef __entry
-#define __entry field
-
-#undef TP_printk
-#define TP_printk(fmt, args...) fmt "\n", args
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) ((char *)__get_dynamic_array(field))
-
-#undef __get_rel_dynamic_array
-#define __get_rel_dynamic_array(field) \
- ((void *)__entry + \
- offsetof(typeof(*__entry), __rel_loc_##field) + \
- sizeof(__entry->__rel_loc_##field) + \
- (__entry->__rel_loc_##field & 0xffff))
-
-#undef __get_rel_dynamic_array_len
-#define __get_rel_dynamic_array_len(field) \
- ((__entry->__rel_loc_##field >> 16) & 0xffff)
-
-#undef __get_rel_str
-#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
-
-#undef __get_bitmask
-#define __get_bitmask(field) \
- ({ \
- void *__bitmask = __get_dynamic_array(field); \
- unsigned int __bitmask_size; \
- __bitmask_size = __get_dynamic_array_len(field); \
- trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
- })
-
-#undef __get_rel_bitmask
-#define __get_rel_bitmask(field) \
- ({ \
- void *__bitmask = __get_rel_dynamic_array(field); \
- unsigned int __bitmask_size; \
- __bitmask_size = __get_rel_dynamic_array_len(field); \
- trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
- })
-
-#undef __print_flags
-#define __print_flags(flag, delim, flag_array...) \
- ({ \
- static const struct trace_print_flags __flags[] = \
- { flag_array, { -1, NULL }}; \
- trace_print_flags_seq(p, delim, flag, __flags); \
- })
-
-#undef __print_symbolic
-#define __print_symbolic(value, symbol_array...) \
- ({ \
- static const struct trace_print_flags symbols[] = \
- { symbol_array, { -1, NULL }}; \
- trace_print_symbols_seq(p, value, symbols); \
- })
-
-#undef __print_flags_u64
-#undef __print_symbolic_u64
-#if BITS_PER_LONG == 32
-#define __print_flags_u64(flag, delim, flag_array...) \
- ({ \
- static const struct trace_print_flags_u64 __flags[] = \
- { flag_array, { -1, NULL } }; \
- trace_print_flags_seq_u64(p, delim, flag, __flags); \
- })
-
-#define __print_symbolic_u64(value, symbol_array...) \
- ({ \
- static const struct trace_print_flags_u64 symbols[] = \
- { symbol_array, { -1, NULL } }; \
- trace_print_symbols_seq_u64(p, value, symbols); \
- })
-#else
-#define __print_flags_u64(flag, delim, flag_array...) \
- __print_flags(flag, delim, flag_array)
-
-#define __print_symbolic_u64(value, symbol_array...) \
- __print_symbolic(value, symbol_array)
-#endif
-
-#undef __print_hex
-#define __print_hex(buf, buf_len) \
- trace_print_hex_seq(p, buf, buf_len, false)
-
-#undef __print_hex_str
-#define __print_hex_str(buf, buf_len) \
- trace_print_hex_seq(p, buf, buf_len, true)
-
-#undef __print_array
-#define __print_array(array, count, el_size) \
- ({ \
- BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
- el_size != 4 && el_size != 8); \
- trace_print_array_seq(p, array, count, el_size); \
- })
-
-#undef __print_hex_dump
-#define __print_hex_dump(prefix_str, prefix_type, \
- rowsize, groupsize, buf, len, ascii) \
- trace_print_hex_dump_seq(p, prefix_str, prefix_type, \
- rowsize, groupsize, buf, len, ascii)
-
-#undef __print_ns_to_secs
-#define __print_ns_to_secs(value) \
- ({ \
- u64 ____val = (u64)(value); \
- do_div(____val, NSEC_PER_SEC); \
- ____val; \
- })
-
-#undef __print_ns_without_secs
-#define __print_ns_without_secs(value) \
- ({ \
- u64 ____val = (u64)(value); \
- (u32) do_div(____val, NSEC_PER_SEC); \
- })
+#include "stages/stage3_defines.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -479,59 +236,7 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#undef __field_ext
-#define __field_ext(_type, _item, _filter_type) { \
- .type = #_type, .name = #_item, \
- .size = sizeof(_type), .align = __alignof__(_type), \
- .is_signed = is_signed_type(_type), .filter_type = _filter_type },
-
-#undef __field_struct_ext
-#define __field_struct_ext(_type, _item, _filter_type) { \
- .type = #_type, .name = #_item, \
- .size = sizeof(_type), .align = __alignof__(_type), \
- 0, .filter_type = _filter_type },
-
-#undef __field
-#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
-
-#undef __field_struct
-#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
-
-#undef __array
-#define __array(_type, _item, _len) { \
- .type = #_type"["__stringify(_len)"]", .name = #_item, \
- .size = sizeof(_type[_len]), .align = __alignof__(_type), \
- .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
-
-#undef __dynamic_array
-#define __dynamic_array(_type, _item, _len) { \
- .type = "__data_loc " #_type "[]", .name = #_item, \
- .size = 4, .align = 4, \
- .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __string_len
-#define __string_len(item, src, len) __dynamic_array(char, item, -1)
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __rel_dynamic_array
-#define __rel_dynamic_array(_type, _item, _len) { \
- .type = "__rel_loc " #_type "[]", .name = #_item, \
- .size = 4, .align = 4, \
- .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
-
-#undef __rel_string
-#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
-
-#undef __rel_string_len
-#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
-
-#undef __rel_bitmask
-#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+#include "stages/stage4_defines.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
@@ -544,85 +249,7 @@ static struct trace_event_fields trace_event_fields_##call[] = { \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-/*
- * remember the offset of each array from the beginning of the event.
- */
-
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- __item_length = (len) * sizeof(type); \
- __data_offsets->item = __data_size + \
- offsetof(typeof(*entry), __data); \
- __data_offsets->item |= __item_length << 16; \
- __data_size += __item_length;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, \
- strlen((src) ? (const char *)(src) : "(null)") + 1)
-
-#undef __string_len
-#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)
-
-#undef __rel_dynamic_array
-#define __rel_dynamic_array(type, item, len) \
- __item_length = (len) * sizeof(type); \
- __data_offsets->item = __data_size + \
- offsetof(typeof(*entry), __data) - \
- offsetof(typeof(*entry), __rel_loc_##item) - \
- sizeof(u32); \
- __data_offsets->item |= __item_length << 16; \
- __data_size += __item_length;
-
-#undef __rel_string
-#define __rel_string(item, src) __rel_dynamic_array(char, item, \
- strlen((src) ? (const char *)(src) : "(null)") + 1)
-
-#undef __rel_string_len
-#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1)
-/*
- * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
- * num_possible_cpus().
- */
-#define __bitmask_size_in_bytes_raw(nr_bits) \
- (((nr_bits) + 7) / 8)
-
-#define __bitmask_size_in_longs(nr_bits) \
- ((__bitmask_size_in_bytes_raw(nr_bits) + \
- ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
-
-/*
- * __bitmask_size_in_bytes is the number of bytes needed to hold
- * num_possible_cpus() padded out to the nearest long. This is what
- * is saved in the buffer, just to be consistent.
- */
-#define __bitmask_size_in_bytes(nr_bits) \
- (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \
- __bitmask_size_in_longs(nr_bits))
-
-#undef __rel_bitmask
-#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, \
- __bitmask_size_in_longs(nr_bits))
+#include "stages/stage5_defines.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -745,88 +372,7 @@ static inline notrace int trace_event_get_offsets_##call( \
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- __entry->__data_loc_##item = __data_offsets.item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __string_len
-#define __string_len(item, src, len) __dynamic_array(char, item, -1)
-
-#undef __assign_str
-#define __assign_str(dst, src) \
- strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __assign_str_len
-#define __assign_str_len(dst, src, len) \
- do { \
- memcpy(__get_str(dst), (src), (len)); \
- __get_str(dst)[len] = '\0'; \
- } while(0)
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __assign_bitmask
-#define __assign_bitmask(dst, src, nr_bits) \
- memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
-
-#undef __rel_dynamic_array
-#define __rel_dynamic_array(type, item, len) \
- __entry->__rel_loc_##item = __data_offsets.item;
-
-#undef __rel_string
-#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
-
-#undef __rel_string_len
-#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
-
-#undef __assign_rel_str
-#define __assign_rel_str(dst, src) \
- strcpy(__get_rel_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __assign_rel_str_len
-#define __assign_rel_str_len(dst, src, len) \
- do { \
- memcpy(__get_rel_str(dst), (src), (len)); \
- __get_rel_str(dst)[len] = '\0'; \
- } while (0)
-
-#undef __rel_bitmask
-#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
-
-#undef __get_rel_bitmask
-#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __assign_rel_bitmask
-#define __assign_rel_bitmask(dst, src, nr_bits) \
- memcpy(__get_rel_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef __perf_count
-#define __perf_count(c) (c)
-
-#undef __perf_task
-#define __perf_task(t) (t)
+#include "stages/stage6_defines.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -872,36 +418,7 @@ static inline void ftrace_test_probe_##call(void) \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#undef __entry
-#define __entry REC
-
-#undef __print_flags
-#undef __print_symbolic
-#undef __print_hex
-#undef __print_hex_str
-#undef __get_dynamic_array
-#undef __get_dynamic_array_len
-#undef __get_str
-#undef __get_bitmask
-#undef __get_rel_dynamic_array
-#undef __get_rel_dynamic_array_len
-#undef __get_rel_str
-#undef __get_rel_bitmask
-#undef __print_array
-#undef __print_hex_dump
-
-/*
- * The below is not executed in the kernel. It is only what is
- * displayed in the print format for userspace to parse.
- */
-#undef __print_ns_to_secs
-#define __print_ns_to_secs(val) (val) / 1000000000UL
-
-#undef __print_ns_without_secs
-#define __print_ns_without_secs(val) (val) % 1000000000UL
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+#include "stages/stage7_defines.h"
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \