authorLinus Torvalds <torvalds@linux-foundation.org>2020-02-06 08:12:11 +0100
committerLinus Torvalds <torvalds@linux-foundation.org>2020-02-06 08:12:11 +0100
commite310396bb8d7db977a0e10ef7b5040e98b89c34c (patch)
tree461357ada35c637292884f0af1dc9fc41c0203ff
parentMerge tag 'io_uring-5.6-2020-02-05' of git://git.kernel.dk/linux-block (diff)
parentbootconfig: Show the number of nodes on boot message (diff)
downloadlinux-e310396bb8d7db977a0e10ef7b5040e98b89c34c.tar.xz
linux-e310396bb8d7db977a0e10ef7b5040e98b89c34c.zip
Merge tag 'trace-v5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:

 - Added new "bootconfig". This looks for a file appended to initrd to
   add boot config options, and has been discussed thoroughly at Linux
   Plumbers. Very useful for adding kprobes at bootup. Only enabled if
   "bootconfig" is on the real kernel command line.

 - Created dynamic event creation. Merges common code between creating
   synthetic events and kprobe events.

 - Rename perf "ring_buffer" structure to "perf_buffer"

 - Rename ftrace "ring_buffer" structure to "trace_buffer". Had to
   rename existing "trace_buffer" to "array_buffer".

 - Allow trace_printk() to work within (some) tracing code.

 - Sorted tracing configs to be a little better organized

 - Fixed bug where ftrace_graph hash was not being protected properly

 - Various other small fixes and clean ups

* tag 'trace-v5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (88 commits)
  bootconfig: Show the number of nodes on boot message
  tools/bootconfig: Show the number of bootconfig nodes
  bootconfig: Add more parse error messages
  bootconfig: Use bootconfig instead of boot config
  ftrace: Protect ftrace_graph_hash with ftrace_sync
  ftrace: Add comment to why rcu_dereference_sched() is open coded
  tracing: Annotate ftrace_graph_notrace_hash pointer with __rcu
  tracing: Annotate ftrace_graph_hash pointer with __rcu
  bootconfig: Only load bootconfig if "bootconfig" is on the kernel cmdline
  tracing: Use seq_buf for building dynevent_cmd string
  tracing: Remove useless code in dynevent_arg_pair_add()
  tracing: Remove check_arg() callbacks from dynevent args
  tracing: Consolidate some synth_event_trace code
  tracing: Fix now invalid var_ref_vals assumption in trace action
  tracing: Change trace_boot to use synth_event interface
  tracing: Move tracing selftests to bottom of menu
  tracing: Move mmio tracer config up with the other tracers
  tracing: Move tracing test module configs together
  tracing: Move all function tracing configs together
  tracing: Documentation for in-kernel synthetic event API
  ...
-rw-r--r--Documentation/admin-guide/bootconfig.rst190
-rw-r--r--Documentation/admin-guide/index.rst1
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt6
-rw-r--r--Documentation/trace/boottime-trace.rst184
-rw-r--r--Documentation/trace/events.rst515
-rw-r--r--Documentation/trace/index.rst1
-rw-r--r--Documentation/trace/kprobetrace.rst1
-rw-r--r--MAINTAINERS9
-rw-r--r--drivers/oprofile/cpu_buffer.c2
-rw-r--r--fs/proc/Makefile1
-rw-r--r--fs/proc/bootconfig.c89
-rw-r--r--include/linux/bootconfig.h224
-rw-r--r--include/linux/perf_event.h6
-rw-r--r--include/linux/ring_buffer.h110
-rw-r--r--include/linux/trace_events.h131
-rw-r--r--include/trace/trace_events.h11
-rw-r--r--init/Kconfig14
-rw-r--r--init/main.c229
-rw-r--r--kernel/events/core.c42
-rw-r--r--kernel/events/internal.h34
-rw-r--r--kernel/events/ring_buffer.c54
-rw-r--r--kernel/trace/Kconfig360
-rw-r--r--kernel/trace/Makefile3
-rw-r--r--kernel/trace/blktrace.c8
-rw-r--r--kernel/trace/ftrace.c37
-rw-r--r--kernel/trace/kprobe_event_gen_test.c225
-rw-r--r--kernel/trace/ring_buffer.c135
-rw-r--r--kernel/trace/ring_buffer_benchmark.c2
-rw-r--r--kernel/trace/synth_event_gen_test.c523
-rw-r--r--kernel/trace/trace.c453
-rw-r--r--kernel/trace/trace.h98
-rw-r--r--kernel/trace/trace_boot.c334
-rw-r--r--kernel/trace/trace_branch.c6
-rw-r--r--kernel/trace/trace_dynevent.c212
-rw-r--r--kernel/trace/trace_dynevent.h32
-rw-r--r--kernel/trace/trace_entries.h2
-rw-r--r--kernel/trace/trace_events.c106
-rw-r--r--kernel/trace/trace_events_hist.c1030
-rw-r--r--kernel/trace/trace_events_trigger.c7
-rw-r--r--kernel/trace/trace_functions.c8
-rw-r--r--kernel/trace/trace_functions_graph.c14
-rw-r--r--kernel/trace/trace_hwlat.c2
-rw-r--r--kernel/trace/trace_irqsoff.c8
-rw-r--r--kernel/trace/trace_kdb.c8
-rw-r--r--kernel/trace/trace_kprobe.c238
-rw-r--r--kernel/trace/trace_mmiotrace.c12
-rw-r--r--kernel/trace/trace_output.c2
-rw-r--r--kernel/trace/trace_sched_switch.c4
-rw-r--r--kernel/trace/trace_sched_wakeup.c20
-rw-r--r--kernel/trace/trace_selftest.c26
-rw-r--r--kernel/trace/trace_seq.c3
-rw-r--r--kernel/trace/trace_stat.c31
-rw-r--r--kernel/trace/trace_syscalls.c8
-rw-r--r--kernel/trace/trace_uprobe.c2
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Makefile2
-rw-r--r--lib/bootconfig.c814
-rw-r--r--tools/Makefile11
-rw-r--r--tools/bootconfig/.gitignore1
-rw-r--r--tools/bootconfig/Makefile23
-rw-r--r--tools/bootconfig/include/linux/bootconfig.h7
-rw-r--r--tools/bootconfig/include/linux/bug.h12
-rw-r--r--tools/bootconfig/include/linux/ctype.h7
-rw-r--r--tools/bootconfig/include/linux/errno.h7
-rw-r--r--tools/bootconfig/include/linux/kernel.h18
-rw-r--r--tools/bootconfig/include/linux/printk.h17
-rw-r--r--tools/bootconfig/include/linux/string.h32
-rw-r--r--tools/bootconfig/main.c354
-rw-r--r--tools/bootconfig/samples/bad-array-space-comment.bconf5
-rw-r--r--tools/bootconfig/samples/bad-array.bconf2
-rw-r--r--tools/bootconfig/samples/bad-dotword.bconf4
-rw-r--r--tools/bootconfig/samples/bad-empty.bconf1
-rw-r--r--tools/bootconfig/samples/bad-keyerror.bconf2
-rw-r--r--tools/bootconfig/samples/bad-longkey.bconf1
-rw-r--r--tools/bootconfig/samples/bad-manywords.bconf1
-rw-r--r--tools/bootconfig/samples/bad-no-keyword.bconf2
-rw-r--r--tools/bootconfig/samples/bad-nonprintable.bconf2
-rw-r--r--tools/bootconfig/samples/bad-spaceword.bconf2
-rw-r--r--tools/bootconfig/samples/bad-tree.bconf5
-rw-r--r--tools/bootconfig/samples/bad-value.bconf3
-rw-r--r--tools/bootconfig/samples/escaped.bconf3
-rw-r--r--tools/bootconfig/samples/good-array-space-comment.bconf4
-rw-r--r--tools/bootconfig/samples/good-comment-after-value.bconf1
-rw-r--r--tools/bootconfig/samples/good-printables.bconf2
-rw-r--r--tools/bootconfig/samples/good-simple.bconf11
-rw-r--r--tools/bootconfig/samples/good-single.bconf4
-rw-r--r--tools/bootconfig/samples/good-space-after-value.bconf1
-rw-r--r--tools/bootconfig/samples/good-tree.bconf12
-rwxr-xr-xtools/bootconfig/test-bootconfig.sh105
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc32
90 files changed, 6490 insertions, 836 deletions
diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst
new file mode 100644
index 000000000000..b342a6796392
--- /dev/null
+++ b/Documentation/admin-guide/bootconfig.rst
@@ -0,0 +1,190 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _bootconfig:
+
+==================
+Boot Configuration
+==================
+
+:Author: Masami Hiramatsu <mhiramat@kernel.org>
+
+Overview
+========
+
+The boot configuration expands the current kernel command line to support
+additional key-value data when booting the kernel in an efficient way.
+This allows administrators to pass a structured-key config file.
+
+Config File Syntax
+==================
+
+The boot config syntax is a simple structured key-value format. Each key
+consists of dot-connected words, and the key and value are connected by
+``=``. The value has to be terminated by a semi-colon (``;``) or a newline
+(``\n``). For array values, the entries are separated by commas (``,``). ::
+
+ KEY[.WORD[...]] = VALUE[, VALUE2[...]][;]
+
+Unlike the kernel command line syntax, spaces are OK around the comma and ``=``.
+
+Each key word must contain only alphabetic characters, numbers, dashes (``-``)
+or underscores (``_``). Each value may contain only printable characters or
+spaces, except for the delimiters: semi-colon (``;``), new-line (``\n``),
+comma (``,``), hash (``#``) and closing brace (``}``).
+
+If you want to use those delimiters in a value, you can use either double-
+quotes (``"VALUE"``) or single-quotes (``'VALUE'``) to quote it. Note that
+you can not escape these quotes.
+
+A key may have no value or an empty value. Such keys are used only for
+checking whether the key exists (like a boolean).
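+
+For example, the following (hypothetical) entries show a quoted value
+containing delimiter characters and a key-only entry::
+
+ # "feature.option" is just an illustrative key name
+ feature.option.description = "value; with, delimiters"
+ feature.option.enable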
+
+Key-Value Syntax
+----------------
+
+The boot config file syntax allows users to merge keys which share the same
+leading words by using braces. For example::
+
+ foo.bar.baz = value1
+ foo.bar.qux.quux = value2
+
+These can also be written as::
+
+ foo.bar {
+ baz = value1
+ qux.quux = value2
+ }
+
+Or, even shorter, as follows::
+
+ foo.bar { baz = value1; qux.quux = value2 }
+
+In both styles, the same key words are automatically merged when the file is
+parsed at boot time, so you can append similar trees or key-values.
+
+Comments
+--------
+
+The config syntax accepts shell-script style comments. Everything from a
+hash ("#") to the end of the line ("\n") is ignored.
+
+::
+
+ # comment line
+ foo = value # value is set to foo.
+ bar = 1, # 1st element
+ 2, # 2nd element
+ 3 # 3rd element
+
+This is parsed as below::
+
+ foo = value
+ bar = 1, 2, 3
+
+Note that you cannot put a comment between a value and its delimiter (``,``
+or ``;``). This means the following config has a syntax error ::
+
+ key = 1 # comment
+ ,2
+
+
+/proc/bootconfig
+================
+
+/proc/bootconfig is a user-space interface to the boot config.
+Unlike /proc/cmdline, this file shows the list in key-value style.
+Each key-value pair is shown on its own line in the following style::
+
+ KEY[.WORDS...] = "[VALUE]"[,"VALUE2"...]
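+
+For example, a (hypothetical) config with a single value and an array value
+would be shown as::
+
+ feature.option.foo = "1"
+ feature.option.bar = "2", "3"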
+
+
+Boot Kernel With a Boot Config
+==============================
+
+The boot configuration file is loaded together with the initrd by appending
+it to the end of the initrd (initramfs) image file. The Linux kernel decodes
+the last part of the initrd image in memory to get the boot configuration
+data.
+Because of this "piggyback" method, there is no need to change or
+update the boot loader or the kernel image itself.
+
+To do this, the Linux kernel provides a "bootconfig" command under
+tools/bootconfig, which allows an administrator to apply the config file to,
+or delete it from, an initrd image. You can build it with the following
+command::
+
+ # make -C tools/bootconfig
+
+To add your boot config file to an initrd image, run bootconfig as below
+(old data is removed automatically if it exists)::
+
+ # tools/bootconfig/bootconfig -a your-config /boot/initrd.img-X.Y.Z
+
+To remove the config from the image, you can use the -d option as below::
+
+ # tools/bootconfig/bootconfig -d /boot/initrd.img-X.Y.Z
+
+Then add "bootconfig" on the normal kernel command line to tell the
+kernel to look for the bootconfig at the end of the initrd file.
+
+Config File Limitation
+======================
+
+Currently the maximum config size is 32KB and the total number of key words
+(not key-value entries) must be under 1024 nodes.
+Note: this limit counts nodes, not entries. An entry consumes at least 2
+nodes (a key word and a value), so theoretically up to 512 key-value pairs
+fit. If keys contain 3 words on average, each entry consumes 4 nodes and the
+file can contain about 256 key-value pairs. In most cases, the number of
+config items will be under 100 entries and the file smaller than 8KB, so this
+should be enough.
+If the number of nodes exceeds 1024, the parser returns an error even if the
+file size is smaller than 32KB.
+Anyway, since the bootconfig command verifies this when appending a boot
+config to the initrd image, users can notice the problem before booting.
+
+
+Bootconfig APIs
+===============
+
+Users can query or loop over key-value pairs; it is also possible to find
+a root (prefix) key node and then find the key-values under that node.
+
+If you have a key string, you can query the value directly with the key
+using xbc_find_value(). If you want to know what keys exist in the boot
+config, you can use xbc_for_each_key_value() to iterate key-value pairs.
+Note that you need to use xbc_array_for_each_value() for accessing
+each array's value, e.g.::
+
+ vnode = NULL;
+ xbc_find_value("key.word", &vnode);
+ if (vnode && xbc_node_is_array(vnode))
+ xbc_array_for_each_value(vnode, value) {
+ printk("%s ", value);
+ }
+
+If you want to focus on keys which have a prefix string, you can use
+xbc_find_node() to find a node by the prefix string, and iterate
+keys under the prefix node with xbc_node_for_each_key_value().
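+
+For illustration, iterating every key-value pair under a prefix node could be
+sketched as below (the prefix string is illustrative and variable
+declarations are omitted, as above)::
+
+ node = xbc_find_node("key.prefix");
+ if (node)
+         xbc_node_for_each_key_value(node, knode, value) {
+                 ...
+         }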
+
+But the most typical usage is to get the named value or the named array
+under a prefix node, as below::
+
+ root = xbc_find_node("key.prefix");
+ value = xbc_node_find_value(root, "option", &vnode);
+ ...
+ xbc_node_for_each_array_value(root, "array-option", value, anode) {
+ ...
+ }
+
+This accesses a value of "key.prefix.option" and an array of
+"key.prefix.array-option".
+
+Locking is not needed, since after initialization the config becomes
+read-only. All data and keys must be copied if you need to modify them.
+
+
+Functions and structures
+========================
+
+.. kernel-doc:: include/linux/bootconfig.h
+.. kernel-doc:: lib/bootconfig.c
+
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 4433f3929481..f1d0ccffbe72 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -64,6 +64,7 @@ configure specific aspects of kernel behavior to your liking.
binderfs
binfmt-misc
blockdev/index
+ bootconfig
braille-console
btmrvl
cgroup-v1/index
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index ddc5ccdd4cd1..dbc22d684627 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -437,6 +437,12 @@
no delay (0).
Format: integer
+ bootconfig [KNL]
+ Extended command line options can be appended to an initrd,
+ and this parameter tells the kernel to look for them.
+
+ See Documentation/admin-guide/bootconfig.rst
+
bert_disable [ACPI]
Disable BERT OS support on buggy BIOSes.
diff --git a/Documentation/trace/boottime-trace.rst b/Documentation/trace/boottime-trace.rst
new file mode 100644
index 000000000000..dcb390075ca1
--- /dev/null
+++ b/Documentation/trace/boottime-trace.rst
@@ -0,0 +1,184 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Boot-time tracing
+=================
+
+:Author: Masami Hiramatsu <mhiramat@kernel.org>
+
+Overview
+========
+
+Boot-time tracing allows users to trace the boot-time process, including
+device initialization, with the full features of ftrace, such as per-event
+filters and actions, histograms, kprobe-events and synthetic-events,
+and trace instances.
+Since the kernel command line is not expressive enough to control these
+complex features, boot-time tracing uses a bootconfig file to describe the
+tracing configuration.
+
+Options in the Boot Config
+==========================
+
+Here is the list of available options for boot-time tracing in the
+boot config file [1]_. All options are under the "ftrace." or "kernel."
+prefix. See kernel parameters for the options which start
+with the "kernel." prefix [2]_.
+
+.. [1] See :ref:`Documentation/admin-guide/bootconfig.rst <bootconfig>`
+.. [2] See :ref:`Documentation/admin-guide/kernel-parameters.rst <kernelparameters>`
+
+Ftrace Global Options
+---------------------
+
+Ftrace global options have the "kernel." prefix in the boot config, which
+means these options are passed as part of the legacy kernel command line.
+A short example follows the option list below.
+
+kernel.tp_printk
+ Output trace-event data to the printk buffer too.
+
+kernel.dump_on_oops [= MODE]
+ Dump ftrace on Oops. If MODE = 1 or omitted, dump the trace buffer
+ on all CPUs. If MODE = 2, dump the buffer of the CPU which triggered the Oops.
+
+kernel.traceoff_on_warning
+ Stop tracing if WARN_ON() occurs.
+
+kernel.fgraph_max_depth = MAX_DEPTH
+ Set the maximum depth of the fgraph tracer to MAX_DEPTH.
+
+kernel.fgraph_filters = FILTER[, FILTER2...]
+ Add fgraph tracing function filters.
+
+kernel.fgraph_notraces = FILTER[, FILTER2...]
+ Add fgraph non-tracing function filters.
+
+
+Ftrace Per-instance Options
+---------------------------
+
+These options can be used for each instance, including the global ftrace
+node. A short example follows the option list below.
+
+ftrace.[instance.INSTANCE.]options = OPT1[, OPT2[...]]
+ Enable given ftrace options.
+
+ftrace.[instance.INSTANCE.]trace_clock = CLOCK
+ Set ftrace's trace_clock to the given CLOCK.
+
+ftrace.[instance.INSTANCE.]buffer_size = SIZE
+ Configure ftrace buffer size to SIZE. You can use "KB" or "MB"
+ for that SIZE.
+
+ftrace.[instance.INSTANCE.]alloc_snapshot
+ Allocate snapshot buffer.
+
+ftrace.[instance.INSTANCE.]cpumask = CPUMASK
+ Set CPUMASK as trace cpu-mask.
+
+ftrace.[instance.INSTANCE.]events = EVENT[, EVENT2[...]]
+ Enable given events on boot. You can use a wild card in EVENT.
+
+ftrace.[instance.INSTANCE.]tracer = TRACER
+ Set the current tracer to TRACER on boot (e.g. function).
+
+ftrace.[instance.INSTANCE.]ftrace.filters
+ This will take an array of tracing function filter rules.
+
+ftrace.[instance.INSTANCE.]ftrace.notraces
+ This will take an array of NON-tracing function filter rules.
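+
+As a brief illustration (the instance name and values here are hypothetical),
+these options could be combined as::
+
+ ftrace.instance.foo {
+ 	tracer = "function"
+ 	buffer_size = 512KB
+ 	events = "sched:sched_switch", "irq:*"
+ }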
+
+
+Ftrace Per-Event Options
+------------------------
+
+These options set per-event options.
+
+ftrace.[instance.INSTANCE.]event.GROUP.EVENT.enable
+ Enable GROUP:EVENT tracing.
+
+ftrace.[instance.INSTANCE.]event.GROUP.EVENT.filter = FILTER
+ Set FILTER rule to the GROUP:EVENT.
+
+ftrace.[instance.INSTANCE.]event.GROUP.EVENT.actions = ACTION[, ACTION2[...]]
+ Set ACTIONs to the GROUP:EVENT.
+
+ftrace.[instance.INSTANCE.]event.kprobes.EVENT.probes = PROBE[, PROBE2[...]]
+ Defines a new kprobe event based on PROBEs. It is possible to define
+ multiple probes on one event, but those must have the same types of
+ arguments. This option is available only for events whose
+ group name is "kprobes".
+
+ftrace.[instance.INSTANCE.]event.synthetic.EVENT.fields = FIELD[, FIELD2[...]]
+ Defines a new synthetic event with FIELDs. Each field should be
+ "type varname".
+
+Note that kprobe and synthetic event definitions can be written under an
+instance node, but they are also visible from other instances, so please
+take care to avoid event name conflicts.
+
+
+Examples
+========
+
+For example, to add a filter and actions for each event, define kprobe
+events, and define synthetic events with a histogram, write a boot config
+like the one below::
+
+ ftrace.event {
+ task.task_newtask {
+ filter = "pid < 128"
+ enable
+ }
+ kprobes.vfs_read {
+ probes = "vfs_read $arg1 $arg2"
+ filter = "common_pid < 200"
+ enable
+ }
+ synthetic.initcall_latency {
+ fields = "unsigned long func", "u64 lat"
+ actions = "hist:keys=func.sym,lat:vals=lat:sort=lat"
+ }
+ initcall.initcall_start {
+ actions = "hist:keys=func:ts0=common_timestamp.usecs"
+ }
+ initcall.initcall_finish {
+ actions = "hist:keys=func:lat=common_timestamp.usecs-$ts0:onmatch(initcall.initcall_start).initcall_latency(func,$lat)"
+ }
+ }
+
+Also, boot-time tracing supports an "instance" node, which allows us to run
+several tracers for different purposes at once. For example, if one tracer
+is for tracing functions starting with "user\_" and another traces
+"kernel\_" functions, you can write a boot config as below::
+
+ ftrace.instance {
+ foo {
+ tracer = "function"
+ ftrace.filters = "user_*"
+ }
+ bar {
+ tracer = "function"
+ ftrace.filters = "kernel_*"
+ }
+ }
+
+The instance node also accepts event nodes so that each instance
+can customize its event tracing.
+
+This boot-time tracing also supports ftrace kernel parameters via the boot
+config.
+For example, the following kernel parameters::
+
+ trace_options=sym-addr trace_event=initcall:* tp_printk trace_buf_size=1M ftrace=function ftrace_filter="vfs*"
+
+These can be written in the boot config as below::
+
+ kernel {
+ trace_options = sym-addr
+ trace_event = "initcall:*"
+ tp_printk
+ trace_buf_size = 1M
+ ftrace = function
+ ftrace_filter = "vfs*"
+ }
+
+Note that these parameters start with the "kernel" prefix instead of "ftrace".
diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst
index f7e1fcc0953c..ed79b220bd07 100644
--- a/Documentation/trace/events.rst
+++ b/Documentation/trace/events.rst
@@ -525,3 +525,518 @@ The following commands are supported:
event counts (hitcount).
See Documentation/trace/histogram.rst for details and examples.
+
+6.3 In-kernel trace event API
+-----------------------------
+
+In most cases, the command-line interface to trace events is more than
+sufficient. Sometimes, however, applications might find the need for
+more complex relationships than can be expressed through a simple
+series of linked command-line expressions, or putting together sets of
+commands may be simply too cumbersome. An example might be an
+application that needs to 'listen' to the trace stream in order to
+maintain an in-kernel state machine detecting, for instance, when an
+illegal kernel state occurs in the scheduler.
+
+The trace event subsystem provides an in-kernel API allowing modules
+or other kernel code to generate user-defined 'synthetic' events at
+will, which can be used to augment the existing trace stream
+and/or signal that a particularly important state has occurred.
+
+A similar in-kernel API is also available for creating kprobe and
+kretprobe events.
+
+Both the synthetic event and k/ret/probe event APIs are built on top
+of a lower-level "dynevent_cmd" event command API, which is also
+available for more specialized applications, or as the basis of other
+higher-level trace event APIs.
+
+The API provided for these purposes is described below and allows the
+following:
+
+ - dynamically creating synthetic event definitions
+ - dynamically creating kprobe and kretprobe event definitions
+ - tracing synthetic events from in-kernel code
+ - the low-level "dynevent_cmd" API
+
+6.3.1 Dynamically creating synthetic event definitions
+--------------------------------------------------------
+
+There are a couple ways to create a new synthetic event from a kernel
+module or other kernel code.
+
+The first creates the event in one step, using synth_event_create().
+In this method, the name of the event to create and an array defining
+the fields is supplied to synth_event_create(). If successful, a
+synthetic event with that name and fields will exist following that
+call. For example, to create a new "schedtest" synthetic event:
+
+ ret = synth_event_create("schedtest", sched_fields,
+ ARRAY_SIZE(sched_fields), THIS_MODULE);
+
+The sched_fields param in this example points to an array of struct
+synth_field_desc, each of which describes an event field by type and
+name:
+
+ static struct synth_field_desc sched_fields[] = {
+ { .type = "pid_t", .name = "next_pid_field" },
+ { .type = "char[16]", .name = "next_comm_field" },
+ { .type = "u64", .name = "ts_ns" },
+ { .type = "u64", .name = "ts_ms" },
+ { .type = "unsigned int", .name = "cpu" },
+ { .type = "char[64]", .name = "my_string_field" },
+ { .type = "int", .name = "my_int_field" },
+ };
+
+See synth_field_size() for available types. If field_name contains [n]
+the field is considered to be an array.
+
+If the event is created from within a module, a pointer to the module
+must be passed to synth_event_create(). This will ensure that the
+trace buffer won't contain unreadable events when the module is
+removed.
+
+At this point, the event object is ready to be used for generating new
+events.
+
+In the second method, the event is created in several steps. This
+allows events to be created dynamically and without the need to create
+and populate an array of fields beforehand.
+
+To use this method, an empty or partially empty synthetic event should
+first be created using synth_event_gen_cmd_start() or
+synth_event_gen_cmd_array_start(). For synth_event_gen_cmd_start(),
+the name of the event should be supplied, along with one or more pairs of
+args, each pair representing a 'type field_name;' field specification.
+For synth_event_gen_cmd_array_start(), the name of the
+event along with an array of struct synth_field_desc should be
+supplied. Before calling synth_event_gen_cmd_start() or
+synth_event_gen_cmd_array_start(), the user should create and
+initialize a dynevent_cmd object using synth_event_cmd_init().
+
+For example, to create a new "schedtest" synthetic event with two
+fields:
+
+ struct dynevent_cmd cmd;
+ char *buf;
+
+ /* Create a buffer to hold the generated command */
+ buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+
+ /* Before generating the command, initialize the cmd object */
+ synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+ ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
+ "pid_t", "next_pid_field",
+ "u64", "ts_ns");
+
+Alternatively, using an array of struct synth_field_desc fields
+containing the same information:
+
+ ret = synth_event_gen_cmd_array_start(&cmd, "schedtest", THIS_MODULE,
+ fields, n_fields);
+
+Once the synthetic event object has been created, it can then be
+populated with more fields. Fields are added one by one using
+synth_event_add_field(), supplying the dynevent_cmd object, a field
+type, and a field name. For example, to add a new int field named
+"intfield", the following call should be made:
+
+ ret = synth_event_add_field(&cmd, "int", "intfield");
+
+See synth_field_size() for available types. If field_name contains [n]
+the field is considered to be an array.
+
+A group of fields can also be added all at once using an array of
+synth_field_desc with synth_event_add_fields(). For example, this would add
+just the first four sched_fields:
+
+ ret = synth_event_add_fields(&cmd, sched_fields, 4);
+
+If you already have a string of the form 'type field_name',
+synth_event_add_field_str() can be used to add it as-is; it will
+also automatically append a ';' to the string.
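+
+For example, a short sketch using the same cmd object as above (the field
+string here is illustrative):
+
+ ret = synth_event_add_field_str(&cmd, "u64 ts_ns");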
+
+Once all the fields have been added, the event should be finalized and
+registered by calling the synth_event_gen_cmd_end() function:
+
+ ret = synth_event_gen_cmd_end(&cmd);
+
+At this point, the event object is ready to be used for tracing new
+events.
+
+6.3.3 Tracing synthetic events from in-kernel code
+--------------------------------------------------
+
+To trace a synthetic event, there are several options. The first
+option is to trace the event in one call, using synth_event_trace()
+with a variable number of values, or synth_event_trace_array() with an
+array of values to be set. A second option can be used to avoid the
+need for a pre-formed array of values or list of arguments, via
+synth_event_trace_start() and synth_event_trace_end() along with
+synth_event_add_next_val() or synth_event_add_val() to add the values
+piecewise.
+
+6.3.3.1 Tracing a synthetic event all at once
+---------------------------------------------
+
+To trace a synthetic event all at once, the synth_event_trace() or
+synth_event_trace_array() functions can be used.
+
+The synth_event_trace() function is passed the trace_event_file
+representing the synthetic event (which can be retrieved using
+trace_get_event_file() using the synthetic event name, "synthetic" as
+the system name, and the trace instance name (NULL if using the global
+trace array)), along with a variable number of u64 args, one for each
+synthetic event field, and the number of values being passed.
+
+So, to trace an event corresponding to the synthetic event definition
+above, code like the following could be used:
+
+ ret = synth_event_trace(create_synth_test, 7, /* number of values */
+ 444, /* next_pid_field */
+ (u64)"clackers", /* next_comm_field */
+ 1000000, /* ts_ns */
+ 1000, /* ts_ms */
+ smp_processor_id(),/* cpu */
+ (u64)"Thneed", /* my_string_field */
+ 999); /* my_int_field */
+
+All vals should be cast to u64, and string vals are just pointers to
+strings, cast to u64. Strings will be copied into space reserved in
+the event for the string, using these pointers.
+
+Alternatively, the synth_event_trace_array() function can be used to
+accomplish the same thing. It is passed the trace_event_file
+representing the synthetic event (which can be retrieved using
+trace_get_event_file() using the synthetic event name, "synthetic" as
+the system name, and the trace instance name (NULL if using the global
+trace array)), along with an array of u64, one for each synthetic
+event field.
+
+To trace an event corresponding to the synthetic event definition
+above, code like the following could be used:
+
+ u64 vals[7];
+
+ vals[0] = 777; /* next_pid_field */
+ vals[1] = (u64)"tiddlywinks"; /* next_comm_field */
+ vals[2] = 1000000; /* ts_ns */
+ vals[3] = 1000; /* ts_ms */
+ vals[4] = smp_processor_id(); /* cpu */
+ vals[5] = (u64)"thneed"; /* my_string_field */
+ vals[6] = 398; /* my_int_field */
+
+The 'vals' array is just an array of u64, the number of which must
+match the number of fields in the synthetic event, and which must be in
+the same order as the synthetic event fields.
+
+All vals should be cast to u64, and string vals are just pointers to
+strings, cast to u64. Strings will be copied into space reserved in
+the event for the string, using these pointers.
+
+In order to trace a synthetic event, a pointer to the trace event file
+is needed. The trace_get_event_file() function can be used to get
+it - it will find the file in the given trace instance (in this case
+NULL since the top trace array is being used) while at the same time
+preventing the instance containing it from going away:
+
+ schedtest_event_file = trace_get_event_file(NULL, "synthetic",
+ "schedtest");
+
+Before tracing the event, it should be enabled in some way, otherwise
+the synthetic event won't actually show up in the trace buffer.
+
+To enable a synthetic event from the kernel, trace_array_set_clr_event()
+can be used (which is not specific to synthetic events, so does need
+the "synthetic" system name to be specified explicitly).
+
+To enable the event, pass 'true' to it:
+
+ trace_array_set_clr_event(schedtest_event_file->tr,
+ "synthetic", "schedtest", true);
+
+To disable it pass false:
+
+ trace_array_set_clr_event(schedtest_event_file->tr,
+ "synthetic", "schedtest", false);
+
+Finally, synth_event_trace_array() can be used to actually trace the
+event, which should be visible in the trace buffer afterwards:
+
+ ret = synth_event_trace_array(schedtest_event_file, vals,
+ ARRAY_SIZE(vals));
+
+To remove the synthetic event, the event should be disabled, and the
+trace instance should be 'put' back using trace_put_event_file():
+
+ trace_array_set_clr_event(schedtest_event_file->tr,
+ "synthetic", "schedtest", false);
+ trace_put_event_file(schedtest_event_file);
+
+If those have been successful, synth_event_delete() can be called to
+remove the event:
+
+ ret = synth_event_delete("schedtest");
+
+6.3.3.2 Tracing a synthetic event piecewise
+-------------------------------------------
+
+To trace a synthetic event using the piecewise method described above, the
+synth_event_trace_start() function is used to 'open' the synthetic
+event trace:
+
+ struct synth_trace_state trace_state;
+
+ ret = synth_event_trace_start(schedtest_event_file, &trace_state);
+
+It's passed the trace_event_file representing the synthetic event
+using the same methods as described above, along with a pointer to a
+struct synth_trace_state object, which will be zeroed before use and
+used to maintain state between this and following calls.
+
+Once the event has been opened, which means space for it has been
+reserved in the trace buffer, the individual fields can be set. There
+are two ways to do that, either one after another for each field in
+the event, which requires no lookups, or by name, which does. The
+tradeoff is flexibility in doing the assignments vs the cost of a
+lookup per field.
+
+To assign the values one after the other without lookups,
+synth_event_add_next_val() should be used. Each call is passed the
+same synth_trace_state object used in the synth_event_trace_start(),
+along with the value to set the next field in the event. After each
+field is set, the 'cursor' points to the next field, which will be set
+by the subsequent call, continuing until all the fields have been set
+in order. The same sequence of calls as in the above examples using
+this method would be (without error-handling code):
+
+ /* next_pid_field */
+ ret = synth_event_add_next_val(777, &trace_state);
+
+ /* next_comm_field */
+ ret = synth_event_add_next_val((u64)"slinky", &trace_state);
+
+ /* ts_ns */
+ ret = synth_event_add_next_val(1000000, &trace_state);
+
+ /* ts_ms */
+ ret = synth_event_add_next_val(1000, &trace_state);
+
+ /* cpu */
+ ret = synth_event_add_next_val(smp_processor_id(), &trace_state);
+
+ /* my_string_field */
+ ret = synth_event_add_next_val((u64)"thneed_2.01", &trace_state);
+
+ /* my_int_field */
+ ret = synth_event_add_next_val(395, &trace_state);
+
+To assign the values in any order, synth_event_add_val() should be
+used. Each call is passed the same synth_trace_state object used in
+the synth_event_trace_start(), along with the field name of the field
+to set and the value to set it to. The same sequence of calls as in
+the above examples using this method would be (without error-handling
+code):
+
+ ret = synth_event_add_val("next_pid_field", 777, &trace_state);
+ ret = synth_event_add_val("next_comm_field", (u64)"silly putty",
+ &trace_state);
+ ret = synth_event_add_val("ts_ns", 1000000, &trace_state);
+ ret = synth_event_add_val("ts_ms", 1000, &trace_state);
+ ret = synth_event_add_val("cpu", smp_processor_id(), &trace_state);
+ ret = synth_event_add_val("my_string_field", (u64)"thneed_9",
+ &trace_state);
+ ret = synth_event_add_val("my_int_field", 3999, &trace_state);
+
+Note that synth_event_add_next_val() and synth_event_add_val() are
+incompatible if used within the same trace of an event - either one
+can be used but not both at the same time.
+
+Finally, the event won't be actually traced until it's 'closed',
+which is done using synth_event_trace_end(), which takes only the
+struct synth_trace_state object used in the previous calls:
+
+ ret = synth_event_trace_end(&trace_state);
+
+Note that synth_event_trace_end() must be called at the end regardless
+of whether any of the add calls failed (say due to a bad field name
+being passed in).
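+
+A brief sketch of that pattern, reusing the hypothetical file and values
+from the examples above:
+
+ ret = synth_event_trace_start(schedtest_event_file, &trace_state);
+ if (ret)
+         return ret;
+
+ ret = synth_event_add_val("next_pid_field", 777, &trace_state);
+ if (!ret)
+         ret = synth_event_add_val("ts_ns", 1000000, &trace_state);
+
+ /* close the trace even if one of the add calls failed */
+ err = synth_event_trace_end(&trace_state);
+
+ return ret ? ret : err;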
+
+6.3.4 Dynamically creating kprobe and kretprobe event definitions
+--------------------------------------------------------------------
+
+To create a kprobe or kretprobe trace event from kernel code, the
+kprobe_event_gen_cmd_start() or kretprobe_event_gen_cmd_start()
+functions can be used.
+
+To create a kprobe event, an empty or partially empty kprobe event
+should first be created using kprobe_event_gen_cmd_start(). The name
+of the event and the probe location should be specified, along with one
+or more args, each representing a probe field, supplied to this
+function. Before calling kprobe_event_gen_cmd_start(), the user
+should create and initialize a dynevent_cmd object using
+kprobe_event_cmd_init().
+
+For example, to create a new "schedtest" kprobe event with two fields:
+
+ struct dynevent_cmd cmd;
+ char *buf;
+
+ /* Create a buffer to hold the generated command */
+ buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+
+ /* Before generating the command, initialize the cmd object */
+ kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+ /*
+ * Define the gen_kprobe_test event with the first 2 kprobe
+ * fields.
+ */
+ ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test", "do_sys_open",
+ "dfd=%ax", "filename=%dx");
+
+Once the kprobe event object has been created, it can then be
+populated with more fields. Fields can be added using
+kprobe_event_add_fields(), supplying the dynevent_cmd object along
+with a variable arg list of probe fields. For example, to add a
+couple additional fields, the following call could be made:
+
+ ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
+
+Once all the fields have been added, the event should be finalized and
+registered by calling the kprobe_event_gen_cmd_end() or
+kretprobe_event_gen_cmd_end() functions, depending on whether a kprobe
+or kretprobe command was started:
+
+ ret = kprobe_event_gen_cmd_end(&cmd);
+
+or
+
+ ret = kretprobe_event_gen_cmd_end(&cmd);
+
+At this point, the event object is ready to be used for tracing new
+events.
+
+Similarly, a kretprobe event can be created using
+kretprobe_event_gen_cmd_start() with a probe name and location and
+additional params such as $retval:
+
+ ret = kretprobe_event_gen_cmd_start(&cmd, "gen_kretprobe_test",
+ "do_sys_open", "$retval");
+
+Similar to the synthetic event case, code like the following can be
+used to enable the newly created kprobe event:
+
+ gen_kprobe_test = trace_get_event_file(NULL, "kprobes", "gen_kprobe_test");
+
+ ret = trace_array_set_clr_event(gen_kprobe_test->tr,
+ "kprobes", "gen_kprobe_test", true);
+
+Finally, also similar to synthetic events, the following code can be
+used to give the kprobe event file back and delete the event:
+
+ trace_put_event_file(gen_kprobe_test);
+
+ ret = kprobe_event_delete("gen_kprobe_test");
+
+6.3.4 The "dynevent_cmd" low-level API
+--------------------------------------
+
+Both the in-kernel synthetic event and kprobe interfaces are built on
+top of a lower-level "dynevent_cmd" interface. This interface is
+meant to provide the basis for higher-level interfaces such as the
+synthetic and kprobe interfaces, which can be used as examples.
+
+The basic idea is simple and amounts to providing a general-purpose
+layer that can be used to generate trace event commands. The
+generated command strings can then be passed to the command-parsing
+and event creation code that already exists in the trace event
+subsystem for creating the corresponding trace events.
+
+In a nutshell, the way it works is that the higher-level interface
+code creates a struct dynevent_cmd object, then uses a couple of
+functions, dynevent_arg_add() and dynevent_arg_pair_add(), to build up
+a command string, and finally causes the command to be executed
+using the dynevent_create() function. The details of the interface
+are described below.
+
+The first step in building a new command string is to create and
+initialize an instance of a dynevent_cmd. Here, for instance, we
+create a dynevent_cmd on the stack and initialize it:
+
+ struct dynevent_cmd cmd;
+ char *buf;
+ int ret;
+
+ buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+
+ dynevent_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN, DYNEVENT_TYPE_FOO,
+ foo_event_run_command);
+
+The dynevent_cmd initialization needs to be given a user-specified
+buffer and the length of the buffer (MAX_DYNEVENT_CMD_LEN can be used
+for this purpose - at 2k it's generally too big to be comfortably put
+on the stack, so is dynamically allocated), a dynevent type id, which
+is meant to be used to check that further API calls are for the
+correct command type, and a pointer to an event-specific run_command()
+callback that will be called to actually execute the event-specific
+command function.
+
+Once that's done, the command string can be built up by successive
+calls to argument-adding functions.
+
+To add a single argument, define and initialize a struct dynevent_arg
+or struct dynevent_arg_pair object. Here's an example of the simplest
+possible arg addition, which is simply to append the given string as
+a whitespace-separated argument to the command:
+
+ struct dynevent_arg arg;
+
+ dynevent_arg_init(&arg, NULL, 0);
+
+ arg.str = name;
+
+ ret = dynevent_arg_add(&cmd, &arg);
+
+The arg object is first initialized using dynevent_arg_init() and in
+this case the parameters are NULL or 0, which means there's no
+optional sanity-checking function or separator appended to the end of
+the arg.
+
+Here's another more complicated example using an 'arg pair', which is
+used to create an argument that consists of a couple of components added
+together as a unit, for example, a 'type field_name;' arg or a simple
+expression arg e.g. 'flags=%cx':
+
+ struct dynevent_arg_pair arg_pair;
+
+ dynevent_arg_pair_init(&arg_pair, dynevent_foo_check_arg_fn, 0, ';');
+
+ arg_pair.lhs = type;
+ arg_pair.rhs = name;
+
+ ret = dynevent_arg_pair_add(&cmd, &arg_pair);
+
+Again, the arg_pair is first initialized, in this case with a callback
+function used to check the sanity of the args (for example, that
+neither part of the pair is NULL), along with a character to be used
+to add an operator between the pair (here none) and a separator to be
+appended onto the end of the arg pair (here ';').
+
+There's also a dynevent_str_add() function that can be used to simply
+add a string as-is, with no spaces, delimiters, or arg check.
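+
+For example (a sketch; the literal string is arbitrary):
+
+ ret = dynevent_str_add(&cmd, "do_sys_open");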
+
+Any number of dynevent_*_add() calls can be made to build up the string
+(until its length surpasses cmd->maxlen). When all the arguments have
+been added and the command string is complete, the only thing left to
+do is run the command, which happens by simply calling
+dynevent_create():
+
+ ret = dynevent_create(&cmd);
+
+At that point, if the return value is 0, the dynamic event has been
+created and is ready to use.
+
+See the dynevent_cmd function definitions themselves for the details
+of the API.
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index 04acd277c5f6..fa9e1c730f6a 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -19,6 +19,7 @@ Linux Tracing Technologies
events-msr
mmiotrace
histogram
+ boottime-trace
hwlat_detector
intel_th
stm
diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst
index 55993055902c..705d73087099 100644
--- a/Documentation/trace/kprobetrace.rst
+++ b/Documentation/trace/kprobetrace.rst
@@ -97,6 +97,7 @@ which shows given pointer in "symbol+offset" style.
For $comm, the default type is "string"; any other type is invalid.
.. _user_mem_access:
+
User Memory Access
------------------
Kprobe events supports user-space memory access. For that purpose, you can use
diff --git a/MAINTAINERS b/MAINTAINERS
index 777f87ac2551..5c87b133dfa6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15934,6 +15934,15 @@ S: Supported
F: Documentation/networking/device_drivers/stmicro/
F: drivers/net/ethernet/stmicro/stmmac/
+EXTRA BOOT CONFIG
+M: Masami Hiramatsu <mhiramat@kernel.org>
+S: Maintained
+F: lib/bootconfig.c
+F: fs/proc/bootconfig.c
+F: include/linux/bootconfig.h
+F: tools/bootconfig/*
+F: Documentation/admin-guide/bootconfig.rst
+
SUN3/3X
M: Sam Creasey <sammy@sammy.net>
W: http://sammy.net/sun3/
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index eda2633a393d..9210a95cb4e6 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -32,7 +32,7 @@
#define OP_BUFFER_FLAGS 0
-static struct ring_buffer *op_ring_buffer;
+static struct trace_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index ead487e80510..bd08616ed8ba 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -33,3 +33,4 @@ proc-$(CONFIG_PROC_KCORE) += kcore.o
proc-$(CONFIG_PROC_VMCORE) += vmcore.o
proc-$(CONFIG_PRINTK) += kmsg.o
proc-$(CONFIG_PROC_PAGE_MONITOR) += page.o
+proc-$(CONFIG_BOOT_CONFIG) += bootconfig.o
diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c
new file mode 100644
index 000000000000..9955d75c0585
--- /dev/null
+++ b/fs/proc/bootconfig.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * /proc/bootconfig - Extra boot configuration
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/printk.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/bootconfig.h>
+#include <linux/slab.h>
+
+static char *saved_boot_config;
+
+static int boot_config_proc_show(struct seq_file *m, void *v)
+{
+ if (saved_boot_config)
+ seq_puts(m, saved_boot_config);
+ return 0;
+}
+
+/* Remaining size of the buffer */
+#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)
+
+/* Return the needed total length if @size is 0 */
+static int __init copy_xbc_key_value_list(char *dst, size_t size)
+{
+ struct xbc_node *leaf, *vnode;
+ const char *val;
+ char *key, *end = dst + size;
+ int ret = 0;
+
+ key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL);
+
+ xbc_for_each_key_value(leaf, val) {
+ ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX);
+ if (ret < 0)
+ break;
+ ret = snprintf(dst, rest(dst, end), "%s = ", key);
+ if (ret < 0)
+ break;
+ dst += ret;
+ vnode = xbc_node_get_child(leaf);
+ if (vnode && xbc_node_is_array(vnode)) {
+ xbc_array_for_each_value(vnode, val) {
+ ret = snprintf(dst, rest(dst, end), "\"%s\"%s",
+ val, vnode->next ? ", " : "\n");
+ if (ret < 0)
+ goto out;
+ dst += ret;
+ }
+ } else {
+ ret = snprintf(dst, rest(dst, end), "\"%s\"\n", val);
+ if (ret < 0)
+ break;
+ dst += ret;
+ }
+ }
+out:
+ kfree(key);
+
+ return ret < 0 ? ret : dst - (end - size);
+}
+
+static int __init proc_boot_config_init(void)
+{
+ int len;
+
+ len = copy_xbc_key_value_list(NULL, 0);
+ if (len < 0)
+ return len;
+
+ if (len > 0) {
+ saved_boot_config = kzalloc(len + 1, GFP_KERNEL);
+ if (!saved_boot_config)
+ return -ENOMEM;
+
+ len = copy_xbc_key_value_list(saved_boot_config, len + 1);
+ if (len < 0) {
+ kfree(saved_boot_config);
+ return len;
+ }
+ }
+
+ proc_create_single("bootconfig", 0, NULL, boot_config_proc_show);
+
+ return 0;
+}
+fs_initcall(proc_boot_config_init);
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
new file mode 100644
index 000000000000..7e18c939663e
--- /dev/null
+++ b/include/linux/bootconfig.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_XBC_H
+#define _LINUX_XBC_H
+/*
+ * Extra Boot Config
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Masami Hiramatsu <mhiramat@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/* XBC tree node */
+struct xbc_node {
+ u16 next;
+ u16 child;
+ u16 parent;
+ u16 data;
+} __attribute__ ((__packed__));
+
+#define XBC_KEY 0
+#define XBC_VALUE (1 << 15)
+/* Maximum size of boot config is 32KB - 1 */
+#define XBC_DATA_MAX (XBC_VALUE - 1)
+
+#define XBC_NODE_MAX 1024
+#define XBC_KEYLEN_MAX 256
+#define XBC_DEPTH_MAX 16
+
+/* Node tree access raw APIs */
+struct xbc_node * __init xbc_root_node(void);
+int __init xbc_node_index(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_child(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_next(struct xbc_node *node);
+const char * __init xbc_node_get_data(struct xbc_node *node);
+
+/**
+ * xbc_node_is_value() - Test the node is a value node
+ * @node: An XBC node.
+ *
+ * Test the @node is a value node and return true if a value node, false if not.
+ */
+static inline __init bool xbc_node_is_value(struct xbc_node *node)
+{
+ return node->data & XBC_VALUE;
+}
+
+/**
+ * xbc_node_is_key() - Test the node is a key node
+ * @node: An XBC node.
+ *
+ * Test the @node is a key node and return true if a key node, false if not.
+ */
+static inline __init bool xbc_node_is_key(struct xbc_node *node)
+{
+ return !xbc_node_is_value(node);
+}
+
+/**
+ * xbc_node_is_array() - Test the node is an arraied value node
+ * @node: An XBC node.
+ *
+ * Test the @node is an arraied value node.
+ */
+static inline __init bool xbc_node_is_array(struct xbc_node *node)
+{
+ return xbc_node_is_value(node) && node->next != 0;
+}
+
+/**
+ * xbc_node_is_leaf() - Test the node is a leaf key node
+ * @node: An XBC node.
+ *
+ * Test the @node is a leaf key node which is a key node and has a value node
+ * or no child. Returns true if it is a leaf node, or false if not.
+ */
+static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
+{
+ return xbc_node_is_key(node) &&
+ (!node->child || xbc_node_is_value(xbc_node_get_child(node)));
+}
+
+/* Tree-based key-value access APIs */
+struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent,
+ const char *key);
+
+const char * __init xbc_node_find_value(struct xbc_node *parent,
+ const char *key,
+ struct xbc_node **vnode);
+
+struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
+ struct xbc_node *leaf);
+
+const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
+ struct xbc_node **leaf);
+
+/**
+ * xbc_find_value() - Find a value which matches the key
+ * @key: Search key
+ * @vnode: A container pointer of XBC value node.
+ *
+ * Search for a value whose key matches @key in the whole XBC tree and return
+ * the value if found. The found value node is stored in *@vnode.
+ * Note that this can return a 0-length string and store NULL in *@vnode for
+ * a key-only (non-value) entry.
+ */
+static inline const char * __init
+xbc_find_value(const char *key, struct xbc_node **vnode)
+{
+ return xbc_node_find_value(NULL, key, vnode);
+}
+
+/**
+ * xbc_find_node() - Find a node which matches the key
+ * @key: Search key
+ *
+ * Search for a (key) node whose key matches @key in the whole XBC tree and
+ * return the node if found. If not found, returns NULL.
+ */
+static inline struct xbc_node * __init xbc_find_node(const char *key)
+{
+ return xbc_node_find_child(NULL, key);
+}
+
+/**
+ * xbc_array_for_each_value() - Iterate value nodes on an array
+ * @anode: An XBC arraied value node
+ * @value: A value
+ *
+ * Iterate array value nodes and values starting from @anode. This is expected
+ * to be used with xbc_find_value() and xbc_node_find_value(), so that users
+ * can process each array entry node.
+ */
+#define xbc_array_for_each_value(anode, value) \
+ for (value = xbc_node_get_data(anode); anode != NULL ; \
+ anode = xbc_node_get_next(anode), \
+ value = anode ? xbc_node_get_data(anode) : NULL)
+
+/**
+ * xbc_node_for_each_child() - Iterate child nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate child nodes of @parent. Each child node is stored in @child.
+ */
+#define xbc_node_for_each_child(parent, child) \
+ for (child = xbc_node_get_child(parent); child != NULL ; \
+ child = xbc_node_get_next(child))
+
+/**
+ * xbc_node_for_each_array_value() - Iterate array entries of given key
+ * @node: An XBC node.
+ * @key: A key string searched under @node
+ * @anode: Iterated XBC node of array entry.
+ * @value: Iterated value of array entry.
+ *
+ * Iterate array entries of given @key under @node. Each array entry node
+ * is stored in @anode and @value. If the @node doesn't have a @key node,
+ * it does nothing.
+ * Note that even if the found key node has only one value (not an array),
+ * this executes the block once. However, if the found key node has no value
+ * (key-only node), this does nothing. So don't use this for testing the
+ * key-value pair existence.
+ */
+#define xbc_node_for_each_array_value(node, key, anode, value) \
+ for (value = xbc_node_find_value(node, key, &anode); value != NULL; \
+ anode = xbc_node_get_next(anode), \
+ value = anode ? xbc_node_get_data(anode) : NULL)
+
+/**
+ * xbc_node_for_each_key_value() - Iterate key-value pairs under a node
+ * @node: An XBC node.
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs under @node. Each key node and value string are
+ * stored in @knode and @value respectively.
+ */
+#define xbc_node_for_each_key_value(node, knode, value) \
+ for (knode = NULL, value = xbc_node_find_next_key_value(node, &knode);\
+ knode != NULL; value = xbc_node_find_next_key_value(node, &knode))
+
+/**
+ * xbc_for_each_key_value() - Iterate key-value pairs
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs in whole XBC tree. Each key node and value string
+ * are stored in @knode and @value respectively.
+ */
+#define xbc_for_each_key_value(knode, value) \
+ xbc_node_for_each_key_value(NULL, knode, value)
+
+/* Compose partial key */
+int __init xbc_node_compose_key_after(struct xbc_node *root,
+ struct xbc_node *node, char *buf, size_t size);
+
+/**
+ * xbc_node_compose_key() - Compose full key string of the XBC node
+ * @node: An XBC node.
+ * @buf: A buffer to store the key.
+ * @size: The size of the @buf.
+ *
+ * Compose the full-length key of the @node into @buf. Returns the total
+ * length of the key stored in @buf. Or returns -EINVAL if @node is NULL,
+ * and -ERANGE if the key depth is deeper than max depth.
+ */
+static inline int __init xbc_node_compose_key(struct xbc_node *node,
+ char *buf, size_t size)
+{
+ return xbc_node_compose_key_after(NULL, node, buf, size);
+}
+
+/* XBC node initializer */
+int __init xbc_init(char *buf);
+
+/* XBC cleanup data structures */
+void __init xbc_destroy_all(void);
+
+/* Debug dump functions */
+void __init xbc_debug_dump(void);
+
+#endif
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6d4c22aee384..cf65763af0cb 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -582,7 +582,7 @@ struct swevent_hlist {
#define PERF_ATTACH_ITRACE 0x10
struct perf_cgroup;
-struct ring_buffer;
+struct perf_buffer;
struct pmu_event_list {
raw_spinlock_t lock;
@@ -694,7 +694,7 @@ struct perf_event {
struct mutex mmap_mutex;
atomic_t mmap_count;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
struct list_head rb_entry;
unsigned long rcu_batches;
int rcu_pending;
@@ -854,7 +854,7 @@ struct perf_cpu_context {
struct perf_output_handle {
struct perf_event *event;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
unsigned long wakeup;
unsigned long size;
u64 aux_flags;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 1a40277b512c..df0124eabece 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -6,7 +6,7 @@
#include <linux/seq_file.h>
#include <linux/poll.h>
-struct ring_buffer;
+struct trace_buffer;
struct ring_buffer_iter;
/*
@@ -77,13 +77,13 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
* else
* ring_buffer_unlock_commit(buffer, event);
*/
-void ring_buffer_discard_commit(struct ring_buffer *buffer,
+void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event);
/*
* size is in bytes for each per CPU buffer.
*/
-struct ring_buffer *
+struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
/*
@@ -97,38 +97,38 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
-__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
#define RING_BUFFER_ALL_CPUS -1
-void ring_buffer_free(struct ring_buffer *buffer);
+void ring_buffer_free(struct trace_buffer *buffer);
-int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
+int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
+void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);
-struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
unsigned long length);
-int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+int ring_buffer_unlock_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event);
-int ring_buffer_write(struct ring_buffer *buffer,
+int ring_buffer_write(struct trace_buffer *buffer,
unsigned long length, void *data);
-void ring_buffer_nest_start(struct ring_buffer *buffer);
-void ring_buffer_nest_end(struct ring_buffer *buffer);
+void ring_buffer_nest_start(struct trace_buffer *buffer);
+void ring_buffer_nest_end(struct trace_buffer *buffer);
struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
+ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
@@ -140,59 +140,59 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
-unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
-void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
-void ring_buffer_reset(struct ring_buffer *buffer);
+void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_reset(struct trace_buffer *buffer);
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
-int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
- struct ring_buffer *buffer_b, int cpu);
+int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+ struct trace_buffer *buffer_b, int cpu);
#else
static inline int
-ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
- struct ring_buffer *buffer_b, int cpu)
+ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+ struct trace_buffer *buffer_b, int cpu)
{
return -ENODEV;
}
#endif
-bool ring_buffer_empty(struct ring_buffer *buffer);
-bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
-
-void ring_buffer_record_disable(struct ring_buffer *buffer);
-void ring_buffer_record_enable(struct ring_buffer *buffer);
-void ring_buffer_record_off(struct ring_buffer *buffer);
-void ring_buffer_record_on(struct ring_buffer *buffer);
-bool ring_buffer_record_is_on(struct ring_buffer *buffer);
-bool ring_buffer_record_is_set_on(struct ring_buffer *buffer);
-void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
-void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
-
-u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_entries(struct ring_buffer *buffer);
-unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
-unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
-
-u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
-void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+bool ring_buffer_empty(struct trace_buffer *buffer);
+bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);
+
+void ring_buffer_record_disable(struct trace_buffer *buffer);
+void ring_buffer_record_enable(struct trace_buffer *buffer);
+void ring_buffer_record_off(struct trace_buffer *buffer);
+void ring_buffer_record_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
+void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
+
+u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_entries(struct trace_buffer *buffer);
+unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
+unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);
+
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu);
+void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
int cpu, u64 *ts);
-void ring_buffer_set_clock(struct ring_buffer *buffer,
+void ring_buffer_set_clock(struct trace_buffer *buffer,
u64 (*clock)(void));
-void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs);
-bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);
+void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
+bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
-size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu);
-size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu);
+size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
+size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
-int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
+void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
+int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
size_t len, int cpu, int full);
struct trace_seq;
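
[Editor's note] The rename above is purely mechanical: every declaration that took a struct ring_buffer now takes a struct trace_buffer. For readers less familiar with this API, the following minimal sketch shows how the renamed type reads in use. It is not part of the patch; the module name and payload are invented, and it only calls functions declared in this header (ring_buffer_event_data() is declared elsewhere in the same file).

    #include <linux/module.h>
    #include <linux/ring_buffer.h>
    #include <linux/smp.h>

    /* Illustrative only: allocate a trace ring buffer, write one event on
     * the local CPU, read it back, and free the buffer again.
     */
    static int __init rb_rename_demo_init(void)
    {
    	struct trace_buffer *buffer;	/* was "struct ring_buffer" */
    	struct ring_buffer_event *event;
    	char payload[] = "hello";
    	u64 ts = 0;
    	int cpu;

    	buffer = ring_buffer_alloc(PAGE_SIZE, 0);	/* default mode */
    	if (!buffer)
    		return -ENOMEM;

    	cpu = get_cpu();	/* keep producer and consumer on one CPU */
    	if (ring_buffer_write(buffer, sizeof(payload), payload) == 0) {
    		event = ring_buffer_consume(buffer, cpu, &ts, NULL);
    		if (event)
    			pr_info("rb demo: read back \"%s\"\n",
    				(char *)ring_buffer_event_data(event));
    	}
    	put_cpu();

    	ring_buffer_free(buffer);
    	return 0;
    }

    static void __exit rb_rename_demo_exit(void)
    {
    }

    module_init(rb_rename_demo_init);
    module_exit(rb_rename_demo_exit);
    MODULE_LICENSE("GPL");
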
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 13ea7f7d54ac..af2c85d3a1dd 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -11,7 +11,7 @@
#include <linux/tracepoint.h>
struct trace_array;
-struct trace_buffer;
+struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
@@ -79,7 +79,7 @@ struct trace_entry {
struct trace_iterator {
struct trace_array *tr;
struct tracer *trace;
- struct trace_buffer *trace_buffer;
+ struct array_buffer *array_buffer;
void *private;
int cpu_file;
struct mutex mutex;
@@ -153,7 +153,7 @@ void tracing_generic_entry_update(struct trace_entry *entry,
struct trace_event_file;
struct ring_buffer_event *
-trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
+trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned long flags, int pc);
@@ -226,12 +226,13 @@ extern int trace_event_reg(struct trace_event_call *event,
enum trace_reg type, void *data);
struct trace_event_buffer {
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
struct ring_buffer_event *event;
struct trace_event_file *trace_file;
void *entry;
unsigned long flags;
int pc;
+ struct pt_regs *regs;
};
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
@@ -364,6 +365,128 @@ enum {
EVENT_FILE_FL_WAS_ENABLED_BIT,
};
+extern struct trace_event_file *trace_get_event_file(const char *instance,
+ const char *system,
+ const char *event);
+extern void trace_put_event_file(struct trace_event_file *file);
+
+#define MAX_DYNEVENT_CMD_LEN (2048)
+
+enum dynevent_type {
+ DYNEVENT_TYPE_SYNTH = 1,
+ DYNEVENT_TYPE_KPROBE,
+ DYNEVENT_TYPE_NONE,
+};
+
+struct dynevent_cmd;
+
+typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);
+
+struct dynevent_cmd {
+ struct seq_buf seq;
+ const char *event_name;
+ unsigned int n_fields;
+ enum dynevent_type type;
+ dynevent_create_fn_t run_command;
+ void *private_data;
+};
+
+extern int dynevent_create(struct dynevent_cmd *cmd);
+
+extern int synth_event_delete(const char *name);
+
+extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
+ char *buf, int maxlen);
+
+extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
+ const char *name,
+ struct module *mod, ...);
+
+#define synth_event_gen_cmd_start(cmd, name, mod, ...) \
+ __synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)
+
+struct synth_field_desc {
+ const char *type;
+ const char *name;
+};
+
+extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
+ const char *name,
+ struct module *mod,
+ struct synth_field_desc *fields,
+ unsigned int n_fields);
+extern int synth_event_create(const char *name,
+ struct synth_field_desc *fields,
+ unsigned int n_fields, struct module *mod);
+
+extern int synth_event_add_field(struct dynevent_cmd *cmd,
+ const char *type,
+ const char *name);
+extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
+ const char *type_name);
+extern int synth_event_add_fields(struct dynevent_cmd *cmd,
+ struct synth_field_desc *fields,
+ unsigned int n_fields);
+
+#define synth_event_gen_cmd_end(cmd) \
+ dynevent_create(cmd)
+
+struct synth_event;
+
+struct synth_event_trace_state {
+ struct trace_event_buffer fbuffer;
+ struct synth_trace_event *entry;
+ struct trace_buffer *buffer;
+ struct synth_event *event;
+ unsigned int cur_field;
+ unsigned int n_u64;
+ bool enabled;
+ bool add_next;
+ bool add_name;
+};
+
+extern int synth_event_trace(struct trace_event_file *file,
+ unsigned int n_vals, ...);
+extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
+ unsigned int n_vals);
+extern int synth_event_trace_start(struct trace_event_file *file,
+ struct synth_event_trace_state *trace_state);
+extern int synth_event_add_next_val(u64 val,
+ struct synth_event_trace_state *trace_state);
+extern int synth_event_add_val(const char *field_name, u64 val,
+ struct synth_event_trace_state *trace_state);
+extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
+
+extern int kprobe_event_delete(const char *name);
+
+extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
+ char *buf, int maxlen);
+
+#define kprobe_event_gen_cmd_start(cmd, name, loc, ...) \
+ __kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)
+
+#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...) \
+ __kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)
+
+extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
+ bool kretprobe,
+ const char *name,
+ const char *loc, ...);
+
+#define kprobe_event_add_fields(cmd, ...) \
+ __kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)
+
+#define kprobe_event_add_field(cmd, field) \
+ __kprobe_event_add_fields(cmd, field, NULL)
+
+extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
+
+#define kprobe_event_gen_cmd_end(cmd) \
+ dynevent_create(cmd)
+
+#define kretprobe_event_gen_cmd_end(cmd) \
+ dynevent_create(cmd)
+
/*
* Event file flags:
* ENABLED - The event is enabled
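
[Editor's note] The declarations added above form the new in-kernel API for defining and generating synthetic and kprobe events directly from kernel code. Below is a minimal sketch of the synthetic-event side; it is not from this patch set, the event name "demo_lat" and its fields are invented, and error handling is abbreviated. The kprobe side of the API is exercised by the new kprobe_event_gen_test.c module later in this diff.

    #include <linux/module.h>
    #include <linux/sched.h>
    #include <linux/trace_events.h>

    /* Hypothetical demo: define a synthetic event, enable it, and emit one
     * record from kernel code using only the API declared above.
     */
    static struct trace_event_file *demo_file;

    static int __init synth_demo_init(void)
    {
    	struct synth_field_desc fields[] = {
    		{ .type = "u64",   .name = "delta_ns" },
    		{ .type = "pid_t", .name = "pid" },
    	};
    	int ret;

    	/* Define and register the "demo_lat" synthetic event. */
    	ret = synth_event_create("demo_lat", fields, ARRAY_SIZE(fields),
    				 THIS_MODULE);
    	if (ret)
    		return ret;

    	/* Pin down its event file in the top-level trace instance. */
    	demo_file = trace_get_event_file(NULL, "synthetic", "demo_lat");
    	if (IS_ERR(demo_file)) {
    		synth_event_delete("demo_lat");
    		return PTR_ERR(demo_file);
    	}

    	/* Enable the event and trace one sample, values in field order. */
    	trace_array_set_clr_event(demo_file->tr, "synthetic", "demo_lat", true);
    	synth_event_trace(demo_file, 2, (u64)1000, (u64)current->pid);
    	return 0;
    }

    static void __exit synth_demo_exit(void)
    {
    	trace_array_set_clr_event(demo_file->tr, "synthetic", "demo_lat", false);
    	trace_put_event_file(demo_file);
    	synth_event_delete("demo_lat");
    }

    module_init(synth_demo_init);
    module_exit(synth_demo_exit);
    MODULE_LICENSE("GPL");
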
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 96d77e5e0664..502c7be50b8d 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -2,7 +2,8 @@
/*
* Stage 1 of the trace events.
*
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
*
* struct trace_event_raw_<call> {
* struct trace_entry ent;
@@ -223,7 +224,8 @@ TRACE_MAKE_SYSTEM_STR();
/*
* Stage 3 of the trace events.
*
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
*
* enum print_line_t
* trace_raw_output_<call>(struct trace_iterator *iter, int flags)
@@ -533,7 +535,8 @@ static inline notrace int trace_event_get_offsets_##call( \
/*
* Stage 4 of the trace events.
*
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
*
* For those macros defined with TRACE_EVENT:
*
@@ -548,7 +551,7 @@ static inline notrace int trace_event_get_offsets_##call( \
* enum event_trigger_type __tt = ETT_NONE;
* struct ring_buffer_event *event;
* struct trace_event_raw_<call> *entry; <-- defined in stage 1
- * struct ring_buffer *buffer;
+ * struct trace_buffer *buffer;
* unsigned long irq_flags;
* int __data_size;
* int pc;
diff --git a/init/Kconfig b/init/Kconfig
index 24b23d843df1..cfee56c151f1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1224,6 +1224,20 @@ source "usr/Kconfig"
endif
+config BOOT_CONFIG
+ bool "Boot config support"
+ depends on BLK_DEV_INITRD
+ select LIBXBC
+ default y
+ help
+ Extra boot config allows the system admin to pass a config file as
+ a supplemental extension of the kernel cmdline when booting.
+ The boot config file must be attached at the end of the initramfs
+ with its size and checksum.
+ See <file:Documentation/admin-guide/bootconfig.rst> for details.
+
+ If unsure, say Y.
+
choice
prompt "Compiler optimization level"
default CC_OPTIMIZE_FOR_PERFORMANCE
diff --git a/init/main.c b/init/main.c
index d8c7e86c2d28..cc0ee4873419 100644
--- a/init/main.c
+++ b/init/main.c
@@ -28,6 +28,7 @@
#include <linux/initrd.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
+#include <linux/bootconfig.h>
#include <linux/console.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
@@ -136,8 +137,10 @@ char __initdata boot_command_line[COMMAND_LINE_SIZE];
char *saved_command_line;
/* Command line for parameter parsing */
static char *static_command_line;
-/* Command line for per-initcall parameter parsing */
-static char *initcall_command_line;
+/* Untouched extra command line */
+static char *extra_command_line;
+/* Extra init arguments */
+static char *extra_init_args;
static char *execute_command;
static char *ramdisk_execute_command;
@@ -245,6 +248,156 @@ static int __init loglevel(char *str)
early_param("loglevel", loglevel);
+#ifdef CONFIG_BOOT_CONFIG
+
+char xbc_namebuf[XBC_KEYLEN_MAX] __initdata;
+
+#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)
+
+static int __init xbc_snprint_cmdline(char *buf, size_t size,
+ struct xbc_node *root)
+{
+ struct xbc_node *knode, *vnode;
+ char *end = buf + size;
+ char c = '\"';
+ const char *val;
+ int ret;
+
+ xbc_node_for_each_key_value(root, knode, val) {
+ ret = xbc_node_compose_key_after(root, knode,
+ xbc_namebuf, XBC_KEYLEN_MAX);
+ if (ret < 0)
+ return ret;
+
+ vnode = xbc_node_get_child(knode);
+ ret = snprintf(buf, rest(buf, end), "%s%c", xbc_namebuf,
+ vnode ? '=' : ' ');
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ if (!vnode)
+ continue;
+
+ c = '\"';
+ xbc_array_for_each_value(vnode, val) {
+ ret = snprintf(buf, rest(buf, end), "%c%s", c, val);
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ c = ',';
+ }
+ if (rest(buf, end) > 2)
+ strcpy(buf, "\" ");
+ buf += 2;
+ }
+
+ return buf - (end - size);
+}
+#undef rest
+
+/* Make an extra command line under given key word */
+static char * __init xbc_make_cmdline(const char *key)
+{
+ struct xbc_node *root;
+ char *new_cmdline;
+ int ret, len = 0;
+
+ root = xbc_find_node(key);
+ if (!root)
+ return NULL;
+
+ /* Count required buffer size */
+ len = xbc_snprint_cmdline(NULL, 0, root);
+ if (len <= 0)
+ return NULL;
+
+ new_cmdline = memblock_alloc(len + 1, SMP_CACHE_BYTES);
+ if (!new_cmdline) {
+ pr_err("Failed to allocate memory for extra kernel cmdline.\n");
+ return NULL;
+ }
+
+ ret = xbc_snprint_cmdline(new_cmdline, len + 1, root);
+ if (ret < 0 || ret > len) {
+ pr_err("Failed to print extra kernel cmdline.\n");
+ return NULL;
+ }
+
+ return new_cmdline;
+}
+
+u32 boot_config_checksum(unsigned char *p, u32 size)
+{
+ u32 ret = 0;
+
+ while (size--)
+ ret += *p++;
+
+ return ret;
+}
+
+static void __init setup_boot_config(const char *cmdline)
+{
+ u32 size, csum;
+ char *data, *copy;
+ const char *p;
+ u32 *hdr;
+ int ret;
+
+ p = strstr(cmdline, "bootconfig");
+ if (!p || (p != cmdline && !isspace(*(p-1))) ||
+ (p[10] && !isspace(p[10])))
+ return;
+
+ if (!initrd_end)
+ goto not_found;
+
+ hdr = (u32 *)(initrd_end - 8);
+ size = hdr[0];
+ csum = hdr[1];
+
+ if (size >= XBC_DATA_MAX) {
+ pr_err("bootconfig size %d greater than max size %d\n",
+ size, XBC_DATA_MAX);
+ return;
+ }
+
+ data = ((void *)hdr) - size;
+ if ((unsigned long)data < initrd_start)
+ goto not_found;
+
+ if (boot_config_checksum((unsigned char *)data, size) != csum) {
+ pr_err("bootconfig checksum failed\n");
+ return;
+ }
+
+ copy = memblock_alloc(size + 1, SMP_CACHE_BYTES);
+ if (!copy) {
+ pr_err("Failed to allocate memory for bootconfig\n");
+ return;
+ }
+
+ memcpy(copy, data, size);
+ copy[size] = '\0';
+
+ ret = xbc_init(copy);
+ if (ret < 0)
+ pr_err("Failed to parse bootconfig\n");
+ else {
+ pr_info("Load bootconfig: %d bytes %d nodes\n", size, ret);
+ /* keys starting with "kernel." are passed via cmdline */
+ extra_command_line = xbc_make_cmdline("kernel");
+ /* Also, "init." keys are init arguments */
+ extra_init_args = xbc_make_cmdline("init");
+ }
+ return;
+not_found:
+ pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+}
+#else
+#define setup_boot_config(cmdline) do { } while (0)
+#endif
+
/* Change NUL term back to "=", to make "param" the whole string. */
static void __init repair_env_string(char *param, char *val)
{
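
[Editor's note] setup_boot_config() above defines the on-image contract: the bootconfig text is appended to the initrd, immediately followed by two 32-bit words giving its size and a simple byte-sum checksum, and the kernel walks back 8 bytes from initrd_end to find them. The sketch below is a hypothetical host-side illustration of that footer layout only; the tools/bootconfig utility added in this series is the supported way to attach a bootconfig, and the byte order is assumed to match the target.

    #include <stdint.h>
    #include <stdio.h>

    /* Append <bootconfig> to <initrd> with the footer setup_boot_config()
     * expects: [config text][u32 size][u32 byte-sum checksum].
     */
    static uint32_t byte_sum(const unsigned char *p, uint32_t size)
    {
    	uint32_t sum = 0;

    	while (size--)
    		sum += *p++;
    	return sum;
    }

    int main(int argc, char **argv)
    {
    	unsigned char buf[32 * 1024];	/* arbitrary cap for this sketch */
    	uint32_t size, csum;
    	FILE *in, *out;

    	if (argc != 3) {
    		fprintf(stderr, "usage: %s <bootconfig> <initrd>\n", argv[0]);
    		return 1;
    	}
    	in = fopen(argv[1], "rb");
    	out = fopen(argv[2], "ab");
    	if (!in || !out)
    		return 1;

    	size = fread(buf, 1, sizeof(buf), in);
    	csum = byte_sum(buf, size);

    	fwrite(buf, 1, size, out);		/* config text   */
    	fwrite(&size, sizeof(size), 1, out);	/* footer word 0 */
    	fwrite(&csum, sizeof(csum), 1, out);	/* footer word 1 */

    	fclose(out);
    	fclose(in);
    	return 0;
    }
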
@@ -373,22 +526,50 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
*/
static void __init setup_command_line(char *command_line)
{
- size_t len = strlen(boot_command_line) + 1;
+ size_t len, xlen = 0, ilen = 0;
- saved_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
- if (!saved_command_line)
- panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+ if (extra_command_line)
+ xlen = strlen(extra_command_line);
+ if (extra_init_args)
+ ilen = strlen(extra_init_args) + 4; /* for " -- " */
- initcall_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
- if (!initcall_command_line)
- panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+ len = xlen + strlen(boot_command_line) + 1;
+
+ saved_command_line = memblock_alloc(len + ilen, SMP_CACHE_BYTES);
+ if (!saved_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
if (!static_command_line)
panic("%s: Failed to allocate %zu bytes\n", __func__, len);
- strcpy(saved_command_line, boot_command_line);
- strcpy(static_command_line, command_line);
+ if (xlen) {
+ /*
+ * We have to put extra_command_line before boot command
+ * lines because there could be dashes (separator of init
+ * command line) in the command lines.
+ */
+ strcpy(saved_command_line, extra_command_line);
+ strcpy(static_command_line, extra_command_line);
+ }
+ strcpy(saved_command_line + xlen, boot_command_line);
+ strcpy(static_command_line + xlen, command_line);
+
+ if (ilen) {
+ /*
+ * Append supplemental init boot args to saved_command_line
+ * so that the user can check what command line options were
+ * passed to init.
+ */
+ len = strlen(saved_command_line);
+ if (!strstr(boot_command_line, " -- ")) {
+ strcpy(saved_command_line + len, " -- ");
+ len += 4;
+ } else
+ saved_command_line[len++] = ' ';
+
+ strcpy(saved_command_line + len, extra_init_args);
+ }
}
/*
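
[Editor's note] The ordering chosen in setup_command_line() above matters: the bootconfig-derived "kernel." arguments are placed in front of the boot command line (which may itself contain " -- "), and the "init." arguments are appended after a " -- " separator so that they reach init. The userspace sketch below only illustrates that resulting layout; every string in it is an invented example.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	/* Invented stand-ins for the three pieces joined by the kernel. */
    	const char *extra_cmdline = "trace_clock=\"global\" "; /* "kernel." keys */
    	const char *boot_cmdline  = "root=/dev/sda1 ro bootconfig";
    	const char *extra_init    = "splash";                   /* "init." keys   */
    	char saved[256];

    	/* Extra args first, then the boot cmdline, then " -- " + init args
    	 * (or just a space if the boot cmdline already contains " -- ").
    	 */
    	snprintf(saved, sizeof(saved), "%s%s%s%s",
    		 extra_cmdline, boot_cmdline,
    		 strstr(boot_cmdline, " -- ") ? " " : " -- ",
    		 extra_init);

    	/* Prints: trace_clock="global" root=/dev/sda1 ro bootconfig -- splash */
    	puts(saved);
    	return 0;
    }
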
@@ -595,6 +776,7 @@ asmlinkage __visible void __init start_kernel(void)
pr_notice("%s", linux_banner);
early_security_init();
setup_arch(&command_line);
+ setup_boot_config(command_line);
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
@@ -604,7 +786,7 @@ asmlinkage __visible void __init start_kernel(void)
build_all_zonelists(NULL);
page_alloc_init();
- pr_notice("Kernel command line: %s\n", boot_command_line);
+ pr_notice("Kernel command line: %s\n", saved_command_line);
/* parameters may set static keys */
jump_label_init();
parse_early_param();
@@ -615,6 +797,9 @@ asmlinkage __visible void __init start_kernel(void)
if (!IS_ERR_OR_NULL(after_dashes))
parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
NULL, set_init_arg);
+ if (extra_init_args)
+ parse_args("Setting extra init args", extra_init_args,
+ NULL, 0, -1, -1, NULL, set_init_arg);
/*
* These use large bootmem allocations and must precede
@@ -996,13 +1181,12 @@ static int __init ignore_unknown_bootoption(char *param, char *val,
return 0;
}
-static void __init do_initcall_level(int level)
+static void __init do_initcall_level(int level, char *command_line)
{
initcall_entry_t *fn;
- strcpy(initcall_command_line, saved_command_line);
parse_args(initcall_level_names[level],
- initcall_command_line, __start___param,
+ command_line, __start___param,
__stop___param - __start___param,
level, level,
NULL, ignore_unknown_bootoption);
@@ -1015,9 +1199,20 @@ static void __init do_initcall_level(int level)
static void __init do_initcalls(void)
{
int level;
+ size_t len = strlen(saved_command_line) + 1;
+ char *command_line;
+
+ command_line = kzalloc(len, GFP_KERNEL);
+ if (!command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
+ for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) {
+ /* Parser modifies command_line, restore it each time */
+ strcpy(command_line, saved_command_line);
+ do_initcall_level(level, command_line);
+ }
- for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++)
- do_initcall_level(level);
+ kfree(command_line);
}
/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dc9c643bce94..17f9a4a909eb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4373,7 +4373,7 @@ static void free_event_rcu(struct rcu_head *head)
}
static void ring_buffer_attach(struct perf_event *event,
- struct ring_buffer *rb);
+ struct perf_buffer *rb);
static void detach_sb_event(struct perf_event *event)
{
@@ -5054,7 +5054,7 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
static __poll_t perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
__poll_t events = EPOLLHUP;
poll_wait(file, &event->waitq, wait);
@@ -5296,7 +5296,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
return perf_event_set_bpf_prog(event, arg);
case PERF_EVENT_IOC_PAUSE_OUTPUT: {
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
@@ -5432,7 +5432,7 @@ static void calc_timer_values(struct perf_event *event,
static void perf_event_init_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
@@ -5464,7 +5464,7 @@ void __weak arch_perf_update_userpage(
void perf_event_update_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
u64 enabled, running, now;
rcu_read_lock();
@@ -5515,7 +5515,7 @@ EXPORT_SYMBOL_GPL(perf_event_update_userpage);
static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
{
struct perf_event *event = vmf->vma->vm_file->private_data;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
vm_fault_t ret = VM_FAULT_SIGBUS;
if (vmf->flags & FAULT_FLAG_MKWRITE) {
@@ -5548,9 +5548,9 @@ unlock:
}
static void ring_buffer_attach(struct perf_event *event,
- struct ring_buffer *rb)
+ struct perf_buffer *rb)
{
- struct ring_buffer *old_rb = NULL;
+ struct perf_buffer *old_rb = NULL;
unsigned long flags;
if (event->rb) {
@@ -5608,7 +5608,7 @@ static void ring_buffer_attach(struct perf_event *event,
static void ring_buffer_wakeup(struct perf_event *event)
{
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
@@ -5619,9 +5619,9 @@ static void ring_buffer_wakeup(struct perf_event *event)
rcu_read_unlock();
}
-struct ring_buffer *ring_buffer_get(struct perf_event *event)
+struct perf_buffer *ring_buffer_get(struct perf_event *event)
{
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
@@ -5634,7 +5634,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
return rb;
}
-void ring_buffer_put(struct ring_buffer *rb)
+void ring_buffer_put(struct perf_buffer *rb)
{
if (!refcount_dec_and_test(&rb->refcount))
return;
@@ -5672,7 +5672,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
- struct ring_buffer *rb = ring_buffer_get(event);
+ struct perf_buffer *rb = ring_buffer_get(event);
struct user_struct *mmap_user = rb->mmap_user;
int mmap_locked = rb->mmap_locked;
unsigned long size = perf_data_size(rb);
@@ -5790,8 +5790,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
struct perf_event *event = file->private_data;
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
+ struct perf_buffer *rb = NULL;
unsigned long locked, lock_limit;
- struct ring_buffer *rb = NULL;
unsigned long vma_size;
unsigned long nr_pages;
long user_extra = 0, extra = 0;
@@ -6266,7 +6266,7 @@ static unsigned long perf_prepare_sample_aux(struct perf_event *event,
size_t size)
{
struct perf_event *sampler = event->aux_event;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
data->aux_size = 0;
@@ -6299,7 +6299,7 @@ out:
return data->aux_size;
}
-long perf_pmu_snapshot_aux(struct ring_buffer *rb,
+long perf_pmu_snapshot_aux(struct perf_buffer *rb,
struct perf_event *event,
struct perf_output_handle *handle,
unsigned long size)
@@ -6338,8 +6338,8 @@ static void perf_aux_sample_output(struct perf_event *event,
struct perf_sample_data *data)
{
struct perf_event *sampler = event->aux_event;
+ struct perf_buffer *rb;
unsigned long pad;
- struct ring_buffer *rb;
long size;
if (WARN_ON_ONCE(!sampler || !data->aux_size))
@@ -6707,7 +6707,7 @@ void perf_output_sample(struct perf_output_handle *handle,
int wakeup_events = event->attr.wakeup_events;
if (wakeup_events) {
- struct ring_buffer *rb = handle->rb;
+ struct perf_buffer *rb = handle->rb;
int events = local_inc_return(&rb->events);
if (events >= wakeup_events) {
@@ -7150,7 +7150,7 @@ void perf_event_exec(void)
}
struct remote_output {
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
int err;
};
@@ -7158,7 +7158,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
{
struct perf_event *parent = event->parent;
struct remote_output *ro = data;
- struct ring_buffer *rb = ro->rb;
+ struct perf_buffer *rb = ro->rb;
struct stop_event_data sd = {
.event = event,
};
@@ -10998,7 +10998,7 @@ err_size:
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
- struct ring_buffer *rb = NULL;
+ struct perf_buffer *rb = NULL;
int ret = -EINVAL;
if (!output_event)
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 747d67f130cb..f16f66b6b655 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -10,7 +10,7 @@
#define RING_BUFFER_WRITABLE 0x01
-struct ring_buffer {
+struct perf_buffer {
refcount_t refcount;
struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
@@ -58,17 +58,17 @@ struct ring_buffer {
void *data_pages[0];
};
-extern void rb_free(struct ring_buffer *rb);
+extern void rb_free(struct perf_buffer *rb);
static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
- rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+ rb = container_of(rcu_head, struct perf_buffer, rcu_head);
rb_free(rb);
}
-static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
+static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
if (!pause && rb->nr_pages)
rb->paused = 0;
@@ -76,16 +76,16 @@ static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
rb->paused = 1;
}
-extern struct ring_buffer *
+extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
-extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
+extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
pgoff_t pgoff, int nr_pages, long watermark, int flags);
-extern void rb_free_aux(struct ring_buffer *rb);
-extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
-extern void ring_buffer_put(struct ring_buffer *rb);
+extern void rb_free_aux(struct perf_buffer *rb);
+extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
+extern void ring_buffer_put(struct perf_buffer *rb);
-static inline bool rb_has_aux(struct ring_buffer *rb)
+static inline bool rb_has_aux(struct perf_buffer *rb)
{
return !!rb->aux_nr_pages;
}
@@ -94,7 +94,7 @@ void perf_event_aux_event(struct perf_event *event, unsigned long head,
unsigned long size, u64 flags);
extern struct page *
-perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
+perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);
#ifdef CONFIG_PERF_USE_VMALLOC
/*
@@ -103,25 +103,25 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
* Required for architectures that have d-cache aliasing issues.
*/
-static inline int page_order(struct ring_buffer *rb)
+static inline int page_order(struct perf_buffer *rb)
{
return rb->page_order;
}
#else
-static inline int page_order(struct ring_buffer *rb)
+static inline int page_order(struct perf_buffer *rb)
{
return 0;
}
#endif
-static inline unsigned long perf_data_size(struct ring_buffer *rb)
+static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
-static inline unsigned long perf_aux_size(struct ring_buffer *rb)
+static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
return rb->aux_nr_pages << PAGE_SHIFT;
}
@@ -141,7 +141,7 @@ static inline unsigned long perf_aux_size(struct ring_buffer *rb)
buf += written; \
handle->size -= written; \
if (!handle->size) { \
- struct ring_buffer *rb = handle->rb; \
+ struct perf_buffer *rb = handle->rb; \
\
handle->page++; \
handle->page &= rb->nr_pages - 1; \
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 7ffd5c763f93..192b8abc6330 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -35,7 +35,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
*/
static void perf_output_get_handle(struct perf_output_handle *handle)
{
- struct ring_buffer *rb = handle->rb;
+ struct perf_buffer *rb = handle->rb;
preempt_disable();
@@ -49,7 +49,7 @@ static void perf_output_get_handle(struct perf_output_handle *handle)
static void perf_output_put_handle(struct perf_output_handle *handle)
{
- struct ring_buffer *rb = handle->rb;
+ struct perf_buffer *rb = handle->rb;
unsigned long head;
unsigned int nest;
@@ -150,7 +150,7 @@ __perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size,
bool backward)
{
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
unsigned long tail, offset, head;
int have_lost, page_shift;
struct {
@@ -301,7 +301,7 @@ void perf_output_end(struct perf_output_handle *handle)
}
static void
-ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
+ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
{
long max_size = perf_data_size(rb);
@@ -361,7 +361,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
{
struct perf_event *output_event = event;
unsigned long aux_head, aux_tail;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
unsigned int nest;
if (output_event->parent)
@@ -449,7 +449,7 @@ err:
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);
-static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
+static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
{
if (rb->aux_overwrite)
return false;
@@ -475,7 +475,7 @@ static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
- struct ring_buffer *rb = handle->rb;
+ struct perf_buffer *rb = handle->rb;
unsigned long aux_head;
/* in overwrite mode, driver provides aux_head via handle */
@@ -532,7 +532,7 @@ EXPORT_SYMBOL_GPL(perf_aux_output_end);
*/
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
- struct ring_buffer *rb = handle->rb;
+ struct perf_buffer *rb = handle->rb;
if (size > handle->size)
return -ENOSPC;
@@ -569,8 +569,8 @@ long perf_output_copy_aux(struct perf_output_handle *aux_handle,
struct perf_output_handle *handle,
unsigned long from, unsigned long to)
{
+ struct perf_buffer *rb = aux_handle->rb;
unsigned long tocopy, remainder, len = 0;
- struct ring_buffer *rb = aux_handle->rb;
void *addr;
from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
@@ -626,7 +626,7 @@ static struct page *rb_alloc_aux_page(int node, int order)
return page;
}
-static void rb_free_aux_page(struct ring_buffer *rb, int idx)
+static void rb_free_aux_page(struct perf_buffer *rb, int idx)
{
struct page *page = virt_to_page(rb->aux_pages[idx]);
@@ -635,7 +635,7 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx)
__free_page(page);
}
-static void __rb_free_aux(struct ring_buffer *rb)
+static void __rb_free_aux(struct perf_buffer *rb)
{
int pg;
@@ -662,7 +662,7 @@ static void __rb_free_aux(struct ring_buffer *rb)
}
}
-int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
+int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
bool overwrite = !(flags & RING_BUFFER_WRITABLE);
@@ -753,7 +753,7 @@ out:
return ret;
}
-void rb_free_aux(struct ring_buffer *rb)
+void rb_free_aux(struct perf_buffer *rb)
{
if (refcount_dec_and_test(&rb->aux_refcount))
__rb_free_aux(rb);
@@ -766,7 +766,7 @@ void rb_free_aux(struct ring_buffer *rb)
*/
static struct page *
-__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
if (pgoff > rb->nr_pages)
return NULL;
@@ -798,13 +798,13 @@ static void perf_mmap_free_page(void *addr)
__free_page(page);
}
-struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
+struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
unsigned long size;
int i;
- size = sizeof(struct ring_buffer);
+ size = sizeof(struct perf_buffer);
size += nr_pages * sizeof(void *);
if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
@@ -843,7 +843,7 @@ fail:
return NULL;
}
-void rb_free(struct ring_buffer *rb)
+void rb_free(struct perf_buffer *rb)
{
int i;
@@ -854,13 +854,13 @@ void rb_free(struct ring_buffer *rb)
}
#else
-static int data_page_nr(struct ring_buffer *rb)
+static int data_page_nr(struct perf_buffer *rb)
{
return rb->nr_pages << page_order(rb);
}
static struct page *
-__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
/* The '>' counts in the user page. */
if (pgoff > data_page_nr(rb))
@@ -878,11 +878,11 @@ static void perf_mmap_unmark_page(void *addr)
static void rb_free_work(struct work_struct *work)
{
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
void *base;
int i, nr;
- rb = container_of(work, struct ring_buffer, work);
+ rb = container_of(work, struct perf_buffer, work);
nr = data_page_nr(rb);
base = rb->user_page;
@@ -894,18 +894,18 @@ static void rb_free_work(struct work_struct *work)
kfree(rb);
}
-void rb_free(struct ring_buffer *rb)
+void rb_free(struct perf_buffer *rb)
{
schedule_work(&rb->work);
}
-struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
+struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
unsigned long size;
void *all_buf;
- size = sizeof(struct ring_buffer);
+ size = sizeof(struct perf_buffer);
size += sizeof(void *);
rb = kzalloc(size, GFP_KERNEL);
@@ -939,7 +939,7 @@ fail:
#endif
struct page *
-perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
if (rb->aux_nr_pages) {
/* above AUX space */
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 25a0fcfa7a5d..91e885194dbc 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -141,6 +141,15 @@ menuconfig FTRACE
if FTRACE
+config BOOTTIME_TRACING
+ bool "Boot-time Tracing support"
+ depends on BOOT_CONFIG && TRACING
+ default y
+ help
+ Enable developers to set up the ftrace subsystem via a
+ supplemental kernel cmdline at boot time, for debugging (tracing)
+ driver initialization and the boot process.
+
config FUNCTION_TRACER
bool "Kernel Function Tracer"
depends on HAVE_FUNCTION_TRACER
@@ -172,6 +181,77 @@ config FUNCTION_GRAPH_TRACER
the return value. This is done by setting the current return
address on the current task structure into a stack of calls.
+config DYNAMIC_FTRACE
+ bool "enable/disable function tracing dynamically"
+ depends on FUNCTION_TRACER
+ depends on HAVE_DYNAMIC_FTRACE
+ default y
+ help
+ This option will modify all the calls to function tracing
+ dynamically (will patch them out of the binary image and
+ replace them with a No-Op instruction) on boot up. During
+ compile time, a table is made of all the locations that ftrace
+ can function trace, and this table is linked into the kernel
+ image. When this is enabled, functions can be individually
+ enabled, and the functions not enabled will not affect
+ performance of the system.
+
+ See the files in /sys/kernel/debug/tracing:
+ available_filter_functions
+ set_ftrace_filter
+ set_ftrace_notrace
+
+ This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
+ otherwise has native performance as long as no tracing is active.
+
+config DYNAMIC_FTRACE_WITH_REGS
+ def_bool y
+ depends on DYNAMIC_FTRACE
+ depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
+
+config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ def_bool y
+ depends on DYNAMIC_FTRACE
+ depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+
+config FUNCTION_PROFILER
+ bool "Kernel function profiler"
+ depends on FUNCTION_TRACER
+ default n
+ help
+ This option enables the kernel function profiler. A file is created
+ in debugfs called function_profile_enabled which defaults to zero.
+ When a 1 is echoed into this file profiling begins, and when a
+ zero is entered, profiling stops. A "functions" file is created in
+ the trace_stat directory; this file shows the list of functions that
+ have been hit and their counters.
+
+ If in doubt, say N.
+
+config STACK_TRACER
+ bool "Trace max stack"
+ depends on HAVE_FUNCTION_TRACER
+ select FUNCTION_TRACER
+ select STACKTRACE
+ select KALLSYMS
+ help
+ This special tracer records the maximum stack footprint of the
+ kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
+
+ This tracer works by hooking into every function call that the
+ kernel executes, and keeping a maximum stack depth value and
+ stack-trace saved. If this is configured with DYNAMIC_FTRACE
+ then it will not have any overhead while the stack tracer
+ is disabled.
+
+ To enable the stack tracer on bootup, pass in 'stacktrace'
+ on the kernel command line.
+
+ The stack tracer can also be enabled or disabled via the
+ sysctl kernel.stack_tracer_enabled
+
+ Say N if unsure.
+
config TRACE_PREEMPT_TOGGLE
bool
help
@@ -282,6 +362,19 @@ config HWLAT_TRACER
file. Every time a latency is greater than tracing_thresh, it will
be recorded into the ring buffer.
+config MMIOTRACE
+ bool "Memory mapped IO tracing"
+ depends on HAVE_MMIOTRACE_SUPPORT && PCI
+ select GENERIC_TRACER
+ help
+ Mmiotrace traces Memory Mapped I/O access and is meant for
+ debugging and reverse engineering. It is called from the ioremap
+ implementation and works via page faults. Tracing is disabled by
+ default and can be enabled at run-time.
+
+ See Documentation/trace/mmiotrace.rst.
+ If you are not helping to develop drivers, say N.
+
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
@@ -410,30 +503,6 @@ config BRANCH_TRACER
Say N if unsure.
-config STACK_TRACER
- bool "Trace max stack"
- depends on HAVE_FUNCTION_TRACER
- select FUNCTION_TRACER
- select STACKTRACE
- select KALLSYMS
- help
- This special tracer records the maximum stack footprint of the
- kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
-
- This tracer works by hooking into every function call that the
- kernel executes, and keeping a maximum stack depth value and
- stack-trace saved. If this is configured with DYNAMIC_FTRACE
- then it will not have any overhead while the stack tracer
- is disabled.
-
- To enable the stack tracer on bootup, pass in 'stacktrace'
- on the kernel command line.
-
- The stack tracer can also be enabled or disabled via the
- sysctl kernel.stack_tracer_enabled
-
- Say N if unsure.
-
config BLK_DEV_IO_TRACE
bool "Support for tracing block IO actions"
depends on SYSFS
@@ -531,53 +600,6 @@ config DYNAMIC_EVENTS
config PROBE_EVENTS
def_bool n
-config DYNAMIC_FTRACE
- bool "enable/disable function tracing dynamically"
- depends on FUNCTION_TRACER
- depends on HAVE_DYNAMIC_FTRACE
- default y
- help
- This option will modify all the calls to function tracing
- dynamically (will patch them out of the binary image and
- replace them with a No-Op instruction) on boot up. During
- compile time, a table is made of all the locations that ftrace
- can function trace, and this table is linked into the kernel
- image. When this is enabled, functions can be individually
- enabled, and the functions not enabled will not affect
- performance of the system.
-
- See the files in /sys/kernel/debug/tracing:
- available_filter_functions
- set_ftrace_filter
- set_ftrace_notrace
-
- This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
- otherwise has native performance as long as no tracing is active.
-
-config DYNAMIC_FTRACE_WITH_REGS
- def_bool y
- depends on DYNAMIC_FTRACE
- depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
-
-config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
- def_bool y
- depends on DYNAMIC_FTRACE
- depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
-
-config FUNCTION_PROFILER
- bool "Kernel function profiler"
- depends on FUNCTION_TRACER
- default n
- help
- This option enables the kernel function profiler. A file is created
- in debugfs called function_profile_enabled which defaults to zero.
- When a 1 is echoed into this file profiling begins, and when a
- zero is entered, profiling stops. A "functions" file is created in
- the trace_stat directory; this file shows the list of functions that
- have been hit and their counters.
-
- If in doubt, say N.
-
config BPF_KPROBE_OVERRIDE
bool "Enable BPF programs to override a kprobed function"
depends on BPF_EVENTS
@@ -592,54 +614,6 @@ config FTRACE_MCOUNT_RECORD
depends on DYNAMIC_FTRACE
depends on HAVE_FTRACE_MCOUNT_RECORD
-config FTRACE_SELFTEST
- bool
-
-config FTRACE_STARTUP_TEST
- bool "Perform a startup test on ftrace"
- depends on GENERIC_TRACER
- select FTRACE_SELFTEST
- help
- This option performs a series of startup tests on ftrace. On bootup
- a series of tests are made to verify that the tracer is
- functioning properly. It will do tests on all the configured
- tracers of ftrace.
-
-config EVENT_TRACE_STARTUP_TEST
- bool "Run selftest on trace events"
- depends on FTRACE_STARTUP_TEST
- default y
- help
- This option performs a test on all trace events in the system.
- It basically just enables each event and runs some code that
- will trigger events (not necessarily the event it enables)
- This may take some time run as there are a lot of events.
-
-config EVENT_TRACE_TEST_SYSCALLS
- bool "Run selftest on syscall events"
- depends on EVENT_TRACE_STARTUP_TEST
- help
- This option will also enable testing every syscall event.
- It only enables the event and disables it and runs various loads
- with the event enabled. This adds a bit more time for kernel boot
- up since it runs this on every system call defined.
-
- TBD - enable a way to actually call the syscalls as we test their
- events
-
-config MMIOTRACE
- bool "Memory mapped IO tracing"
- depends on HAVE_MMIOTRACE_SUPPORT && PCI
- select GENERIC_TRACER
- help
- Mmiotrace traces Memory Mapped I/O access and is meant for
- debugging and reverse engineering. It is called from the ioremap
- implementation and works via page faults. Tracing is disabled by
- default and can be enabled at run-time.
-
- See Documentation/trace/mmiotrace.rst.
- If you are not helping to develop drivers, say N.
-
config TRACING_MAP
bool
depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -680,16 +654,6 @@ config TRACE_EVENT_INJECT
If unsure, say N.
-config MMIOTRACE_TEST
- tristate "Test module for mmiotrace"
- depends on MMIOTRACE && m
- help
- This is a dumb module for testing mmiotrace. It is very dangerous
- as it will write garbage to IO memory starting at a given address.
- However, it should be safe to use on e.g. unused portion of VRAM.
-
- Say N, unless you absolutely know what you are doing.
-
config TRACEPOINT_BENCHMARK
bool "Add tracepoint that benchmarks tracepoints"
help
@@ -736,6 +700,81 @@ config RING_BUFFER_BENCHMARK
If unsure, say N.
+config TRACE_EVAL_MAP_FILE
+ bool "Show eval mappings for trace events"
+ depends on TRACING
+ help
+ The "print fmt" of the trace events will show the enum/sizeof names
+ instead of their values. This can cause problems for user space tools
+ that use this string to parse the raw data as user space does not know
+ how to convert the string to its value.
+
+ To fix this, there's a special macro in the kernel that can be used
+ to convert an enum/sizeof into its value. If this macro is used, then
+ the print fmt strings will be converted to their values.
+
+ If something does not get converted properly, this option can be
+ used to show what enums/sizeof the kernel tried to convert.
+
+ This option is for debugging the conversions. A file is created
+ in the tracing directory called "eval_map" that will show the
+ names matched with their values and what trace event system they
+ belong to.
+
+ Normally, the mapping of the strings to values will be freed after
+ boot up or module load. With this option, they will not be freed, as
+ they are needed for the "eval_map" file. Enabling this option will
+ increase the memory footprint of the running kernel.
+
+ If unsure, say N.
+
+config GCOV_PROFILE_FTRACE
+ bool "Enable GCOV profiling on ftrace subsystem"
+ depends on GCOV_KERNEL
+ help
+ Enable GCOV profiling on ftrace subsystem for checking
+ which functions/lines are tested.
+
+ If unsure, say N.
+
+ Note that on a kernel compiled with this config, ftrace will
+ run significantly slower.
+
+config FTRACE_SELFTEST
+ bool
+
+config FTRACE_STARTUP_TEST
+ bool "Perform a startup test on ftrace"
+ depends on GENERIC_TRACER
+ select FTRACE_SELFTEST
+ help
+ This option performs a series of startup tests on ftrace. On bootup
+ a series of tests are made to verify that the tracer is
+ functioning properly. It will do tests on all the configured
+ tracers of ftrace.
+
+config EVENT_TRACE_STARTUP_TEST
+ bool "Run selftest on trace events"
+ depends on FTRACE_STARTUP_TEST
+ default y
+ help
+ This option performs a test on all trace events in the system.
+ It basically just enables each event and runs some code that
+ will trigger events (not necessarily the event it enables).
+ This may take some time to run as there are a lot of events.
+
+config EVENT_TRACE_TEST_SYSCALLS
+ bool "Run selftest on syscall events"
+ depends on EVENT_TRACE_STARTUP_TEST
+ help
+ This option will also enable testing every syscall event.
+ It only enables the event and disables it and runs various loads
+ with the event enabled. This adds a bit more time for kernel boot
+ up since it runs this on every system call defined.
+
+ TBD - enable a way to actually call the syscalls as we test their
+ events
+
config RING_BUFFER_STARTUP_TEST
bool "Ring buffer startup self test"
depends on RING_BUFFER
@@ -759,8 +798,18 @@ config RING_BUFFER_STARTUP_TEST
If unsure, say N
+config MMIOTRACE_TEST
+ tristate "Test module for mmiotrace"
+ depends on MMIOTRACE && m
+ help
+ This is a dumb module for testing mmiotrace. It is very dangerous
+ as it will write garbage to IO memory starting at a given address.
+ However, it should be safe to use on e.g. unused portion of VRAM.
+
+ Say N, unless you absolutely know what you are doing.
+
config PREEMPTIRQ_DELAY_TEST
- tristate "Preempt / IRQ disable delay thread to test latency tracers"
+ tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
depends on m
help
Select this option to build a test module that can help test latency
@@ -774,45 +823,30 @@ config PREEMPTIRQ_DELAY_TEST
If unsure, say N
-config TRACE_EVAL_MAP_FILE
- bool "Show eval mappings for trace events"
- depends on TRACING
- help
- The "print fmt" of the trace events will show the enum/sizeof names
- instead of their values. This can cause problems for user space tools
- that use this string to parse the raw data as user space does not know
- how to convert the string to its value.
-
- To fix this, there's a special macro in the kernel that can be used
- to convert an enum/sizeof into its value. If this macro is used, then
- the print fmt strings will be converted to their values.
-
- If something does not get converted properly, this option can be
- used to show what enums/sizeof the kernel tried to convert.
-
- This option is for debugging the conversions. A file is created
- in the tracing directory called "eval_map" that will show the
- names matched with their values and what trace event system they
- belong too.
+config SYNTH_EVENT_GEN_TEST
+ tristate "Test module for in-kernel synthetic event generation"
+ depends on HIST_TRIGGERS
+ help
+ This option creates a test module to check the base
+ functionality of in-kernel synthetic event definition and
+ generation.
- Normally, the mapping of the strings to values will be freed after
- boot up or module load. With this option, they will not be freed, as
- they are needed for the "eval_map" file. Enabling this option will
- increase the memory footprint of the running kernel.
+ To test, insert the module, and then check the trace buffer
+ for the generated sample events.
- If unsure, say N.
+ If unsure, say N.
-config GCOV_PROFILE_FTRACE
- bool "Enable GCOV profiling on ftrace subsystem"
- depends on GCOV_KERNEL
+config KPROBE_EVENT_GEN_TEST
+ tristate "Test module for in-kernel kprobe event generation"
+ depends on KPROBE_EVENTS
help
- Enable GCOV profiling on ftrace subsystem for checking
- which functions/lines are tested.
+ This option creates a test module to check the base
+ functionality of in-kernel kprobe event definition.
- If unsure, say N.
+ To test, insert the module, and then check the trace buffer
+ for the generated kprobe events.
- Note that on a kernel compiled with this config, ftrace will
- run significantly slower.
+ If unsure, say N.
endif # FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 0e63db62225f..f9dcd19165fa 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -44,6 +44,8 @@ obj-$(CONFIG_TRACING) += trace_stat.o
obj-$(CONFIG_TRACING) += trace_printk.o
obj-$(CONFIG_TRACING_MAP) += tracing_map.o
obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o
+obj-$(CONFIG_SYNTH_EVENT_GEN_TEST) += synth_event_gen_test.o
+obj-$(CONFIG_KPROBE_EVENT_GEN_TEST) += kprobe_event_gen_test.o
obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_PREEMPTIRQ_TRACEPOINTS) += trace_preemptirq.o
@@ -83,6 +85,7 @@ endif
obj-$(CONFIG_DYNAMIC_EVENTS) += trace_dynevent.o
obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
+obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 475e29498bca..0735ae8545d8 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -68,14 +68,14 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
{
struct blk_io_trace *t;
struct ring_buffer_event *event = NULL;
- struct ring_buffer *buffer = NULL;
+ struct trace_buffer *buffer = NULL;
int pc = 0;
int cpu = smp_processor_id();
bool blk_tracer = blk_tracer_enabled;
ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
if (blk_tracer) {
- buffer = blk_tr->trace_buffer.buffer;
+ buffer = blk_tr->array_buffer.buffer;
pc = preempt_count();
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + len + cgid_len,
@@ -215,7 +215,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
{
struct task_struct *tsk = current;
struct ring_buffer_event *event = NULL;
- struct ring_buffer *buffer = NULL;
+ struct trace_buffer *buffer = NULL;
struct blk_io_trace *t;
unsigned long flags = 0;
unsigned long *sequence;
@@ -248,7 +248,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
if (blk_tracer) {
tracing_record_cmdline(current);
- buffer = blk_tr->trace_buffer.buffer;
+ buffer = blk_tr->array_buffer.buffer;
pc = preempt_count();
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + pdu_len + cgid_len,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9bf1f2cd515e..3f7ee102868a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,8 +62,6 @@
})
/* hash bits for specific function selection */
-#define FTRACE_HASH_BITS 7
-#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12
@@ -146,7 +144,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
{
struct trace_array *tr = op->private;
- if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
+ if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid))
return;
op->saved_func(ip, parent_ip, op, regs);
@@ -1103,9 +1101,6 @@ struct ftrace_page {
#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
-/* estimate from running different kernels */
-#define NR_TO_INIT 10000
-
static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;
@@ -5464,7 +5459,7 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
struct ftrace_hash *hash;
hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
- if (WARN_ON(!hash))
+ if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
return;
while (buf) {
@@ -5596,8 +5591,8 @@ static const struct file_operations ftrace_notrace_fops = {
static DEFINE_MUTEX(graph_lock);
-struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
-struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
+struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
+struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
enum graph_filter_type {
GRAPH_FILTER_NOTRACE = 0,
@@ -5872,8 +5867,15 @@ ftrace_graph_release(struct inode *inode, struct file *file)
mutex_unlock(&graph_lock);
- /* Wait till all users are no longer using the old hash */
- synchronize_rcu();
+ /*
+ * We need to do a hard force of sched synchronization.
+ * This is because we use preempt_disable() to do RCU, but
+ * the function tracers can be called where RCU is not watching
+ * (like before user_exit()). We can not rely on the RCU
+ * infrastructure to do the synchronization, thus we must do it
+ * ourselves.
+ */
+ schedule_on_each_cpu(ftrace_sync);
free_ftrace_hash(old_hash);
}
@@ -6596,7 +6598,7 @@ static void add_to_clear_hash_list(struct list_head *clear_list,
func = kmalloc(sizeof(*func), GFP_KERNEL);
if (!func) {
- WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
+ MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
return;
}
@@ -6922,7 +6924,7 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
pid_list = rcu_dereference_sched(tr->function_pids);
- this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+ this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
trace_ignore_this_task(pid_list, next));
}
@@ -6976,7 +6978,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
for_each_possible_cpu(cpu)
- per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
+ per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false;
rcu_assign_pointer(tr->function_pids, NULL);
@@ -7031,9 +7033,10 @@ static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
struct trace_array *tr = m->private;
struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
- if (v == FTRACE_NO_PIDS)
+ if (v == FTRACE_NO_PIDS) {
+ (*pos)++;
return NULL;
-
+ }
return trace_pid_next(pid_list, v, pos);
}
@@ -7100,7 +7103,7 @@ static void ignore_task_cpu(void *data)
pid_list = rcu_dereference_protected(tr->function_pids,
mutex_is_locked(&ftrace_lock));
- this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+ this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
trace_ignore_this_task(pid_list, current));
}
diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c
new file mode 100644
index 000000000000..18b0f1cbb947
--- /dev/null
+++ b/kernel/trace/kprobe_event_gen_test.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test module for in-kernel kprobe event creation and generation.
+ *
+ * Copyright (C) 2019 Tom Zanussi <zanussi@kernel.org>
+ */
+
+#include <linux/module.h>
+#include <linux/trace_events.h>
+
+/*
+ * This module is a simple test of basic functionality for in-kernel
+ * kprobe/kretprobe event creation. The first test uses
+ * kprobe_event_gen_cmd_start(), kprobe_event_add_fields() and
+ * kprobe_event_gen_cmd_end() to create a kprobe event, which is then
+ * enabled in order to generate trace output. The second creates a
+ * kretprobe event using kretprobe_event_gen_cmd_start() and
+ * kretprobe_event_gen_cmd_end(), and is also then enabled.
+ *
+ * To test, select CONFIG_KPROBE_EVENT_GEN_TEST and build the module.
+ * Then:
+ *
+ * # insmod kernel/trace/kprobe_event_gen_test.ko
+ * # cat /sys/kernel/debug/tracing/trace
+ *
+ * You should see many instances of the "gen_kprobe_test" and
+ * "gen_kretprobe_test" events in the trace buffer.
+ *
+ * To remove the events, remove the module:
+ *
+ * # rmmod kprobe_event_gen_test
+ *
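+ * For comparison, the command assembled by the first test below is
+ * expected to be roughly equivalent to creating the event by hand with
+ * something like:
+ *
+ *   # echo 'p:kprobes/gen_kprobe_test do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)' >> /sys/kernel/debug/tracing/kprobe_events
+ *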
+ */
+
+static struct trace_event_file *gen_kprobe_test;
+static struct trace_event_file *gen_kretprobe_test;
+
+/*
+ * Test to make sure we can create a kprobe event, then add more
+ * fields.
+ */
+static int __init test_gen_kprobe_cmd(void)
+{
+ struct dynevent_cmd cmd;
+ char *buf;
+ int ret;
+
+ /* Create a buffer to hold the generated command */
+ buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Before generating the command, initialize the cmd object */
+ kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+ /*
+ * Define the gen_kprobe_test event with the first 2 kprobe
+ * fields.
+ */
+ ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
+ "do_sys_open",
+ "dfd=%ax", "filename=%dx");
+ if (ret)
+ goto free;
+
+ /* Use kprobe_event_add_fields to add the rest of the fields */
+
+ ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
+ if (ret)
+ goto free;
+
+ /*
+ * This actually creates the event.
+ */
+ ret = kprobe_event_gen_cmd_end(&cmd);
+ if (ret)
+ goto free;
+
+ /*
+ * Now get the gen_kprobe_test event file. We need to prevent
+ * the instance and event from disappearing from underneath
+ * us, which trace_get_event_file() does (though in this case
+ * we're using the top-level instance which never goes away).
+ */
+ gen_kprobe_test = trace_get_event_file(NULL, "kprobes",
+ "gen_kprobe_test");
+ if (IS_ERR(gen_kprobe_test)) {
+ ret = PTR_ERR(gen_kprobe_test);
+ goto delete;
+ }
+
+ /* Enable the event or you won't see anything */
+ ret = trace_array_set_clr_event(gen_kprobe_test->tr,
+ "kprobes", "gen_kprobe_test", true);
+ if (ret) {
+ trace_put_event_file(gen_kprobe_test);
+ goto delete;
+ }
+ out:
+ return ret;
+ delete:
+ /* We got an error after creating the event, delete it */
+ ret = kprobe_event_delete("gen_kprobe_test");
+ free:
+ kfree(buf);
+
+ goto out;
+}
+
+/*
+ * Test to make sure we can create a kretprobe event.
+ */
+static int __init test_gen_kretprobe_cmd(void)
+{
+ struct dynevent_cmd cmd;
+ char *buf;
+ int ret;
+
+ /* Create a buffer to hold the generated command */
+ buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Before generating the command, initialize the cmd object */
+ kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+ /*
+ * Define the kretprobe event.
+ */
+ ret = kretprobe_event_gen_cmd_start(&cmd, "gen_kretprobe_test",
+ "do_sys_open",
+ "$retval");
+ if (ret)
+ goto free;
+
+ /*
+ * This actually creates the event.
+ */
+ ret = kretprobe_event_gen_cmd_end(&cmd);
+ if (ret)
+ goto free;
+
+ /*
+ * Now get the gen_kretprobe_test event file. We need to
+ * prevent the instance and event from disappearing from
+ * underneath us, which trace_get_event_file() does (though in
+ * this case we're using the top-level instance which never
+ * goes away).
+ */
+ gen_kretprobe_test = trace_get_event_file(NULL, "kprobes",
+ "gen_kretprobe_test");
+ if (IS_ERR(gen_kretprobe_test)) {
+ ret = PTR_ERR(gen_kretprobe_test);
+ goto delete;
+ }
+
+ /* Enable the event or you won't see anything */
+ ret = trace_array_set_clr_event(gen_kretprobe_test->tr,
+ "kprobes", "gen_kretprobe_test", true);
+ if (ret) {
+ trace_put_event_file(gen_kretprobe_test);
+ goto delete;
+ }
+ out:
+ return ret;
+ delete:
+ /* We got an error after creating the event, delete it */
+ ret = kprobe_event_delete("gen_kretprobe_test");
+ free:
+ kfree(buf);
+
+ goto out;
+}
+
+static int __init kprobe_event_gen_test_init(void)
+{
+ int ret;
+
+ ret = test_gen_kprobe_cmd();
+ if (ret)
+ return ret;
+
+ ret = test_gen_kretprobe_cmd();
+ if (ret) {
+ WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+ "kprobes",
+ "gen_kretprobe_test", false));
+ trace_put_event_file(gen_kretprobe_test);
+ WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
+ }
+
+ return ret;
+}
+
+static void __exit kprobe_event_gen_test_exit(void)
+{
+ /* Disable the event or you can't remove it */
+ WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
+ "kprobes",
+ "gen_kprobe_test", false));
+
+ /* Now give the file and instance back */
+ trace_put_event_file(gen_kprobe_test);
+
+ /* Now unregister and free the event */
+ WARN_ON(kprobe_event_delete("gen_kprobe_test"));
+
+ /* Disable the event or you can't remove it */
+ WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
+ "kprobes",
+ "gen_kretprobe_test", false));
+
+ /* Now give the file and instance back */
+ trace_put_event_file(gen_kretprobe_test);
+
+ /* Now unregister and free the event */
+ WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
+}
+
+module_init(kprobe_event_gen_test_init)
+module_exit(kprobe_event_gen_test_exit)
+
+MODULE_AUTHOR("Tom Zanussi");
+MODULE_DESCRIPTION("kprobe event generation test");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3f655371eaf6..61f0e92ace99 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -300,8 +300,6 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
/* Missed count stored at end */
#define RB_MISSED_STORED (1 << 30)
-#define RB_MISSED_FLAGS (RB_MISSED_EVENTS|RB_MISSED_STORED)
-
struct buffer_data_page {
u64 time_stamp; /* page time stamp */
local_t commit; /* write committed index */
@@ -443,7 +441,7 @@ enum {
struct ring_buffer_per_cpu {
int cpu;
atomic_t record_disabled;
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
raw_spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock;
struct lock_class_key lock_key;
@@ -482,7 +480,7 @@ struct ring_buffer_per_cpu {
struct rb_irq_work irq_work;
};
-struct ring_buffer {
+struct trace_buffer {
unsigned flags;
int cpus;
atomic_t record_disabled;
@@ -518,7 +516,7 @@ struct ring_buffer_iter {
*
* Returns the number of pages used by a per_cpu buffer of the ring buffer.
*/
-size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
+size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
return buffer->buffers[cpu]->nr_pages;
}
@@ -530,7 +528,7 @@ size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
*
* Returns the number of pages that have content in the ring buffer.
*/
-size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
+size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
size_t read;
size_t cnt;
@@ -573,7 +571,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
* as data is added to any of the @buffer's cpu buffers. Otherwise
* it will wait for data to be added to a specific cpu buffer.
*/
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
DEFINE_WAIT(wait);
@@ -684,7 +682,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
* Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
* zero otherwise.
*/
-__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -742,13 +740,13 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
-static inline u64 rb_time_stamp(struct ring_buffer *buffer)
+static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
/* shift to debug/test normalization and TIME_EXTENTS */
return buffer->clock() << DEBUG_SHIFT;
}
-u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu)
{
u64 time;
@@ -760,7 +758,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
-void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
int cpu, u64 *ts)
{
/* Just stupid testing the normalize function and deltas */
@@ -1283,7 +1281,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
}
static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
@@ -1368,16 +1366,17 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
* __ring_buffer_alloc - allocate a new ring_buffer
* @size: the size in bytes per cpu that is needed.
* @flags: attributes to set for the ring buffer.
+ * @key: ring buffer reader_lock_key.
*
* Currently the only flag that is available is the RB_FL_OVERWRITE
* flag. This flag means that the buffer will overwrite old data
* when the buffer wraps. If this flag is not set, the buffer will
* drop data when the tail hits the head.
*/
-struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
struct lock_class_key *key)
{
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
long nr_pages;
int bsize;
int cpu;
@@ -1447,7 +1446,7 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
* @buffer: the buffer to free.
*/
void
-ring_buffer_free(struct ring_buffer *buffer)
+ring_buffer_free(struct trace_buffer *buffer)
{
int cpu;
@@ -1463,18 +1462,18 @@ ring_buffer_free(struct ring_buffer *buffer)
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
-void ring_buffer_set_clock(struct ring_buffer *buffer,
+void ring_buffer_set_clock(struct trace_buffer *buffer,
u64 (*clock)(void))
{
buffer->clock = clock;
}
-void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs)
+void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
{
buffer->time_stamp_abs = abs;
}
-bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer)
+bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
{
return buffer->time_stamp_abs;
}
@@ -1712,7 +1711,7 @@ static void update_pages_handler(struct work_struct *work)
*
* Returns 0 on success and < 0 on failure.
*/
-int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
int cpu_id)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -1891,7 +1890,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
+void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
{
mutex_lock(&buffer->mutex);
if (val)
@@ -2206,7 +2205,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
{
struct buffer_page *tail_page = info->tail_page;
struct buffer_page *commit_page = cpu_buffer->commit_page;
- struct ring_buffer *buffer = cpu_buffer->buffer;
+ struct trace_buffer *buffer = cpu_buffer->buffer;
struct buffer_page *next_page;
int ret;
@@ -2330,11 +2329,11 @@ static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
/**
* rb_update_event - update event type and data
+ * @cpu_buffer: The per cpu buffer of the @event
* @event: the event to update
- * @type: the type of event
- * @length: the size of the event field in the ring buffer
+ * @info: The info to update the @event with (contains length and delta)
*
- * Update the type and data fields of the event. The length
+ * Update the type and data fields of the @event. The length
* is the actual size that is written to the ring buffer,
* and with this, we can determine what to place into the
* data field.
@@ -2609,7 +2608,7 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
}
static __always_inline void
-rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
size_t nr_pages;
size_t dirty;
@@ -2733,7 +2732,7 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
* Call this function before calling another ring_buffer_lock_reserve() and
* call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
*/
-void ring_buffer_nest_start(struct ring_buffer *buffer)
+void ring_buffer_nest_start(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
int cpu;
@@ -2753,7 +2752,7 @@ void ring_buffer_nest_start(struct ring_buffer *buffer)
* Must be called after ring_buffer_nest_start() and after the
* ring_buffer_unlock_commit().
*/
-void ring_buffer_nest_end(struct ring_buffer *buffer)
+void ring_buffer_nest_end(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
int cpu;
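
Taken together, ring_buffer_nest_start()/ring_buffer_nest_end() bracket a nested write issued from a context that is already inside a ring buffer reservation (for example, tracing code that itself calls trace_printk()). A minimal usage sketch, illustrative only and not taken verbatim from this patch:

	ring_buffer_nest_start(buffer);
	event = ring_buffer_lock_reserve(buffer, length);
	if (event) {
		/* fill in ring_buffer_event_data(event) before committing */
		ring_buffer_unlock_commit(buffer, event);
	}
	ring_buffer_nest_end(buffer);

This is the same pattern the trace.c hunks below apply to __trace_puts() and __trace_bputs().
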
@@ -2775,7 +2774,7 @@ void ring_buffer_nest_end(struct ring_buffer *buffer)
*
* Must be paired with ring_buffer_lock_reserve.
*/
-int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+int ring_buffer_unlock_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -2868,7 +2867,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
}
static __always_inline struct ring_buffer_event *
-rb_reserve_next_event(struct ring_buffer *buffer,
+rb_reserve_next_event(struct trace_buffer *buffer,
struct ring_buffer_per_cpu *cpu_buffer,
unsigned long length)
{
@@ -2961,7 +2960,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
* If NULL is returned, then nothing has been allocated or locked.
*/
struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
+ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
@@ -3062,7 +3061,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
* If this function is called, do not call ring_buffer_unlock_commit on
* the event.
*/
-void ring_buffer_discard_commit(struct ring_buffer *buffer,
+void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3113,7 +3112,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
* Note, like ring_buffer_lock_reserve, the length is the length of the data
* and not the length of the event which would hold the header.
*/
-int ring_buffer_write(struct ring_buffer *buffer,
+int ring_buffer_write(struct trace_buffer *buffer,
unsigned long length,
void *data)
{
@@ -3193,7 +3192,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
*
* The caller should call synchronize_rcu() after this.
*/
-void ring_buffer_record_disable(struct ring_buffer *buffer)
+void ring_buffer_record_disable(struct trace_buffer *buffer)
{
atomic_inc(&buffer->record_disabled);
}
@@ -3206,7 +3205,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
* Note, multiple disables will need the same number of enables
* to truly enable the writing (much like preempt_disable).
*/
-void ring_buffer_record_enable(struct ring_buffer *buffer)
+void ring_buffer_record_enable(struct trace_buffer *buffer)
{
atomic_dec(&buffer->record_disabled);
}
@@ -3223,7 +3222,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 * it works like an on/off switch, whereas the disable() version
 * must be paired with an enable().
*/
-void ring_buffer_record_off(struct ring_buffer *buffer)
+void ring_buffer_record_off(struct trace_buffer *buffer)
{
unsigned int rd;
unsigned int new_rd;
@@ -3246,7 +3245,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_off);
 * it works like an on/off switch, whereas the enable() version
* must be paired with a disable().
*/
-void ring_buffer_record_on(struct ring_buffer *buffer)
+void ring_buffer_record_on(struct trace_buffer *buffer)
{
unsigned int rd;
unsigned int new_rd;
@@ -3264,7 +3263,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_on);
*
* Returns true if the ring buffer is in a state that it accepts writes.
*/
-bool ring_buffer_record_is_on(struct ring_buffer *buffer)
+bool ring_buffer_record_is_on(struct trace_buffer *buffer)
{
return !atomic_read(&buffer->record_disabled);
}
@@ -3280,7 +3279,7 @@ bool ring_buffer_record_is_on(struct ring_buffer *buffer)
* ring_buffer_record_disable(), as that is a temporary disabling of
* the ring buffer.
*/
-bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
{
return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
}
@@ -3295,7 +3294,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
*
* The caller should call synchronize_rcu() after this.
*/
-void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3315,7 +3314,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
* Note, multiple disables will need the same number of enables
* to truly enable the writing (much like preempt_disable).
*/
-void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3345,7 +3344,7 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
-u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
{
unsigned long flags;
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3378,7 +3377,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
-unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
@@ -3398,7 +3397,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the entries from.
*/
-unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3417,7 +3416,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
-unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
@@ -3440,7 +3439,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
* @cpu: The per CPU buffer to get the number of overruns from
*/
unsigned long
-ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
@@ -3462,7 +3461,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
 * @cpu: The per CPU buffer to get the number of dropped events from
*/
unsigned long
-ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
@@ -3483,7 +3482,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
* @cpu: The per CPU buffer to get the number of events read
*/
unsigned long
-ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -3502,7 +3501,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
* Returns the total number of entries in the ring buffer
* (all CPU entries)
*/
-unsigned long ring_buffer_entries(struct ring_buffer *buffer)
+unsigned long ring_buffer_entries(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long entries = 0;
@@ -3525,7 +3524,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries);
* Returns the total number of overruns in the ring buffer
* (all CPU entries)
*/
-unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long overruns = 0;
@@ -3949,7 +3948,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_peek);
static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
int nr_loops = 0;
@@ -4077,7 +4076,7 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
* not consume the data.
*/
struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
@@ -4141,7 +4140,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
* and eventually empty the ring buffer if the producer is slower.
*/
struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_per_cpu *cpu_buffer;
@@ -4201,7 +4200,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
* This overall must be paired with ring_buffer_read_finish.
*/
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
+ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_iter *iter;
@@ -4331,8 +4330,9 @@ EXPORT_SYMBOL_GPL(ring_buffer_read);
/**
* ring_buffer_size - return the size of the ring buffer (in bytes)
* @buffer: The ring buffer.
+ * @cpu: The CPU to get ring buffer size from.
*/
-unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
{
/*
* Earlier, this method returned
@@ -4398,7 +4398,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
* @buffer: The ring buffer to reset a per cpu buffer of
* @cpu: The CPU buffer to be reset
*/
-void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
unsigned long flags;
@@ -4435,7 +4435,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
* ring_buffer_reset - reset a ring buffer
* @buffer: The ring buffer to reset all cpu buffers
*/
-void ring_buffer_reset(struct ring_buffer *buffer)
+void ring_buffer_reset(struct trace_buffer *buffer)
{
int cpu;
@@ -4448,7 +4448,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
 * ring_buffer_empty - is the ring buffer empty?
* @buffer: The ring buffer to test
*/
-bool ring_buffer_empty(struct ring_buffer *buffer)
+bool ring_buffer_empty(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
@@ -4478,7 +4478,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
* @buffer: The ring buffer
* @cpu: The CPU buffer to test
*/
-bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
+bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
@@ -4504,14 +4504,15 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
* ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
* @buffer_a: One buffer to swap with
* @buffer_b: The other buffer to swap with
+ * @cpu: the CPU of the buffers to swap
*
* This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and has another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
* used at the moment.
*/
-int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
- struct ring_buffer *buffer_b, int cpu)
+int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+ struct trace_buffer *buffer_b, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer_a;
struct ring_buffer_per_cpu *cpu_buffer_b;
@@ -4590,7 +4591,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
* Returns:
* The page allocated, or ERR_PTR
*/
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
+void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_data_page *bpage = NULL;
@@ -4637,7 +4638,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
*
* Free a page allocated from ring_buffer_alloc_read_page.
*/
-void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct buffer_data_page *bpage = data;
@@ -4697,7 +4698,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
* >=0 if data has been transferred, returns the offset of consumed data.
* <0 if no data has been transferred.
*/
-int ring_buffer_read_page(struct ring_buffer *buffer,
+int ring_buffer_read_page(struct trace_buffer *buffer,
void **data_page, size_t len, int cpu, int full)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
@@ -4868,12 +4869,12 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_page);
*/
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
long nr_pages_same;
int cpu_i;
unsigned long nr_pages;
- buffer = container_of(node, struct ring_buffer, node);
+ buffer = container_of(node, struct trace_buffer, node);
if (cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
@@ -4923,7 +4924,7 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
static struct task_struct *rb_threads[NR_CPUS] __initdata;
struct rb_test_data {
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
unsigned long events;
unsigned long bytes_written;
unsigned long bytes_alloc;
@@ -5065,7 +5066,7 @@ static __init int rb_hammer_test(void *arg)
static __init int test_ringbuffer(void)
{
struct task_struct *rb_hammer;
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
int cpu;
int ret = 0;
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 32149e46551c..8df0aa810950 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -29,7 +29,7 @@ static int reader_finish;
static DECLARE_COMPLETION(read_start);
static DECLARE_COMPLETION(read_done);
-static struct ring_buffer *buffer;
+static struct trace_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;
diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
new file mode 100644
index 000000000000..4aefe003cb7c
--- /dev/null
+++ b/kernel/trace/synth_event_gen_test.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test module for in-kernel synthetic event creation and generation.
+ *
+ * Copyright (C) 2019 Tom Zanussi <zanussi@kernel.org>
+ */
+
+#include <linux/module.h>
+#include <linux/trace_events.h>
+
+/*
+ * This module is a simple test of basic functionality for in-kernel
+ * synthetic event creation and generation. The first and second tests
+ * use synth_event_gen_cmd_start() and synth_event_add_field(), while the
+ * third uses synth_event_create() to do it all at once with a static
+ * field array.
+ *
+ * Following that are a few examples using the created events to test
+ * various ways of tracing a synthetic event.
+ *
+ * To test, select CONFIG_SYNTH_EVENT_GEN_TEST and build the module.
+ * Then:
+ *
+ * # insmod kernel/trace/synth_event_gen_test.ko
+ * # cat /sys/kernel/debug/tracing/trace
+ *
+ * You should see several events in the trace buffer -
+ * "create_synth_test", "empty_synth_test", and several instances of
+ * "gen_synth_test".
+ *
+ * To remove the events, remove the module:
+ *
+ * # rmmod synth_event_gen_test
+ *
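+ * For comparison, the field list assembled by the first test below is
+ * expected to correspond roughly to a hand-written definition along the
+ * lines of:
+ *
+ *   # echo 'gen_synth_test pid_t next_pid_field; char[16] next_comm_field; u64 ts_ns; u64 ts_ms; unsigned int cpu; char[64] my_string_field; int my_int_field' >> /sys/kernel/debug/tracing/synthetic_events
+ *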
+ */
+
+static struct trace_event_file *create_synth_test;
+static struct trace_event_file *empty_synth_test;
+static struct trace_event_file *gen_synth_test;
+
+/*
+ * Test to make sure we can create a synthetic event, then add more
+ * fields.
+ */
+static int __init test_gen_synth_cmd(void)
+{
+ struct dynevent_cmd cmd;
+ u64 vals[7];
+ char *buf;
+ int ret;
+
+ /* Create a buffer to hold the generated command */
+ buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Before generating the command, initialize the cmd object */
+ synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+ /*
+	 * Create the gen_synth_test synthetic event with the
+ * first 4 fields.
+ */
+ ret = synth_event_gen_cmd_start(&cmd, "gen_synth_test", THIS_MODULE,
+ "pid_t", "next_pid_field",
+ "char[16]", "next_comm_field",
+ "u64", "ts_ns",
+ "u64", "ts_ms");
+ if (ret)
+ goto free;
+
+ /* Use synth_event_add_field to add the rest of the fields */
+
+ ret = synth_event_add_field(&cmd, "unsigned int", "cpu");
+ if (ret)
+ goto free;
+
+ ret = synth_event_add_field(&cmd, "char[64]", "my_string_field");
+ if (ret)
+ goto free;
+
+ ret = synth_event_add_field(&cmd, "int", "my_int_field");
+ if (ret)
+ goto free;
+
+ ret = synth_event_gen_cmd_end(&cmd);
+ if (ret)
+ goto free;
+
+ /*
+ * Now get the gen_synth_test event file. We need to prevent
+ * the instance and event from disappearing from underneath
+ * us, which trace_get_event_file() does (though in this case
+ * we're using the top-level instance which never goes away).
+ */
+ gen_synth_test = trace_get_event_file(NULL, "synthetic",
+ "gen_synth_test");
+ if (IS_ERR(gen_synth_test)) {
+ ret = PTR_ERR(gen_synth_test);
+ goto delete;
+ }
+
+ /* Enable the event or you won't see anything */
+ ret = trace_array_set_clr_event(gen_synth_test->tr,
+ "synthetic", "gen_synth_test", true);
+ if (ret) {
+ trace_put_event_file(gen_synth_test);
+ goto delete;
+ }
+
+ /* Create some bogus values just for testing */
+
+ vals[0] = 777; /* next_pid_field */
+ vals[1] = (u64)"hula hoops"; /* next_comm_field */
+ vals[2] = 1000000; /* ts_ns */
+ vals[3] = 1000; /* ts_ms */
+ vals[4] = smp_processor_id(); /* cpu */
+ vals[5] = (u64)"thneed"; /* my_string_field */
+ vals[6] = 598; /* my_int_field */
+
+ /* Now generate a gen_synth_test event */
+ ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals));
+ out:
+ return ret;
+ delete:
+ /* We got an error after creating the event, delete it */
+ synth_event_delete("gen_synth_test");
+ free:
+ kfree(buf);
+
+ goto out;
+}
+
+/*
+ * Test to make sure we can create an initially empty synthetic event,
+ * then add all the fields.
+ */
+static int __init test_empty_synth_event(void)
+{
+ struct dynevent_cmd cmd;
+ u64 vals[7];
+ char *buf;
+ int ret;
+
+ /* Create a buffer to hold the generated command */
+ buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Before generating the command, initialize the cmd object */
+ synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+ /*
+ * Create the empty_synth_test synthetic event with no fields.
+ */
+ ret = synth_event_gen_cmd_start(&cmd, "empty_synth_test", THIS_MODULE);
+ if (ret)
+ goto free;
+
+ /* Use synth_event_add_field to add all of the fields */
+
+ ret = synth_event_add_field(&cmd, "pid_t", "next_pid_field");
+ if (ret)
+ goto free;
+
+ ret = synth_event_add_field(&cmd, "char[16]", "next_comm_field");
+ if (ret)
+ goto free;
+
+ ret = synth_event_add_field(&cmd, "u64", "ts_ns");
+ if (ret)
+ goto free;
+
+ ret = synth_event_add_field(&cmd, "u64", "ts_ms");
+ if (ret)
+ goto free;
+
+ ret = synth_event_add_field(&cmd, "unsigned int", "cpu");
+ if (ret)
+ goto free;
+
+ ret = synth_event_add_field(&cmd, "char[64]", "my_string_field");
+ if (ret)
+ goto free;
+
+ ret = synth_event_add_field(&cmd, "int", "my_int_field");
+ if (ret)
+ goto free;
+
+ /* All fields have been added, close and register the synth event */
+
+ ret = synth_event_gen_cmd_end(&cmd);
+ if (ret)
+ goto free;
+
+ /*
+ * Now get the empty_synth_test event file. We need to
+ * prevent the instance and event from disappearing from
+ * underneath us, which trace_get_event_file() does (though in
+ * this case we're using the top-level instance which never
+ * goes away).
+ */
+ empty_synth_test = trace_get_event_file(NULL, "synthetic",
+ "empty_synth_test");
+ if (IS_ERR(empty_synth_test)) {
+ ret = PTR_ERR(empty_synth_test);
+ goto delete;
+ }
+
+ /* Enable the event or you won't see anything */
+ ret = trace_array_set_clr_event(empty_synth_test->tr,
+ "synthetic", "empty_synth_test", true);
+ if (ret) {
+ trace_put_event_file(empty_synth_test);
+ goto delete;
+ }
+
+ /* Create some bogus values just for testing */
+
+ vals[0] = 777; /* next_pid_field */
+ vals[1] = (u64)"tiddlywinks"; /* next_comm_field */
+ vals[2] = 1000000; /* ts_ns */
+ vals[3] = 1000; /* ts_ms */
+ vals[4] = smp_processor_id(); /* cpu */
+ vals[5] = (u64)"thneed_2.0"; /* my_string_field */
+ vals[6] = 399; /* my_int_field */
+
+ /* Now trace an empty_synth_test event */
+ ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals));
+ out:
+ return ret;
+ delete:
+ /* We got an error after creating the event, delete it */
+ synth_event_delete("empty_synth_test");
+ free:
+ kfree(buf);
+
+ goto out;
+}
+
+static struct synth_field_desc create_synth_test_fields[] = {
+ { .type = "pid_t", .name = "next_pid_field" },
+ { .type = "char[16]", .name = "next_comm_field" },
+ { .type = "u64", .name = "ts_ns" },
+ { .type = "u64", .name = "ts_ms" },
+ { .type = "unsigned int", .name = "cpu" },
+ { .type = "char[64]", .name = "my_string_field" },
+ { .type = "int", .name = "my_int_field" },
+};
+
+/*
+ * Test synthetic event creation all at once from array of field
+ * descriptors.
+ */
+static int __init test_create_synth_event(void)
+{
+ u64 vals[7];
+ int ret;
+
+ /* Create the create_synth_test event with the fields above */
+ ret = synth_event_create("create_synth_test",
+ create_synth_test_fields,
+ ARRAY_SIZE(create_synth_test_fields),
+ THIS_MODULE);
+ if (ret)
+ goto out;
+
+ /*
+ * Now get the create_synth_test event file. We need to
+ * prevent the instance and event from disappearing from
+ * underneath us, which trace_get_event_file() does (though in
+ * this case we're using the top-level instance which never
+ * goes away).
+ */
+ create_synth_test = trace_get_event_file(NULL, "synthetic",
+ "create_synth_test");
+ if (IS_ERR(create_synth_test)) {
+ ret = PTR_ERR(create_synth_test);
+ goto delete;
+ }
+
+ /* Enable the event or you won't see anything */
+ ret = trace_array_set_clr_event(create_synth_test->tr,
+ "synthetic", "create_synth_test", true);
+ if (ret) {
+ trace_put_event_file(create_synth_test);
+ goto delete;
+ }
+
+ /* Create some bogus values just for testing */
+
+ vals[0] = 777; /* next_pid_field */
+ vals[1] = (u64)"tiddlywinks"; /* next_comm_field */
+ vals[2] = 1000000; /* ts_ns */
+ vals[3] = 1000; /* ts_ms */
+ vals[4] = smp_processor_id(); /* cpu */
+ vals[5] = (u64)"thneed"; /* my_string_field */
+ vals[6] = 398; /* my_int_field */
+
+ /* Now generate a create_synth_test event */
+ ret = synth_event_trace_array(create_synth_test, vals, ARRAY_SIZE(vals));
+ out:
+ return ret;
+ delete:
+ /* We got an error after creating the event, delete it */
+ ret = synth_event_delete("create_synth_test");
+
+ goto out;
+}
+
+/*
+ * Test tracing a synthetic event by reserving trace buffer space,
+ * then filling in fields one after another.
+ */
+static int __init test_add_next_synth_val(void)
+{
+ struct synth_event_trace_state trace_state;
+ int ret;
+
+ /* Start by reserving space in the trace buffer */
+ ret = synth_event_trace_start(gen_synth_test, &trace_state);
+ if (ret)
+ return ret;
+
+ /* Write some bogus values into the trace buffer, one after another */
+
+ /* next_pid_field */
+ ret = synth_event_add_next_val(777, &trace_state);
+ if (ret)
+ goto out;
+
+ /* next_comm_field */
+ ret = synth_event_add_next_val((u64)"slinky", &trace_state);
+ if (ret)
+ goto out;
+
+ /* ts_ns */
+ ret = synth_event_add_next_val(1000000, &trace_state);
+ if (ret)
+ goto out;
+
+ /* ts_ms */
+ ret = synth_event_add_next_val(1000, &trace_state);
+ if (ret)
+ goto out;
+
+ /* cpu */
+ ret = synth_event_add_next_val(smp_processor_id(), &trace_state);
+ if (ret)
+ goto out;
+
+ /* my_string_field */
+ ret = synth_event_add_next_val((u64)"thneed_2.01", &trace_state);
+ if (ret)
+ goto out;
+
+ /* my_int_field */
+ ret = synth_event_add_next_val(395, &trace_state);
+ out:
+ /* Finally, commit the event */
+ ret = synth_event_trace_end(&trace_state);
+
+ return ret;
+}
+
+/*
+ * Test tracing a synthetic event by reserving trace buffer space,
+ * then filling in fields using field names, which can be done in any
+ * order.
+ */
+static int __init test_add_synth_val(void)
+{
+ struct synth_event_trace_state trace_state;
+ int ret;
+
+ /* Start by reserving space in the trace buffer */
+ ret = synth_event_trace_start(gen_synth_test, &trace_state);
+ if (ret)
+ return ret;
+
+ /* Write some bogus values into the trace buffer, using field names */
+
+ ret = synth_event_add_val("ts_ns", 1000000, &trace_state);
+ if (ret)
+ goto out;
+
+ ret = synth_event_add_val("ts_ms", 1000, &trace_state);
+ if (ret)
+ goto out;
+
+ ret = synth_event_add_val("cpu", smp_processor_id(), &trace_state);
+ if (ret)
+ goto out;
+
+ ret = synth_event_add_val("next_pid_field", 777, &trace_state);
+ if (ret)
+ goto out;
+
+ ret = synth_event_add_val("next_comm_field", (u64)"silly putty",
+ &trace_state);
+ if (ret)
+ goto out;
+
+ ret = synth_event_add_val("my_string_field", (u64)"thneed_9",
+ &trace_state);
+ if (ret)
+ goto out;
+
+ ret = synth_event_add_val("my_int_field", 3999, &trace_state);
+ out:
+ /* Finally, commit the event */
+ ret = synth_event_trace_end(&trace_state);
+
+ return ret;
+}
+
+/*
+ * Test tracing a synthetic event all at once from array of values.
+ */
+static int __init test_trace_synth_event(void)
+{
+ int ret;
+
+ /* Trace some bogus values just for testing */
+ ret = synth_event_trace(create_synth_test, 7, /* number of values */
+ 444, /* next_pid_field */
+ (u64)"clackers", /* next_comm_field */
+ 1000000, /* ts_ns */
+ 1000, /* ts_ms */
+ smp_processor_id(), /* cpu */
+ (u64)"Thneed", /* my_string_field */
+ 999); /* my_int_field */
+ return ret;
+}
+
+static int __init synth_event_gen_test_init(void)
+{
+ int ret;
+
+ ret = test_gen_synth_cmd();
+ if (ret)
+ return ret;
+
+ ret = test_empty_synth_event();
+ if (ret) {
+ WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
+ "synthetic",
+ "gen_synth_test", false));
+ trace_put_event_file(gen_synth_test);
+ WARN_ON(synth_event_delete("gen_synth_test"));
+ goto out;
+ }
+
+ ret = test_create_synth_event();
+ if (ret) {
+ WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
+ "synthetic",
+ "gen_synth_test", false));
+ trace_put_event_file(gen_synth_test);
+ WARN_ON(synth_event_delete("gen_synth_test"));
+
+ WARN_ON(trace_array_set_clr_event(empty_synth_test->tr,
+ "synthetic",
+ "empty_synth_test", false));
+ trace_put_event_file(empty_synth_test);
+ WARN_ON(synth_event_delete("empty_synth_test"));
+ goto out;
+ }
+
+ ret = test_add_next_synth_val();
+ WARN_ON(ret);
+
+ ret = test_add_synth_val();
+ WARN_ON(ret);
+
+ ret = test_trace_synth_event();
+ WARN_ON(ret);
+ out:
+ return ret;
+}
+
+static void __exit synth_event_gen_test_exit(void)
+{
+ /* Disable the event or you can't remove it */
+ WARN_ON(trace_array_set_clr_event(gen_synth_test->tr,
+ "synthetic",
+ "gen_synth_test", false));
+
+ /* Now give the file and instance back */
+ trace_put_event_file(gen_synth_test);
+
+ /* Now unregister and free the synthetic event */
+ WARN_ON(synth_event_delete("gen_synth_test"));
+
+ /* Disable the event or you can't remove it */
+ WARN_ON(trace_array_set_clr_event(empty_synth_test->tr,
+ "synthetic",
+ "empty_synth_test", false));
+
+ /* Now give the file and instance back */
+ trace_put_event_file(empty_synth_test);
+
+ /* Now unregister and free the synthetic event */
+ WARN_ON(synth_event_delete("empty_synth_test"));
+
+ /* Disable the event or you can't remove it */
+ WARN_ON(trace_array_set_clr_event(create_synth_test->tr,
+ "synthetic",
+ "create_synth_test", false));
+
+ /* Now give the file and instance back */
+ trace_put_event_file(create_synth_test);
+
+ /* Now unregister and free the synthetic event */
+ WARN_ON(synth_event_delete("create_synth_test"));
+}
+
+module_init(synth_event_gen_test_init)
+module_exit(synth_event_gen_test_exit)
+
+MODULE_AUTHOR("Tom Zanussi");
+MODULE_DESCRIPTION("synthetic event generation test");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5e02d65505c1..c797a15a1fc7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -162,8 +162,8 @@ union trace_eval_map_item {
static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
-static int tracing_set_tracer(struct trace_array *tr, const char *buf);
-static void ftrace_trace_userstack(struct ring_buffer *buffer,
+int tracing_set_tracer(struct trace_array *tr, const char *buf);
+static void ftrace_trace_userstack(struct trace_buffer *buffer,
unsigned long flags, int pc);
#define MAX_TRACER_SIZE 100
@@ -338,7 +338,7 @@ int tracing_check_open_get_tr(struct trace_array *tr)
}
int call_filter_check_discard(struct trace_event_call *call, void *rec,
- struct ring_buffer *buffer,
+ struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
@@ -603,7 +603,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
return read;
}
-static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
+static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
u64 ts;
@@ -619,7 +619,7 @@ static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
u64 ftrace_now(int cpu)
{
- return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+ return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}
/**
@@ -747,22 +747,22 @@ static inline void trace_access_lock_init(void)
#endif
#ifdef CONFIG_STACKTRACE
-static void __ftrace_trace_stack(struct ring_buffer *buffer,
+static void __ftrace_trace_stack(struct trace_buffer *buffer,
unsigned long flags,
int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
- struct ring_buffer *buffer,
+ struct trace_buffer *buffer,
unsigned long flags,
int skip, int pc, struct pt_regs *regs);
#else
-static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
+static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
unsigned long flags,
int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
- struct ring_buffer *buffer,
+ struct trace_buffer *buffer,
unsigned long flags,
int skip, int pc, struct pt_regs *regs)
{
@@ -780,7 +780,7 @@ trace_event_setup(struct ring_buffer_event *event,
}
static __always_inline struct ring_buffer_event *
-__trace_buffer_lock_reserve(struct ring_buffer *buffer,
+__trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
unsigned long flags, int pc)
@@ -796,8 +796,8 @@ __trace_buffer_lock_reserve(struct ring_buffer *buffer,
void tracer_tracing_on(struct trace_array *tr)
{
- if (tr->trace_buffer.buffer)
- ring_buffer_record_on(tr->trace_buffer.buffer);
+ if (tr->array_buffer.buffer)
+ ring_buffer_record_on(tr->array_buffer.buffer);
/*
* This flag is looked at when buffers haven't been allocated
* yet, or by some tracers (like irqsoff), that just want to
@@ -825,7 +825,7 @@ EXPORT_SYMBOL_GPL(tracing_on);
static __always_inline void
-__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
__this_cpu_write(trace_taskinfo_save, true);
@@ -848,7 +848,7 @@ __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *eve
int __trace_puts(unsigned long ip, const char *str, int size)
{
struct ring_buffer_event *event;
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
struct print_entry *entry;
unsigned long irq_flags;
int alloc;
@@ -865,11 +865,14 @@ int __trace_puts(unsigned long ip, const char *str, int size)
alloc = sizeof(*entry) + size + 2; /* possible \n added */
local_save_flags(irq_flags);
- buffer = global_trace.trace_buffer.buffer;
+ buffer = global_trace.array_buffer.buffer;
+ ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
irq_flags, pc);
- if (!event)
- return 0;
+ if (!event) {
+ size = 0;
+ goto out;
+ }
entry = ring_buffer_event_data(event);
entry->ip = ip;
@@ -885,7 +888,8 @@ int __trace_puts(unsigned long ip, const char *str, int size)
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
-
+ out:
+ ring_buffer_nest_end(buffer);
return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
@@ -898,10 +902,11 @@ EXPORT_SYMBOL_GPL(__trace_puts);
int __trace_bputs(unsigned long ip, const char *str)
{
struct ring_buffer_event *event;
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
struct bputs_entry *entry;
unsigned long irq_flags;
int size = sizeof(struct bputs_entry);
+ int ret = 0;
int pc;
if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
@@ -913,11 +918,13 @@ int __trace_bputs(unsigned long ip, const char *str)
return 0;
local_save_flags(irq_flags);
- buffer = global_trace.trace_buffer.buffer;
+ buffer = global_trace.array_buffer.buffer;
+
+ ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
irq_flags, pc);
if (!event)
- return 0;
+ goto out;
entry = ring_buffer_event_data(event);
entry->ip = ip;
@@ -926,7 +933,10 @@ int __trace_bputs(unsigned long ip, const char *str)
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
- return 1;
+ ret = 1;
+ out:
+ ring_buffer_nest_end(buffer);
+ return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
@@ -1036,9 +1046,9 @@ void *tracing_cond_snapshot_data(struct trace_array *tr)
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
-static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
- struct trace_buffer *size_buf, int cpu_id);
-static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
+static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
+ struct array_buffer *size_buf, int cpu_id);
+static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
@@ -1048,7 +1058,7 @@ int tracing_alloc_snapshot_instance(struct trace_array *tr)
/* allocate spare buffer */
ret = resize_buffer_duplicate_size(&tr->max_buffer,
- &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
+ &tr->array_buffer, RING_BUFFER_ALL_CPUS);
if (ret < 0)
return ret;
@@ -1251,8 +1261,8 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
void tracer_tracing_off(struct trace_array *tr)
{
- if (tr->trace_buffer.buffer)
- ring_buffer_record_off(tr->trace_buffer.buffer);
+ if (tr->array_buffer.buffer)
+ ring_buffer_record_off(tr->array_buffer.buffer);
/*
* This flag is looked at when buffers haven't been allocated
* yet, or by some tracers (like irqsoff), that just want to
@@ -1294,8 +1304,8 @@ void disable_trace_on_warning(void)
*/
bool tracer_tracing_is_on(struct trace_array *tr)
{
- if (tr->trace_buffer.buffer)
- return ring_buffer_record_is_on(tr->trace_buffer.buffer);
+ if (tr->array_buffer.buffer)
+ return ring_buffer_record_is_on(tr->array_buffer.buffer);
return !tr->buffer_disabled;
}
@@ -1590,8 +1600,8 @@ void latency_fsnotify(struct trace_array *tr)
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
- struct trace_buffer *trace_buf = &tr->trace_buffer;
- struct trace_buffer *max_buf = &tr->max_buffer;
+ struct array_buffer *trace_buf = &tr->array_buffer;
+ struct array_buffer *max_buf = &tr->max_buffer;
struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
@@ -1649,8 +1659,8 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
arch_spin_lock(&tr->max_lock);
- /* Inherit the recordable setting from trace_buffer */
- if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+ /* Inherit the recordable setting from array_buffer */
+ if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
ring_buffer_record_on(tr->max_buffer.buffer);
else
ring_buffer_record_off(tr->max_buffer.buffer);
@@ -1659,7 +1669,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
goto out_unlock;
#endif
- swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
+ swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
__update_max_tr(tr, tsk, cpu);
@@ -1692,7 +1702,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
arch_spin_lock(&tr->max_lock);
- ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
+ ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
if (ret == -EBUSY) {
/*
@@ -1718,7 +1728,7 @@ static int wait_on_pipe(struct trace_iterator *iter, int full)
if (trace_buffer_iter(iter, iter->cpu_file))
return 0;
- return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
+ return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
full);
}
@@ -1769,7 +1779,7 @@ static int run_tracer_selftest(struct tracer *type)
* internal tracing to verify that everything is in order.
* If we fail, we do not register this tracer.
*/
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(&tr->array_buffer);
tr->current_trace = type;
@@ -1795,7 +1805,7 @@ static int run_tracer_selftest(struct tracer *type)
return -1;
}
/* Only reset on passing, to avoid touching corrupted buffers */
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
if (type->use_max_tr) {
@@ -1962,9 +1972,9 @@ int __init register_tracer(struct tracer *type)
return ret;
}
-static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
+static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
{
- struct ring_buffer *buffer = buf->buffer;
+ struct trace_buffer *buffer = buf->buffer;
if (!buffer)
return;
@@ -1978,9 +1988,9 @@ static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
ring_buffer_record_enable(buffer);
}
-void tracing_reset_online_cpus(struct trace_buffer *buf)
+void tracing_reset_online_cpus(struct array_buffer *buf)
{
- struct ring_buffer *buffer = buf->buffer;
+ struct trace_buffer *buffer = buf->buffer;
int cpu;
if (!buffer)
@@ -2008,7 +2018,7 @@ void tracing_reset_all_online_cpus(void)
if (!tr->clear_trace)
continue;
tr->clear_trace = false;
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
tracing_reset_online_cpus(&tr->max_buffer);
#endif
@@ -2098,7 +2108,7 @@ int is_tracing_stopped(void)
*/
void tracing_start(void)
{
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
unsigned long flags;
if (tracing_disabled)
@@ -2117,7 +2127,7 @@ void tracing_start(void)
/* Prevent the buffers from switching */
arch_spin_lock(&global_trace.max_lock);
- buffer = global_trace.trace_buffer.buffer;
+ buffer = global_trace.array_buffer.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
@@ -2135,7 +2145,7 @@ void tracing_start(void)
static void tracing_start_tr(struct trace_array *tr)
{
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
unsigned long flags;
if (tracing_disabled)
@@ -2156,7 +2166,7 @@ static void tracing_start_tr(struct trace_array *tr)
goto out;
}
- buffer = tr->trace_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
@@ -2172,7 +2182,7 @@ static void tracing_start_tr(struct trace_array *tr)
*/
void tracing_stop(void)
{
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
unsigned long flags;
raw_spin_lock_irqsave(&global_trace.start_lock, flags);
@@ -2182,7 +2192,7 @@ void tracing_stop(void)
/* Prevent the buffers from switching */
arch_spin_lock(&global_trace.max_lock);
- buffer = global_trace.trace_buffer.buffer;
+ buffer = global_trace.array_buffer.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
@@ -2200,7 +2210,7 @@ void tracing_stop(void)
static void tracing_stop_tr(struct trace_array *tr)
{
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
unsigned long flags;
/* If global, we need to also stop the max tracer */
@@ -2211,7 +2221,7 @@ static void tracing_stop_tr(struct trace_array *tr)
if (tr->stop_count++)
goto out;
- buffer = tr->trace_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
@@ -2442,7 +2452,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
struct ring_buffer_event *
-trace_buffer_lock_reserve(struct ring_buffer *buffer,
+trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
unsigned long flags, int pc)
@@ -2561,10 +2571,10 @@ void trace_buffered_event_disable(void)
preempt_enable();
}
-static struct ring_buffer *temp_buffer;
+static struct trace_buffer *temp_buffer;
struct ring_buffer_event *
-trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
+trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned long flags, int pc)
@@ -2572,7 +2582,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
struct ring_buffer_event *entry;
int val;
- *current_rb = trace_file->tr->trace_buffer.buffer;
+ *current_rb = trace_file->tr->array_buffer.buffer;
if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
(EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
@@ -2610,6 +2620,7 @@ static DEFINE_MUTEX(tracepoint_printk_mutex);
static void output_printk(struct trace_event_buffer *fbuffer)
{
struct trace_event_call *event_call;
+ struct trace_event_file *file;
struct trace_event *event;
unsigned long flags;
struct trace_iterator *iter = tracepoint_print_iter;
@@ -2623,6 +2634,12 @@ static void output_printk(struct trace_event_buffer *fbuffer)
!event_call->event.funcs->trace)
return;
+ file = fbuffer->trace_file;
+ if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
+ (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
+ !filter_match_preds(file->filter, fbuffer->entry)))
+ return;
+
event = &fbuffer->trace_file->event_call->event;
spin_lock_irqsave(&tracepoint_iter_lock, flags);
@@ -2673,9 +2690,9 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
if (static_key_false(&tracepoint_printk_key.key))
output_printk(fbuffer);
- event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
+ event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
fbuffer->event, fbuffer->entry,
- fbuffer->flags, fbuffer->pc);
+ fbuffer->flags, fbuffer->pc, fbuffer->regs);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
@@ -2689,7 +2706,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
# define STACK_SKIP 3
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
- struct ring_buffer *buffer,
+ struct trace_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs)
@@ -2710,7 +2727,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
* Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
*/
void
-trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
__buffer_unlock_commit(buffer, event);
@@ -2845,7 +2862,7 @@ trace_function(struct trace_array *tr,
int pc)
{
struct trace_event_call *call = &event_function;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct ftrace_entry *entry;
@@ -2883,7 +2900,7 @@ struct ftrace_stacks {
static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
-static void __ftrace_trace_stack(struct ring_buffer *buffer,
+static void __ftrace_trace_stack(struct trace_buffer *buffer,
unsigned long flags,
int skip, int pc, struct pt_regs *regs)
{
@@ -2958,7 +2975,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
}
static inline void ftrace_trace_stack(struct trace_array *tr,
- struct ring_buffer *buffer,
+ struct trace_buffer *buffer,
unsigned long flags,
int skip, int pc, struct pt_regs *regs)
{
@@ -2971,7 +2988,7 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc)
{
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
if (rcu_is_watching()) {
__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
@@ -3009,7 +3026,7 @@ void trace_dump_stack(int skip)
/* Skip 1 to skip this function. */
skip++;
#endif
- __ftrace_trace_stack(global_trace.trace_buffer.buffer,
+ __ftrace_trace_stack(global_trace.array_buffer.buffer,
flags, skip, preempt_count(), NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
@@ -3018,7 +3035,7 @@ EXPORT_SYMBOL_GPL(trace_dump_stack);
static DEFINE_PER_CPU(int, user_stack_count);
static void
-ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
+ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
{
struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
@@ -3063,7 +3080,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
preempt_enable();
}
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
-static void ftrace_trace_userstack(struct ring_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_buffer *buffer,
unsigned long flags, int pc)
{
}
@@ -3109,7 +3126,7 @@ static int alloc_percpu_trace_buffer(void)
struct trace_buffer_struct *buffers;
buffers = alloc_percpu(struct trace_buffer_struct);
- if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
+ if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
return -ENOMEM;
trace_percpu_buffer = buffers;
@@ -3154,7 +3171,7 @@ void trace_printk_init_buffers(void)
* directly here. If the global_trace.buffer is already
* allocated here, then this was called by module code.
*/
- if (global_trace.trace_buffer.buffer)
+ if (global_trace.array_buffer.buffer)
tracing_start_cmdline_record();
}
EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
@@ -3188,7 +3205,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
struct trace_event_call *call = &event_bprint;
struct ring_buffer_event *event;
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
struct trace_array *tr = &global_trace;
struct bprint_entry *entry;
unsigned long flags;
@@ -3213,11 +3230,12 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
- goto out;
+ goto out_put;
local_save_flags(flags);
size = sizeof(*entry) + sizeof(u32) * len;
- buffer = tr->trace_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
+ ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
flags, pc);
if (!event)
@@ -3233,6 +3251,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
}
out:
+ ring_buffer_nest_end(buffer);
+out_put:
put_trace_buf();
out_nobuffer:
@@ -3245,7 +3265,7 @@ EXPORT_SYMBOL_GPL(trace_vbprintk);
__printf(3, 0)
static int
-__trace_array_vprintk(struct ring_buffer *buffer,
+__trace_array_vprintk(struct trace_buffer *buffer,
unsigned long ip, const char *fmt, va_list args)
{
struct trace_event_call *call = &event_print;
@@ -3275,6 +3295,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
local_save_flags(flags);
size = sizeof(*entry) + len + 1;
+ ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
flags, pc);
if (!event)
@@ -3289,6 +3310,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
}
out:
+ ring_buffer_nest_end(buffer);
put_trace_buf();
out_nobuffer:
@@ -3302,7 +3324,7 @@ __printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
- return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
+ return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
}
__printf(3, 0)
@@ -3326,7 +3348,7 @@ int trace_array_printk(struct trace_array *tr,
EXPORT_SYMBOL_GPL(trace_array_printk);
__printf(3, 4)
-int trace_array_printk_buf(struct ring_buffer *buffer,
+int trace_array_printk_buf(struct trace_buffer *buffer,
unsigned long ip, const char *fmt, ...)
{
int ret;
@@ -3367,7 +3389,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
if (buf_iter)
event = ring_buffer_iter_peek(buf_iter, ts);
else
- event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
+ event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
lost_events);
if (event) {
@@ -3382,7 +3404,7 @@ static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
unsigned long *missing_events, u64 *ent_ts)
{
- struct ring_buffer *buffer = iter->trace_buffer->buffer;
+ struct trace_buffer *buffer = iter->array_buffer->buffer;
struct trace_entry *ent, *next = NULL;
unsigned long lost_events = 0, next_lost = 0;
int cpu_file = iter->cpu_file;
@@ -3459,7 +3481,7 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
static void trace_consume(struct trace_iterator *iter)
{
- ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
+ ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
&iter->lost_events);
}
@@ -3497,7 +3519,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
unsigned long entries = 0;
u64 ts;
- per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
+ per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
buf_iter = trace_buffer_iter(iter, cpu);
if (!buf_iter)
@@ -3511,13 +3533,13 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
* by the timestamp being before the start of the buffer.
*/
while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
- if (ts >= iter->trace_buffer->time_start)
+ if (ts >= iter->array_buffer->time_start)
break;
entries++;
ring_buffer_read(buf_iter, NULL);
}
- per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
+ per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
}
/*
@@ -3602,7 +3624,7 @@ static void s_stop(struct seq_file *m, void *p)
}
static void
-get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
+get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
unsigned long *entries, int cpu)
{
unsigned long count;
@@ -3624,7 +3646,7 @@ get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
}
static void
-get_total_entries(struct trace_buffer *buf,
+get_total_entries(struct array_buffer *buf,
unsigned long *total, unsigned long *entries)
{
unsigned long t, e;
@@ -3647,7 +3669,7 @@ unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
if (!tr)
tr = &global_trace;
- get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
+ get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
return entries;
}
@@ -3659,7 +3681,7 @@ unsigned long trace_total_entries(struct trace_array *tr)
if (!tr)
tr = &global_trace;
- get_total_entries(&tr->trace_buffer, &total, &entries);
+ get_total_entries(&tr->array_buffer, &total, &entries);
return entries;
}
@@ -3676,7 +3698,7 @@ static void print_lat_help_header(struct seq_file *m)
"# \\ / ||||| \\ | / \n");
}
-static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
+static void print_event_info(struct array_buffer *buf, struct seq_file *m)
{
unsigned long total;
unsigned long entries;
@@ -3687,7 +3709,7 @@ static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
seq_puts(m, "#\n");
}
-static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
+static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
unsigned int flags)
{
bool tgid = flags & TRACE_ITER_RECORD_TGID;
@@ -3698,7 +3720,7 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
}
-static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
+static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
unsigned int flags)
{
bool tgid = flags & TRACE_ITER_RECORD_TGID;
@@ -3720,7 +3742,7 @@ void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
- struct trace_buffer *buf = iter->trace_buffer;
+ struct array_buffer *buf = iter->array_buffer;
struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
struct tracer *type = iter->trace;
unsigned long entries;
@@ -3795,7 +3817,7 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
cpumask_test_cpu(iter->cpu, iter->started))
return;
- if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
+ if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
return;
if (cpumask_available(iter->started))
@@ -3929,7 +3951,7 @@ int trace_empty(struct trace_iterator *iter)
if (!ring_buffer_iter_empty(buf_iter))
return 0;
} else {
- if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
+ if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
return 0;
}
return 1;
@@ -3941,7 +3963,7 @@ int trace_empty(struct trace_iterator *iter)
if (!ring_buffer_iter_empty(buf_iter))
return 0;
} else {
- if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
+ if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
return 0;
}
}
@@ -4031,10 +4053,10 @@ void trace_default_header(struct seq_file *m)
} else {
if (!(trace_flags & TRACE_ITER_VERBOSE)) {
if (trace_flags & TRACE_ITER_IRQ_INFO)
- print_func_help_header_irq(iter->trace_buffer,
+ print_func_help_header_irq(iter->array_buffer,
m, trace_flags);
else
- print_func_help_header(iter->trace_buffer, m,
+ print_func_help_header(iter->array_buffer, m,
trace_flags);
}
}
@@ -4192,21 +4214,21 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
#ifdef CONFIG_TRACER_MAX_TRACE
/* Currently only the top directory has a snapshot */
if (tr->current_trace->print_max || snapshot)
- iter->trace_buffer = &tr->max_buffer;
+ iter->array_buffer = &tr->max_buffer;
else
#endif
- iter->trace_buffer = &tr->trace_buffer;
+ iter->array_buffer = &tr->array_buffer;
iter->snapshot = snapshot;
iter->pos = -1;
iter->cpu_file = tracing_get_cpu(inode);
mutex_init(&iter->mutex);
/* Notify the tracer early; before we stop tracing. */
- if (iter->trace && iter->trace->open)
+ if (iter->trace->open)
iter->trace->open(iter);
/* Annotate start of buffers if we had overruns */
- if (ring_buffer_overruns(iter->trace_buffer->buffer))
+ if (ring_buffer_overruns(iter->array_buffer->buffer))
iter->iter_flags |= TRACE_FILE_ANNOTATE;
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -4220,7 +4242,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
- ring_buffer_read_prepare(iter->trace_buffer->buffer,
+ ring_buffer_read_prepare(iter->array_buffer->buffer,
cpu, GFP_KERNEL);
}
ring_buffer_read_prepare_sync();
@@ -4231,7 +4253,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
} else {
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
- ring_buffer_read_prepare(iter->trace_buffer->buffer,
+ ring_buffer_read_prepare(iter->array_buffer->buffer,
cpu, GFP_KERNEL);
ring_buffer_read_prepare_sync();
ring_buffer_read_start(iter->buffer_iter[cpu]);
@@ -4357,7 +4379,7 @@ static int tracing_open(struct inode *inode, struct file *file)
/* If this file was open for write, then erase contents */
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
int cpu = tracing_get_cpu(inode);
- struct trace_buffer *trace_buf = &tr->trace_buffer;
+ struct array_buffer *trace_buf = &tr->array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
if (tr->current_trace->print_max)
@@ -4554,20 +4576,13 @@ out_err:
return count;
}
-static ssize_t
-tracing_cpumask_write(struct file *filp, const char __user *ubuf,
- size_t count, loff_t *ppos)
+int tracing_set_cpumask(struct trace_array *tr,
+ cpumask_var_t tracing_cpumask_new)
{
- struct trace_array *tr = file_inode(filp)->i_private;
- cpumask_var_t tracing_cpumask_new;
- int err, cpu;
-
- if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
- return -ENOMEM;
+ int cpu;
- err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
- if (err)
- goto err_unlock;
+ if (!tr)
+ return -EINVAL;
local_irq_disable();
arch_spin_lock(&tr->max_lock);
@@ -4578,24 +4593,47 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
*/
if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
- atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
- ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
+ atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+ ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
}
if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
- atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
- ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
+ atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+ ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
}
}
arch_spin_unlock(&tr->max_lock);
local_irq_enable();
cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
+
+ return 0;
+}
+
+static ssize_t
+tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct trace_array *tr = file_inode(filp)->i_private;
+ cpumask_var_t tracing_cpumask_new;
+ int err;
+
+ if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+ return -ENOMEM;
+
+ err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
+ if (err)
+ goto err_free;
+
+ err = tracing_set_cpumask(tr, tracing_cpumask_new);
+ if (err)
+ goto err_free;
+
free_cpumask_var(tracing_cpumask_new);
return count;
-err_unlock:
+err_free:
free_cpumask_var(tracing_cpumask_new);
return err;
@@ -4726,7 +4764,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
ftrace_pid_follow_fork(tr, enabled);
if (mask == TRACE_ITER_OVERWRITE) {
- ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
+ ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
@@ -4740,7 +4778,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
return 0;
}
-static int trace_set_options(struct trace_array *tr, char *option)
+int trace_set_options(struct trace_array *tr, char *option)
{
char *cmp;
int neg = 0;
@@ -5361,14 +5399,12 @@ static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
* Paranoid! If ptr points to end, we don't want to increment past it.
* This really should never happen.
*/
+ (*pos)++;
ptr = update_eval_map(ptr);
if (WARN_ON_ONCE(!ptr))
return NULL;
ptr++;
-
- (*pos)++;
-
ptr = update_eval_map(ptr);
return ptr;
@@ -5534,11 +5570,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
int tracer_init(struct tracer *t, struct trace_array *tr)
{
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(&tr->array_buffer);
return t->init(tr);
}
-static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
+static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
{
int cpu;
@@ -5548,8 +5584,8 @@ static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @tr's buffer to the size of @size_tr's entries */
-static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
- struct trace_buffer *size_buf, int cpu_id)
+static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
+ struct array_buffer *size_buf, int cpu_id)
{
int cpu, ret = 0;
@@ -5587,10 +5623,10 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
ring_buffer_expanded = true;
/* May be called before buffers are initialized */
- if (!tr->trace_buffer.buffer)
+ if (!tr->array_buffer.buffer)
return 0;
- ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
+ ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
if (ret < 0)
return ret;
@@ -5601,8 +5637,8 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
if (ret < 0) {
- int r = resize_buffer_duplicate_size(&tr->trace_buffer,
- &tr->trace_buffer, cpu);
+ int r = resize_buffer_duplicate_size(&tr->array_buffer,
+ &tr->array_buffer, cpu);
if (r < 0) {
/*
* AARGH! We are left with different
@@ -5633,15 +5669,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
#endif /* CONFIG_TRACER_MAX_TRACE */
if (cpu == RING_BUFFER_ALL_CPUS)
- set_buffer_entries(&tr->trace_buffer, size);
+ set_buffer_entries(&tr->array_buffer, size);
else
- per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
+ per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
return ret;
}
-static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
- unsigned long size, int cpu_id)
+ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+ unsigned long size, int cpu_id)
{
int ret = size;
@@ -5720,7 +5756,7 @@ static void add_tracer_options(struct trace_array *tr, struct tracer *t)
create_trace_option_files(tr, t);
}
-static int tracing_set_tracer(struct trace_array *tr, const char *buf)
+int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -5979,7 +6015,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
iter->tr = tr;
- iter->trace_buffer = &tr->trace_buffer;
+ iter->array_buffer = &tr->array_buffer;
iter->cpu_file = tracing_get_cpu(inode);
mutex_init(&iter->mutex);
filp->private_data = iter;
@@ -6039,7 +6075,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
*/
return EPOLLIN | EPOLLRDNORM;
else
- return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
+ return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
filp, poll_table);
}
@@ -6356,8 +6392,8 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
for_each_tracing_cpu(cpu) {
/* fill in the size from first enabled cpu */
if (size == 0)
- size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
- if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
+ size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
+ if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
buf_size_same = 0;
break;
}
@@ -6373,7 +6409,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
} else
r = sprintf(buf, "X\n");
} else
- r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
+ r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
mutex_unlock(&trace_types_lock);
@@ -6420,7 +6456,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
- size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
+ size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
if (!ring_buffer_expanded)
expanded_size += trace_buf_size >> 10;
}
@@ -6470,7 +6506,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
struct trace_array *tr = filp->private_data;
struct ring_buffer_event *event;
enum event_trigger_type tt = ETT_NONE;
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
struct print_entry *entry;
unsigned long irq_flags;
ssize_t written;
@@ -6499,7 +6535,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
if (cnt < FAULTED_SIZE)
size += FAULTED_SIZE - cnt;
- buffer = tr->trace_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
irq_flags, preempt_count());
if (unlikely(!event))
@@ -6550,7 +6586,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
{
struct trace_array *tr = filp->private_data;
struct ring_buffer_event *event;
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
struct raw_data_entry *entry;
unsigned long irq_flags;
ssize_t written;
@@ -6579,7 +6615,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
if (cnt < FAULT_SIZE_ID)
size += FAULT_SIZE_ID - cnt;
- buffer = tr->trace_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
irq_flags, preempt_count());
if (!event)
@@ -6634,13 +6670,13 @@ int tracing_set_clock(struct trace_array *tr, const char *clockstr)
tr->clock_id = i;
- ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
+ ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
/*
* New clock may not be consistent with the previous clock.
* Reset the buffer so that it doesn't have incomparable timestamps.
*/
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
if (tr->max_buffer.buffer)
@@ -6703,7 +6739,7 @@ static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
mutex_lock(&trace_types_lock);
- if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
+ if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
seq_puts(m, "delta [absolute]\n");
else
seq_puts(m, "[delta] absolute\n");
@@ -6748,7 +6784,7 @@ int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
goto out;
}
- ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
+ ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
#ifdef CONFIG_TRACER_MAX_TRACE
if (tr->max_buffer.buffer)
@@ -6797,7 +6833,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
ret = 0;
iter->tr = tr;
- iter->trace_buffer = &tr->max_buffer;
+ iter->array_buffer = &tr->max_buffer;
iter->cpu_file = tracing_get_cpu(inode);
m->private = iter;
file->private_data = m;
@@ -6860,7 +6896,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
#endif
if (tr->allocated_snapshot)
ret = resize_buffer_duplicate_size(&tr->max_buffer,
- &tr->trace_buffer, iter->cpu_file);
+ &tr->array_buffer, iter->cpu_file);
else
ret = tracing_alloc_snapshot_instance(tr);
if (ret < 0)
@@ -6935,7 +6971,7 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp)
}
info->iter.snapshot = true;
- info->iter.trace_buffer = &info->iter.tr->max_buffer;
+ info->iter.array_buffer = &info->iter.tr->max_buffer;
return ret;
}
@@ -7310,7 +7346,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
info->iter.tr = tr;
info->iter.cpu_file = tracing_get_cpu(inode);
info->iter.trace = tr->current_trace;
- info->iter.trace_buffer = &tr->trace_buffer;
+ info->iter.array_buffer = &tr->array_buffer;
info->spare = NULL;
/* Force reading ring buffer for first read */
info->read = (unsigned int)-1;
@@ -7355,7 +7391,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
#endif
if (!info->spare) {
- info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
+ info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
iter->cpu_file);
if (IS_ERR(info->spare)) {
ret = PTR_ERR(info->spare);
@@ -7373,7 +7409,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
again:
trace_access_lock(iter->cpu_file);
- ret = ring_buffer_read_page(iter->trace_buffer->buffer,
+ ret = ring_buffer_read_page(iter->array_buffer->buffer,
&info->spare,
count,
iter->cpu_file, 0);
@@ -7423,7 +7459,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
__trace_array_put(iter->tr);
if (info->spare)
- ring_buffer_free_read_page(iter->trace_buffer->buffer,
+ ring_buffer_free_read_page(iter->array_buffer->buffer,
info->spare_cpu, info->spare);
kfree(info);
@@ -7433,7 +7469,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
}
struct buffer_ref {
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
void *page;
int cpu;
refcount_t refcount;
@@ -7528,7 +7564,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
again:
trace_access_lock(iter->cpu_file);
- entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+ entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
struct page *page;
@@ -7541,7 +7577,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
}
refcount_set(&ref->refcount, 1);
- ref->buffer = iter->trace_buffer->buffer;
+ ref->buffer = iter->array_buffer->buffer;
ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
if (IS_ERR(ref->page)) {
ret = PTR_ERR(ref->page);
@@ -7569,7 +7605,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
spd.nr_pages++;
*ppos += PAGE_SIZE;
- entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+ entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
}
trace_access_unlock(iter->cpu_file);
@@ -7613,7 +7649,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
{
struct inode *inode = file_inode(filp);
struct trace_array *tr = inode->i_private;
- struct trace_buffer *trace_buf = &tr->trace_buffer;
+ struct array_buffer *trace_buf = &tr->array_buffer;
int cpu = tracing_get_cpu(inode);
struct trace_seq *s;
unsigned long cnt;
@@ -7894,7 +7930,7 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
- WARN_ONCE(!tr->percpu_dir,
+ MEM_FAIL(!tr->percpu_dir,
"Could not create tracefs directory 'per_cpu/%d'\n", cpu);
return tr->percpu_dir;
@@ -8215,7 +8251,7 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
for (cnt = 0; opts[cnt].name; cnt++) {
create_trace_option_file(tr, &topts[cnt], flags,
&opts[cnt]);
- WARN_ONCE(topts[cnt].entry == NULL,
+ MEM_FAIL(topts[cnt].entry == NULL,
"Failed to create trace option: %s",
opts[cnt].name);
}
@@ -8272,7 +8308,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
unsigned long val;
int ret;
@@ -8362,7 +8398,7 @@ static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
static int
-allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
+allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
{
enum ring_buffer_flags rb_flags;
@@ -8382,8 +8418,8 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
}
/* Allocate the first page for all buffers */
- set_buffer_entries(&tr->trace_buffer,
- ring_buffer_size(tr->trace_buffer.buffer, 0));
+ set_buffer_entries(&tr->array_buffer,
+ ring_buffer_size(tr->array_buffer.buffer, 0));
return 0;
}
@@ -8392,18 +8428,18 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
{
int ret;
- ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
+ ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
if (ret)
return ret;
#ifdef CONFIG_TRACER_MAX_TRACE
ret = allocate_trace_buffer(tr, &tr->max_buffer,
allocate_snapshot ? size : 1);
- if (WARN_ON(ret)) {
- ring_buffer_free(tr->trace_buffer.buffer);
- tr->trace_buffer.buffer = NULL;
- free_percpu(tr->trace_buffer.data);
- tr->trace_buffer.data = NULL;
+ if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
+ ring_buffer_free(tr->array_buffer.buffer);
+ tr->array_buffer.buffer = NULL;
+ free_percpu(tr->array_buffer.data);
+ tr->array_buffer.data = NULL;
return -ENOMEM;
}
tr->allocated_snapshot = allocate_snapshot;
@@ -8417,7 +8453,7 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
return 0;
}
-static void free_trace_buffer(struct trace_buffer *buf)
+static void free_trace_buffer(struct array_buffer *buf)
{
if (buf->buffer) {
ring_buffer_free(buf->buffer);
@@ -8432,7 +8468,7 @@ static void free_trace_buffers(struct trace_array *tr)
if (!tr)
return;
- free_trace_buffer(&tr->trace_buffer);
+ free_trace_buffer(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
free_trace_buffer(&tr->max_buffer);
@@ -8463,6 +8499,34 @@ static void update_tracer_options(struct trace_array *tr)
mutex_unlock(&trace_types_lock);
}
+/* Must have trace_types_lock held */
+struct trace_array *trace_array_find(const char *instance)
+{
+ struct trace_array *tr, *found = NULL;
+
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (tr->name && strcmp(tr->name, instance) == 0) {
+ found = tr;
+ break;
+ }
+ }
+
+ return found;
+}
+
+struct trace_array *trace_array_find_get(const char *instance)
+{
+ struct trace_array *tr;
+
+ mutex_lock(&trace_types_lock);
+ tr = trace_array_find(instance);
+ if (tr)
+ tr->ref++;
+ mutex_unlock(&trace_types_lock);
+
+ return tr;
+}
+
static struct trace_array *trace_array_create(const char *name)
{
struct trace_array *tr;
@@ -8539,10 +8603,8 @@ static int instance_mkdir(const char *name)
mutex_lock(&trace_types_lock);
ret = -EEXIST;
- list_for_each_entry(tr, &ftrace_trace_arrays, list) {
- if (tr->name && strcmp(tr->name, name) == 0)
- goto out_unlock;
- }
+ if (trace_array_find(name))
+ goto out_unlock;
tr = trace_array_create(name);
@@ -8564,6 +8626,10 @@ out_unlock:
* NOTE: This function increments the reference counter associated with the
* trace array returned. This makes sure it cannot be freed while in use.
* Use trace_array_put() once the trace array is no longer needed.
+ * If the trace_array is to be freed, trace_array_destroy() needs to
+ * be called after the trace_array_put(), or simply let user space delete
+ * it from the tracefs instances directory. But until the
+ * trace_array_put() is called, user space cannot delete it.
*
*/
struct trace_array *trace_array_get_by_name(const char *name)
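A minimal sketch of the get/put pattern this comment describes, for a hypothetical in-kernel user; the instance name and the error handling are illustrative only, not taken from the patch:

	struct trace_array *tr;

	tr = trace_array_get_by_name("my_instance");	/* takes a reference */
	if (!tr)
		return -ENODEV;

	/* ... write into the instance, e.g. via trace_array_printk() ... */

	trace_array_put(tr);	/* drop the reference; only now can the instance
				 * be removed from tracefs or via trace_array_destroy() */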
@@ -8666,12 +8732,9 @@ static int instance_rmdir(const char *name)
mutex_lock(&trace_types_lock);
ret = -ENODEV;
- list_for_each_entry(tr, &ftrace_trace_arrays, list) {
- if (tr->name && strcmp(tr->name, name) == 0) {
- ret = __remove_instance(tr);
- break;
- }
- }
+ tr = trace_array_find(name);
+ if (tr)
+ ret = __remove_instance(tr);
mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
@@ -8684,7 +8747,7 @@ static __init void create_trace_instances(struct dentry *d_tracer)
trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
instance_mkdir,
instance_rmdir);
- if (WARN_ON(!trace_instance_dir))
+ if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
return;
}
@@ -8754,7 +8817,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
#endif
if (ftrace_create_function_files(tr, d_tracer))
- WARN(1, "Could not allocate function filter files");
+ MEM_FAIL(1, "Could not allocate function filter files");
#ifdef CONFIG_TRACER_SNAPSHOT
trace_create_file("snapshot", 0644, d_tracer,
@@ -9036,13 +9099,13 @@ void trace_init_global_iter(struct trace_iterator *iter)
iter->tr = &global_trace;
iter->trace = iter->tr->current_trace;
iter->cpu_file = RING_BUFFER_ALL_CPUS;
- iter->trace_buffer = &global_trace.trace_buffer;
+ iter->array_buffer = &global_trace.array_buffer;
if (iter->trace && iter->trace->open)
iter->trace->open(iter);
/* Annotate start of buffers if we had overruns */
- if (ring_buffer_overruns(iter->trace_buffer->buffer))
+ if (ring_buffer_overruns(iter->array_buffer->buffer))
iter->iter_flags |= TRACE_FILE_ANNOTATE;
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -9083,7 +9146,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
trace_init_global_iter(&iter);
for_each_tracing_cpu(cpu) {
- atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+ atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -9151,7 +9214,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
tr->trace_flags |= old_userobj;
for_each_tracing_cpu(cpu) {
- atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+ atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
atomic_dec(&dump_running);
printk_nmi_direct_exit();
@@ -9306,8 +9369,7 @@ __init static int tracer_alloc_buffers(void)
/* TODO: make the number of buffers hot pluggable with CPUS */
if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
- printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
- WARN_ON(1);
+ MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
goto out_free_savedcmd;
}
@@ -9380,7 +9442,8 @@ void __init early_trace_init(void)
if (tracepoint_printk) {
tracepoint_print_iter =
kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
- if (WARN_ON(!tracepoint_print_iter))
+ if (MEM_FAIL(!tracepoint_print_iter,
+ "Failed to allocate trace iterator\n"))
tracepoint_printk = 0;
else
static_key_enable(&tracepoint_printk_key.key);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a98dce1b3334..99372dd7d168 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -93,6 +93,18 @@ enum trace_type {
#include "trace_entries.h"
+/* Use this for memory failure errors */
+#define MEM_FAIL(condition, fmt, ...) ({ \
+ static bool __section(.data.once) __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ __warned = true; \
+ pr_err("ERROR: " fmt, ##__VA_ARGS__); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+
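For illustration, a hedged sketch of how the macro is meant to be used (the allocation and message are made up): it prints a one-time "ERROR: ..." line via pr_err() and evaluates to the condition, so callers can branch on it the same way they would on WARN().

	void *buf = kzalloc(size, GFP_KERNEL);

	/* warns once and returns the (unlikely) condition */
	if (MEM_FAIL(!buf, "Failed to allocate example buffer\n"))
		return -ENOMEM;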
/*
* syscalls are special, and need special handling, this is why
* they are not included in trace_entries.h
@@ -175,9 +187,9 @@ struct trace_array_cpu {
struct tracer;
struct trace_option_dentry;
-struct trace_buffer {
+struct array_buffer {
struct trace_array *tr;
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
struct trace_array_cpu __percpu *data;
u64 time_start;
int cpu;
@@ -248,7 +260,7 @@ struct cond_snapshot {
struct trace_array {
struct list_head list;
char *name;
- struct trace_buffer trace_buffer;
+ struct array_buffer array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
* The max_buffer is used to snapshot the trace when a maximum
@@ -256,12 +268,12 @@ struct trace_array {
* Some tracers will use this to store a maximum trace while
* it continues examining live traces.
*
- * The buffers for the max_buffer are set up the same as the trace_buffer
+ * The buffers for the max_buffer are set up the same as the array_buffer
* When a snapshot is taken, the buffer of the max_buffer is swapped
- * with the buffer of the trace_buffer and the buffers are reset for
- * the trace_buffer so the tracing can continue.
+ * with the buffer of the array_buffer and the buffers are reset for
+ * the array_buffer so the tracing can continue.
*/
- struct trace_buffer max_buffer;
+ struct array_buffer max_buffer;
bool allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
@@ -345,6 +357,8 @@ extern struct mutex trace_types_lock;
extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
+extern struct trace_array *trace_array_find(const char *instance);
+extern struct trace_array *trace_array_find_get(const char *instance);
extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
@@ -684,7 +698,7 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
-void tracing_reset_online_cpus(struct trace_buffer *buf);
+void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
@@ -704,7 +718,7 @@ struct dentry *tracing_init_dentry(void);
struct ring_buffer_event;
struct ring_buffer_event *
-trace_buffer_lock_reserve(struct ring_buffer *buffer,
+trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type,
unsigned long len,
unsigned long flags,
@@ -716,7 +730,7 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts);
-void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
struct ring_buffer_event *event);
int trace_empty(struct trace_iterator *iter);
@@ -872,7 +886,7 @@ trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args);
-int trace_array_printk_buf(struct ring_buffer *buffer,
+int trace_array_printk_buf(struct trace_buffer *buffer,
unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);
@@ -949,22 +963,31 @@ extern void __trace_graph_return(struct trace_array *tr,
unsigned long flags, int pc);
#ifdef CONFIG_DYNAMIC_FTRACE
-extern struct ftrace_hash *ftrace_graph_hash;
-extern struct ftrace_hash *ftrace_graph_notrace_hash;
+extern struct ftrace_hash __rcu *ftrace_graph_hash;
+extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
unsigned long addr = trace->func;
int ret = 0;
+ struct ftrace_hash *hash;
preempt_disable_notrace();
- if (ftrace_hash_empty(ftrace_graph_hash)) {
+ /*
+ * Have to open code "rcu_dereference_sched()" because the
+ * function graph tracer can be called when RCU is not
+ * "watching".
+ * Protected with schedule_on_each_cpu(ftrace_sync)
+ */
+ hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
+
+ if (ftrace_hash_empty(hash)) {
ret = 1;
goto out;
}
- if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
+ if (ftrace_lookup_ip(hash, addr)) {
/*
* This needs to be cleared on the return functions
@@ -1000,10 +1023,20 @@ static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
int ret = 0;
+ struct ftrace_hash *notrace_hash;
preempt_disable_notrace();
- if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
+ /*
+ * Have to open code "rcu_dereference_sched()" because the
+ * function graph tracer can be called when RCU is not
+ * "watching".
+ * Protected with schedule_on_each_cpu(ftrace_sync)
+ */
+ notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+ !preemptible());
+
+ if (ftrace_lookup_ip(notrace_hash, addr))
ret = 1;
preempt_enable_notrace();
@@ -1056,7 +1089,7 @@ struct ftrace_func_command {
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
- return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
+ return !this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
@@ -1144,6 +1177,11 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd);
void ftrace_create_filter_files(struct ftrace_ops *ops,
struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
+
+extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
+extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
#else
struct ftrace_func_command;
@@ -1366,17 +1404,17 @@ struct trace_subsystem_dir {
};
extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
- struct ring_buffer *buffer,
+ struct trace_buffer *buffer,
struct ring_buffer_event *event);
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
- struct ring_buffer *buffer,
+ struct trace_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs);
static inline void trace_buffer_unlock_commit(struct trace_array *tr,
- struct ring_buffer *buffer,
+ struct trace_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
@@ -1389,7 +1427,7 @@ void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);
static inline void
-__trace_event_discard_commit(struct ring_buffer *buffer,
+__trace_event_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event)
{
if (this_cpu_read(trace_buffered_event) == event) {
@@ -1415,7 +1453,7 @@ __trace_event_discard_commit(struct ring_buffer *buffer,
*/
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
- struct ring_buffer *buffer,
+ struct trace_buffer *buffer,
struct ring_buffer_event *event,
void *entry,
enum event_trigger_type *tt)
@@ -1450,7 +1488,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
*/
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
- struct ring_buffer *buffer,
+ struct trace_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc)
{
@@ -1481,7 +1519,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
*/
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
- struct ring_buffer *buffer,
+ struct trace_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc,
struct pt_regs *regs)
@@ -1892,6 +1930,15 @@ void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
+/* Used from boot time tracer */
+extern int trace_set_options(struct trace_array *tr, char *option);
+extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
+extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+ unsigned long size, int cpu_id);
+extern int tracing_set_cpumask(struct trace_array *tr,
+ cpumask_var_t tracing_cpumask_new);
+
+
#define MAX_EVENT_NAME_LEN 64
extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
@@ -1949,6 +1996,9 @@ static inline const char *get_syscall_name(int syscall)
#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
+/* Used from boot time tracer */
+extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
+extern int trigger_process_regex(struct trace_event_file *file, char *buff);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
new file mode 100644
index 000000000000..06d7feb5255f
--- /dev/null
+++ b/kernel/trace/trace_boot.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * trace_boot.c
+ * Tracing kernel boot-time
+ */
+
+#define pr_fmt(fmt) "trace_boot: " fmt
+
+#include <linux/bootconfig.h>
+#include <linux/cpumask.h>
+#include <linux/ftrace.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/trace.h>
+#include <linux/trace_events.h>
+
+#include "trace.h"
+
+#define MAX_BUF_LEN 256
+
+static void __init
+trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
+{
+ struct xbc_node *anode;
+ const char *p;
+ char buf[MAX_BUF_LEN];
+ unsigned long v = 0;
+
+ /* Common ftrace options */
+ xbc_node_for_each_array_value(node, "options", anode, p) {
+ if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
+ pr_err("String is too long: %s\n", p);
+ continue;
+ }
+
+ if (trace_set_options(tr, buf) < 0)
+ pr_err("Failed to set option: %s\n", buf);
+ }
+
+ p = xbc_node_find_value(node, "trace_clock", NULL);
+ if (p && *p != '\0') {
+ if (tracing_set_clock(tr, p) < 0)
+ pr_err("Failed to set trace clock: %s\n", p);
+ }
+
+ p = xbc_node_find_value(node, "buffer_size", NULL);
+ if (p && *p != '\0') {
+ v = memparse(p, NULL);
+ if (v < PAGE_SIZE)
+ pr_err("Buffer size is too small: %s\n", p);
+ if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0)
+ pr_err("Failed to resize trace buffer to %s\n", p);
+ }
+
+ p = xbc_node_find_value(node, "cpumask", NULL);
+ if (p && *p != '\0') {
+ cpumask_var_t new_mask;
+
+ if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+ if (cpumask_parse(p, new_mask) < 0 ||
+ tracing_set_cpumask(tr, new_mask) < 0)
+ pr_err("Failed to set new CPU mask %s\n", p);
+ free_cpumask_var(new_mask);
+ }
+ }
+}
+
+#ifdef CONFIG_EVENT_TRACING
+static void __init
+trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
+{
+ struct xbc_node *anode;
+ char buf[MAX_BUF_LEN];
+ const char *p;
+
+ xbc_node_for_each_array_value(node, "events", anode, p) {
+ if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
+ pr_err("String is too long: %s\n", p);
+ continue;
+ }
+
+ if (ftrace_set_clr_event(tr, buf, 1) < 0)
+ pr_err("Failed to enable event: %s\n", p);
+ }
+}
+
+#ifdef CONFIG_KPROBE_EVENTS
+static int __init
+trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
+{
+ struct dynevent_cmd cmd;
+ struct xbc_node *anode;
+ char buf[MAX_BUF_LEN];
+ const char *val;
+ int ret;
+
+ kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
+
+ ret = kprobe_event_gen_cmd_start(&cmd, event, NULL);
+ if (ret)
+ return ret;
+
+ xbc_node_for_each_array_value(node, "probes", anode, val) {
+ ret = kprobe_event_add_field(&cmd, val);
+ if (ret)
+ return ret;
+ }
+
+ ret = kprobe_event_gen_cmd_end(&cmd);
+ if (ret)
+ pr_err("Failed to add probe: %s\n", buf);
+
+ return ret;
+}
+#else
+static inline int __init
+trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
+{
+ pr_err("Kprobe event is not supported.\n");
+ return -ENOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_HIST_TRIGGERS
+static int __init
+trace_boot_add_synth_event(struct xbc_node *node, const char *event)
+{
+ struct dynevent_cmd cmd;
+ struct xbc_node *anode;
+ char buf[MAX_BUF_LEN];
+ const char *p;
+ int ret;
+
+ synth_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
+
+ ret = synth_event_gen_cmd_start(&cmd, event, NULL);
+ if (ret)
+ return ret;
+
+ xbc_node_for_each_array_value(node, "fields", anode, p) {
+ ret = synth_event_add_field_str(&cmd, p);
+ if (ret)
+ return ret;
+ }
+
+ ret = synth_event_gen_cmd_end(&cmd);
+ if (ret < 0)
+ pr_err("Failed to add synthetic event: %s\n", buf);
+
+ return ret;
+}
+#else
+static inline int __init
+trace_boot_add_synth_event(struct xbc_node *node, const char *event)
+{
+ pr_err("Synthetic event is not supported.\n");
+ return -ENOTSUPP;
+}
+#endif
+
+static void __init
+trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
+ struct xbc_node *enode)
+{
+ struct trace_event_file *file;
+ struct xbc_node *anode;
+ char buf[MAX_BUF_LEN];
+ const char *p, *group, *event;
+
+ group = xbc_node_get_data(gnode);
+ event = xbc_node_get_data(enode);
+
+ if (!strcmp(group, "kprobes"))
+ if (trace_boot_add_kprobe_event(enode, event) < 0)
+ return;
+ if (!strcmp(group, "synthetic"))
+ if (trace_boot_add_synth_event(enode, event) < 0)
+ return;
+
+ mutex_lock(&event_mutex);
+ file = find_event_file(tr, group, event);
+ if (!file) {
+ pr_err("Failed to find event: %s:%s\n", group, event);
+ goto out;
+ }
+
+ p = xbc_node_find_value(enode, "filter", NULL);
+ if (p && *p != '\0') {
+ if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+ pr_err("filter string is too long: %s\n", p);
+ else if (apply_event_filter(file, buf) < 0)
+ pr_err("Failed to apply filter: %s\n", buf);
+ }
+
+ xbc_node_for_each_array_value(enode, "actions", anode, p) {
+ if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+ pr_err("action string is too long: %s\n", p);
+ else if (trigger_process_regex(file, buf) < 0)
+ pr_err("Failed to apply an action: %s\n", buf);
+ }
+
+ if (xbc_node_find_value(enode, "enable", NULL)) {
+ if (trace_event_enable_disable(file, 1, 0) < 0)
+ pr_err("Failed to enable event node: %s:%s\n",
+ group, event);
+ }
+out:
+ mutex_unlock(&event_mutex);
+}
+
+static void __init
+trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
+{
+ struct xbc_node *gnode, *enode;
+
+ node = xbc_node_find_child(node, "event");
+ if (!node)
+ return;
+ /* per-event key starts with "event.GROUP.EVENT" */
+ xbc_node_for_each_child(node, gnode)
+ xbc_node_for_each_child(gnode, enode)
+ trace_boot_init_one_event(tr, gnode, enode);
+}
+#else
+#define trace_boot_enable_events(tr, node) do {} while (0)
+#define trace_boot_init_events(tr, node) do {} while (0)
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static void __init
+trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node)
+{
+ struct xbc_node *anode;
+ const char *p;
+ char *q;
+
+ xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) {
+ q = kstrdup(p, GFP_KERNEL);
+ if (!q)
+ return;
+ if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)
+ pr_err("Failed to add %s to ftrace filter\n", p);
+ else
+ ftrace_filter_param = true;
+ kfree(q);
+ }
+ xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) {
+ q = kstrdup(p, GFP_KERNEL);
+ if (!q)
+ return;
+ if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)
+ pr_err("Failed to add %s to ftrace filter\n", p);
+ else
+ ftrace_filter_param = true;
+ kfree(q);
+ }
+}
+#else
+#define trace_boot_set_ftrace_filter(tr, node) do {} while (0)
+#endif
+
+static void __init
+trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node)
+{
+ const char *p;
+
+ trace_boot_set_ftrace_filter(tr, node);
+
+ p = xbc_node_find_value(node, "tracer", NULL);
+ if (p && *p != '\0') {
+ if (tracing_set_tracer(tr, p) < 0)
+ pr_err("Failed to set given tracer: %s\n", p);
+ }
+}
+
+static void __init
+trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node)
+{
+ trace_boot_set_instance_options(tr, node);
+ trace_boot_init_events(tr, node);
+ trace_boot_enable_events(tr, node);
+ trace_boot_enable_tracer(tr, node);
+}
+
+static void __init
+trace_boot_init_instances(struct xbc_node *node)
+{
+ struct xbc_node *inode;
+ struct trace_array *tr;
+ const char *p;
+
+ node = xbc_node_find_child(node, "instance");
+ if (!node)
+ return;
+
+ xbc_node_for_each_child(node, inode) {
+ p = xbc_node_get_data(inode);
+ if (!p || *p == '\0')
+ continue;
+
+ tr = trace_array_get_by_name(p);
+ if (!tr) {
+ pr_err("Failed to get trace instance %s\n", p);
+ continue;
+ }
+ trace_boot_init_one_instance(tr, inode);
+ trace_array_put(tr);
+ }
+}
+
+static int __init trace_boot_init(void)
+{
+ struct xbc_node *trace_node;
+ struct trace_array *tr;
+
+ trace_node = xbc_find_node("ftrace");
+ if (!trace_node)
+ return 0;
+
+ tr = top_trace_array();
+ if (!tr)
+ return 0;
+
+ /* Global trace array is also one instance */
+ trace_boot_init_one_instance(tr, trace_node);
+ trace_boot_init_instances(trace_node);
+
+ return 0;
+}
+
+fs_initcall(trace_boot_init);
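To make the parser above concrete, here is a hedged example of the bootconfig fragment it would consume. The key names come from the xbc_node_find_value()/xbc_node_for_each_array_value() lookups in this file; the tracer, event, probe string and buffer size are purely illustrative:

	ftrace {
		tracer = function_graph
		options = "sym-addr"
		buffer_size = 1M
		event.sched.sched_switch.enable
		event.kprobes.myopen {
			probes = "do_sys_open $arg1:string"
			enable
		}
		instance.foo {
			tracer = function
			ftrace.filters = "user_*"
		}
	}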
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 88e158d27965..eff099123aa2 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -32,10 +32,10 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
struct trace_event_call *call = &event_branch;
struct trace_array *tr = branch_tracer;
+ struct trace_buffer *buffer;
struct trace_array_cpu *data;
struct ring_buffer_event *event;
struct trace_branch *entry;
- struct ring_buffer *buffer;
unsigned long flags;
int pc;
const char *p;
@@ -55,12 +55,12 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
raw_local_irq_save(flags);
current->trace_recursion |= TRACE_BRANCH_BIT;
- data = this_cpu_ptr(tr->trace_buffer.data);
+ data = this_cpu_ptr(tr->array_buffer.data);
if (atomic_read(&data->disabled))
goto out;
pc = preempt_count();
- buffer = tr->trace_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
sizeof(*entry), flags, pc);
if (!event)
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index 89779eb84a07..9f2e8520b748 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -223,3 +223,215 @@ static __init int init_dynamic_event(void)
return 0;
}
fs_initcall(init_dynamic_event);
+
+/**
+ * dynevent_arg_add - Add an arg to a dynevent_cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
+ * @arg: The argument to append to the current cmd
+ * @check_arg: An (optional) pointer to a function checking arg sanity
+ *
+ * Append an argument to a dynevent_cmd. The argument string will be
+ * appended to the current cmd string, followed by a separator, if
+ * applicable. Before the argument is added, the @check_arg function,
+ * if present, will be used to check the sanity of the current arg
+ * string.
+ *
+ * The cmd string and separator should be set using the
+ * dynevent_arg_init() before any arguments are added using this
+ * function.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int dynevent_arg_add(struct dynevent_cmd *cmd,
+ struct dynevent_arg *arg,
+ dynevent_check_arg_fn_t check_arg)
+{
+ int ret = 0;
+
+ if (check_arg) {
+ ret = check_arg(arg);
+ if (ret)
+ return ret;
+ }
+
+ ret = seq_buf_printf(&cmd->seq, " %s%c", arg->str, arg->separator);
+ if (ret) {
+ pr_err("String is too long: %s%c\n", arg->str, arg->separator);
+ return -E2BIG;
+ }
+
+ return ret;
+}
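As a small, hedged illustration of the append format above (the field string and separator are made up, and a previously initialized cmd pointer is assumed): initializing an arg with a ';' separator and setting its string to "u64 lat" makes this call append " u64 lat;" to the command, i.e. a leading space, the arg string, then the separator.

	struct dynevent_arg arg;

	dynevent_arg_init(&arg, ';');
	arg.str = "u64 lat";				/* illustrative field string */
	ret = dynevent_arg_add(cmd, &arg, NULL);	/* appends " u64 lat;" */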
+
+/**
+ * dynevent_arg_pair_add - Add an arg pair to a dynevent_cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
+ * @arg_pair: The argument pair to append to the current cmd
+ * @check_arg: An (optional) pointer to a function checking arg sanity
+ *
+ * Append an argument pair to a dynevent_cmd. An argument pair
+ * consists of a left-hand-side argument and a right-hand-side
+ * argument separated by an operator, which can be whitespace, all
+ * followed by a separator, if applicable. This can be used to add
+ * arguments of the form 'type variable_name;' or 'x+y'.
+ *
+ * The lhs argument string will be appended to the current cmd string,
+ * followed by an operator, if applicable, followed by the rhs string,
+ * followed finally by a separator, if applicable. Before the
+ * argument is added, the @check_arg function, if present, will be
+ * used to check the sanity of the current arg strings.
+ *
+ * The cmd strings, operator, and separator should be set using the
+ * dynevent_arg_pair_init() before any arguments are added using this
+ * function.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int dynevent_arg_pair_add(struct dynevent_cmd *cmd,
+ struct dynevent_arg_pair *arg_pair,
+ dynevent_check_arg_fn_t check_arg)
+{
+ int ret = 0;
+
+ if (check_arg) {
+ ret = check_arg(arg_pair);
+ if (ret)
+ return ret;
+ }
+
+ ret = seq_buf_printf(&cmd->seq, " %s%c%s%c", arg_pair->lhs,
+ arg_pair->operator, arg_pair->rhs,
+ arg_pair->separator);
+ if (ret) {
+ pr_err("field string is too long: %s%c%s%c\n", arg_pair->lhs,
+ arg_pair->operator, arg_pair->rhs,
+ arg_pair->separator);
+ return -E2BIG;
+ }
+
+ return ret;
+}
+
+/**
+ * dynevent_str_add - Add a string to a dynevent_cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event cmd
+ * @str: The string to append to the current cmd
+ *
+ * Append a string to a dynevent_cmd. The string will be appended to
+ * the current cmd string as-is, with nothing prepended or appended.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int dynevent_str_add(struct dynevent_cmd *cmd, const char *str)
+{
+ int ret = 0;
+
+ ret = seq_buf_puts(&cmd->seq, str);
+ if (ret) {
+ pr_err("String is too long: %s\n", str);
+ return -E2BIG;
+ }
+
+ return ret;
+}
+
+/**
+ * dynevent_cmd_init - Initialize a dynevent_cmd object
+ * @cmd: A pointer to the dynevent_cmd struct representing the cmd
+ * @buf: A pointer to the buffer to generate the command into
+ * @maxlen: The length of the buffer the command will be generated into
+ * @type: The type of the cmd, checked against further operations
+ * @run_command: The type-specific function that will actually run the command
+ *
+ * Initialize a dynevent_cmd. A dynevent_cmd is used to build up and
+ * run dynamic event creation commands, such as commands for creating
+ * synthetic and kprobe events. Before calling any of the functions
+ * used to build the command, a dynevent_cmd object should be
+ * instantiated and initialized using this function.
+ *
+ * The initialization sets things up by saving a pointer to the
+ * user-supplied buffer and its length via the @buf and @maxlen
+ * params, and by saving the cmd-specific @type and @run_command
+ * params which are used to check subsequent dynevent_cmd operations
+ * and actually run the command when complete.
+ */
+void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen,
+ enum dynevent_type type,
+ dynevent_create_fn_t run_command)
+{
+ memset(cmd, '\0', sizeof(*cmd));
+
+ seq_buf_init(&cmd->seq, buf, maxlen);
+ cmd->type = type;
+ cmd->run_command = run_command;
+}
+
+/**
+ * dynevent_arg_init - Initialize a dynevent_arg object
+ * @arg: A pointer to the dynevent_arg struct representing the arg
+ * @separator: An (optional) separator, appended after adding the arg
+ *
+ * Initialize a dynevent_arg object. A dynevent_arg represents an
+ * object used to append single arguments to the current command
+ * string. After the arg string is successfully appended to the
+ * command string, the optional @separator is appended. If no
+ * separator was specified when initializing the arg, a space will be
+ * appended.
+ */
+void dynevent_arg_init(struct dynevent_arg *arg,
+ char separator)
+{
+ memset(arg, '\0', sizeof(*arg));
+
+ if (!separator)
+ separator = ' ';
+ arg->separator = separator;
+}
+
+/**
+ * dynevent_arg_pair_init - Initialize a dynevent_arg_pair object
+ * @arg_pair: A pointer to the dynevent_arg_pair struct representing the arg
+ * @operator: An (optional) operator, appended after adding the first arg
+ * @separator: An (optional) separator, appended after adding the second arg
+ *
+ * Initialize a dynevent_arg_pair object. A dynevent_arg_pair
+ * represents an object used to append argument pairs such as 'type
+ * variable_name;' or 'x+y' to the current command string. An
+ * argument pair consists of a left-hand-side argument and a
+ * right-hand-side argument separated by an operator, which can be
+ * whitespace, all followed by a separator, if applicable. After the
+ * first arg string is successfully appended to the command string,
+ * the optional @operator is appended, followed by the second arg
+ * and an optional @separator.  If no separator was specified when
+ * initializing the arg, a space will be appended.
+ */
+void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair,
+ char operator, char separator)
+{
+ memset(arg_pair, '\0', sizeof(*arg_pair));
+
+ if (!operator)
+ operator = ' ';
+ arg_pair->operator = operator;
+
+ if (!separator)
+ separator = ' ';
+ arg_pair->separator = separator;
+}
+
+/**
+ * dynevent_create - Create the dynamic event contained in dynevent_cmd
+ * @cmd: The dynevent_cmd object containing the dynamic event creation command
+ *
+ * Once a dynevent_cmd object has been successfully built up via the
+ * dynevent_cmd_init(), dynevent_arg_add() and dynevent_arg_pair_add()
+ * functions, this function runs the final command to actually create
+ * the event.
+ *
+ * Return: 0 if the event was successfully created, error otherwise.
+ */
+int dynevent_create(struct dynevent_cmd *cmd)
+{
+ return cmd->run_command(cmd);
+}
+EXPORT_SYMBOL_GPL(dynevent_create);
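
To make the flow of the dynevent_cmd helpers above concrete, here is a minimal
sketch of how a caller might build and run a command; my_run_command, the
event name, and the argument are hypothetical, and error handling is abbreviated:

    /* sketch only: a type-specific run callback is normally supplied by the subsystem */
    static int my_run_command(struct dynevent_cmd *cmd);   /* hypothetical */

    static int my_build_and_run(void)
    {
            struct dynevent_cmd cmd;
            struct dynevent_arg arg;
            char *buf;
            int ret;

            buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            dynevent_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN,
                              DYNEVENT_TYPE_SYNTH, my_run_command);

            dynevent_arg_init(&arg, 0);        /* 0 means a plain space separator */
            arg.str = "my_event";              /* hypothetical name */
            ret = dynevent_arg_add(&cmd, &arg, NULL);

            /* ... dynevent_arg_pair_add()/dynevent_str_add() for further args ... */

            if (!ret)
                    ret = dynevent_create(&cmd);   /* invokes my_run_command(&cmd) */

            kfree(buf);
            return ret;
    }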
diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h
index 46898138d2df..d6857a254ede 100644
--- a/kernel/trace/trace_dynevent.h
+++ b/kernel/trace/trace_dynevent.h
@@ -117,4 +117,36 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type);
#define for_each_dyn_event_safe(pos, n) \
list_for_each_entry_safe(pos, n, &dyn_event_list, list)
+extern void dynevent_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen,
+ enum dynevent_type type,
+ dynevent_create_fn_t run_command);
+
+typedef int (*dynevent_check_arg_fn_t)(void *data);
+
+struct dynevent_arg {
+ const char *str;
+ char separator; /* e.g. ';', ',', or nothing */
+};
+
+extern void dynevent_arg_init(struct dynevent_arg *arg,
+ char separator);
+extern int dynevent_arg_add(struct dynevent_cmd *cmd,
+ struct dynevent_arg *arg,
+ dynevent_check_arg_fn_t check_arg);
+
+struct dynevent_arg_pair {
+ const char *lhs;
+ const char *rhs;
+ char operator; /* e.g. '=' or nothing */
+ char separator; /* e.g. ';', ',', or nothing */
+};
+
+extern void dynevent_arg_pair_init(struct dynevent_arg_pair *arg_pair,
+ char operator, char separator);
+
+extern int dynevent_arg_pair_add(struct dynevent_cmd *cmd,
+ struct dynevent_arg_pair *arg_pair,
+ dynevent_check_arg_fn_t check_arg);
+extern int dynevent_str_add(struct dynevent_cmd *cmd, const char *str);
+
#endif
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 3e9d81608284..f22746f3c132 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -164,7 +164,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
F_STRUCT(
__field( int, size )
- __dynamic_array(unsigned long, caller )
+ __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
),
F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n"
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index fcd3975a88d1..f38234ecea18 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -238,7 +238,7 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
if (!pid_list)
return false;
- data = this_cpu_ptr(tr->trace_buffer.data);
+ data = this_cpu_ptr(tr->array_buffer.data);
return data->ignore_pid;
}
@@ -273,6 +273,7 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
if (!fbuffer->event)
return NULL;
+ fbuffer->regs = NULL;
fbuffer->entry = ring_buffer_event_data(fbuffer->event);
return fbuffer->entry;
}
@@ -547,7 +548,7 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
pid_list = rcu_dereference_sched(tr->filtered_pids);
- this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, prev) &&
trace_ignore_this_task(pid_list, next));
}
@@ -561,7 +562,7 @@ event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
pid_list = rcu_dereference_sched(tr->filtered_pids);
- this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, next));
}
@@ -572,12 +573,12 @@ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
struct trace_pid_list *pid_list;
/* Nothing to do if we are already tracing */
- if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
+ if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
return;
pid_list = rcu_dereference_sched(tr->filtered_pids);
- this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, task));
}
@@ -588,13 +589,13 @@ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
struct trace_pid_list *pid_list;
/* Nothing to do if we are not tracing */
- if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
+ if (this_cpu_read(tr->array_buffer.data->ignore_pid))
return;
pid_list = rcu_dereference_sched(tr->filtered_pids);
/* Set tracing if current is enabled */
- this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, current));
}
@@ -626,7 +627,7 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
}
for_each_possible_cpu(cpu)
- per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
+ per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
rcu_assign_pointer(tr->filtered_pids, NULL);
@@ -1595,7 +1596,7 @@ static void ignore_task_cpu(void *data)
pid_list = rcu_dereference_protected(tr->filtered_pids,
mutex_is_locked(&event_mutex));
- this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ this_cpu_write(tr->array_buffer.data->ignore_pid,
trace_ignore_this_task(pid_list, current));
}
@@ -2553,6 +2554,91 @@ find_event_file(struct trace_array *tr, const char *system, const char *event)
return file;
}
+/**
+ * trace_get_event_file - Find and return a trace event file
+ * @instance: The name of the trace instance containing the event
+ * @system: The name of the system containing the event
+ * @event: The name of the event
+ *
+ * Return a trace event file given the trace instance name, trace
+ * system, and trace event name. If the instance name is NULL, it
+ * refers to the top-level trace array.
+ *
+ * This function will look it up and return it if found, after calling
+ * trace_array_get() to prevent the instance from going away, and
+ * increment the event's module refcount to prevent it from being
+ * removed.
+ *
+ * To release the file, call trace_put_event_file(), which will call
+ * trace_array_put() and decrement the event's module refcount.
+ *
+ * Return: The trace event on success, ERR_PTR otherwise.
+ */
+struct trace_event_file *trace_get_event_file(const char *instance,
+ const char *system,
+ const char *event)
+{
+ struct trace_array *tr = top_trace_array();
+ struct trace_event_file *file = NULL;
+ int ret = -EINVAL;
+
+ if (instance) {
+ tr = trace_array_find_get(instance);
+ if (!tr)
+ return ERR_PTR(-ENOENT);
+ } else {
+ ret = trace_array_get(tr);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ mutex_lock(&event_mutex);
+
+ file = find_event_file(tr, system, event);
+ if (!file) {
+ trace_array_put(tr);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Don't let event modules unload while in use */
+ ret = try_module_get(file->event_call->mod);
+ if (!ret) {
+ trace_array_put(tr);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = 0;
+ out:
+ mutex_unlock(&event_mutex);
+
+ if (ret)
+ file = ERR_PTR(ret);
+
+ return file;
+}
+EXPORT_SYMBOL_GPL(trace_get_event_file);
+
+/**
+ * trace_put_event_file - Release a file from trace_get_event_file()
+ * @file: The trace event file
+ *
+ * If a file was retrieved using trace_get_event_file(), this should
+ * be called when it's no longer needed. It will cancel the previous
+ * trace_array_get() called by that function, and decrement the
+ * event's module refcount.
+ */
+void trace_put_event_file(struct trace_event_file *file)
+{
+ mutex_lock(&event_mutex);
+ module_put(file->event_call->mod);
+ mutex_unlock(&event_mutex);
+
+ trace_array_put(file->tr);
+}
+EXPORT_SYMBOL_GPL(trace_put_event_file);
+
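
As a rough illustration of the get/put pairing described in the comments above
(the instance, system, and event names here are hypothetical):

    struct trace_event_file *file;

    file = trace_get_event_file(NULL, "synthetic", "my_synth_event");
    if (IS_ERR(file))
            return PTR_ERR(file);

    /* ... generate events against 'file', inspect its flags, etc. ... */

    trace_put_event_file(file);    /* drops the trace_array and module references */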
#ifdef CONFIG_DYNAMIC_FTRACE
/* Avoid typos */
@@ -3409,8 +3495,8 @@ static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{
+ struct trace_buffer *buffer;
struct ring_buffer_event *event;
- struct ring_buffer *buffer;
struct ftrace_entry *entry;
unsigned long flags;
long disabled;
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index f2896d13001b..e7ce7cdac62f 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -66,7 +66,12 @@
C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
C(VAR_NOT_FOUND, "Couldn't find variable"), \
- C(FIELD_NOT_FOUND, "Couldn't find field"),
+ C(FIELD_NOT_FOUND, "Couldn't find field"), \
+ C(EMPTY_ASSIGNMENT, "Empty assignment"), \
+ C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
+ C(EMPTY_SORT_FIELD, "Empty sort field"), \
+ C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
+ C(INVALID_SORT_FIELD, "Sort field must be a key or a val"),
#undef C
#define C(a, b) HIST_ERR_##a
@@ -375,7 +380,7 @@ struct hist_trigger_data {
unsigned int n_save_var_str;
};
-static int synth_event_create(int argc, const char **argv);
+static int create_synth_event(int argc, const char **argv);
static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
static int synth_event_release(struct dyn_event *ev);
static bool synth_event_is_busy(struct dyn_event *ev);
@@ -383,7 +388,7 @@ static bool synth_event_match(const char *system, const char *event,
int argc, const char **argv, struct dyn_event *ev);
static struct dyn_event_operations synth_event_ops = {
- .create = synth_event_create,
+ .create = create_synth_event,
.show = synth_event_show,
.is_busy = synth_event_is_busy,
.free = synth_event_release,
@@ -394,6 +399,7 @@ struct synth_field {
char *type;
char *name;
size_t size;
+ unsigned int offset;
bool is_signed;
bool is_string;
};
@@ -408,6 +414,7 @@ struct synth_event {
struct trace_event_class class;
struct trace_event_call call;
struct tracepoint *tp;
+ struct module *mod;
};
static bool is_synth_event(struct dyn_event *ev)
@@ -470,11 +477,12 @@ struct action_data {
* When a histogram trigger is hit, the values of any
* references to variables, including variables being passed
* as parameters to synthetic events, are collected into a
- * var_ref_vals array. This var_ref_idx is the index of the
- * first param in the array to be passed to the synthetic
- * event invocation.
+ * var_ref_vals array. This var_ref_idx array is an array of
+ * indices into the var_ref_vals array, one for each synthetic
+ * event param, and is passed to the synthetic event
+ * invocation.
*/
- unsigned int var_ref_idx;
+ unsigned int var_ref_idx[TRACING_MAP_VARS_MAX];
struct synth_event *synth_event;
bool use_trace_keyword;
char *synth_event_name;
@@ -608,7 +616,8 @@ static void last_cmd_set(struct trace_event_file *file, char *str)
if (!str)
return;
- strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
+ strcpy(last_cmd, "hist:");
+ strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:"));
if (file) {
call = file->event_call;
@@ -662,6 +671,8 @@ static int synth_event_define_fields(struct trace_event_call *call)
if (ret)
break;
+ event->fields[i]->offset = n_u64;
+
if (event->fields[i]->is_string) {
offset += STR_VAR_LEN_MAX;
n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
@@ -834,7 +845,7 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
fmt = synth_field_fmt(se->fields[i]->type);
/* parameter types */
- if (tr->trace_flags & TRACE_ITER_VERBOSE)
+ if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
trace_seq_printf(s, "%s ", fmt);
snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
@@ -875,14 +886,14 @@ static struct trace_event_functions synth_event_funcs = {
static notrace void trace_event_raw_event_synth(void *__data,
u64 *var_ref_vals,
- unsigned int var_ref_idx)
+ unsigned int *var_ref_idx)
{
struct trace_event_file *trace_file = __data;
struct synth_trace_event *entry;
struct trace_event_buffer fbuffer;
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
struct synth_event *event;
- unsigned int i, n_u64;
+ unsigned int i, n_u64, val_idx;
int fields_size = 0;
event = trace_file->event_call->data;
@@ -896,7 +907,7 @@ static notrace void trace_event_raw_event_synth(void *__data,
* Avoid ring buffer recursion detection, as this event
* is being performed within another event.
*/
- buffer = trace_file->tr->trace_buffer.buffer;
+ buffer = trace_file->tr->array_buffer.buffer;
ring_buffer_nest_start(buffer);
entry = trace_event_buffer_reserve(&fbuffer, trace_file,
@@ -905,15 +916,16 @@ static notrace void trace_event_raw_event_synth(void *__data,
goto out;
for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+ val_idx = var_ref_idx[i];
if (event->fields[i]->is_string) {
- char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
+ char *str_val = (char *)(long)var_ref_vals[val_idx];
char *str_field = (char *)&entry->fields[n_u64];
strscpy(str_field, str_val, STR_VAR_LEN_MAX);
n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
} else {
struct synth_field *field = event->fields[i];
- u64 val = var_ref_vals[var_ref_idx + i];
+ u64 val = var_ref_vals[val_idx];
switch (field->size) {
case 1:
@@ -1113,10 +1125,10 @@ static struct tracepoint *alloc_synth_tracepoint(char *name)
}
typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
- unsigned int var_ref_idx);
+ unsigned int *var_ref_idx);
static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
- unsigned int var_ref_idx)
+ unsigned int *var_ref_idx)
{
struct tracepoint *tp = event->tp;
@@ -1293,6 +1305,273 @@ struct hist_var_data {
struct hist_trigger_data *hist_data;
};
+static int synth_event_check_arg_fn(void *data)
+{
+ struct dynevent_arg_pair *arg_pair = data;
+ int size;
+
+ size = synth_field_size((char *)arg_pair->lhs);
+
+ return size ? 0 : -EINVAL;
+}
+
+/**
+ * synth_event_add_field - Add a new field to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @type: The type of the new field to add
+ * @name: The name of the new field to add
+ *
+ * Add a new field to a synthetic event cmd object. Field ordering is in
+ * the same order the fields are added.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
+ const char *name)
+{
+ struct dynevent_arg_pair arg_pair;
+ int ret;
+
+ if (cmd->type != DYNEVENT_TYPE_SYNTH)
+ return -EINVAL;
+
+ if (!type || !name)
+ return -EINVAL;
+
+ dynevent_arg_pair_init(&arg_pair, 0, ';');
+
+ arg_pair.lhs = type;
+ arg_pair.rhs = name;
+
+ ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
+ if (ret)
+ return ret;
+
+ if (++cmd->n_fields > SYNTH_FIELDS_MAX)
+ ret = -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_field);
+
+/**
+ * synth_event_add_field_str - Add a new field to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @type_name: The type and name of the new field to add, as a single string
+ *
+ * Add a new field to a synthetic event cmd object, as a single
+ * string. The @type_name string is expected to be of the form 'type
+ * name', to which a ';' will be appended. No sanity checking is done -
+ * what's passed in is assumed to already be well-formed. Field
+ * ordering is in the same order the fields are added.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
+{
+ struct dynevent_arg arg;
+ int ret;
+
+ if (cmd->type != DYNEVENT_TYPE_SYNTH)
+ return -EINVAL;
+
+ if (!type_name)
+ return -EINVAL;
+
+ dynevent_arg_init(&arg, ';');
+
+ arg.str = type_name;
+
+ ret = dynevent_arg_add(cmd, &arg, NULL);
+ if (ret)
+ return ret;
+
+ if (++cmd->n_fields > SYNTH_FIELDS_MAX)
+ ret = -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_field_str);
+
+/**
+ * synth_event_add_fields - Add multiple fields to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ *
+ * Add a new set of fields to a synthetic event cmd object. The event
+ * fields that will be defined for the event should be passed in as an
+ * array of struct synth_field_desc, and the number of elements in the
+ * array passed in as n_fields. Field ordering will retain the
+ * ordering given in the fields array.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_fields(struct dynevent_cmd *cmd,
+ struct synth_field_desc *fields,
+ unsigned int n_fields)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < n_fields; i++) {
+ if (fields[i].type == NULL || fields[i].name == NULL) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_fields);
+
+/**
+ * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @name: The name of the synthetic event
+ * @mod: The module creating the event, NULL if not created from a module
+ * @args: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the synth_event_gen_cmd_start() wrapper, which
+ * automatically adds a NULL to the end of the arg list. If this
+ * function is used directly, make sure the last arg in the variable
+ * arg list is NULL.
+ *
+ * Generate a synthetic event command to be executed by
+ * synth_event_gen_cmd_end(). This function can be used to generate
+ * the complete command or only the first part of it; in the latter
+ * case, synth_event_add_field(), synth_event_add_field_str(), or
+ * synth_event_add_fields() can be used to add more fields following
+ * this.
+ *
+ * There should be an even number of variable args, each pair consisting
+ * of a type followed by a field name.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
+ struct module *mod, ...)
+{
+ struct dynevent_arg arg;
+ va_list args;
+ int ret;
+
+ cmd->event_name = name;
+ cmd->private_data = mod;
+
+ if (cmd->type != DYNEVENT_TYPE_SYNTH)
+ return -EINVAL;
+
+ dynevent_arg_init(&arg, 0);
+ arg.str = name;
+ ret = dynevent_arg_add(cmd, &arg, NULL);
+ if (ret)
+ return ret;
+
+ va_start(args, mod);
+ for (;;) {
+ const char *type, *name;
+
+ type = va_arg(args, const char *);
+ if (!type)
+ break;
+ name = va_arg(args, const char *);
+ if (!name)
+ break;
+
+ if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = synth_event_add_field(cmd, type, name);
+ if (ret)
+ break;
+ }
+ va_end(args);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
+
+/**
+ * synth_event_gen_cmd_array_start - Start synthetic event command from an array
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @name: The name of the synthetic event
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ *
+ * Generate a synthetic event command to be executed by
+ * synth_event_gen_cmd_end(). This function can be used to generate
+ * the complete command or only the first part of it; in the latter
+ * case, synth_event_add_field(), synth_event_add_field_str(), or
+ * synth_event_add_fields() can be used to add more fields following
+ * this.
+ *
+ * The event fields that will be defined for the event should be
+ * passed in as an array of struct synth_field_desc, and the number of
+ * elements in the array passed in as n_fields. Field ordering will
+ * retain the ordering given in the fields array.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
+ struct module *mod,
+ struct synth_field_desc *fields,
+ unsigned int n_fields)
+{
+ struct dynevent_arg arg;
+ unsigned int i;
+ int ret = 0;
+
+ cmd->event_name = name;
+ cmd->private_data = mod;
+
+ if (cmd->type != DYNEVENT_TYPE_SYNTH)
+ return -EINVAL;
+
+ if (n_fields > SYNTH_FIELDS_MAX)
+ return -EINVAL;
+
+ dynevent_arg_init(&arg, 0);
+ arg.str = name;
+ ret = dynevent_arg_add(cmd, &arg, NULL);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < n_fields; i++) {
+ if (fields[i].type == NULL || fields[i].name == NULL)
+ return -EINVAL;
+
+ ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
+
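
Tying the command helpers above together, a sketch of defining a synthetic
event from kernel code might look like the following; it assumes the
synth_event_gen_cmd_start()/synth_event_gen_cmd_end() wrappers referenced in
the comments, and the event and field names are made up:

    struct dynevent_cmd cmd;
    char *buf;
    int ret;

    buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;

    synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

    /* event name, then type/name pairs; the wrapper NULL-terminates the list */
    ret = synth_event_gen_cmd_start(&cmd, "my_synth_event", THIS_MODULE,
                                    "pid_t", "pid",
                                    "u64", "delta_ns");
    if (!ret)
            ret = synth_event_add_field(&cmd, "unsigned int", "lat_us");
    if (!ret)
            ret = synth_event_gen_cmd_end(&cmd);   /* runs the command */

    kfree(buf);
    return ret;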
static int __create_synth_event(int argc, const char *name, const char **argv)
{
struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
@@ -1361,29 +1640,123 @@ static int __create_synth_event(int argc, const char *name, const char **argv)
goto out;
}
+/**
+ * synth_event_create - Create a new synthetic event
+ * @name: The name of the new synthetic event
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ * @mod: The module creating the event, NULL if not created from a module
+ *
+ * Create a new synthetic event with the given name under the
+ * trace/events/synthetic/ directory. The event fields that will be
+ * defined for the event should be passed in as an array of struct
+ * synth_field_desc, and the number of elements in the array passed in as
+ * n_fields. Field ordering will retain the ordering given in the
+ * fields array.
+ *
+ * If the new synthetic event is being created from a module, the mod
+ * param must be non-NULL. This will ensure that the trace buffer
+ * won't contain unreadable events.
+ *
+ * The new synth event should be deleted using the synth_event_delete()
+ * function.  The new synthetic event can be generated from modules or
+ * other kernel code using synth_event_trace() and related functions.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_create(const char *name, struct synth_field_desc *fields,
+ unsigned int n_fields, struct module *mod)
+{
+ struct dynevent_cmd cmd;
+ char *buf;
+ int ret;
+
+ buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+ ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
+ fields, n_fields);
+ if (ret)
+ goto out;
+
+ ret = synth_event_gen_cmd_end(&cmd);
+ out:
+ kfree(buf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_create);
+
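
A sketch of this one-shot path (field types and names are hypothetical;
struct synth_field_desc is the type/name descriptor used throughout this series):

    static struct synth_field_desc my_fields[] = {
            { .type = "pid_t",    .name = "pid"      },
            { .type = "u64",      .name = "delta_ns" },
            { .type = "char[16]", .name = "comm_str" },   /* [n] makes it an array */
    };
    int ret;

    ret = synth_event_create("my_synth_event", my_fields,
                             ARRAY_SIZE(my_fields), THIS_MODULE);
    if (ret)
            return ret;

    /* ... later, once nothing is generating it any more ... */
    ret = synth_event_delete("my_synth_event");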
+static int destroy_synth_event(struct synth_event *se)
+{
+ int ret;
+
+ if (se->ref)
+ ret = -EBUSY;
+ else {
+ ret = unregister_synth_event(se);
+ if (!ret) {
+ dyn_event_remove(&se->devent);
+ free_synth_event(se);
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * synth_event_delete - Delete a synthetic event
+ * @event_name: The name of the synthetic event to delete
+ *
+ * Delete a synthetic event that was created with synth_event_create().
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_delete(const char *event_name)
+{
+ struct synth_event *se = NULL;
+ struct module *mod = NULL;
+ int ret = -ENOENT;
+
+ mutex_lock(&event_mutex);
+ se = find_synth_event(event_name);
+ if (se) {
+ mod = se->mod;
+ ret = destroy_synth_event(se);
+ }
+ mutex_unlock(&event_mutex);
+
+ if (mod) {
+ mutex_lock(&trace_types_lock);
+ /*
+ * It is safest to reset the ring buffer if the module
+ * being unloaded registered any events that were
+ * used. The only worry is if a new module gets
+ * loaded, and takes on the same id as the events of
+ * this module. When printing out the buffer, traced
+ * events left over from this module may be passed to
+ * the new module events and unexpected results may
+ * occur.
+ */
+ tracing_reset_all_online_cpus();
+ mutex_unlock(&trace_types_lock);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_delete);
+
static int create_or_delete_synth_event(int argc, char **argv)
{
const char *name = argv[0];
- struct synth_event *event = NULL;
int ret;
/* trace_run_command() ensures argc != 0 */
if (name[0] == '!') {
- mutex_lock(&event_mutex);
- event = find_synth_event(name + 1);
- if (event) {
- if (event->ref)
- ret = -EBUSY;
- else {
- ret = unregister_synth_event(event);
- if (!ret) {
- dyn_event_remove(&event->devent);
- free_synth_event(event);
- }
- }
- } else
- ret = -ENOENT;
- mutex_unlock(&event_mutex);
+ ret = synth_event_delete(name + 1);
return ret;
}
@@ -1391,7 +1764,474 @@ static int create_or_delete_synth_event(int argc, char **argv)
return ret == -ECANCELED ? -EINVAL : ret;
}
-static int synth_event_create(int argc, const char **argv)
+static int synth_event_run_command(struct dynevent_cmd *cmd)
+{
+ struct synth_event *se;
+ int ret;
+
+ ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event);
+ if (ret)
+ return ret;
+
+ se = find_synth_event(cmd->event_name);
+ if (WARN_ON(!se))
+ return -ENOENT;
+
+ se->mod = cmd->private_data;
+
+ return ret;
+}
+
+/**
+ * synth_event_cmd_init - Initialize a synthetic event command object
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @buf: A pointer to the buffer used to build the command
+ * @maxlen: The length of the buffer passed in @buf
+ *
+ * Initialize a synthetic event command object. Use this before
+ * calling any of the other dynevent_cmd functions.
+ */
+void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
+{
+ dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
+ synth_event_run_command);
+}
+EXPORT_SYMBOL_GPL(synth_event_cmd_init);
+
+/**
+ * synth_event_trace - Trace a synthetic event
+ * @file: The trace_event_file representing the synthetic event
+ * @n_vals: The number of values passed in the variable argument list
+ * @args: Variable number of args containing the event values
+ *
+ * Trace a synthetic event using the values passed in the variable
+ * argument list.
+ *
+ * The argument list should be a list of 'n_vals' u64 values.  The number
+ * of vals must match the number of fields in the synthetic event, and
+ * must be in the same order as the synthetic event fields.
+ *
+ * All vals should be cast to u64, and string vals are just pointers
+ * to strings, cast to u64. Strings will be copied into space
+ * reserved in the event for the string, using these pointers.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
+{
+ struct trace_event_buffer fbuffer;
+ struct synth_trace_event *entry;
+ struct trace_buffer *buffer;
+ struct synth_event *event;
+ unsigned int i, n_u64;
+ int fields_size = 0;
+ va_list args;
+ int ret = 0;
+
+ /*
+ * Normal event generation doesn't get called at all unless
+ * the ENABLED bit is set (which attaches the probe thus
+ * allowing this code to be called, etc). Because this is
+ * called directly by the user, we don't have that but we
+ * still need to honor not logging when disabled.
+ */
+ if (!(file->flags & EVENT_FILE_FL_ENABLED))
+ return 0;
+
+ event = file->event_call->data;
+
+ if (n_vals != event->n_fields)
+ return -EINVAL;
+
+ if (trace_trigger_soft_disabled(file))
+ return -EINVAL;
+
+ fields_size = event->n_u64 * sizeof(u64);
+
+ /*
+ * Avoid ring buffer recursion detection, as this event
+ * is being performed within another event.
+ */
+ buffer = file->tr->array_buffer.buffer;
+ ring_buffer_nest_start(buffer);
+
+ entry = trace_event_buffer_reserve(&fbuffer, file,
+ sizeof(*entry) + fields_size);
+ if (!entry) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ va_start(args, n_vals);
+ for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+ u64 val;
+
+ val = va_arg(args, u64);
+
+ if (event->fields[i]->is_string) {
+ char *str_val = (char *)(long)val;
+ char *str_field = (char *)&entry->fields[n_u64];
+
+ strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+ n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ } else {
+ entry->fields[n_u64] = val;
+ n_u64++;
+ }
+ }
+ va_end(args);
+
+ trace_event_buffer_commit(&fbuffer);
+out:
+ ring_buffer_nest_end(buffer);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace);
+
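
Generating an instance of such an event with this all-at-once interface might
then look roughly like the following sketch; the file is looked up as in the
trace_get_event_file() example earlier, and the values are hypothetical:

    struct trace_event_file *file;
    int ret;

    file = trace_get_event_file(NULL, "synthetic", "my_synth_event");
    if (IS_ERR(file))
            return PTR_ERR(file);

    /* one u64 per field, in field order; strings are passed as pointers cast to u64 */
    ret = synth_event_trace(file, 3,
                            (u64)current->pid,
                            (u64)ktime_get_ns(),
                            (u64)(long)"hello");

    trace_put_event_file(file);
    return ret;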
+/**
+ * synth_event_trace_array - Trace a synthetic event from an array
+ * @file: The trace_event_file representing the synthetic event
+ * @vals: Array of values
+ * @n_vals: The number of values in vals
+ *
+ * Trace a synthetic event using the values passed in as 'vals'.
+ *
+ * The 'vals' array is just an array of 'n_vals' u64. The number of
+ * vals must match the number of fields in the synthetic event, and
+ * must be in the same order as the synthetic event fields.
+ *
+ * All vals should be cast to u64, and string vals are just pointers
+ * to strings, cast to u64. Strings will be copied into space
+ * reserved in the event for the string, using these pointers.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
+ unsigned int n_vals)
+{
+ struct trace_event_buffer fbuffer;
+ struct synth_trace_event *entry;
+ struct trace_buffer *buffer;
+ struct synth_event *event;
+ unsigned int i, n_u64;
+ int fields_size = 0;
+ int ret = 0;
+
+ /*
+ * Normal event generation doesn't get called at all unless
+ * the ENABLED bit is set (which attaches the probe thus
+ * allowing this code to be called, etc). Because this is
+ * called directly by the user, we don't have that but we
+ * still need to honor not logging when disabled.
+ */
+ if (!(file->flags & EVENT_FILE_FL_ENABLED))
+ return 0;
+
+ event = file->event_call->data;
+
+ if (n_vals != event->n_fields)
+ return -EINVAL;
+
+ if (trace_trigger_soft_disabled(file))
+ return -EINVAL;
+
+ fields_size = event->n_u64 * sizeof(u64);
+
+ /*
+ * Avoid ring buffer recursion detection, as this event
+ * is being performed within another event.
+ */
+ buffer = file->tr->array_buffer.buffer;
+ ring_buffer_nest_start(buffer);
+
+ entry = trace_event_buffer_reserve(&fbuffer, file,
+ sizeof(*entry) + fields_size);
+ if (!entry) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+ if (event->fields[i]->is_string) {
+ char *str_val = (char *)(long)vals[i];
+ char *str_field = (char *)&entry->fields[n_u64];
+
+ strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+ n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ } else {
+ entry->fields[n_u64] = vals[i];
+ n_u64++;
+ }
+ }
+
+ trace_event_buffer_commit(&fbuffer);
+out:
+ ring_buffer_nest_end(buffer);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_array);
+
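
The array variant is equivalent; a sketch reusing 'file' and 'ret' from the
previous example:

    u64 vals[3];

    vals[0] = (u64)current->pid;
    vals[1] = (u64)ktime_get_ns();
    vals[2] = (u64)(long)"hello";    /* string fields are pointers cast to u64 */

    ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));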
+/**
+ * synth_event_trace_start - Start piecewise synthetic event trace
+ * @file: The trace_event_file representing the synthetic event
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Start the trace of a synthetic event field-by-field rather than all
+ * at once.
+ *
+ * This function 'opens' an event trace, which means space is reserved
+ * for the event in the trace buffer, after which the event's
+ * individual field values can be set through either
+ * synth_event_add_next_val() or synth_event_add_val().
+ *
+ * A pointer to a trace_state object is passed in, which will keep
+ * track of the current event trace state until the event trace is
+ * closed (and the event finally traced) using
+ * synth_event_trace_end().
+ *
+ * Note that synth_event_trace_end() must be called after all values
+ * have been added for each event trace, regardless of whether adding
+ * all field values succeeded or not.
+ *
+ * Note also that for a given event trace, all fields must be added
+ * using either synth_event_add_next_val() or synth_event_add_val()
+ * but not both together or interleaved.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_start(struct trace_event_file *file,
+ struct synth_event_trace_state *trace_state)
+{
+ struct synth_trace_event *entry;
+ int fields_size = 0;
+ int ret = 0;
+
+ if (!trace_state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memset(trace_state, '\0', sizeof(*trace_state));
+
+ /*
+ * Normal event tracing doesn't get called at all unless the
+ * ENABLED bit is set (which attaches the probe thus allowing
+ * this code to be called, etc). Because this is called
+ * directly by the user, we don't have that but we still need
+	 * to honor not logging when disabled. For the iterated
+	 * trace case, we save the enabled state upon start and just
+ * ignore the following data calls.
+ */
+ if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
+ trace_state->enabled = false;
+ goto out;
+ }
+
+ trace_state->enabled = true;
+
+ trace_state->event = file->event_call->data;
+
+ if (trace_trigger_soft_disabled(file)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fields_size = trace_state->event->n_u64 * sizeof(u64);
+
+ /*
+ * Avoid ring buffer recursion detection, as this event
+ * is being performed within another event.
+ */
+ trace_state->buffer = file->tr->array_buffer.buffer;
+ ring_buffer_nest_start(trace_state->buffer);
+
+ entry = trace_event_buffer_reserve(&trace_state->fbuffer, file,
+ sizeof(*entry) + fields_size);
+ if (!entry) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ trace_state->entry = entry;
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_start);
+
+static int __synth_event_add_val(const char *field_name, u64 val,
+ struct synth_event_trace_state *trace_state)
+{
+ struct synth_field *field = NULL;
+ struct synth_trace_event *entry;
+ struct synth_event *event;
+ int i, ret = 0;
+
+ if (!trace_state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* can't mix synth_event_add_next_val() with synth_event_add_val() */
+ if (field_name) {
+ if (trace_state->add_next) {
+ ret = -EINVAL;
+ goto out;
+ }
+ trace_state->add_name = true;
+ } else {
+ if (trace_state->add_name) {
+ ret = -EINVAL;
+ goto out;
+ }
+ trace_state->add_next = true;
+ }
+
+ if (!trace_state->enabled)
+ goto out;
+
+ event = trace_state->event;
+ if (trace_state->add_name) {
+ for (i = 0; i < event->n_fields; i++) {
+ field = event->fields[i];
+ if (strcmp(field->name, field_name) == 0)
+ break;
+ }
+ if (!field) {
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ if (trace_state->cur_field >= event->n_fields) {
+ ret = -EINVAL;
+ goto out;
+ }
+ field = event->fields[trace_state->cur_field++];
+ }
+
+ entry = trace_state->entry;
+ if (field->is_string) {
+ char *str_val = (char *)(long)val;
+ char *str_field;
+
+ if (!str_val) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ str_field = (char *)&entry->fields[field->offset];
+ strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+ } else
+ entry->fields[field->offset] = val;
+ out:
+ return ret;
+}
+
+/**
+ * synth_event_add_next_val - Add the next field's value to an open synth trace
+ * @val: The value to set the next field to
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Set the value of the next field in an event that's been opened by
+ * synth_event_trace_start().
+ *
+ * The val param should be the value cast to u64. If the value points
+ * to a string, the val param should be a char * cast to u64.
+ *
+ * This function assumes all the fields in an event are to be set one
+ * after another - successive calls to this function are made, one for
+ * each field, in the order of the fields in the event, until all
+ * fields have been set. If you'd rather set each field individually
+ * without regard to ordering, synth_event_add_val() can be used
+ * instead.
+ *
+ * Note however that synth_event_add_next_val() and
+ * synth_event_add_val() can't be intermixed for a given event trace -
+ * one or the other but not both can be used at the same time.
+ *
+ * Note also that synth_event_trace_end() must be called after all
+ * values have been added for each event trace, regardless of whether
+ * adding all field values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_add_next_val(u64 val,
+ struct synth_event_trace_state *trace_state)
+{
+ return __synth_event_add_val(NULL, val, trace_state);
+}
+EXPORT_SYMBOL_GPL(synth_event_add_next_val);
+
+/**
+ * synth_event_add_val - Add a named field's value to an open synth trace
+ * @field_name: The name of the synthetic event field value to set
+ * @val: The value to set the named field to
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Set the value of the named field in an event that's been opened by
+ * synth_event_trace_start().
+ *
+ * The val param should be the value cast to u64. If the value points
+ * to a string, the val param should be a char * cast to u64.
+ *
+ * This function looks up the field name, and if found, sets the field
+ * to the specified value. This lookup makes this function more
+ * expensive than synth_event_add_next_val(), so use that or the
+ * non-piecewise synth_event_trace() instead if efficiency is more
+ * important.
+ *
+ * Note however that synth_event_add_next_val() and
+ * synth_event_add_val() can't be intermixed for a given event trace -
+ * one or the other but not both can be used at the same time.
+ *
+ * Note also that synth_event_trace_end() must be called after all
+ * values have been added for each event trace, regardless of whether
+ * adding all field values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_add_val(const char *field_name, u64 val,
+ struct synth_event_trace_state *trace_state)
+{
+ return __synth_event_add_val(field_name, val, trace_state);
+}
+EXPORT_SYMBOL_GPL(synth_event_add_val);
+
+/**
+ * synth_event_trace_end - End piecewise synthetic event trace
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * End the trace of a synthetic event opened by
+ * synth_event_trace_start().
+ *
+ * This function 'closes' an event trace, which basically means that
+ * it commits the reserved event and cleans up other loose ends.
+ *
+ * A pointer to a trace_state object is passed in, which will keep
+ * track of the current event trace state opened with
+ * synth_event_trace_start().
+ *
+ * Note that this function must be called after all values have been
+ * added for each event trace, regardless of whether adding all field
+ * values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_end(struct synth_event_trace_state *trace_state)
+{
+ if (!trace_state)
+ return -EINVAL;
+
+ trace_event_buffer_commit(&trace_state->fbuffer);
+
+ ring_buffer_nest_end(trace_state->buffer);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_end);
+
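
A sketch of the piecewise interface as a whole, again with hypothetical field
names; note that synth_event_trace_end() is called regardless of whether
adding the values succeeded:

    struct synth_event_trace_state state;
    int ret;

    ret = synth_event_trace_start(file, &state);
    if (ret)
            return ret;

    ret = synth_event_add_val("pid", (u64)current->pid, &state);
    if (!ret)
            ret = synth_event_add_val("delta_ns", (u64)ktime_get_ns(), &state);
    /* synth_event_add_next_val() could be used instead, but the two can't be mixed */

    synth_event_trace_end(&state);    /* commits the reserved event */
    return ret;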
+static int create_synth_event(int argc, const char **argv)
{
const char *name = argv[0];
int len;
@@ -2041,12 +2881,6 @@ static int parse_map_size(char *str)
unsigned long size, map_bits;
int ret;
- strsep(&str, "=");
- if (!str) {
- ret = -EINVAL;
- goto out;
- }
-
ret = kstrtoul(str, 0, &size);
if (ret)
goto out;
@@ -2106,25 +2940,25 @@ static int parse_action(char *str, struct hist_trigger_attrs *attrs)
static int parse_assignment(struct trace_array *tr,
char *str, struct hist_trigger_attrs *attrs)
{
- int ret = 0;
+ int len, ret = 0;
- if ((str_has_prefix(str, "key=")) ||
- (str_has_prefix(str, "keys="))) {
- attrs->keys_str = kstrdup(str, GFP_KERNEL);
+ if ((len = str_has_prefix(str, "key=")) ||
+ (len = str_has_prefix(str, "keys="))) {
+ attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
if (!attrs->keys_str) {
ret = -ENOMEM;
goto out;
}
- } else if ((str_has_prefix(str, "val=")) ||
- (str_has_prefix(str, "vals=")) ||
- (str_has_prefix(str, "values="))) {
- attrs->vals_str = kstrdup(str, GFP_KERNEL);
+ } else if ((len = str_has_prefix(str, "val=")) ||
+ (len = str_has_prefix(str, "vals=")) ||
+ (len = str_has_prefix(str, "values="))) {
+ attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
if (!attrs->vals_str) {
ret = -ENOMEM;
goto out;
}
- } else if (str_has_prefix(str, "sort=")) {
- attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
+ } else if ((len = str_has_prefix(str, "sort="))) {
+ attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
if (!attrs->sort_key_str) {
ret = -ENOMEM;
goto out;
@@ -2135,12 +2969,8 @@ static int parse_assignment(struct trace_array *tr,
ret = -ENOMEM;
goto out;
}
- } else if (str_has_prefix(str, "clock=")) {
- strsep(&str, "=");
- if (!str) {
- ret = -EINVAL;
- goto out;
- }
+ } else if ((len = str_has_prefix(str, "clock="))) {
+ str += len;
str = strstrip(str);
attrs->clock = kstrdup(str, GFP_KERNEL);
@@ -2148,8 +2978,8 @@ static int parse_assignment(struct trace_array *tr,
ret = -ENOMEM;
goto out;
}
- } else if (str_has_prefix(str, "size=")) {
- int map_bits = parse_map_size(str);
+ } else if ((len = str_has_prefix(str, "size="))) {
+ int map_bits = parse_map_size(str + len);
if (map_bits < 0) {
ret = map_bits;
@@ -2189,8 +3019,15 @@ parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
while (trigger_str) {
char *str = strsep(&trigger_str, ":");
+ char *rhs;
- if (strchr(str, '=')) {
+ rhs = strchr(str, '=');
+ if (rhs) {
+ if (!strlen(++rhs)) {
+ ret = -EINVAL;
+ hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
+ goto free;
+ }
ret = parse_assignment(tr, str, attrs);
if (ret)
goto free;
@@ -2661,6 +3498,22 @@ static int init_var_ref(struct hist_field *ref_field,
goto out;
}
+static int find_var_ref_idx(struct hist_trigger_data *hist_data,
+ struct hist_field *var_field)
+{
+ struct hist_field *ref_field;
+ int i;
+
+ for (i = 0; i < hist_data->n_var_refs; i++) {
+ ref_field = hist_data->var_refs[i];
+ if (ref_field->var.idx == var_field->var.idx &&
+ ref_field->var.hist_data == var_field->hist_data)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
/**
* create_var_ref - Create a variable reference and attach it to trigger
* @hist_data: The trigger that will be referencing the variable
@@ -4146,8 +4999,11 @@ static int check_synth_field(struct synth_event *event,
field = event->fields[field_pos];
- if (strcmp(field->type, hist_field->type) != 0)
- return -EINVAL;
+ if (strcmp(field->type, hist_field->type) != 0) {
+ if (field->size != hist_field->size ||
+ field->is_signed != hist_field->is_signed)
+ return -EINVAL;
+ }
return 0;
}
@@ -4234,11 +5090,11 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
struct trace_array *tr = hist_data->event_file->tr;
char *event_name, *param, *system = NULL;
struct hist_field *hist_field, *var_ref;
- unsigned int i, var_ref_idx;
+ unsigned int i;
unsigned int field_pos = 0;
struct synth_event *event;
char *synth_event_name;
- int ret = 0;
+ int var_ref_idx, ret = 0;
lockdep_assert_held(&event_mutex);
@@ -4255,8 +5111,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
event->ref++;
- var_ref_idx = hist_data->n_var_refs;
-
for (i = 0; i < data->n_params; i++) {
char *p;
@@ -4305,6 +5159,14 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
goto err;
}
+ var_ref_idx = find_var_ref_idx(hist_data, var_ref);
+ if (WARN_ON(var_ref_idx < 0)) {
+ ret = var_ref_idx;
+ goto err;
+ }
+
+ data->var_ref_idx[i] = var_ref_idx;
+
field_pos++;
kfree(p);
continue;
@@ -4323,7 +5185,6 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
}
data->synth_event = event;
- data->var_ref_idx = var_ref_idx;
out:
return ret;
err:
@@ -4542,10 +5403,6 @@ static int create_val_fields(struct hist_trigger_data *hist_data,
if (!fields_str)
goto out;
- strsep(&fields_str, "=");
- if (!fields_str)
- goto out;
-
for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
j < TRACING_MAP_VALS_MAX; i++) {
field_str = strsep(&fields_str, ",");
@@ -4640,10 +5497,6 @@ static int create_key_fields(struct hist_trigger_data *hist_data,
if (!fields_str)
goto out;
- strsep(&fields_str, "=");
- if (!fields_str)
- goto out;
-
for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
field_str = strsep(&fields_str, ",");
if (!field_str)
@@ -4775,7 +5628,7 @@ static int create_hist_fields(struct hist_trigger_data *hist_data,
return ret;
}
-static int is_descending(const char *str)
+static int is_descending(struct trace_array *tr, const char *str)
{
if (!str)
return 0;
@@ -4786,11 +5639,14 @@ static int is_descending(const char *str)
if (strcmp(str, "ascending") == 0)
return 0;
+ hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str));
+
return -EINVAL;
}
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
+ struct trace_array *tr = hist_data->event_file->tr;
char *fields_str = hist_data->attrs->sort_key_str;
struct tracing_map_sort_key *sort_key;
int descending, ret = 0;
@@ -4801,12 +5657,6 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
if (!fields_str)
goto out;
- strsep(&fields_str, "=");
- if (!fields_str) {
- ret = -EINVAL;
- goto out;
- }
-
for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
struct hist_field *hist_field;
char *field_str, *field_name;
@@ -4815,25 +5665,30 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
sort_key = &hist_data->sort_keys[i];
field_str = strsep(&fields_str, ",");
- if (!field_str) {
- if (i == 0)
- ret = -EINVAL;
+ if (!field_str)
+ break;
+
+ if (!*field_str) {
+ ret = -EINVAL;
+ hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
break;
}
if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
+ hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort="));
ret = -EINVAL;
break;
}
field_name = strsep(&field_str, ".");
- if (!field_name) {
+ if (!field_name || !*field_name) {
ret = -EINVAL;
+ hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
break;
}
if (strcmp(field_name, "hitcount") == 0) {
- descending = is_descending(field_str);
+ descending = is_descending(tr, field_str);
if (descending < 0) {
ret = descending;
break;
@@ -4855,7 +5710,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
if (strcmp(field_name, test_name) == 0) {
sort_key->field_idx = idx;
- descending = is_descending(field_str);
+ descending = is_descending(tr, field_str);
if (descending < 0) {
ret = descending;
goto out;
@@ -4866,6 +5721,7 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
}
if (j == hist_data->n_fields) {
ret = -EINVAL;
+ hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name));
break;
}
}
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 40106fff06a4..dd34a1b46a86 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -116,9 +116,10 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
struct trace_event_file *event_file = event_file_data(m->private);
- if (t == SHOW_AVAILABLE_TRIGGERS)
+ if (t == SHOW_AVAILABLE_TRIGGERS) {
+ (*pos)++;
return NULL;
-
+ }
return seq_list_next(t, &event_file->triggers, pos);
}
@@ -213,7 +214,7 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
return ret;
}
-static int trigger_process_regex(struct trace_event_file *file, char *buff)
+int trigger_process_regex(struct trace_event_file *file, char *buff)
{
char *command, *next = buff;
struct event_command *p;
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index b611cd36e22d..8a4c8d5c2c98 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -101,7 +101,7 @@ static int function_trace_init(struct trace_array *tr)
ftrace_init_array_ops(tr, func);
- tr->trace_buffer.cpu = get_cpu();
+ tr->array_buffer.cpu = get_cpu();
put_cpu();
tracing_start_cmdline_record();
@@ -118,7 +118,7 @@ static void function_trace_reset(struct trace_array *tr)
static void function_trace_start(struct trace_array *tr)
{
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(&tr->array_buffer);
}
static void
@@ -143,7 +143,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
goto out;
cpu = smp_processor_id();
- data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
if (!atomic_read(&data->disabled)) {
local_save_flags(flags);
trace_function(tr, ip, parent_ip, flags, pc);
@@ -192,7 +192,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
*/
local_irq_save(flags);
cpu = raw_smp_processor_id();
- data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 78af97163147..7d71546ba00a 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -101,7 +101,7 @@ int __trace_graph_entry(struct trace_array *tr,
{
struct trace_event_call *call = &event_funcgraph_entry;
struct ring_buffer_event *event;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ftrace_graph_ent_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -171,7 +171,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
local_irq_save(flags);
cpu = raw_smp_processor_id();
- data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
@@ -221,7 +221,7 @@ void __trace_graph_return(struct trace_array *tr,
{
struct trace_event_call *call = &event_funcgraph_exit;
struct ring_buffer_event *event;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ftrace_graph_ret_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -252,7 +252,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
local_irq_save(flags);
cpu = raw_smp_processor_id();
- data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
@@ -444,9 +444,9 @@ get_return_for_leaf(struct trace_iterator *iter,
* We need to consume the current entry to see
* the next one.
*/
- ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
+ ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
NULL, NULL);
- event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
+ event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
NULL, NULL);
}
@@ -503,7 +503,7 @@ print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
unsigned long long usecs;
- usecs = iter->ts - iter->trace_buffer->time_start;
+ usecs = iter->ts - iter->array_buffer->time_start;
do_div(usecs, NSEC_PER_USEC);
trace_seq_printf(s, "%9llu us | ", usecs);
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 402d022d0229..a48808c43249 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -104,7 +104,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
{
struct trace_array *tr = hwlat_trace;
struct trace_event_call *call = &event_hwlat;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct hwlat_entry *entry;
unsigned long flags;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a745b0cee5d3..10bbb0f381d5 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -122,7 +122,7 @@ static int func_prolog_dec(struct trace_array *tr,
if (!irqs_disabled_flags(*flags) && !preempt_count())
return 0;
- *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ *data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&(*data)->disabled);
if (likely(disabled == 1))
@@ -167,7 +167,7 @@ static int irqsoff_display_graph(struct trace_array *tr, int set)
per_cpu(tracing_cpu, cpu) = 0;
tr->max_latency = 0;
- tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
+ tracing_reset_online_cpus(&irqsoff_trace->array_buffer);
return start_irqsoff_tracer(irqsoff_trace, set);
}
@@ -382,7 +382,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
if (per_cpu(tracing_cpu, cpu))
return;
- data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
if (unlikely(!data) || atomic_read(&data->disabled))
return;
@@ -420,7 +420,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
if (!tracer_enabled || !tracing_is_enabled())
return;
- data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
if (unlikely(!data) ||
!data->critical_start || atomic_read(&data->disabled))
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index cca65044c14c..9da76104f7a2 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -43,7 +43,7 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
if (cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter.buffer_iter[cpu] =
- ring_buffer_read_prepare(iter.trace_buffer->buffer,
+ ring_buffer_read_prepare(iter.array_buffer->buffer,
cpu, GFP_ATOMIC);
ring_buffer_read_start(iter.buffer_iter[cpu]);
tracing_iter_reset(&iter, cpu);
@@ -51,7 +51,7 @@ static void ftrace_dump_buf(int skip_entries, long cpu_file)
} else {
iter.cpu_file = cpu_file;
iter.buffer_iter[cpu_file] =
- ring_buffer_read_prepare(iter.trace_buffer->buffer,
+ ring_buffer_read_prepare(iter.array_buffer->buffer,
cpu_file, GFP_ATOMIC);
ring_buffer_read_start(iter.buffer_iter[cpu_file]);
tracing_iter_reset(&iter, cpu_file);
@@ -124,7 +124,7 @@ static int kdb_ftdump(int argc, const char **argv)
iter.buffer_iter = buffer_iter;
for_each_tracing_cpu(cpu) {
- atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+ atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
/* A negative skip_entries means skip all but the last entries */
@@ -139,7 +139,7 @@ static int kdb_ftdump(int argc, const char **argv)
ftrace_dump_buf(skip_entries, cpu_file);
for_each_tracing_cpu(cpu) {
- atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+ atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
kdb_trap_printk--;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index aa515d578c5b..d8264ebb9581 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -22,7 +22,6 @@
#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096
-#define MAX_KPROBE_CMDLINE_SIZE 1024
/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
@@ -902,6 +901,167 @@ static int create_or_delete_trace_kprobe(int argc, char **argv)
return ret == -ECANCELED ? -EINVAL : ret;
}
+static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
+{
+ return trace_run_command(cmd->seq.buffer, create_or_delete_trace_kprobe);
+}
+
+/**
+ * kprobe_event_cmd_init - Initialize a kprobe event command object
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @buf: A pointer to the buffer used to build the command
+ * @maxlen: The length of the buffer passed in @buf
+ *
+ * Initialize a kprobe event command object. Use this before
+ * calling any of the other kprobe_event functions.
+ */
+void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
+{
+ dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
+ trace_kprobe_run_command);
+}
+EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
+
+/**
+ * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @kretprobe: Is this a return probe?
+ * @name: The name of the kprobe event
+ * @loc: The location of the kprobe event
+ * @args: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
+ * adds a NULL to the end of the arg list. If this function is used
+ * directly, make sure the last arg in the variable arg list is NULL.
+ *
+ * Generate a kprobe event command to be executed by
+ * kprobe_event_gen_cmd_end(). This function can be used to generate the
+ * complete command or only the first part of it; in the latter case,
+ * kprobe_event_add_fields() can be used to add more fields following this.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
+ const char *name, const char *loc, ...)
+{
+ char buf[MAX_EVENT_NAME_LEN];
+ struct dynevent_arg arg;
+ va_list args;
+ int ret;
+
+ if (cmd->type != DYNEVENT_TYPE_KPROBE)
+ return -EINVAL;
+
+ if (kretprobe)
+ snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
+ else
+ snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
+
+ ret = dynevent_str_add(cmd, buf);
+ if (ret)
+ return ret;
+
+ dynevent_arg_init(&arg, 0);
+ arg.str = loc;
+ ret = dynevent_arg_add(cmd, &arg, NULL);
+ if (ret)
+ return ret;
+
+ va_start(args, loc);
+ for (;;) {
+ const char *field;
+
+ field = va_arg(args, const char *);
+ if (!field)
+ break;
+
+ if (++cmd->n_fields > MAX_TRACE_ARGS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ arg.str = field;
+ ret = dynevent_arg_add(cmd, &arg, NULL);
+ if (ret)
+ break;
+ }
+ va_end(args);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
+
+/**
+ * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @args: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the kprobe_event_add_fields() wrapper, which
+ * automatically adds a NULL to the end of the arg list. If this
+ * function is used directly, make sure the last arg in the variable
+ * arg list is NULL.
+ *
+ * Add probe fields to an existing kprobe command using a variable
+ * list of args. Fields are added in the same order they're listed.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
+{
+ struct dynevent_arg arg;
+ va_list args;
+ int ret;
+
+ if (cmd->type != DYNEVENT_TYPE_KPROBE)
+ return -EINVAL;
+
+ dynevent_arg_init(&arg, 0);
+
+ va_start(args, cmd);
+ for (;;) {
+ const char *field;
+
+ field = va_arg(args, const char *);
+ if (!field)
+ break;
+
+ if (++cmd->n_fields > MAX_TRACE_ARGS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ arg.str = field;
+ ret = dynevent_arg_add(cmd, &arg, NULL);
+ if (ret)
+ break;
+ }
+ va_end(args);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
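As a rough illustration of how this interface is meant to be driven from kernel code (a sketch only, assuming the kprobe_event_gen_cmd_start()/kprobe_event_add_fields() wrappers and the kprobe_event_gen_cmd_end() finalizer referenced in the comments above; the event name, probe location, fetch args and the MAX_DYNEVENT_CMD_LEN buffer size are illustrative assumptions, not part of this patch):

    struct dynevent_cmd cmd;
    char *buf;
    int ret;

    /* Hypothetical command buffer; any reasonably sized buffer works */
    buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;

    kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

    /* Builds "p:kprobes/gen_kprobe_test do_sys_open dfd=%ax filename=%dx ..." */
    ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test", "do_sys_open",
                                     "dfd=%ax", "filename=%dx");
    if (!ret)
        ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
    if (!ret)
        ret = kprobe_event_gen_cmd_end(&cmd);  /* run the generated command */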
+
+/**
+ * kprobe_event_delete - Delete a kprobe event
+ * @name: The name of the kprobe event to delete
+ *
+ * Delete a kprobe event with the given @name from kernel code rather
+ * than directly from the command line.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int kprobe_event_delete(const char *name)
+{
+ char buf[MAX_EVENT_NAME_LEN];
+
+ snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
+
+ return trace_run_command(buf, create_or_delete_trace_kprobe);
+}
+EXPORT_SYMBOL_GPL(kprobe_event_delete);
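Tearing such an event down again from kernel code is then a single call (same hypothetical event name as in the sketch above):

    ret = kprobe_event_delete("gen_kprobe_test");  /* removes kprobes/gen_kprobe_test */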
+
static int trace_kprobe_release(struct dyn_event *ev)
{
struct trace_kprobe *tk = to_trace_kprobe(ev);
@@ -1175,35 +1335,35 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
struct trace_event_file *trace_file)
{
struct kprobe_trace_entry_head *entry;
- struct ring_buffer_event *event;
- struct ring_buffer *buffer;
- int size, dsize, pc;
- unsigned long irq_flags;
struct trace_event_call *call = trace_probe_event_call(&tk->tp);
+ struct trace_event_buffer fbuffer;
+ int dsize;
WARN_ON(call != trace_file->event_call);
if (trace_trigger_soft_disabled(trace_file))
return;
- local_save_flags(irq_flags);
- pc = preempt_count();
+ local_save_flags(fbuffer.flags);
+ fbuffer.pc = preempt_count();
+ fbuffer.trace_file = trace_file;
dsize = __get_data_size(&tk->tp, regs);
- size = sizeof(*entry) + tk->tp.size + dsize;
- event = trace_event_buffer_lock_reserve(&buffer, trace_file,
- call->event.type,
- size, irq_flags, pc);
- if (!event)
+ fbuffer.event =
+ trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
+ call->event.type,
+ sizeof(*entry) + tk->tp.size + dsize,
+ fbuffer.flags, fbuffer.pc);
+ if (!fbuffer.event)
return;
- entry = ring_buffer_event_data(event);
+ fbuffer.regs = regs;
+ entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
entry->ip = (unsigned long)tk->rp.kp.addr;
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
- event_trigger_unlock_commit_regs(trace_file, buffer, event,
- entry, irq_flags, pc, regs);
+ trace_event_buffer_commit(&fbuffer);
}
static void
@@ -1223,36 +1383,35 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
struct trace_event_file *trace_file)
{
struct kretprobe_trace_entry_head *entry;
- struct ring_buffer_event *event;
- struct ring_buffer *buffer;
- int size, pc, dsize;
- unsigned long irq_flags;
+ struct trace_event_buffer fbuffer;
struct trace_event_call *call = trace_probe_event_call(&tk->tp);
+ int dsize;
WARN_ON(call != trace_file->event_call);
if (trace_trigger_soft_disabled(trace_file))
return;
- local_save_flags(irq_flags);
- pc = preempt_count();
+ local_save_flags(fbuffer.flags);
+ fbuffer.pc = preempt_count();
+ fbuffer.trace_file = trace_file;
dsize = __get_data_size(&tk->tp, regs);
- size = sizeof(*entry) + tk->tp.size + dsize;
-
- event = trace_event_buffer_lock_reserve(&buffer, trace_file,
- call->event.type,
- size, irq_flags, pc);
- if (!event)
+ fbuffer.event =
+ trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
+ call->event.type,
+ sizeof(*entry) + tk->tp.size + dsize,
+ fbuffer.flags, fbuffer.pc);
+ if (!fbuffer.event)
return;
- entry = ring_buffer_event_data(event);
+ fbuffer.regs = regs;
+ entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
entry->func = (unsigned long)tk->rp.kp.addr;
entry->ret_ip = (unsigned long)ri->ret_addr;
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
- event_trigger_unlock_commit_regs(trace_file, buffer, event,
- entry, irq_flags, pc, regs);
+ trace_event_buffer_commit(&fbuffer);
}
static void
@@ -1698,11 +1857,12 @@ static __init void setup_boot_kprobe_events(void)
enable_boot_kprobe_events();
}
-/* Make a tracefs interface for controlling probe points */
-static __init int init_kprobe_trace(void)
+/*
+ * Register dynevent at subsys_initcall. This allows the kernel to set up
+ * kprobe events in fs_initcall() without tracefs.
+ */
+static __init int init_kprobe_trace_early(void)
{
- struct dentry *d_tracer;
- struct dentry *entry;
int ret;
ret = dyn_event_register(&trace_kprobe_ops);
@@ -1712,6 +1872,16 @@ static __init int init_kprobe_trace(void)
if (register_module_notifier(&trace_kprobe_module_nb))
return -EINVAL;
+ return 0;
+}
+subsys_initcall(init_kprobe_trace_early);
+
+/* Make a tracefs interface for controlling probe points */
+static __init int init_kprobe_trace(void)
+{
+ struct dentry *d_tracer;
+ struct dentry *entry;
+
d_tracer = tracing_init_dentry();
if (IS_ERR(d_tracer))
return 0;
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index b0388016b687..84582bf1ed5f 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -32,7 +32,7 @@ static void mmio_reset_data(struct trace_array *tr)
overrun_detected = false;
prev_overruns = 0;
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(&tr->array_buffer);
}
static int mmio_trace_init(struct trace_array *tr)
@@ -122,7 +122,7 @@ static void mmio_close(struct trace_iterator *iter)
static unsigned long count_overruns(struct trace_iterator *iter)
{
unsigned long cnt = atomic_xchg(&dropped_count, 0);
- unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
+ unsigned long over = ring_buffer_overruns(iter->array_buffer->buffer);
if (over > prev_overruns)
cnt += over - prev_overruns;
@@ -297,7 +297,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
struct mmiotrace_rw *rw)
{
struct trace_event_call *call = &event_mmiotrace_rw;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
int pc = preempt_count();
@@ -318,7 +318,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
struct trace_array *tr = mmio_trace_array;
- struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+ struct trace_array_cpu *data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
__trace_mmiotrace_rw(tr, data, rw);
}
@@ -327,7 +327,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
struct mmiotrace_map *map)
{
struct trace_event_call *call = &event_mmiotrace_map;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
int pc = preempt_count();
@@ -351,7 +351,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
struct trace_array_cpu *data;
preempt_disable();
- data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
+ data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
__trace_mmiotrace_map(tr, data, map);
preempt_enable();
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index d9b4b7c22db4..b4909082f6a4 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -538,7 +538,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
struct trace_array *tr = iter->tr;
unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
- unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
+ unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
unsigned long long rel_ts = next_ts - iter->ts;
struct trace_seq *s = &iter->seq;
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index e288168661e1..e304196d7c28 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -89,8 +89,10 @@ static void tracing_sched_unregister(void)
static void tracing_start_sched_switch(int ops)
{
- bool sched_register = (!sched_cmdline_ref && !sched_tgid_ref);
+ bool sched_register;
+
mutex_lock(&sched_register_mutex);
+ sched_register = (!sched_cmdline_ref && !sched_tgid_ref);
switch (ops) {
case RECORD_CMDLINE:
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 617e297f46dc..97b10bb31a1f 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -82,7 +82,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
if (cpu != wakeup_current_cpu)
goto out_enable;
- *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ *data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&(*data)->disabled);
if (unlikely(disabled != 1))
goto out;
@@ -378,7 +378,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
unsigned long flags, int pc)
{
struct trace_event_call *call = &event_context_switch;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
@@ -408,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
struct trace_event_call *call = &event_wakeup;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
sizeof(*entry), flags, pc);
@@ -459,7 +459,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
/* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id();
- disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+ disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (likely(disabled != 1))
goto out;
@@ -471,7 +471,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
goto out_unlock;
/* The task we are waiting for is waking up */
- data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+ data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
@@ -494,7 +494,7 @@ out_unlock:
arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
out:
- atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+ atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}
static void __wakeup_reset(struct trace_array *tr)
@@ -513,7 +513,7 @@ static void wakeup_reset(struct trace_array *tr)
{
unsigned long flags;
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(&tr->array_buffer);
local_irq_save(flags);
arch_spin_lock(&wakeup_lock);
@@ -551,7 +551,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
return;
pc = preempt_count();
- disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+ disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (unlikely(disabled != 1))
goto out;
@@ -583,7 +583,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
local_save_flags(flags);
- data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+ data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu);
tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
__trace_stack(wakeup_trace, flags, 0, pc);
@@ -598,7 +598,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
out_locked:
arch_spin_unlock(&wakeup_lock);
out:
- atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+ atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}
static void start_wakeup_tracer(struct trace_array *tr)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 69ee8ef12cee..b5e3496cf803 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -23,7 +23,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
return 0;
}
-static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
+static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
struct ring_buffer_event *event;
struct trace_entry *entry;
@@ -60,7 +60,7 @@ static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
* Test the trace buffer to see if all the elements
* are still sane.
*/
-static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
+static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
unsigned long flags, cnt = 0;
int cpu, ret = 0;
@@ -362,7 +362,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
msleep(100);
/* we should have nothing in the buffer */
- ret = trace_test_buffer(&tr->trace_buffer, &count);
+ ret = trace_test_buffer(&tr->array_buffer, &count);
if (ret)
goto out;
@@ -383,7 +383,7 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
ftrace_enabled = 0;
/* check the trace buffer */
- ret = trace_test_buffer(&tr->trace_buffer, &count);
+ ret = trace_test_buffer(&tr->array_buffer, &count);
ftrace_enabled = 1;
tracing_start();
@@ -682,7 +682,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ftrace_enabled = 0;
/* check the trace buffer */
- ret = trace_test_buffer(&tr->trace_buffer, &count);
+ ret = trace_test_buffer(&tr->array_buffer, &count);
ftrace_enabled = 1;
trace->reset(tr);
@@ -768,7 +768,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
* Simulate the init() callback but we attach a watchdog callback
* to detect and recover from possible hangs
*/
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(&tr->array_buffer);
set_graph_array(tr);
ret = register_ftrace_graph(&fgraph_ops);
if (ret) {
@@ -790,7 +790,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
tracing_stop();
/* check the trace buffer */
- ret = trace_test_buffer(&tr->trace_buffer, &count);
+ ret = trace_test_buffer(&tr->array_buffer, &count);
/* Need to also simulate the tr->reset to remove this fgraph_ops */
tracing_stop_cmdline_record();
@@ -848,7 +848,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
- ret = trace_test_buffer(&tr->trace_buffer, NULL);
+ ret = trace_test_buffer(&tr->array_buffer, NULL);
if (!ret)
ret = trace_test_buffer(&tr->max_buffer, &count);
trace->reset(tr);
@@ -910,7 +910,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
- ret = trace_test_buffer(&tr->trace_buffer, NULL);
+ ret = trace_test_buffer(&tr->array_buffer, NULL);
if (!ret)
ret = trace_test_buffer(&tr->max_buffer, &count);
trace->reset(tr);
@@ -976,7 +976,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
- ret = trace_test_buffer(&tr->trace_buffer, NULL);
+ ret = trace_test_buffer(&tr->array_buffer, NULL);
if (ret)
goto out;
@@ -1006,7 +1006,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
- ret = trace_test_buffer(&tr->trace_buffer, NULL);
+ ret = trace_test_buffer(&tr->array_buffer, NULL);
if (ret)
goto out;
@@ -1136,7 +1136,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
- ret = trace_test_buffer(&tr->trace_buffer, NULL);
+ ret = trace_test_buffer(&tr->array_buffer, NULL);
if (!ret)
ret = trace_test_buffer(&tr->max_buffer, &count);
@@ -1177,7 +1177,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check the trace buffer */
- ret = trace_test_buffer(&tr->trace_buffer, &count);
+ ret = trace_test_buffer(&tr->array_buffer, &count);
trace->reset(tr);
tracing_start();
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index 87de6edafd14..1d84fcc78e3e 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -30,9 +30,6 @@
/* How much buffer is left on the trace_seq? */
#define TRACE_SEQ_BUF_LEFT(s) seq_buf_buffer_left(&(s)->seq)
-/* How much buffer is written? */
-#define TRACE_SEQ_BUF_USED(s) seq_buf_used(&(s)->seq)
-
/*
* trace_seq should work with being initialized with 0s.
*/
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 874f1274cf99..d1fa19773cc8 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -280,18 +280,22 @@ static int tracing_stat_init(void)
d_tracing = tracing_init_dentry();
if (IS_ERR(d_tracing))
- return 0;
+ return -ENODEV;
stat_dir = tracefs_create_dir("trace_stat", d_tracing);
- if (!stat_dir)
+ if (!stat_dir) {
pr_warn("Could not create tracefs 'trace_stat' entry\n");
+ return -ENOMEM;
+ }
return 0;
}
static int init_stat_file(struct stat_session *session)
{
- if (!stat_dir && tracing_stat_init())
- return -ENODEV;
+ int ret;
+
+ if (!stat_dir && (ret = tracing_stat_init()))
+ return ret;
session->file = tracefs_create_file(session->ts->name, 0644,
stat_dir,
@@ -304,7 +308,7 @@ static int init_stat_file(struct stat_session *session)
int register_stat_tracer(struct tracer_stat *trace)
{
struct stat_session *session, *node;
- int ret;
+ int ret = -EINVAL;
if (!trace)
return -EINVAL;
@@ -315,17 +319,15 @@ int register_stat_tracer(struct tracer_stat *trace)
/* Already registered? */
mutex_lock(&all_stat_sessions_mutex);
list_for_each_entry(node, &all_stat_sessions, session_list) {
- if (node->ts == trace) {
- mutex_unlock(&all_stat_sessions_mutex);
- return -EINVAL;
- }
+ if (node->ts == trace)
+ goto out;
}
- mutex_unlock(&all_stat_sessions_mutex);
+ ret = -ENOMEM;
/* Init the session */
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
- return -ENOMEM;
+ goto out;
session->ts = trace;
INIT_LIST_HEAD(&session->session_list);
@@ -334,15 +336,16 @@ int register_stat_tracer(struct tracer_stat *trace)
ret = init_stat_file(session);
if (ret) {
destroy_session(session);
- return ret;
+ goto out;
}
+ ret = 0;
/* Register */
- mutex_lock(&all_stat_sessions_mutex);
list_add_tail(&session->session_list, &all_stat_sessions);
+ out:
mutex_unlock(&all_stat_sessions_mutex);
- return 0;
+ return ret;
}
void unregister_stat_tracer(struct tracer_stat *trace)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 2978c29d87d4..d85a2f0f316b 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -297,7 +297,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
struct syscall_trace_enter *entry;
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
unsigned long irq_flags;
unsigned long args[6];
int pc;
@@ -325,7 +325,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
local_save_flags(irq_flags);
pc = preempt_count();
- buffer = tr->trace_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer,
sys_data->enter_event->event.type, size, irq_flags, pc);
if (!event)
@@ -347,7 +347,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
struct syscall_trace_exit *entry;
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
unsigned long irq_flags;
int pc;
int syscall_nr;
@@ -371,7 +371,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
local_save_flags(irq_flags);
pc = preempt_count();
- buffer = tr->trace_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer,
sys_data->exit_event->event.type, sizeof(*entry),
irq_flags, pc);
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 7885ebd23d0c..18d16f3ef980 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -931,8 +931,8 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
struct trace_event_file *trace_file)
{
struct uprobe_trace_entry_head *entry;
+ struct trace_buffer *buffer;
struct ring_buffer_event *event;
- struct ring_buffer *buffer;
void *data;
int size, esize;
struct trace_event_call *call = trace_probe_event_call(&tu->tp);
diff --git a/lib/Kconfig b/lib/Kconfig
index bc7e56370129..0cf875fd627c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -573,6 +573,9 @@ config DIMLIB
config LIBFDT
bool
+config LIBXBC
+ bool
+
config OID_REGISTRY
tristate
help
diff --git a/lib/Makefile b/lib/Makefile
index 23ca78d43d24..8bb91176c2a1 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -230,6 +230,8 @@ $(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
+lib-$(CONFIG_LIBXBC) += bootconfig.o
+
obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
new file mode 100644
index 000000000000..afb2e767e6fe
--- /dev/null
+++ b/lib/bootconfig.c
@@ -0,0 +1,814 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extra Boot Config
+ * Masami Hiramatsu <mhiramat@kernel.org>
+ */
+
+#define pr_fmt(fmt) "bootconfig: " fmt
+
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/bootconfig.h>
+#include <linux/string.h>
+
+/*
+ * Extra Boot Config (XBC) is given as tree-structured ASCII text of
+ * key-value pairs in memory.
+ * xbc_init() parses the text to build a simple tree. Each tree node is
+ * simply a key word or a value. A key node may have a next key node and/or
+ * a child node (both key and value). A value node may have a next value
+ * node (for arrays).
+ */
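To make the node layout concrete, a small config in the syntax accepted by this parser, for example

    feature.option.foo = 1
    feature.option.bar = "hello", "world"

would become the key chain feature -> option with two child keys, foo and bar; foo gets a single value node "1", while bar gets a value node "hello" whose next pointer links the array entry "world" (the key names here are illustrative).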
+
+static struct xbc_node xbc_nodes[XBC_NODE_MAX] __initdata;
+static int xbc_node_num __initdata;
+static char *xbc_data __initdata;
+static size_t xbc_data_size __initdata;
+static struct xbc_node *last_parent __initdata;
+
+static int __init xbc_parse_error(const char *msg, const char *p)
+{
+ int pos = p - xbc_data;
+
+ pr_err("Parse error at pos %d: %s\n", pos, msg);
+ return -EINVAL;
+}
+
+/**
+ * xbc_root_node() - Get the root node of extended boot config
+ *
+ * Return the address of the root node of the extended boot config. If the
+ * extended boot config is not initialized, return NULL.
+ */
+struct xbc_node * __init xbc_root_node(void)
+{
+ if (unlikely(!xbc_data))
+ return NULL;
+
+ return xbc_nodes;
+}
+
+/**
+ * xbc_node_index() - Get the index of XBC node
+ * @node: The target node to get the index of.
+ *
+ * Return the index number of @node in the XBC node list.
+ */
+int __init xbc_node_index(struct xbc_node *node)
+{
+ return node - &xbc_nodes[0];
+}
+
+/**
+ * xbc_node_get_parent() - Get the parent XBC node
+ * @node: An XBC node.
+ *
+ * Return the parent node of @node. If the node is the top node of the tree,
+ * return NULL.
+ */
+struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node)
+{
+ return node->parent == XBC_NODE_MAX ? NULL : &xbc_nodes[node->parent];
+}
+
+/**
+ * xbc_node_get_child() - Get the child XBC node
+ * @node: An XBC node.
+ *
+ * Return the first child node of @node. If the node has no child, return
+ * NULL.
+ */
+struct xbc_node * __init xbc_node_get_child(struct xbc_node *node)
+{
+ return node->child ? &xbc_nodes[node->child] : NULL;
+}
+
+/**
+ * xbc_node_get_next() - Get the next sibling XBC node
+ * @node: An XBC node.
+ *
+ * Return the NEXT sibling node of @node. If the node has no next sibling,
+ * return NULL. Note that even if this returns NULL, it doesn't mean @node
+ * has no siblings. (You also have to check whether the parent's child node
+ * is @node or not.)
+ */
+struct xbc_node * __init xbc_node_get_next(struct xbc_node *node)
+{
+ return node->next ? &xbc_nodes[node->next] : NULL;
+}
+
+/**
+ * xbc_node_get_data() - Get the data of XBC node
+ * @node: An XBC node.
+ *
+ * Return the data (which is always a null terminated string) of @node.
+ * If the node has invalid data, warn and return NULL.
+ */
+const char * __init xbc_node_get_data(struct xbc_node *node)
+{
+ int offset = node->data & ~XBC_VALUE;
+
+ if (WARN_ON(offset >= xbc_data_size))
+ return NULL;
+
+ return xbc_data + offset;
+}
+
+static bool __init
+xbc_node_match_prefix(struct xbc_node *node, const char **prefix)
+{
+ const char *p = xbc_node_get_data(node);
+ int len = strlen(p);
+
+ if (strncmp(*prefix, p, len))
+ return false;
+
+ p = *prefix + len;
+ if (*p == '.')
+ p++;
+ else if (*p != '\0')
+ return false;
+ *prefix = p;
+
+ return true;
+}
+
+/**
+ * xbc_node_find_child() - Find a child node which matches given key
+ * @parent: An XBC node.
+ * @key: A key string.
+ *
+ * Search a node under @parent which matches @key. The @key can contain
+ * several words joined with '.'. If @parent is NULL, this searches the
+ * node from the whole tree. Return NULL if no node is matched.
+ */
+struct xbc_node * __init
+xbc_node_find_child(struct xbc_node *parent, const char *key)
+{
+ struct xbc_node *node;
+
+ if (parent)
+ node = xbc_node_get_child(parent);
+ else
+ node = xbc_root_node();
+
+ while (node && xbc_node_is_key(node)) {
+ if (!xbc_node_match_prefix(node, &key))
+ node = xbc_node_get_next(node);
+ else if (*key != '\0')
+ node = xbc_node_get_child(node);
+ else
+ break;
+ }
+
+ return node;
+}
+
+/**
+ * xbc_node_find_value() - Find a value node which matches given key
+ * @parent: An XBC node.
+ * @key: A key string.
+ * @vnode: A container pointer of found XBC node.
+ *
+ * Search a value node under @parent whose (parent) key node matches @key,
+ * store it in *@vnode, and return the value string.
+ * The @key can contain several words joined with '.'. If @parent is NULL,
+ * this searches the node from the whole tree. Return the value string if a
+ * matching key is found, or NULL if no node matches.
+ * Note that this returns a 0-length string and stores NULL in *@vnode if
+ * the key has no value. It also returns the value of the first entry if
+ * the value is an array.
+ */
+const char * __init
+xbc_node_find_value(struct xbc_node *parent, const char *key,
+ struct xbc_node **vnode)
+{
+ struct xbc_node *node = xbc_node_find_child(parent, key);
+
+ if (!node || !xbc_node_is_key(node))
+ return NULL;
+
+ node = xbc_node_get_child(node);
+ if (node && !xbc_node_is_value(node))
+ return NULL;
+
+ if (vnode)
+ *vnode = node;
+
+ return node ? xbc_node_get_data(node) : "";
+}
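For example, a caller could look up a single value directly from the tree (a sketch; the key name is illustrative and matches the example above):

    struct xbc_node *vnode;
    const char *val;

    val = xbc_node_find_value(NULL, "feature.option.foo", &vnode);
    if (val)
        pr_info("feature.option.foo = '%s'\n", val);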
+
+/**
+ * xbc_node_compose_key_after() - Compose partial key string of the XBC node
+ * @root: Root XBC node
+ * @node: Target XBC node.
+ * @buf: A buffer to store the key.
+ * @size: The size of the @buf.
+ *
+ * Compose the partial key of @node into @buf, starting right
+ * after @root (@root itself is not included). If @root is NULL, this
+ * returns the full key words of @node.
+ * Returns the total length of the key stored in @buf. Returns -EINVAL
+ * if @node is NULL, @root is not an ancestor of @node, or @root is @node,
+ * or returns -ERANGE if the key depth is deeper than the max depth.
+ * This is expected to be used with xbc_find_node() to list all (child)
+ * keys under a given key.
+ */
+int __init xbc_node_compose_key_after(struct xbc_node *root,
+ struct xbc_node *node,
+ char *buf, size_t size)
+{
+ u16 keys[XBC_DEPTH_MAX];
+ int depth = 0, ret = 0, total = 0;
+
+ if (!node || node == root)
+ return -EINVAL;
+
+ if (xbc_node_is_value(node))
+ node = xbc_node_get_parent(node);
+
+ while (node && node != root) {
+ keys[depth++] = xbc_node_index(node);
+ if (depth == XBC_DEPTH_MAX)
+ return -ERANGE;
+ node = xbc_node_get_parent(node);
+ }
+ if (!node && root)
+ return -EINVAL;
+
+ while (--depth >= 0) {
+ node = xbc_nodes + keys[depth];
+ ret = snprintf(buf, size, "%s%s", xbc_node_get_data(node),
+ depth ? "." : "");
+ if (ret < 0)
+ return ret;
+ if (ret > size) {
+ size = 0;
+ } else {
+ size -= ret;
+ buf += ret;
+ }
+ total += ret;
+ }
+
+ return total;
+}
+
+/**
+ * xbc_node_find_next_leaf() - Find the next leaf node under given node
+ * @root: An XBC root node
+ * @node: The XBC node to start searching from.
+ *
+ * Search the next leaf node (which means the terminal key node) of @node
+ * under the @root node (including @root itself).
+ * Return the next leaf node, or NULL if no next leaf node is found.
+ */
+struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
+ struct xbc_node *node)
+{
+ if (unlikely(!xbc_data))
+ return NULL;
+
+ if (!node) { /* First try */
+ node = root;
+ if (!node)
+ node = xbc_nodes;
+ } else {
+ if (node == root) /* @root was a leaf, no child node. */
+ return NULL;
+
+ while (!node->next) {
+ node = xbc_node_get_parent(node);
+ if (node == root)
+ return NULL;
+ /* User passed a node which is not under the parent */
+ if (WARN_ON(!node))
+ return NULL;
+ }
+ node = xbc_node_get_next(node);
+ }
+
+ while (node && !xbc_node_is_leaf(node))
+ node = xbc_node_get_child(node);
+
+ return node;
+}
+
+/**
+ * xbc_node_find_next_key_value() - Find the next key-value pair nodes
+ * @root: An XBC root node
+ * @leaf: A container pointer of the XBC node to start from.
+ *
+ * Search the next leaf node (which means the terminal key node) of *@leaf
+ * under the @root node. Returns the value and updates *@leaf if the next
+ * leaf node is found, or NULL if no next leaf node is found.
+ * Note that this returns a 0-length string if the key has no value, or
+ * the value of the first entry if the value is an array.
+ */
+const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
+ struct xbc_node **leaf)
+{
+ /* A @leaf container pointer must be passed */
+ if (WARN_ON(!leaf))
+ return NULL;
+
+ *leaf = xbc_node_find_next_leaf(root, *leaf);
+ if (!*leaf)
+ return NULL;
+ if ((*leaf)->child)
+ return xbc_node_get_data(xbc_node_get_child(*leaf));
+ else
+ return ""; /* No value key */
+}
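Walking every key/value pair under a subtree can then be written as a simple loop (a sketch; it assumes XBC_KEYLEN_MAX is a reasonable stack buffer size and reuses xbc_node_compose_key_after() defined above):

    struct xbc_node *root = xbc_node_find_child(NULL, "feature");
    struct xbc_node *leaf = NULL;
    const char *val;
    char key[XBC_KEYLEN_MAX];

    while ((val = xbc_node_find_next_key_value(root, &leaf))) {
        /* key is composed relative to root, e.g. "option.foo" */
        xbc_node_compose_key_after(root, leaf, key, sizeof(key));
        pr_info("%s = '%s'\n", key, val);
    }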
+
+/* XBC parse and tree build */
+
+static struct xbc_node * __init xbc_add_node(char *data, u32 flag)
+{
+ struct xbc_node *node;
+ unsigned long offset;
+
+ if (xbc_node_num == XBC_NODE_MAX)
+ return NULL;
+
+ node = &xbc_nodes[xbc_node_num++];
+ offset = data - xbc_data;
+ node->data = (u16)offset;
+ if (WARN_ON(offset >= XBC_DATA_MAX))
+ return NULL;
+ node->data |= flag;
+ node->child = 0;
+ node->next = 0;
+
+ return node;
+}
+
+static inline __init struct xbc_node *xbc_last_sibling(struct xbc_node *node)
+{
+ while (node->next)
+ node = xbc_node_get_next(node);
+
+ return node;
+}
+
+static struct xbc_node * __init xbc_add_sibling(char *data, u32 flag)
+{
+ struct xbc_node *sib, *node = xbc_add_node(data, flag);
+
+ if (node) {
+ if (!last_parent) {
+ node->parent = XBC_NODE_MAX;
+ sib = xbc_last_sibling(xbc_nodes);
+ sib->next = xbc_node_index(node);
+ } else {
+ node->parent = xbc_node_index(last_parent);
+ if (!last_parent->child) {
+ last_parent->child = xbc_node_index(node);
+ } else {
+ sib = xbc_node_get_child(last_parent);
+ sib = xbc_last_sibling(sib);
+ sib->next = xbc_node_index(node);
+ }
+ }
+ } else
+ xbc_parse_error("Too many nodes", data);
+
+ return node;
+}
+
+static inline __init struct xbc_node *xbc_add_child(char *data, u32 flag)
+{
+ struct xbc_node *node = xbc_add_sibling(data, flag);
+
+ if (node)
+ last_parent = node;
+
+ return node;
+}
+
+static inline __init bool xbc_valid_keyword(char *key)
+{
+ if (key[0] == '\0')
+ return false;
+
+ while (isalnum(*key) || *key == '-' || *key == '_')
+ key++;
+
+ return *key == '\0';
+}
+
+static char *skip_comment(char *p)
+{
+ char *ret;
+
+ ret = strchr(p, '\n');
+ if (!ret)
+ ret = p + strlen(p);
+ else
+ ret++;
+
+ return ret;
+}
+
+static char *skip_spaces_until_newline(char *p)
+{
+ while (isspace(*p) && *p != '\n')
+ p++;
+ return p;
+}
+
+static int __init __xbc_open_brace(void)
+{
+ /* Mark the last key as open brace */
+ last_parent->next = XBC_NODE_MAX;
+
+ return 0;
+}
+
+static int __init __xbc_close_brace(char *p)
+{
+ struct xbc_node *node;
+
+ if (!last_parent || last_parent->next != XBC_NODE_MAX)
+ return xbc_parse_error("Unexpected closing brace", p);
+
+ node = last_parent;
+ node->next = 0;
+ do {
+ node = xbc_node_get_parent(node);
+ } while (node && node->next != XBC_NODE_MAX);
+ last_parent = node;
+
+ return 0;
+}
+
+/*
+ * Return the delimiter or an error; no node is added. As in lib/cmdline.c,
+ * you can use " around spaces, but you can't escape " inside a value.
+ */
+static int __init __xbc_parse_value(char **__v, char **__n)
+{
+ char *p, *v = *__v;
+ int c, quotes = 0;
+
+ v = skip_spaces(v);
+ while (*v == '#') {
+ v = skip_comment(v);
+ v = skip_spaces(v);
+ }
+ if (*v == '"' || *v == '\'') {
+ quotes = *v;
+ v++;
+ }
+ p = v - 1;
+ while ((c = *++p)) {
+ if (!isprint(c) && !isspace(c))
+ return xbc_parse_error("Non printable value", p);
+ if (quotes) {
+ if (c != quotes)
+ continue;
+ quotes = 0;
+ *p++ = '\0';
+ p = skip_spaces_until_newline(p);
+ c = *p;
+ if (c && !strchr(",;\n#}", c))
+ return xbc_parse_error("No value delimiter", p);
+ if (*p)
+ p++;
+ break;
+ }
+ if (strchr(",;\n#}", c)) {
+ v = strim(v);
+ *p++ = '\0';
+ break;
+ }
+ }
+ if (quotes)
+ return xbc_parse_error("No closing quotes", p);
+ if (c == '#') {
+ p = skip_comment(p);
+ c = '\n'; /* A comment must be treated as a newline */
+ }
+ *__n = p;
+ *__v = v;
+
+ return c;
+}
+
+static int __init xbc_parse_array(char **__v)
+{
+ struct xbc_node *node;
+ char *next;
+ int c = 0;
+
+ do {
+ c = __xbc_parse_value(__v, &next);
+ if (c < 0)
+ return c;
+
+ node = xbc_add_sibling(*__v, XBC_VALUE);
+ if (!node)
+ return -ENOMEM;
+ *__v = next;
+ } while (c == ',');
+ node->next = 0;
+
+ return c;
+}
+
+static inline __init
+struct xbc_node *find_match_node(struct xbc_node *node, char *k)
+{
+ while (node) {
+ if (!strcmp(xbc_node_get_data(node), k))
+ break;
+ node = xbc_node_get_next(node);
+ }
+ return node;
+}
+
+static int __init __xbc_add_key(char *k)
+{
+ struct xbc_node *node;
+
+ if (!xbc_valid_keyword(k))
+ return xbc_parse_error("Invalid keyword", k);
+
+ if (unlikely(xbc_node_num == 0))
+ goto add_node;
+
+ if (!last_parent) /* the first level */
+ node = find_match_node(xbc_nodes, k);
+ else
+ node = find_match_node(xbc_node_get_child(last_parent), k);
+
+ if (node)
+ last_parent = node;
+ else {
+add_node:
+ node = xbc_add_child(k, XBC_KEY);
+ if (!node)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int __init __xbc_parse_keys(char *k)
+{
+ char *p;
+ int ret;
+
+ k = strim(k);
+ while ((p = strchr(k, '.'))) {
+ *p++ = '\0';
+ ret = __xbc_add_key(k);
+ if (ret)
+ return ret;
+ k = p;
+ }
+
+ return __xbc_add_key(k);
+}
+
+static int __init xbc_parse_kv(char **k, char *v)
+{
+ struct xbc_node *prev_parent = last_parent;
+ struct xbc_node *node;
+ char *next;
+ int c, ret;
+
+ ret = __xbc_parse_keys(*k);
+ if (ret)
+ return ret;
+
+ c = __xbc_parse_value(&v, &next);
+ if (c < 0)
+ return c;
+
+ node = xbc_add_sibling(v, XBC_VALUE);
+ if (!node)
+ return -ENOMEM;
+
+ if (c == ',') { /* Array */
+ c = xbc_parse_array(&next);
+ if (c < 0)
+ return c;
+ }
+
+ last_parent = prev_parent;
+
+ if (c == '}') {
+ ret = __xbc_close_brace(next - 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ *k = next;
+
+ return 0;
+}
+
+static int __init xbc_parse_key(char **k, char *n)
+{
+ struct xbc_node *prev_parent = last_parent;
+ int ret;
+
+ *k = strim(*k);
+ if (**k != '\0') {
+ ret = __xbc_parse_keys(*k);
+ if (ret)
+ return ret;
+ last_parent = prev_parent;
+ }
+ *k = n;
+
+ return 0;
+}
+
+static int __init xbc_open_brace(char **k, char *n)
+{
+ int ret;
+
+ ret = __xbc_parse_keys(*k);
+ if (ret)
+ return ret;
+ *k = n;
+
+ return __xbc_open_brace();
+}
+
+static int __init xbc_close_brace(char **k, char *n)
+{
+ int ret;
+
+ ret = xbc_parse_key(k, n);
+ if (ret)
+ return ret;
+ /* k is updated in xbc_parse_key() */
+
+ return __xbc_close_brace(n - 1);
+}
+
+static int __init xbc_verify_tree(void)
+{
+ int i, depth, len, wlen;
+ struct xbc_node *n, *m;
+
+ /* Empty tree */
+ if (xbc_node_num == 0) {
+ xbc_parse_error("Empty config", xbc_data);
+ return -ENOENT;
+ }
+
+ for (i = 0; i < xbc_node_num; i++) {
+ if (xbc_nodes[i].next > xbc_node_num) {
+ return xbc_parse_error("No closing brace",
+ xbc_node_get_data(xbc_nodes + i));
+ }
+ }
+
+ /* Key tree limitation check */
+ n = &xbc_nodes[0];
+ depth = 1;
+ len = 0;
+
+ while (n) {
+ wlen = strlen(xbc_node_get_data(n)) + 1;
+ len += wlen;
+ if (len > XBC_KEYLEN_MAX)
+ return xbc_parse_error("Too long key length",
+ xbc_node_get_data(n));
+
+ m = xbc_node_get_child(n);
+ if (m && xbc_node_is_key(m)) {
+ n = m;
+ depth++;
+ if (depth > XBC_DEPTH_MAX)
+ return xbc_parse_error("Too many key words",
+ xbc_node_get_data(n));
+ continue;
+ }
+ len -= wlen;
+ m = xbc_node_get_next(n);
+ while (!m) {
+ n = xbc_node_get_parent(n);
+ if (!n)
+ break;
+ len -= strlen(xbc_node_get_data(n)) + 1;
+ depth--;
+ m = xbc_node_get_next(n);
+ }
+ n = m;
+ }
+
+ return 0;
+}
+
+/**
+ * xbc_destroy_all() - Clean up all parsed bootconfig
+ *
+ * This clears all data structures of the parsed bootconfig in memory.
+ * If you need to call xbc_init() again with a new boot config, call
+ * this first.
+ */
+void __init xbc_destroy_all(void)
+{
+ xbc_data = NULL;
+ xbc_data_size = 0;
+ xbc_node_num = 0;
+ memset(xbc_nodes, 0, sizeof(xbc_nodes));
+}
+
+/**
+ * xbc_init() - Parse given XBC file and build XBC internal tree
+ * @buf: boot config text
+ *
+ * This parses the boot config text in @buf. @buf must be a
+ * null-terminated string and smaller than XBC_DATA_MAX.
+ * Return the number of stored nodes (> 0) on success, or -errno
+ * if there is any error.
+ */
+int __init xbc_init(char *buf)
+{
+ char *p, *q;
+ int ret, c;
+
+ if (xbc_data) {
+ pr_err("Error: bootconfig is already initialized.\n");
+ return -EBUSY;
+ }
+
+ ret = strlen(buf);
+ if (ret > XBC_DATA_MAX - 1 || ret == 0) {
+ pr_err("Error: Config data is %s.\n",
+ ret ? "too big" : "empty");
+ return -ERANGE;
+ }
+
+ xbc_data = buf;
+ xbc_data_size = ret + 1;
+ last_parent = NULL;
+
+ p = buf;
+ do {
+ q = strpbrk(p, "{}=;\n#");
+ if (!q) {
+ p = skip_spaces(p);
+ if (*p != '\0')
+ ret = xbc_parse_error("No delimiter", p);
+ break;
+ }
+
+ c = *q;
+ *q++ = '\0';
+ switch (c) {
+ case '=':
+ ret = xbc_parse_kv(&p, q);
+ break;
+ case '{':
+ ret = xbc_open_brace(&p, q);
+ break;
+ case '#':
+ q = skip_comment(q);
+ /* fall through */
+ case ';':
+ case '\n':
+ ret = xbc_parse_key(&p, q);
+ break;
+ case '}':
+ ret = xbc_close_brace(&p, q);
+ break;
+ }
+ } while (!ret);
+
+ if (!ret)
+ ret = xbc_verify_tree();
+
+ if (ret < 0)
+ xbc_destroy_all();
+ else
+ ret = xbc_node_num;
+
+ return ret;
+}
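In other words, the expected call pattern is roughly the following (a sketch; the buffer would normally be bootconfig text copied from the initrd, and it must be writable since parsing inserts string terminators in place):

    static char data[] __initdata =
        "feature.option.foo = 1\nfeature.option.bar = \"hello\", \"world\"\n";
    int nr_nodes;

    nr_nodes = xbc_init(data);
    if (nr_nodes < 0)
        pr_err("bootconfig parse failed: %d\n", nr_nodes);
    else
        pr_info("bootconfig: %d nodes\n", nr_nodes);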
+
+/**
+ * xbc_debug_dump() - Dump current XBC node list
+ *
+ * Dump the current XBC node list on printk buffer for debug.
+ */
+void __init xbc_debug_dump(void)
+{
+ int i;
+
+ for (i = 0; i < xbc_node_num; i++) {
+ pr_debug("[%d] %s (%s) .next=%d, .child=%d .parent=%d\n", i,
+ xbc_node_get_data(xbc_nodes + i),
+ xbc_node_is_value(xbc_nodes + i) ? "value" : "key",
+ xbc_nodes[i].next, xbc_nodes[i].child,
+ xbc_nodes[i].parent);
+ }
+}
diff --git a/tools/Makefile b/tools/Makefile
index 7e42f7b8bfa7..bd778812e915 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -28,6 +28,7 @@ help:
@echo ' pci - PCI tools'
@echo ' perf - Linux performance measurement and analysis tool'
@echo ' selftests - various kernel selftests'
+ @echo ' bootconfig - boot config tool'
@echo ' spi - spi tools'
@echo ' tmon - thermal monitoring and tuning tool'
@echo ' turbostat - Intel CPU idle stats and freq reporting tool'
@@ -63,7 +64,7 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
-cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
+cgroup firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
$(call descend,$@)
liblockdep: FORCE
@@ -96,7 +97,7 @@ kvm_stat: FORCE
$(call descend,kvm/$@)
all: acpi cgroup cpupower gpio hv firewire liblockdep \
- perf selftests spi turbostat usb \
+ perf selftests bootconfig spi turbostat usb \
virtio vm bpf x86_energy_perf_policy \
tmon freefall iio objtool kvm_stat wmi \
pci debugging
@@ -107,7 +108,7 @@ acpi_install:
cpupower_install:
$(call descend,power/$(@:_install=),install)
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install:
+cgroup_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install:
$(call descend,$(@:_install=),install)
liblockdep_install:
@@ -141,7 +142,7 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
-cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean:
+cgroup_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean:
$(call descend,$(@:_clean=),clean)
liblockdep_clean:
@@ -176,7 +177,7 @@ build_clean:
$(call descend,build,clean)
clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
- perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
+ perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \
diff --git a/tools/bootconfig/.gitignore b/tools/bootconfig/.gitignore
new file mode 100644
index 000000000000..e7644dfaa4a7
--- /dev/null
+++ b/tools/bootconfig/.gitignore
@@ -0,0 +1 @@
+bootconfig
diff --git a/tools/bootconfig/Makefile b/tools/bootconfig/Makefile
new file mode 100644
index 000000000000..a6146ac64458
--- /dev/null
+++ b/tools/bootconfig/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for bootconfig command
+
+bindir ?= /usr/bin
+
+HEADER = include/linux/bootconfig.h
+CFLAGS = -Wall -g -I./include
+
+PROGS = bootconfig
+
+all: $(PROGS)
+
+bootconfig: ../../lib/bootconfig.c main.c $(HEADER)
+ $(CC) $(filter %.c,$^) $(CFLAGS) -o $@
+
+install: $(PROGS)
+ install bootconfig $(DESTDIR)$(bindir)
+
+test: bootconfig
+ ./test-bootconfig.sh
+
+clean:
+ $(RM) -f *.o bootconfig
diff --git a/tools/bootconfig/include/linux/bootconfig.h b/tools/bootconfig/include/linux/bootconfig.h
new file mode 100644
index 000000000000..078cbd2ba651
--- /dev/null
+++ b/tools/bootconfig/include/linux/bootconfig.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BOOTCONFIG_LINUX_BOOTCONFIG_H
+#define _BOOTCONFIG_LINUX_BOOTCONFIG_H
+
+#include "../../../../include/linux/bootconfig.h"
+
+#endif
diff --git a/tools/bootconfig/include/linux/bug.h b/tools/bootconfig/include/linux/bug.h
new file mode 100644
index 000000000000..7b65a389c0dd
--- /dev/null
+++ b/tools/bootconfig/include/linux/bug.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_BUG_H
+#define _SKC_LINUX_BUG_H
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#define WARN_ON(cond) \
+ ((cond) ? printf("Internal warning(%s:%d, %s): %s\n", \
+ __FILE__, __LINE__, __func__, #cond) : 0)
+
+#endif
diff --git a/tools/bootconfig/include/linux/ctype.h b/tools/bootconfig/include/linux/ctype.h
new file mode 100644
index 000000000000..c56ecc136448
--- /dev/null
+++ b/tools/bootconfig/include/linux/ctype.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_CTYPE_H
+#define _SKC_LINUX_CTYPE_H
+
+#include <ctype.h>
+
+#endif
diff --git a/tools/bootconfig/include/linux/errno.h b/tools/bootconfig/include/linux/errno.h
new file mode 100644
index 000000000000..5d9f91ec2fda
--- /dev/null
+++ b/tools/bootconfig/include/linux/errno.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_ERRNO_H
+#define _SKC_LINUX_ERRNO_H
+
+#include <asm/errno.h>
+
+#endif
diff --git a/tools/bootconfig/include/linux/kernel.h b/tools/bootconfig/include/linux/kernel.h
new file mode 100644
index 000000000000..2d93320aa374
--- /dev/null
+++ b/tools/bootconfig/include/linux/kernel.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_KERNEL_H
+#define _SKC_LINUX_KERNEL_H
+
+#include <stdlib.h>
+#include <stdbool.h>
+
+#include <linux/printk.h>
+
+typedef unsigned short u16;
+typedef unsigned int u32;
+
+#define unlikely(cond) (cond)
+
+#define __init
+#define __initdata
+
+#endif
diff --git a/tools/bootconfig/include/linux/printk.h b/tools/bootconfig/include/linux/printk.h
new file mode 100644
index 000000000000..017bcd6912a5
--- /dev/null
+++ b/tools/bootconfig/include/linux/printk.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_PRINTK_H
+#define _SKC_LINUX_PRINTK_H
+
+#include <stdio.h>
+
+/* controllable printf */
+extern int pr_output;
+#define printk(fmt, ...) \
+ (pr_output ? printf(fmt, __VA_ARGS__) : 0)
+
+#define pr_err printk
+#define pr_warn printk
+#define pr_info printk
+#define pr_debug printk
+
+#endif
diff --git a/tools/bootconfig/include/linux/string.h b/tools/bootconfig/include/linux/string.h
new file mode 100644
index 000000000000..8267af75153a
--- /dev/null
+++ b/tools/bootconfig/include/linux/string.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SKC_LINUX_STRING_H
+#define _SKC_LINUX_STRING_H
+
+#include <string.h>
+
+/* Copied from lib/string.c */
+static inline char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
+
+static inline char *strim(char *s)
+{
+ size_t size;
+ char *end;
+
+ size = strlen(s);
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end >= s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ return skip_spaces(s);
+}
+
+#endif
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
new file mode 100644
index 000000000000..47f488458328
--- /dev/null
+++ b/tools/bootconfig/main.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Boot config tool for initrd image
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+
+#include <linux/kernel.h>
+#include <linux/bootconfig.h>
+
+int pr_output = 1;
+
+static int xbc_show_array(struct xbc_node *node)
+{
+ const char *val;
+ int i = 0;
+
+ xbc_array_for_each_value(node, val) {
+ printf("\"%s\"%s", val, node->next ? ", " : ";\n");
+ i++;
+ }
+ return i;
+}
+
+static void xbc_show_compact_tree(void)
+{
+ struct xbc_node *node, *cnode;
+ int depth = 0, i;
+
+ node = xbc_root_node();
+ while (node && xbc_node_is_key(node)) {
+ for (i = 0; i < depth; i++)
+ printf("\t");
+ cnode = xbc_node_get_child(node);
+ while (cnode && xbc_node_is_key(cnode) && !cnode->next) {
+ printf("%s.", xbc_node_get_data(node));
+ node = cnode;
+ cnode = xbc_node_get_child(node);
+ }
+ if (cnode && xbc_node_is_key(cnode)) {
+ printf("%s {\n", xbc_node_get_data(node));
+ depth++;
+ node = cnode;
+ continue;
+ } else if (cnode && xbc_node_is_value(cnode)) {
+ printf("%s = ", xbc_node_get_data(node));
+ if (cnode->next)
+ xbc_show_array(cnode);
+ else
+ printf("\"%s\";\n", xbc_node_get_data(cnode));
+ } else {
+ printf("%s;\n", xbc_node_get_data(node));
+ }
+
+ if (node->next) {
+ node = xbc_node_get_next(node);
+ continue;
+ }
+ while (!node->next) {
+ node = xbc_node_get_parent(node);
+ if (!node)
+ return;
+ if (!xbc_node_get_child(node)->next)
+ continue;
+ depth--;
+ for (i = 0; i < depth; i++)
+ printf("\t");
+ printf("}\n");
+ }
+ node = xbc_node_get_next(node);
+ }
+}
+
+/* Simple real checksum */
+int checksum(unsigned char *buf, int len)
+{
+ int i, sum = 0;
+
+ for (i = 0; i < len; i++)
+ sum += buf[i];
+
+ return sum;
+}
+
+#define PAGE_SIZE 4096
+
+int load_xbc_fd(int fd, char **buf, int size)
+{
+ int ret;
+
+ *buf = malloc(size + 1);
+ if (!*buf)
+ return -ENOMEM;
+
+ ret = read(fd, *buf, size);
+ if (ret < 0)
+ return -errno;
+ (*buf)[size] = '\0';
+
+ return ret;
+}
+
+/* Return the read size or -errno */
+int load_xbc_file(const char *path, char **buf)
+{
+ struct stat stat;
+ int fd, ret;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return -errno;
+ ret = fstat(fd, &stat);
+ if (ret < 0)
+ return -errno;
+
+ ret = load_xbc_fd(fd, buf, stat.st_size);
+
+ close(fd);
+
+ return ret;
+}
+
+int load_xbc_from_initrd(int fd, char **buf)
+{
+ struct stat stat;
+ int ret;
+ u32 size = 0, csum = 0, rcsum;
+
+ ret = fstat(fd, &stat);
+ if (ret < 0)
+ return -errno;
+
+ if (stat.st_size < 8)
+ return 0;
+
+ if (lseek(fd, -8, SEEK_END) < 0) {
+ printf("Failed to lseek: %d\n", -errno);
+ return -errno;
+ }
+
+ if (read(fd, &size, sizeof(u32)) < 0)
+ return -errno;
+
+ if (read(fd, &csum, sizeof(u32)) < 0)
+ return -errno;
+
+ /* Wrong size, maybe no boot config here */
+ if (stat.st_size < size + 8)
+ return 0;
+
+ if (lseek(fd, stat.st_size - 8 - size, SEEK_SET) < 0) {
+ printf("Failed to lseek: %d\n", -errno);
+ return -errno;
+ }
+
+ ret = load_xbc_fd(fd, buf, size);
+ if (ret < 0)
+ return ret;
+
+ /* Wrong Checksum, maybe no boot config here */
+ rcsum = checksum((unsigned char *)*buf, size);
+ if (csum != rcsum) {
+ printf("checksum error: %d != %d\n", csum, rcsum);
+ return 0;
+ }
+
+ ret = xbc_init(*buf);
+ /* Wrong data, maybe no boot config here */
+ if (ret < 0)
+ return 0;
+
+ return size;
+}
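The layout this function expects at the tail of the initrd, as implied by the reads above, is:

    [ initrd image ][ bootconfig text: size bytes ][ size: u32 ][ checksum: u32 ]
                                                    `-------- last 8 bytes --------'

i.e. the last 8 bytes hold the size and a simple byte-sum checksum of the bootconfig data immediately preceding them (as written by apply_xbc() below, including the trailing NUL).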
+
+int show_xbc(const char *path)
+{
+ int ret, fd;
+ char *buf = NULL;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ printf("Failed to open initrd %s: %d\n", path, fd);
+ return -errno;
+ }
+
+ ret = load_xbc_from_initrd(fd, &buf);
+ if (ret < 0)
+ printf("Failed to load a boot config from initrd: %d\n", ret);
+ else
+ xbc_show_compact_tree();
+
+ close(fd);
+ free(buf);
+
+ return ret;
+}
+
+int delete_xbc(const char *path)
+{
+ struct stat stat;
+ int ret = 0, fd, size;
+ char *buf = NULL;
+
+ fd = open(path, O_RDWR);
+ if (fd < 0) {
+ printf("Failed to open initrd %s: %d\n", path, fd);
+ return -errno;
+ }
+
+ /*
+ * Suppress error messages in xbc_init() because the file may just
+ * contain data that coincidentally matches the size and checksum footer.
+ */
+ pr_output = 0;
+ size = load_xbc_from_initrd(fd, &buf);
+ pr_output = 1;
+ if (size < 0) {
+ ret = size;
+ printf("Failed to load a boot config from initrd: %d\n", ret);
+ } else if (size > 0) {
+ ret = fstat(fd, &stat);
+ if (!ret)
+ ret = ftruncate(fd, stat.st_size - size - 8);
+ if (ret)
+ ret = -errno;
+ } /* Ignore if there is no boot config in initrd */
+
+ close(fd);
+ free(buf);
+
+ return ret;
+}
+
+int apply_xbc(const char *path, const char *xbc_path)
+{
+ u32 size, csum;
+ char *buf, *data;
+ int ret, fd;
+
+ ret = load_xbc_file(xbc_path, &buf);
+ if (ret < 0) {
+ printf("Failed to load %s : %d\n", xbc_path, ret);
+ return ret;
+ }
+ size = strlen(buf) + 1;
+ csum = checksum((unsigned char *)buf, size);
+
+ /* Prepare xbc_path data */
+ data = malloc(size + 8);
+ if (!data)
+ return -ENOMEM;
+ strcpy(data, buf);
+ *(u32 *)(data + size) = size;
+ *(u32 *)(data + size + 4) = csum;
+
+ /* Check the data format */
+ ret = xbc_init(buf);
+ if (ret < 0) {
+ printf("Failed to parse %s: %d\n", xbc_path, ret);
+ free(data);
+ free(buf);
+ return ret;
+ }
+ printf("Apply %s to %s\n", xbc_path, path);
+ printf("\tNumber of nodes: %d\n", ret);
+ printf("\tSize: %u bytes\n", (unsigned int)size);
+ printf("\tChecksum: %d\n", (unsigned int)csum);
+
+ /* TODO: Check the options by schema */
+ xbc_destroy_all();
+ free(buf);
+
+ /* Remove old boot config if exists */
+ ret = delete_xbc(path);
+ if (ret < 0) {
+ printf("Failed to delete previous boot config: %d\n", ret);
+ return ret;
+ }
+
+ /* Apply new one */
+ fd = open(path, O_RDWR | O_APPEND);
+ if (fd < 0) {
+ printf("Failed to open %s: %d\n", path, fd);
+ return fd;
+ }
+ /* TODO: Ensure the @path is initramfs/initrd image */
+ ret = write(fd, data, size + 8);
+ if (ret < 0) {
+ printf("Failed to apply a boot config: %d\n", ret);
+ free(data);
+ close(fd);
+ return ret;
+ }
+ close(fd);
+ free(data);
+
+ return 0;
+}
+
+int usage(void)
+{
+ printf("Usage: bootconfig [OPTIONS] <INITRD>\n"
+ " Apply, delete or show boot config to initrd.\n"
+ " Options:\n"
+ " -a <config>: Apply boot config to initrd\n"
+ " -d : Delete boot config file from initrd\n\n"
+ " If no option is given, show current applied boot config.\n");
+ return -1;
+}
+
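+/*
+ * Example invocations (file names here are only illustrative):
+ *   bootconfig -a extra.bconf initrd.img   apply a boot config to the initrd
+ *   bootconfig initrd.img                  show the applied boot config
+ *   bootconfig -d initrd.img               delete the applied boot config
+ */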
+int main(int argc, char **argv)
+{
+ char *path = NULL;
+ char *apply = NULL;
+ bool delete = false;
+ int opt;
+
+ while ((opt = getopt(argc, argv, "hda:")) != -1) {
+ switch (opt) {
+ case 'd':
+ delete = true;
+ break;
+ case 'a':
+ apply = optarg;
+ break;
+ case 'h':
+ default:
+ return usage();
+ }
+ }
+
+ if (apply && delete) {
+ printf("Error: You can not specify both -a and -d at once.\n");
+ return usage();
+ }
+
+ if (optind >= argc) {
+ printf("Error: No initrd is specified.\n");
+ return usage();
+ }
+
+ path = argv[optind];
+
+ if (apply)
+ return apply_xbc(path, apply);
+ else if (delete)
+ return delete_xbc(path);
+
+ return show_xbc(path);
+}
diff --git a/tools/bootconfig/samples/bad-array-space-comment.bconf b/tools/bootconfig/samples/bad-array-space-comment.bconf
new file mode 100644
index 000000000000..fda19e47d0db
--- /dev/null
+++ b/tools/bootconfig/samples/bad-array-space-comment.bconf
@@ -0,0 +1,5 @@
+key = # comment
+ "value1", # comment1
+ "value2" # comment2
+,
+ "value3"
diff --git a/tools/bootconfig/samples/bad-array.bconf b/tools/bootconfig/samples/bad-array.bconf
new file mode 100644
index 000000000000..0174af019d7f
--- /dev/null
+++ b/tools/bootconfig/samples/bad-array.bconf
@@ -0,0 +1,2 @@
+# Array must be comma separated.
+key = "value1" "value2"
diff --git a/tools/bootconfig/samples/bad-dotword.bconf b/tools/bootconfig/samples/bad-dotword.bconf
new file mode 100644
index 000000000000..ba5557b2bdd3
--- /dev/null
+++ b/tools/bootconfig/samples/bad-dotword.bconf
@@ -0,0 +1,4 @@
+# A keyword must not start with '.'
+key {
+ .word = 1
+}
diff --git a/tools/bootconfig/samples/bad-empty.bconf b/tools/bootconfig/samples/bad-empty.bconf
new file mode 100644
index 000000000000..2ba3f6cc6a47
--- /dev/null
+++ b/tools/bootconfig/samples/bad-empty.bconf
@@ -0,0 +1 @@
+# Wrong boot config: comment only
diff --git a/tools/bootconfig/samples/bad-keyerror.bconf b/tools/bootconfig/samples/bad-keyerror.bconf
new file mode 100644
index 000000000000..b6e247a099d0
--- /dev/null
+++ b/tools/bootconfig/samples/bad-keyerror.bconf
@@ -0,0 +1,2 @@
+# A key word cannot contain ','
+key,word
diff --git a/tools/bootconfig/samples/bad-longkey.bconf b/tools/bootconfig/samples/bad-longkey.bconf
new file mode 100644
index 000000000000..eb97369f91a8
--- /dev/null
+++ b/tools/bootconfig/samples/bad-longkey.bconf
@@ -0,0 +1 @@
+key_word_is_too_long01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345
diff --git a/tools/bootconfig/samples/bad-manywords.bconf b/tools/bootconfig/samples/bad-manywords.bconf
new file mode 100644
index 000000000000..8db81967c48a
--- /dev/null
+++ b/tools/bootconfig/samples/bad-manywords.bconf
@@ -0,0 +1 @@
+key1.is2.too3.long4.5.6.7.8.9.10.11.12.13.14.15.16.17
diff --git a/tools/bootconfig/samples/bad-no-keyword.bconf b/tools/bootconfig/samples/bad-no-keyword.bconf
new file mode 100644
index 000000000000..eff26808566c
--- /dev/null
+++ b/tools/bootconfig/samples/bad-no-keyword.bconf
@@ -0,0 +1,2 @@
+# No keyword
+{}
diff --git a/tools/bootconfig/samples/bad-nonprintable.bconf b/tools/bootconfig/samples/bad-nonprintable.bconf
new file mode 100644
index 000000000000..3bb1a2864e52
--- /dev/null
+++ b/tools/bootconfig/samples/bad-nonprintable.bconf
@@ -0,0 +1,2 @@
+# Non-printable character in value
+key = ""
diff --git a/tools/bootconfig/samples/bad-spaceword.bconf b/tools/bootconfig/samples/bad-spaceword.bconf
new file mode 100644
index 000000000000..90c703d32a9a
--- /dev/null
+++ b/tools/bootconfig/samples/bad-spaceword.bconf
@@ -0,0 +1,2 @@
+# Spaces are not allowed between key words
+key . word
diff --git a/tools/bootconfig/samples/bad-tree.bconf b/tools/bootconfig/samples/bad-tree.bconf
new file mode 100644
index 000000000000..5a6038edcd55
--- /dev/null
+++ b/tools/bootconfig/samples/bad-tree.bconf
@@ -0,0 +1,5 @@
+# Brace is not closed
+tree {
+ node {
+ value = 1
+}
diff --git a/tools/bootconfig/samples/bad-value.bconf b/tools/bootconfig/samples/bad-value.bconf
new file mode 100644
index 000000000000..a1217fed86cc
--- /dev/null
+++ b/tools/bootconfig/samples/bad-value.bconf
@@ -0,0 +1,3 @@
+# Unclosed quote
+value = "data
+
diff --git a/tools/bootconfig/samples/escaped.bconf b/tools/bootconfig/samples/escaped.bconf
new file mode 100644
index 000000000000..9f72043b3216
--- /dev/null
+++ b/tools/bootconfig/samples/escaped.bconf
@@ -0,0 +1,3 @@
+key1 = "A\B\C"
+key2 = '\'\''
+key3 = "\\"
diff --git a/tools/bootconfig/samples/good-array-space-comment.bconf b/tools/bootconfig/samples/good-array-space-comment.bconf
new file mode 100644
index 000000000000..45b938dc0695
--- /dev/null
+++ b/tools/bootconfig/samples/good-array-space-comment.bconf
@@ -0,0 +1,4 @@
+key = # comment
+ "value1", # comment1
+ "value2" , # comment2
+ "value3"
diff --git a/tools/bootconfig/samples/good-comment-after-value.bconf b/tools/bootconfig/samples/good-comment-after-value.bconf
new file mode 100644
index 000000000000..0d92a853df72
--- /dev/null
+++ b/tools/bootconfig/samples/good-comment-after-value.bconf
@@ -0,0 +1 @@
+key = "value" # comment
diff --git a/tools/bootconfig/samples/good-printables.bconf b/tools/bootconfig/samples/good-printables.bconf
new file mode 100644
index 000000000000..ebb985a66ed8
--- /dev/null
+++ b/tools/bootconfig/samples/good-printables.bconf
@@ -0,0 +1,2 @@
+key = "
+ !#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
diff --git a/tools/bootconfig/samples/good-simple.bconf b/tools/bootconfig/samples/good-simple.bconf
new file mode 100644
index 000000000000..37dd6d21c176
--- /dev/null
+++ b/tools/bootconfig/samples/good-simple.bconf
@@ -0,0 +1,11 @@
+# A good simple bootconfig
+
+key.word1 = 1
+key.word2=2
+key.word3 = 3;
+
+key {
+word4 = 4 }
+
+key { word5 = 5; word6 = 6 }
+
diff --git a/tools/bootconfig/samples/good-single.bconf b/tools/bootconfig/samples/good-single.bconf
new file mode 100644
index 000000000000..98e55ad8b711
--- /dev/null
+++ b/tools/bootconfig/samples/good-single.bconf
@@ -0,0 +1,4 @@
+# single key style
+key = 1
+key2 = 2
+key3 = "alpha", "beta"
diff --git a/tools/bootconfig/samples/good-space-after-value.bconf b/tools/bootconfig/samples/good-space-after-value.bconf
new file mode 100644
index 000000000000..56c15cbc5741
--- /dev/null
+++ b/tools/bootconfig/samples/good-space-after-value.bconf
@@ -0,0 +1 @@
+key = "value"
diff --git a/tools/bootconfig/samples/good-tree.bconf b/tools/bootconfig/samples/good-tree.bconf
new file mode 100644
index 000000000000..f2ddefc8b52a
--- /dev/null
+++ b/tools/bootconfig/samples/good-tree.bconf
@@ -0,0 +1,12 @@
+key {
+ word {
+ tree {
+ value = "0"}
+ }
+ word2 {
+ tree {
+ value = 1,2 }
+ }
+}
+other.tree {
+ value = 2; value2 = 3;}
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
new file mode 100755
index 000000000000..87725e8723f8
--- /dev/null
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -0,0 +1,105 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
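+# Run this from tools/bootconfig/ with the bootconfig tool already built;
+# the test fixtures are taken from the samples/ directory.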
+echo "Boot config test script"
+
+BOOTCONF=./bootconfig
+INITRD=`mktemp initrd-XXXX`
+TEMPCONF=`mktemp temp-XXXX.bconf`
+NG=0
+
+cleanup() {
+ rm -f $INITRD $TEMPCONF
+ exit $NG
+}
+
+trap cleanup EXIT TERM
+
+NO=1
+
+xpass() { # pass test command
+ echo "test case $NO ($3)... "
+ if ! ($@ && echo "\t\t[OK]"); then
+ echo "\t\t[NG]"; NG=$((NG + 1))
+ fi
+ NO=$((NO + 1))
+}
+
+xfail() { # fail test command
+ echo "test case $NO ($3)... "
+ if ! (! $@ && echo "\t\t[OK]"); then
+ echo "\t\t[NG]"; NG=$((NG + 1))
+ fi
+ NO=$((NO + 1))
+}
+
+echo "Basic command test"
+xpass $BOOTCONF $INITRD
+
+echo "Delete command should success without bootconfig"
+xpass $BOOTCONF -d $INITRD
+
+dd if=/dev/zero of=$INITRD bs=4096 count=1
+echo "key = value;" > $TEMPCONF
+bconf_size=$(stat -c %s $TEMPCONF)
+initrd_size=$(stat -c %s $INITRD)
+
+echo "Apply command test"
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+new_size=$(stat -c %s $INITRD)
+
+echo "File size check"
+xpass test $new_size -eq $(expr $bconf_size + $initrd_size + 9)
+
+echo "Apply command repeat test"
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "File size check"
+xpass test $new_size -eq $(stat -c %s $INITRD)
+
+echo "Delete command check"
+xpass $BOOTCONF -d $INITRD
+
+echo "File size check"
+new_size=$(stat -c %s $INITRD)
+xpass test $new_size -eq $initrd_size
+
+echo "Max node number check"
+
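+# 1024 keys are accepted; one more key must be rejected by the node limit.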
+echo -n > $TEMPCONF
+for i in `seq 1 1024` ; do
+ echo "node$i" >> $TEMPCONF
+done
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "badnode" >> $TEMPCONF
+xfail $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "Max filesize check"
+
+# Max size is 32767 (including terminal byte)
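+# The dd below produces 24576 random bytes, i.e. 32768 base64 characters,
+# which puts the file just over the limit.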
+echo -n "data = \"" > $TEMPCONF
+dd if=/dev/urandom bs=768 count=32 | base64 -w0 >> $TEMPCONF
+echo "\"" >> $TEMPCONF
+xfail $BOOTCONF -a $TEMPCONF $INITRD
+
+truncate -s 32764 $TEMPCONF
+echo "\"" >> $TEMPCONF # add 2 bytes + terminal ('\"\n\0')
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+
+echo "=== expected failure cases ==="
+for i in samples/bad-* ; do
+ xfail $BOOTCONF -a $i $INITRD
+done
+
+echo "=== expected success cases ==="
+for i in samples/good-* ; do
+ xpass $BOOTCONF -a $i $INITRD
+done
+
+echo
+if [ $NG -eq 0 ]; then
+ echo "All tests passed"
+else
+ echo "$NG tests failed"
+fi
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc
new file mode 100644
index 000000000000..d44087a2f3d1
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test histogram parser errors
+
+if [ ! -f set_event -o ! -d events/kmem ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f events/kmem/kmalloc/trigger ]; then
+ echo "event trigger is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f events/kmem/kmalloc/hist ]; then
+ echo "hist trigger is not supported"
+ exit_unsupported
+fi
+
+[ -f error_log ] || exit_unsupported
+
+check_error() { # command-with-error-pos-by-^
+ ftrace_errlog_check 'hist:kmem:kmalloc' "$1" 'events/kmem/kmalloc/trigger'
+}
+
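+# In each command below, '^' marks the token expected to trigger the error
+# named in the trailing comment.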
+check_error 'hist:keys=common_pid:vals=bytes_req:sort=common_pid,^junk' # INVALID_SORT_FIELD
+check_error 'hist:keys=common_pid:vals=bytes_req:^sort=' # EMPTY_ASSIGNMENT
+check_error 'hist:keys=common_pid:vals=bytes_req:^sort=common_pid,' # EMPTY_SORT_FIELD
+check_error 'hist:keys=common_pid:vals=bytes_req:sort=common_pid.^junk' # INVALID_SORT_MODIFIER
+check_error 'hist:keys=common_pid:vals=bytes_req,bytes_alloc:^sort=common_pid,bytes_req,bytes_alloc' # TOO_MANY_SORT_FIELDS
+
+exit 0