path: root/kernel/trace/trace_events.c
author	Alexei Starovoitov <ast@fb.com>	2016-04-07 03:43:28 +0200
committer	David S. Miller <davem@davemloft.net>	2016-04-08 03:04:26 +0200
commit	32bbe0078afe86a8bf4c67c6b3477781b15e94dc (patch)
tree	8c5290f51108de3a2c98cb7171942fb9d5e36ab2 /kernel/trace/trace_events.c
parent	bpf: support bpf_get_stackid() and bpf_perf_event_output() in tracepoint prog... (diff)
bpf: sanitize bpf tracepoint access
During bpf program loading, remember the last byte of ctx access; at the time of attaching the program to a tracepoint, check that the program doesn't access bytes beyond those defined in the tracepoint fields.

This also disallows access to __dynamic_array fields, but that can be relaxed in the future.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
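The load-time half of this scheme lives in the BPF verifier, outside the file shown below; the following is only a hedged sketch of the idea, and the max_ctx_offset field name is taken from the companion verifier change in the same series, so treat the exact names and signature as assumptions rather than the code of this commit.

/*
 * Sketch (assumption, not part of this diff): as the verifier checks each
 * ctx access of a tracepoint program, it records the highest byte the
 * program may touch.  The attach path later compares this value against
 * the size of the statically defined tracepoint fields.
 */
static void record_ctx_access(struct bpf_prog *prog, int off, int size)
{
	if (off + size > prog->aux->max_ctx_offset)
		prog->aux->max_ctx_offset = off + size;
}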
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--	kernel/trace/trace_events.c	18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 05ddc0820771..ced963049e0a 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -204,6 +204,24 @@ static void trace_destroy_fields(struct trace_event_call *call)
}
}
+/*
+ * run-time version of trace_event_get_offsets_<call>() that returns the last
+ * accessible offset of trace fields excluding __dynamic_array bytes
+ */
+int trace_event_get_offsets(struct trace_event_call *call)
+{
+ struct ftrace_event_field *tail;
+ struct list_head *head;
+
+ head = trace_get_fields(call);
+ /*
+ * head->next points to the last field with the largest offset,
+ * since it was added last by trace_define_field()
+ */
+ tail = list_first_entry(head, struct ftrace_event_field, link);
+ return tail->offset + tail->size;
+}
+
int trace_event_raw_init(struct trace_event_call *call)
{
int id;
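For context, a minimal sketch of how the attach path can use the helper added above; the wrapper function name here is hypothetical and the max_ctx_offset field comes from other parts of the patch series, not from this file's diff.

/* Hedged sketch: reject programs that read past the static tracepoint fields. */
static int check_tp_ctx_bounds(struct trace_event_call *call,
			       struct bpf_prog *prog)
{
	/* first byte past the last statically defined field */
	int last = trace_event_get_offsets(call);

	if (prog->aux->max_ctx_offset > last)
		return -EACCES;	/* program accesses bytes beyond the fields */
	return 0;
}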