author     Daniel T. Lee <danieltimlee@gmail.com>  2020-08-23 10:53:33 +0200
committer  Alexei Starovoitov <ast@kernel.org>     2020-08-25 05:59:35 +0200
commit     3677d0a13171bb1dc8db0af84d48dea14a899962 (patch)
tree       3de96b317e44d7e1eb175a4be093101f317e88ff /samples/bpf/lathist_kern.c
parent     samples: bpf: Cleanup bpf_load.o from Makefile (diff)
samples: bpf: Refactor kprobe tracing programs with libbpf
To address the increasing fragmentation of the BPF loader programs, this commit refactors the existing kprobe tracing programs to use the libbpf loader instead of bpf_load.o, which is used elsewhere in samples/bpf.

- For kprobe events pointing to system calls, the SYSCALL() macro in trace_common.h was used.
- Adding a kprobe event and attaching a BPF program to it is done through bpf_program__attach().
- Instead of the existing BPF map definition, the map definitions have been refactored into the new BTF-defined map format.

Signed-off-by: Daniel T. Lee <danieltimlee@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200823085334.9413-3-danieltimlee@gmail.com
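As a rough sketch only (the actual user-space changes live in the corresponding *_user.c files and are not shown on this page), the libbpf loading pattern this series switches to looks roughly like the following; the object file name and the program name are illustrative assumptions, not taken from the real lathist_user.c:

	/* Illustrative sketch of the libbpf loading pattern; object and
	 * program names below are assumptions for this example only.
	 */
	#include <stdio.h>
	#include <bpf/libbpf.h>

	int main(void)
	{
		struct bpf_object *obj;
		struct bpf_program *prog;
		struct bpf_link *link;

		/* Open and load the compiled BPF object
		 * (replaces bpf_load.o's load_bpf_file()).
		 */
		obj = bpf_object__open_file("lathist_kern.o", NULL);
		if (libbpf_get_error(obj)) {
			fprintf(stderr, "failed to open BPF object\n");
			return 1;
		}
		if (bpf_object__load(obj)) {
			fprintf(stderr, "failed to load BPF object\n");
			bpf_object__close(obj);
			return 1;
		}

		/* Attach by program name; the SEC("kprobe/...") annotation
		 * tells libbpf which kprobe to create and attach to.
		 */
		prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
		if (!prog) {
			fprintf(stderr, "program not found\n");
			bpf_object__close(obj);
			return 1;
		}
		link = bpf_program__attach(prog);
		if (libbpf_get_error(link)) {
			fprintf(stderr, "failed to attach kprobe\n");
			bpf_object__close(obj);
			return 1;
		}

		/* ... read and print the histogram maps here ... */

		bpf_link__destroy(link);
		bpf_object__close(obj);
		return 0;
	}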
Diffstat (limited to 'samples/bpf/lathist_kern.c')
-rw-r--r--  samples/bpf/lathist_kern.c  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/samples/bpf/lathist_kern.c b/samples/bpf/lathist_kern.c
index ca9c2e4e69aa..4adfcbbe6ef4 100644
--- a/samples/bpf/lathist_kern.c
+++ b/samples/bpf/lathist_kern.c
@@ -18,12 +18,12 @@
* trace_preempt_[on|off] tracepoints hooks is not supported.
*/
-struct bpf_map_def SEC("maps") my_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u64),
- .max_entries = MAX_CPU,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, u64);
+ __uint(max_entries, MAX_CPU);
+} my_map SEC(".maps");
SEC("kprobe/trace_preempt_off")
int bpf_prog1(struct pt_regs *ctx)
@@ -61,12 +61,12 @@ static unsigned int log2l(unsigned long v)
return log2(v);
}
-struct bpf_map_def SEC("maps") my_lat = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(long),
- .max_entries = MAX_CPU * MAX_ENTRIES,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, long);
+ __uint(max_entries, MAX_CPU * MAX_ENTRIES);
+} my_lat SEC(".maps");
SEC("kprobe/trace_preempt_on")
int bpf_prog2(struct pt_regs *ctx)
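
For completeness, here is a hedged sketch (again not the real lathist_user.c) of how user space could read the BTF-defined my_lat map through libbpf once the object is loaded. The MAX_CPU and MAX_ENTRIES values and the flat cpu * MAX_ENTRIES + bucket indexing are assumptions made for this example about how the kernel side fills the map:

	#include <stdio.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	/* Assumed for this sketch to mirror the kernel-side constants;
	 * check lathist_kern.c for the real values.
	 */
	#define MAX_CPU		8
	#define MAX_ENTRIES	20

	static void dump_latency_hist(struct bpf_object *obj)
	{
		struct bpf_map *map;
		long value;
		int fd, key, cpu, bucket;

		map = bpf_object__find_map_by_name(obj, "my_lat");
		if (!map)
			return;
		fd = bpf_map__fd(map);

		/* Assumes one slot per (cpu, log2 bucket):
		 * key = cpu * MAX_ENTRIES + bucket.
		 */
		for (cpu = 0; cpu < MAX_CPU; cpu++) {
			for (bucket = 0; bucket < MAX_ENTRIES; bucket++) {
				key = cpu * MAX_ENTRIES + bucket;
				if (!bpf_map_lookup_elem(fd, &key, &value) && value)
					printf("cpu %d bucket %d: %ld\n",
					       cpu, bucket, value);
			}
		}
	}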