author	Jakub Kicinski <kuba@kernel.org>	2023-07-14 04:13:24 +0200
committer	Jakub Kicinski <kuba@kernel.org>	2023-07-14 04:13:24 +0200
commit	d2afa89f6690616b9fb55f3f74e6d2927589e43a (patch)
tree	48b5b9b08b3b0049f6674f29e99e0b3eafe8a3f7 /tools/lib
parent	Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net (diff)
parent	selftests/bpf: Add selftest for PTR_UNTRUSTED (diff)
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:
====================
pull-request: bpf-next 2023-07-13
We've added 67 non-merge commits during the last 15 day(s) which contain
a total of 106 files changed, 4444 insertions(+), 619 deletions(-).
The main changes are:
1) Fix bpftool build in presence of stale vmlinux.h,
from Alexander Lobakin.
2) Introduce bpf_mem_free_rcu() and bpf_mem_cache_free_rcu() and fix OOM
under stress, from Alexei Starovoitov.
3) Teach the verifier the actual bounds of bpf_get_smp_processor_id()
and fix a perf+libbpf issue related to custom section handling,
from Andrii Nakryiko.
4) Introduce BPF map element count, from Anton Protopopov.
5) Check skb ownership against full socket, from Kui-Feng Lee.
6) Support for up to 12 arguments in BPF trampoline, from Menglong Dong
(see the sketch after this list).
7) Export rcu_request_urgent_qs_task(), from Paul E. McKenney.
8) Fix BTF walking of unions, from Yafang Shao.
9) Extend link_info for kprobe_multi and perf_event links,
from Yafang Shao.
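For item 6, a minimal hedged sketch of what the raised argument limit
looks like on the BPF side: an fentry program on a hypothetical
seven-argument kernel function. The target name is illustrative, not a
real symbol; BPF_PROG() from bpf_tracing.h unpacks the raw context into
named arguments:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Sketch for the 12-argument trampoline support: with this series,
 * x86 trampolines can pass more than 6 arguments, including ones the
 * native calling convention spills to the stack.
 */
SEC("fentry/my_seven_arg_func")		/* hypothetical target function */
int BPF_PROG(trace_seven, int a, int b, int c, int d, int e, long f, long g)
{
	bpf_printk("a=%d f=%ld g=%ld", a, f, g);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";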
* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (67 commits)
selftests/bpf: Add selftest for PTR_UNTRUSTED
bpf: Fix an error in verifying a field in a union
selftests/bpf: Add selftests for nested_trust
bpf: Fix an error around PTR_UNTRUSTED
selftests/bpf: add testcase for TRACING with 6+ arguments
bpf, x86: allow function arguments up to 12 for TRACING
bpf, x86: save/restore regs with BPF_DW size
bpftool: Use "fallthrough;" keyword instead of comments
bpf: Add object leak check.
bpf: Convert bpf_cpumask to bpf_mem_cache_free_rcu.
bpf: Introduce bpf_mem_free_rcu() similar to kfree_rcu().
selftests/bpf: Improve test coverage of bpf_mem_alloc.
rcu: Export rcu_request_urgent_qs_task()
bpf: Allow reuse from waiting_for_gp_ttrace list.
bpf: Add a hint to allocated objects.
bpf: Change bpf_mem_cache draining process.
bpf: Further refactor alloc_bulk().
bpf: Factor out inc/dec of active flag into helpers.
bpf: Refactor alloc_bulk().
bpf: Let free_all() return the number of freed elements.
...
====================
Link: https://lore.kernel.org/r/20230714020910.80794-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
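The tools/lib diff below reworks how bpf_program__attach_kprobe_multi_opts()
resolves a glob pattern: candidate symbols are now cross-checked against
tracefs' available_filter_functions (or available_filter_functions_addrs
where present) rather than taken straight from kallsyms. For orientation, a
minimal hedged sketch of the user-side call; the program and pattern are
illustrative:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Attach a SEC("kprobe.multi") program to every kernel function matching
 * a glob. With a non-NULL pattern, libbpf resolves the symbols itself;
 * after this merge the candidate set is filtered through tracefs, so
 * symbols kprobes cannot attach to no longer break the whole attach.
 */
static struct bpf_link *attach_all_tcp(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", NULL);
	if (!link)
		fprintf(stderr, "kprobe.multi attach failed: %d\n", -errno);
	return link;
}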
Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/bpf/bpf.c         8
-rw-r--r--  tools/lib/bpf/bpf.h         6
-rw-r--r--  tools/lib/bpf/hashmap.h    10
-rw-r--r--  tools/lib/bpf/libbpf.c    258
-rw-r--r--  tools/lib/bpf/libbpf.h     15
-rw-r--r--  tools/lib/bpf/libbpf.map    1
-rw-r--r--  tools/lib/bpf/usdt.c        5

7 files changed, 268 insertions(+), 35 deletions(-)
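Several libbpf hunks below (append_subprog_relos(), bpf_program__set_insns(),
libbpf_unregister_prog_handler(), bpf_link_usdt_detach()) harden the same
pattern: reallocarray() with an element count of zero may legitimately return
NULL after freeing the old buffer, so NULL alone must not be treated as
ENOMEM, and the (possibly NULL) result still has to be stored back. A
stand-alone illustration of the pitfall; the helper is hypothetical and
assumes glibc's reallocarray():

#define _GNU_SOURCE
#include <errno.h>
#include <stdlib.h>

/* NULL from reallocarray() is an error only when new_cnt != 0; for
 * new_cnt == 0 the old block has been freed and NULL is the new,
 * valid pointer, so it must be written back to avoid a dangling one.
 */
static int shrink_array(long **arr, size_t new_cnt)
{
	long *tmp = reallocarray(*arr, new_cnt, sizeof(**arr));

	if (!tmp && new_cnt)
		return -ENOMEM;
	*arr = tmp;
	return 0;
}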
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index ed86b37d8024..3b0da19715e1 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -741,6 +741,14 @@ int bpf_link_create(int prog_fd, int target_fd,
 		if (!OPTS_ZEROED(opts, tracing))
 			return libbpf_err(-EINVAL);
 		break;
+	case BPF_NETFILTER:
+		attr.link_create.netfilter.pf = OPTS_GET(opts, netfilter.pf, 0);
+		attr.link_create.netfilter.hooknum = OPTS_GET(opts, netfilter.hooknum, 0);
+		attr.link_create.netfilter.priority = OPTS_GET(opts, netfilter.priority, 0);
+		attr.link_create.netfilter.flags = OPTS_GET(opts, netfilter.flags, 0);
+		if (!OPTS_ZEROED(opts, netfilter))
+			return libbpf_err(-EINVAL);
+		break;
 	default:
 		if (!OPTS_ZEROED(opts, flags))
 			return libbpf_err(-EINVAL);
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 9aa0ee473754..c676295ab9bf 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -349,6 +349,12 @@ struct bpf_link_create_opts {
 		struct {
 			__u64 cookie;
 		} tracing;
+		struct {
+			__u32 pf;
+			__u32 hooknum;
+			__s32 priority;
+			__u32 flags;
+		} netfilter;
 	};
 	size_t :0;
 };
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
index 0a5bf1937a7c..c12f8320e668 100644
--- a/tools/lib/bpf/hashmap.h
+++ b/tools/lib/bpf/hashmap.h
@@ -80,16 +80,6 @@ struct hashmap {
 	size_t sz;
 };
 
-#define HASHMAP_INIT(hash_fn, equal_fn, ctx) {	\
-	.hash_fn = (hash_fn),			\
-	.equal_fn = (equal_fn),			\
-	.ctx = (ctx),				\
-	.buckets = NULL,			\
-	.cap = 0,				\
-	.cap_bits = 0,				\
-	.sz = 0,				\
-}
-
 void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
 		   hashmap_equal_fn equal_fn, void *ctx);
 struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 214f828ece6b..63311a73c16d 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -5471,6 +5471,10 @@ static int load_module_btfs(struct bpf_object *obj)
 		err = bpf_btf_get_next_id(id, &id);
 		if (err && errno == ENOENT)
 			return 0;
+		if (err && errno == EPERM) {
+			pr_debug("skipping module BTFs loading, missing privileges\n");
+			return 0;
+		}
 		if (err) {
 			err = -errno;
 			pr_warn("failed to iterate BTF objects: %d\n", err);
@@ -6157,7 +6161,11 @@ static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_progra
 	if (main_prog == subprog)
 		return 0;
 	relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
-	if (!relos)
+	/* if new count is zero, reallocarray can return a valid NULL result;
+	 * in this case the previous pointer will be freed, so we *have to*
+	 * reassign old pointer to the new value (even if it's NULL)
+	 */
+	if (!relos && new_cnt)
 		return -ENOMEM;
 	if (subprog->nr_reloc)
 		memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
@@ -8528,7 +8536,8 @@ int bpf_program__set_insns(struct bpf_program *prog,
 		return -EBUSY;
 
 	insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
-	if (!insns) {
+	/* NULL is a valid return from reallocarray if the new count is zero */
+	if (!insns && new_insn_cnt) {
 		pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
 		return -ENOMEM;
 	}
@@ -8558,13 +8567,31 @@ enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
 	return prog->type;
 }
 
+static size_t custom_sec_def_cnt;
+static struct bpf_sec_def *custom_sec_defs;
+static struct bpf_sec_def custom_fallback_def;
+static bool has_custom_fallback_def;
+static int last_custom_sec_def_handler_id;
+
 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
 {
 	if (prog->obj->loaded)
 		return libbpf_err(-EBUSY);
 
+	/* if type is not changed, do nothing */
+	if (prog->type == type)
+		return 0;
+
 	prog->type = type;
-	prog->sec_def = NULL;
+
+	/* If a program type was changed, we need to reset associated SEC()
+	 * handler, as it will be invalid now. The only exception is a generic
+	 * fallback handler, which by definition is program type-agnostic and
+	 * is a catch-all custom handler, optionally set by the application,
+	 * so should be able to handle any type of BPF program.
+	 */
+	if (prog->sec_def != &custom_fallback_def)
+		prog->sec_def = NULL;
 	return 0;
 }
 
@@ -8740,13 +8767,6 @@ static const struct bpf_sec_def section_defs[] = {
 	SEC_DEF("netfilter",		NETFILTER, BPF_NETFILTER, SEC_NONE),
 };
 
-static size_t custom_sec_def_cnt;
-static struct bpf_sec_def *custom_sec_defs;
-static struct bpf_sec_def custom_fallback_def;
-static bool has_custom_fallback_def;
-
-static int last_custom_sec_def_handler_id;
-
 int libbpf_register_prog_handler(const char *sec,
 				 enum bpf_prog_type prog_type,
 				 enum bpf_attach_type exp_attach_type,
@@ -8826,7 +8846,11 @@ int libbpf_unregister_prog_handler(int handler_id)
 
 	/* try to shrink the array, but it's ok if we couldn't */
 	sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
-	if (sec_defs)
+	/* if new count is zero, reallocarray can return a valid NULL result;
+	 * in this case the previous pointer will be freed, so we *have to*
+	 * reassign old pointer to the new value (even if it's NULL)
+	 */
+	if (sec_defs || custom_sec_def_cnt == 0)
 		custom_sec_defs = sec_defs;
 
 	return 0;
@@ -10224,6 +10248,18 @@ static const char *tracefs_uprobe_events(void)
 	return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
 }
 
+static const char *tracefs_available_filter_functions(void)
+{
+	return use_debugfs() ? DEBUGFS"/available_filter_functions"
+			     : TRACEFS"/available_filter_functions";
+}
+
+static const char *tracefs_available_filter_functions_addrs(void)
+{
+	return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs"
+			     : TRACEFS"/available_filter_functions_addrs";
+}
+
 static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
 					 const char *kfunc_name, size_t offset)
 {
@@ -10539,25 +10575,158 @@ struct kprobe_multi_resolve {
 	size_t cnt;
 };
 
-static int
-resolve_kprobe_multi_cb(unsigned long long sym_addr, char sym_type,
-			const char *sym_name, void *ctx)
+struct avail_kallsyms_data {
+	char **syms;
+	size_t cnt;
+	struct kprobe_multi_resolve *res;
+};
+
+static int avail_func_cmp(const void *a, const void *b)
 {
-	struct kprobe_multi_resolve *res = ctx;
+	return strcmp(*(const char **)a, *(const char **)b);
+}
+
+static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type,
+			     const char *sym_name, void *ctx)
+{
+	struct avail_kallsyms_data *data = ctx;
+	struct kprobe_multi_resolve *res = data->res;
 	int err;
 
-	if (!glob_match(sym_name, res->pattern))
+	if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
 		return 0;
 
-	err = libbpf_ensure_mem((void **) &res->addrs, &res->cap, sizeof(unsigned long),
-				res->cnt + 1);
+	err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
 	if (err)
 		return err;
 
-	res->addrs[res->cnt++] = (unsigned long) sym_addr;
+	res->addrs[res->cnt++] = (unsigned long)sym_addr;
 	return 0;
 }
 
+static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res)
+{
+	const char *available_functions_file = tracefs_available_filter_functions();
+	struct avail_kallsyms_data data;
+	char sym_name[500];
+	FILE *f;
+	int err = 0, ret, i;
+	char **syms = NULL;
+	size_t cap = 0, cnt = 0;
+
+	f = fopen(available_functions_file, "re");
+	if (!f) {
+		err = -errno;
+		pr_warn("failed to open %s: %d\n", available_functions_file, err);
+		return err;
+	}
+
+	while (true) {
+		char *name;
+
+		ret = fscanf(f, "%499s%*[^\n]\n", sym_name);
+		if (ret == EOF && feof(f))
+			break;
+
+		if (ret != 1) {
+			pr_warn("failed to parse available_filter_functions entry: %d\n", ret);
+			err = -EINVAL;
+			goto cleanup;
+		}
+
+		if (!glob_match(sym_name, res->pattern))
+			continue;
+
+		err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1);
+		if (err)
+			goto cleanup;
+
+		name = strdup(sym_name);
+		if (!name) {
+			err = -errno;
+			goto cleanup;
+		}
+
+		syms[cnt++] = name;
+	}
+
+	/* no entries found, bail out */
+	if (cnt == 0) {
+		err = -ENOENT;
+		goto cleanup;
+	}
+
+	/* sort available functions */
+	qsort(syms, cnt, sizeof(*syms), avail_func_cmp);
+
+	data.syms = syms;
+	data.res = res;
+	data.cnt = cnt;
+	libbpf_kallsyms_parse(avail_kallsyms_cb, &data);
+
+	if (res->cnt == 0)
+		err = -ENOENT;
+
+cleanup:
+	for (i = 0; i < cnt; i++)
+		free((char *)syms[i]);
+	free(syms);
+
+	fclose(f);
+	return err;
+}
+
+static bool has_available_filter_functions_addrs(void)
+{
+	return access(tracefs_available_filter_functions_addrs(), R_OK) != -1;
+}
+
+static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res)
+{
+	const char *available_path = tracefs_available_filter_functions_addrs();
+	char sym_name[500];
+	FILE *f;
+	int ret, err = 0;
+	unsigned long long sym_addr;
+
+	f = fopen(available_path, "re");
+	if (!f) {
+		err = -errno;
+		pr_warn("failed to open %s: %d\n", available_path, err);
+		return err;
+	}
+
+	while (true) {
+		ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name);
+		if (ret == EOF && feof(f))
+			break;
+
+		if (ret != 2) {
+			pr_warn("failed to parse available_filter_functions_addrs entry: %d\n",
+				ret);
+			err = -EINVAL;
+			goto cleanup;
+		}
+
+		if (!glob_match(sym_name, res->pattern))
+			continue;
+
+		err = libbpf_ensure_mem((void **)&res->addrs, &res->cap,
+					sizeof(*res->addrs), res->cnt + 1);
+		if (err)
+			goto cleanup;
+
+		res->addrs[res->cnt++] = (unsigned long)sym_addr;
+	}
+
+	if (res->cnt == 0)
+		err = -ENOENT;
+
+cleanup:
+	fclose(f);
+	return err;
+}
+
 struct bpf_link *
 bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
 				      const char *pattern,
@@ -10594,13 +10763,12 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
 		return libbpf_err_ptr(-EINVAL);
 
 	if (pattern) {
-		err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res);
+		if (has_available_filter_functions_addrs())
+			err = libbpf_available_kprobes_parse(&res);
+		else
+			err = libbpf_available_kallsyms_parse(&res);
 		if (err)
 			goto error;
-		if (!res.cnt) {
-			err = -ENOENT;
-			goto error;
-		}
 		addrs = res.addrs;
 		cnt = res.cnt;
 	}
@@ -11811,6 +11979,48 @@ static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_l
 	return libbpf_get_error(*link);
 }
 
+struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog,
+					       const struct bpf_netfilter_opts *opts)
+{
+	LIBBPF_OPTS(bpf_link_create_opts, lopts);
+	struct bpf_link *link;
+	int prog_fd, link_fd;
+
+	if (!OPTS_VALID(opts, bpf_netfilter_opts))
+		return libbpf_err_ptr(-EINVAL);
+
+	prog_fd = bpf_program__fd(prog);
+	if (prog_fd < 0) {
+		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
+		return libbpf_err_ptr(-EINVAL);
+	}
+
+	link = calloc(1, sizeof(*link));
+	if (!link)
+		return libbpf_err_ptr(-ENOMEM);
+
+	link->detach = &bpf_link__detach_fd;
+
+	lopts.netfilter.pf = OPTS_GET(opts, pf, 0);
+	lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0);
+	lopts.netfilter.priority = OPTS_GET(opts, priority, 0);
+	lopts.netfilter.flags = OPTS_GET(opts, flags, 0);
+
+	link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts);
+	if (link_fd < 0) {
+		char errmsg[STRERR_BUFSIZE];
+
+		link_fd = -errno;
+		free(link);
+		pr_warn("prog '%s': failed to attach to netfilter: %s\n",
+			prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
+		return libbpf_err_ptr(link_fd);
+	}
+	link->fd = link_fd;
+
+	return link;
+}
+
 struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
 {
 	struct bpf_link *link = NULL;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 754da73c643b..10642ad69d76 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -718,6 +718,21 @@ LIBBPF_API struct bpf_link *
 bpf_program__attach_freplace(const struct bpf_program *prog,
 			     int target_fd, const char *attach_func_name);
 
+struct bpf_netfilter_opts {
+	/* size of this struct, for forward/backward compatibility */
+	size_t sz;
+
+	__u32 pf;
+	__u32 hooknum;
+	__s32 priority;
+	__u32 flags;
+};
+#define bpf_netfilter_opts__last_field flags
+
+LIBBPF_API struct bpf_link *
+bpf_program__attach_netfilter(const struct bpf_program *prog,
+			      const struct bpf_netfilter_opts *opts);
+
 struct bpf_map;
 
 LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 7521a2fb7626..d9ec4407befa 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -395,4 +395,5 @@ LIBBPF_1.2.0 {
 LIBBPF_1.3.0 {
 	global:
 		bpf_obj_pin_opts;
+		bpf_program__attach_netfilter;
 } LIBBPF_1.2.0;
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
index f1a141555f08..37455d00b239 100644
--- a/tools/lib/bpf/usdt.c
+++ b/tools/lib/bpf/usdt.c
@@ -852,8 +852,11 @@ static int bpf_link_usdt_detach(struct bpf_link *link)
 		 * system is so exhausted on memory, it's the least of user's
 		 * concerns, probably.
		 * So just do our best here to return those IDs to usdt_manager.
+		 * Another edge case when we can legitimately get NULL is when
+		 * new_cnt is zero, which can happen in some edge cases, so we
+		 * need to be careful about that.
 		 */
-		if (new_free_ids) {
+		if (new_free_ids || new_cnt == 0) {
 			memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
 			       usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
 			man->free_spec_ids = new_free_ids;
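The diff also adds bpf_program__attach_netfilter() and struct
bpf_netfilter_opts to the public API (exported in the LIBBPF_1.3.0 map
above). A hedged usage sketch; the hook choice and priority are
illustrative, and the constants come from <linux/netfilter.h>:

#include <bpf/libbpf.h>
#include <linux/netfilter.h>

/* Attach an already-loaded SEC("netfilter") program to the IPv4
 * LOCAL_IN hook; pf/hooknum/priority map 1:1 onto the new
 * bpf_netfilter_opts fields shown in the diff above.
 */
static struct bpf_link *attach_nf(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_netfilter_opts, opts,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_LOCAL_IN,
		.priority = -128,	/* illustrative: run early */
	);

	return bpf_program__attach_netfilter(prog, &opts);
}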