author     Jakub Kicinski <jakub.kicinski@netronome.com>   2019-08-14 01:24:57 +0200
committer  Jakub Kicinski <jakub.kicinski@netronome.com>   2019-08-14 01:24:57 +0200
commit     708852dcac84d2b923f2e8c1327f6006f612416a (patch)
tree       d140423180b83750ad1eb0095cb80ec5342716d4 /samples
parent     net: hns3: Make hclge_func_reset_sync_vf static (diff)
parent     Merge branch 'bpf-libbpf-read-sysfs-btf' (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
The following pull-request contains BPF updates for your *net-next* tree.
There is a small merge conflict in libbpf (Cc Andrii so he's in the loop
as well):
for (i = 1; i <= btf__get_nr_types(btf); i++) {
	t = (struct btf_type *)btf__type_by_id(btf, i);
	if (!has_datasec && btf_is_var(t)) {
		/* replace VAR with INT */
		t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
<<<<<<< HEAD
		/*
		 * using size = 1 is the safest choice, 4 will be too
		 * big and cause kernel BTF validation failure if
		 * original variable took less than 4 bytes
		 */
		t->size = 1;
		*(int *)(t+1) = BTF_INT_ENC(0, 0, 8);
	} else if (!has_datasec && kind == BTF_KIND_DATASEC) {
=======
		t->size = sizeof(int);
		*(int *)(t + 1) = BTF_INT_ENC(0, 0, 32);
	} else if (!has_datasec && btf_is_datasec(t)) {
>>>>>>> 72ef80b5ee131e96172f19e74b4f98fa3404efe8
		/* replace DATASEC with STRUCT */
The conflict is between the two commits 1d4126c4e119 ("libbpf: sanitize VAR to
conservative 1-byte INT") and b03bc6853c0e ("libbpf: convert libbpf code to
use new btf helpers"), so we need to pick the sanitization fixup as well as
use the new btf_is_datasec() helper and the whitespace cleanup. The resolution
looks like the following:
[...]
if (!has_datasec && btf_is_var(t)) {
	/* replace VAR with INT */
	t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
	/*
	 * using size = 1 is the safest choice, 4 will be too
	 * big and cause kernel BTF validation failure if
	 * original variable took less than 4 bytes
	 */
	t->size = 1;
	*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
} else if (!has_datasec && btf_is_datasec(t)) {
	/* replace DATASEC with STRUCT */
[...]
The main changes are:
1) Addition of core parts of the compile once - run everywhere (co-re) effort,
that is, relocation of field offsets in libbpf as well as exposure of the
kernel's own BTF via sysfs and loading through libbpf, from Andrii (an
illustrative sketch of reading the sysfs BTF follows after the sign-off below).
More info on co-re: http://vger.kernel.org/bpfconf2019.html#session-2
and http://vger.kernel.org/lpc-bpf2018.html#session-2
2) Enable passing input flags to the BPF flow dissector to customize parsing
and allow it to stop early, similar to the C-based one, from Stanislav.
3) Add a BPF helper function that allows generating SYN cookies from XDP and
tc BPF programs, from Petar (an illustrative sketch follows after the sign-off below).
4) Add a devmap hash-based map type for more flexibility in device lookups for
redirects, from Toke (an illustrative sketch follows after the sign-off below).
5) Improvements to XDP forwarding sample code now utilizing recently enabled
devmap lookups, from Jesper.
6) Add support for reporting the effective cgroup progs in bpftool, from Jakub
and Takshak.
7) Fix reading kernel config from bpftool via /proc/config.gz, from Peter.
8) Fix AF_XDP umem pages mapping for 32 bit architectures, from Ivan.
9) Follow-up to add two more BPF loop tests for the selftest suite, from Alexei.
10) Add perf event output helper also for other skb-based program types, from Allan.
11) Fix a co-re related compilation error in selftests, from Yonghong.
====================
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
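For 1) above, the kernel's BTF is exposed as a raw blob under /sys/kernel/btf/vmlinux.
The sketch below is illustrative only and not part of this pull request: it reads that
blob from sysfs and walks the types with the same btf__get_nr_types()/btf__type_by_id()
helpers that appear in the conflict snippet above; error handling is minimal and the
path assumes a kernel built with BTF enabled.

#include <stdio.h>
#include <stdlib.h>
#include <linux/btf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* Kernel BTF exposed via sysfs by this series */
	const char *path = "/sys/kernel/btf/vmlinux";
	struct btf *btf;
	char *buf;
	long size;
	FILE *f;
	int i;

	f = fopen(path, "rb");
	if (!f) {
		perror("fopen");
		return 1;
	}
	fseek(f, 0, SEEK_END);
	size = ftell(f);
	fseek(f, 0, SEEK_SET);

	buf = malloc(size);
	if (!buf || fread(buf, 1, size, f) != (size_t)size) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* Parse the raw BTF blob and iterate over all type IDs */
	btf = btf__new(buf, size);
	if (libbpf_get_error(btf)) {
		fprintf(stderr, "failed to parse BTF\n");
		free(buf);
		return 1;
	}

	for (i = 1; i <= btf__get_nr_types(btf); i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);

		printf("[%d] kind %u name_off %u\n",
		       i, BTF_INFO_KIND(t->info), t->name_off);
	}

	btf__free(btf);
	free(buf);
	return 0;
}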
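For 3) above, the helper is bpf_tcp_gen_syncookie(). The XDP-side sketch below is not
from this series: it assumes the selftests-style bpf_helpers.h/bpf_endian.h wrappers,
handles only option-less IPv4 TCP SYNs, and merely prints the generated cookie instead
of building a SYN-ACK, pairing the usual bpf_skc_lookup_tcp()/bpf_sk_release() socket
lookup around the new helper:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

SEC("xdp_syncookie")
int xdp_gen_syncookie(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct bpf_sock_tuple tuple = {};
	struct ethhdr *eth = data;
	struct bpf_sock *sk;
	struct iphdr *iph;
	struct tcphdr *th;
	long long cookie;

	/* Bounds-check eth + IPv4 + TCP headers for the verifier */
	if (data + sizeof(*eth) + sizeof(*iph) + sizeof(*th) > data_end)
		return XDP_PASS;
	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;

	iph = data + sizeof(*eth);
	if (iph->protocol != IPPROTO_TCP || iph->ihl != 5)
		return XDP_PASS;
	th = (void *)iph + sizeof(*iph);
	if (!th->syn || th->ack)
		return XDP_PASS;

	tuple.ipv4.saddr = iph->saddr;
	tuple.ipv4.daddr = iph->daddr;
	tuple.ipv4.sport = th->source;
	tuple.ipv4.dport = th->dest;

	/* Find the local socket this SYN is aimed at */
	sk = bpf_skc_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
				BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return XDP_PASS;

	if (sk->state == BPF_TCP_LISTEN) {
		/* On success the cookie is in the lower 32 bits */
		cookie = bpf_tcp_gen_syncookie(sk, iph, sizeof(*iph),
					       th, sizeof(*th));
		if (cookie >= 0)
			bpf_printk("SYN cookie: %u\n", (__u32)cookie);
	}

	bpf_sk_release(sk);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";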
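For 4) above, the new map type is BPF_MAP_TYPE_DEVMAP_HASH, which keys redirect targets
by an arbitrary u32 (typically the ifindex itself) instead of a dense 0..max_entries
array index. A small kernel-side sketch in the same bpf_map_def style as the samples in
the diff below; the map name, sizes and the fixed egress ifindex are purely illustrative:

// SPDX-License-Identifier: GPL-2.0
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

/* Hash-based devmap: the key is the egress ifindex itself, so sparse or
 * large ifindex values no longer need one array slot per possible index.
 */
struct bpf_map_def SEC("maps") tx_ports_hash = {
	.type = BPF_MAP_TYPE_DEVMAP_HASH,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 64,
};

SEC("xdp_redirect_by_ifindex")
int xdp_redirect_prog(struct xdp_md *ctx)
{
	/* Illustrative fixed egress ifindex; a real program would derive it
	 * from a FIB lookup as xdp_fwd_kern.c does in the diff below.
	 */
	int egress_ifindex = 4;

	/* Only redirect if user space has populated this ifindex */
	if (!bpf_map_lookup_elem(&tx_ports_hash, &egress_ifindex))
		return XDP_PASS;

	return bpf_redirect_map(&tx_ports_hash, egress_ifindex, 0);
}

char _license[] SEC("license") = "GPL";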
Diffstat (limited to 'samples')
-rw-r--r--  samples/bpf/trace_output_user.c    |  43
-rw-r--r--  samples/bpf/xdp_fwd_kern.c         |  39
-rw-r--r--  samples/bpf/xdp_fwd_user.c         |  35
-rw-r--r--  samples/bpf/xdp_sample_pkts_user.c |  61
4 files changed, 84 insertions(+), 94 deletions(-)
diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
index 2dd1d39b152a..8ee47699a870 100644
--- a/samples/bpf/trace_output_user.c
+++ b/samples/bpf/trace_output_user.c
@@ -18,9 +18,6 @@
 #include <libbpf.h>
 #include "bpf_load.h"
 #include "perf-sys.h"
-#include "trace_helpers.h"
-
-static int pmu_fd;
 
 static __u64 time_get_ns(void)
 {
@@ -31,12 +28,12 @@ static __u64 time_get_ns(void)
 }
 
 static __u64 start_time;
+static __u64 cnt;
 
 #define MAX_CNT 100000ll
 
-static int print_bpf_output(void *data, int size)
+static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
 {
-	static __u64 cnt;
 	struct {
 		__u64 pid;
 		__u64 cookie;
@@ -45,7 +42,7 @@ static int print_bpf_output(void *data, int size)
 	if (e->cookie != 0x12345678) {
 		printf("BUG pid %llx cookie %llx sized %d\n",
 		       e->pid, e->cookie, size);
-		return LIBBPF_PERF_EVENT_ERROR;
+		return;
 	}
 
 	cnt++;
@@ -53,30 +50,14 @@ static int print_bpf_output(void *data, int size)
 	if (cnt == MAX_CNT) {
 		printf("recv %lld events per sec\n",
 		       MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
-		return LIBBPF_PERF_EVENT_DONE;
+		return;
 	}
-
-	return LIBBPF_PERF_EVENT_CONT;
-}
-
-static void test_bpf_perf_event(void)
-{
-	struct perf_event_attr attr = {
-		.sample_type = PERF_SAMPLE_RAW,
-		.type = PERF_TYPE_SOFTWARE,
-		.config = PERF_COUNT_SW_BPF_OUTPUT,
-	};
-	int key = 0;
-
-	pmu_fd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
-
-	assert(pmu_fd >= 0);
-	assert(bpf_map_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);
-	ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
 }
 
 int main(int argc, char **argv)
 {
+	struct perf_buffer_opts pb_opts = {};
+	struct perf_buffer *pb;
 	char filename[256];
 	FILE *f;
 	int ret;
@@ -88,16 +69,20 @@ int main(int argc, char **argv)
 		return 1;
 	}
 
-	test_bpf_perf_event();
-
-	if (perf_event_mmap(pmu_fd) < 0)
+	pb_opts.sample_cb = print_bpf_output;
+	pb = perf_buffer__new(map_fd[0], 8, &pb_opts);
+	ret = libbpf_get_error(pb);
+	if (ret) {
+		printf("failed to setup perf_buffer: %d\n", ret);
 		return 1;
+	}
 
 	f = popen("taskset 1 dd if=/dev/zero of=/dev/null", "r");
 	(void) f;
 
 	start_time = time_get_ns();
-	ret = perf_event_poller(pmu_fd, print_bpf_output);
+	while ((ret = perf_buffer__poll(pb, 1000)) >= 0 && cnt < MAX_CNT) {
+	}
 	kill(0, SIGINT);
 	return ret;
 }
diff --git a/samples/bpf/xdp_fwd_kern.c b/samples/bpf/xdp_fwd_kern.c
index a7e94e7ff87d..701a30f258b1 100644
--- a/samples/bpf/xdp_fwd_kern.c
+++ b/samples/bpf/xdp_fwd_kern.c
@@ -23,7 +23,8 @@
 
 #define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
 
-struct bpf_map_def SEC("maps") tx_port = {
+/* For TX-traffic redirect requires net_device ifindex to be in this devmap */
+struct bpf_map_def SEC("maps") xdp_tx_ports = {
 	.type = BPF_MAP_TYPE_DEVMAP,
 	.key_size = sizeof(int),
 	.value_size = sizeof(int),
@@ -102,14 +103,34 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 	fib_params.ifindex = ctx->ingress_ifindex;
 
 	rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
-
-	/* verify egress index has xdp support
-	 * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with
-	 *       cannot pass map_type 14 into func bpf_map_lookup_elem#1:
-	 * NOTE: without verification that egress index supports XDP
-	 *       forwarding packets are dropped.
+	/*
+	 * Some rc (return codes) from bpf_fib_lookup() are important,
+	 * to understand how this XDP-prog interacts with network stack.
+	 *
+	 * BPF_FIB_LKUP_RET_NO_NEIGH:
+	 *  Even if route lookup was a success, then the MAC-addresses are also
+	 *  needed.  This is obtained from arp/neighbour table, but if table is
+	 *  (still) empty then BPF_FIB_LKUP_RET_NO_NEIGH is returned.  To avoid
+	 *  doing ARP lookup directly from XDP, then send packet to normal
+	 *  network stack via XDP_PASS and expect it will do ARP resolution.
+	 *
+	 * BPF_FIB_LKUP_RET_FWD_DISABLED:
+	 *  The bpf_fib_lookup respect sysctl net.ipv{4,6}.conf.all.forwarding
+	 *  setting, and will return BPF_FIB_LKUP_RET_FWD_DISABLED if not
+	 *  enabled this on ingress device.
 	 */
-	if (rc == 0) {
+	if (rc == BPF_FIB_LKUP_RET_SUCCESS) {
+		/* Verify egress index has been configured as TX-port.
+		 * (Note: User can still have inserted an egress ifindex that
+		 * doesn't support XDP xmit, which will result in packet drops).
+		 *
+		 * Note: lookup in devmap supported since 0cdbb4b09a0.
+		 * If not supported will fail with:
+		 *  cannot pass map_type 14 into func bpf_map_lookup_elem#1:
+		 */
+		if (!bpf_map_lookup_elem(&xdp_tx_ports, &fib_params.ifindex))
+			return XDP_PASS;
+
 		if (h_proto == htons(ETH_P_IP))
 			ip_decrease_ttl(iph);
 		else if (h_proto == htons(ETH_P_IPV6))
@@ -117,7 +138,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 
 		memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
 		memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
-		return bpf_redirect_map(&tx_port, fib_params.ifindex, 0);
+		return bpf_redirect_map(&xdp_tx_ports, fib_params.ifindex, 0);
 	}
 
 	return XDP_PASS;
diff --git a/samples/bpf/xdp_fwd_user.c b/samples/bpf/xdp_fwd_user.c
index 5b46ee12c696..97ff1dad7669 100644
--- a/samples/bpf/xdp_fwd_user.c
+++ b/samples/bpf/xdp_fwd_user.c
@@ -27,14 +27,20 @@
 #include "libbpf.h"
 #include <bpf/bpf.h>
 
-
-static int do_attach(int idx, int fd, const char *name)
+static int do_attach(int idx, int prog_fd, int map_fd, const char *name)
 {
 	int err;
 
-	err = bpf_set_link_xdp_fd(idx, fd, 0);
-	if (err < 0)
+	err = bpf_set_link_xdp_fd(idx, prog_fd, 0);
+	if (err < 0) {
 		printf("ERROR: failed to attach program to %s\n", name);
+		return err;
+	}
+
+	/* Adding ifindex as a possible egress TX port */
+	err = bpf_map_update_elem(map_fd, &idx, &idx, 0);
+	if (err)
+		printf("ERROR: failed using device %s as TX-port\n", name);
 
 	return err;
 }
@@ -47,6 +53,9 @@ static int do_detach(int idx, const char *name)
 	if (err < 0)
 		printf("ERROR: failed to detach program from %s\n", name);
 
+	/* TODO: Remember to cleanup map, when adding use of shared map
+	 *  bpf_map_delete_elem((map_fd, &idx);
+	 */
 	return err;
 }
 
@@ -67,10 +76,10 @@ int main(int argc, char **argv)
 	};
 	const char *prog_name = "xdp_fwd";
 	struct bpf_program *prog;
+	int prog_fd, map_fd = -1;
 	char filename[PATH_MAX];
 	struct bpf_object *obj;
 	int opt, i, idx, err;
-	int prog_fd, map_fd;
 	int attach = 1;
 	int ret = 0;
 
@@ -103,8 +112,14 @@ int main(int argc, char **argv)
 			return 1;
 		}
 
-		if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+		err = bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd);
+		if (err) {
+			printf("Does kernel support devmap lookup?\n");
+			/* If not, the error message will be:
+			 *  "cannot pass map_type 14 into func bpf_map_lookup_elem#1"
+			 */
 			return 1;
+		}
 
 		prog = bpf_object__find_program_by_title(obj, prog_name);
 		prog_fd = bpf_program__fd(prog);
@@ -113,16 +128,12 @@ int main(int argc, char **argv)
 			return 1;
 		}
 		map_fd = bpf_map__fd(bpf_object__find_map_by_name(obj,
-							"tx_port"));
+							"xdp_tx_ports"));
 		if (map_fd < 0) {
 			printf("map not found: %s\n", strerror(map_fd));
 			return 1;
 		}
 	}
-	if (attach) {
-		for (i = 1; i < 64; ++i)
-			bpf_map_update_elem(map_fd, &i, &i, 0);
-	}
 
 	for (i = optind; i < argc; ++i) {
 		idx = if_nametoindex(argv[i]);
@@ -138,7 +149,7 @@ int main(int argc, char **argv)
 			if (err)
 				ret = err;
 		} else {
-			err = do_attach(idx, prog_fd, argv[i]);
+			err = do_attach(idx, prog_fd, map_fd, argv[i]);
 			if (err)
 				ret = err;
 		}
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
index dc66345a929a..3002714e3cd5 100644
--- a/samples/bpf/xdp_sample_pkts_user.c
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -17,14 +17,13 @@
 #include <linux/if_link.h>
 
 #include "perf-sys.h"
-#include "trace_helpers.h"
 
 #define MAX_CPUS 128
-static int pmu_fds[MAX_CPUS], if_idx;
-static struct perf_event_mmap_page *headers[MAX_CPUS];
+static int if_idx;
 static char *if_name;
 static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
 static __u32 prog_id;
+static struct perf_buffer *pb = NULL;
 
 static int do_attach(int idx, int fd, const char *name)
 {
@@ -73,7 +72,7 @@ static int do_detach(int idx, const char *name)
 
 #define SAMPLE_SIZE 64
 
-static int print_bpf_output(void *data, int size)
+static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
 {
 	struct {
 		__u16 cookie;
@@ -83,45 +82,20 @@
 	int i;
 
 	if (e->cookie != 0xdead) {
-		printf("BUG cookie %x sized %d\n",
-		       e->cookie, size);
-		return LIBBPF_PERF_EVENT_ERROR;
+		printf("BUG cookie %x sized %d\n", e->cookie, size);
+		return;
 	}
 
 	printf("Pkt len: %-5d bytes. Ethernet hdr: ", e->pkt_len);
 	for (i = 0; i < 14 && i < e->pkt_len; i++)
 		printf("%02x ", e->pkt_data[i]);
 	printf("\n");
-
-	return LIBBPF_PERF_EVENT_CONT;
-}
-
-static void test_bpf_perf_event(int map_fd, int num)
-{
-	struct perf_event_attr attr = {
-		.sample_type = PERF_SAMPLE_RAW,
-		.type = PERF_TYPE_SOFTWARE,
-		.config = PERF_COUNT_SW_BPF_OUTPUT,
-		.wakeup_events = 1, /* get an fd notification for every event */
-	};
-	int i;
-
-	for (i = 0; i < num; i++) {
-		int key = i;
-
-		pmu_fds[i] = sys_perf_event_open(&attr, -1/*pid*/, i/*cpu*/,
-						 -1/*group_fd*/, 0);
-
-		assert(pmu_fds[i] >= 0);
-		assert(bpf_map_update_elem(map_fd, &key,
-					   &pmu_fds[i], BPF_ANY) == 0);
-		ioctl(pmu_fds[i], PERF_EVENT_IOC_ENABLE, 0);
-	}
 }
 
 static void sig_handler(int signo)
 {
 	do_detach(if_idx, if_name);
+	perf_buffer__free(pb);
 	exit(0);
 }
 
@@ -140,13 +114,13 @@
 	struct bpf_prog_load_attr prog_load_attr = {
 		.prog_type = BPF_PROG_TYPE_XDP,
 	};
+	struct perf_buffer_opts pb_opts = {};
 	const char *optstr = "F";
 	int prog_fd, map_fd, opt;
 	struct bpf_object *obj;
 	struct bpf_map *map;
 	char filename[256];
-	int ret, err, i;
-	int numcpus;
+	int ret, err;
 
 	while ((opt = getopt(argc, argv, optstr)) != -1) {
 		switch (opt) {
@@ -169,10 +143,6 @@
 		return 1;
 	}
 
-	numcpus = get_nprocs();
-	if (numcpus > MAX_CPUS)
-		numcpus = MAX_CPUS;
-
 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
 	prog_load_attr.file = filename;
 
@@ -211,14 +181,17 @@
 		return 1;
 	}
 
-	test_bpf_perf_event(map_fd, numcpus);
+	pb_opts.sample_cb = print_bpf_output;
+	pb = perf_buffer__new(map_fd, 8, &pb_opts);
+	err = libbpf_get_error(pb);
+	if (err) {
+		perror("perf_buffer setup failed");
+		return 1;
+	}
 
-	for (i = 0; i < numcpus; i++)
-		if (perf_event_mmap_header(pmu_fds[i], &headers[i]) < 0)
-			return 1;
+	while ((ret = perf_buffer__poll(pb, 1000)) >= 0) {
+	}
 
-	ret = perf_event_poller_multi(pmu_fds, headers, numcpus,
-				      print_bpf_output);
 	kill(0, SIGINT);
 	return ret;
 }