Diffstat (limited to 'tools/testing')
98 files changed, 8975 insertions, 418 deletions
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 1195bd85af38..f4522e0a2cab 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -54,6 +54,7 @@ TARGETS += splice TARGETS += static_keys TARGETS += sync TARGETS += sysctl +TARGETS += tc-testing TARGETS += timens ifneq (1, $(quicktest)) TARGETS += timers diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 22aaec74ea0a..e7a8cf83ba48 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -111,6 +111,7 @@ SCRATCH_DIR := $(OUTPUT)/tools BUILD_DIR := $(SCRATCH_DIR)/build INCLUDE_DIR := $(SCRATCH_DIR)/include BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a +RESOLVE_BTFIDS := $(BUILD_DIR)/resolve_btfids/resolve_btfids # Define simple and short `make test_progs`, `make test_sysctl`, etc targets # to build individual tests. @@ -134,12 +135,12 @@ $(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ) $(call msg,CC,,$@) $(CC) -c $(CFLAGS) -o $@ $< -VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \ +VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ ../../../../vmlinux \ /sys/kernel/btf/vmlinux \ /boot/vmlinux-$(shell uname -r) -VMLINUX_BTF := $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) +VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) $(OUTPUT)/runqslower: $(BPFOBJ) $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \ @@ -177,13 +178,28 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \ DESTDIR=$(SCRATCH_DIR) prefix= all install_headers -$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(INCLUDE_DIR): +$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR): $(call msg,MKDIR,,$@) mkdir -p $@ $(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR) +ifeq ($(VMLINUX_H),) $(call msg,GEN,,$@) $(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@ +else + $(call msg,CP,,$@) + cp "$(VMLINUX_H)" $@ +endif + +$(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \ + $(TOOLSDIR)/bpf/resolve_btfids/main.c \ + $(TOOLSDIR)/lib/rbtree.c \ + $(TOOLSDIR)/lib/zalloc.c \ + $(TOOLSDIR)/lib/string.c \ + $(TOOLSDIR)/lib/ctype.c \ + $(TOOLSDIR)/lib/str_error_r.c + $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \ + OUTPUT=$(BUILD_DIR)/resolve_btfids/ BPFOBJ=$(BPFOBJ) # Get Clang's default includes on this system, as opposed to those seen by # '-target bpf'. 
This fixes "missing" files on some architectures/distros, @@ -347,9 +363,11 @@ endif $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \ + $(RESOLVE_BTFIDS) \ | $(TRUNNER_BINARY)-extras $$(call msg,BINARY,,$$@) $$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ + $(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@ endef diff --git a/tools/testing/selftests/bpf/bpf_legacy.h b/tools/testing/selftests/bpf/bpf_legacy.h index 6f8988738bc1..719ab56cdb5d 100644 --- a/tools/testing/selftests/bpf/bpf_legacy.h +++ b/tools/testing/selftests/bpf/bpf_legacy.h @@ -2,20 +2,6 @@ #ifndef __BPF_LEGACY__ #define __BPF_LEGACY__ -/* - * legacy bpf_map_def with extra fields supported only by bpf_load(), do not - * use outside of samples/bpf - */ -struct bpf_map_def_legacy { - unsigned int type; - unsigned int key_size; - unsigned int value_size; - unsigned int max_entries; - unsigned int map_flags; - unsigned int inner_map_idx; - unsigned int numa_node; -}; - #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \ struct ____btf_map_##name { \ type_key key; \ diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index e36dd1a1780d..f56655690f9b 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -7,8 +7,6 @@ #include <arpa/inet.h> -#include <sys/epoll.h> - #include <linux/err.h> #include <linux/in.h> #include <linux/in6.h> @@ -17,8 +15,13 @@ #include "network_helpers.h" #define clean_errno() (errno == 0 ? "None" : strerror(errno)) -#define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ - __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__) +#define log_err(MSG, ...) ({ \ + int __save = errno; \ + fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ + __FILE__, __LINE__, clean_errno(), \ + ##__VA_ARGS__); \ + errno = __save; \ +}) struct ipv4_packet pkt_v4 = { .eth.h_proto = __bpf_constant_htons(ETH_P_IP), @@ -37,131 +40,169 @@ struct ipv6_packet pkt_v6 = { .tcp.doff = 5, }; -int start_server_with_port(int family, int type, __u16 port) +static int settimeo(int fd, int timeout_ms) { - struct sockaddr_storage addr = {}; - socklen_t len; - int fd; + struct timeval timeout = { .tv_sec = 3 }; - if (family == AF_INET) { - struct sockaddr_in *sin = (void *)&addr; + if (timeout_ms > 0) { + timeout.tv_sec = timeout_ms / 1000; + timeout.tv_usec = (timeout_ms % 1000) * 1000; + } - sin->sin_family = AF_INET; - sin->sin_port = htons(port); - len = sizeof(*sin); - } else { - struct sockaddr_in6 *sin6 = (void *)&addr; + if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeout, + sizeof(timeout))) { + log_err("Failed to set SO_RCVTIMEO"); + return -1; + } - sin6->sin6_family = AF_INET6; - sin6->sin6_port = htons(port); - len = sizeof(*sin6); + if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeout, + sizeof(timeout))) { + log_err("Failed to set SO_SNDTIMEO"); + return -1; } - fd = socket(family, type | SOCK_NONBLOCK, 0); + return 0; +} + +#define save_errno_close(fd) ({ int __save = errno; close(fd); errno = __save; }) + +int start_server(int family, int type, const char *addr_str, __u16 port, + int timeout_ms) +{ + struct sockaddr_storage addr = {}; + socklen_t len; + int fd; + + if (make_sockaddr(family, addr_str, port, &addr, &len)) + return -1; + + fd = socket(family, type, 0); if (fd < 0) { log_err("Failed to create server socket"); return -1; } + if (settimeo(fd, timeout_ms)) + goto error_close; + if (bind(fd, (const struct sockaddr *)&addr, len) < 0) { 
log_err("Failed to bind socket"); - close(fd); - return -1; + goto error_close; } if (type == SOCK_STREAM) { if (listen(fd, 1) < 0) { log_err("Failed to listed on socket"); - close(fd); - return -1; + goto error_close; } } return fd; + +error_close: + save_errno_close(fd); + return -1; } -int start_server(int family, int type) +static int connect_fd_to_addr(int fd, + const struct sockaddr_storage *addr, + socklen_t addrlen) { - return start_server_with_port(family, type, 0); -} + if (connect(fd, (const struct sockaddr *)addr, addrlen)) { + log_err("Failed to connect to server"); + return -1; + } -static const struct timeval timeo_sec = { .tv_sec = 3 }; -static const size_t timeo_optlen = sizeof(timeo_sec); + return 0; +} -int connect_to_fd(int family, int type, int server_fd) +int connect_to_fd(int server_fd, int timeout_ms) { - int fd, save_errno; + struct sockaddr_storage addr; + struct sockaddr_in *addr_in; + socklen_t addrlen, optlen; + int fd, type; - fd = socket(family, type, 0); - if (fd < 0) { - log_err("Failed to create client socket"); + optlen = sizeof(type); + if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) { + log_err("getsockopt(SOL_TYPE)"); return -1; } - if (connect_fd_to_fd(fd, server_fd) < 0 && errno != EINPROGRESS) { - save_errno = errno; - close(fd); - errno = save_errno; + addrlen = sizeof(addr); + if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) { + log_err("Failed to get server addr"); return -1; } + addr_in = (struct sockaddr_in *)&addr; + fd = socket(addr_in->sin_family, type, 0); + if (fd < 0) { + log_err("Failed to create client socket"); + return -1; + } + + if (settimeo(fd, timeout_ms)) + goto error_close; + + if (connect_fd_to_addr(fd, &addr, addrlen)) + goto error_close; + return fd; + +error_close: + save_errno_close(fd); + return -1; } -int connect_fd_to_fd(int client_fd, int server_fd) +int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms) { struct sockaddr_storage addr; socklen_t len = sizeof(addr); - int save_errno; - if (setsockopt(client_fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec, - timeo_optlen)) { - log_err("Failed to set SO_RCVTIMEO"); + if (settimeo(client_fd, timeout_ms)) return -1; - } if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) { log_err("Failed to get server addr"); return -1; } - if (connect(client_fd, (const struct sockaddr *)&addr, len) < 0) { - if (errno != EINPROGRESS) { - save_errno = errno; - log_err("Failed to connect to server"); - errno = save_errno; - } + if (connect_fd_to_addr(client_fd, &addr, len)) return -1; - } return 0; } -int connect_wait(int fd) +int make_sockaddr(int family, const char *addr_str, __u16 port, + struct sockaddr_storage *addr, socklen_t *len) { - struct epoll_event ev = {}, events[2]; - int timeout_ms = 1000; - int efd, nfd; - - efd = epoll_create1(EPOLL_CLOEXEC); - if (efd < 0) { - log_err("Failed to open epoll fd"); - return -1; - } + if (family == AF_INET) { + struct sockaddr_in *sin = (void *)addr; - ev.events = EPOLLRDHUP | EPOLLOUT; - ev.data.fd = fd; + sin->sin_family = AF_INET; + sin->sin_port = htons(port); + if (addr_str && + inet_pton(AF_INET, addr_str, &sin->sin_addr) != 1) { + log_err("inet_pton(AF_INET, %s)", addr_str); + return -1; + } + if (len) + *len = sizeof(*sin); + return 0; + } else if (family == AF_INET6) { + struct sockaddr_in6 *sin6 = (void *)addr; - if (epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev) < 0) { - log_err("Failed to register fd=%d on epoll fd=%d", fd, efd); - close(efd); - return -1; + sin6->sin6_family = AF_INET6; + 
sin6->sin6_port = htons(port); + if (addr_str && + inet_pton(AF_INET6, addr_str, &sin6->sin6_addr) != 1) { + log_err("inet_pton(AF_INET6, %s)", addr_str); + return -1; + } + if (len) + *len = sizeof(*sin6); + return 0; } - - nfd = epoll_wait(efd, events, ARRAY_SIZE(events), timeout_ms); - if (nfd < 0) - log_err("Failed to wait for I/O event on epoll fd=%d", efd); - - close(efd); - return nfd; + return -1; } diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index 6a8009605670..c3728f6667e4 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -33,10 +33,11 @@ struct ipv6_packet { } __packed; extern struct ipv6_packet pkt_v6; -int start_server(int family, int type); -int start_server_with_port(int family, int type, __u16 port); -int connect_to_fd(int family, int type, int server_fd); -int connect_fd_to_fd(int client_fd, int server_fd); -int connect_wait(int client_fd); +int start_server(int family, int type, const char *addr, __u16 port, + int timeout_ms); +int connect_to_fd(int server_fd, int timeout_ms); +int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms); +int make_sockaddr(int family, const char *addr_str, __u16 port, + struct sockaddr_storage *addr, socklen_t *len); #endif diff --git a/tools/testing/selftests/bpf/prog_tests/autoload.c b/tools/testing/selftests/bpf/prog_tests/autoload.c new file mode 100644 index 000000000000..3693f7d133eb --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/autoload.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> +#include <time.h> +#include "test_autoload.skel.h" + +void test_autoload(void) +{ + int duration = 0, err; + struct test_autoload* skel; + + skel = test_autoload__open_and_load(); + /* prog3 should be broken */ + if (CHECK(skel, "skel_open_and_load", "unexpected success\n")) + goto cleanup; + + skel = test_autoload__open(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + goto cleanup; + + /* don't load prog3 */ + bpf_program__set_autoload(skel->progs.prog3, false); + + err = test_autoload__load(skel); + if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) + goto cleanup; + + err = test_autoload__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + usleep(1); + + CHECK(!skel->bss->prog1_called, "prog1", "not called\n"); + CHECK(!skel->bss->prog2_called, "prog2", "not called\n"); + CHECK(skel->bss->prog3_called, "prog3", "called?!\n"); + +cleanup: + test_autoload__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 87c29dde1cf9..fed42755416d 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -5,7 +5,12 @@ #include "bpf_iter_netlink.skel.h" #include "bpf_iter_bpf_map.skel.h" #include "bpf_iter_task.skel.h" +#include "bpf_iter_task_stack.skel.h" #include "bpf_iter_task_file.skel.h" +#include "bpf_iter_tcp4.skel.h" +#include "bpf_iter_tcp6.skel.h" +#include "bpf_iter_udp4.skel.h" +#include "bpf_iter_udp6.skel.h" #include "bpf_iter_test_kern1.skel.h" #include "bpf_iter_test_kern2.skel.h" #include "bpf_iter_test_kern3.skel.h" @@ -106,6 +111,20 @@ static void test_task(void) bpf_iter_task__destroy(skel); } +static void test_task_stack(void) +{ + struct bpf_iter_task_stack *skel; + + skel = 
bpf_iter_task_stack__open_and_load(); + if (CHECK(!skel, "bpf_iter_task_stack__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_task_stack); + + bpf_iter_task_stack__destroy(skel); +} + static void test_task_file(void) { struct bpf_iter_task_file *skel; @@ -120,6 +139,62 @@ static void test_task_file(void) bpf_iter_task_file__destroy(skel); } +static void test_tcp4(void) +{ + struct bpf_iter_tcp4 *skel; + + skel = bpf_iter_tcp4__open_and_load(); + if (CHECK(!skel, "bpf_iter_tcp4__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_tcp4); + + bpf_iter_tcp4__destroy(skel); +} + +static void test_tcp6(void) +{ + struct bpf_iter_tcp6 *skel; + + skel = bpf_iter_tcp6__open_and_load(); + if (CHECK(!skel, "bpf_iter_tcp6__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_tcp6); + + bpf_iter_tcp6__destroy(skel); +} + +static void test_udp4(void) +{ + struct bpf_iter_udp4 *skel; + + skel = bpf_iter_udp4__open_and_load(); + if (CHECK(!skel, "bpf_iter_udp4__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_udp4); + + bpf_iter_udp4__destroy(skel); +} + +static void test_udp6(void) +{ + struct bpf_iter_udp6 *skel; + + skel = bpf_iter_udp6__open_and_load(); + if (CHECK(!skel, "bpf_iter_udp6__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_udp6); + + bpf_iter_udp6__destroy(skel); +} + /* The expected string is less than 16 bytes */ static int do_read_with_fd(int iter_fd, const char *expected, bool read_one_char) @@ -392,8 +467,18 @@ void test_bpf_iter(void) test_bpf_map(); if (test__start_subtest("task")) test_task(); + if (test__start_subtest("task_stack")) + test_task_stack(); if (test__start_subtest("task_file")) test_task_file(); + if (test__start_subtest("tcp4")) + test_tcp4(); + if (test__start_subtest("tcp6")) + test_tcp6(); + if (test__start_subtest("udp4")) + test_udp4(); + if (test__start_subtest("udp6")) + test_udp6(); if (test__start_subtest("anon")) test_anon_iter(false); if (test__start_subtest("anon-read-one-char")) diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c index 059047af7df3..464edc1c1708 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c @@ -13,7 +13,7 @@ static void run_lookup_test(__u16 *g_serv_port, int out_sk) socklen_t addr_len = sizeof(addr); __u32 duration = 0; - serv_sk = start_server(AF_INET6, SOCK_STREAM); + serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); if (CHECK(serv_sk < 0, "start_server", "failed to start server\n")) return; @@ -24,17 +24,13 @@ static void run_lookup_test(__u16 *g_serv_port, int out_sk) *g_serv_port = addr.sin6_port; /* Client outside of test cgroup should fail to connect by timeout. */ - err = connect_fd_to_fd(out_sk, serv_sk); + err = connect_fd_to_fd(out_sk, serv_sk, 1000); if (CHECK(!err || errno != EINPROGRESS, "connect_fd_to_fd", "unexpected result err %d errno %d\n", err, errno)) goto cleanup; - err = connect_wait(out_sk); - if (CHECK(err, "connect_wait", "unexpected result %d\n", err)) - goto cleanup; - /* Client inside test cgroup should connect just fine. 
*/ - in_sk = connect_to_fd(AF_INET6, SOCK_STREAM, serv_sk); + in_sk = connect_to_fd(serv_sk, 0); if (CHECK(in_sk < 0, "connect_to_fd", "errno %d\n", errno)) goto cleanup; @@ -85,7 +81,7 @@ void test_cgroup_skb_sk_lookup(void) * differs from that of testing cgroup. Moving selftests process to * testing cgroup won't change cgroup id of an already created socket. */ - out_sk = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0); + out_sk = socket(AF_INET6, SOCK_STREAM, 0); if (CHECK_FAIL(out_sk < 0)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c index 17bbf76812ca..9229db2f5ca5 100644 --- a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c +++ b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c @@ -114,7 +114,7 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type) goto close_bpf_object; } - fd = connect_to_fd(family, type, server_fd); + fd = connect_to_fd(server_fd, 0); if (fd < 0) { err = -1; goto close_bpf_object; @@ -137,25 +137,25 @@ void test_connect_force_port(void) if (CHECK_FAIL(cgroup_fd < 0)) return; - server_fd = start_server_with_port(AF_INET, SOCK_STREAM, 60123); + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 60123, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_STREAM)); close(server_fd); - server_fd = start_server_with_port(AF_INET6, SOCK_STREAM, 60124); + server_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 60124, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_STREAM)); close(server_fd); - server_fd = start_server_with_port(AF_INET, SOCK_DGRAM, 60123); + server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 60123, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_DGRAM)); close(server_fd); - server_fd = start_server_with_port(AF_INET6, SOCK_DGRAM, 60124); + server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 60124, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_DGRAM)); diff --git a/tools/testing/selftests/bpf/prog_tests/core_retro.c b/tools/testing/selftests/bpf/prog_tests/core_retro.c new file mode 100644 index 000000000000..78e30d3a23d5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_retro.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#define _GNU_SOURCE +#include <test_progs.h> +#include "test_core_retro.skel.h" + +void test_core_retro(void) +{ + int err, zero = 0, res, duration = 0; + struct test_core_retro *skel; + + /* load program */ + skel = test_core_retro__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) + goto out_close; + + /* attach probe */ + err = test_core_retro__attach(skel); + if (CHECK(err, "attach_kprobe", "err %d\n", err)) + goto out_close; + + /* trigger */ + usleep(1); + + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res); + if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno)) + goto out_close; + + CHECK(res != getpid(), "pid_check", "got %d != exp %d\n", res, getpid()); + +out_close: + test_core_retro__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/endian.c b/tools/testing/selftests/bpf/prog_tests/endian.c new file mode 100644 index 000000000000..1a11612ace6c --- /dev/null +++ 
b/tools/testing/selftests/bpf/prog_tests/endian.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> +#include "test_endian.skel.h" + +static int duration; + +#define IN16 0x1234 +#define IN32 0x12345678U +#define IN64 0x123456789abcdef0ULL + +#define OUT16 0x3412 +#define OUT32 0x78563412U +#define OUT64 0xf0debc9a78563412ULL + +void test_endian(void) +{ + struct test_endian* skel; + struct test_endian__bss *bss; + int err; + + skel = test_endian__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + bss = skel->bss; + + bss->in16 = IN16; + bss->in32 = IN32; + bss->in64 = IN64; + + err = test_endian__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + usleep(1); + + CHECK(bss->out16 != OUT16, "out16", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->out16, (__u64)OUT16); + CHECK(bss->out32 != OUT32, "out32", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->out32, (__u64)OUT32); + CHECK(bss->out64 != OUT64, "out16", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->out64, (__u64)OUT64); + + CHECK(bss->const16 != OUT16, "const16", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->const16, (__u64)OUT16); + CHECK(bss->const32 != OUT32, "const32", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->const32, (__u64)OUT32); + CHECK(bss->const64 != OUT64, "const64", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->const64, (__u64)OUT64); +cleanup: + test_endian__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms.c b/tools/testing/selftests/bpf/prog_tests/ksyms.c new file mode 100644 index 000000000000..e3d6777226a8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ksyms.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <test_progs.h> +#include "test_ksyms.skel.h" +#include <sys/stat.h> + +static int duration; + +static __u64 kallsyms_find(const char *sym) +{ + char type, name[500]; + __u64 addr, res = 0; + FILE *f; + + f = fopen("/proc/kallsyms", "r"); + if (CHECK(!f, "kallsyms_fopen", "failed to open: %d\n", errno)) + return 0; + + while (fscanf(f, "%llx %c %499s%*[^\n]\n", &addr, &type, name) > 0) { + if (strcmp(name, sym) == 0) { + res = addr; + goto out; + } + } + + CHECK(false, "not_found", "symbol %s not found\n", sym); +out: + fclose(f); + return res; +} + +void test_ksyms(void) +{ + __u64 link_fops_addr = kallsyms_find("bpf_link_fops"); + const char *btf_path = "/sys/kernel/btf/vmlinux"; + struct test_ksyms *skel; + struct test_ksyms__data *data; + struct stat st; + __u64 btf_size; + int err; + + if (CHECK(stat(btf_path, &st), "stat_btf", "err %d\n", errno)) + return; + btf_size = st.st_size; + + skel = test_ksyms__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n")) + return; + + err = test_ksyms__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + data = skel->data; + CHECK(data->out__bpf_link_fops != link_fops_addr, "bpf_link_fops", + "got 0x%llx, exp 0x%llx\n", + data->out__bpf_link_fops, link_fops_addr); + CHECK(data->out__bpf_link_fops1 != 0, "bpf_link_fops1", + "got %llu, exp %llu\n", data->out__bpf_link_fops1, (__u64)0); + CHECK(data->out__btf_size != btf_size, "btf_size", + "got %llu, exp %llu\n", data->out__btf_size, btf_size); + CHECK(data->out__per_cpu_start != 0, "__per_cpu_start", + "got %llu, exp %llu\n", data->out__per_cpu_start, (__u64)0); + 
+cleanup: + test_ksyms__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c index c1168e4a9036..5a2a689dbb68 100644 --- a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c +++ b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c @@ -23,7 +23,7 @@ void test_load_bytes_relative(void) if (CHECK_FAIL(cgroup_fd < 0)) return; - server_fd = start_server(AF_INET, SOCK_STREAM); + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; @@ -49,7 +49,7 @@ void test_load_bytes_relative(void) if (CHECK_FAIL(err)) goto close_bpf_object; - client_fd = connect_to_fd(AF_INET, SOCK_STREAM, server_fd); + client_fd = connect_to_fd(server_fd, 0); if (CHECK_FAIL(client_fd < 0)) goto close_bpf_object; close(client_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/map_ptr.c b/tools/testing/selftests/bpf/prog_tests/map_ptr.c new file mode 100644 index 000000000000..c230a573c373 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_ptr.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook + +#include <test_progs.h> +#include <network_helpers.h> + +#include "map_ptr_kern.skel.h" + +void test_map_ptr(void) +{ + struct map_ptr_kern *skel; + __u32 duration = 0, retval; + char buf[128]; + int err; + + skel = map_ptr_kern__open_and_load(); + if (CHECK(!skel, "skel_open_load", "open_load failed\n")) + return; + + err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4, + sizeof(pkt_v4), buf, NULL, &retval, NULL); + + if (CHECK(err, "test_run", "err=%d errno=%d\n", err, errno)) + goto cleanup; + + if (CHECK(!retval, "retval", "retval=%d map_type=%u line=%u\n", retval, + skel->bss->g_map_type, skel->bss->g_line)) + goto cleanup; + +cleanup: + map_ptr_kern__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index a122ce3b360e..c33ec180b3f2 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -4,6 +4,7 @@ #include <sched.h> #include <sys/socket.h> #include <test_progs.h> +#include "test_perf_buffer.skel.h" #include "bpf/libbpf_internal.h" /* AddressSanitizer sometimes crashes due to data dereference below, due to @@ -25,16 +26,11 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) void test_perf_buffer(void) { - int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; - const char *prog_name = "kprobe/sys_nanosleep"; - const char *file = "./test_perf_buffer.o"; + int err, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; struct perf_buffer_opts pb_opts = {}; - struct bpf_map *perf_buf_map; + struct test_perf_buffer *skel; cpu_set_t cpu_set, cpu_seen; - struct bpf_program *prog; - struct bpf_object *obj; struct perf_buffer *pb; - struct bpf_link *link; bool *online; nr_cpus = libbpf_num_possible_cpus(); @@ -51,33 +47,21 @@ void test_perf_buffer(void) nr_on_cpus++; /* load program */ - err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd); - if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) { - obj = NULL; - goto out_close; - } - - prog = bpf_object__find_program_by_title(obj, prog_name); - if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name)) + skel = test_perf_buffer__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) goto out_close; 
- /* load map */ - perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map"); - if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n")) - goto out_close; - - /* attach kprobe */ - link = bpf_program__attach_kprobe(prog, false /* retprobe */, - SYS_NANOSLEEP_KPROBE_NAME); - if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link))) + /* attach probe */ + err = test_perf_buffer__attach(skel); + if (CHECK(err, "attach_kprobe", "err %d\n", err)) goto out_close; /* set up perf buffer */ pb_opts.sample_cb = on_sample; pb_opts.ctx = &cpu_seen; - pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts); + pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts); if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb))) - goto out_detach; + goto out_close; /* trigger kprobe on every CPU */ CPU_ZERO(&cpu_seen); @@ -94,7 +78,7 @@ void test_perf_buffer(void) &cpu_set); if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", i, err)) - goto out_detach; + goto out_close; usleep(1); } @@ -110,9 +94,7 @@ void test_perf_buffer(void) out_free_pb: perf_buffer__free(pb); -out_detach: - bpf_link__destroy(link); out_close: - bpf_object__close(obj); + test_perf_buffer__destroy(skel); free(online); } diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c new file mode 100644 index 000000000000..3b127cab4864 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/err.h> +#include <string.h> +#include <bpf/btf.h> +#include <bpf/libbpf.h> +#include <linux/btf.h> +#include <linux/kernel.h> +#define CONFIG_DEBUG_INFO_BTF +#include <linux/btf_ids.h> +#include "test_progs.h" + +static int duration; + +struct symbol { + const char *name; + int type; + int id; +}; + +struct symbol test_symbols[] = { + { "unused", BTF_KIND_UNKN, 0 }, + { "S", BTF_KIND_TYPEDEF, -1 }, + { "T", BTF_KIND_TYPEDEF, -1 }, + { "U", BTF_KIND_TYPEDEF, -1 }, + { "S", BTF_KIND_STRUCT, -1 }, + { "U", BTF_KIND_UNION, -1 }, + { "func", BTF_KIND_FUNC, -1 }, +}; + +BTF_ID_LIST(test_list_local) +BTF_ID_UNUSED +BTF_ID(typedef, S) +BTF_ID(typedef, T) +BTF_ID(typedef, U) +BTF_ID(struct, S) +BTF_ID(union, U) +BTF_ID(func, func) + +extern __u32 test_list_global[]; +BTF_ID_LIST_GLOBAL(test_list_global) +BTF_ID_UNUSED +BTF_ID(typedef, S) +BTF_ID(typedef, T) +BTF_ID(typedef, U) +BTF_ID(struct, S) +BTF_ID(union, U) +BTF_ID(func, func) + +static int +__resolve_symbol(struct btf *btf, int type_id) +{ + const struct btf_type *type; + const char *str; + unsigned int i; + + type = btf__type_by_id(btf, type_id); + if (!type) { + PRINT_FAIL("Failed to get type for ID %d\n", type_id); + return -1; + } + + for (i = 0; i < ARRAY_SIZE(test_symbols); i++) { + if (test_symbols[i].id != -1) + continue; + + if (BTF_INFO_KIND(type->info) != test_symbols[i].type) + continue; + + str = btf__name_by_offset(btf, type->name_off); + if (!str) { + PRINT_FAIL("Failed to get name for BTF ID %d\n", type_id); + return -1; + } + + if (!strcmp(str, test_symbols[i].name)) + test_symbols[i].id = type_id; + } + + return 0; +} + +static int resolve_symbols(void) +{ + struct btf *btf; + int type_id; + __u32 nr; + + btf = btf__parse_elf("btf_data.o", NULL); + if (CHECK(libbpf_get_error(btf), "resolve", + "Failed to load BTF from btf_data.o\n")) + return -1; + + nr = btf__get_nr_types(btf); + + for (type_id = 1; type_id <= nr; type_id++) { + if (__resolve_symbol(btf, type_id)) + 
break; + } + + btf__free(btf); + return 0; +} + +int test_resolve_btfids(void) +{ + __u32 *test_list, *test_lists[] = { test_list_local, test_list_global }; + unsigned int i, j; + int ret = 0; + + if (resolve_symbols()) + return -1; + + /* Check BTF_ID_LIST(test_list_local) and + * BTF_ID_LIST_GLOBAL(test_list_global) IDs + */ + for (j = 0; j < ARRAY_SIZE(test_lists); j++) { + test_list = test_lists[j]; + for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) { + ret = CHECK(test_list[i] != test_symbols[i].id, + "id_check", + "wrong ID for %s (%d != %d)\n", + test_symbols[i].name, + test_list[i], test_symbols[i].id); + } + } + + return ret; +} diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c new file mode 100644 index 000000000000..f1784ae4565a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -0,0 +1,1282 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// Copyright (c) 2020 Cloudflare +/* + * Test BPF attach point for INET socket lookup (BPF_SK_LOOKUP). + * + * Tests exercise: + * - attaching/detaching/querying programs to BPF_SK_LOOKUP hook, + * - redirecting socket lookup to a socket selected by BPF program, + * - failing a socket lookup on BPF program's request, + * - error scenarios for selecting a socket from BPF program, + * - accessing BPF program context, + * - attaching and running multiple BPF programs. + * + * Tests run in a dedicated network namespace. + */ + +#define _GNU_SOURCE +#include <arpa/inet.h> +#include <assert.h> +#include <errno.h> +#include <error.h> +#include <fcntl.h> +#include <sched.h> +#include <stdio.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> + +#include <bpf/libbpf.h> +#include <bpf/bpf.h> + +#include "test_progs.h" +#include "bpf_rlimit.h" +#include "bpf_util.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "test_sk_lookup.skel.h" + +/* External (address, port) pairs the client sends packets to. */ +#define EXT_IP4 "127.0.0.1" +#define EXT_IP6 "fd00::1" +#define EXT_PORT 7007 + +/* Internal (address, port) pairs the server listens/receives at. */ +#define INT_IP4 "127.0.0.2" +#define INT_IP4_V6 "::ffff:127.0.0.2" +#define INT_IP6 "fd00::2" +#define INT_PORT 8008 + +#define IO_TIMEOUT_SEC 3 + +enum server { + SERVER_A = 0, + SERVER_B = 1, + MAX_SERVERS, +}; + +enum { + PROG1 = 0, + PROG2, +}; + +struct inet_addr { + const char *ip; + unsigned short port; +}; + +struct test { + const char *desc; + struct bpf_program *lookup_prog; + struct bpf_program *reuseport_prog; + struct bpf_map *sock_map; + int sotype; + struct inet_addr connect_to; + struct inet_addr listen_at; + enum server accept_on; +}; + +static __u32 duration; /* for CHECK macro */ + +static bool is_ipv6(const char *ip) +{ + return !!strchr(ip, ':'); +} + +static int attach_reuseport(int sock_fd, struct bpf_program *reuseport_prog) +{ + int err, prog_fd; + + prog_fd = bpf_program__fd(reuseport_prog); + if (prog_fd < 0) { + errno = -prog_fd; + return -1; + } + + err = setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, + &prog_fd, sizeof(prog_fd)); + if (err) + return -1; + + return 0; +} + +static socklen_t inetaddr_len(const struct sockaddr_storage *addr) +{ + return (addr->ss_family == AF_INET ? sizeof(struct sockaddr_in) : + addr->ss_family == AF_INET6 ? 
sizeof(struct sockaddr_in6) : 0); +} + +static int make_socket(int sotype, const char *ip, int port, + struct sockaddr_storage *addr) +{ + struct timeval timeo = { .tv_sec = IO_TIMEOUT_SEC }; + int err, family, fd; + + family = is_ipv6(ip) ? AF_INET6 : AF_INET; + err = make_sockaddr(family, ip, port, addr, NULL); + if (CHECK(err, "make_address", "failed\n")) + return -1; + + fd = socket(addr->ss_family, sotype, 0); + if (CHECK(fd < 0, "socket", "failed\n")) { + log_err("failed to make socket"); + return -1; + } + + err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo)); + if (CHECK(err, "setsockopt(SO_SNDTIMEO)", "failed\n")) { + log_err("failed to set SNDTIMEO"); + close(fd); + return -1; + } + + err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo)); + if (CHECK(err, "setsockopt(SO_RCVTIMEO)", "failed\n")) { + log_err("failed to set RCVTIMEO"); + close(fd); + return -1; + } + + return fd; +} + +static int make_server(int sotype, const char *ip, int port, + struct bpf_program *reuseport_prog) +{ + struct sockaddr_storage addr = {0}; + const int one = 1; + int err, fd = -1; + + fd = make_socket(sotype, ip, port, &addr); + if (fd < 0) + return -1; + + /* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. */ + if (sotype == SOCK_DGRAM) { + err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) { + log_err("failed to enable IP_RECVORIGDSTADDR"); + goto fail; + } + } + + if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) { + err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) { + log_err("failed to enable IPV6_RECVORIGDSTADDR"); + goto fail; + } + } + + if (sotype == SOCK_STREAM) { + err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) { + log_err("failed to enable SO_REUSEADDR"); + goto fail; + } + } + + if (reuseport_prog) { + err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) { + log_err("failed to enable SO_REUSEPORT"); + goto fail; + } + } + + err = bind(fd, (void *)&addr, inetaddr_len(&addr)); + if (CHECK(err, "bind", "failed\n")) { + log_err("failed to bind listen socket"); + goto fail; + } + + if (sotype == SOCK_STREAM) { + err = listen(fd, SOMAXCONN); + if (CHECK(err, "make_server", "listen")) { + log_err("failed to listen on port %d", port); + goto fail; + } + } + + /* Late attach reuseport prog so we can have one init path */ + if (reuseport_prog) { + err = attach_reuseport(fd, reuseport_prog); + if (CHECK(err, "attach_reuseport", "failed\n")) { + log_err("failed to attach reuseport prog"); + goto fail; + } + } + + return fd; +fail: + close(fd); + return -1; +} + +static int make_client(int sotype, const char *ip, int port) +{ + struct sockaddr_storage addr = {0}; + int err, fd; + + fd = make_socket(sotype, ip, port, &addr); + if (fd < 0) + return -1; + + err = connect(fd, (void *)&addr, inetaddr_len(&addr)); + if (CHECK(err, "make_client", "connect")) { + log_err("failed to connect client socket"); + goto fail; + } + + return fd; +fail: + close(fd); + return -1; +} + +static int send_byte(int fd) +{ + ssize_t n; + + errno = 0; + n = send(fd, "a", 1, 0); + if (CHECK(n <= 0, "send_byte", "send")) { + log_err("failed/partial send"); + return -1; + } + return 0; +} + +static int recv_byte(int fd) +{ + char buf[1]; + ssize_t n; + + n = 
recv(fd, buf, sizeof(buf), 0); + if (CHECK(n <= 0, "recv_byte", "recv")) { + log_err("failed/partial recv"); + return -1; + } + return 0; +} + +static int tcp_recv_send(int server_fd) +{ + char buf[1]; + int ret, fd; + ssize_t n; + + fd = accept(server_fd, NULL, NULL); + if (CHECK(fd < 0, "accept", "failed\n")) { + log_err("failed to accept"); + return -1; + } + + n = recv(fd, buf, sizeof(buf), 0); + if (CHECK(n <= 0, "recv", "failed\n")) { + log_err("failed/partial recv"); + ret = -1; + goto close; + } + + n = send(fd, buf, n, 0); + if (CHECK(n <= 0, "send", "failed\n")) { + log_err("failed/partial send"); + ret = -1; + goto close; + } + + ret = 0; +close: + close(fd); + return ret; +} + +static void v4_to_v6(struct sockaddr_storage *ss) +{ + struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)ss; + struct sockaddr_in v4 = *(struct sockaddr_in *)ss; + + v6->sin6_family = AF_INET6; + v6->sin6_port = v4.sin_port; + v6->sin6_addr.s6_addr[10] = 0xff; + v6->sin6_addr.s6_addr[11] = 0xff; + memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4); +} + +static int udp_recv_send(int server_fd) +{ + char cmsg_buf[CMSG_SPACE(sizeof(struct sockaddr_storage))]; + struct sockaddr_storage _src_addr = { 0 }; + struct sockaddr_storage *src_addr = &_src_addr; + struct sockaddr_storage *dst_addr = NULL; + struct msghdr msg = { 0 }; + struct iovec iov = { 0 }; + struct cmsghdr *cm; + char buf[1]; + int ret, fd; + ssize_t n; + + iov.iov_base = buf; + iov.iov_len = sizeof(buf); + + msg.msg_name = src_addr; + msg.msg_namelen = sizeof(*src_addr); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = cmsg_buf; + msg.msg_controllen = sizeof(cmsg_buf); + + errno = 0; + n = recvmsg(server_fd, &msg, 0); + if (CHECK(n <= 0, "recvmsg", "failed\n")) { + log_err("failed to receive"); + return -1; + } + if (CHECK(msg.msg_flags & MSG_CTRUNC, "recvmsg", "truncated cmsg\n")) + return -1; + + for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) { + if ((cm->cmsg_level == SOL_IP && + cm->cmsg_type == IP_ORIGDSTADDR) || + (cm->cmsg_level == SOL_IPV6 && + cm->cmsg_type == IPV6_ORIGDSTADDR)) { + dst_addr = (struct sockaddr_storage *)CMSG_DATA(cm); + break; + } + log_err("warning: ignored cmsg at level %d type %d", + cm->cmsg_level, cm->cmsg_type); + } + if (CHECK(!dst_addr, "recvmsg", "missing ORIGDSTADDR\n")) + return -1; + + /* Server socket bound to IPv4-mapped IPv6 address */ + if (src_addr->ss_family == AF_INET6 && + dst_addr->ss_family == AF_INET) { + v4_to_v6(dst_addr); + } + + /* Reply from original destination address. 
*/ + fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0); + if (CHECK(fd < 0, "socket", "failed\n")) { + log_err("failed to create tx socket"); + return -1; + } + + ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr)); + if (CHECK(ret, "bind", "failed\n")) { + log_err("failed to bind tx socket"); + goto out; + } + + msg.msg_control = NULL; + msg.msg_controllen = 0; + n = sendmsg(fd, &msg, 0); + if (CHECK(n <= 0, "sendmsg", "failed\n")) { + log_err("failed to send echo reply"); + ret = -1; + goto out; + } + + ret = 0; +out: + close(fd); + return ret; +} + +static int tcp_echo_test(int client_fd, int server_fd) +{ + int err; + + err = send_byte(client_fd); + if (err) + return -1; + err = tcp_recv_send(server_fd); + if (err) + return -1; + err = recv_byte(client_fd); + if (err) + return -1; + + return 0; +} + +static int udp_echo_test(int client_fd, int server_fd) +{ + int err; + + err = send_byte(client_fd); + if (err) + return -1; + err = udp_recv_send(server_fd); + if (err) + return -1; + err = recv_byte(client_fd); + if (err) + return -1; + + return 0; +} + +static struct bpf_link *attach_lookup_prog(struct bpf_program *prog) +{ + struct bpf_link *link; + int net_fd; + + net_fd = open("/proc/self/ns/net", O_RDONLY); + if (CHECK(net_fd < 0, "open", "failed\n")) { + log_err("failed to open /proc/self/ns/net"); + return NULL; + } + + link = bpf_program__attach_netns(prog, net_fd); + if (CHECK(IS_ERR(link), "bpf_program__attach_netns", "failed\n")) { + errno = -PTR_ERR(link); + log_err("failed to attach program '%s' to netns", + bpf_program__name(prog)); + link = NULL; + } + + close(net_fd); + return link; +} + +static int update_lookup_map(struct bpf_map *map, int index, int sock_fd) +{ + int err, map_fd; + uint64_t value; + + map_fd = bpf_map__fd(map); + if (CHECK(map_fd < 0, "bpf_map__fd", "failed\n")) { + errno = -map_fd; + log_err("failed to get map FD"); + return -1; + } + + value = (uint64_t)sock_fd; + err = bpf_map_update_elem(map_fd, &index, &value, BPF_NOEXIST); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) { + log_err("failed to update redir_map @ %d", index); + return -1; + } + + return 0; +} + +static __u32 link_info_prog_id(struct bpf_link *link) +{ + struct bpf_link_info info = {}; + __u32 info_len = sizeof(info); + int link_fd, err; + + link_fd = bpf_link__fd(link); + if (CHECK(link_fd < 0, "bpf_link__fd", "failed\n")) { + errno = -link_fd; + log_err("bpf_link__fd failed"); + return 0; + } + + err = bpf_obj_get_info_by_fd(link_fd, &info, &info_len); + if (CHECK(err, "bpf_obj_get_info_by_fd", "failed\n")) { + log_err("bpf_obj_get_info_by_fd"); + return 0; + } + if (CHECK(info_len != sizeof(info), "bpf_obj_get_info_by_fd", + "unexpected info len %u\n", info_len)) + return 0; + + return info.prog_id; +} + +static void query_lookup_prog(struct test_sk_lookup *skel) +{ + struct bpf_link *link[3] = {}; + __u32 attach_flags = 0; + __u32 prog_ids[3] = {}; + __u32 prog_cnt = 3; + __u32 prog_id; + int net_fd; + int err; + + net_fd = open("/proc/self/ns/net", O_RDONLY); + if (CHECK(net_fd < 0, "open", "failed\n")) { + log_err("failed to open /proc/self/ns/net"); + return; + } + + link[0] = attach_lookup_prog(skel->progs.lookup_pass); + if (!link[0]) + goto close; + link[1] = attach_lookup_prog(skel->progs.lookup_pass); + if (!link[1]) + goto detach; + link[2] = attach_lookup_prog(skel->progs.lookup_drop); + if (!link[2]) + goto detach; + + err = bpf_prog_query(net_fd, BPF_SK_LOOKUP, 0 /* query flags */, + &attach_flags, prog_ids, &prog_cnt); + if (CHECK(err, 
"bpf_prog_query", "failed\n")) { + log_err("failed to query lookup prog"); + goto detach; + } + + errno = 0; + if (CHECK(attach_flags != 0, "bpf_prog_query", + "wrong attach_flags on query: %u", attach_flags)) + goto detach; + if (CHECK(prog_cnt != 3, "bpf_prog_query", + "wrong program count on query: %u", prog_cnt)) + goto detach; + prog_id = link_info_prog_id(link[0]); + CHECK(prog_ids[0] != prog_id, "bpf_prog_query", + "invalid program #0 id on query: %u != %u\n", + prog_ids[0], prog_id); + prog_id = link_info_prog_id(link[1]); + CHECK(prog_ids[1] != prog_id, "bpf_prog_query", + "invalid program #1 id on query: %u != %u\n", + prog_ids[1], prog_id); + prog_id = link_info_prog_id(link[2]); + CHECK(prog_ids[2] != prog_id, "bpf_prog_query", + "invalid program #2 id on query: %u != %u\n", + prog_ids[2], prog_id); + +detach: + if (link[2]) + bpf_link__destroy(link[2]); + if (link[1]) + bpf_link__destroy(link[1]); + if (link[0]) + bpf_link__destroy(link[0]); +close: + close(net_fd); +} + +static void run_lookup_prog(const struct test *t) +{ + int client_fd, server_fds[MAX_SERVERS] = { -1 }; + struct bpf_link *lookup_link; + int i, err; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + server_fds[i] = make_server(t->sotype, t->listen_at.ip, + t->listen_at.port, + t->reuseport_prog); + if (server_fds[i] < 0) + goto close; + + err = update_lookup_map(t->sock_map, i, server_fds[i]); + if (err) + goto close; + + /* want just one server for non-reuseport test */ + if (!t->reuseport_prog) + break; + } + + client_fd = make_client(t->sotype, t->connect_to.ip, t->connect_to.port); + if (client_fd < 0) + goto close; + + if (t->sotype == SOCK_STREAM) + tcp_echo_test(client_fd, server_fds[t->accept_on]); + else + udp_echo_test(client_fd, server_fds[t->accept_on]); + + close(client_fd); +close: + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + if (server_fds[i] != -1) + close(server_fds[i]); + } + bpf_link__destroy(lookup_link); +} + +static void test_redirect_lookup(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, INT_PORT }, + }, + { + .desc = "TCP IPv4 redir addr", + .lookup_prog = skel->progs.redir_ip4, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, EXT_PORT }, + }, + { + .desc = "TCP IPv4 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "TCP IPv4 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "TCP IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + { + .desc = "TCP IPv6 redir addr", + .lookup_prog = skel->progs.redir_ip6, + .sock_map = skel->maps.redir_map, + .sotype = 
SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, EXT_PORT }, + }, + { + .desc = "TCP IPv4->IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4_V6, INT_PORT }, + }, + { + .desc = "TCP IPv6 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "TCP IPv6 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "UDP IPv4 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, INT_PORT }, + }, + { + .desc = "UDP IPv4 redir addr", + .lookup_prog = skel->progs.redir_ip4, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv4 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "UDP IPv4 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "UDP IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + { + .desc = "UDP IPv6 redir addr", + .lookup_prog = skel->progs.redir_ip6, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, EXT_PORT }, + }, + { + .desc = "UDP IPv4->IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .listen_at = { INT_IP4_V6, INT_PORT }, + .connect_to = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv6 redir and reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "UDP IPv6 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_A, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + run_lookup_prog(t); + } +} + +static void drop_on_lookup(const struct test *t) +{ + struct sockaddr_storage dst = {}; + int 
client_fd, server_fd, err; + struct bpf_link *lookup_link; + ssize_t n; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + server_fd = make_server(t->sotype, t->listen_at.ip, t->listen_at.port, + t->reuseport_prog); + if (server_fd < 0) + goto detach; + + client_fd = make_socket(t->sotype, t->connect_to.ip, + t->connect_to.port, &dst); + if (client_fd < 0) + goto close_srv; + + err = connect(client_fd, (void *)&dst, inetaddr_len(&dst)); + if (t->sotype == SOCK_DGRAM) { + err = send_byte(client_fd); + if (err) + goto close_all; + + /* Read out asynchronous error */ + n = recv(client_fd, NULL, 0, 0); + err = n == -1; + } + if (CHECK(!err || errno != ECONNREFUSED, "connect", + "unexpected success or error\n")) + log_err("expected ECONNREFUSED on connect"); + +close_all: + close(client_fd); +close_srv: + close(server_fd); +detach: + bpf_link__destroy(lookup_link); +} + +static void test_drop_on_lookup(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "TCP IPv6 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, EXT_PORT }, + }, + { + .desc = "UDP IPv4 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv6 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + drop_on_lookup(t); + } +} + +static void drop_on_reuseport(const struct test *t) +{ + struct sockaddr_storage dst = { 0 }; + int client, server1, server2, err; + struct bpf_link *lookup_link; + ssize_t n; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + server1 = make_server(t->sotype, t->listen_at.ip, t->listen_at.port, + t->reuseport_prog); + if (server1 < 0) + goto detach; + + err = update_lookup_map(t->sock_map, SERVER_A, server1); + if (err) + goto detach; + + /* second server on destination address we should never reach */ + server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port, + NULL /* reuseport prog */); + if (server2 < 0) + goto close_srv1; + + client = make_socket(t->sotype, t->connect_to.ip, + t->connect_to.port, &dst); + if (client < 0) + goto close_srv2; + + err = connect(client, (void *)&dst, inetaddr_len(&dst)); + if (t->sotype == SOCK_DGRAM) { + err = send_byte(client); + if (err) + goto close_all; + + /* Read out asynchronous error */ + n = recv(client, NULL, 0, 0); + err = n == -1; + } + if (CHECK(!err || errno != ECONNREFUSED, "connect", + "unexpected success or error\n")) + log_err("expected ECONNREFUSED on connect"); + +close_all: + close(client); +close_srv2: + close(server2); +close_srv1: + close(server1); +detach: + bpf_link__destroy(lookup_link); +} + +static void test_drop_on_reuseport(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + 
.connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "TCP IPv6 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + }, + { + .desc = "UDP IPv4 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "TCP IPv6 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + drop_on_reuseport(t); + } +} + +static void run_sk_assign(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog, + const char *listen_ip, const char *connect_ip) +{ + int client_fd, peer_fd, server_fds[MAX_SERVERS] = { -1 }; + struct bpf_link *lookup_link; + int i, err; + + lookup_link = attach_lookup_prog(lookup_prog); + if (!lookup_link) + return; + + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + server_fds[i] = make_server(SOCK_STREAM, listen_ip, 0, NULL); + if (server_fds[i] < 0) + goto close_servers; + + err = update_lookup_map(skel->maps.redir_map, i, + server_fds[i]); + if (err) + goto close_servers; + } + + client_fd = make_client(SOCK_STREAM, connect_ip, EXT_PORT); + if (client_fd < 0) + goto close_servers; + + peer_fd = accept(server_fds[SERVER_B], NULL, NULL); + if (CHECK(peer_fd < 0, "accept", "failed\n")) + goto close_client; + + close(peer_fd); +close_client: + close(client_fd); +close_servers: + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + if (server_fds[i] != -1) + close(server_fds[i]); + } + bpf_link__destroy(lookup_link); +} + +static void run_sk_assign_v4(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog) +{ + run_sk_assign(skel, lookup_prog, INT_IP4, EXT_IP4); +} + +static void run_sk_assign_v6(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog) +{ + run_sk_assign(skel, lookup_prog, INT_IP6, EXT_IP6); +} + +static void run_sk_assign_connected(struct test_sk_lookup *skel, + int sotype) +{ + int err, client_fd, connected_fd, server_fd; + struct bpf_link *lookup_link; + + server_fd = make_server(sotype, EXT_IP4, EXT_PORT, NULL); + if (server_fd < 0) + return; + + connected_fd = make_client(sotype, EXT_IP4, EXT_PORT); + if (connected_fd < 0) + goto out_close_server; + + /* Put a connected socket in redirect map */ + err = update_lookup_map(skel->maps.redir_map, SERVER_A, connected_fd); + if (err) + goto out_close_connected; + + lookup_link = attach_lookup_prog(skel->progs.sk_assign_esocknosupport); + if (!lookup_link) + goto out_close_connected; + + /* Try to redirect TCP SYN / UDP packet to a connected socket */ + client_fd = make_client(sotype, EXT_IP4, EXT_PORT); + if (client_fd < 0) + goto out_unlink_prog; + if (sotype == SOCK_DGRAM) { + send_byte(client_fd); + recv_byte(server_fd); + } + + close(client_fd); +out_unlink_prog: + bpf_link__destroy(lookup_link); +out_close_connected: + close(connected_fd); +out_close_server: + close(server_fd); +} + +static void test_sk_assign_helper(struct test_sk_lookup *skel) 
+{ + if (test__start_subtest("sk_assign returns EEXIST")) + run_sk_assign_v4(skel, skel->progs.sk_assign_eexist); + if (test__start_subtest("sk_assign honors F_REPLACE")) + run_sk_assign_v4(skel, skel->progs.sk_assign_replace_flag); + if (test__start_subtest("sk_assign accepts NULL socket")) + run_sk_assign_v4(skel, skel->progs.sk_assign_null); + if (test__start_subtest("access ctx->sk")) + run_sk_assign_v4(skel, skel->progs.access_ctx_sk); + if (test__start_subtest("narrow access to ctx v4")) + run_sk_assign_v4(skel, skel->progs.ctx_narrow_access); + if (test__start_subtest("narrow access to ctx v6")) + run_sk_assign_v6(skel, skel->progs.ctx_narrow_access); + if (test__start_subtest("sk_assign rejects TCP established")) + run_sk_assign_connected(skel, SOCK_STREAM); + if (test__start_subtest("sk_assign rejects UDP connected")) + run_sk_assign_connected(skel, SOCK_DGRAM); +} + +struct test_multi_prog { + const char *desc; + struct bpf_program *prog1; + struct bpf_program *prog2; + struct bpf_map *redir_map; + struct bpf_map *run_map; + int expect_errno; + struct inet_addr listen_at; +}; + +static void run_multi_prog_lookup(const struct test_multi_prog *t) +{ + struct sockaddr_storage dst = {}; + int map_fd, server_fd, client_fd; + struct bpf_link *link1, *link2; + int prog_idx, done, err; + + map_fd = bpf_map__fd(t->run_map); + + done = 0; + prog_idx = PROG1; + err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) + return; + prog_idx = PROG2; + err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) + return; + + link1 = attach_lookup_prog(t->prog1); + if (!link1) + return; + link2 = attach_lookup_prog(t->prog2); + if (!link2) + goto out_unlink1; + + server_fd = make_server(SOCK_STREAM, t->listen_at.ip, + t->listen_at.port, NULL); + if (server_fd < 0) + goto out_unlink2; + + err = update_lookup_map(t->redir_map, SERVER_A, server_fd); + if (err) + goto out_close_server; + + client_fd = make_socket(SOCK_STREAM, EXT_IP4, EXT_PORT, &dst); + if (client_fd < 0) + goto out_close_server; + + err = connect(client_fd, (void *)&dst, inetaddr_len(&dst)); + if (CHECK(err && !t->expect_errno, "connect", + "unexpected error %d\n", errno)) + goto out_close_client; + if (CHECK(err && t->expect_errno && errno != t->expect_errno, + "connect", "unexpected error %d\n", errno)) + goto out_close_client; + + done = 0; + prog_idx = PROG1; + err = bpf_map_lookup_elem(map_fd, &prog_idx, &done); + CHECK(err, "bpf_map_lookup_elem", "failed\n"); + CHECK(!done, "bpf_map_lookup_elem", "PROG1 !done\n"); + + done = 0; + prog_idx = PROG2; + err = bpf_map_lookup_elem(map_fd, &prog_idx, &done); + CHECK(err, "bpf_map_lookup_elem", "failed\n"); + CHECK(!done, "bpf_map_lookup_elem", "PROG2 !done\n"); + +out_close_client: + close(client_fd); +out_close_server: + close(server_fd); +out_unlink2: + bpf_link__destroy(link2); +out_unlink1: + bpf_link__destroy(link1); +} + +static void test_multi_prog_lookup(struct test_sk_lookup *skel) +{ + struct test_multi_prog tests[] = { + { + .desc = "multi prog - pass, pass", + .prog1 = skel->progs.multi_prog_pass1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "multi prog - drop, drop", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_drop2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - pass, drop", + .prog1 = skel->progs.multi_prog_pass1, + 
.prog2 = skel->progs.multi_prog_drop2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - drop, pass", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - pass, redir", + .prog1 = skel->progs.multi_prog_pass1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, pass", + .prog1 = skel->progs.multi_prog_redir1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - drop, redir", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, drop", + .prog1 = skel->progs.multi_prog_redir1, + .prog2 = skel->progs.multi_prog_drop2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, redir", + .prog1 = skel->progs.multi_prog_redir1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + }; + struct test_multi_prog *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + t->redir_map = skel->maps.redir_map; + t->run_map = skel->maps.run_map; + if (test__start_subtest(t->desc)) + run_multi_prog_lookup(t); + } +} + +static void run_tests(struct test_sk_lookup *skel) +{ + if (test__start_subtest("query lookup prog")) + query_lookup_prog(skel); + test_redirect_lookup(skel); + test_drop_on_lookup(skel); + test_drop_on_reuseport(skel); + test_sk_assign_helper(skel); + test_multi_prog_lookup(skel); +} + +static int switch_netns(void) +{ + static const char * const setup_script[] = { + "ip -6 addr add dev lo " EXT_IP6 "/128 nodad", + "ip -6 addr add dev lo " INT_IP6 "/128 nodad", + "ip link set dev lo up", + NULL, + }; + const char * const *cmd; + int err; + + err = unshare(CLONE_NEWNET); + if (CHECK(err, "unshare", "failed\n")) { + log_err("unshare(CLONE_NEWNET)"); + return -1; + } + + for (cmd = setup_script; *cmd; cmd++) { + err = system(*cmd); + if (CHECK(err, "system", "failed\n")) { + log_err("system(%s)", *cmd); + return -1; + } + } + + return 0; +} + +void test_sk_lookup(void) +{ + struct test_sk_lookup *skel; + int err; + + err = switch_netns(); + if (err) + return; + + skel = test_sk_lookup__open_and_load(); + if (CHECK(!skel, "skel open_and_load", "failed\n")) + return; + + run_tests(skel); + + test_sk_lookup__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c index fa153cf67b1b..fe87b77af459 100644 --- a/tools/testing/selftests/bpf/prog_tests/skeleton.c +++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c @@ -41,7 +41,7 @@ void test_skeleton(void) CHECK(bss->in4 != 0, "in4", "got %lld != exp %lld\n", bss->in4, 0LL); CHECK(bss->out4 != 0, "out4", "got %lld != exp %lld\n", bss->out4, 0LL); - CHECK(rodata->in6 != 0, "in6", "got %d != exp %d\n", rodata->in6, 0); + CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0); CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0); /* validate we can pre-setup global variables, even in .bss */ @@ -49,7 +49,7 @@ void test_skeleton(void) data->in2 = 11; bss->in3 = 12; bss->in4 = 13; - rodata->in6 = 14; + rodata->in.in6 = 14; err = test_skeleton__load(skel); if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) @@ -60,7 +60,7 @@ void test_skeleton(void) 
CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL); CHECK(bss->in3 != 12, "in3", "got %d != exp %d\n", bss->in3, 12); CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL); - CHECK(rodata->in6 != 14, "in6", "got %d != exp %d\n", rodata->in6, 14); + CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14); /* now set new values and attach to get them into outX variables */ data->in1 = 1; diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c index 8547ecbdc61f..ec281b0363b8 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -193,11 +193,10 @@ static void run_test(int cgroup_fd) if (CHECK_FAIL(server_fd < 0)) goto close_bpf_object; + pthread_mutex_lock(&server_started_mtx); if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread, (void *)&server_fd))) goto close_server_fd; - - pthread_mutex_lock(&server_started_mtx); pthread_cond_wait(&server_started, &server_started_mtx); pthread_mutex_unlock(&server_started_mtx); diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c index 9013a0c01eed..d207e968e6b1 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -118,7 +118,7 @@ static int run_test(int cgroup_fd, int server_fd) goto close_bpf_object; } - client_fd = connect_to_fd(AF_INET, SOCK_STREAM, server_fd); + client_fd = connect_to_fd(server_fd, 0); if (client_fd < 0) { err = -1; goto close_bpf_object; @@ -161,7 +161,7 @@ void test_tcp_rtt(void) if (CHECK_FAIL(cgroup_fd < 0)) return; - server_fd = start_server(AF_INET, SOCK_STREAM); + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c new file mode 100644 index 000000000000..39b0decb1bb2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Oracle and/or its affiliates. */ + +#include <test_progs.h> + +#include "trace_printk.skel.h" + +#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" +#define SEARCHMSG "testing,testing" + +void test_trace_printk(void) +{ + int err, iter = 0, duration = 0, found = 0; + struct trace_printk__bss *bss; + struct trace_printk *skel; + char *buf = NULL; + FILE *fp = NULL; + size_t buflen; + + skel = trace_printk__open(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + + err = trace_printk__load(skel); + if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) + goto cleanup; + + bss = skel->bss; + + err = trace_printk__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + fp = fopen(TRACEBUF, "r"); + if (CHECK(fp == NULL, "could not open trace buffer", + "error %d opening %s", errno, TRACEBUF)) + goto cleanup; + + /* We do not want to wait forever if this test fails... 
*/ + fcntl(fileno(fp), F_SETFL, O_NONBLOCK); + + /* wait for tracepoint to trigger */ + usleep(1); + trace_printk__detach(skel); + + if (CHECK(bss->trace_printk_ran == 0, + "bpf_trace_printk never ran", + "ran == %d", bss->trace_printk_ran)) + goto cleanup; + + if (CHECK(bss->trace_printk_ret <= 0, + "bpf_trace_printk returned <= 0 value", + "got %d", bss->trace_printk_ret)) + goto cleanup; + + /* verify our search string is in the trace buffer */ + while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) { + if (strstr(buf, SEARCHMSG) != NULL) + found++; + if (found == bss->trace_printk_ran) + break; + if (++iter > 1000) + break; + } + + if (CHECK(!found, "message from bpf_trace_printk not found", + "no instance of %s in %s", SEARCHMSG, TRACEBUF)) + goto cleanup; + +cleanup: + trace_printk__destroy(skel); + free(buf); + if (fp) + fclose(fp); +} diff --git a/tools/testing/selftests/bpf/prog_tests/udp_limit.c b/tools/testing/selftests/bpf/prog_tests/udp_limit.c new file mode 100644 index 000000000000..2aba09d4d01b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/udp_limit.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "udp_limit.skel.h" + +#include <sys/types.h> +#include <sys/socket.h> + +static int duration; + +void test_udp_limit(void) +{ + struct udp_limit *skel; + int fd1 = -1, fd2 = -1; + int cgroup_fd; + + cgroup_fd = test__join_cgroup("/udp_limit"); + if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno)) + return; + + skel = udp_limit__open_and_load(); + if (CHECK(!skel, "skel-load", "errno %d", errno)) + goto close_cgroup_fd; + + skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd); + skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd); + if (CHECK(IS_ERR(skel->links.sock) || IS_ERR(skel->links.sock_release), + "cg-attach", "sock %ld sock_release %ld", + PTR_ERR(skel->links.sock), + PTR_ERR(skel->links.sock_release))) + goto close_skeleton; + + /* BPF program enforces a single UDP socket per cgroup, + * verify that. + */ + fd1 = socket(AF_INET, SOCK_DGRAM, 0); + if (CHECK(fd1 < 0, "fd1", "errno %d", errno)) + goto close_skeleton; + + fd2 = socket(AF_INET, SOCK_DGRAM, 0); + if (CHECK(fd2 >= 0, "fd2", "errno %d", errno)) + goto close_skeleton; + + /* We can reopen again after close. 
*/ + close(fd1); + fd1 = -1; + + fd1 = socket(AF_INET, SOCK_DGRAM, 0); + if (CHECK(fd1 < 0, "fd1-again", "errno %d", errno)) + goto close_skeleton; + + /* Make sure the program was invoked the expected + * number of times: + * - open fd1 - BPF_CGROUP_INET_SOCK_CREATE + * - attempt to openfd2 - BPF_CGROUP_INET_SOCK_CREATE + * - close fd1 - BPF_CGROUP_INET_SOCK_RELEASE + * - open fd1 again - BPF_CGROUP_INET_SOCK_CREATE + */ + if (CHECK(skel->bss->invocations != 4, "bss-invocations", + "invocations=%d", skel->bss->invocations)) + goto close_skeleton; + + /* We should still have a single socket in use */ + if (CHECK(skel->bss->in_use != 1, "bss-in_use", + "in_use=%d", skel->bss->in_use)) + goto close_skeleton; + +close_skeleton: + if (fd1 >= 0) + close(fd1); + if (fd2 >= 0) + close(fd2); + udp_limit__destroy(skel); +close_cgroup_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/varlen.c b/tools/testing/selftests/bpf/prog_tests/varlen.c new file mode 100644 index 000000000000..c75525eab02c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/varlen.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> +#include <time.h> +#include "test_varlen.skel.h" + +#define CHECK_VAL(got, exp) \ + CHECK((got) != (exp), "check", "got %ld != exp %ld\n", \ + (long)(got), (long)(exp)) + +void test_varlen(void) +{ + int duration = 0, err; + struct test_varlen* skel; + struct test_varlen__bss *bss; + struct test_varlen__data *data; + const char str1[] = "Hello, "; + const char str2[] = "World!"; + const char exp_str[] = "Hello, \0World!\0"; + const int size1 = sizeof(str1); + const int size2 = sizeof(str2); + + skel = test_varlen__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + bss = skel->bss; + data = skel->data; + + err = test_varlen__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + bss->test_pid = getpid(); + + /* trigger everything */ + memcpy(bss->buf_in1, str1, size1); + memcpy(bss->buf_in2, str2, size2); + bss->capture = true; + usleep(1); + bss->capture = false; + + CHECK_VAL(bss->payload1_len1, size1); + CHECK_VAL(bss->payload1_len2, size2); + CHECK_VAL(bss->total1, size1 + size2); + CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check", + "doesn't match!"); + + CHECK_VAL(data->payload2_len1, size1); + CHECK_VAL(data->payload2_len2, size2); + CHECK_VAL(data->total2, size1 + size2); + CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check", + "doesn't match!"); + + CHECK_VAL(data->payload3_len1, size1); + CHECK_VAL(data->payload3_len2, size2); + CHECK_VAL(data->total3, size1 + size2); + CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check", + "doesn't match!"); + + CHECK_VAL(data->payload4_len1, size1); + CHECK_VAL(data->payload4_len2, size2); + CHECK_VAL(data->total4, size1 + size2); + CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check", + "doesn't match!"); +cleanup: + test_varlen__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c new file mode 100644 index 000000000000..0176573fe4e7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <uapi/linux/bpf.h> +#include <linux/if_link.h> +#include <test_progs.h> + +#include "test_xdp_with_cpumap_helpers.skel.h" + 
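The cpumap attach test below drives the new per-entry program support in BPF_MAP_TYPE_CPUMAP through its map value. As a rough sketch (field layout assumed from the uapi/linux/bpf.h definition this series builds on, not restated in this diff), the value written with bpf_map_update_elem() pairs a queue size with a program reference:

    /* Assumed layout of the CPUMAP value used below: qsize sizes the
     * per-CPU queue, bpf_prog.fd names the XDP program to run on packets
     * redirected to that CPU; on lookup the kernel reports bpf_prog.id.
     */
    struct bpf_cpumap_val {
            __u32 qsize;            /* queue size to remote target CPU */
            union {
                    int   fd;       /* prog fd on map update */
                    __u32 id;       /* prog id on map lookup */
            } bpf_prog;
    };

The test relies on exactly this read-back behaviour: it updates an entry with a program fd and then expects the looked-up value to carry the matching program id.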
+#define IFINDEX_LO 1 + +void test_xdp_with_cpumap_helpers(void) +{ + struct test_xdp_with_cpumap_helpers *skel; + struct bpf_prog_info info = {}; + struct bpf_cpumap_val val = { + .qsize = 192, + }; + __u32 duration = 0, idx = 0; + __u32 len = sizeof(info); + int err, prog_fd, map_fd; + + skel = test_xdp_with_cpumap_helpers__open_and_load(); + if (CHECK_FAIL(!skel)) { + perror("test_xdp_with_cpumap_helpers__open_and_load"); + return; + } + + /* can not attach program with cpumaps that allow programs + * as xdp generic + */ + prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog); + err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE); + CHECK(err == 0, "Generic attach of program with 8-byte CPUMAP", + "should have failed\n"); + + prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm); + map_fd = bpf_map__fd(skel->maps.cpu_map); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &len); + if (CHECK_FAIL(err)) + goto out_close; + + val.bpf_prog.fd = prog_fd; + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + CHECK(err, "Add program to cpumap entry", "err %d errno %d\n", + err, errno); + + err = bpf_map_lookup_elem(map_fd, &idx, &val); + CHECK(err, "Read cpumap entry", "err %d errno %d\n", err, errno); + CHECK(info.id != val.bpf_prog.id, "Expected program id in cpumap entry", + "expected %u read %u\n", info.id, val.bpf_prog.id); + + /* can not attach BPF_XDP_CPUMAP program to a device */ + err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE); + CHECK(err == 0, "Attach of BPF_XDP_CPUMAP program", + "should have failed\n"); + + val.qsize = 192; + val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog); + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + CHECK(err == 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry", + "should have failed\n"); + +out_close: + test_xdp_with_cpumap_helpers__destroy(skel); +} + +void test_xdp_cpumap_attach(void) +{ + if (test__start_subtest("cpumap_with_progs")) + test_xdp_with_cpumap_helpers(); +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h new file mode 100644 index 000000000000..17db3bac518b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Facebook */ +/* "undefine" structs in vmlinux.h, because we "override" them below */ +#define bpf_iter_meta bpf_iter_meta___not_used +#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used +#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used +#define bpf_iter__netlink bpf_iter__netlink___not_used +#define bpf_iter__task bpf_iter__task___not_used +#define bpf_iter__task_file bpf_iter__task_file___not_used +#define bpf_iter__tcp bpf_iter__tcp___not_used +#define tcp6_sock tcp6_sock___not_used +#define bpf_iter__udp bpf_iter__udp___not_used +#define udp6_sock udp6_sock___not_used +#include "vmlinux.h" +#undef bpf_iter_meta +#undef bpf_iter__bpf_map +#undef bpf_iter__ipv6_route +#undef bpf_iter__netlink +#undef bpf_iter__task +#undef bpf_iter__task_file +#undef bpf_iter__tcp +#undef tcp6_sock +#undef bpf_iter__udp +#undef udp6_sock + +struct bpf_iter_meta { + struct seq_file *seq; + __u64 session_id; + __u64 seq_num; +} __attribute__((preserve_access_index)); + +struct bpf_iter__ipv6_route { + struct bpf_iter_meta *meta; + struct fib6_info *rt; +} __attribute__((preserve_access_index)); + +struct bpf_iter__netlink { + struct bpf_iter_meta *meta; + struct netlink_sock *sk; +} __attribute__((preserve_access_index)); + +struct 
bpf_iter__task { + struct bpf_iter_meta *meta; + struct task_struct *task; +} __attribute__((preserve_access_index)); + +struct bpf_iter__task_file { + struct bpf_iter_meta *meta; + struct task_struct *task; + __u32 fd; + struct file *file; +} __attribute__((preserve_access_index)); + +struct bpf_iter__bpf_map { + struct bpf_iter_meta *meta; + struct bpf_map *map; +} __attribute__((preserve_access_index)); + +struct bpf_iter__tcp { + struct bpf_iter_meta *meta; + struct sock_common *sk_common; + uid_t uid; +} __attribute__((preserve_access_index)); + +struct tcp6_sock { + struct tcp_sock tcp; + struct ipv6_pinfo inet6; +} __attribute__((preserve_access_index)); + +struct bpf_iter__udp { + struct bpf_iter_meta *meta; + struct udp_sock *udp_sk; + uid_t uid __attribute__((aligned(8))); + int bucket __attribute__((aligned(8))); +} __attribute__((preserve_access_index)); + +struct udp6_sock { + struct udp_sock udp; + struct ipv6_pinfo inet6; +} __attribute__((preserve_access_index)); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c index b57bd6fef208..08651b23edba 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c @@ -1,27 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__bpf_map +#include "bpf_iter.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> char _license[] SEC("license") = "GPL"; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__bpf_map { - struct bpf_iter_meta *meta; - struct bpf_map *map; -} __attribute__((preserve_access_index)); - SEC("iter/bpf_map") int dump_bpf_map(struct bpf_iter__bpf_map *ctx) { diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c index c8e9ca74c87b..d58d9f1642b5 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c @@ -1,35 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__ipv6_route +#include "bpf_iter.h" +#include "bpf_tracing_net.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__ipv6_route { - struct bpf_iter_meta *meta; - struct fib6_info *rt; -} __attribute__((preserve_access_index)); - char _license[] SEC("license") = "GPL"; extern bool CONFIG_IPV6_SUBTREES __kconfig __weak; -#define RTF_GATEWAY 0x0002 -#define IFNAMSIZ 16 -#define fib_nh_gw_family nh_common.nhc_gw_family -#define fib_nh_gw6 nh_common.nhc_gw.ipv6 -#define fib_nh_dev nh_common.nhc_dev - SEC("iter/ipv6_route") int dump_ipv6_route(struct bpf_iter__ipv6_route *ctx) { diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c index 
75ecf956a2df..95989f4c99b5 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c @@ -1,30 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__netlink bpf_iter__netlink___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__netlink +#include "bpf_iter.h" +#include "bpf_tracing_net.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> char _license[] SEC("license") = "GPL"; -#define sk_rmem_alloc sk_backlog.rmem_alloc -#define sk_refcnt __sk_common.skc_refcnt - -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__netlink { - struct bpf_iter_meta *meta; - struct netlink_sock *sk; -} __attribute__((preserve_access_index)); - static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket) { return &container_of(socket, struct socket_alloc, socket)->vfs_inode; @@ -54,10 +36,10 @@ int dump_netlink(struct bpf_iter__netlink *ctx) if (!nlk->groups) { group = 0; } else { - /* FIXME: temporary use bpf_probe_read here, needs + /* FIXME: temporary use bpf_probe_read_kernel here, needs * verifier support to do direct access. */ - bpf_probe_read(&group, sizeof(group), &nlk->groups[0]); + bpf_probe_read_kernel(&group, sizeof(group), &nlk->groups[0]); } BPF_SEQ_PRINTF(seq, "%-10u %08x %-8d %-8d %-5d %-8d ", nlk->portid, (u32)group, @@ -74,7 +56,7 @@ int dump_netlink(struct bpf_iter__netlink *ctx) * with current verifier. */ inode = SOCK_INODE(sk); - bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino); } BPF_SEQ_PRINTF(seq, "%-8u %-8lu\n", s->sk_drops.counter, ino); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_task.c index ee754021f98e..4983087852a0 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task.c @@ -1,27 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__task bpf_iter__task___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__task +#include "bpf_iter.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> char _license[] SEC("license") = "GPL"; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task { - struct bpf_iter_meta *meta; - struct task_struct *task; -} __attribute__((preserve_access_index)); - SEC("iter/task") int dump_task(struct bpf_iter__task *ctx) { diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c index 0f0ec3db20ba..8b787baa2654 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c @@ -1,29 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__task_file bpf_iter__task_file___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef 
bpf_iter__task_file +#include "bpf_iter.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> char _license[] SEC("license") = "GPL"; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task_file { - struct bpf_iter_meta *meta; - struct task_struct *task; - __u32 fd; - struct file *file; -} __attribute__((preserve_access_index)); - SEC("iter/task_file") int dump_task_file(struct bpf_iter__task_file *ctx) { diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c new file mode 100644 index 000000000000..50e59a2e142e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +#define MAX_STACK_TRACE_DEPTH 64 +unsigned long entries[MAX_STACK_TRACE_DEPTH] = {}; +#define SIZE_OF_ULONG (sizeof(unsigned long)) + +SEC("iter/task") +int dump_task_stack(struct bpf_iter__task *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct task_struct *task = ctx->task; + long i, retlen; + + if (task == (void *)0) + return 0; + + retlen = bpf_get_task_stack(task, entries, + MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, 0); + if (retlen < 0) + return 0; + + BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid, + retlen / SIZE_OF_ULONG); + for (i = 0; i < MAX_STACK_TRACE_DEPTH; i++) { + if (retlen > i * SIZE_OF_ULONG) + BPF_SEQ_PRINTF(seq, "[<0>] %pB\n", (void *)entries[i]); + } + BPF_SEQ_PRINTF(seq, "\n"); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c new file mode 100644 index 000000000000..54380c5e1069 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_endian.h> + +char _license[] SEC("license") = "GPL"; + +static int hlist_unhashed_lockless(const struct hlist_node *h) +{ + return !(h->pprev); +} + +static int timer_pending(const struct timer_list * timer) +{ + return !hlist_unhashed_lockless(&timer->entry); +} + +extern unsigned CONFIG_HZ __kconfig; + +#define USER_HZ 100 +#define NSEC_PER_SEC 1000000000ULL +static clock_t jiffies_to_clock_t(unsigned long x) +{ + /* The implementation here tailored to a particular + * setting of USER_HZ. 
+ */ + u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ; + u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ; + + if ((tick_nsec % user_hz_nsec) == 0) { + if (CONFIG_HZ < USER_HZ) + return x * (USER_HZ / CONFIG_HZ); + else + return x / (CONFIG_HZ / USER_HZ); + } + return x * tick_nsec/user_hz_nsec; +} + +static clock_t jiffies_delta_to_clock_t(long delta) +{ + if (delta <= 0) + return 0; + + return jiffies_to_clock_t(delta); +} + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; + bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino); + return ino; +} + +static bool +inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk) +{ + return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; +} + +static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp) +{ + return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH; +} + +static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp, + uid_t uid, __u32 seq_num) +{ + const struct inet_connection_sock *icsk; + const struct fastopen_queue *fastopenq; + const struct inet_sock *inet; + unsigned long timer_expires; + const struct sock *sp; + __u16 destp, srcp; + __be32 dest, src; + int timer_active; + int rx_queue; + int state; + + icsk = &tp->inet_conn; + inet = &icsk->icsk_inet; + sp = &inet->sk; + fastopenq = &icsk->icsk_accept_queue.fastopenq; + + dest = inet->inet_daddr; + src = inet->inet_rcv_saddr; + destp = bpf_ntohs(inet->inet_dport); + srcp = bpf_ntohs(inet->inet_sport); + + if (icsk->icsk_pending == ICSK_TIME_RETRANS || + icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { + timer_active = 1; + timer_expires = icsk->icsk_timeout; + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { + timer_active = 4; + timer_expires = icsk->icsk_timeout; + } else if (timer_pending(&sp->sk_timer)) { + timer_active = 2; + timer_expires = sp->sk_timer.expires; + } else { + timer_active = 0; + timer_expires = bpf_jiffies64(); + } + + state = sp->sk_state; + if (state == TCP_LISTEN) { + rx_queue = sp->sk_ack_backlog; + } else { + rx_queue = tp->rcv_nxt - tp->copied_seq; + if (rx_queue < 0) + rx_queue = 0; + } + + BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ", + seq_num, src, srcp, destp, destp); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ", + state, + tp->write_seq - tp->snd_una, rx_queue, + timer_active, + jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()), + icsk->icsk_retransmits, uid, + icsk->icsk_probes_out, + sock_i_ino(sp), + sp->sk_refcnt.refs.counter); + BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n", + tp, + jiffies_to_clock_t(icsk->icsk_rto), + jiffies_to_clock_t(icsk->icsk_ack.ato), + (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk), + tp->snd_cwnd, + state == TCP_LISTEN ? fastopenq->max_qlen + : (tcp_in_initial_slowstart(tp) ? 
-1 : tp->snd_ssthresh) + ); + + return 0; +} + +static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw, + uid_t uid, __u32 seq_num) +{ + struct inet_timewait_sock *tw = &ttw->tw_sk; + __u16 destp, srcp; + __be32 dest, src; + long delta; + + delta = tw->tw_timer.expires - bpf_jiffies64(); + dest = tw->tw_daddr; + src = tw->tw_rcv_saddr; + destp = bpf_ntohs(tw->tw_dport); + srcp = bpf_ntohs(tw->tw_sport); + + BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ", + seq_num, src, srcp, dest, destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + tw->tw_substate, 0, 0, + 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, + tw->tw_refcnt.refs.counter, tw); + + return 0; +} + +static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq, + uid_t uid, __u32 seq_num) +{ + struct inet_request_sock *irsk = &treq->req; + struct request_sock *req = &irsk->req; + long ttd; + + ttd = req->rsk_timer.expires - bpf_jiffies64(); + + if (ttd < 0) + ttd = 0; + + BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ", + seq_num, irsk->ir_loc_addr, + irsk->ir_num, irsk->ir_rmt_addr, + bpf_ntohs(irsk->ir_rmt_port)); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd), + req->num_timeout, uid, 0, 0, 0, req); + + return 0; +} + +SEC("iter/tcp") +int dump_tcp4(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = ctx->sk_common; + struct seq_file *seq = ctx->meta->seq; + struct tcp_timewait_sock *tw; + struct tcp_request_sock *req; + struct tcp_sock *tp; + uid_t uid = ctx->uid; + __u32 seq_num; + + if (sk_common == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, " sl " + "local_address " + "rem_address " + "st tx_queue rx_queue tr tm->when retrnsmt" + " uid timeout inode\n"); + + if (sk_common->skc_family != AF_INET) + return 0; + + tp = bpf_skc_to_tcp_sock(sk_common); + if (tp) + return dump_tcp_sock(seq, tp, uid, seq_num); + + tw = bpf_skc_to_tcp_timewait_sock(sk_common); + if (tw) + return dump_tw_sock(seq, tw, uid, seq_num); + + req = bpf_skc_to_tcp_request_sock(sk_common); + if (req) + return dump_req_sock(seq, req, uid, seq_num); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c new file mode 100644 index 000000000000..b4fbddfa4e10 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_endian.h> + +char _license[] SEC("license") = "GPL"; + +static int hlist_unhashed_lockless(const struct hlist_node *h) +{ + return !(h->pprev); +} + +static int timer_pending(const struct timer_list * timer) +{ + return !hlist_unhashed_lockless(&timer->entry); +} + +extern unsigned CONFIG_HZ __kconfig; + +#define USER_HZ 100 +#define NSEC_PER_SEC 1000000000ULL +static clock_t jiffies_to_clock_t(unsigned long x) +{ + /* The implementation here tailored to a particular + * setting of USER_HZ. 
+ */ + u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ; + u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ; + + if ((tick_nsec % user_hz_nsec) == 0) { + if (CONFIG_HZ < USER_HZ) + return x * (USER_HZ / CONFIG_HZ); + else + return x / (CONFIG_HZ / USER_HZ); + } + return x * tick_nsec/user_hz_nsec; +} + +static clock_t jiffies_delta_to_clock_t(long delta) +{ + if (delta <= 0) + return 0; + + return jiffies_to_clock_t(delta); +} + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; + bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino); + return ino; +} + +static bool +inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk) +{ + return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; +} + +static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp) +{ + return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH; +} + +static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp, + uid_t uid, __u32 seq_num) +{ + const struct inet_connection_sock *icsk; + const struct fastopen_queue *fastopenq; + const struct in6_addr *dest, *src; + const struct inet_sock *inet; + unsigned long timer_expires; + const struct sock *sp; + __u16 destp, srcp; + int timer_active; + int rx_queue; + int state; + + icsk = &tp->tcp.inet_conn; + inet = &icsk->icsk_inet; + sp = &inet->sk; + fastopenq = &icsk->icsk_accept_queue.fastopenq; + + dest = &sp->sk_v6_daddr; + src = &sp->sk_v6_rcv_saddr; + destp = bpf_ntohs(inet->inet_dport); + srcp = bpf_ntohs(inet->inet_sport); + + if (icsk->icsk_pending == ICSK_TIME_RETRANS || + icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { + timer_active = 1; + timer_expires = icsk->icsk_timeout; + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { + timer_active = 4; + timer_expires = icsk->icsk_timeout; + } else if (timer_pending(&sp->sk_timer)) { + timer_active = 2; + timer_expires = sp->sk_timer.expires; + } else { + timer_active = 0; + timer_expires = bpf_jiffies64(); + } + + state = sp->sk_state; + if (state == TCP_LISTEN) { + rx_queue = sp->sk_ack_backlog; + } else { + rx_queue = tp->tcp.rcv_nxt - tp->tcp.copied_seq; + if (rx_queue < 0) + rx_queue = 0; + } + + BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + seq_num, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], srcp, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], destp); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ", + state, + tp->tcp.write_seq - tp->tcp.snd_una, rx_queue, + timer_active, + jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()), + icsk->icsk_retransmits, uid, + icsk->icsk_probes_out, + sock_i_ino(sp), + sp->sk_refcnt.refs.counter); + BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n", + tp, + jiffies_to_clock_t(icsk->icsk_rto), + jiffies_to_clock_t(icsk->icsk_ack.ato), + (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk), + tp->tcp.snd_cwnd, + state == TCP_LISTEN ? fastopenq->max_qlen + : (tcp_in_initial_slowstart(&tp->tcp) ? 
-1 + : tp->tcp.snd_ssthresh) + ); + + return 0; +} + +static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw, + uid_t uid, __u32 seq_num) +{ + struct inet_timewait_sock *tw = &ttw->tw_sk; + const struct in6_addr *dest, *src; + __u16 destp, srcp; + long delta; + + delta = tw->tw_timer.expires - bpf_jiffies64(); + dest = &tw->tw_v6_daddr; + src = &tw->tw_v6_rcv_saddr; + destp = bpf_ntohs(tw->tw_dport); + srcp = bpf_ntohs(tw->tw_sport); + + BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + seq_num, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], srcp, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + tw->tw_substate, 0, 0, + 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, + tw->tw_refcnt.refs.counter, tw); + + return 0; +} + +static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq, + uid_t uid, __u32 seq_num) +{ + struct inet_request_sock *irsk = &treq->req; + struct request_sock *req = &irsk->req; + struct in6_addr *src, *dest; + long ttd; + + ttd = req->rsk_timer.expires - bpf_jiffies64(); + src = &irsk->ir_v6_loc_addr; + dest = &irsk->ir_v6_rmt_addr; + + if (ttd < 0) + ttd = 0; + + BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + seq_num, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], + irsk->ir_num, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], + bpf_ntohs(irsk->ir_rmt_port)); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd), + req->num_timeout, uid, 0, 0, 0, req); + + return 0; +} + +SEC("iter/tcp") +int dump_tcp6(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = ctx->sk_common; + struct seq_file *seq = ctx->meta->seq; + struct tcp_timewait_sock *tw; + struct tcp_request_sock *req; + struct tcp6_sock *tp; + uid_t uid = ctx->uid; + __u32 seq_num; + + if (sk_common == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, " sl " + "local_address " + "remote_address " + "st tx_queue rx_queue tr tm->when retrnsmt" + " uid timeout inode\n"); + + if (sk_common->skc_family != AF_INET6) + return 0; + + tp = bpf_skc_to_tcp6_sock(sk_common); + if (tp) + return dump_tcp6_sock(seq, tp, uid, seq_num); + + tw = bpf_skc_to_tcp_timewait_sock(sk_common); + if (tw) + return dump_tw_sock(seq, tw, uid, seq_num); + + req = bpf_skc_to_tcp_request_sock(sk_common); + if (req) + return dump_req_sock(seq, req, uid, seq_num); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c index 13c2c90c835f..2a4647f20c46 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c @@ -1,25 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__task bpf_iter__task___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__task +#include "bpf_iter.h" #include <bpf/bpf_helpers.h> char _license[] SEC("license") = "GPL"; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task { - struct bpf_iter_meta *meta; - struct task_struct 
*task; -} __attribute__((preserve_access_index)); - SEC("iter/task") int dump_task(struct bpf_iter__task *ctx) { diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c index 0aa71b333cf3..ee49493dc125 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c @@ -1,25 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__bpf_map +#include "bpf_iter.h" #include <bpf/bpf_helpers.h> char _license[] SEC("license") = "GPL"; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__bpf_map { - struct bpf_iter_meta *meta; - struct bpf_map *map; -} __attribute__((preserve_access_index)); - __u32 map1_id = 0, map2_id = 0; __u32 map1_accessed = 0, map2_accessed = 0; __u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h index dee1339e6905..d5e3df66ad9a 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h @@ -1,27 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__task bpf_iter__task___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__task +#include "bpf_iter.h" #include <bpf/bpf_helpers.h> char _license[] SEC("license") = "GPL"; int count = 0; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task { - struct bpf_iter_meta *meta; - struct task_struct *task; -} __attribute__((preserve_access_index)); - SEC("iter/task") int dump_task(struct bpf_iter__task *ctx) { diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c new file mode 100644 index 000000000000..f258583afbbd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_endian.h> + +char _license[] SEC("license") = "GPL"; + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; + bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino); + return ino; +} + +SEC("iter/udp") +int dump_udp4(struct bpf_iter__udp *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct udp_sock *udp_sk = ctx->udp_sk; + struct inet_sock *inet; + __u16 srcp, destp; + __be32 dest, src; + __u32 seq_num; + int rqueue; + + if (udp_sk == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, + " sl local_address rem_address st tx_queue " + "rx_queue tr tm->when retrnsmt uid timeout " + "inode ref pointer 
drops\n"); + + /* filter out udp6 sockets */ + inet = &udp_sk->inet; + if (inet->sk.sk_family == AF_INET6) + return 0; + + inet = &udp_sk->inet; + dest = inet->inet_daddr; + src = inet->inet_rcv_saddr; + srcp = bpf_ntohs(inet->inet_sport); + destp = bpf_ntohs(inet->inet_dport); + rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit; + + BPF_SEQ_PRINTF(seq, "%5d: %08X:%04X %08X:%04X ", + ctx->bucket, src, srcp, dest, destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n", + inet->sk.sk_state, + inet->sk.sk_wmem_alloc.refs.counter - 1, + rqueue, + 0, 0L, 0, ctx->uid, 0, + sock_i_ino(&inet->sk), + inet->sk.sk_refcnt.refs.counter, udp_sk, + inet->sk.sk_drops.counter); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c new file mode 100644 index 000000000000..65f93bb03f0f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_endian.h> + +char _license[] SEC("license") = "GPL"; + +#define IPV6_SEQ_DGRAM_HEADER \ + " sl " \ + "local_address " \ + "remote_address " \ + "st tx_queue rx_queue tr tm->when retrnsmt" \ + " uid timeout inode ref pointer drops\n" + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; + bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino); + return ino; +} + +SEC("iter/udp") +int dump_udp6(struct bpf_iter__udp *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct udp_sock *udp_sk = ctx->udp_sk; + const struct in6_addr *dest, *src; + struct udp6_sock *udp6_sk; + struct inet_sock *inet; + __u16 srcp, destp; + __u32 seq_num; + int rqueue; + + if (udp_sk == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, IPV6_SEQ_DGRAM_HEADER); + + udp6_sk = bpf_skc_to_udp6_sock(udp_sk); + if (udp6_sk == (void *)0) + return 0; + + inet = &udp_sk->inet; + srcp = bpf_ntohs(inet->inet_sport); + destp = bpf_ntohs(inet->inet_dport); + rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit; + dest = &inet->sk.sk_v6_daddr; + src = &inet->sk.sk_v6_rcv_saddr; + + BPF_SEQ_PRINTF(seq, "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + ctx->bucket, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], srcp, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n", + inet->sk.sk_state, + inet->sk.sk_wmem_alloc.refs.counter - 1, + rqueue, + 0, 0L, 0, ctx->uid, 0, + sock_i_ino(&inet->sk), + inet->sk.sk_refcnt.refs.counter, udp_sk, + inet->sk.sk_drops.counter); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h new file mode 100644 index 000000000000..01378911252b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_TRACING_NET_H__ +#define __BPF_TRACING_NET_H__ + +#define AF_INET 2 +#define AF_INET6 10 + +#define ICSK_TIME_RETRANS 1 +#define 
ICSK_TIME_PROBE0 3 +#define ICSK_TIME_LOSS_PROBE 5 +#define ICSK_TIME_REO_TIMEOUT 6 + +#define IFNAMSIZ 16 + +#define RTF_GATEWAY 0x0002 + +#define TCP_INFINITE_SSTHRESH 0x7fffffff +#define TCP_PINGPONG_THRESH 3 + +#define fib_nh_dev nh_common.nhc_dev +#define fib_nh_gw_family nh_common.nhc_gw_family +#define fib_nh_gw6 nh_common.nhc_gw.ipv6 + +#define inet_daddr sk.__sk_common.skc_daddr +#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr +#define inet_dport sk.__sk_common.skc_dport + +#define ir_loc_addr req.__req_common.skc_rcv_saddr +#define ir_num req.__req_common.skc_num +#define ir_rmt_addr req.__req_common.skc_daddr +#define ir_rmt_port req.__req_common.skc_dport +#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr +#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr + +#define sk_family __sk_common.skc_family +#define sk_rmem_alloc sk_backlog.rmem_alloc +#define sk_refcnt __sk_common.skc_refcnt +#define sk_state __sk_common.skc_state +#define sk_v6_daddr __sk_common.skc_v6_daddr +#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr + +#define s6_addr32 in6_u.u6_addr32 + +#define tw_daddr __tw_common.skc_daddr +#define tw_rcv_saddr __tw_common.skc_rcv_saddr +#define tw_dport __tw_common.skc_dport +#define tw_refcnt __tw_common.skc_refcnt +#define tw_v6_daddr __tw_common.skc_v6_daddr +#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr + +#endif diff --git a/tools/testing/selftests/bpf/progs/btf_data.c b/tools/testing/selftests/bpf/progs/btf_data.c new file mode 100644 index 000000000000..baa525275bde --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_data.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +struct S { + int a; + int b; + int c; +}; + +union U { + int a; + int b; + int c; +}; + +struct S1 { + int a; + int b; + int c; +}; + +union U1 { + int a; + int b; + int c; +}; + +typedef int T; +typedef int S; +typedef int U; +typedef int T1; +typedef int S1; +typedef int U1; + +struct root_struct { + S m_1; + T m_2; + U m_3; + S1 m_4; + T1 m_5; + U1 m_6; + struct S m_7; + struct S1 m_8; + union U m_9; + union U1 m_10; +}; + +int func(struct root_struct *root) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index 1ab2c5eba86c..b1b2773c0b9d 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -104,6 +104,30 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx) return 0; } +static __inline int set_keepalive(struct bpf_sock_addr *ctx) +{ + int zero = 0, one = 1; + + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one))) + return 1; + if (ctx->type == SOCK_STREAM) { + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPIDLE, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPINTVL, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPCNT, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_SYNCNT, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_USER_TIMEOUT, &one, sizeof(one))) + return 1; + } + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &zero, sizeof(zero))) + return 1; + + return 0; +} + SEC("cgroup/connect4") int connect_v4_prog(struct bpf_sock_addr *ctx) { @@ -121,6 +145,9 @@ int connect_v4_prog(struct bpf_sock_addr *ctx) if (bind_to_device(ctx)) return 0; + if (set_keepalive(ctx)) + return 0; + if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM) return 0; else if (ctx->type 
== SOCK_STREAM) diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c new file mode 100644 index 000000000000..473665cac67e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c @@ -0,0 +1,686 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +#define LOOP_BOUND 0xf +#define MAX_ENTRIES 8 +#define HALF_ENTRIES (MAX_ENTRIES >> 1) + +_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND"); + +enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC; +__u32 g_line = 0; + +#define VERIFY_TYPE(type, func) ({ \ + g_map_type = type; \ + if (!func()) \ + return 0; \ +}) + + +#define VERIFY(expr) ({ \ + g_line = __LINE__; \ + if (!(expr)) \ + return 0; \ +}) + +struct bpf_map_memory { + __u32 pages; +} __attribute__((preserve_access_index)); + +struct bpf_map { + enum bpf_map_type map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 id; + struct bpf_map_memory memory; +} __attribute__((preserve_access_index)); + +static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size, + __u32 value_size, __u32 max_entries) +{ + VERIFY(map->map_type == g_map_type); + VERIFY(map->key_size == key_size); + VERIFY(map->value_size == value_size); + VERIFY(map->max_entries == max_entries); + VERIFY(map->id > 0); + VERIFY(map->memory.pages > 0); + + return 1; +} + +static inline int check_bpf_map_ptr(struct bpf_map *indirect, + struct bpf_map *direct) +{ + VERIFY(indirect->map_type == direct->map_type); + VERIFY(indirect->key_size == direct->key_size); + VERIFY(indirect->value_size == direct->value_size); + VERIFY(indirect->max_entries == direct->max_entries); + VERIFY(indirect->id == direct->id); + VERIFY(indirect->memory.pages == direct->memory.pages); + + return 1; +} + +static inline int check(struct bpf_map *indirect, struct bpf_map *direct, + __u32 key_size, __u32 value_size, __u32 max_entries) +{ + VERIFY(check_bpf_map_ptr(indirect, direct)); + VERIFY(check_bpf_map_fields(indirect, key_size, value_size, + max_entries)); + return 1; +} + +static inline int check_default(struct bpf_map *indirect, + struct bpf_map *direct) +{ + VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32), + MAX_ENTRIES)); + return 1; +} + +typedef struct { + int counter; +} atomic_t; + +struct bpf_htab { + struct bpf_map map; + atomic_t count; + __u32 n_buckets; + __u32 elem_size; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */ + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_hash SEC(".maps"); + +static inline int check_hash(void) +{ + struct bpf_htab *hash = (struct bpf_htab *)&m_hash; + struct bpf_map *map = (struct bpf_map *)&m_hash; + int i; + + VERIFY(check_default(&hash->map, map)); + + VERIFY(hash->n_buckets == MAX_ENTRIES); + VERIFY(hash->elem_size == 64); + + VERIFY(hash->count.counter == 0); + for (i = 0; i < HALF_ENTRIES; ++i) { + const __u32 key = i; + const __u32 val = 1; + + if (bpf_map_update_elem(hash, &key, &val, 0)) + return 0; + } + VERIFY(hash->count.counter == HALF_ENTRIES); + + return 1; +} + +struct bpf_array { + struct bpf_map map; + __u32 elem_size; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_array SEC(".maps"); + +static 
inline int check_array(void) +{ + struct bpf_array *array = (struct bpf_array *)&m_array; + struct bpf_map *map = (struct bpf_map *)&m_array; + int i, n_lookups = 0, n_keys = 0; + + VERIFY(check_default(&array->map, map)); + + VERIFY(array->elem_size == 8); + + for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) { + const __u32 key = i; + __u32 *val = bpf_map_lookup_elem(array, &key); + + ++n_lookups; + if (val) + ++n_keys; + } + + VERIFY(n_lookups == MAX_ENTRIES); + VERIFY(n_keys == MAX_ENTRIES); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_prog_array SEC(".maps"); + +static inline int check_prog_array(void) +{ + struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array; + struct bpf_map *map = (struct bpf_map *)&m_prog_array; + + VERIFY(check_default(&prog_array->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_perf_event_array SEC(".maps"); + +static inline int check_perf_event_array(void) +{ + struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array; + struct bpf_map *map = (struct bpf_map *)&m_perf_event_array; + + VERIFY(check_default(&perf_event_array->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_percpu_hash SEC(".maps"); + +static inline int check_percpu_hash(void) +{ + struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash; + struct bpf_map *map = (struct bpf_map *)&m_percpu_hash; + + VERIFY(check_default(&percpu_hash->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_percpu_array SEC(".maps"); + +static inline int check_percpu_array(void) +{ + struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array; + struct bpf_map *map = (struct bpf_map *)&m_percpu_array; + + VERIFY(check_default(&percpu_array->map, map)); + + return 1; +} + +struct bpf_stack_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_STACK_TRACE); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u64); +} m_stack_trace SEC(".maps"); + +static inline int check_stack_trace(void) +{ + struct bpf_stack_map *stack_trace = + (struct bpf_stack_map *)&m_stack_trace; + struct bpf_map *map = (struct bpf_map *)&m_stack_trace; + + VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64), + MAX_ENTRIES)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_cgroup_array SEC(".maps"); + +static inline int check_cgroup_array(void) +{ + struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array; + struct bpf_map *map = (struct bpf_map *)&m_cgroup_array; + + VERIFY(check_default(&cgroup_array->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_lru_hash SEC(".maps"); + +static inline int check_lru_hash(void) +{ + struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash; + struct bpf_map *map = (struct bpf_map *)&m_lru_hash; + + VERIFY(check_default(&lru_hash->map, map)); + + 
return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_lru_percpu_hash SEC(".maps"); + +static inline int check_lru_percpu_hash(void) +{ + struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash; + struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash; + + VERIFY(check_default(&lru_percpu_hash->map, map)); + + return 1; +} + +struct lpm_trie { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct lpm_key { + struct bpf_lpm_trie_key trie_key; + __u32 data; +}; + +struct { + __uint(type, BPF_MAP_TYPE_LPM_TRIE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __uint(max_entries, MAX_ENTRIES); + __type(key, struct lpm_key); + __type(value, __u32); +} m_lpm_trie SEC(".maps"); + +static inline int check_lpm_trie(void) +{ + struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie; + struct bpf_map *map = (struct bpf_map *)&m_lpm_trie; + + VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32), + MAX_ENTRIES)); + + return 1; +} + +struct inner_map { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); +} inner_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); + __array(values, struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); + }); +} m_array_of_maps SEC(".maps") = { + .values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 }, +}; + +static inline int check_array_of_maps(void) +{ + struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps; + struct bpf_map *map = (struct bpf_map *)&m_array_of_maps; + + VERIFY(check_default(&array_of_maps->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); + __array(values, struct inner_map); +} m_hash_of_maps SEC(".maps") = { + .values = { + [2] = &inner_map, + }, +}; + +static inline int check_hash_of_maps(void) +{ + struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps; + struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps; + + VERIFY(check_default(&hash_of_maps->map, map)); + + return 1; +} + +struct bpf_dtab { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_devmap SEC(".maps"); + +static inline int check_devmap(void) +{ + struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap; + struct bpf_map *map = (struct bpf_map *)&m_devmap; + + VERIFY(check_default(&devmap->map, map)); + + return 1; +} + +struct bpf_stab { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_sockmap SEC(".maps"); + +static inline int check_sockmap(void) +{ + struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap; + struct bpf_map *map = (struct bpf_map *)&m_sockmap; + + VERIFY(check_default(&sockmap->map, map)); + + return 1; +} + +struct bpf_cpu_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_CPUMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_cpumap SEC(".maps"); 
+ +static inline int check_cpumap(void) +{ + struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap; + struct bpf_map *map = (struct bpf_map *)&m_cpumap; + + VERIFY(check_default(&cpumap->map, map)); + + return 1; +} + +struct xsk_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_xskmap SEC(".maps"); + +static inline int check_xskmap(void) +{ + struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap; + struct bpf_map *map = (struct bpf_map *)&m_xskmap; + + VERIFY(check_default(&xskmap->map, map)); + + return 1; +} + +struct bpf_shtab { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKHASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_sockhash SEC(".maps"); + +static inline int check_sockhash(void) +{ + struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash; + struct bpf_map *map = (struct bpf_map *)&m_sockhash; + + VERIFY(check_default(&sockhash->map, map)); + + return 1; +} + +struct bpf_cgroup_storage_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __type(key, struct bpf_cgroup_storage_key); + __type(value, __u32); +} m_cgroup_storage SEC(".maps"); + +static inline int check_cgroup_storage(void) +{ + struct bpf_cgroup_storage_map *cgroup_storage = + (struct bpf_cgroup_storage_map *)&m_cgroup_storage; + struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage; + + VERIFY(check(&cgroup_storage->map, map, + sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0)); + + return 1; +} + +struct reuseport_array { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_reuseport_sockarray SEC(".maps"); + +static inline int check_reuseport_sockarray(void) +{ + struct reuseport_array *reuseport_sockarray = + (struct reuseport_array *)&m_reuseport_sockarray; + struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray; + + VERIFY(check_default(&reuseport_sockarray->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); + __type(key, struct bpf_cgroup_storage_key); + __type(value, __u32); +} m_percpu_cgroup_storage SEC(".maps"); + +static inline int check_percpu_cgroup_storage(void) +{ + struct bpf_cgroup_storage_map *percpu_cgroup_storage = + (struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage; + struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage; + + VERIFY(check(&percpu_cgroup_storage->map, map, + sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0)); + + return 1; +} + +struct bpf_queue_stack { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, MAX_ENTRIES); + __type(value, __u32); +} m_queue SEC(".maps"); + +static inline int check_queue(void) +{ + struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue; + struct bpf_map *map = (struct bpf_map *)&m_queue; + + VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_STACK); + __uint(max_entries, MAX_ENTRIES); + __type(value, __u32); +} m_stack SEC(".maps"); + +static inline int check_stack(void) +{ + struct bpf_queue_stack 
*stack = (struct bpf_queue_stack *)&m_stack; + struct bpf_map *map = (struct bpf_map *)&m_stack; + + VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES)); + + return 1; +} + +struct bpf_sk_storage_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, __u32); + __type(value, __u32); +} m_sk_storage SEC(".maps"); + +static inline int check_sk_storage(void) +{ + struct bpf_sk_storage_map *sk_storage = + (struct bpf_sk_storage_map *)&m_sk_storage; + struct bpf_map *map = (struct bpf_map *)&m_sk_storage; + + VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_devmap_hash SEC(".maps"); + +static inline int check_devmap_hash(void) +{ + struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash; + struct bpf_map *map = (struct bpf_map *)&m_devmap_hash; + + VERIFY(check_default(&devmap_hash->map, map)); + + return 1; +} + +struct bpf_ringbuf_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 1 << 12); +} m_ringbuf SEC(".maps"); + +static inline int check_ringbuf(void) +{ + struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf; + struct bpf_map *map = (struct bpf_map *)&m_ringbuf; + + VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12)); + + return 1; +} + +SEC("cgroup_skb/egress") +int cg_skb(void *ctx) +{ + VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash); + VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array); + VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array); + VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array); + VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash); + VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array); + VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace); + VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array); + VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash); + VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash); + VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie); + VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps); + VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps); + VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap); + VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap); + VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap); + VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap); + VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash); + VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage); + VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, + check_reuseport_sockarray); + VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + check_percpu_cgroup_storage); + VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue); + VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack); + VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage); + VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash); + VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf); + + return 1; +} + +__u32 _version SEC("version") = 1; +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_autoload.c b/tools/testing/selftests/bpf/progs/test_autoload.c new file mode 100644 index 000000000000..62c8cdec6d5d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_autoload.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* 
Copyright (c) 2020 Facebook */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> + +bool prog1_called = false; +bool prog2_called = false; +bool prog3_called = false; + +SEC("raw_tp/sys_enter") +int prog1(const void *ctx) +{ + prog1_called = true; + return 0; +} + +SEC("raw_tp/sys_exit") +int prog2(const void *ctx) +{ + prog2_called = true; + return 0; +} + +struct fake_kernel_struct { + int whatever; +} __attribute__((preserve_access_index)); + +SEC("fentry/unexisting-kprobe-will-fail-if-loaded") +int prog3(const void *ctx) +{ + struct fake_kernel_struct *fake = (void *)ctx; + fake->whatever = 123; + prog3_called = true; + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_core_retro.c b/tools/testing/selftests/bpf/progs/test_core_retro.c new file mode 100644 index 000000000000..75c60c3c29cf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_core_retro.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> + +struct task_struct { + int tgid; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} results SEC(".maps"); + +SEC("tp/raw_syscalls/sys_enter") +int handle_sys_enter(void *ctx) +{ + struct task_struct *task = (void *)bpf_get_current_task(); + int tgid = BPF_CORE_READ(task, tgid); + int zero = 0; + + bpf_map_update_elem(&results, &zero, &tgid, 0); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_endian.c b/tools/testing/selftests/bpf/progs/test_endian.c new file mode 100644 index 000000000000..ddb687c5d125 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_endian.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +#define IN16 0x1234 +#define IN32 0x12345678U +#define IN64 0x123456789abcdef0ULL + +__u16 in16 = 0; +__u32 in32 = 0; +__u64 in64 = 0; + +__u16 out16 = 0; +__u32 out32 = 0; +__u64 out64 = 0; + +__u16 const16 = 0; +__u32 const32 = 0; +__u64 const64 = 0; + +SEC("raw_tp/sys_enter") +int sys_enter(const void *ctx) +{ + out16 = __builtin_bswap16(in16); + out32 = __builtin_bswap32(in32); + out64 = __builtin_bswap64(in64); + const16 = ___bpf_swab16(IN16); + const32 = ___bpf_swab32(IN32); + const64 = ___bpf_swab64(IN64); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c index 29817a703984..b6a6eb279e54 100644 --- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c +++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c @@ -57,8 +57,9 @@ struct { SEC("raw_tracepoint/sys_enter") int bpf_prog1(void *ctx) { - int max_len, max_buildid_len, usize, ksize, total_size; + int max_len, max_buildid_len, total_size; struct stack_trace_t *data; + long usize, ksize; void *raw_data; __u32 key = 0; diff --git a/tools/testing/selftests/bpf/progs/test_ksyms.c b/tools/testing/selftests/bpf/progs/test_ksyms.c new file mode 100644 index 000000000000..6c9cbb5a3bdf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_ksyms.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 
Facebook */ + +#include <stdbool.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +__u64 out__bpf_link_fops = -1; +__u64 out__bpf_link_fops1 = -1; +__u64 out__btf_size = -1; +__u64 out__per_cpu_start = -1; + +extern const void bpf_link_fops __ksym; +extern const void __start_BTF __ksym; +extern const void __stop_BTF __ksym; +extern const void __per_cpu_start __ksym; +/* non-existing symbol, weak, default to zero */ +extern const void bpf_link_fops1 __ksym __weak; + +SEC("raw_tp/sys_enter") +int handler(const void *ctx) +{ + out__bpf_link_fops = (__u64)&bpf_link_fops; + out__btf_size = (__u64)(&__stop_BTF - &__start_BTF); + out__per_cpu_start = (__u64)&__per_cpu_start; + + out__bpf_link_fops1 = (__u64)&bpf_link_fops1; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c index ad59c4c9aba8..8207a2dc2f9d 100644 --- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c +++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c @@ -12,8 +12,8 @@ struct { __uint(value_size, sizeof(int)); } perf_buf_map SEC(".maps"); -SEC("kprobe/sys_nanosleep") -int BPF_KPROBE(handle_sys_nanosleep_entry) +SEC("tp/raw_syscalls/sys_enter") +int handle_sys_enter(void *ctx) { int cpu = bpf_get_smp_processor_id(); diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c new file mode 100644 index 000000000000..bbf8296f4d66 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c @@ -0,0 +1,641 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// Copyright (c) 2020 Cloudflare + +#include <errno.h> +#include <stdbool.h> +#include <stddef.h> +#include <linux/bpf.h> +#include <linux/in.h> +#include <sys/socket.h> + +#include <bpf/bpf_endian.h> +#include <bpf/bpf_helpers.h> + +#define IP4(a, b, c, d) \ + bpf_htonl((((__u32)(a) & 0xffU) << 24) | \ + (((__u32)(b) & 0xffU) << 16) | \ + (((__u32)(c) & 0xffU) << 8) | \ + (((__u32)(d) & 0xffU) << 0)) +#define IP6(aaaa, bbbb, cccc, dddd) \ + { bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) } + +#define MAX_SOCKS 32 + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, MAX_SOCKS); + __type(key, __u32); + __type(value, __u64); +} redir_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 2); + __type(key, int); + __type(value, int); +} run_map SEC(".maps"); + +enum { + PROG1 = 0, + PROG2, +}; + +enum { + SERVER_A = 0, + SERVER_B, +}; + +/* Addressable key/value constants for convenience */ +static const int KEY_PROG1 = PROG1; +static const int KEY_PROG2 = PROG2; +static const int PROG_DONE = 1; + +static const __u32 KEY_SERVER_A = SERVER_A; +static const __u32 KEY_SERVER_B = SERVER_B; + +static const __u16 DST_PORT = 7007; /* Host byte order */ +static const __u32 DST_IP4 = IP4(127, 0, 0, 1); +static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001); + +SEC("sk_lookup/lookup_pass") +int lookup_pass(struct bpf_sk_lookup *ctx) +{ + return SK_PASS; +} + +SEC("sk_lookup/lookup_drop") +int lookup_drop(struct bpf_sk_lookup *ctx) +{ + return SK_DROP; +} + +SEC("sk_reuseport/reuse_pass") +int reuseport_pass(struct sk_reuseport_md *ctx) +{ + return SK_PASS; +} + +SEC("sk_reuseport/reuse_drop") +int reuseport_drop(struct sk_reuseport_md *ctx) +{ + return SK_DROP; +} + +/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. 
*/ +SEC("sk_lookup/redir_port") +int redir_port(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + if (ctx->local_port != DST_PORT) + return SK_PASS; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */ +SEC("sk_lookup/redir_ip4") +int redir_ip4(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + if (ctx->family != AF_INET) + return SK_PASS; + if (ctx->local_port != DST_PORT) + return SK_PASS; + if (ctx->local_ip4 != DST_IP4) + return SK_PASS; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +/* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */ +SEC("sk_lookup/redir_ip6") +int redir_ip6(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + if (ctx->family != AF_INET6) + return SK_PASS; + if (ctx->local_port != DST_PORT) + return SK_PASS; + if (ctx->local_ip6[0] != DST_IP6[0] || + ctx->local_ip6[1] != DST_IP6[1] || + ctx->local_ip6[2] != DST_IP6[2] || + ctx->local_ip6[3] != DST_IP6[3]) + return SK_PASS; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +SEC("sk_lookup/select_sock_a") +int select_sock_a(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +SEC("sk_lookup/select_sock_a_no_reuseport") +int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_DROP; + + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_NO_REUSEPORT); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +SEC("sk_reuseport/select_sock_b") +int select_sock_b(struct sk_reuseport_md *ctx) +{ + __u32 key = KEY_SERVER_B; + int err; + + err = bpf_sk_select_reuseport(ctx, &redir_map, &key, 0); + return err ? SK_DROP : SK_PASS; +} + +/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */ +SEC("sk_lookup/sk_assign_eexist") +int sk_assign_eexist(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, ret; + + ret = SK_DROP; + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, 0); + if (err) + goto out; + bpf_sk_release(sk); + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, 0); + if (err != -EEXIST) { + bpf_printk("sk_assign returned %d, expected %d\n", + err, -EEXIST); + goto out; + } + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. 
*/ +SEC("sk_lookup/sk_assign_replace_flag") +int sk_assign_replace_flag(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, ret; + + ret = SK_DROP; + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, 0); + if (err) + goto out; + bpf_sk_release(sk); + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE); + if (err) { + bpf_printk("sk_assign returned %d, expected 0\n", err); + goto out; + } + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +/* Check that bpf_sk_assign(sk=NULL) is accepted. */ +SEC("sk_lookup/sk_assign_null") +int sk_assign_null(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk = NULL; + int err, ret; + + ret = SK_DROP; + + err = bpf_sk_assign(ctx, NULL, 0); + if (err) { + bpf_printk("sk_assign returned %d, expected 0\n", err); + goto out; + } + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE); + if (err) { + bpf_printk("sk_assign returned %d, expected 0\n", err); + goto out; + } + + if (ctx->sk != sk) + goto out; + err = bpf_sk_assign(ctx, NULL, 0); + if (err != -EEXIST) + goto out; + err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +/* Check that selected sk is accessible through context. */ +SEC("sk_lookup/access_ctx_sk") +int access_ctx_sk(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk1 = NULL, *sk2 = NULL; + int err, ret; + + ret = SK_DROP; + + /* Try accessing unassigned (NULL) ctx->sk field */ + if (ctx->sk && ctx->sk->family != AF_INET) + goto out; + + /* Assign a value to ctx->sk */ + sk1 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk1) + goto out; + err = bpf_sk_assign(ctx, sk1, 0); + if (err) + goto out; + if (ctx->sk != sk1) + goto out; + + /* Access ctx->sk fields */ + if (ctx->sk->family != AF_INET || + ctx->sk->type != SOCK_STREAM || + ctx->sk->state != BPF_TCP_LISTEN) + goto out; + + /* Reset selection */ + err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + if (ctx->sk) + goto out; + + /* Assign another socket */ + sk2 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk2) + goto out; + err = bpf_sk_assign(ctx, sk2, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + if (ctx->sk != sk2) + goto out; + + /* Access reassigned ctx->sk fields */ + if (ctx->sk->family != AF_INET || + ctx->sk->type != SOCK_STREAM || + ctx->sk->state != BPF_TCP_LISTEN) + goto out; + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk1) + bpf_sk_release(sk1); + if (sk2) + bpf_sk_release(sk2); + return ret; +} + +/* Check narrow loads from ctx fields that support them. + * + * Narrow loads of size >= target field size from a non-zero offset + * are not covered because they give bogus results, that is the + * verifier ignores the offset. + */ +SEC("sk_lookup/ctx_narrow_access") +int ctx_narrow_access(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, family; + __u16 *half; + __u8 *byte; + bool v4; + + v4 = (ctx->family == AF_INET); + + /* Narrow loads from family field */ + byte = (__u8 *)&ctx->family; + half = (__u16 *)&ctx->family; + if (byte[0] != (v4 ? 
AF_INET : AF_INET6) || + byte[1] != 0 || byte[2] != 0 || byte[3] != 0) + return SK_DROP; + if (half[0] != (v4 ? AF_INET : AF_INET6)) + return SK_DROP; + + byte = (__u8 *)&ctx->protocol; + if (byte[0] != IPPROTO_TCP || + byte[1] != 0 || byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->protocol; + if (half[0] != IPPROTO_TCP) + return SK_DROP; + + /* Narrow loads from remote_port field. Expect non-0 value. */ + byte = (__u8 *)&ctx->remote_port; + if (byte[0] == 0 && byte[1] == 0 && byte[2] == 0 && byte[3] == 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_port; + if (half[0] == 0) + return SK_DROP; + + /* Narrow loads from local_port field. Expect DST_PORT. */ + byte = (__u8 *)&ctx->local_port; + if (byte[0] != ((DST_PORT >> 0) & 0xff) || + byte[1] != ((DST_PORT >> 8) & 0xff) || + byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->local_port; + if (half[0] != DST_PORT) + return SK_DROP; + + /* Narrow loads from IPv4 fields */ + if (v4) { + /* Expect non-0.0.0.0 in remote_ip4 */ + byte = (__u8 *)&ctx->remote_ip4; + if (byte[0] == 0 && byte[1] == 0 && + byte[2] == 0 && byte[3] == 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip4; + if (half[0] == 0 && half[1] == 0) + return SK_DROP; + + /* Expect DST_IP4 in local_ip4 */ + byte = (__u8 *)&ctx->local_ip4; + if (byte[0] != ((DST_IP4 >> 0) & 0xff) || + byte[1] != ((DST_IP4 >> 8) & 0xff) || + byte[2] != ((DST_IP4 >> 16) & 0xff) || + byte[3] != ((DST_IP4 >> 24) & 0xff)) + return SK_DROP; + half = (__u16 *)&ctx->local_ip4; + if (half[0] != ((DST_IP4 >> 0) & 0xffff) || + half[1] != ((DST_IP4 >> 16) & 0xffff)) + return SK_DROP; + } else { + /* Expect 0.0.0.0 IPs when family != AF_INET */ + byte = (__u8 *)&ctx->remote_ip4; + if (byte[0] != 0 || byte[1] != 0 || + byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip4; + if (half[0] != 0 || half[1] != 0) + return SK_DROP; + + byte = (__u8 *)&ctx->local_ip4; + if (byte[0] != 0 || byte[1] != 0 || + byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->local_ip4; + if (half[0] != 0 || half[1] != 0) + return SK_DROP; + } + + /* Narrow loads from IPv6 fields */ + if (!v4) { + /* Expect non-:: IP in remote_ip6 */ + byte = (__u8 *)&ctx->remote_ip6; + if (byte[0] == 0 && byte[1] == 0 && + byte[2] == 0 && byte[3] == 0 && + byte[4] == 0 && byte[5] == 0 && + byte[6] == 0 && byte[7] == 0 && + byte[8] == 0 && byte[9] == 0 && + byte[10] == 0 && byte[11] == 0 && + byte[12] == 0 && byte[13] == 0 && + byte[14] == 0 && byte[15] == 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip6; + if (half[0] == 0 && half[1] == 0 && + half[2] == 0 && half[3] == 0 && + half[4] == 0 && half[5] == 0 && + half[6] == 0 && half[7] == 0) + return SK_DROP; + + /* Expect DST_IP6 in local_ip6 */ + byte = (__u8 *)&ctx->local_ip6; + if (byte[0] != ((DST_IP6[0] >> 0) & 0xff) || + byte[1] != ((DST_IP6[0] >> 8) & 0xff) || + byte[2] != ((DST_IP6[0] >> 16) & 0xff) || + byte[3] != ((DST_IP6[0] >> 24) & 0xff) || + byte[4] != ((DST_IP6[1] >> 0) & 0xff) || + byte[5] != ((DST_IP6[1] >> 8) & 0xff) || + byte[6] != ((DST_IP6[1] >> 16) & 0xff) || + byte[7] != ((DST_IP6[1] >> 24) & 0xff) || + byte[8] != ((DST_IP6[2] >> 0) & 0xff) || + byte[9] != ((DST_IP6[2] >> 8) & 0xff) || + byte[10] != ((DST_IP6[2] >> 16) & 0xff) || + byte[11] != ((DST_IP6[2] >> 24) & 0xff) || + byte[12] != ((DST_IP6[3] >> 0) & 0xff) || + byte[13] != ((DST_IP6[3] >> 8) & 0xff) || + byte[14] != ((DST_IP6[3] >> 16) & 0xff) || + byte[15] != ((DST_IP6[3] >> 24) & 0xff)) + return SK_DROP; + half
= (__u16 *)&ctx->local_ip6; + if (half[0] != ((DST_IP6[0] >> 0) & 0xffff) || + half[1] != ((DST_IP6[0] >> 16) & 0xffff) || + half[2] != ((DST_IP6[1] >> 0) & 0xffff) || + half[3] != ((DST_IP6[1] >> 16) & 0xffff) || + half[4] != ((DST_IP6[2] >> 0) & 0xffff) || + half[5] != ((DST_IP6[2] >> 16) & 0xffff) || + half[6] != ((DST_IP6[3] >> 0) & 0xffff) || + half[7] != ((DST_IP6[3] >> 16) & 0xffff)) + return SK_DROP; + } else { + /* Expect :: IPs when family != AF_INET6 */ + byte = (__u8 *)&ctx->remote_ip6; + if (byte[0] != 0 || byte[1] != 0 || + byte[2] != 0 || byte[3] != 0 || + byte[4] != 0 || byte[5] != 0 || + byte[6] != 0 || byte[7] != 0 || + byte[8] != 0 || byte[9] != 0 || + byte[10] != 0 || byte[11] != 0 || + byte[12] != 0 || byte[13] != 0 || + byte[14] != 0 || byte[15] != 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip6; + if (half[0] != 0 || half[1] != 0 || + half[2] != 0 || half[3] != 0 || + half[4] != 0 || half[5] != 0 || + half[6] != 0 || half[7] != 0) + return SK_DROP; + + byte = (__u8 *)&ctx->local_ip6; + if (byte[0] != 0 || byte[1] != 0 || + byte[2] != 0 || byte[3] != 0 || + byte[4] != 0 || byte[5] != 0 || + byte[6] != 0 || byte[7] != 0 || + byte[8] != 0 || byte[9] != 0 || + byte[10] != 0 || byte[11] != 0 || + byte[12] != 0 || byte[13] != 0 || + byte[14] != 0 || byte[15] != 0) + return SK_DROP; + half = (__u16 *)&ctx->local_ip6; + if (half[0] != 0 || half[1] != 0 || + half[2] != 0 || half[3] != 0 || + half[4] != 0 || half[5] != 0 || + half[6] != 0 || half[7] != 0) + return SK_DROP; + } + + /* Success, redirect to KEY_SERVER_B */ + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (sk) { + bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + } + return SK_PASS; +} + +/* Check that sk_assign rejects SERVER_A socket with -ESOCKNOSUPPORT */ +SEC("sk_lookup/sk_assign_esocknosupport") +int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, ret; + + ret = SK_DROP; + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + goto out; + + err = bpf_sk_assign(ctx, sk, 0); + if (err != -ESOCKTNOSUPPORT) { + bpf_printk("sk_assign returned %d, expected %d\n", + err, -ESOCKTNOSUPPORT); + goto out; + } + + ret = SK_PASS; /* Success, pass to regular lookup */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +SEC("sk_lookup/multi_prog_pass1") +int multi_prog_pass1(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + +SEC("sk_lookup/multi_prog_pass2") +int multi_prog_pass2(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + +SEC("sk_lookup/multi_prog_drop1") +int multi_prog_drop1(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); + return SK_DROP; +} + +SEC("sk_lookup/multi_prog_drop2") +int multi_prog_drop2(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); + return SK_DROP; +} + +static __always_inline int select_server_a(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_DROP; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + if (err) + return SK_DROP; + + return SK_PASS; +} + +SEC("sk_lookup/multi_prog_redir1") +int multi_prog_redir1(struct bpf_sk_lookup *ctx) +{ + int ret; + + ret = select_server_a(ctx); + bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + 
+SEC("sk_lookup/multi_prog_redir2") +int multi_prog_redir2(struct bpf_sk_lookup *ctx) +{ + int ret; + + ret = select_server_a(ctx); + bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + +char _license[] SEC("license") = "Dual BSD/GPL"; +__u32 _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c index 77ae86f44db5..374ccef704e1 100644 --- a/tools/testing/selftests/bpf/progs/test_skeleton.c +++ b/tools/testing/selftests/bpf/progs/test_skeleton.c @@ -20,7 +20,9 @@ long long in4 __attribute__((aligned(64))) = 0; struct s in5 = {}; /* .rodata section */ -const volatile int in6 = 0; +const volatile struct { + const int in6; +} in = {}; /* .data section */ int out1 = -1; @@ -46,7 +48,7 @@ int handler(const void *ctx) out3 = in3; out4 = in4; out5 = in5; - out6 = in6; + out6 = in.in6; bpf_syscall = CONFIG_BPF_SYSCALL; kern_ver = LINUX_KERNEL_VERSION; diff --git a/tools/testing/selftests/bpf/progs/test_varlen.c b/tools/testing/selftests/bpf/progs/test_varlen.c new file mode 100644 index 000000000000..cd4b72c55dfe --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_varlen.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> + +#define MAX_LEN 256 + +char buf_in1[MAX_LEN] = {}; +char buf_in2[MAX_LEN] = {}; + +int test_pid = 0; +bool capture = false; + +/* .bss */ +long payload1_len1 = 0; +long payload1_len2 = 0; +long total1 = 0; +char payload1[MAX_LEN + MAX_LEN] = {}; + +/* .data */ +int payload2_len1 = -1; +int payload2_len2 = -1; +int total2 = -1; +char payload2[MAX_LEN + MAX_LEN] = { 1 }; + +int payload3_len1 = -1; +int payload3_len2 = -1; +int total3= -1; +char payload3[MAX_LEN + MAX_LEN] = { 1 }; + +int payload4_len1 = -1; +int payload4_len2 = -1; +int total4= -1; +char payload4[MAX_LEN + MAX_LEN] = { 1 }; + +SEC("raw_tp/sys_enter") +int handler64_unsigned(void *regs) +{ + int pid = bpf_get_current_pid_tgid() >> 32; + void *payload = payload1; + u64 len; + + /* ignore irrelevant invocations */ + if (test_pid != pid || !capture) + return 0; + + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]); + if (len <= MAX_LEN) { + payload += len; + payload1_len1 = len; + } + + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]); + if (len <= MAX_LEN) { + payload += len; + payload1_len2 = len; + } + + total1 = payload - (void *)payload1; + + return 0; +} + +SEC("raw_tp/sys_exit") +int handler64_signed(void *regs) +{ + int pid = bpf_get_current_pid_tgid() >> 32; + void *payload = payload3; + long len; + + /* ignore irrelevant invocations */ + if (test_pid != pid || !capture) + return 0; + + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]); + if (len >= 0) { + payload += len; + payload3_len1 = len; + } + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]); + if (len >= 0) { + payload += len; + payload3_len2 = len; + } + total3 = payload - (void *)payload3; + + return 0; +} + +SEC("tp/raw_syscalls/sys_enter") +int handler32_unsigned(void *regs) +{ + int pid = bpf_get_current_pid_tgid() >> 32; + void *payload = payload2; + u32 len; + + /* ignore irrelevant invocations */ + if (test_pid != pid || !capture) + return 0; + + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]); + if (len <= MAX_LEN) { + payload += len; + payload2_len1 = len; + } + + len = 
bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]); + if (len <= MAX_LEN) { + payload += len; + payload2_len2 = len; + } + + total2 = payload - (void *)payload2; + + return 0; +} + +SEC("tp/raw_syscalls/sys_exit") +int handler32_signed(void *regs) +{ + int pid = bpf_get_current_pid_tgid() >> 32; + void *payload = payload4; + int len; + + /* ignore irrelevant invocations */ + if (test_pid != pid || !capture) + return 0; + + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]); + if (len >= 0) { + payload += len; + payload4_len1 = len; + } + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]); + if (len >= 0) { + payload += len; + payload4_len2 = len; + } + total4 = payload - (void *)payload4; + + return 0; +} + +SEC("tp/syscalls/sys_exit_getpid") +int handler_exit(void *regs) +{ + long bla; + + if (bpf_probe_read_kernel(&bla, sizeof(bla), 0)) + return 1; + else + return 0; +} + +char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c index 5611b564d3b1..29fa09d6a6c6 100644 --- a/tools/testing/selftests/bpf/progs/test_vmlinux.c +++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c @@ -63,20 +63,20 @@ int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id) return 0; } -SEC("kprobe/hrtimer_nanosleep") -int BPF_KPROBE(handle__kprobe, - ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid) +SEC("kprobe/hrtimer_start_range_ns") +int BPF_KPROBE(handle__kprobe, struct hrtimer *timer, ktime_t tim, u64 delta_ns, + const enum hrtimer_mode mode) { - if (rqtp == MY_TV_NSEC) + if (tim == MY_TV_NSEC) kprobe_called = true; return 0; } -SEC("fentry/hrtimer_nanosleep") -int BPF_PROG(handle__fentry, - ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid) +SEC("fentry/hrtimer_start_range_ns") +int BPF_PROG(handle__fentry, struct hrtimer *timer, ktime_t tim, u64 delta_ns, + const enum hrtimer_mode mode) { - if (rqtp == MY_TV_NSEC) + if (tim == MY_TV_NSEC) fentry_called = true; return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c new file mode 100644 index 000000000000..59ee4f182ff8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +#define IFINDEX_LO 1 + +struct { + __uint(type, BPF_MAP_TYPE_CPUMAP); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_cpumap_val)); + __uint(max_entries, 4); +} cpu_map SEC(".maps"); + +SEC("xdp_redir") +int xdp_redir_prog(struct xdp_md *ctx) +{ + return bpf_redirect_map(&cpu_map, 1, 0); +} + +SEC("xdp_dummy") +int xdp_dummy_prog(struct xdp_md *ctx) +{ + return XDP_PASS; +} + +SEC("xdp_cpumap/dummy_cm") +int xdp_dummy_cm(struct xdp_md *ctx) +{ + if (ctx->ingress_ifindex == IFINDEX_LO) + return XDP_DROP; + + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/trace_printk.c b/tools/testing/selftests/bpf/progs/trace_printk.c new file mode 100644 index 000000000000..8ca7f399b670 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/trace_printk.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020, Oracle and/or its affiliates. 
+ +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +int trace_printk_ret = 0; +int trace_printk_ran = 0; + +SEC("tp/raw_syscalls/sys_enter") +int sys_enter(void *ctx) +{ + static const char fmt[] = "testing,testing %d\n"; + + trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt), + ++trace_printk_ran); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/udp_limit.c b/tools/testing/selftests/bpf/progs/udp_limit.c new file mode 100644 index 000000000000..8429b22525a7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/udp_limit.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <sys/socket.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +int invocations = 0, in_use = 0; + +SEC("cgroup/sock_create") +int sock(struct bpf_sock *ctx) +{ + __u32 key; + + if (ctx->type != SOCK_DGRAM) + return 1; + + __sync_fetch_and_add(&invocations, 1); + + if (in_use > 0) { + /* BPF_CGROUP_INET_SOCK_RELEASE is _not_ called + * when we return an error from the BPF + * program! + */ + return 0; + } + + __sync_fetch_and_add(&in_use, 1); + return 1; +} + +SEC("cgroup/sock_release") +int sock_release(struct bpf_sock *ctx) +{ + __u32 key; + + if (ctx->type != SOCK_DGRAM) + return 1; + + __sync_fetch_and_add(&invocations, 1); + __sync_fetch_and_add(&in_use, -1); + return 1; +} diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh index 9df0d2ac45f8..4f6444bcd53f 100755 --- a/tools/testing/selftests/bpf/test_kmod.sh +++ b/tools/testing/selftests/bpf/test_kmod.sh @@ -10,7 +10,13 @@ if [ "$(id -u)" != "0" ]; then exit $ksft_skip fi -SRC_TREE=../../../../ +if [ "$building_out_of_srctree" ]; then + # We are in linux-build/kselftest/bpf + OUTPUT=../../ +else + # We are in linux/tools/testing/selftests/bpf + OUTPUT=../../../../ +fi test_run() { @@ -19,8 +25,8 @@ test_run() echo "[ JIT enabled:$1 hardened:$2 ]" dmesg -C - if [ -f ${SRC_TREE}/lib/test_bpf.ko ]; then - insmod ${SRC_TREE}/lib/test_bpf.ko 2> /dev/null + if [ -f ${OUTPUT}/lib/test_bpf.ko ]; then + insmod ${OUTPUT}/lib/test_bpf.ko 2> /dev/null if [ $? -ne 0 ]; then rc=1 fi diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh index 785eabf2a593..5620919fde9e 100755 --- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh +++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh @@ -140,7 +140,7 @@ ip netns exec ns6 sysctl net.ipv6.conf.veth10.seg6_enabled=1 > /dev/null ip netns exec ns6 nc -l -6 -u -d 7330 > $TMP_FILE & ip netns exec ns1 bash -c "echo 'foobar' | nc -w0 -6 -u -p 2121 -s fb00::1 fb00::6 7330" sleep 5 # wait enough time to ensure the UDP datagram arrived to the last segment -kill -INT $! +kill -TERM $! 
if [[ $(< $TMP_FILE) != "foobar" ]]; then exit 1 diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 54fa5fa688ce..b1e4dadacd9b 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -12,6 +12,9 @@ #include <string.h> #include <execinfo.h> /* backtrace */ +#define EXIT_NO_TEST 2 +#define EXIT_ERR_SETUP_INFRA 3 + /* defined in test_progs.h */ struct test_env env = {}; @@ -111,13 +114,31 @@ static void reset_affinity() { if (err < 0) { stdio_restore(); fprintf(stderr, "Failed to reset process affinity: %d!\n", err); - exit(-1); + exit(EXIT_ERR_SETUP_INFRA); } err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); if (err < 0) { stdio_restore(); fprintf(stderr, "Failed to reset thread affinity: %d!\n", err); - exit(-1); + exit(EXIT_ERR_SETUP_INFRA); + } +} + +static void save_netns(void) +{ + env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY); + if (env.saved_netns_fd == -1) { + perror("open(/proc/self/ns/net)"); + exit(EXIT_ERR_SETUP_INFRA); + } +} + +static void restore_netns(void) +{ + if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) { + stdio_restore(); + perror("setns(CLONE_NEWNS)"); + exit(EXIT_ERR_SETUP_INFRA); } } @@ -138,8 +159,6 @@ void test__end_subtest() test->test_num, test->subtest_num, test->subtest_name, sub_error_cnt ? "FAIL" : "OK"); - reset_affinity(); - free(test->subtest_name); test->subtest_name = NULL; } @@ -366,6 +385,8 @@ enum ARG_KEYS { ARG_TEST_NAME_BLACKLIST = 'b', ARG_VERIFIER_STATS = 's', ARG_VERBOSE = 'v', + ARG_GET_TEST_CNT = 'c', + ARG_LIST_TEST_NAMES = 'l', }; static const struct argp_option opts[] = { @@ -379,6 +400,10 @@ static const struct argp_option opts[] = { "Output verifier statistics", }, { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL, "Verbose output (use -vv or -vvv for progressively verbose output)" }, + { "count", ARG_GET_TEST_CNT, NULL, 0, + "Get number of selected top-level tests " }, + { "list", ARG_LIST_TEST_NAMES, NULL, 0, + "List test names that would run (without running them) " }, {}, }; @@ -511,6 +536,12 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) } } break; + case ARG_GET_TEST_CNT: + env->get_test_cnt = true; + break; + case ARG_LIST_TEST_NAMES: + env->list_test_names = true; + break; case ARGP_KEY_ARG: argp_usage(state); break; @@ -643,6 +674,7 @@ int main(int argc, char **argv) return -1; } + save_netns(); stdio_hijack(); for (i = 0; i < prog_test_cnt; i++) { struct prog_test_def *test = &prog_test_defs[i]; @@ -654,6 +686,17 @@ int main(int argc, char **argv) test->test_num, test->test_name)) continue; + if (env.get_test_cnt) { + env.succ_cnt++; + continue; + } + + if (env.list_test_names) { + fprintf(env.stdout, "%s\n", test->test_name); + env.succ_cnt++; + continue; + } + test->run_test(); /* ensure last sub-test is finalized properly */ if (test->subtest_name) @@ -673,19 +716,34 @@ int main(int argc, char **argv) test->error_cnt ? 
"FAIL" : "OK"); reset_affinity(); + restore_netns(); if (test->need_cgroup_cleanup) cleanup_cgroup_environment(); } stdio_restore(); + + if (env.get_test_cnt) { + printf("%d\n", env.succ_cnt); + goto out; + } + + if (env.list_test_names) + goto out; + fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); +out: free_str_set(&env.test_selector.blacklist); free_str_set(&env.test_selector.whitelist); free(env.test_selector.num_set); free_str_set(&env.subtest_selector.blacklist); free_str_set(&env.subtest_selector.whitelist); free(env.subtest_selector.num_set); + close(env.saved_netns_fd); + + if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0) + return EXIT_NO_TEST; return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS; } diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index f4503c926aca..6e09bf738473 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -66,6 +66,8 @@ struct test_env { enum verbosity verbosity; bool jit_enabled; + bool get_test_cnt; + bool list_test_names; struct prog_test_def *test; FILE *stdout; @@ -78,6 +80,8 @@ struct test_env { int sub_succ_cnt; /* successful sub-tests */ int fail_cnt; /* total failed tests + sub-tests */ int skip_cnt; /* skipped tests */ + + int saved_netns_fd; }; extern struct test_env env; diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c new file mode 100644 index 000000000000..2ad5f974451c --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c @@ -0,0 +1,492 @@ +{ + "valid 1,2,4,8-byte reads from bpf_sk_lookup", + .insns = { + /* 1-byte read from family field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 3), + /* 2-byte read from family field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 2), + /* 4-byte read from family field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + + /* 1-byte read from protocol field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 3), + /* 2-byte read from protocol field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 2), + /* 4-byte read from protocol field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + + /* 1-byte read from remote_ip4 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4) + 
2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4) + 3), + /* 2-byte read from remote_ip4 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4) + 2), + /* 4-byte read from remote_ip4 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + + /* 1-byte read from remote_ip6 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 4), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 5), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 6), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 7), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 8), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 9), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 10), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 11), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 12), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 13), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 14), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 15), + /* 2-byte read from remote_ip6 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 4), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 6), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 8), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 10), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 12), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 14), + /* 4-byte read from remote_ip6 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 4), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 12), + + /* 1-byte read from remote_port field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port) + 2), + 
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port) + 3), + /* 2-byte read from remote_port field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port) + 2), + /* 4-byte read from remote_port field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + + /* 1-byte read from local_ip4 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 3), + /* 2-byte read from local_ip4 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 2), + /* 4-byte read from local_ip4 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + + /* 1-byte read from local_ip6 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 4), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 5), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 6), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 7), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 8), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 9), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 10), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 11), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 12), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 13), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 14), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 15), + /* 2-byte read from local_ip6 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 4), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 6), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 8), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 10), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 12), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 14), + /* 4-byte read 
from local_ip6 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 4), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 12), + + /* 1-byte read from local_port field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 3), + /* 2-byte read from local_port field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 2), + /* 4-byte read from local_port field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + + /* 8-byte read from sk field */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* invalid 8-byte reads from a 4-byte fields in bpf_sk_lookup */ +{ + "invalid 8-byte read from bpf_sk_lookup family field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup protocol field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup remote_ip4 field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup remote_ip6 field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup remote_port field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup local_ip4 field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + BPF_MOV32_IMM(BPF_REG_0, 0), + 
BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup local_ip6 field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup local_port field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* invalid 1,2,4-byte reads from 8-byte fields in bpf_sk_lookup */ +{ + "invalid 4-byte read from bpf_sk_lookup sk field", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 2-byte read from bpf_sk_lookup sk field", + .insns = { + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 1-byte read from bpf_sk_lookup sk field", + .insns = { + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* out of bounds and unaligned reads from bpf_sk_lookup */ +{ + "invalid 4-byte read past end of bpf_sk_lookup", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + sizeof(struct bpf_sk_lookup)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte unaligned read from bpf_sk_lookup at odd offset", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte unaligned read from bpf_sk_lookup at even offset", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 2), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* in-bound and out-of-bound writes to bpf_sk_lookup */ +{ + "invalid 8-byte write to bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte write to 
bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 2-byte write to bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 1-byte write to bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte write past end of bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + sizeof(struct bpf_sk_lookup)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, diff --git a/tools/testing/selftests/bpf/verifier/map_ptr.c b/tools/testing/selftests/bpf/verifier/map_ptr.c new file mode 100644 index 000000000000..b52209db8250 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/map_ptr.c @@ -0,0 +1,62 @@ +{ + "bpf_map_ptr: read with negative offset rejected", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = REJECT, + .errstr = "R1 is bpf_array invalid negative access: off=-8", +}, +{ + "bpf_map_ptr: write rejected", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = REJECT, + .errstr = "only read from bpf_array is supported", +}, +{ + "bpf_map_ptr: read non-existent field rejected", + .insns = { + BPF_MOV64_IMM(BPF_REG_6, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 1), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = REJECT, + .errstr = "cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4", +}, +{ + "bpf_map_ptr: read ops field accepted", + .insns = { + BPF_MOV64_IMM(BPF_REG_6, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = ACCEPT, + .retval = 
1, +}, diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c index cd26ee6b7b1d..1f2b8c4cb26d 100644 --- a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c +++ b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c @@ -56,7 +56,7 @@ .fixup_map_in_map = { 16 }, .fixup_map_array_48b = { 13 }, .result = REJECT, - .errstr = "R0 invalid mem access 'map_ptr'", + .errstr = "only read from bpf_array is supported", }, { "cond: two branches returning different map pointers for lookup (tail, tail)", diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c index 97ee658e1242..ed4e76b24649 100644 --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c @@ -836,3 +836,41 @@ .errstr = "R0 invalid mem access 'inv'", .errstr_unpriv = "R0 pointer -= pointer prohibited", }, +{ + "32bit pkt_ptr -= scalar", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40), + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2), + BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_7), + BPF_ALU32_REG(BPF_SUB, BPF_REG_6, BPF_REG_4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "32bit scalar -= pkt_ptr", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40), + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2), + BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_6), + BPF_ALU32_REG(BPF_SUB, BPF_REG_4, BPF_REG_7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh index 0d347d48c112..45042105ead7 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh @@ -121,6 +121,7 @@ h1_destroy() h2_create() { host_create $h2 2 + tc qdisc add dev $h2 clsact # Some of the tests in this suite use multicast traffic. As this traffic # enters BR2_10 resp. BR2_11, it is flooded to all other ports. Thus @@ -141,6 +142,7 @@ h2_create() h2_destroy() { ethtool -s $h2 autoneg on + tc qdisc del dev $h2 clsact host_destroy $h2 } @@ -336,6 +338,17 @@ get_qdisc_npackets() qdisc_stats_get $swp3 $(get_qdisc_handle $vlan) .packets } +send_packets() +{ + local vlan=$1; shift + local proto=$1; shift + local pkts=$1; shift + + $MZ $h2.$vlan -p 8000 -a own -b $h3_mac \ + -A $(ipaddr 2 $vlan) -B $(ipaddr 3 $vlan) \ + -t $proto -q -c $pkts "$@" +} + # This sends traffic in an attempt to build a backlog of $size. Returns 0 on # success. After 10 failed attempts it bails out and returns 1. It dumps the # backlog size to stdout. 
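The hunk that follows switches build_backlog() over to the send_packets() helper added above. As a minimal usage sketch (the VLAN, protocol and packet count are the same arguments the callers in this file pass; the interface, MAC and ipaddr details come from the surrounding library):

    # Push 100 UDP packets on VLAN 10 towards H3 via the helper above,
    # which expands to roughly:
    #   $MZ $h2.10 -p 8000 -a own -b $h3_mac -A $(ipaddr 2 10) -B $(ipaddr 3 10) -t udp -q -c 100
    send_packets 10 udp 100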
@@ -364,9 +377,7 @@ build_backlog() return 1 fi - $MZ $h2.$vlan -p 8000 -a own -b $h3_mac \ - -A $(ipaddr 2 $vlan) -B $(ipaddr 3 $vlan) \ - -t $proto -q -c $pkts "$@" + send_packets $vlan $proto $pkts "$@" done } @@ -531,3 +542,92 @@ do_mc_backlog_test() log_test "TC $((vlan - 10)): Qdisc reports MC backlog" } + +do_drop_test() +{ + local vlan=$1; shift + local limit=$1; shift + local trigger=$1; shift + local subtest=$1; shift + local fetch_counter=$1; shift + local backlog + local base + local now + local pct + + RET=0 + + start_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) $h3_mac + + # Create a bit of a backlog and observe no mirroring due to drops. + qevent_rule_install_$subtest + base=$($fetch_counter) + + build_backlog $vlan $((2 * limit / 3)) udp >/dev/null + + busywait 1100 until_counter_is ">= $((base + 1))" $fetch_counter >/dev/null + check_fail $? "Spurious packets observed without buffer pressure" + + qevent_rule_uninstall_$subtest + + # Push to the queue until it's at the limit. The configured limit is + # rounded by the qdisc and then by the driver, so this is the best we + # can do to get to the real limit of the system. Do this with the rules + # uninstalled so that the inevitable drops don't get counted. + build_backlog $vlan $((3 * limit / 2)) udp >/dev/null + + qevent_rule_install_$subtest + base=$($fetch_counter) + + send_packets $vlan udp 11 + + now=$(busywait 1100 until_counter_is ">= $((base + 10))" $fetch_counter) + check_err $? "Dropped packets not observed: 11 expected, $((now - base)) seen" + + # When no extra traffic is injected, there should be no mirroring. + busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null + check_fail $? "Spurious packets observed" + + # When the rule is uninstalled, there should be no mirroring. + qevent_rule_uninstall_$subtest + send_packets $vlan udp 11 + busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null + check_fail $? 
"Spurious packets observed after uninstall" + + log_test "TC $((vlan - 10)): ${trigger}ped packets $subtest'd" + + stop_traffic + sleep 1 +} + +qevent_rule_install_mirror() +{ + tc filter add block 10 pref 1234 handle 102 matchall skip_sw \ + action mirred egress mirror dev $swp2 hw_stats disabled +} + +qevent_rule_uninstall_mirror() +{ + tc filter del block 10 pref 1234 handle 102 matchall +} + +qevent_counter_fetch_mirror() +{ + tc_rule_handle_stats_get "dev $h2 ingress" 101 +} + +do_drop_mirror_test() +{ + local vlan=$1; shift + local limit=$1; shift + local qevent_name=$1; shift + + tc filter add dev $h2 ingress pref 1 handle 101 prot ip \ + flower skip_sw ip_proto udp \ + action drop + + do_drop_test "$vlan" "$limit" "$qevent_name" mirror \ + qevent_counter_fetch_mirror + + tc filter del dev $h2 ingress pref 1 handle 101 flower +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh index 1c36c576613b..c8968b041bea 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh @@ -7,6 +7,7 @@ ALL_TESTS=" ecn_nodrop_test red_test mc_backlog_test + red_mirror_test " : ${QDISC:=ets} source sch_red_core.sh @@ -83,6 +84,16 @@ mc_backlog_test() uninstall_qdisc } +red_mirror_test() +{ + install_qdisc qevent early_drop block 10 + + do_drop_mirror_test 10 $BACKLOG1 early_drop + do_drop_mirror_test 11 $BACKLOG2 early_drop + + uninstall_qdisc +} + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh index 558667ea11ec..ede9c38d3eff 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh @@ -7,6 +7,7 @@ ALL_TESTS=" ecn_nodrop_test red_test mc_backlog_test + red_mirror_test " source sch_red_core.sh @@ -57,6 +58,13 @@ mc_backlog_test() uninstall_qdisc } +red_mirror_test() +{ + install_qdisc qevent early_drop block 10 + do_drop_mirror_test 10 $BACKLOG + uninstall_qdisc +} + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh index fd583a171db7..d7cf33a3f18d 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh @@ -28,7 +28,7 @@ cleanup() trap cleanup EXIT -ALL_TESTS="router tc_flower mirror_gre" +ALL_TESTS="router tc_flower mirror_gre tc_police" for current_test in ${TESTS:-$ALL_TESTS}; do source ${current_test}_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_police_scale.sh new file mode 100644 index 000000000000..e79ac0dad1f4 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_police_scale.sh @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../tc_police_scale.sh + +tc_police_get_target() +{ + local should_fail=$1; shift + local target + + target=$(devlink_resource_size_get global_policers single_rate_policers) + + if ((! 
should_fail)); then + echo $target + else + echo $((target + 1)) + fi +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh index 43ba1b438f6d..43f662401bc3 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh @@ -22,7 +22,7 @@ cleanup() devlink_sp_read_kvd_defaults trap cleanup EXIT -ALL_TESTS="router tc_flower mirror_gre" +ALL_TESTS="router tc_flower mirror_gre tc_police" for current_test in ${TESTS:-$ALL_TESTS}; do source ${current_test}_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_police_scale.sh new file mode 100644 index 000000000000..e79ac0dad1f4 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_police_scale.sh @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../tc_police_scale.sh + +tc_police_get_target() +{ + local should_fail=$1; shift + local target + + target=$(devlink_resource_size_get global_policers single_rate_policers) + + if ((! should_fail)); then + echo $target + else + echo $((target + 1)) + fi +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_police_occ.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_police_occ.sh new file mode 100755 index 000000000000..448b75c1545a --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_police_occ.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test that policers shared by different tc filters are correctly reference +# counted by observing policers' occupancy via devlink-resource. + +lib_dir=$(dirname $0)/../../../net/forwarding + +ALL_TESTS=" + tc_police_occ_test +" +NUM_NETIFS=2 +source $lib_dir/lib.sh +source $lib_dir/devlink_lib.sh + +h1_create() +{ + simple_if_init $h1 +} + +h1_destroy() +{ + simple_if_fini $h1 +} + +switch_create() +{ + simple_if_init $swp1 + tc qdisc add dev $swp1 clsact +} + +switch_destroy() +{ + tc qdisc del dev $swp1 clsact + simple_if_fini $swp1 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + vrf_prepare + + h1_create + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + h1_destroy + + vrf_cleanup +} + +tc_police_occ_get() +{ + devlink_resource_occ_get global_policers single_rate_policers +} + +tc_police_occ_test() +{ + RET=0 + + local occ=$(tc_police_occ_get) + + tc filter add dev $swp1 ingress pref 1 handle 101 proto ip \ + flower skip_sw \ + action police rate 100mbit burst 100k conform-exceed drop/ok + (( occ + 1 == $(tc_police_occ_get) )) + check_err $? "Got occupancy $(tc_police_occ_get), expected $((occ + 1))" + + tc filter del dev $swp1 ingress pref 1 handle 101 flower + (( occ == $(tc_police_occ_get) )) + check_err $? "Got occupancy $(tc_police_occ_get), expected $occ" + + tc filter add dev $swp1 ingress pref 1 handle 101 proto ip \ + flower skip_sw \ + action police rate 100mbit burst 100k conform-exceed drop/ok \ + index 10 + tc filter add dev $swp1 ingress pref 2 handle 102 proto ip \ + flower skip_sw action police index 10 + + (( occ + 1 == $(tc_police_occ_get) )) + check_err $? "Got occupancy $(tc_police_occ_get), expected $((occ + 1))" + + tc filter del dev $swp1 ingress pref 2 handle 102 flower + (( occ + 1 == $(tc_police_occ_get) )) + check_err $? 
"Got occupancy $(tc_police_occ_get), expected $((occ + 1))" + + tc filter del dev $swp1 ingress pref 1 handle 101 flower + (( occ == $(tc_police_occ_get) )) + check_err $? "Got occupancy $(tc_police_occ_get), expected $occ" + + log_test "tc police occupancy" +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh new file mode 100644 index 000000000000..4b96561c462f --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: GPL-2.0 + +TC_POLICE_NUM_NETIFS=2 + +tc_police_h1_create() +{ + simple_if_init $h1 +} + +tc_police_h1_destroy() +{ + simple_if_fini $h1 +} + +tc_police_switch_create() +{ + simple_if_init $swp1 + tc qdisc add dev $swp1 clsact +} + +tc_police_switch_destroy() +{ + tc qdisc del dev $swp1 clsact + simple_if_fini $swp1 +} + +tc_police_rules_create() +{ + local count=$1; shift + local should_fail=$1; shift + + TC_POLICE_BATCH_FILE="$(mktemp)" + + for ((i = 0; i < count; ++i)); do + cat >> $TC_POLICE_BATCH_FILE <<-EOF + filter add dev $swp1 ingress \ + prot ip \ + flower skip_sw \ + action police rate 10mbit burst 100k \ + conform-exceed drop/ok + EOF + done + + tc -b $TC_POLICE_BATCH_FILE + check_err_fail $should_fail $? "Rule insertion" +} + +__tc_police_test() +{ + local count=$1; shift + local should_fail=$1; shift + + tc_police_rules_create $count $should_fail + + offload_count=$(tc filter show dev $swp1 ingress | grep in_hw | wc -l) + ((offload_count == count)) + check_err_fail $should_fail $? "tc police offload count" +} + +tc_police_test() +{ + local count=$1; shift + local should_fail=$1; shift + + if ! tc_offload_check $TC_POLICE_NUM_NETIFS; then + check_err 1 "Could not test offloaded functionality" + return + fi + + __tc_police_test $count $should_fail +} + +tc_police_setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + vrf_prepare + + tc_police_h1_create + tc_police_switch_create +} + +tc_police_cleanup() +{ + pre_cleanup + + tc_police_switch_destroy + tc_police_h1_destroy + + vrf_cleanup +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh index 9241250c5921..553cb9fad508 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh @@ -11,6 +11,8 @@ ALL_TESTS=" matchall_mirror_behind_flower_ingress_test matchall_sample_behind_flower_ingress_test matchall_mirror_behind_flower_egress_test + police_limits_test + multi_police_test " NUM_NETIFS=2 @@ -287,6 +289,80 @@ matchall_mirror_behind_flower_egress_test() matchall_behind_flower_egress_test "mirror" "mirred egress mirror dev $swp2" } +police_limits_test() +{ + RET=0 + + tc qdisc add dev $swp1 clsact + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 0.5kbit burst 1m conform-exceed drop/ok + check_fail $? "Incorrect success to add police action with too low rate" + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 2.5tbit burst 1g conform-exceed drop/ok + check_fail $? "Incorrect success to add police action with too high rate" + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 1.5kbit burst 1m conform-exceed drop/ok + check_err $? 
"Failed to add police action with low rate" + + tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 1.9tbit burst 1g conform-exceed drop/ok + check_err $? "Failed to add police action with high rate" + + tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 1.5kbit burst 512b conform-exceed drop/ok + check_fail $? "Incorrect success to add police action with too low burst size" + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 1.5kbit burst 2k conform-exceed drop/ok + check_err $? "Failed to add police action with low burst size" + + tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower + + tc qdisc del dev $swp1 clsact + + log_test "police rate and burst limits" +} + +multi_police_test() +{ + RET=0 + + # It is forbidden in mlxsw driver to have multiple police + # actions in a single rule. + + tc qdisc add dev $swp1 clsact + + tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 \ + flower skip_sw \ + action police rate 100mbit burst 100k conform-exceed drop/ok + check_err $? "Failed to add rule with single police action" + + tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower + + tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 \ + flower skip_sw \ + action police rate 100mbit burst 100k conform-exceed drop/pipe \ + action police rate 200mbit burst 200k conform-exceed drop/ok + check_fail $? "Incorrect success to add rule with two police actions" + + tc qdisc del dev $swp1 clsact + + log_test "multi police" +} + setup_prepare() { swp1=${NETIFS[p1]} diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh new file mode 100644 index 000000000000..ba1d53b9f815 --- /dev/null +++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh @@ -0,0 +1,786 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only + +VNI_GEN=$RANDOM +NSIM_ID=$((RANDOM % 1024)) +NSIM_DEV_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_ID +NSIM_DEV_DFS=/sys/kernel/debug/netdevsim/netdevsim$NSIM_ID +NSIM_NETDEV= +HAS_ETHTOOL= +EXIT_STATUS=0 +num_cases=0 +num_errors=0 + +clean_up_devs=( ) + +function err_cnt { + echo "ERROR:" $@ + EXIT_STATUS=1 + ((num_errors++)) + ((num_cases++)) +} + +function pass_cnt { + ((num_cases++)) +} + +function cleanup_tuns { + for dev in "${clean_up_devs[@]}"; do + [ -e /sys/class/net/$dev ] && ip link del dev $dev + done + clean_up_devs=( ) +} + +function cleanup_nsim { + if [ -e $NSIM_DEV_SYS ]; then + echo $NSIM_ID > /sys/bus/netdevsim/del_device + fi +} + +function cleanup { + cleanup_tuns + cleanup_nsim +} + +trap cleanup EXIT + +function new_vxlan { + local dev=$1 + local dstport=$2 + local lower=$3 + local ipver=$4 + local flags=$5 + + local group ipfl + + [ "$ipver" != '6' ] && group=239.1.1.1 || group=fff1::1 + [ "$ipver" != '6' ] || ipfl="-6" + + [[ ! 
"$flags" =~ "external" ]] && flags="$flags id $((VNI_GEN++))" + + ip $ipfl link add $dev type vxlan \ + group $group \ + dev $lower \ + dstport $dstport \ + $flags + + ip link set dev $dev up + + clean_up_devs=("${clean_up_devs[@]}" $dev) + + check_tables +} + +function new_geneve { + local dev=$1 + local dstport=$2 + local ipver=$3 + local flags=$4 + + local group ipfl + + [ "$ipver" != '6' ] && remote=1.1.1.2 || group=::2 + [ "$ipver" != '6' ] || ipfl="-6" + + [[ ! "$flags" =~ "external" ]] && flags="$flags vni $((VNI_GEN++))" + + ip $ipfl link add $dev type geneve \ + remote $remote \ + dstport $dstport \ + $flags + + ip link set dev $dev up + + clean_up_devs=("${clean_up_devs[@]}" $dev) + + check_tables +} + +function del_dev { + local dev=$1 + + ip link del dev $dev + check_tables +} + +# Helpers for netdevsim port/type encoding +function mke { + local port=$1 + local type=$2 + + echo $((port << 16 | type)) +} + +function pre { + local val=$1 + + echo -e "port: $((val >> 16))\ttype: $((val & 0xffff))" +} + +function pre_ethtool { + local val=$1 + local port=$((val >> 16)) + local type=$((val & 0xffff)) + + case $type in + 1) + type_name="vxlan" + ;; + 2) + type_name="geneve" + ;; + 4) + type_name="vxlan-gpe" + ;; + *) + type_name="bit X" + ;; + esac + + echo "port $port, $type_name" +} + +function check_table { + local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1 + local -n expected=$2 + local last=$3 + + read -a have < $path + + if [ ${#expected[@]} -ne ${#have[@]} ]; then + echo "check_table: BAD NUMBER OF ITEMS" + return 0 + fi + + for i in "${!expected[@]}"; do + if [ -n "$HAS_ETHTOOL" -a ${expected[i]} -ne 0 ]; then + pp_expected=`pre_ethtool ${expected[i]}` + ethtool --show-tunnels $NSIM_NETDEV | grep "$pp_expected" >/dev/null + if [ $? -ne 0 -a $last -ne 0 ]; then + err_cnt "ethtool table $1 on port $port: $pfx - $msg" + echo " check_table: ethtool does not contain '$pp_expected'" + ethtool --show-tunnels $NSIM_NETDEV + return 0 + + fi + fi + + if [ ${expected[i]} != ${have[i]} ]; then + if [ $last -ne 0 ]; then + err_cnt "table $1 on port $port: $pfx - $msg" + echo " check_table: wrong entry $i" + echo " expected: `pre ${expected[i]}`" + echo " have: `pre ${have[i]}`" + return 0 + fi + return 1 + fi + done + + pass_cnt + return 0 +} + +function check_tables { + # Need retries in case we have workqueue making the changes + local retries=10 + + while ! check_table 0 exp0 $((retries == 0)); do + sleep 0.02 + ((retries--)) + done + while ! 
check_table 1 exp1 $((retries == 0)); do + sleep 0.02 + ((retries--)) + done +} + +function print_table { + local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1 + read -a have < $path + + tree $NSIM_DEV_DFS/ + + echo "Port $port table $1:" + + for i in "${!have[@]}"; do + echo " `pre ${have[i]}`" + done + +} + +function print_tables { + print_table 0 + print_table 1 +} + +function get_netdev_name { + local -n old=$1 + + new=$(ls /sys/class/net) + + for netdev in $new; do + for check in $old; do + [ $netdev == $check ] && break + done + + if [ $netdev != $check ]; then + echo $netdev + break + fi + done +} + +### +### Code start +### + +# Probe ethtool support +ethtool -h | grep show-tunnels 2>&1 >/dev/null && HAS_ETHTOOL=y + +modprobe netdevsim + +# Basic test +pfx="basic" + +for port in 0 1; do + old_netdevs=$(ls /sys/class/net) + if [ $port -eq 0 ]; then + echo $NSIM_ID > /sys/bus/netdevsim/new_device + else + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + echo 1 > $NSIM_DEV_SYS/new_port + fi + NSIM_NETDEV=`get_netdev_name old_netdevs` + + msg="new NIC device created" + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) + check_tables + + msg="VxLAN v4 devices" + exp0=( `mke 4789 1` 0 0 0 ) + new_vxlan vxlan0 4789 $NSIM_NETDEV + new_vxlan vxlan1 4789 $NSIM_NETDEV + + msg="VxLAN v4 devices go down" + exp0=( 0 0 0 0 ) + ifconfig vxlan1 down + ifconfig vxlan0 down + check_tables + + msg="VxLAN v6 devices" + exp0=( `mke 4789 1` 0 0 0 ) + new_vxlan vxlanA 4789 $NSIM_NETDEV 6 + + for ifc in vxlan0 vxlan1; do + ifconfig $ifc up + done + + new_vxlan vxlanB 4789 $NSIM_NETDEV 6 + + msg="another VxLAN v6 devices" + exp0=( `mke 4789 1` `mke 4790 1` 0 0 ) + new_vxlan vxlanC 4790 $NSIM_NETDEV 6 + + msg="Geneve device" + exp1=( `mke 6081 2` 0 0 0 ) + new_geneve gnv0 6081 + + msg="NIC device goes down" + ifconfig $NSIM_NETDEV down + if [ $port -eq 1 ]; then + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) + fi + check_tables + msg="NIC device goes up again" + ifconfig $NSIM_NETDEV up + exp0=( `mke 4789 1` `mke 4790 1` 0 0 ) + exp1=( `mke 6081 2` 0 0 0 ) + check_tables + + cleanup_tuns + + msg="tunnels destroyed" + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) + check_tables + + modprobe -r geneve + modprobe -r vxlan + modprobe -r udp_tunnel + + check_tables +done + +modprobe -r netdevsim + +# Module tests +pfx="module tests" + +if modinfo netdevsim | grep udp_tunnel >/dev/null; then + err_cnt "netdevsim depends on udp_tunnel" +else + pass_cnt +fi + +modprobe netdevsim + +old_netdevs=$(ls /sys/class/net) +port=0 +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port +echo 1000 > $NSIM_DEV_DFS/udp_ports_sleep +echo 0 > $NSIM_DEV_SYS/new_port +NSIM_NETDEV=`get_netdev_name old_netdevs` + +msg="create VxLANs" +exp0=( 0 0 0 0 ) # sleep is longer than out wait +new_vxlan vxlan0 10000 $NSIM_NETDEV + +modprobe -r vxlan +modprobe -r udp_tunnel + +msg="remove tunnels" +exp0=( 0 0 0 0 ) +check_tables + +msg="create VxLANs" +exp0=( 0 0 0 0 ) # sleep is longer than out wait +new_vxlan vxlan0 10000 $NSIM_NETDEV + +exp0=( 0 0 0 0 ) + +modprobe -r netdevsim +modprobe netdevsim + +# Overflow the table + +function overflow_table0 { + local pfx=$1 + + msg="create VxLANs 1/5" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlan0 10000 $NSIM_NETDEV + + msg="create VxLANs 2/5" + exp0=( `mke 10000 1` `mke 10001 1` 0 0 ) + new_vxlan vxlan1 10001 $NSIM_NETDEV + + msg="create VxLANs 3/5" + exp0=( `mke 10000 1` `mke 10001 1` `mke 10002 1` 0 ) + new_vxlan vxlan2 10002 $NSIM_NETDEV + + msg="create VxLANs 
4/5" + exp0=( `mke 10000 1` `mke 10001 1` `mke 10002 1` `mke 10003 1` ) + new_vxlan vxlan3 10003 $NSIM_NETDEV + + msg="create VxLANs 5/5" + new_vxlan vxlan4 10004 $NSIM_NETDEV +} + +function overflow_table1 { + local pfx=$1 + + msg="create GENEVE 1/5" + exp1=( `mke 20000 2` 0 0 0 ) + new_geneve gnv0 20000 + + msg="create GENEVE 2/5" + exp1=( `mke 20000 2` `mke 20001 2` 0 0 ) + new_geneve gnv1 20001 + + msg="create GENEVE 3/5" + exp1=( `mke 20000 2` `mke 20001 2` `mke 20002 2` 0 ) + new_geneve gnv2 20002 + + msg="create GENEVE 4/5" + exp1=( `mke 20000 2` `mke 20001 2` `mke 20002 2` `mke 20003 2` ) + new_geneve gnv3 20003 + + msg="create GENEVE 5/5" + new_geneve gnv4 20004 +} + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + overflow_table0 "overflow NIC table" + overflow_table1 "overflow NIC table" + + msg="replace VxLAN in overflow table" + exp0=( `mke 10000 1` `mke 10004 1` `mke 10002 1` `mke 10003 1` ) + del_dev vxlan1 + + msg="vacate VxLAN in overflow table" + exp0=( `mke 10000 1` `mke 10004 1` 0 `mke 10003 1` ) + del_dev vxlan2 + + msg="replace GENEVE in overflow table" + exp1=( `mke 20000 2` `mke 20004 2` `mke 20002 2` `mke 20003 2` ) + del_dev gnv1 + + msg="vacate GENEVE in overflow table" + exp1=( `mke 20000 2` `mke 20004 2` 0 `mke 20003 2` ) + del_dev gnv2 + + msg="table sharing - share" + exp1=( `mke 20000 2` `mke 20004 2` `mke 30001 4` `mke 20003 2` ) + new_vxlan vxlanG0 30001 $NSIM_NETDEV 4 "gpe external" + + msg="table sharing - overflow" + new_vxlan vxlanG1 30002 $NSIM_NETDEV 4 "gpe external" + msg="table sharing - overflow v6" + new_vxlan vxlanG2 30002 $NSIM_NETDEV 6 "gpe external" + + exp1=( `mke 20000 2` `mke 30002 4` `mke 30001 4` `mke 20003 2` ) + del_dev gnv4 + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +cleanup_nsim + +# Sync all +pfx="sync all" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port +echo 1 > $NSIM_DEV_DFS/udp_ports_sync_all + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + overflow_table0 "overflow NIC table" + overflow_table1 "overflow NIC table" + + msg="replace VxLAN in overflow table" + exp0=( `mke 10000 1` `mke 10004 1` `mke 10002 1` `mke 10003 1` ) + del_dev vxlan1 + + msg="vacate VxLAN in overflow table" + exp0=( `mke 10000 1` `mke 10004 1` 0 `mke 10003 1` ) + del_dev vxlan2 + + msg="replace GENEVE in overflow table" + exp1=( `mke 20000 2` `mke 20004 2` `mke 20002 2` `mke 20003 2` ) + del_dev gnv1 + + msg="vacate GENEVE in overflow table" + exp1=( `mke 20000 2` `mke 20004 2` 0 `mke 20003 2` ) + del_dev gnv2 + + msg="table sharing - share" + exp1=( `mke 20000 2` `mke 20004 2` `mke 30001 4` `mke 20003 2` ) + new_vxlan vxlanG0 30001 $NSIM_NETDEV 4 "gpe external" + + msg="table sharing - overflow" + new_vxlan vxlanG1 30002 $NSIM_NETDEV 4 "gpe external" + msg="table sharing - overflow v6" + new_vxlan vxlanG2 30002 $NSIM_NETDEV 6 "gpe external" + + exp1=( `mke 20000 2` `mke 30002 4` `mke 30001 4` `mke 20003 2` ) + del_dev gnv4 + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + 
+cleanup_nsim + +# Destroy full NIC +pfx="destroy full" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + overflow_table0 "destroy NIC" + overflow_table1 "destroy NIC" + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +cleanup_nsim + +# IPv4 only +pfx="IPv4 only" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port +echo 1 > $NSIM_DEV_DFS/udp_ports_ipv4_only + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + msg="create VxLANs v6" + new_vxlan vxlanA0 10000 $NSIM_NETDEV 6 + + msg="create VxLANs v6" + new_vxlan vxlanA1 10000 $NSIM_NETDEV 6 + + ip link set dev vxlanA0 down + ip link set dev vxlanA0 up + check_tables + + msg="create VxLANs v4" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlan0 10000 $NSIM_NETDEV + + msg="down VxLANs v4" + exp0=( 0 0 0 0 ) + ip link set dev vxlan0 down + check_tables + + msg="up VxLANs v4" + exp0=( `mke 10000 1` 0 0 0 ) + ip link set dev vxlan0 up + check_tables + + msg="destroy VxLANs v4" + exp0=( 0 0 0 0 ) + del_dev vxlan0 + + msg="recreate VxLANs v4" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlan0 10000 $NSIM_NETDEV + + del_dev vxlanA0 + del_dev vxlanA1 + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +cleanup_nsim + +# Failures +pfx="error injection" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error + + msg="1 - create VxLANs v6" + exp0=( 0 0 0 0 ) + new_vxlan vxlanA0 10000 $NSIM_NETDEV 6 + + msg="1 - create VxLANs v4" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlan0 10000 $NSIM_NETDEV + + msg="1 - remove VxLANs v4" + del_dev vxlan0 + + msg="1 - remove VxLANs v6" + exp0=( 0 0 0 0 ) + del_dev vxlanA0 + + msg="2 - create GENEVE" + exp1=( `mke 20000 2` 0 0 0 ) + new_geneve gnv0 20000 + + msg="2 - destroy GENEVE" + echo 2 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error + exp1=( `mke 20000 2` 0 0 0 ) + del_dev gnv0 + + msg="2 - create second GENEVE" + exp1=( 0 `mke 20001 2` 0 0 ) + new_geneve gnv0 20001 + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +cleanup_nsim + +# netdev flags +pfx="netdev flags" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + msg="create VxLANs v6" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlanA0 10000 $NSIM_NETDEV 6 + + msg="create VxLANs v4" + new_vxlan vxlan0 10000 $NSIM_NETDEV + + msg="turn off" + exp0=( 0 0 0 0 ) + ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload off + check_tables + + msg="turn on" + exp0=( `mke 10000 1` 0 0 0 ) + ethtool -K 
$NSIM_NETDEV rx-udp_tunnel-port-offload on + check_tables + + msg="remove both" + del_dev vxlanA0 + exp0=( 0 0 0 0 ) + del_dev vxlan0 + check_tables + + ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload off + + msg="create VxLANs v4 - off" + exp0=( 0 0 0 0 ) + new_vxlan vxlan0 10000 $NSIM_NETDEV + + msg="created off - turn on" + exp0=( `mke 10000 1` 0 0 0 ) + ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload on + check_tables + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +cleanup_nsim + +# device initiated reset +pfx="reset notification" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + msg="create VxLANs v6" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlanA0 10000 $NSIM_NETDEV 6 + + msg="create VxLANs v4" + new_vxlan vxlan0 10000 $NSIM_NETDEV + + echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset + check_tables + + msg="NIC device goes down" + ifconfig $NSIM_NETDEV down + if [ $port -eq 1 ]; then + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) + fi + check_tables + + echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset + check_tables + + msg="NIC device goes up again" + ifconfig $NSIM_NETDEV up + exp0=( `mke 10000 1` 0 0 0 ) + check_tables + + msg="remove both" + del_dev vxlanA0 + exp0=( 0 0 0 0 ) + del_dev vxlan0 + check_tables + + echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset + check_tables + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +modprobe -r netdevsim + +if [ $num_errors -eq 0 ]; then + echo "PASSED all $num_cases checks" +else + echo "FAILED $num_errors/$num_cases checks" +fi + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 895ec992b2f1..9491bbaa0831 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -17,6 +17,8 @@ TEST_PROGS += route_localnet.sh TEST_PROGS += reuseaddr_ports_exhausted.sh TEST_PROGS += txtimestamp.sh TEST_PROGS += vrf-xfrm-tests.sh +TEST_PROGS += rxtimestamp.sh +TEST_PROGS += devlink_port_split.py TEST_PROGS_EXTENDED := in_netns.sh TEST_GEN_FILES = socket nettest TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any diff --git a/tools/testing/selftests/net/devlink_port_split.py b/tools/testing/selftests/net/devlink_port_split.py new file mode 100755 index 000000000000..58bb7e9b88ce --- /dev/null +++ b/tools/testing/selftests/net/devlink_port_split.py @@ -0,0 +1,277 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0 + +from subprocess import PIPE, Popen +import json +import time +import argparse +import collections +import sys + +# +# Test port split configuration using devlink-port lanes attribute. +# The test is skipped in case the attribute is not available. +# +# First, check that all the ports with 1 lane fail to split. +# Second, check that all the ports with more than 1 lane can be split +# to all valid configurations (e.g., split to 2, split to 4 etc.) +# + + +Port = collections.namedtuple('Port', 'bus_info name') + + +def run_command(cmd, should_fail=False): + """ + Run a command in subprocess. + Return: Tuple of (stdout, stderr). 
+ """ + + p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) + stdout, stderr = p.communicate() + stdout, stderr = stdout.decode(), stderr.decode() + + if stderr != "" and not should_fail: + print("Error sending command: %s" % cmd) + print(stdout) + print(stderr) + return stdout, stderr + + +class devlink_ports(object): + """ + Class that holds information on the devlink ports, required to the tests; + if_names: A list of interfaces in the devlink ports. + """ + + def get_if_names(dev): + """ + Get a list of physical devlink ports. + Return: Array of tuples (bus_info/port, if_name). + """ + + arr = [] + + cmd = "devlink -j port show" + stdout, stderr = run_command(cmd) + assert stderr == "" + ports = json.loads(stdout)['port'] + + for port in ports: + if dev in port: + if ports[port]['flavour'] == 'physical': + arr.append(Port(bus_info=port, name=ports[port]['netdev'])) + + return arr + + def __init__(self, dev): + self.if_names = devlink_ports.get_if_names(dev) + + +def get_max_lanes(port): + """ + Get the $port's maximum number of lanes. + Return: number of lanes, e.g. 1, 2, 4 and 8. + """ + + cmd = "devlink -j port show %s" % port + stdout, stderr = run_command(cmd) + assert stderr == "" + values = list(json.loads(stdout)['port'].values())[0] + + if 'lanes' in values: + lanes = values['lanes'] + else: + lanes = 0 + return lanes + + +def get_split_ability(port): + """ + Get the $port split ability. + Return: split ability, true or false. + """ + + cmd = "devlink -j port show %s" % port.name + stdout, stderr = run_command(cmd) + assert stderr == "" + values = list(json.loads(stdout)['port'].values())[0] + + return values['splittable'] + + +def split(k, port, should_fail=False): + """ + Split $port into $k ports. + If should_fail == True, the split should fail. Otherwise, should pass. + Return: Array of sub ports after splitting. + If the $port wasn't split, the array will be empty. + """ + + cmd = "devlink port split %s count %s" % (port.bus_info, k) + stdout, stderr = run_command(cmd, should_fail=should_fail) + + if should_fail: + if not test(stderr != "", "%s is unsplittable" % port.name): + print("split an unsplittable port %s" % port.name) + return create_split_group(port, k) + else: + if stderr == "": + return create_split_group(port, k) + print("didn't split a splittable port %s" % port.name) + + return [] + + +def unsplit(port): + """ + Unsplit $port. + """ + + cmd = "devlink port unsplit %s" % port + stdout, stderr = run_command(cmd) + test(stderr == "", "Unsplit port %s" % port) + + +def exists(port, dev): + """ + Check if $port exists in the devlink ports. + Return: True is so, False otherwise. + """ + + return any(dev_port.name == port + for dev_port in devlink_ports.get_if_names(dev)) + + +def exists_and_lanes(ports, lanes, dev): + """ + Check if every port in the list $ports exists in the devlink ports and has + $lanes number of lanes after splitting. + Return: True if both are True, False otherwise. + """ + + for port in ports: + max_lanes = get_max_lanes(port) + if not exists(port, dev): + print("port %s doesn't exist in devlink ports" % port) + return False + if max_lanes != lanes: + print("port %s has %d lanes, but %s were expected" + % (port, lanes, max_lanes)) + return False + return True + + +def test(cond, msg): + """ + Check $cond and print a message accordingly. + Return: True is pass, False otherwise. 
+ """ + + if cond: + print("TEST: %-60s [ OK ]" % msg) + else: + print("TEST: %-60s [FAIL]" % msg) + + return cond + + +def create_split_group(port, k): + """ + Create the split group for $port. + Return: Array with $k elements, which are the split port group. + """ + + return list(port.name + "s" + str(i) for i in range(k)) + + +def split_unsplittable_port(port, k): + """ + Test that splitting of unsplittable port fails. + """ + + # split to max + new_split_group = split(k, port, should_fail=True) + + if new_split_group != []: + unsplit(port.bus_info) + + +def split_splittable_port(port, k, lanes, dev): + """ + Test that splitting of splittable port passes correctly. + """ + + new_split_group = split(k, port) + + # Once the split command ends, it takes some time to the sub ifaces' + # to get their names. Use udevadm to continue only when all current udev + # events are handled. + cmd = "udevadm settle" + stdout, stderr = run_command(cmd) + assert stderr == "" + + if new_split_group != []: + test(exists_and_lanes(new_split_group, lanes/k, dev), + "split port %s into %s" % (port.name, k)) + + unsplit(port.bus_info) + + +def make_parser(): + parser = argparse.ArgumentParser(description='A test for port splitting.') + parser.add_argument('--dev', + help='The devlink handle of the device under test. ' + + 'The default is the first registered devlink ' + + 'handle.') + + return parser + + +def main(cmdline=None): + parser = make_parser() + args = parser.parse_args(cmdline) + + dev = args.dev + if not dev: + cmd = "devlink -j dev show" + stdout, stderr = run_command(cmd) + assert stderr == "" + + devs = json.loads(stdout)['dev'] + dev = list(devs.keys())[0] + + cmd = "devlink dev show %s" % dev + stdout, stderr = run_command(cmd) + if stderr != "": + print("devlink device %s can not be found" % dev) + sys.exit(1) + + ports = devlink_ports(dev) + + for port in ports.if_names: + max_lanes = get_max_lanes(port.name) + + # If max lanes is 0, do not test port splitting at all + if max_lanes == 0: + continue + + # If 1 lane, shouldn't be able to split + elif max_lanes == 1: + test(not get_split_ability(port), + "%s should not be able to split" % port.name) + split_unsplittable_port(port, max_lanes) + + # Else, splitting should pass and all the split ports should exist. + else: + lane = max_lanes + test(get_split_ability(port), + "%s should be able to split" % port.name) + while lane > 1: + split_splittable_port(port, lane, max_lanes, dev) + + lane //= 2 + + +if __name__ == "__main__": + main() diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh index f0e6be4c09e9..75fe24bcb9cd 100644 --- a/tools/testing/selftests/net/forwarding/devlink_lib.sh +++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh @@ -98,6 +98,11 @@ devlink_resource_size_set() check_err $? 
"Failed setting path $path to size $size" } +devlink_resource_occ_get() +{ + devlink_resource_get "$@" | jq '.["occ"]' +} + devlink_reload() { local still_pending diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh index 43a948feed26..dbb9fcf759e0 100755 --- a/tools/testing/selftests/net/forwarding/ethtool.sh +++ b/tools/testing/selftests/net/forwarding/ethtool.sh @@ -50,23 +50,6 @@ cleanup() h1_destroy } -different_speeds_get() -{ - local dev1=$1; shift - local dev2=$1; shift - local with_mode=$1; shift - local adver=$1; shift - - local -a speeds_arr - - speeds_arr=($(common_speeds_get $dev1 $dev2 $with_mode $adver)) - if [[ ${#speeds_arr[@]} < 2 ]]; then - check_err 1 "cannot check different speeds. There are not enough speeds" - fi - - echo ${speeds_arr[0]} ${speeds_arr[1]} -} - same_speeds_autoneg_off() { # Check that when each of the reported speeds is forced, the links come diff --git a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh new file mode 100755 index 000000000000..4b42dfd4efd1 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh @@ -0,0 +1,102 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS=" + autoneg + autoneg_force_mode + no_cable +" + +NUM_NETIFS=2 +source lib.sh +source ethtool_lib.sh + +setup_prepare() +{ + swp1=${NETIFS[p1]} + swp2=${NETIFS[p2]} + swp3=$NETIF_NO_CABLE +} + +ethtool_extended_state_check() +{ + local dev=$1; shift + local expected_ext_state=$1; shift + local expected_ext_substate=${1:-""}; shift + + local ext_state=$(ethtool $dev | grep "Link detected" \ + | cut -d "(" -f2 | cut -d ")" -f1) + local ext_substate=$(echo $ext_state | cut -sd "," -f2 \ + | sed -e 's/^[[:space:]]*//') + ext_state=$(echo $ext_state | cut -d "," -f1) + + [[ $ext_state == $expected_ext_state ]] + check_err $? "Expected \"$expected_ext_state\", got \"$ext_state\"" + + [[ $ext_substate == $expected_ext_substate ]] + check_err $? 
"Expected \"$expected_ext_substate\", got \"$ext_substate\"" +} + +autoneg() +{ + RET=0 + + ip link set dev $swp1 up + + sleep 4 + ethtool_extended_state_check $swp1 "Autoneg" "No partner detected" + + log_test "Autoneg, No partner detected" + + ip link set dev $swp1 down +} + +autoneg_force_mode() +{ + RET=0 + + ip link set dev $swp1 up + ip link set dev $swp2 up + + local -a speeds_arr=($(different_speeds_get $swp1 $swp2 0 0)) + local speed1=${speeds_arr[0]} + local speed2=${speeds_arr[1]} + + ethtool_set $swp1 speed $speed1 autoneg off + ethtool_set $swp2 speed $speed2 autoneg off + + sleep 4 + ethtool_extended_state_check $swp1 "Autoneg" \ + "No partner detected during force mode" + + ethtool_extended_state_check $swp2 "Autoneg" \ + "No partner detected during force mode" + + log_test "Autoneg, No partner detected during force mode" + + ethtool -s $swp2 autoneg on + ethtool -s $swp1 autoneg on + + ip link set dev $swp2 down + ip link set dev $swp1 down +} + +no_cable() +{ + RET=0 + + ip link set dev $swp3 up + + sleep 1 + ethtool_extended_state_check $swp3 "No cable" + + log_test "No cable" + + ip link set dev $swp3 down +} + +setup_prepare + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/ethtool_lib.sh b/tools/testing/selftests/net/forwarding/ethtool_lib.sh index 925d229a59d8..9188e624dec0 100644 --- a/tools/testing/selftests/net/forwarding/ethtool_lib.sh +++ b/tools/testing/selftests/net/forwarding/ethtool_lib.sh @@ -67,3 +67,20 @@ common_speeds_get() <(printf '%s\n' "${dev1_speeds[@]}" | sort -u) \ <(printf '%s\n' "${dev2_speeds[@]}" | sort -u) } + +different_speeds_get() +{ + local dev1=$1; shift + local dev2=$1; shift + local with_mode=$1; shift + local adver=$1; shift + + local -a speeds_arr + + speeds_arr=($(common_speeds_get $dev1 $dev2 $with_mode $adver)) + if [[ ${#speeds_arr[@]} < 2 ]]; then + check_err 1 "cannot check different speeds. There are not enough speeds" + fi + + echo ${speeds_arr[0]} ${speeds_arr[1]} +} diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample index e2adb533c8fc..b802c14d2950 100644 --- a/tools/testing/selftests/net/forwarding/forwarding.config.sample +++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample @@ -14,6 +14,9 @@ NETIFS[p6]=veth5 NETIFS[p7]=veth6 NETIFS[p8]=veth7 +# Port that does not have a cable connected. +NETIF_NO_CABLE=eth8 + ############################################################################## # Defines diff --git a/tools/testing/selftests/net/forwarding/pedit_l4port.sh b/tools/testing/selftests/net/forwarding/pedit_l4port.sh new file mode 100755 index 000000000000..5f20d289ee43 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/pedit_l4port.sh @@ -0,0 +1,198 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# This test sends traffic from H1 to H2. Either on ingress of $swp1, or on egress of $swp2, the +# traffic is acted upon by a pedit action. An ingress filter installed on $h2 verifies that the +# packet looks like expected. 
+# +# +----------------------+ +----------------------+ +# | H1 | | H2 | +# | + $h1 | | $h2 + | +# | | 192.0.2.1/28 | | 192.0.2.2/28 | | +# +----|-----------------+ +----------------|-----+ +# | | +# +----|----------------------------------------------------------------|-----+ +# | SW | | | +# | +-|----------------------------------------------------------------|-+ | +# | | + $swp1 BR $swp2 + | | +# | +--------------------------------------------------------------------+ | +# +---------------------------------------------------------------------------+ + +ALL_TESTS=" + ping_ipv4 + test_udp_sport + test_udp_dport + test_tcp_sport + test_tcp_dport +" + +NUM_NETIFS=4 +source lib.sh +source tc_common.sh + +: ${HIT_TIMEOUT:=2000} # ms + +h1_create() +{ + simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64 +} + +h1_destroy() +{ + simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.2/28 2001:db8:1::2/64 + tc qdisc add dev $h2 clsact +} + +h2_destroy() +{ + tc qdisc del dev $h2 clsact + simple_if_fini $h2 192.0.2.2/28 2001:db8:1::2/64 +} + +switch_create() +{ + ip link add name br1 up type bridge vlan_filtering 1 + ip link set dev $swp1 master br1 + ip link set dev $swp1 up + ip link set dev $swp2 master br1 + ip link set dev $swp2 up + + tc qdisc add dev $swp1 clsact + tc qdisc add dev $swp2 clsact +} + +switch_destroy() +{ + tc qdisc del dev $swp2 clsact + tc qdisc del dev $swp1 clsact + + ip link set dev $swp2 nomaster + ip link set dev $swp1 nomaster + ip link del dev br1 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + h2mac=$(mac_get $h2) + + vrf_prepare + h1_create + h2_create + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + h2_destroy + h1_destroy + vrf_cleanup +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.2 +} + +ping_ipv6() +{ + ping6_test $h1 2001:db8:1::2 +} + +do_test_pedit_l4port_one() +{ + local pedit_locus=$1; shift + local pedit_prot=$1; shift + local pedit_action=$1; shift + local match_prot=$1; shift + local match_flower=$1; shift + local mz_flags=$1; shift + local saddr=$1; shift + local daddr=$1; shift + + tc filter add $pedit_locus handle 101 pref 1 \ + flower action pedit ex munge $pedit_action + tc filter add dev $h2 ingress handle 101 pref 1 prot $match_prot \ + flower skip_hw $match_flower action pass + + RET=0 + + $MZ $mz_flags $h1 -c 10 -d 20msec -p 100 \ + -a own -b $h2mac -q -t $pedit_prot sp=54321,dp=12345 + + local pkts + pkts=$(busywait "$TC_HIT_TIMEOUT" until_counter_is ">= 10" \ + tc_rule_handle_stats_get "dev $h2 ingress" 101) + check_err $? "Expected to get 10 packets, but got $pkts." + + pkts=$(tc_rule_handle_stats_get "$pedit_locus" 101) + ((pkts >= 10)) + check_err $? "Expected to get 10 packets on pedit rule, but got $pkts." 
+ + log_test "$pedit_locus pedit $pedit_action" + + tc filter del dev $h2 ingress pref 1 + tc filter del $pedit_locus pref 1 +} + +do_test_pedit_l4port() +{ + local locus=$1; shift + local prot=$1; shift + local pedit_port=$1; shift + local flower_port=$1; shift + local port + + for port in 1 11111 65535; do + do_test_pedit_l4port_one "$locus" "$prot" \ + "$prot $pedit_port set $port" \ + ip "ip_proto $prot $flower_port $port" \ + "-A 192.0.2.1 -B 192.0.2.2" + done +} + +test_udp_sport() +{ + do_test_pedit_l4port "dev $swp1 ingress" udp sport src_port + do_test_pedit_l4port "dev $swp2 egress" udp sport src_port +} + +test_udp_dport() +{ + do_test_pedit_l4port "dev $swp1 ingress" udp dport dst_port + do_test_pedit_l4port "dev $swp2 egress" udp dport dst_port +} + +test_tcp_sport() +{ + do_test_pedit_l4port "dev $swp1 ingress" tcp sport src_port + do_test_pedit_l4port "dev $swp2 egress" tcp sport src_port +} + +test_tcp_dport() +{ + do_test_pedit_l4port "dev $swp1 ingress" tcp dport dst_port + do_test_pedit_l4port "dev $swp2 egress" tcp dport dst_port +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh new file mode 100755 index 000000000000..e714bae473fb --- /dev/null +++ b/tools/testing/selftests/net/forwarding/sch_red.sh @@ -0,0 +1,492 @@ +# SPDX-License-Identifier: GPL-2.0 + +# This test sends one stream of traffic from H1 through a TBF shaper, to a RED +# within TBF shaper on $swp3. The two shapers have the same configuration, and +# thus the resulting stream should fill all available bandwidth on the latter +# shaper. A second stream is sent from H2 also via $swp3, and used to inject +# additional traffic. Since all available bandwidth is taken, this traffic has +# to go to backlog. 
+# +# +--------------------------+ +--------------------------+ +# | H1 | | H2 | +# | + $h1 | | + $h2 | +# | | 192.0.2.1/28 | | | 192.0.2.2/28 | +# | | TBF 10Mbps | | | | +# +-----|--------------------+ +-----|--------------------+ +# | | +# +-----|------------------------------------------------|--------------------+ +# | SW | | | +# | +--|------------------------------------------------|----------------+ | +# | | + $swp1 + $swp2 | | +# | | BR | | +# | | | | +# | | + $swp3 | | +# | | | TBF 10Mbps / RED | | +# | +--------------------------------|-----------------------------------+ | +# | | | +# +-----------------------------------|---------------------------------------+ +# | +# +-----|--------------------+ +# | H3 | | +# | + $h1 | +# | 192.0.2.3/28 | +# | | +# +--------------------------+ + +ALL_TESTS=" + ping_ipv4 + ecn_test + ecn_nodrop_test + red_test + red_qevent_test + ecn_qevent_test +" + +NUM_NETIFS=6 +CHECK_TC="yes" +source lib.sh + +BACKLOG=30000 +PKTSZ=1400 + +h1_create() +{ + simple_if_init $h1 192.0.2.1/28 + mtu_set $h1 10000 + tc qdisc replace dev $h1 root handle 1: tbf \ + rate 10Mbit burst 10K limit 1M +} + +h1_destroy() +{ + tc qdisc del dev $h1 root + mtu_restore $h1 + simple_if_fini $h1 192.0.2.1/28 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.2/28 + mtu_set $h2 10000 +} + +h2_destroy() +{ + mtu_restore $h2 + simple_if_fini $h2 192.0.2.2/28 +} + +h3_create() +{ + simple_if_init $h3 192.0.2.3/28 + mtu_set $h3 10000 +} + +h3_destroy() +{ + mtu_restore $h3 + simple_if_fini $h3 192.0.2.3/28 +} + +switch_create() +{ + ip link add dev br up type bridge + ip link set dev $swp1 up master br + ip link set dev $swp2 up master br + ip link set dev $swp3 up master br + + mtu_set $swp1 10000 + mtu_set $swp2 10000 + mtu_set $swp3 10000 + + tc qdisc replace dev $swp3 root handle 1: tbf \ + rate 10Mbit burst 10K limit 1M + ip link add name _drop_test up type dummy +} + +switch_destroy() +{ + ip link del dev _drop_test + tc qdisc del dev $swp3 root + + mtu_restore $h3 + mtu_restore $h2 + mtu_restore $h1 + + ip link set dev $swp3 down nomaster + ip link set dev $swp2 down nomaster + ip link set dev $swp1 down nomaster + ip link del dev br +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + h2=${NETIFS[p3]} + swp2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + h3_mac=$(mac_get $h3) + + vrf_prepare + + h1_create + h2_create + h3_create + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + h3_destroy + h2_destroy + h1_destroy + + vrf_cleanup +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.3 " from host 1" + ping_test $h2 192.0.2.3 " from host 2" +} + +get_qdisc_backlog() +{ + qdisc_stats_get $swp3 11: .backlog +} + +get_nmarked() +{ + qdisc_stats_get $swp3 11: .marked +} + +get_qdisc_npackets() +{ + qdisc_stats_get $swp3 11: .packets +} + +get_nmirrored() +{ + link_stats_get _drop_test tx packets +} + +send_packets() +{ + local proto=$1; shift + local pkts=$1; shift + + $MZ $h2 -p $PKTSZ -a own -b $h3_mac -A 192.0.2.2 -B 192.0.2.3 -t $proto -q -c $pkts "$@" +} + +# This sends traffic in an attempt to build a backlog of $size. Returns 0 on +# success. After 10 failed attempts it bails out and returns 1. It dumps the +# backlog size to stdout. 
+build_backlog() +{ + local size=$1; shift + local proto=$1; shift + + local i=0 + + while :; do + local cur=$(get_qdisc_backlog) + local diff=$((size - cur)) + local pkts=$(((diff + PKTSZ - 1) / PKTSZ)) + + if ((cur >= size)); then + echo $cur + return 0 + elif ((i++ > 10)); then + echo $cur + return 1 + fi + + send_packets $proto $pkts "$@" + sleep 1 + done +} + +check_marking() +{ + local cond=$1; shift + + local npackets_0=$(get_qdisc_npackets) + local nmarked_0=$(get_nmarked) + sleep 5 + local npackets_1=$(get_qdisc_npackets) + local nmarked_1=$(get_nmarked) + + local nmarked_d=$((nmarked_1 - nmarked_0)) + local npackets_d=$((npackets_1 - npackets_0)) + local pct=$((100 * nmarked_d / npackets_d)) + + echo $pct + ((pct $cond)) +} + +check_mirroring() +{ + local cond=$1; shift + + local npackets_0=$(get_qdisc_npackets) + local nmirrored_0=$(get_nmirrored) + sleep 5 + local npackets_1=$(get_qdisc_npackets) + local nmirrored_1=$(get_nmirrored) + + local nmirrored_d=$((nmirrored_1 - nmirrored_0)) + local npackets_d=$((npackets_1 - npackets_0)) + local pct=$((100 * nmirrored_d / npackets_d)) + + echo $pct + ((pct $cond)) +} + +ecn_test_common() +{ + local name=$1; shift + local limit=$1; shift + local backlog + local pct + + # Build the below-the-limit backlog using UDP. We could use TCP just + # fine, but this way we get a proof that UDP is accepted when queue + # length is below the limit. The main stream is using TCP, and if the + # limit is misconfigured, we would see this traffic being ECN marked. + RET=0 + backlog=$(build_backlog $((2 * limit / 3)) udp) + check_err $? "Could not build the requested backlog" + pct=$(check_marking "== 0") + check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0." + log_test "$name backlog < limit" + + # Now push TCP, because non-TCP traffic would be early-dropped after the + # backlog crosses the limit, and we want to make sure that the backlog + # is above the limit. + RET=0 + backlog=$(build_backlog $((3 * limit / 2)) tcp tos=0x01) + check_err $? "Could not build the requested backlog" + pct=$(check_marking ">= 95") + check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected >= 95." + log_test "$name backlog > limit" +} + +do_ecn_test() +{ + local limit=$1; shift + local name=ECN + + $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ + -a own -b $h3_mac -t tcp -q tos=0x01 & + sleep 1 + + ecn_test_common "$name" $limit + + # Up there we saw that UDP gets accepted when backlog is below the + # limit. Now that it is above, it should all get dropped, and backlog + # building should fail. + RET=0 + build_backlog $((2 * limit)) udp >/dev/null + check_fail $? "UDP traffic went into backlog instead of being early-dropped" + log_test "$name backlog > limit: UDP early-dropped" + + stop_traffic + sleep 1 +} + +do_ecn_nodrop_test() +{ + local limit=$1; shift + local name="ECN nodrop" + + $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ + -a own -b $h3_mac -t tcp -q tos=0x01 & + sleep 1 + + ecn_test_common "$name" $limit + + # Up there we saw that UDP gets accepted when backlog is below the + # limit. Now that it is above, in nodrop mode, make sure it goes to + # backlog as well. + RET=0 + build_backlog $((2 * limit)) udp >/dev/null + check_err $? 
"UDP traffic was early-dropped instead of getting into backlog" + log_test "$name backlog > limit: UDP not dropped" + + stop_traffic + sleep 1 +} + +do_red_test() +{ + local limit=$1; shift + local backlog + local pct + + # Use ECN-capable TCP to verify there's no marking even though the queue + # is above limit. + $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ + -a own -b $h3_mac -t tcp -q tos=0x01 & + + # Pushing below the queue limit should work. + RET=0 + backlog=$(build_backlog $((2 * limit / 3)) tcp tos=0x01) + check_err $? "Could not build the requested backlog" + pct=$(check_marking "== 0") + check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0." + log_test "RED backlog < limit" + + # Pushing above should not. + RET=0 + backlog=$(build_backlog $((3 * limit / 2)) tcp tos=0x01) + check_fail $? "Traffic went into backlog instead of being early-dropped" + pct=$(check_marking "== 0") + check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0." + log_test "RED backlog > limit" + + stop_traffic + sleep 1 +} + +do_red_qevent_test() +{ + local limit=$1; shift + local backlog + local base + local now + local pct + + RET=0 + + $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ + -a own -b $h3_mac -t udp -q & + sleep 1 + + tc filter add block 10 pref 1234 handle 102 matchall skip_hw \ + action mirred egress mirror dev _drop_test + + # Push to the queue until it's at the limit. The configured limit is + # rounded by the qdisc, so this is the best we can do to get to the real + # limit. + build_backlog $((3 * limit / 2)) udp >/dev/null + + base=$(get_nmirrored) + send_packets udp 100 + sleep 1 + now=$(get_nmirrored) + ((now >= base + 100)) + check_err $? "Dropped packets not observed: 100 expected, $((now - base)) seen" + + tc filter del block 10 pref 1234 handle 102 matchall + + base=$(get_nmirrored) + send_packets udp 100 + sleep 1 + now=$(get_nmirrored) + ((now == base)) + check_err $? "Dropped packets still observed: 0 expected, $((now - base)) seen" + + log_test "RED early_dropped packets mirrored" + + stop_traffic + sleep 1 +} + +do_ecn_qevent_test() +{ + local limit=$1; shift + local name=ECN + + RET=0 + + $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ + -a own -b $h3_mac -t tcp -q tos=0x01 & + sleep 1 + + tc filter add block 10 pref 1234 handle 102 matchall skip_hw \ + action mirred egress mirror dev _drop_test + + backlog=$(build_backlog $((2 * limit / 3)) tcp tos=0x01) + check_err $? "Could not build the requested backlog" + pct=$(check_mirroring "== 0") + check_err $? "backlog $backlog / $limit Got $pct% mirrored packets, expected == 0." + + backlog=$(build_backlog $((3 * limit / 2)) tcp tos=0x01) + check_err $? "Could not build the requested backlog" + pct=$(check_mirroring ">= 95") + check_err $? "backlog $backlog / $limit Got $pct% mirrored packets, expected >= 95." 
+ + tc filter del block 10 pref 1234 handle 102 matchall + + log_test "ECN marked packets mirrored" + + stop_traffic + sleep 1 +} + +install_qdisc() +{ + local -a args=("$@") + + tc qdisc replace dev $swp3 parent 1:1 handle 11: red \ + limit 1M avpkt $PKTSZ probability 1 \ + min $BACKLOG max $((BACKLOG + 1)) burst 38 "${args[@]}" + sleep 1 +} + +uninstall_qdisc() +{ + tc qdisc del dev $swp3 parent 1:1 +} + +ecn_test() +{ + install_qdisc ecn + do_ecn_test $BACKLOG + uninstall_qdisc +} + +ecn_nodrop_test() +{ + install_qdisc ecn nodrop + do_ecn_nodrop_test $BACKLOG + uninstall_qdisc +} + +red_test() +{ + install_qdisc + do_red_test $BACKLOG + uninstall_qdisc +} + +red_qevent_test() +{ + install_qdisc qevent early_drop block 10 + do_red_qevent_test $BACKLOG + uninstall_qdisc +} + +ecn_qevent_test() +{ + install_qdisc ecn qevent mark block 10 + do_ecn_qevent_test $BACKLOG + uninstall_qdisc +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/tc_police.sh b/tools/testing/selftests/net/forwarding/tc_police.sh new file mode 100755 index 000000000000..160f9cccdfb7 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/tc_police.sh @@ -0,0 +1,333 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test tc-police action. +# +# +---------------------------------+ +# | H1 (vrf) | +# | + $h1 | +# | | 192.0.2.1/24 | +# | | | +# | | default via 192.0.2.2 | +# +----|----------------------------+ +# | +# +----|----------------------------------------------------------------------+ +# | SW | | +# | + $rp1 | +# | 192.0.2.2/24 | +# | | +# | 198.51.100.2/24 203.0.113.2/24 | +# | + $rp2 + $rp3 | +# | | | | +# +----|-----------------------------------------|----------------------------+ +# | | +# +----|----------------------------+ +----|----------------------------+ +# | | default via 198.51.100.2 | | | default via 203.0.113.2 | +# | | | | | | +# | | 198.51.100.1/24 | | | 203.0.113.1/24 | +# | + $h2 | | + $h3 | +# | H2 (vrf) | | H3 (vrf) | +# +---------------------------------+ +---------------------------------+ + +ALL_TESTS=" + police_rx_test + police_tx_test + police_shared_test + police_rx_mirror_test + police_tx_mirror_test +" +NUM_NETIFS=6 +source tc_common.sh +source lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/24 + + ip -4 route add default vrf v$h1 nexthop via 192.0.2.2 +} + +h1_destroy() +{ + ip -4 route del default vrf v$h1 nexthop via 192.0.2.2 + + simple_if_fini $h1 192.0.2.1/24 +} + +h2_create() +{ + simple_if_init $h2 198.51.100.1/24 + + ip -4 route add default vrf v$h2 nexthop via 198.51.100.2 + + tc qdisc add dev $h2 clsact +} + +h2_destroy() +{ + tc qdisc del dev $h2 clsact + + ip -4 route del default vrf v$h2 nexthop via 198.51.100.2 + + simple_if_fini $h2 198.51.100.1/24 +} + +h3_create() +{ + simple_if_init $h3 203.0.113.1/24 + + ip -4 route add default vrf v$h3 nexthop via 203.0.113.2 + + tc qdisc add dev $h3 clsact +} + +h3_destroy() +{ + tc qdisc del dev $h3 clsact + + ip -4 route del default vrf v$h3 nexthop via 203.0.113.2 + + simple_if_fini $h3 203.0.113.1/24 +} + +router_create() +{ + ip link set dev $rp1 up + ip link set dev $rp2 up + ip link set dev $rp3 up + + __addr_add_del $rp1 add 192.0.2.2/24 + __addr_add_del $rp2 add 198.51.100.2/24 + __addr_add_del $rp3 add 203.0.113.2/24 + + tc qdisc add dev $rp1 clsact + tc qdisc add dev $rp2 clsact +} + +router_destroy() +{ + tc qdisc del dev $rp2 clsact + tc qdisc del dev $rp1 clsact + + __addr_add_del $rp3 del 203.0.113.2/24 + 
__addr_add_del $rp2 del 198.51.100.2/24 + __addr_add_del $rp1 del 192.0.2.2/24 + + ip link set dev $rp3 down + ip link set dev $rp2 down + ip link set dev $rp1 down +} + +police_common_test() +{ + local test_name=$1; shift + + RET=0 + + # Rule to measure bandwidth on ingress of $h2 + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action drop + + mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \ + -t udp sp=12345,dp=54321 -p 1000 -c 0 -q & + + local t0=$(tc_rule_stats_get $h2 1 ingress .bytes) + sleep 10 + local t1=$(tc_rule_stats_get $h2 1 ingress .bytes) + + local er=$((80 * 1000 * 1000)) + local nr=$(rate $t0 $t1 10) + local nr_pct=$((100 * (nr - er) / er)) + ((-10 <= nr_pct && nr_pct <= 10)) + check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%." + + log_test "$test_name" + + { kill %% && wait %%; } 2>/dev/null + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower +} + +police_rx_test() +{ + # Rule to police traffic destined to $h2 on ingress of $rp1 + tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action police rate 80mbit burst 16k conform-exceed drop/ok + + police_common_test "police on rx" + + tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower +} + +police_tx_test() +{ + # Rule to police traffic destined to $h2 on egress of $rp2 + tc filter add dev $rp2 egress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action police rate 80mbit burst 16k conform-exceed drop/ok + + police_common_test "police on tx" + + tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower +} + +police_shared_common_test() +{ + local dport=$1; shift + local test_name=$1; shift + + RET=0 + + mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \ + -t udp sp=12345,dp=$dport -p 1000 -c 0 -q & + + local t0=$(tc_rule_stats_get $h2 1 ingress .bytes) + sleep 10 + local t1=$(tc_rule_stats_get $h2 1 ingress .bytes) + + local er=$((80 * 1000 * 1000)) + local nr=$(rate $t0 $t1 10) + local nr_pct=$((100 * (nr - er) / er)) + ((-10 <= nr_pct && nr_pct <= 10)) + check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%." 
+ + log_test "$test_name" + + { kill %% && wait %%; } 2>/dev/null +} + +police_shared_test() +{ + # Rule to measure bandwidth on ingress of $h2 + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp src_port 12345 \ + action drop + + # Rule to police traffic destined to $h2 on ingress of $rp1 + tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action police rate 80mbit burst 16k conform-exceed drop/ok \ + index 10 + + # Rule to police a different flow destined to $h2 on egress of $rp2 + # using same policer + tc filter add dev $rp2 egress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 22222 \ + action police index 10 + + police_shared_common_test 54321 "police with shared policer - rx" + + police_shared_common_test 22222 "police with shared policer - tx" + + tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower + tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower +} + +police_mirror_common_test() +{ + local pol_if=$1; shift + local dir=$1; shift + local test_name=$1; shift + + RET=0 + + # Rule to measure bandwidth on ingress of $h2 + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action drop + + # Rule to measure bandwidth of mirrored traffic on ingress of $h3 + tc filter add dev $h3 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action drop + + # Rule to police traffic destined to $h2 and mirror to $h3 + tc filter add dev $pol_if $dir protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action police rate 80mbit burst 16k conform-exceed drop/pipe \ + action mirred egress mirror dev $rp3 + + mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \ + -t udp sp=12345,dp=54321 -p 1000 -c 0 -q & + + local t0=$(tc_rule_stats_get $h2 1 ingress .bytes) + sleep 10 + local t1=$(tc_rule_stats_get $h2 1 ingress .bytes) + + local er=$((80 * 1000 * 1000)) + local nr=$(rate $t0 $t1 10) + local nr_pct=$((100 * (nr - er) / er)) + ((-10 <= nr_pct && nr_pct <= 10)) + check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%." + + local t0=$(tc_rule_stats_get $h3 1 ingress .bytes) + sleep 10 + local t1=$(tc_rule_stats_get $h3 1 ingress .bytes) + + local er=$((80 * 1000 * 1000)) + local nr=$(rate $t0 $t1 10) + local nr_pct=$((100 * (nr - er) / er)) + ((-10 <= nr_pct && nr_pct <= 10)) + check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%." 
+ + log_test "$test_name" + + { kill %% && wait %%; } 2>/dev/null + tc filter del dev $pol_if $dir protocol ip pref 1 handle 101 flower + tc filter del dev $h3 ingress protocol ip pref 1 handle 101 flower + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower +} + +police_rx_mirror_test() +{ + police_mirror_common_test $rp1 ingress "police rx and mirror" +} + +police_tx_mirror_test() +{ + police_mirror_common_test $rp2 egress "police tx and mirror" +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + rp1=${NETIFS[p2]} + + rp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + rp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + vrf_prepare + forwarding_enable + + h1_create + h2_create + h3_create + router_create +} + +cleanup() +{ + pre_cleanup + + router_destroy + h3_destroy + h2_destroy + h1_destroy + + forwarding_restore + vrf_cleanup +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile index f50976ee7d44..aa254aefc2c3 100644 --- a/tools/testing/selftests/net/mptcp/Makefile +++ b/tools/testing/selftests/net/mptcp/Makefile @@ -5,7 +5,7 @@ KSFT_KHDR_INSTALL := 1 CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include -TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh +TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh TEST_GEN_FILES = mptcp_connect pm_nl_ctl diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh new file mode 100755 index 000000000000..39edce4f541c --- /dev/null +++ b/tools/testing/selftests/net/mptcp/diag.sh @@ -0,0 +1,121 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +rndh=$(printf %x $sec)-$(mktemp -u XXXXXX) +ns="ns1-$rndh" +ksft_skip=4 +test_cnt=1 +ret=0 +pids=() + +flush_pids() +{ + # mptcp_connect in join mode will sleep a bit before completing, + # give it some time + sleep 1.1 + + for pid in ${pids[@]}; do + [ -d /proc/$pid ] && kill -SIGUSR1 $pid >/dev/null 2>&1 + done + pids=() +} + +cleanup() +{ + ip netns del $ns + for pid in ${pids[@]}; do + [ -d /proc/$pid ] && kill -9 $pid >/dev/null 2>&1 + done +} + +ip -Version > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without ip tool" + exit $ksft_skip +fi +ss -h | grep -q MPTCP +if [ $? -ne 0 ];then + echo "SKIP: ss tool does not support MPTCP" + exit $ksft_skip +fi + +__chk_nr() +{ + local condition="$1" + local expected=$2 + local msg nr + + shift 2 + msg=$* + nr=$(ss -inmHMN $ns | $condition) + + printf "%-50s" "$msg" + if [ $nr != $expected ]; then + echo "[ fail ] expected $expected found $nr" + ret=$test_cnt + else + echo "[ ok ]" + fi + test_cnt=$((test_cnt+1)) +} + +chk_msk_nr() +{ + __chk_nr "grep -c token:" $* +} + +chk_msk_fallback_nr() +{ + __chk_nr "grep -c fallback" $* +} + +chk_msk_remote_key_nr() +{ + __chk_nr "grep -c remote_key" $* +} + + +trap cleanup EXIT +ip netns add $ns +ip -n $ns link set dev lo up + +echo "a" | ip netns exec $ns ./mptcp_connect -p 10000 -l 0.0.0.0 -t 100 >/dev/null & +sleep 0.1 +pids[0]=$! +chk_msk_nr 0 "no msk on netns creation" + +echo "b" | ip netns exec $ns ./mptcp_connect -p 10000 127.0.0.1 -j -t 100 >/dev/null & +sleep 0.1 +pids[1]=$! +chk_msk_nr 2 "after MPC handshake " +chk_msk_remote_key_nr 2 "....chk remote_key" +chk_msk_fallback_nr 0 "....chk no fallback" +flush_pids + + +echo "a" | ip netns exec $ns ./mptcp_connect -p 10001 -s TCP -l 0.0.0.0 -t 100 >/dev/null & +pids[0]=$! 
+sleep 0.1 +echo "b" | ip netns exec $ns ./mptcp_connect -p 10001 127.0.0.1 -j -t 100 >/dev/null & +pids[1]=$! +sleep 0.1 +chk_msk_fallback_nr 1 "check fallback" +flush_pids + +NR_CLIENTS=100 +for I in `seq 1 $NR_CLIENTS`; do + echo "a" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) -l 0.0.0.0 -t 100 -w 10 >/dev/null & + pids[$((I*2))]=$! +done +sleep 0.1 + +for I in `seq 1 $NR_CLIENTS`; do + echo "b" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) 127.0.0.1 -t 100 -w 10 >/dev/null & + pids[$((I*2 + 1))]=$! +done +sleep 1.5 + +chk_msk_nr $((NR_CLIENTS*2)) "many msk socket present" +flush_pids + +exit $ret diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c index cedee5b952ba..cad6f73a5fd0 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.c +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c @@ -11,6 +11,7 @@ #include <stdio.h> #include <stdlib.h> #include <strings.h> +#include <signal.h> #include <unistd.h> #include <sys/poll.h> @@ -36,6 +37,7 @@ extern int optind; static int poll_timeout = 10 * 1000; static bool listen_mode; +static bool quit; enum cfg_mode { CFG_MODE_POLL, @@ -52,11 +54,12 @@ static int pf = AF_INET; static int cfg_sndbuf; static int cfg_rcvbuf; static bool cfg_join; +static int cfg_wait; static void die_usage(void) { fprintf(stderr, "Usage: mptcp_connect [-6] [-u] [-s MPTCP|TCP] [-p port] [-m mode]" - "[-l] connect_address\n"); + "[-l] [-w sec] connect_address\n"); fprintf(stderr, "\t-6 use ipv6\n"); fprintf(stderr, "\t-t num -- set poll timeout to num\n"); fprintf(stderr, "\t-S num -- set SO_SNDBUF to num\n"); @@ -65,9 +68,15 @@ static void die_usage(void) fprintf(stderr, "\t-m [MPTCP|TCP] -- use tcp or mptcp sockets\n"); fprintf(stderr, "\t-s [mmap|poll] -- use poll (default) or mmap\n"); fprintf(stderr, "\t-u -- check mptcp ulp\n"); + fprintf(stderr, "\t-w num -- wait num sec before closing the socket\n"); exit(1); } +static void handle_signal(int nr) +{ + quit = true; +} + static const char *getxinfo_strerr(int err) { if (err == EAI_SYSTEM) @@ -418,8 +427,8 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd) } /* leave some time for late join/announce */ - if (cfg_join) - usleep(400000); + if (cfg_wait) + usleep(cfg_wait); close(peerfd); return 0; @@ -812,11 +821,12 @@ static void parse_opts(int argc, char **argv) { int c; - while ((c = getopt(argc, argv, "6jlp:s:hut:m:S:R:")) != -1) { + while ((c = getopt(argc, argv, "6jlp:s:hut:m:S:R:w:")) != -1) { switch (c) { case 'j': cfg_join = true; cfg_mode = CFG_MODE_POLL; + cfg_wait = 400000; break; case 'l': listen_mode = true; @@ -850,6 +860,9 @@ static void parse_opts(int argc, char **argv) case 'R': cfg_rcvbuf = parse_int(optarg); break; + case 'w': + cfg_wait = atoi(optarg)*1000000; + break; } } @@ -865,6 +878,7 @@ int main(int argc, char *argv[]) { init_rng(); + signal(SIGUSR1, handle_signal); parse_opts(argc, argv); if (tcpulp_audit) diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh index acf02e156d20..57d75b7f6220 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh @@ -3,7 +3,7 @@ time_start=$(date +%s) -optstring="S:R:d:e:l:r:h4cm:" +optstring="S:R:d:e:l:r:h4cm:f:t" ret=0 sin="" sout="" @@ -21,6 +21,8 @@ testmode="" sndbuf=0 rcvbuf=0 options_log=true +do_tcp=0 +filesize=0 if [ $tc_loss -eq 100 ];then tc_loss=1% @@ -40,9 +42,11 @@ usage() { echo -e "\t-e: ethtool features to disable, 
e.g.: \"-e tso -e gso\" (default: randomly disable any of tso/gso/gro)" echo -e "\t-4: IPv4 only: disable IPv6 tests (default: test both IPv4 and IPv6)" echo -e "\t-c: capture packets for each test using tcpdump (default: no capture)" + echo -e "\t-f: size of file to transfer in bytes (default random)" echo -e "\t-S: set sndbuf value (default: use kernel default)" echo -e "\t-R: set rcvbuf value (default: use kernel default)" echo -e "\t-m: test mode (poll, sendfile; default: poll)" + echo -e "\t-t: also run tests with TCP (use twice to non-fallback tcp)" } while getopts "$optstring" option;do @@ -94,6 +98,12 @@ while getopts "$optstring" option;do "m") testmode="$OPTARG" ;; + "f") + filesize="$OPTARG" + ;; + "t") + do_tcp=$((do_tcp+1)) + ;; "?") usage $0 exit 1 @@ -186,6 +196,9 @@ ip -net "$ns4" link set ns4eth3 up ip -net "$ns4" route add default via 10.0.3.2 ip -net "$ns4" route add default via dead:beef:3::2 +# use TCP syn cookies, even if no flooding was detected. +ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2 + set_ethtool_flags() { local ns="$1" local dev="$2" @@ -385,14 +398,23 @@ do_transfer() capuser="-Z $SUDO_USER" fi - local capfile="${listener_ns}-${connector_ns}-${cl_proto}-${srv_proto}-${connect_addr}.pcap" + local capfile="${rndh}-${connector_ns:0:3}-${listener_ns:0:3}-${cl_proto}-${srv_proto}-${connect_addr}-${port}" + local capopt="-i any -s 65535 -B 32768 ${capuser}" + + ip netns exec ${listener_ns} tcpdump ${capopt} -w "${capfile}-listener.pcap" >> "${capout}" 2>&1 & + local cappid_listener=$! - ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 & - local cappid=$! + ip netns exec ${connector_ns} tcpdump ${capopt} -w "${capfile}-connector.pcap" >> "${capout}" 2>&1 & + local cappid_connector=$! sleep 1 fi + local stat_synrx_last_l=$(ip netns exec ${listener_ns} nstat -z -a MPTcpExtMPCapableSYNRX | while read a count c rest ;do echo $count;done) + local stat_ackrx_last_l=$(ip netns exec ${listener_ns} nstat -z -a MPTcpExtMPCapableACKRX | while read a count c rest ;do echo $count;done) + local stat_cookietx_last=$(ip netns exec ${listener_ns} nstat -z -a TcpExtSyncookiesSent | while read a count c rest ;do echo $count;done) + local stat_cookierx_last=$(ip netns exec ${listener_ns} nstat -z -a TcpExtSyncookiesRecv | while read a count c rest ;do echo $count;done) + ip netns exec ${listener_ns} ./mptcp_connect -t $timeout -l -p $port -s ${srv_proto} $extra_args $local_addr < "$sin" > "$sout" & local spid=$! @@ -413,7 +435,8 @@ do_transfer() if $capture; then sleep 1 - kill $cappid + kill ${cappid_listener} + kill ${cappid_connector} fi local duration @@ -435,6 +458,45 @@ do_transfer() check_transfer $cin $sout "file received by server" rets=$? 
+ local stat_synrx_now_l=$(ip netns exec ${listener_ns} nstat -z -a MPTcpExtMPCapableSYNRX | while read a count c rest ;do echo $count;done) + local stat_ackrx_now_l=$(ip netns exec ${listener_ns} nstat -z -a MPTcpExtMPCapableACKRX | while read a count c rest ;do echo $count;done) + + local stat_cookietx_now=$(ip netns exec ${listener_ns} nstat -z -a TcpExtSyncookiesSent | while read a count c rest ;do echo $count;done) + local stat_cookierx_now=$(ip netns exec ${listener_ns} nstat -z -a TcpExtSyncookiesRecv | while read a count c rest ;do echo $count;done) + + expect_synrx=$((stat_synrx_last_l)) + expect_ackrx=$((stat_ackrx_last_l)) + + cookies=$(ip netns exec ${listener_ns} sysctl net.ipv4.tcp_syncookies) + cookies=${cookies##*=} + + if [ ${cl_proto} = "MPTCP" ] && [ ${srv_proto} = "MPTCP" ]; then + expect_synrx=$((stat_synrx_last_l+1)) + expect_ackrx=$((stat_ackrx_last_l+1)) + fi + if [ $cookies -eq 2 ];then + if [ $stat_cookietx_last -ge $stat_cookietx_now ] ;then + echo "${listener_ns} CookieSent: ${cl_proto} -> ${srv_proto}: did not advance" + fi + if [ $stat_cookierx_last -ge $stat_cookierx_now ] ;then + echo "${listener_ns} CookieRecv: ${cl_proto} -> ${srv_proto}: did not advance" + fi + else + if [ $stat_cookietx_last -ne $stat_cookietx_now ] ;then + echo "${listener_ns} CookieSent: ${cl_proto} -> ${srv_proto}: changed" + fi + if [ $stat_cookierx_last -ne $stat_cookierx_now ] ;then + echo "${listener_ns} CookieRecv: ${cl_proto} -> ${srv_proto}: changed" + fi + fi + + if [ $expect_synrx -ne $stat_synrx_now_l ] ;then + echo "${listener_ns} SYNRX: ${cl_proto} -> ${srv_proto}: expect ${expect_synrx}, got ${stat_synrx_now_l}" + fi + if [ $expect_ackrx -ne $stat_ackrx_now_l ] ;then + echo "${listener_ns} ACKRX: ${cl_proto} -> ${srv_proto}: expect ${expect_synrx}, got ${stat_synrx_now_l}" + fi + if [ $retc -eq 0 ] && [ $rets -eq 0 ];then echo "$duration [ OK ]" cat "$capout" @@ -449,20 +511,25 @@ make_file() { local name=$1 local who=$2 + local SIZE=$filesize + local ksize + local rem - local SIZE TSIZE - SIZE=$((RANDOM % (1024 * 8))) - TSIZE=$((SIZE * 1024)) + if [ $SIZE -eq 0 ]; then + local MAXSIZE=$((1024 * 1024 * 8)) + local MINSIZE=$((1024 * 256)) - dd if=/dev/urandom of="$name" bs=1024 count=$SIZE 2> /dev/null + SIZE=$(((RANDOM * RANDOM + MINSIZE) % MAXSIZE)) + fi - SIZE=$((RANDOM % 1024)) - SIZE=$((SIZE + 128)) - TSIZE=$((TSIZE + SIZE)) - dd if=/dev/urandom conv=notrunc of="$name" bs=1 count=$SIZE 2> /dev/null + ksize=$((SIZE / 1024)) + rem=$((SIZE - (ksize * 1024))) + + dd if=/dev/urandom of="$name" bs=1024 count=$ksize 2> /dev/null + dd if=/dev/urandom conv=notrunc of="$name" bs=1 count=$rem 2> /dev/null echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name" - echo "Created $name (size $TSIZE) containing data sent by $who" + echo "Created $name (size $(du -b "$name")) containing data sent by $who" } run_tests_lo() @@ -497,9 +564,11 @@ run_tests_lo() return 1 fi - # don't bother testing fallback tcp except for loopback case. - if [ ${listener_ns} != ${connector_ns} ]; then - return 0 + if [ $do_tcp -eq 0 ]; then + # don't bother testing fallback tcp except for loopback case. + if [ ${listener_ns} != ${connector_ns} ]; then + return 0 + fi fi do_transfer ${listener_ns} ${connector_ns} MPTCP TCP ${connect_addr} ${local_addr} @@ -516,6 +585,15 @@ run_tests_lo() return 1 fi + if [ $do_tcp -gt 1 ] ;then + do_transfer ${listener_ns} ${connector_ns} TCP TCP ${connect_addr} ${local_addr} + lret=$? 
+ if [ $lret -ne 0 ]; then + ret=$lret + return 1 + fi + fi + return 0 } diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index dd42c2f692d0..f39c1129ce5f 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -72,6 +72,15 @@ reset() init } +reset_with_cookies() +{ + reset + + for netns in "$ns1" "$ns2";do + ip netns exec $netns sysctl -q net.ipv4.tcp_syncookies=2 + done +} + for arg in "$@"; do if [ "$arg" = "-c" ]; then capture=1 @@ -138,7 +147,7 @@ do_transfer() capuser="-Z $SUDO_USER" fi - capfile="mp_join-${listener_ns}.pcap" + capfile=$(printf "mp_join-%02u-%s.pcap" "$TEST_COUNT" "${listener_ns}") echo "Capturing traffic for test $TEST_COUNT into $capfile" ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 & @@ -227,7 +236,7 @@ chk_join_nr() local count local dump_stats - printf "%-36s %s" "$msg" "syn" + printf "%02u %-36s %s" "$TEST_COUNT" "$msg" "syn" count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}'` [ -z "$count" ] && count=0 if [ "$count" != "$syn_nr" ]; then @@ -354,4 +363,57 @@ ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow run_tests $ns1 $ns2 10.0.1.1 chk_join_nr "multiple subflows and signal" 3 3 3 +# single subflow, syncookies +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl limits 0 1 +ip netns exec $ns2 ./pm_nl_ctl limits 0 1 +ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "single subflow with syn cookies" 1 1 1 + +# multiple subflows with syn cookies +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl limits 0 2 +ip netns exec $ns2 ./pm_nl_ctl limits 0 2 +ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow +ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "multiple subflows with syn cookies" 2 2 2 + +# multiple subflows limited by server +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl limits 0 1 +ip netns exec $ns2 ./pm_nl_ctl limits 0 2 +ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow +ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "subflows limited by server w cookies" 2 2 1 + +# test signal address with cookies +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl limits 0 1 +ip netns exec $ns2 ./pm_nl_ctl limits 1 1 +ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "signal address with syn cookies" 1 1 1 + +# test cookie with subflow and signal +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal +ip netns exec $ns1 ./pm_nl_ctl limits 0 2 +ip netns exec $ns2 ./pm_nl_ctl limits 1 2 +ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "subflow and signal w cookies" 2 2 2 + +# accept and use add_addr with additional subflows +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl limits 0 3 +ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal +ip netns exec $ns2 ./pm_nl_ctl limits 1 3 +ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow +ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "subflows and signal w. 
cookies" 3 3 3 + exit $ret diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c index bcb79ba1f214..e4613ce4ed69 100644 --- a/tools/testing/selftests/net/rxtimestamp.c +++ b/tools/testing/selftests/net/rxtimestamp.c @@ -44,6 +44,7 @@ struct test_case { struct options sockopt; struct tstamps expected; bool enabled; + bool warn_on_fail; }; struct sof_flag { @@ -67,44 +68,44 @@ static struct socket_type socket_types[] = { static struct test_case test_cases[] = { { {}, {} }, { - { so_timestamp: 1 }, - { tstamp: true } + { .so_timestamp = 1 }, + { .tstamp = true } }, { - { so_timestampns: 1 }, - { tstampns: true } + { .so_timestampns = 1 }, + { .tstampns = true } }, { - { so_timestamp: 1, so_timestampns: 1 }, - { tstampns: true } + { .so_timestamp = 1, .so_timestampns = 1 }, + { .tstampns = true } }, { - { so_timestamping: SOF_TIMESTAMPING_RX_SOFTWARE }, + { .so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE }, {} }, { /* Loopback device does not support hw timestamps. */ - { so_timestamping: SOF_TIMESTAMPING_RX_HARDWARE }, + { .so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE }, {} }, { - { so_timestamping: SOF_TIMESTAMPING_SOFTWARE }, - {} + { .so_timestamping = SOF_TIMESTAMPING_SOFTWARE }, + .warn_on_fail = true }, { - { so_timestamping: SOF_TIMESTAMPING_RX_SOFTWARE + { .so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE }, {} }, { - { so_timestamping: SOF_TIMESTAMPING_SOFTWARE + { .so_timestamping = SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE }, - { swtstamp: true } + { .swtstamp = true } }, { - { so_timestamp: 1, so_timestamping: SOF_TIMESTAMPING_SOFTWARE + { .so_timestamp = 1, .so_timestamping = SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE }, - { tstamp: true, swtstamp: true } + { .tstamp = true, .swtstamp = true } }, }; @@ -115,6 +116,9 @@ static struct option long_options[] = { { "tcp", no_argument, 0, 't' }, { "udp", no_argument, 0, 'u' }, { "ip", no_argument, 0, 'i' }, + { "strict", no_argument, 0, 'S' }, + { "ipv4", no_argument, 0, '4' }, + { "ipv6", no_argument, 0, '6' }, { NULL, 0, NULL, 0 }, }; @@ -270,37 +274,55 @@ void config_so_flags(int rcv, struct options o) error(1, errno, "Failed to set SO_TIMESTAMPING"); } -bool run_test_case(struct socket_type s, struct test_case t) +bool run_test_case(struct socket_type *s, int test_num, char ip_version, + bool strict) { - int port = (s.type == SOCK_RAW) ? 0 : next_port++; + union { + struct sockaddr_in6 addr6; + struct sockaddr_in addr4; + struct sockaddr addr_un; + } addr; int read_size = op_size; - struct sockaddr_in addr; + int src, dst, rcv, port; + socklen_t addr_size; bool failed = false; - int src, dst, rcv; - src = socket(AF_INET, s.type, s.protocol); + port = (s->type == SOCK_RAW) ? 
0 : next_port++; + memset(&addr, 0, sizeof(addr)); + if (ip_version == '4') { + addr.addr4.sin_family = AF_INET; + addr.addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + addr.addr4.sin_port = htons(port); + addr_size = sizeof(addr.addr4); + if (s->type == SOCK_RAW) + read_size += 20; /* for IPv4 header */ + } else { + addr.addr6.sin6_family = AF_INET6; + addr.addr6.sin6_addr = in6addr_loopback; + addr.addr6.sin6_port = htons(port); + addr_size = sizeof(addr.addr6); + } + printf("Starting testcase %d over ipv%c...\n", test_num, ip_version); + src = socket(addr.addr_un.sa_family, s->type, + s->protocol); if (src < 0) error(1, errno, "Failed to open src socket"); - dst = socket(AF_INET, s.type, s.protocol); + dst = socket(addr.addr_un.sa_family, s->type, + s->protocol); if (dst < 0) error(1, errno, "Failed to open dst socket"); - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); - addr.sin_port = htons(port); - - if (bind(dst, (struct sockaddr *)&addr, sizeof(addr)) < 0) + if (bind(dst, &addr.addr_un, addr_size) < 0) error(1, errno, "Failed to bind to port %d", port); - if (s.type == SOCK_STREAM && (listen(dst, 1) < 0)) + if (s->type == SOCK_STREAM && (listen(dst, 1) < 0)) error(1, errno, "Failed to listen"); - if (connect(src, (struct sockaddr *)&addr, sizeof(addr)) < 0) + if (connect(src, &addr.addr_un, addr_size) < 0) error(1, errno, "Failed to connect"); - if (s.type == SOCK_STREAM) { + if (s->type == SOCK_STREAM) { rcv = accept(dst, NULL, NULL); if (rcv < 0) error(1, errno, "Failed to accept"); @@ -309,17 +331,22 @@ bool run_test_case(struct socket_type s, struct test_case t) rcv = dst; } - config_so_flags(rcv, t.sockopt); + config_so_flags(rcv, test_cases[test_num].sockopt); usleep(20000); /* setsockopt for SO_TIMESTAMPING is asynchronous */ do_send(src); - if (s.type == SOCK_RAW) - read_size += 20; /* for IP header */ - failed = do_recv(rcv, read_size, t.expected); + failed = do_recv(rcv, read_size, test_cases[test_num].expected); close(rcv); close(src); + if (failed) { + printf("FAILURE in testcase %d over ipv%c ", test_num, + ip_version); + print_test_case(&test_cases[test_num]); + if (!strict && test_cases[test_num].warn_on_fail) + failed = false; + } return failed; } @@ -327,6 +354,9 @@ int main(int argc, char **argv) { bool all_protocols = true; bool all_tests = true; + bool cfg_ipv4 = false; + bool cfg_ipv6 = false; + bool strict = false; int arg_index = 0; int failures = 0; int s, t, opt; @@ -362,6 +392,15 @@ int main(int argc, char **argv) all_protocols = false; socket_types[0].enabled = true; break; + case 'S': + strict = true; + break; + case '4': + cfg_ipv4 = true; + break; + case '6': + cfg_ipv6 = true; + break; default: error(1, 0, "Failed to parse parameters."); } @@ -375,13 +414,14 @@ int main(int argc, char **argv) for (t = 0; t < ARRAY_SIZE(test_cases); t++) { if (!all_tests && !test_cases[t].enabled) continue; - - printf("Starting testcase %d...\n", t); - if (run_test_case(socket_types[s], test_cases[t])) { - failures++; - printf("FAILURE in test case "); - print_test_case(&test_cases[t]); - } + if (cfg_ipv4 || !cfg_ipv6) + if (run_test_case(&socket_types[s], t, '4', + strict)) + failures++; + if (cfg_ipv6 || !cfg_ipv4) + if (run_test_case(&socket_types[s], t, '6', + strict)) + failures++; } } if (!failures) diff --git a/tools/testing/selftests/net/rxtimestamp.sh b/tools/testing/selftests/net/rxtimestamp.sh new file mode 100755 index 000000000000..91631e88bf46 --- /dev/null +++ 
b/tools/testing/selftests/net/rxtimestamp.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +./in_netns.sh ./rxtimestamp $@ diff --git a/tools/testing/selftests/net/txtimestamp.c b/tools/testing/selftests/net/txtimestamp.c index 011b0da6b033..490a8cca708a 100644 --- a/tools/testing/selftests/net/txtimestamp.c +++ b/tools/testing/selftests/net/txtimestamp.c @@ -64,6 +64,7 @@ static int cfg_payload_len = 10; static int cfg_poll_timeout = 100; static int cfg_delay_snd; static int cfg_delay_ack; +static int cfg_delay_tolerance_usec = 500; static bool cfg_show_payload; static bool cfg_do_pktinfo; static bool cfg_busy_poll; @@ -152,11 +153,12 @@ static void validate_key(int tskey, int tstype) static void validate_timestamp(struct timespec *cur, int min_delay) { - int max_delay = min_delay + 500 /* processing time upper bound */; int64_t cur64, start64; + int max_delay; cur64 = timespec_to_us64(cur); start64 = timespec_to_us64(&ts_usr); + max_delay = min_delay + cfg_delay_tolerance_usec; if (cur64 < start64 + min_delay || cur64 > start64 + max_delay) { fprintf(stderr, "ERROR: %lu us expected between %d and %d\n", @@ -683,6 +685,7 @@ static void __attribute__((noreturn)) usage(const char *filepath) " -r: use raw\n" " -R: use raw (IP_HDRINCL)\n" " -S N: usec to sleep before reading error queue\n" + " -t N: tolerance (usec) for timestamp validation\n" " -u: use udp\n" " -v: validate SND delay (usec)\n" " -V: validate ACK delay (usec)\n" @@ -697,7 +700,7 @@ static void parse_opt(int argc, char **argv) int c; while ((c = getopt(argc, argv, - "46bc:CeEFhIl:LnNp:PrRS:uv:V:x")) != -1) { + "46bc:CeEFhIl:LnNp:PrRS:t:uv:V:x")) != -1) { switch (c) { case '4': do_ipv6 = 0; @@ -760,6 +763,9 @@ static void parse_opt(int argc, char **argv) case 'S': cfg_sleep_usec = strtoul(optarg, NULL, 10); break; + case 't': + cfg_delay_tolerance_usec = strtoul(optarg, NULL, 10); + break; case 'u': proto_count++; cfg_proto = SOCK_DGRAM; diff --git a/tools/testing/selftests/net/vrf_strict_mode_test.sh b/tools/testing/selftests/net/vrf_strict_mode_test.sh new file mode 100755 index 000000000000..18b982d611de --- /dev/null +++ b/tools/testing/selftests/net/vrf_strict_mode_test.sh @@ -0,0 +1,396 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# This test is designed for testing the new VRF strict_mode functionality. + +ret=0 + +# identifies the "init" network namespace which is often called root network +# namespace. 
+INIT_NETNS_NAME="init" + +PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no} + +log_test() +{ + local rc=$1 + local expected=$2 + local msg="$3" + + if [ ${rc} -eq ${expected} ]; then + nsuccess=$((nsuccess+1)) + printf "\n TEST: %-60s [ OK ]\n" "${msg}" + else + ret=1 + nfail=$((nfail+1)) + printf "\n TEST: %-60s [FAIL]\n" "${msg}" + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then + echo + echo "hit enter to continue, 'q' to quit" + read a + [ "$a" = "q" ] && exit 1 + fi + fi +} + +print_log_test_results() +{ + if [ "$TESTS" != "none" ]; then + printf "\nTests passed: %3d\n" ${nsuccess} + printf "Tests failed: %3d\n" ${nfail} + fi +} + +log_section() +{ + echo + echo "################################################################################" + echo "TEST SECTION: $*" + echo "################################################################################" +} + +ip_expand_args() +{ + local nsname=$1 + local nsarg="" + + if [ "${nsname}" != "${INIT_NETNS_NAME}" ]; then + nsarg="-netns ${nsname}" + fi + + echo "${nsarg}" +} + +vrf_count() +{ + local nsname=$1 + local nsarg="$(ip_expand_args ${nsname})" + + ip ${nsarg} -o link show type vrf | wc -l +} + +count_vrf_by_table_id() +{ + local nsname=$1 + local tableid=$2 + local nsarg="$(ip_expand_args ${nsname})" + + ip ${nsarg} -d -o link show type vrf | grep "table ${tableid}" | wc -l +} + +add_vrf() +{ + local nsname=$1 + local vrfname=$2 + local vrftable=$3 + local nsarg="$(ip_expand_args ${nsname})" + + ip ${nsarg} link add ${vrfname} type vrf table ${vrftable} &>/dev/null +} + +add_vrf_and_check() +{ + local nsname=$1 + local vrfname=$2 + local vrftable=$3 + local cnt + local rc + + add_vrf ${nsname} ${vrfname} ${vrftable}; rc=$? + + cnt=$(count_vrf_by_table_id ${nsname} ${vrftable}) + + log_test ${rc} 0 "${nsname}: add vrf ${vrfname}, ${cnt} vrfs for table ${vrftable}" +} + +add_vrf_and_check_fail() +{ + local nsname=$1 + local vrfname=$2 + local vrftable=$3 + local cnt + local rc + + add_vrf ${nsname} ${vrfname} ${vrftable}; rc=$? + + cnt=$(count_vrf_by_table_id ${nsname} ${vrftable}) + + log_test ${rc} 2 "${nsname}: CANNOT add vrf ${vrfname}, ${cnt} vrfs for table ${vrftable}" +} + +del_vrf_and_check() +{ + local nsname=$1 + local vrfname=$2 + local nsarg="$(ip_expand_args ${nsname})" + + ip ${nsarg} link del ${vrfname} + log_test $? 0 "${nsname}: remove vrf ${vrfname}" +} + +config_vrf_and_check() +{ + local nsname=$1 + local addr=$2 + local vrfname=$3 + local nsarg="$(ip_expand_args ${nsname})" + + ip ${nsarg} link set dev ${vrfname} up && \ + ip ${nsarg} addr add ${addr} dev ${vrfname} + log_test $? 0 "${nsname}: vrf ${vrfname} up, addr ${addr}" +} + +read_strict_mode() +{ + local nsname=$1 + local rval + local rc=0 + local nsexec="" + + if [ "${nsname}" != "${INIT_NETNS_NAME}" ]; then + # a custom network namespace is provided + nsexec="ip netns exec ${nsname}" + fi + + rval="$(${nsexec} bash -c "cat /proc/sys/net/vrf/strict_mode" | \ + grep -E "^[0-1]$")" &> /dev/null + if [ $? 
-ne 0 ]; then + # set errors + rval=255 + rc=1 + fi + + # on success, rval can be only 0 or 1; on error, rval is equal to 255 + echo ${rval} + return ${rc} +} + +read_strict_mode_compare_and_check() +{ + local nsname=$1 + local expected=$2 + local res + + res="$(read_strict_mode ${nsname})" + log_test ${res} ${expected} "${nsname}: check strict_mode=${res}" +} + +set_strict_mode() +{ + local nsname=$1 + local val=$2 + local nsexec="" + + if [ "${nsname}" != "${INIT_NETNS_NAME}" ]; then + # a custom network namespace is provided + nsexec="ip netns exec ${nsname}" + fi + + ${nsexec} bash -c "echo ${val} >/proc/sys/net/vrf/strict_mode" &>/dev/null +} + +enable_strict_mode() +{ + local nsname=$1 + + set_strict_mode ${nsname} 1 +} + +disable_strict_mode() +{ + local nsname=$1 + + set_strict_mode ${nsname} 0 +} + +disable_strict_mode_and_check() +{ + local nsname=$1 + + disable_strict_mode ${nsname} + log_test $? 0 "${nsname}: disable strict_mode (=0)" +} + +enable_strict_mode_and_check() +{ + local nsname=$1 + + enable_strict_mode ${nsname} + log_test $? 0 "${nsname}: enable strict_mode (=1)" +} + +enable_strict_mode_and_check_fail() +{ + local nsname=$1 + + enable_strict_mode ${nsname} + log_test $? 1 "${nsname}: CANNOT enable strict_mode" +} + +strict_mode_check_default() +{ + local nsname=$1 + local strictmode + local vrfcnt + + vrfcnt=$(vrf_count ${nsname}) + strictmode=$(read_strict_mode ${nsname}) + log_test ${strictmode} 0 "${nsname}: strict_mode=0 by default, ${vrfcnt} vrfs" +} + +setup() +{ + modprobe vrf + + ip netns add testns + ip netns exec testns ip link set lo up +} + +cleanup() +{ + ip netns del testns 2>/dev/null + + ip link del vrf100 2>/dev/null + ip link del vrf101 2>/dev/null + ip link del vrf102 2>/dev/null + + echo 0 >/proc/sys/net/vrf/strict_mode 2>/dev/null +} + +vrf_strict_mode_tests_init() +{ + vrf_strict_mode_check_support init + + strict_mode_check_default init + + add_vrf_and_check init vrf100 100 + config_vrf_and_check init 172.16.100.1/24 vrf100 + + enable_strict_mode_and_check init + + add_vrf_and_check_fail init vrf101 100 + + disable_strict_mode_and_check init + + add_vrf_and_check init vrf101 100 + config_vrf_and_check init 172.16.101.1/24 vrf101 + + enable_strict_mode_and_check_fail init + + del_vrf_and_check init vrf101 + + enable_strict_mode_and_check init + + add_vrf_and_check init vrf102 102 + config_vrf_and_check init 172.16.102.1/24 vrf102 + + # the strict_modle is enabled in the init +} + +vrf_strict_mode_tests_testns() +{ + vrf_strict_mode_check_support testns + + strict_mode_check_default testns + + enable_strict_mode_and_check testns + + add_vrf_and_check testns vrf100 100 + config_vrf_and_check testns 10.0.100.1/24 vrf100 + + add_vrf_and_check_fail testns vrf101 100 + + add_vrf_and_check_fail testns vrf102 100 + + add_vrf_and_check testns vrf200 200 + + disable_strict_mode_and_check testns + + add_vrf_and_check testns vrf101 100 + + add_vrf_and_check testns vrf102 100 + + #the strict_mode is disabled in the testns +} + +vrf_strict_mode_tests_mix() +{ + read_strict_mode_compare_and_check init 1 + + read_strict_mode_compare_and_check testns 0 + + del_vrf_and_check testns vrf101 + + del_vrf_and_check testns vrf102 + + disable_strict_mode_and_check init + + enable_strict_mode_and_check testns + + enable_strict_mode_and_check init + enable_strict_mode_and_check init + + disable_strict_mode_and_check testns + disable_strict_mode_and_check testns + + read_strict_mode_compare_and_check init 1 + + read_strict_mode_compare_and_check testns 0 +} + 
+vrf_strict_mode_tests() +{ + log_section "VRF strict_mode test on init network namespace" + vrf_strict_mode_tests_init + + log_section "VRF strict_mode test on testns network namespace" + vrf_strict_mode_tests_testns + + log_section "VRF strict_mode test mixing init and testns network namespaces" + vrf_strict_mode_tests_mix +} + +vrf_strict_mode_check_support() +{ + local nsname=$1 + local output + local rc + + output="$(lsmod | grep '^vrf' | awk '{print $1}')" + if [ -z "${output}" ]; then + modinfo vrf || return $? + fi + + # we do not care about the value of the strict_mode; we only check if + # the strict_mode parameter is available or not. + read_strict_mode ${nsname} &>/dev/null; rc=$? + log_test ${rc} 0 "${nsname}: net.vrf.strict_mode is available" + + return ${rc} +} + +if [ "$(id -u)" -ne 0 ];then + echo "SKIP: Need root privileges" + exit 0 +fi + +if [ ! -x "$(command -v ip)" ]; then + echo "SKIP: Could not run test without ip tool" + exit 0 +fi + +modprobe vrf &>/dev/null +if [ ! -e /proc/sys/net/vrf/strict_mode ]; then + echo "SKIP: vrf sysctl does not exist" + exit 0 +fi + +cleanup &> /dev/null + +setup +vrf_strict_mode_tests +cleanup + +print_log_test_results + +exit $ret diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c index da7a9dda9490..f7911aaeb007 100644 --- a/tools/testing/selftests/ptp/testptp.c +++ b/tools/testing/selftests/ptp/testptp.c @@ -35,6 +35,8 @@ #define CLOCK_INVALID -1 #endif +#define NSEC_PER_SEC 1000000000LL + /* clock_adjtime is not available in GLIBC < 2.14 */ #if !__GLIBC_PREREQ(2, 14) #include <sys/syscall.h> @@ -132,6 +134,8 @@ static void usage(char *progname) " 1 - external time stamp\n" " 2 - periodic output\n" " -p val enable output with a period of 'val' nanoseconds\n" + " -H val set output phase to 'val' nanoseconds (requires -p)\n" + " -w val set output pulse width to 'val' nanoseconds (requires -p)\n" " -P val enable or disable (val=1|0) the system clock PPS\n" " -s set the ptp clock time from the system time\n" " -S set the system time from the ptp clock time\n" @@ -169,7 +173,6 @@ int main(int argc, char *argv[]) int list_pins = 0; int pct_offset = 0; int n_samples = 0; - int perout = -1; int pin_index = -1, pin_func; int pps = -1; int seconds = 0; @@ -177,10 +180,13 @@ int main(int argc, char *argv[]) int64_t t1, t2, tp; int64_t interval, offset; + int64_t perout_phase = -1; + int64_t pulsewidth = -1; + int64_t perout = -1; progname = strrchr(argv[0], '/'); progname = progname ? 
1+progname : argv[0]; - while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:z"))) { + while (EOF != (c = getopt(argc, argv, "cd:e:f:ghH:i:k:lL:p:P:sSt:T:w:z"))) { switch (c) { case 'c': capabilities = 1; @@ -197,6 +203,9 @@ int main(int argc, char *argv[]) case 'g': gettime = 1; break; + case 'H': + perout_phase = atoll(optarg); + break; case 'i': index = atoi(optarg); break; @@ -215,7 +224,7 @@ int main(int argc, char *argv[]) } break; case 'p': - perout = atoi(optarg); + perout = atoll(optarg); break; case 'P': pps = atoi(optarg); @@ -233,6 +242,9 @@ int main(int argc, char *argv[]) settime = 3; seconds = atoi(optarg); break; + case 'w': + pulsewidth = atoi(optarg); + break; case 'z': flagtest = 1; break; @@ -391,6 +403,16 @@ int main(int argc, char *argv[]) } } + if (pulsewidth >= 0 && perout < 0) { + puts("-w can only be specified together with -p"); + return -1; + } + + if (perout_phase >= 0 && perout < 0) { + puts("-H can only be specified together with -p"); + return -1; + } + if (perout >= 0) { if (clock_gettime(clkid, &ts)) { perror("clock_gettime"); @@ -398,11 +420,24 @@ int main(int argc, char *argv[]) } memset(&perout_request, 0, sizeof(perout_request)); perout_request.index = index; - perout_request.start.sec = ts.tv_sec + 2; - perout_request.start.nsec = 0; - perout_request.period.sec = 0; - perout_request.period.nsec = perout; - if (ioctl(fd, PTP_PEROUT_REQUEST, &perout_request)) { + perout_request.period.sec = perout / NSEC_PER_SEC; + perout_request.period.nsec = perout % NSEC_PER_SEC; + perout_request.flags = 0; + if (pulsewidth >= 0) { + perout_request.flags |= PTP_PEROUT_DUTY_CYCLE; + perout_request.on.sec = pulsewidth / NSEC_PER_SEC; + perout_request.on.nsec = pulsewidth % NSEC_PER_SEC; + } + if (perout_phase >= 0) { + perout_request.flags |= PTP_PEROUT_PHASE; + perout_request.phase.sec = perout_phase / NSEC_PER_SEC; + perout_request.phase.nsec = perout_phase % NSEC_PER_SEC; + } else { + perout_request.start.sec = ts.tv_sec + 2; + perout_request.start.nsec = 0; + } + + if (ioctl(fd, PTP_PEROUT_REQUEST2, &perout_request)) { perror("PTP_PEROUT_REQUEST"); } else { puts("periodic output request okay"); diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/Makefile index be5a5e542804..91fee5c43274 100644 --- a/tools/testing/selftests/tc-testing/bpf/Makefile +++ b/tools/testing/selftests/tc-testing/Makefile @@ -1,11 +1,11 @@ # SPDX-License-Identifier: GPL-2.0 -APIDIR := ../../../../include/uapi +top_srcdir = $(abspath ../../../..) +APIDIR := $(top_scrdir)/include/uapi TEST_GEN_FILES = action.o -top_srcdir = ../../../../.. 
 KSFT_KHDR_INSTALL := 1
-include ../../lib.mk
+include ../lib.mk
 CLANG ?= clang
 LLC ?= llc
@@ -28,3 +28,6 @@ $(OUTPUT)/%.o: %.c
 	$(CLANG) $(CLANG_FLAGS) \
 	-O2 -target bpf -emit-llvm -c $< -o - | \
 	$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
+
+TEST_PROGS += ./tdc.sh
+TEST_FILES := tdc*.py Tdc*.py plugins plugin-lib tc-tests
diff --git a/tools/testing/selftests/tc-testing/bpf/action.c b/tools/testing/selftests/tc-testing/action.c
index c32b99b80e19..c32b99b80e19 100644
--- a/tools/testing/selftests/tc-testing/bpf/action.c
+++ b/tools/testing/selftests/tc-testing/action.c
diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh
new file mode 100755
index 000000000000..7fe38c76db44
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+./tdc.py -c actions --nobuildebpf
+./tdc.py -c qdisc
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index 080709cc4297..cd4a27ee1466 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -24,7 +24,7 @@ NAMES = {
     # Name of the namespace to use
     'NS': 'tcut',
     # Directory containing eBPF test programs
-    'EBPFDIR': './bpf'
+    'EBPFDIR': './'
 }
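With action.c moved out of the bpf/ subdirectory and tdc.sh wired into TEST_PROGS, the whole suite can be kicked off from the tc-testing directory; a usage sketch, assuming tdc.py's usual prerequisites (root privileges and the plugins shipped via TEST_FILES) are in place:

    cd tools/testing/selftests/tc-testing
    ./tdc.sh                 # runs the actions category without rebuilding eBPF, then qdisc
    ./tdc.py -c qdisc        # or run a single category directly

The EBPFDIR change in tdc_config.py matches the relocation: the eBPF test object is now expected next to tdc.py rather than under ./bpf.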