author    Linus Torvalds <torvalds@linux-foundation.org>  2017-02-22 19:15:09 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-02-22 19:15:09 +0100
commit    3051bf36c25d5153051704291782f8d44e744d36 (patch)
tree      72dfc8a1d12675c6f2981d13102df954b678f11b /tools
parent    Merge tag 'gcc-plugins-v4.11-rc1' of git://git.kernel.org/pub/scm/linux/kerne... (diff)
parent    Revert "ath10k: Search SMBIOS for OEM board file extension" (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

   1) Support TX_RING in AF_PACKET TPACKET_V3 mode, from Sowmini Varadhan.

   2) Simplify classifier state on sk_buff in order to shrink it a bit. From Willem de Bruijn.

   3) Introduce SIPHASH and its usage for secure sequence numbers and syncookies. From Jason A. Donenfeld.

   4) Reduce CPU usage for ICMP replies we are going to limit or suppress, from Jesper Dangaard Brouer.

   5) Introduce Shared Memory Communications socket layer, from Ursula Braun.

   6) Add RACK loss detection and allow it to actually trigger fast recovery instead of just assisting after other algorithms have triggered it. From Yuchung Cheng.

   7) Add xmit_more and BQL support to mvneta driver, from Simon Guinot.

   8) skb_cow_data avoidance in esp4 and esp6, from Steffen Klassert.

   9) Export MPLS packet stats via netlink, from Robert Shearman.

  10) Significantly improve inet port bind conflict handling, especially when an application is restarted and changes its setting of reuseport. From Josef Bacik.

  11) Implement TX batching in vhost_net, from Jason Wang.

  12) Extend the dummy device so that VF (virtual function) features, such as configuration, can be more easily tested. From Phil Sutter.

  13) Avoid two atomic ops per page on x86 in bnx2x driver, from Eric Dumazet.

  14) Add new bpf MAP, implementing a longest prefix match trie. From Daniel Mack.

  15) Packet sample offloading support in mlxsw driver, from Yotam Gigi.

  16) Add new aquantia driver, from David VomLehn.

  17) Add bpf tracepoints, from Daniel Borkmann.

  18) Add support for port mirroring to b53 and bcm_sf2 drivers, from Florian Fainelli.

  19) Remove custom busy polling in many drivers; it has been handled in core networking since 4.5. From Eric Dumazet.

  20) Support XDP adjust_head in virtio_net, from John Fastabend.

  21) Fix several major holes in neighbour entry confirmation, from Julian Anastasov.

  22) Add XDP support to bnxt_en driver, from Michael Chan.

  23) VXLAN offloads for enic driver, from Govindarajulu Varadarajan.

  24) Add IPVTAP driver (IP-VLAN based tap driver) from Sainath Grandhi.

  25) Support GRO in IPSEC protocols, from Steffen Klassert"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1764 commits)
  Revert "ath10k: Search SMBIOS for OEM board file extension"
  net: socket: fix recvmmsg not returning error from sock_error
  bnxt_en: use eth_hw_addr_random()
  bpf: fix unlocking of jited image when module ronx not set
  arch: add ARCH_HAS_SET_MEMORY config
  net: napi_watchdog() can use napi_schedule_irqoff()
  tcp: Revert "tcp: tcp_probe: use spin_lock_bh()"
  net/hsr: use eth_hw_addr_random()
  net: mvpp2: enable building on 64-bit platforms
  net: mvpp2: switch to build_skb() in the RX path
  net: mvpp2: simplify MVPP2_PRS_RI_* definitions
  net: mvpp2: fix indentation of MVPP2_EXT_GLOBAL_CTRL_DEFAULT
  net: mvpp2: remove unused register definitions
  net: mvpp2: simplify mvpp2_bm_bufs_add()
  net: mvpp2: drop useless fields in mvpp2_bm_pool and related code
  net: mvpp2: remove unused 'tx_skb' field of 'struct mvpp2_tx_queue'
  net: mvpp2: release reference to txq_cpu[] entry after unmapping
  net: mvpp2: handle too large value in mvpp2_rx_time_coal_set()
  net: mvpp2: handle too large value handling in mvpp2_rx_pkts_coal_set()
  net: mvpp2: remove useless arguments in mvpp2_rx_{pkts, time}_coal_set
  ...
Diffstat (limited to 'tools')
-rw-r--r--  tools/include/uapi/linux/bpf.h                         |   23
-rw-r--r--  tools/lib/bpf/bpf.c                                    |   18
-rw-r--r--  tools/lib/bpf/bpf.h                                    |   12
-rw-r--r--  tools/lib/traceevent/event-parse.c                     |   34
-rw-r--r--  tools/lib/traceevent/event-parse.h                     |    1
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c   |    1
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c |    1
-rw-r--r--  tools/testing/selftests/bpf/.gitignore                 |    2
-rw-r--r--  tools/testing/selftests/bpf/Makefile                   |   19
-rw-r--r--  tools/testing/selftests/bpf/bpf_sys.h                  |  108
-rw-r--r--  tools/testing/selftests/bpf/test_lpm_map.c             |  358
-rw-r--r--  tools/testing/selftests/bpf/test_lru_map.c             |  138
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c                |  162
-rw-r--r--  tools/testing/selftests/bpf/test_tag.c                 |  203
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c            | 1633
-rw-r--r--  tools/testing/selftests/net/psock_lib.h                |   39
-rw-r--r--  tools/testing/selftests/net/psock_tpacket.c            |   97
17 files changed, 2536 insertions(+), 313 deletions(-)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index d2b0ac799d03..0539a0ceef38 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -63,6 +63,12 @@ struct bpf_insn {
__s32 imm; /* signed immediate constant */
};
+/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
+struct bpf_lpm_trie_key {
+ __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
+ __u8 data[0]; /* Arbitrary size */
+};
+
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
BPF_MAP_CREATE,
@@ -89,6 +95,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_CGROUP_ARRAY,
BPF_MAP_TYPE_LRU_HASH,
BPF_MAP_TYPE_LRU_PERCPU_HASH,
+ BPF_MAP_TYPE_LPM_TRIE,
};
enum bpf_prog_type {
@@ -437,6 +444,18 @@ union bpf_attr {
* @xdp_md: pointer to xdp_md
* @delta: A positive/negative integer to be added to xdp_md.data
* Return: 0 on success or negative on error
+ *
+ * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+ *     Copy a NUL-terminated string from an unsafe address. If the string
+ *     length is smaller than size, the target is not padded with further
+ *     NUL bytes. If the string length is larger than size, just size - 1
+ *     bytes are copied and the last byte is set to NUL.
+ *     @dst: destination address
+ *     @size: maximum number of bytes to copy, including the trailing NUL
+ *     @unsafe_ptr: unsafe address
+ *     Return:
+ *       > 0 length of the string including the trailing NUL on success
+ *       < 0 error
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -483,7 +502,8 @@ union bpf_attr {
FN(set_hash_invalid), \
FN(get_numa_node_id), \
FN(skb_change_head), \
- FN(xdp_adjust_head),
+ FN(xdp_adjust_head), \
+ FN(probe_read_str),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -509,6 +529,7 @@ enum bpf_func_id {
/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR (1ULL << 4)
#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
+#define BPF_F_MARK_ENFORCE (1ULL << 6)
/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS (1ULL << 0)
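
The bpf_probe_read_str() documentation added above describes a helper aimed at tracing programs that need to pull NUL-terminated strings out of unsafe memory. As a minimal sketch of how such a program might call it — not part of this patch; the probed symbol, buffer size, and libbpf helper headers are illustrative assumptions:

	/* Hypothetical tracing program, for illustration only. Assumes a
	 * clang BPF target and libbpf's helper headers; "sys_execve" and
	 * the 64-byte buffer are arbitrary choices, not from this patch. */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("kprobe/sys_execve")
	int trace_execve(struct pt_regs *ctx)
	{
		char fname[64];
		long len;

		/* Copies at most sizeof(fname) bytes including the trailing
		 * NUL; returns the copied length (including the NUL) on
		 * success, or a negative error. */
		len = bpf_probe_read_str(fname, sizeof(fname),
					 (const void *)PT_REGS_PARM1(ctx));
		if (len > 0)
			bpf_printk("exec: %s\n", fname);
		return 0;
	}

	char _license[] SEC("license") = "GPL";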
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index ae752fa4eaa7..d48b70ceb25a 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -42,13 +42,13 @@
# endif
#endif
-static __u64 ptr_to_u64(void *ptr)
+static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;
}
-static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
- unsigned int size)
+static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
+ unsigned int size)
{
return syscall(__NR_bpf, cmd, attr, size);
}
@@ -69,8 +69,8 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size,
return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
-int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
- size_t insns_cnt, char *license,
+int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
+ size_t insns_cnt, const char *license,
__u32 kern_version, char *log_buf, size_t log_buf_sz)
{
int fd;
@@ -98,7 +98,7 @@ int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
-int bpf_map_update_elem(int fd, void *key, void *value,
+int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags)
{
union bpf_attr attr;
@@ -112,7 +112,7 @@ int bpf_map_update_elem(int fd, void *key, void *value,
return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
-int bpf_map_lookup_elem(int fd, void *key, void *value)
+int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
union bpf_attr attr;
@@ -124,7 +124,7 @@ int bpf_map_lookup_elem(int fd, void *key, void *value)
return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
-int bpf_map_delete_elem(int fd, void *key)
+int bpf_map_delete_elem(int fd, const void *key)
{
union bpf_attr attr;
@@ -135,7 +135,7 @@ int bpf_map_delete_elem(int fd, void *key)
return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}
-int bpf_map_get_next_key(int fd, void *key, void *next_key)
+int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
union bpf_attr attr;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 44fb7c5f8ae6..09c3dcac0496 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -29,17 +29,17 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
/* Recommended log buffer size */
#define BPF_LOG_BUF_SIZE 65536
-int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
- size_t insns_cnt, char *license,
+int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
+ size_t insns_cnt, const char *license,
__u32 kern_version, char *log_buf,
size_t log_buf_sz);
-int bpf_map_update_elem(int fd, void *key, void *value,
+int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags);
-int bpf_map_lookup_elem(int fd, void *key, void *value);
-int bpf_map_delete_elem(int fd, void *key);
-int bpf_map_get_next_key(int fd, void *key, void *next_key);
+int bpf_map_lookup_elem(int fd, const void *key, void *value);
+int bpf_map_delete_elem(int fd, const void *key);
+int bpf_map_get_next_key(int fd, const void *key, void *next_key);
int bpf_obj_pin(int fd, const char *pathname);
int bpf_obj_get(const char *pathname);
int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
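
Combined with the new BPF_MAP_TYPE_LPM_TRIE and struct bpf_lpm_trie_key from the uapi header above, these constified wrappers can be exercised from userspace roughly as follows. This is a sketch using the libbpf API as declared in this tree, not code from the patch; error handling is reduced to asserts in the style of the selftests:

	/* Userspace sketch: create an LPM trie keyed by IPv4 prefixes and
	 * insert 192.168.0.0/24. Mirrors what test_lpm_ipaddr() below does. */
	#include <assert.h>
	#include <string.h>
	#include <alloca.h>
	#include <arpa/inet.h>
	#include <sys/time.h>
	#include <sys/resource.h>
	#include <linux/bpf.h>
	#include <bpf/bpf.h>

	int main(void)
	{
		struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
		size_t key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32);
		struct bpf_lpm_trie_key *key = alloca(key_size);
		__u64 value = 1;
		int fd;

		/* As in the selftests, lift the memlock limit first. */
		setrlimit(RLIMIT_MEMLOCK, &r);

		fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size,
				    sizeof(value), 100, BPF_F_NO_PREALLOC);
		assert(fd >= 0);

		key->prefixlen = 24;
		inet_pton(AF_INET, "192.168.0.0", key->data);
		assert(bpf_map_update_elem(fd, key, &value, 0) == 0);

		/* Lookups use the full 32-bit prefix length of the search key. */
		key->prefixlen = 32;
		inet_pton(AF_INET, "192.168.0.42", key->data);
		assert(bpf_map_lookup_elem(fd, key, &value) == 0 && value == 1);
		return 0;
	}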
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 14a4f623c1a5..f2ea78021450 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -831,6 +831,7 @@ static void free_arg(struct print_arg *arg)
free_flag_sym(arg->symbol.symbols);
break;
case PRINT_HEX:
+ case PRINT_HEX_STR:
free_arg(arg->hex.field);
free_arg(arg->hex.size);
break;
@@ -2629,10 +2630,11 @@ out_free:
}
static enum event_type
-process_hex(struct event_format *event, struct print_arg *arg, char **tok)
+process_hex_common(struct event_format *event, struct print_arg *arg,
+ char **tok, enum print_arg_type type)
{
memset(arg, 0, sizeof(*arg));
- arg->type = PRINT_HEX;
+ arg->type = type;
if (alloc_and_process_delim(event, ",", &arg->hex.field))
goto out;
@@ -2651,6 +2653,19 @@ out:
}
static enum event_type
+process_hex(struct event_format *event, struct print_arg *arg, char **tok)
+{
+ return process_hex_common(event, arg, tok, PRINT_HEX);
+}
+
+static enum event_type
+process_hex_str(struct event_format *event, struct print_arg *arg,
+ char **tok)
+{
+ return process_hex_common(event, arg, tok, PRINT_HEX_STR);
+}
+
+static enum event_type
process_int_array(struct event_format *event, struct print_arg *arg, char **tok)
{
memset(arg, 0, sizeof(*arg));
@@ -3009,6 +3024,10 @@ process_function(struct event_format *event, struct print_arg *arg,
free_token(token);
return process_hex(event, arg, tok);
}
+ if (strcmp(token, "__print_hex_str") == 0) {
+ free_token(token);
+ return process_hex_str(event, arg, tok);
+ }
if (strcmp(token, "__print_array") == 0) {
free_token(token);
return process_int_array(event, arg, tok);
@@ -3547,6 +3566,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
case PRINT_SYMBOL:
case PRINT_INT_ARRAY:
case PRINT_HEX:
+ case PRINT_HEX_STR:
break;
case PRINT_TYPE:
val = eval_num_arg(data, size, event, arg->typecast.item);
@@ -3962,6 +3982,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
}
break;
case PRINT_HEX:
+ case PRINT_HEX_STR:
if (arg->hex.field->type == PRINT_DYNAMIC_ARRAY) {
unsigned long offset;
offset = pevent_read_number(pevent,
@@ -3981,7 +4002,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
}
len = eval_num_arg(data, size, event, arg->hex.size);
for (i = 0; i < len; i++) {
- if (i)
+			if (i && arg->type == PRINT_HEX)
trace_seq_putc(s, ' ');
trace_seq_printf(s, "%02x", hex[i]);
}
@@ -5727,6 +5748,13 @@ static void print_args(struct print_arg *args)
print_args(args->hex.size);
printf(")");
break;
+ case PRINT_HEX_STR:
+ printf("__print_hex_str(");
+ print_args(args->hex.field);
+ printf(", ");
+ print_args(args->hex.size);
+ printf(")");
+ break;
case PRINT_INT_ARRAY:
printf("__print_array(");
print_args(args->int_array.field);
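
The behavioural difference between the two format helpers is only the byte separator, as the changed loop in print_str_arg() shows: for the same buffer, __print_hex() renders "de ad be ef" while the new __print_hex_str() renders "deadbeef". A tiny standalone sketch of that logic, illustrative only and not part of the patch:

	#include <stdio.h>
	#include <stdint.h>

	/* Mirrors the loop in print_str_arg(): PRINT_HEX inserts a space
	 * between bytes, PRINT_HEX_STR does not. */
	static void print_hex(const uint8_t *buf, int len, int spaced)
	{
		int i;

		for (i = 0; i < len; i++) {
			if (i && spaced)
				putchar(' ');
			printf("%02x", buf[i]);
		}
		putchar('\n');
	}

	int main(void)
	{
		const uint8_t buf[] = { 0xde, 0xad, 0xbe, 0xef };

		print_hex(buf, sizeof(buf), 1);	/* __print_hex:     "de ad be ef" */
		print_hex(buf, sizeof(buf), 0);	/* __print_hex_str: "deadbeef"    */
		return 0;
	}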
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 7aae746ec2fe..74cecba87daa 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -292,6 +292,7 @@ enum print_arg_type {
PRINT_FUNC,
PRINT_BITMASK,
PRINT_DYNAMIC_ARRAY_LEN,
+ PRINT_HEX_STR,
};
struct print_arg {
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index c1555fd0035a..dff043a29589 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -217,6 +217,7 @@ static void define_event_symbols(struct event_format *event,
cur_field_name);
break;
case PRINT_HEX:
+ case PRINT_HEX_STR:
define_event_symbols(event, ev_name, args->hex.field);
define_event_symbols(event, ev_name, args->hex.size);
break;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 089438da1f7f..581e0efd6356 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -236,6 +236,7 @@ static void define_event_symbols(struct event_format *event,
cur_field_name);
break;
case PRINT_HEX:
+ case PRINT_HEX_STR:
define_event_symbols(event, ev_name, args->hex.field);
define_event_symbols(event, ev_name, args->hex.size);
break;
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 071431bedde8..541d9d7fad5a 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -1,3 +1,5 @@
test_verifier
test_maps
test_lru_map
+test_lpm_map
+test_tag
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 7a5f24543a5f..c7816fe60feb 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,12 +1,25 @@
-CFLAGS += -Wall -O2 -I../../../../usr/include
+LIBDIR := ../../../lib
+BPFOBJ := $(LIBDIR)/bpf/bpf.o
-test_objs = test_verifier test_maps test_lru_map
+CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR)
-TEST_PROGS := test_verifier test_maps test_lru_map test_kmod.sh
+test_objs = test_verifier test_tag test_maps test_lru_map test_lpm_map
+
+TEST_PROGS := $(test_objs) test_kmod.sh
TEST_FILES := $(test_objs)
+.PHONY: all clean force
+
all: $(test_objs)
+# force a rebuild of BPFOBJ when its dependencies are updated
+force:
+
+$(BPFOBJ): force
+ $(MAKE) -C $(dir $(BPFOBJ))
+
+$(test_objs): $(BPFOBJ)
+
include ../lib.mk
clean:
diff --git a/tools/testing/selftests/bpf/bpf_sys.h b/tools/testing/selftests/bpf/bpf_sys.h
deleted file mode 100644
index 6b4565f2a3f2..000000000000
--- a/tools/testing/selftests/bpf/bpf_sys.h
+++ /dev/null
@@ -1,108 +0,0 @@
-#ifndef __BPF_SYS__
-#define __BPF_SYS__
-
-#include <stdint.h>
-#include <stdlib.h>
-
-#include <sys/syscall.h>
-
-#include <linux/bpf.h>
-
-static inline __u64 bpf_ptr_to_u64(const void *ptr)
-{
- return (__u64)(unsigned long) ptr;
-}
-
-static inline int bpf(int cmd, union bpf_attr *attr, unsigned int size)
-{
-#ifdef __NR_bpf
- return syscall(__NR_bpf, cmd, attr, size);
-#else
- fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
- errno = ENOSYS;
- return -1;
-#endif
-}
-
-static inline int bpf_map_lookup(int fd, const void *key, void *value)
-{
- union bpf_attr attr = {};
-
- attr.map_fd = fd;
- attr.key = bpf_ptr_to_u64(key);
- attr.value = bpf_ptr_to_u64(value);
-
- return bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
-}
-
-static inline int bpf_map_update(int fd, const void *key, const void *value,
- uint64_t flags)
-{
- union bpf_attr attr = {};
-
- attr.map_fd = fd;
- attr.key = bpf_ptr_to_u64(key);
- attr.value = bpf_ptr_to_u64(value);
- attr.flags = flags;
-
- return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
-}
-
-static inline int bpf_map_delete(int fd, const void *key)
-{
- union bpf_attr attr = {};
-
- attr.map_fd = fd;
- attr.key = bpf_ptr_to_u64(key);
-
- return bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
-}
-
-static inline int bpf_map_next_key(int fd, const void *key, void *next_key)
-{
- union bpf_attr attr = {};
-
- attr.map_fd = fd;
- attr.key = bpf_ptr_to_u64(key);
- attr.next_key = bpf_ptr_to_u64(next_key);
-
- return bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
-}
-
-static inline int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
- uint32_t size_value, uint32_t max_elem,
- uint32_t flags)
-{
- union bpf_attr attr = {};
-
- attr.map_type = type;
- attr.key_size = size_key;
- attr.value_size = size_value;
- attr.max_entries = max_elem;
- attr.map_flags = flags;
-
- return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
-}
-
-static inline int bpf_prog_load(enum bpf_prog_type type,
- const struct bpf_insn *insns, size_t size_insns,
- const char *license, char *log, size_t size_log)
-{
- union bpf_attr attr = {};
-
- attr.prog_type = type;
- attr.insns = bpf_ptr_to_u64(insns);
- attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
- attr.license = bpf_ptr_to_u64(license);
-
- if (size_log > 0) {
- attr.log_buf = bpf_ptr_to_u64(log);
- attr.log_size = size_log;
- attr.log_level = 1;
- log[0] = 0;
- }
-
- return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
-}
-
-#endif /* __BPF_SYS__ */
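
With bpf_sys.h deleted, the selftests link against tools/lib/bpf instead; the removed local wrappers map onto the libbpf functions used throughout the rest of this patch:

	bpf_map_create()   -> bpf_create_map()
	bpf_map_lookup()   -> bpf_map_lookup_elem()
	bpf_map_update()   -> bpf_map_update_elem()
	bpf_map_delete()   -> bpf_map_delete_elem()
	bpf_map_next_key() -> bpf_map_get_next_key()
	bpf_prog_load()    -> bpf_load_program()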
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
new file mode 100644
index 000000000000..e97565243d59
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -0,0 +1,358 @@
+/*
+ * Randomized tests for eBPF longest-prefix-match maps
+ *
+ * This program runs randomized tests against the lpm-bpf-map. It implements a
+ * "Trivial Longest Prefix Match" (tlpm) based on simple, linear, singly linked
+ * lists. The implementation should be pretty straightforward.
+ *
+ * Based on tlpm, this inserts randomized data into bpf-lpm-maps and verifies
+ * the trie-based bpf-map implementation behaves the same way as tlpm.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/bpf.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#include <bpf/bpf.h>
+#include "bpf_util.h"
+
+struct tlpm_node {
+ struct tlpm_node *next;
+ size_t n_bits;
+ uint8_t key[];
+};
+
+static struct tlpm_node *tlpm_add(struct tlpm_node *list,
+ const uint8_t *key,
+ size_t n_bits)
+{
+ struct tlpm_node *node;
+ size_t n;
+
+ /* add new entry with @key/@n_bits to @list and return new head */
+
+ n = (n_bits + 7) / 8;
+ node = malloc(sizeof(*node) + n);
+ assert(node);
+
+ node->next = list;
+ node->n_bits = n_bits;
+ memcpy(node->key, key, n);
+
+ return node;
+}
+
+static void tlpm_clear(struct tlpm_node *list)
+{
+ struct tlpm_node *node;
+
+ /* free all entries in @list */
+
+ while ((node = list)) {
+ list = list->next;
+ free(node);
+ }
+}
+
+static struct tlpm_node *tlpm_match(struct tlpm_node *list,
+ const uint8_t *key,
+ size_t n_bits)
+{
+ struct tlpm_node *best = NULL;
+ size_t i;
+
+ /* Perform longest prefix-match on @key/@n_bits. That is, iterate all
+ * entries and match each prefix against @key. Remember the "best"
+ * entry we find (i.e., the longest prefix that matches) and return it
+ * to the caller when done.
+ */
+
+ for ( ; list; list = list->next) {
+ for (i = 0; i < n_bits && i < list->n_bits; ++i) {
+ if ((key[i / 8] & (1 << (7 - i % 8))) !=
+ (list->key[i / 8] & (1 << (7 - i % 8))))
+ break;
+ }
+
+ if (i >= list->n_bits) {
+ if (!best || i > best->n_bits)
+ best = list;
+ }
+ }
+
+ return best;
+}
+
+static void test_lpm_basic(void)
+{
+ struct tlpm_node *list = NULL, *t1, *t2;
+
+ /* very basic, static tests to verify tlpm works as expected */
+
+ assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8));
+
+ t1 = list = tlpm_add(list, (uint8_t[]){ 0xff }, 8);
+ assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
+ assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
+ assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0x00 }, 16));
+ assert(!tlpm_match(list, (uint8_t[]){ 0x7f }, 8));
+ assert(!tlpm_match(list, (uint8_t[]){ 0xfe }, 8));
+ assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 7));
+
+ t2 = list = tlpm_add(list, (uint8_t[]){ 0xff, 0xff }, 16);
+ assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
+ assert(t2 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
+ assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15));
+ assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16));
+
+ tlpm_clear(list);
+}
+
+static void test_lpm_order(void)
+{
+ struct tlpm_node *t1, *t2, *l1 = NULL, *l2 = NULL;
+ size_t i, j;
+
+ /* Verify the tlpm implementation works correctly regardless of the
+ * order of entries. Insert a random set of entries into @l1, and copy
+ * the same data in reverse order into @l2. Then verify a lookup of
+ * random keys will yield the same result in both sets.
+ */
+
+ for (i = 0; i < (1 << 12); ++i)
+ l1 = tlpm_add(l1, (uint8_t[]){
+ rand() % 0xff,
+ rand() % 0xff,
+ }, rand() % 16 + 1);
+
+ for (t1 = l1; t1; t1 = t1->next)
+ l2 = tlpm_add(l2, t1->key, t1->n_bits);
+
+ for (i = 0; i < (1 << 8); ++i) {
+ uint8_t key[] = { rand() % 0xff, rand() % 0xff };
+
+ t1 = tlpm_match(l1, key, 16);
+ t2 = tlpm_match(l2, key, 16);
+
+ assert(!t1 == !t2);
+ if (t1) {
+ assert(t1->n_bits == t2->n_bits);
+ for (j = 0; j < t1->n_bits; ++j)
+ assert((t1->key[j / 8] & (1 << (7 - j % 8))) ==
+ (t2->key[j / 8] & (1 << (7 - j % 8))));
+ }
+ }
+
+ tlpm_clear(l1);
+ tlpm_clear(l2);
+}
+
+static void test_lpm_map(int keysize)
+{
+ size_t i, j, n_matches, n_nodes, n_lookups;
+ struct tlpm_node *t, *list = NULL;
+ struct bpf_lpm_trie_key *key;
+ uint8_t *data, *value;
+ int r, map;
+
+ /* Compare behavior of tlpm vs. bpf-lpm. Create a randomized set of
+ * prefixes and insert it into both tlpm and bpf-lpm. Then run some
+ * randomized lookups and verify both maps return the same result.
+ */
+
+ n_matches = 0;
+ n_nodes = 1 << 8;
+ n_lookups = 1 << 16;
+
+ data = alloca(keysize);
+ memset(data, 0, keysize);
+
+ value = alloca(keysize + 1);
+ memset(value, 0, keysize + 1);
+
+ key = alloca(sizeof(*key) + keysize);
+ memset(key, 0, sizeof(*key) + keysize);
+
+ map = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
+ sizeof(*key) + keysize,
+ keysize + 1,
+ 4096,
+ BPF_F_NO_PREALLOC);
+ assert(map >= 0);
+
+ for (i = 0; i < n_nodes; ++i) {
+ for (j = 0; j < keysize; ++j)
+ value[j] = rand() & 0xff;
+ value[keysize] = rand() % (8 * keysize + 1);
+
+ list = tlpm_add(list, value, value[keysize]);
+
+ key->prefixlen = value[keysize];
+ memcpy(key->data, value, keysize);
+ r = bpf_map_update_elem(map, key, value, 0);
+ assert(!r);
+ }
+
+ for (i = 0; i < n_lookups; ++i) {
+ for (j = 0; j < keysize; ++j)
+ data[j] = rand() & 0xff;
+
+ t = tlpm_match(list, data, 8 * keysize);
+
+ key->prefixlen = 8 * keysize;
+ memcpy(key->data, data, keysize);
+ r = bpf_map_lookup_elem(map, key, value);
+ assert(!r || errno == ENOENT);
+ assert(!t == !!r);
+
+ if (t) {
+ ++n_matches;
+ assert(t->n_bits == value[keysize]);
+ for (j = 0; j < t->n_bits; ++j)
+ assert((t->key[j / 8] & (1 << (7 - j % 8))) ==
+ (value[j / 8] & (1 << (7 - j % 8))));
+ }
+ }
+
+ close(map);
+ tlpm_clear(list);
+
+ /* With 255 random nodes in the map, we are pretty likely to match
+ * something on every lookup. For statistics, use this:
+ *
+ * printf(" nodes: %zu\n"
+ * "lookups: %zu\n"
+ * "matches: %zu\n", n_nodes, n_lookups, n_matches);
+ */
+}
+
+/* Test the implementation with some 'real world' examples */
+
+static void test_lpm_ipaddr(void)
+{
+ struct bpf_lpm_trie_key *key_ipv4;
+ struct bpf_lpm_trie_key *key_ipv6;
+ size_t key_size_ipv4;
+ size_t key_size_ipv6;
+ int map_fd_ipv4;
+ int map_fd_ipv6;
+ __u64 value;
+
+ key_size_ipv4 = sizeof(*key_ipv4) + sizeof(__u32);
+ key_size_ipv6 = sizeof(*key_ipv6) + sizeof(__u32) * 4;
+ key_ipv4 = alloca(key_size_ipv4);
+ key_ipv6 = alloca(key_size_ipv6);
+
+ map_fd_ipv4 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
+ key_size_ipv4, sizeof(value),
+ 100, BPF_F_NO_PREALLOC);
+ assert(map_fd_ipv4 >= 0);
+
+ map_fd_ipv6 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
+ key_size_ipv6, sizeof(value),
+ 100, BPF_F_NO_PREALLOC);
+ assert(map_fd_ipv6 >= 0);
+
+	/* Fill in some IPv4 and IPv6 address ranges */
+ value = 1;
+ key_ipv4->prefixlen = 16;
+ inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
+ assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+ value = 2;
+ key_ipv4->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
+ assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+ value = 3;
+ key_ipv4->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.128.0", key_ipv4->data);
+ assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+ value = 5;
+ key_ipv4->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.1.0", key_ipv4->data);
+ assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+ value = 4;
+ key_ipv4->prefixlen = 23;
+ inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
+ assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+ value = 0xdeadbeef;
+ key_ipv6->prefixlen = 64;
+ inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data);
+ assert(bpf_map_update_elem(map_fd_ipv6, key_ipv6, &value, 0) == 0);
+
+	/* Set prefixlen to maximum for lookups */
+ key_ipv4->prefixlen = 32;
+ key_ipv6->prefixlen = 128;
+
+ /* Test some lookups that should come back with a value */
+ inet_pton(AF_INET, "192.168.128.23", key_ipv4->data);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
+ assert(value == 3);
+
+ inet_pton(AF_INET, "192.168.0.1", key_ipv4->data);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
+ assert(value == 2);
+
+ inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data);
+ assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
+ assert(value == 0xdeadbeef);
+
+ inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data);
+ assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
+ assert(value == 0xdeadbeef);
+
+ /* Test some lookups that should not match any entry */
+ inet_pton(AF_INET, "10.0.0.1", key_ipv4->data);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
+ errno == ENOENT);
+
+ inet_pton(AF_INET, "11.11.11.11", key_ipv4->data);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
+ errno == ENOENT);
+
+ inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data);
+ assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -1 &&
+ errno == ENOENT);
+
+ close(map_fd_ipv4);
+ close(map_fd_ipv6);
+}
+
+int main(void)
+{
+ struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
+ int i, ret;
+
+ /* we want predictable, pseudo random tests */
+ srand(0xf00ba1);
+
+ /* allow unlimited locked memory */
+ ret = setrlimit(RLIMIT_MEMLOCK, &limit);
+ if (ret < 0)
+ perror("Unable to lift memlock rlimit");
+
+ test_lpm_basic();
+ test_lpm_order();
+
+ /* Test with 8, 16, 24, 32, ... 128 bit prefix length */
+ for (i = 1; i <= 16; ++i)
+ test_lpm_map(i);
+
+ test_lpm_ipaddr();
+
+ printf("test_lpm: OK\n");
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index 9f7bd1915c21..00b0aff56e2e 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -18,7 +18,7 @@
#include <sys/wait.h>
#include <sys/resource.h>
-#include "bpf_sys.h"
+#include <bpf/bpf.h>
#include "bpf_util.h"
#define LOCAL_FREE_TARGET (128)
@@ -30,11 +30,11 @@ static int create_map(int map_type, int map_flags, unsigned int size)
{
int map_fd;
- map_fd = bpf_map_create(map_type, sizeof(unsigned long long),
+ map_fd = bpf_create_map(map_type, sizeof(unsigned long long),
sizeof(unsigned long long), size, map_flags);
if (map_fd == -1)
- perror("bpf_map_create");
+ perror("bpf_create_map");
return map_fd;
}
@@ -45,9 +45,9 @@ static int map_subset(int map0, int map1)
unsigned long long value0[nr_cpus], value1[nr_cpus];
int ret;
- while (!bpf_map_next_key(map1, &next_key, &next_key)) {
- assert(!bpf_map_lookup(map1, &next_key, value1));
- ret = bpf_map_lookup(map0, &next_key, value0);
+ while (!bpf_map_get_next_key(map1, &next_key, &next_key)) {
+ assert(!bpf_map_lookup_elem(map1, &next_key, value1));
+ ret = bpf_map_lookup_elem(map0, &next_key, value0);
if (ret) {
printf("key:%llu not found from map. %s(%d)\n",
next_key, strerror(errno), errno);
@@ -119,52 +119,54 @@ static void test_lru_sanity0(int map_type, int map_flags)
/* insert key=1 element */
key = 1;
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
- assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
/* BPF_NOEXIST means: add new element if it doesn't exist */
- assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
/* key=1 already exists */
- errno == EEXIST);
+ && errno == EEXIST);
- assert(bpf_map_update(lru_map_fd, &key, value, -1) == -1 &&
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -1 &&
errno == EINVAL);
/* insert key=2 element */
/* check that key=2 is not found */
key = 2;
- assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
errno == ENOENT);
/* BPF_EXIST means: update existing element */
- assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
/* key=2 is not there */
errno == ENOENT);
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
/* insert key=3 element */
/* check that key=3 is not found */
key = 3;
- assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
errno == ENOENT);
/* check that key=1 can be found and mark the ref bit to
* stop LRU from removing key=1
*/
key = 1;
- assert(!bpf_map_lookup(lru_map_fd, &key, value));
+ assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
assert(value[0] == 1234);
key = 3;
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
- assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
/* key=2 has been removed from the LRU */
key = 2;
- assert(bpf_map_lookup(lru_map_fd, &key, value) == -1);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1);
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -217,14 +219,15 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
/* Insert 1 to tgt_free (+tgt_free keys) */
end_key = 1 + tgt_free;
for (key = 1; key < end_key; key++)
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
/* Lookup 1 to tgt_free/2 */
end_key = 1 + batch_size;
for (key = 1; key < end_key; key++) {
- assert(!bpf_map_lookup(lru_map_fd, &key, value));
- assert(!bpf_map_update(expected_map_fd, &key, value,
- BPF_NOEXIST));
+ assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
}
/* Insert 1+tgt_free to 2*tgt_free
@@ -234,9 +237,10 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
key = 1 + tgt_free;
end_key = key + tgt_free;
for (; key < end_key; key++) {
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
- assert(!bpf_map_update(expected_map_fd, &key, value,
- BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
}
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -301,9 +305,10 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
/* Insert 1 to tgt_free (+tgt_free keys) */
end_key = 1 + tgt_free;
for (key = 1; key < end_key; key++)
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
- /* Any bpf_map_update will require to acquire a new node
+ /* Any bpf_map_update_elem will require to acquire a new node
* from LRU first.
*
* The local list is running out of free nodes.
@@ -316,10 +321,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
*/
key = 1;
if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
- assert(!bpf_map_delete(lru_map_fd, &key));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+ assert(!bpf_map_delete_elem(lru_map_fd, &key));
} else {
- assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST));
+ assert(bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_EXIST));
}
/* Re-insert 1 to tgt_free/2 again and do a lookup
@@ -328,12 +335,13 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
end_key = 1 + batch_size;
value[0] = 4321;
for (key = 1; key < end_key; key++) {
- assert(bpf_map_lookup(lru_map_fd, &key, value));
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
- assert(!bpf_map_lookup(lru_map_fd, &key, value));
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+ assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
assert(value[0] == 4321);
- assert(!bpf_map_update(expected_map_fd, &key, value,
- BPF_NOEXIST));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
}
value[0] = 1234;
@@ -344,14 +352,16 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
/* These newly added but not referenced keys will be
* gone during the next LRU shrink.
*/
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
/* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
end_key = key + tgt_free;
for (; key < end_key; key++) {
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
- assert(!bpf_map_update(expected_map_fd, &key, value,
- BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
}
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -401,14 +411,15 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
end_key = 1 + (2 * tgt_free);
for (key = 1; key < end_key; key++)
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
/* Lookup key 1 to tgt_free*3/2 */
end_key = tgt_free + batch_size;
for (key = 1; key < end_key; key++) {
- assert(!bpf_map_lookup(lru_map_fd, &key, value));
- assert(!bpf_map_update(expected_map_fd, &key, value,
- BPF_NOEXIST));
+ assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
}
/* Add 1+2*tgt_free to tgt_free*5/2
@@ -417,9 +428,10 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
key = 2 * tgt_free + 1;
end_key = key + batch_size;
for (; key < end_key; key++) {
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
- assert(!bpf_map_update(expected_map_fd, &key, value,
- BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
}
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -457,27 +469,29 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
for (key = 1; key <= 2 * tgt_free; key++)
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
key = 1;
- assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
for (key = 1; key <= tgt_free; key++) {
- assert(!bpf_map_lookup(lru_map_fd, &key, value));
- assert(!bpf_map_update(expected_map_fd, &key, value,
- BPF_NOEXIST));
+ assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
}
for (; key <= 2 * tgt_free; key++) {
- assert(!bpf_map_delete(lru_map_fd, &key));
- assert(bpf_map_delete(lru_map_fd, &key));
+ assert(!bpf_map_delete_elem(lru_map_fd, &key));
+ assert(bpf_map_delete_elem(lru_map_fd, &key));
}
end_key = key + 2 * tgt_free;
for (; key < end_key; key++) {
- assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
- assert(!bpf_map_update(expected_map_fd, &key, value,
- BPF_NOEXIST));
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+ BPF_NOEXIST));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
}
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -493,16 +507,16 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
unsigned long long key, value[nr_cpus];
/* Ensure the last key inserted by previous CPU can be found */
- assert(!bpf_map_lookup(map_fd, &last_key, value));
+ assert(!bpf_map_lookup_elem(map_fd, &last_key, value));
value[0] = 1234;
key = last_key + 1;
- assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
- assert(!bpf_map_lookup(map_fd, &key, value));
+ assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_lookup_elem(map_fd, &key, value));
/* Cannot find the last key because it was removed by LRU */
- assert(bpf_map_lookup(map_fd, &last_key, value));
+ assert(bpf_map_lookup_elem(map_fd, &last_key, value));
}
/* Test map with only one element */
@@ -523,7 +537,7 @@ static void test_lru_sanity5(int map_type, int map_flags)
value[0] = 1234;
key = 0;
- assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
while (sched_next_online(0, &next_cpu) != -1) {
pid_t pid;
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index eedfef8d2946..cada17ac00b8 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -21,7 +21,7 @@
#include <linux/bpf.h>
-#include "bpf_sys.h"
+#include <bpf/bpf.h>
#include "bpf_util.h"
static int map_flags;
@@ -31,7 +31,7 @@ static void test_hashmap(int task, void *data)
long long key, next_key, value;
int fd;
- fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+ fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
2, map_flags);
if (fd < 0) {
printf("Failed to create hashmap '%s'!\n", strerror(errno));
@@ -41,69 +41,70 @@ static void test_hashmap(int task, void *data)
key = 1;
value = 1234;
/* Insert key=1 element. */
- assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0);
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
value = 0;
/* BPF_NOEXIST means add new element if it doesn't exist. */
- assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
/* key=1 already exists. */
errno == EEXIST);
/* -1 is an invalid flag. */
- assert(bpf_map_update(fd, &key, &value, -1) == -1 && errno == EINVAL);
+ assert(bpf_map_update_elem(fd, &key, &value, -1) == -1 &&
+ errno == EINVAL);
/* Check that key=1 can be found. */
- assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234);
+ assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234);
key = 2;
/* Check that key=2 is not found. */
- assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT);
+ assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
/* BPF_EXIST means update existing element. */
- assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 &&
/* key=2 is not there. */
errno == ENOENT);
/* Insert key=2 element. */
- assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0);
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
/* key=1 and key=2 were inserted, check that key=0 cannot be
* inserted due to max_entries limit.
*/
key = 0;
- assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
errno == E2BIG);
/* Update existing element, though the map is full. */
key = 1;
- assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0);
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
key = 2;
- assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0);
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
key = 1;
- assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0);
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
/* Check that key = 0 doesn't exist. */
key = 0;
- assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
/* Iterate over two elements. */
- assert(bpf_map_next_key(fd, &key, &next_key) == 0 &&
+ assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
(next_key == 1 || next_key == 2));
- assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 &&
+ assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
(next_key == 1 || next_key == 2));
- assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
errno == ENOENT);
/* Delete both elements. */
key = 1;
- assert(bpf_map_delete(fd, &key) == 0);
+ assert(bpf_map_delete_elem(fd, &key) == 0);
key = 2;
- assert(bpf_map_delete(fd, &key) == 0);
- assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_delete_elem(fd, &key) == 0);
+ assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
key = 0;
/* Check that map is empty. */
- assert(bpf_map_next_key(fd, &key, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 &&
errno == ENOENT);
close(fd);
@@ -117,7 +118,7 @@ static void test_hashmap_percpu(int task, void *data)
int expected_key_mask = 0;
int fd, i;
- fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key),
+ fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key),
sizeof(value[0]), 2, map_flags);
if (fd < 0) {
printf("Failed to create hashmap '%s'!\n", strerror(errno));
@@ -130,53 +131,54 @@ static void test_hashmap_percpu(int task, void *data)
key = 1;
/* Insert key=1 element. */
assert(!(expected_key_mask & key));
- assert(bpf_map_update(fd, &key, value, BPF_ANY) == 0);
+ assert(bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0);
expected_key_mask |= key;
/* BPF_NOEXIST means add new element if it doesn't exist. */
- assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 &&
/* key=1 already exists. */
errno == EEXIST);
/* -1 is an invalid flag. */
- assert(bpf_map_update(fd, &key, value, -1) == -1 && errno == EINVAL);
+ assert(bpf_map_update_elem(fd, &key, value, -1) == -1 &&
+ errno == EINVAL);
/* Check that key=1 can be found. Value could be 0 if the lookup
* was run from a different CPU.
*/
value[0] = 1;
- assert(bpf_map_lookup(fd, &key, value) == 0 && value[0] == 100);
+ assert(bpf_map_lookup_elem(fd, &key, value) == 0 && value[0] == 100);
key = 2;
/* Check that key=2 is not found. */
- assert(bpf_map_lookup(fd, &key, value) == -1 && errno == ENOENT);
+ assert(bpf_map_lookup_elem(fd, &key, value) == -1 && errno == ENOENT);
/* BPF_EXIST means update existing element. */
- assert(bpf_map_update(fd, &key, value, BPF_EXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == -1 &&
/* key=2 is not there. */
errno == ENOENT);
/* Insert key=2 element. */
assert(!(expected_key_mask & key));
- assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == 0);
+ assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == 0);
expected_key_mask |= key;
/* key=1 and key=2 were inserted, check that key=0 cannot be
* inserted due to max_entries limit.
*/
key = 0;
- assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 &&
errno == E2BIG);
/* Check that key = 0 doesn't exist. */
- assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
/* Iterate over two elements. */
- while (!bpf_map_next_key(fd, &key, &next_key)) {
+ while (!bpf_map_get_next_key(fd, &key, &next_key)) {
assert((expected_key_mask & next_key) == next_key);
expected_key_mask &= ~next_key;
- assert(bpf_map_lookup(fd, &next_key, value) == 0);
+ assert(bpf_map_lookup_elem(fd, &next_key, value) == 0);
for (i = 0; i < nr_cpus; i++)
assert(value[i] == i + 100);
@@ -187,18 +189,18 @@ static void test_hashmap_percpu(int task, void *data)
/* Update with BPF_EXIST. */
key = 1;
- assert(bpf_map_update(fd, &key, value, BPF_EXIST) == 0);
+ assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == 0);
/* Delete both elements. */
key = 1;
- assert(bpf_map_delete(fd, &key) == 0);
+ assert(bpf_map_delete_elem(fd, &key) == 0);
key = 2;
- assert(bpf_map_delete(fd, &key) == 0);
- assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_delete_elem(fd, &key) == 0);
+ assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
key = 0;
/* Check that map is empty. */
- assert(bpf_map_next_key(fd, &key, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 &&
errno == ENOENT);
close(fd);
@@ -209,7 +211,7 @@ static void test_arraymap(int task, void *data)
int key, next_key, fd;
long long value;
- fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value),
+ fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value),
2, 0);
if (fd < 0) {
printf("Failed to create arraymap '%s'!\n", strerror(errno));
@@ -219,40 +221,40 @@ static void test_arraymap(int task, void *data)
key = 1;
value = 1234;
/* Insert key=1 element. */
- assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0);
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
value = 0;
- assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
errno == EEXIST);
/* Check that key=1 can be found. */
- assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234);
+ assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234);
key = 0;
/* Check that key=0 is also found and zero initialized. */
- assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0);
+ assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0);
/* key=0 and key=1 were inserted, check that key=2 cannot be inserted
* due to max_entries limit.
*/
key = 2;
- assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 &&
errno == E2BIG);
/* Check that key = 2 doesn't exist. */
- assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT);
+ assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
/* Iterate over two elements. */
- assert(bpf_map_next_key(fd, &key, &next_key) == 0 &&
+ assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
next_key == 0);
- assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 &&
+ assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
next_key == 1);
- assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
errno == ENOENT);
/* Delete shouldn't succeed. */
key = 1;
- assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL);
+ assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL);
close(fd);
}
@@ -263,7 +265,7 @@ static void test_arraymap_percpu(int task, void *data)
int key, next_key, fd, i;
long values[nr_cpus];
- fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
+ fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
sizeof(values[0]), 2, 0);
if (fd < 0) {
printf("Failed to create arraymap '%s'!\n", strerror(errno));
@@ -275,39 +277,39 @@ static void test_arraymap_percpu(int task, void *data)
key = 1;
/* Insert key=1 element. */
- assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0);
+ assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
values[0] = 0;
- assert(bpf_map_update(fd, &key, values, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) == -1 &&
errno == EEXIST);
/* Check that key=1 can be found. */
- assert(bpf_map_lookup(fd, &key, values) == 0 && values[0] == 100);
+ assert(bpf_map_lookup_elem(fd, &key, values) == 0 && values[0] == 100);
key = 0;
/* Check that key=0 is also found and zero initialized. */
- assert(bpf_map_lookup(fd, &key, values) == 0 &&
+ assert(bpf_map_lookup_elem(fd, &key, values) == 0 &&
values[0] == 0 && values[nr_cpus - 1] == 0);
/* Check that key=2 cannot be inserted due to max_entries limit. */
key = 2;
- assert(bpf_map_update(fd, &key, values, BPF_EXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, values, BPF_EXIST) == -1 &&
errno == E2BIG);
/* Check that key = 2 doesn't exist. */
- assert(bpf_map_lookup(fd, &key, values) == -1 && errno == ENOENT);
+ assert(bpf_map_lookup_elem(fd, &key, values) == -1 && errno == ENOENT);
/* Iterate over two elements. */
- assert(bpf_map_next_key(fd, &key, &next_key) == 0 &&
+ assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
next_key == 0);
- assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 &&
+ assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
next_key == 1);
- assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 &&
+ assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
errno == ENOENT);
/* Delete shouldn't succeed. */
key = 1;
- assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL);
+ assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL);
close(fd);
}
@@ -319,7 +321,7 @@ static void test_arraymap_percpu_many_keys(void)
long values[nr_cpus];
int key, fd, i;
- fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
+ fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
sizeof(values[0]), nr_keys, 0);
if (fd < 0) {
printf("Failed to create per-cpu arraymap '%s'!\n",
@@ -331,13 +333,13 @@ static void test_arraymap_percpu_many_keys(void)
values[i] = i + 10;
for (key = 0; key < nr_keys; key++)
- assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0);
+ assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
for (key = 0; key < nr_keys; key++) {
for (i = 0; i < nr_cpus; i++)
values[i] = 0;
- assert(bpf_map_lookup(fd, &key, values) == 0);
+ assert(bpf_map_lookup_elem(fd, &key, values) == 0);
for (i = 0; i < nr_cpus; i++)
assert(values[i] == i + 10);
@@ -357,7 +359,7 @@ static void test_map_large(void)
} key;
int fd, i, value;
- fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+ fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
MAP_SIZE, map_flags);
if (fd < 0) {
printf("Failed to create large map '%s'!\n", strerror(errno));
@@ -368,22 +370,22 @@ static void test_map_large(void)
key = (struct bigkey) { .c = i };
value = i;
- assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0);
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
}
key.c = -1;
- assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
errno == E2BIG);
/* Iterate through all elements. */
for (i = 0; i < MAP_SIZE; i++)
- assert(bpf_map_next_key(fd, &key, &key) == 0);
- assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_get_next_key(fd, &key, &key) == 0);
+ assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
key.c = 0;
- assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0);
+ assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0);
key.a = 1;
- assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT);
+ assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
close(fd);
}
@@ -437,10 +439,12 @@ static void do_work(int fn, void *data)
key = value = i;
if (do_update) {
- assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0);
- assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0);
+ assert(bpf_map_update_elem(fd, &key, &value,
+ BPF_NOEXIST) == 0);
+ assert(bpf_map_update_elem(fd, &key, &value,
+ BPF_EXIST) == 0);
} else {
- assert(bpf_map_delete(fd, &key) == 0);
+ assert(bpf_map_delete_elem(fd, &key) == 0);
}
}
}
@@ -450,7 +454,7 @@ static void test_map_parallel(void)
int i, fd, key = 0, value = 0;
int data[2];
- fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+ fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
MAP_SIZE, map_flags);
if (fd < 0) {
printf("Failed to create map for parallel test '%s'!\n",
@@ -468,20 +472,20 @@ static void test_map_parallel(void)
run_parallel(TASKS, do_work, data);
/* Check that key=0 is already there. */
- assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
errno == EEXIST);
/* Check that all elements were inserted. */
key = -1;
for (i = 0; i < MAP_SIZE; i++)
- assert(bpf_map_next_key(fd, &key, &key) == 0);
- assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_get_next_key(fd, &key, &key) == 0);
+ assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
/* Another check for all elements */
for (i = 0; i < MAP_SIZE; i++) {
key = MAP_SIZE - i - 1;
- assert(bpf_map_lookup(fd, &key, &value) == 0 &&
+ assert(bpf_map_lookup_elem(fd, &key, &value) == 0 &&
value == key);
}
@@ -491,7 +495,7 @@ static void test_map_parallel(void)
/* Nothing should be left. */
key = -1;
- assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT);
+ assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
}
static void run_all_tests(void)
diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c
new file mode 100644
index 000000000000..de409fc50c35
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tag.c
@@ -0,0 +1,203 @@
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <time.h>
+#include <errno.h>
+#include <unistd.h>
+#include <string.h>
+#include <sched.h>
+#include <limits.h>
+#include <assert.h>
+
+#include <sys/socket.h>
+#include <sys/resource.h>
+
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/if_alg.h>
+
+#include <bpf/bpf.h>
+
+#include "../../../include/linux/filter.h"
+
+static struct bpf_insn prog[BPF_MAXINSNS];
+
+static void bpf_gen_imm_prog(unsigned int insns, int fd_map)
+{
+ int i;
+
+ srand(time(NULL));
+ for (i = 0; i < insns; i++)
+ prog[i] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, rand());
+ prog[i - 1] = BPF_EXIT_INSN();
+}
+
+static void bpf_gen_map_prog(unsigned int insns, int fd_map)
+{
+ int i, j = 0;
+
+ for (i = 0; i + 1 < insns; i += 2) {
+ struct bpf_insn tmp[] = {
+ BPF_LD_MAP_FD(j++ % BPF_REG_10, fd_map)
+ };
+
+ memcpy(&prog[i], tmp, sizeof(tmp));
+ }
+ if (insns % 2 == 0)
+ prog[insns - 2] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, 42);
+ prog[insns - 1] = BPF_EXIT_INSN();
+}
+
+static int bpf_try_load_prog(int insns, int fd_map,
+ void (*bpf_filler)(unsigned int insns,
+ int fd_map))
+{
+ int fd_prog;
+
+ bpf_filler(insns, fd_map);
+ fd_prog = bpf_load_program(BPF_PROG_TYPE_SCHED_CLS, prog, insns, "", 0,
+ NULL, 0);
+ assert(fd_prog > 0);
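+ /* Regenerate the image with the map fd blanked: the kernel computes
+ * the program tag over the instructions with map fd immediates zeroed,
+ * so the AF_ALG digest taken later must hash the same bytes.
+ */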
+ if (fd_map > 0)
+ bpf_filler(insns, 0);
+ return fd_prog;
+}
+
+static int __hex2bin(char ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ ch = tolower(ch);
+ if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ return -1;
+}
+
+static int hex2bin(uint8_t *dst, const char *src, size_t count)
+{
+ while (count--) {
+ int hi = __hex2bin(*src++);
+ int lo = __hex2bin(*src++);
+
+ if ((hi < 0) || (lo < 0))
+ return -1;
+ *dst++ = (hi << 4) | lo;
+ }
+ return 0;
+}
+
+static void tag_from_fdinfo(int fd_prog, uint8_t *tag, uint32_t len)
+{
+ const int prefix_len = sizeof("prog_tag:\t") - 1;
+ char buff[256];
+ int ret = -1;
+ FILE *fp;
+
+ snprintf(buff, sizeof(buff), "/proc/%d/fdinfo/%d", getpid(),
+ fd_prog);
+ fp = fopen(buff, "r");
+ assert(fp);
+
+ while (fgets(buff, sizeof(buff), fp)) {
+ if (strncmp(buff, "prog_tag:\t", prefix_len))
+ continue;
+ ret = hex2bin(tag, buff + prefix_len, len);
+ break;
+ }
+
+ fclose(fp);
+ assert(!ret);
+}
+
+static void tag_from_alg(int insns, uint8_t *tag, uint32_t len)
+{
+ static const struct sockaddr_alg alg = {
+ .salg_family = AF_ALG,
+ .salg_type = "hash",
+ .salg_name = "sha1",
+ };
+ int fd_base, fd_alg, ret;
+ ssize_t size;
+
+ fd_base = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ assert(fd_base > 0);
+
+ ret = bind(fd_base, (struct sockaddr *)&alg, sizeof(alg));
+ assert(!ret);
+
+ fd_alg = accept(fd_base, NULL, 0);
+ assert(fd_alg > 0);
+
+ insns *= sizeof(struct bpf_insn);
+ size = write(fd_alg, prog, insns);
+ assert(size == insns);
+
+ size = read(fd_alg, tag, len);
+ assert(size == len);
+
+ close(fd_alg);
+ close(fd_base);
+}
+
+static void tag_dump(const char *prefix, uint8_t *tag, uint32_t len)
+{
+ int i;
+
+ printf("%s", prefix);
+ for (i = 0; i < len; i++)
+ printf("%02x", tag[i]);
+ printf("\n");
+}
+
+static void tag_exit_report(int insns, int fd_map, uint8_t *ftag,
+ uint8_t *atag, uint32_t len)
+{
+ printf("Program tag mismatch for %d insns%s!\n", insns,
+ fd_map < 0 ? "" : " with map");
+
+ tag_dump(" fdinfo result: ", ftag, len);
+ tag_dump(" af_alg result: ", atag, len);
+ exit(1);
+}
+
+static void do_test(uint32_t *tests, int start_insns, int fd_map,
+ void (*bpf_filler)(unsigned int insns, int fd))
+{
+ int i, fd_prog;
+
+ for (i = start_insns; i <= BPF_MAXINSNS; i++) {
+ uint8_t ftag[8], atag[sizeof(ftag)];
+
+ fd_prog = bpf_try_load_prog(i, fd_map, bpf_filler);
+ tag_from_fdinfo(fd_prog, ftag, sizeof(ftag));
+ tag_from_alg(i, atag, sizeof(atag));
+ if (memcmp(ftag, atag, sizeof(ftag)))
+ tag_exit_report(i, fd_map, ftag, atag, sizeof(ftag));
+
+ close(fd_prog);
+ sched_yield();
+ (*tests)++;
+ }
+}
+
+int main(void)
+{
+ struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+ uint32_t tests = 0;
+ int i, fd_map;
+
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
+ fd_map = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int),
+ sizeof(int), 1, BPF_F_NO_PREALLOC);
+ assert(fd_map > 0);
+
+ for (i = 0; i < 5; i++) {
+ do_test(&tests, 2, -1, bpf_gen_imm_prog);
+ do_test(&tests, 3, fd_map, bpf_gen_map_prog);
+ }
+
+ printf("test_tag: OK (%u tests)\n", tests);
+ close(fd_map);
+ return 0;
+}
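For reference, bpf_gen_map_prog() advances two slots per load because BPF_LD_MAP_FD() is a two-instruction 64-bit immediate load. The macro pair, as defined in the kernel's include/linux/filter.h:

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)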
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 853d7e43434a..e1f5b9eea1e8 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8,7 +8,9 @@
* License as published by the Free Software Foundation.
*/
+#include <stdint.h>
#include <stdio.h>
+#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
@@ -16,6 +18,7 @@
#include <stdbool.h>
#include <sched.h>
+#include <sys/capability.h>
#include <sys/resource.h>
#include <linux/unistd.h>
@@ -23,9 +26,9 @@
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
-#include "../../../include/linux/filter.h"
+#include <bpf/bpf.h>
-#include "bpf_sys.h"
+#include "../../../include/linux/filter.h"
#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
@@ -859,15 +862,451 @@ static struct bpf_test tests[] = {
.result = REJECT,
},
{
- "check non-u32 access to cb",
+ "check cb access: byte",
.insns = {
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "check cb access: byte, oob 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: byte, oob 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) - 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: byte, oob 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: byte, oob 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) - 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: byte, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+ },
+ {
+ "check cb access: half",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "check cb access: half, unaligned",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: half, oob 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: half, oob 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) - 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: half, oob 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: half, oob 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) - 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: half, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+ },
+ {
+ "check cb access: word",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "check cb access: word, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: word, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: word, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: word, unaligned 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "check cb access: double, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) - 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) - 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
.result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
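+ /* cb[0..4] is 20 bytes of scratch space in struct __sk_buff; the cases
+ * above walk every byte/half/word/dword offset inside that window, probe
+ * both out-of-bounds directions, and check a program type that may not
+ * touch cb at all.
+ */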
{
"check out of range skb->cb access",
@@ -1890,6 +2329,107 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "direct packet access: test11 (shift, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test12 (and, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test13 (branches, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 14),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 24),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
+ BPF_MOV64_IMM(BPF_REG_5, 12),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
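+ /* Tests 11-14 above exercise the verifier's bounds tracking when a
+ * packet pointer is advanced by a register whose range was derived
+ * from a right shift, an AND mask, branch outcomes, or a constant
+ * immediate.
+ */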
+ {
"helper access to packet: test1, valid packet_ptr range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -2905,6 +3445,1012 @@ static struct bpf_test tests[] = {
.result = REJECT,
.errstr = "invalid bpf_context access",
},
+ {
+ "helper access to map: full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to map: partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to map: empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to map: out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=56",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to map: negative range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=-8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const imm): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const imm): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const imm): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const imm): out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=52",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const imm): negative range (> adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const imm): negative range (< adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const reg): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3,
+ offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const reg): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3,
+ offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const reg): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const reg): out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3,
+ offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=52",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const reg): negative range (> adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3,
+ offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via const reg): negative range (< adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3,
+ offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via variable): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+ offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via variable): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+ offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via variable): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+ offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via variable): no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to adjusted map (via variable): wrong max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+ offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo) + 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=45",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "map element value is preserved across register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "map element value (adjusted) is preserved across register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
+ offsetof(struct test_val, foo)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: stack, bitwise AND, zero included",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: stack, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: stack, JMP (signed), correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: stack, JMP, bounds + offset",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: stack, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: stack, JMP, no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 unbounded memory access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: stack, JMP, no min check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: stack, JMP (signed), no min check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: map, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+ sizeof(struct test_val), 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: map, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+ sizeof(struct test_val) + 1, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=49",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: map adjusted, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+ sizeof(struct test_val) - 20, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: map adjusted, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+ sizeof(struct test_val) - 19, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: size > 0 not allowed on NULL",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=imm expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to variable memory: size = 0 not allowed on != NULL",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-8 access_size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to variable memory: 8 bytes leak",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+32 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: 8 bytes no leak (init memory)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "invalid and of negative number",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
+ offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+ {
+ "invalid range check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
+ BPF_MOV32_IMM(BPF_REG_3, 1),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
+ BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ }
};
static int probe_filter_length(const struct bpf_insn *fp)
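The variable-memory cases above all encode one requirement: before a (ptr, size) pair reaches a helper, the verifier must see the size bounded from above and, for a non-NULL pointer, above zero. In restricted C the pattern looks roughly as follows (a sketch: bpf_probe_read()'s (dst, size, src) argument order is from the uapi, while get_size() and src are hypothetical stand-ins):

	char buf[64];
	int size = get_size();	/* hypothetical, untrusted size */

	if (size <= 0 || size > sizeof(buf))	/* both bounds are mandatory */
		return 0;
	bpf_probe_read(buf, size, src);	/* src: some unsafe kernel pointer */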
@@ -2921,7 +4467,7 @@ static int create_map(uint32_t size_value, uint32_t max_elem)
{
int fd;
- fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(long long),
+ fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
size_value, max_elem, BPF_F_NO_PREALLOC);
if (fd < 0)
printf("Failed to create hash map '%s'!\n", strerror(errno));
@@ -2933,7 +4479,7 @@ static int create_prog_array(void)
{
int fd;
- fd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
+ fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
sizeof(int), 4, 0);
if (fd < 0)
printf("Failed to create prog array '%s'!\n", strerror(errno));
@@ -2991,9 +4537,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
- fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
- prog, prog_len * sizeof(struct bpf_insn),
- "GPL", bpf_vlog, sizeof(bpf_vlog));
+ fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
+ prog, prog_len, "GPL", 0, bpf_vlog,
+ sizeof(bpf_vlog));
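+ /* Note: bpf_load_program() takes an instruction count rather than a
+ * byte length, so prog_len is no longer scaled by sizeof(struct bpf_insn).
+ */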
expected_ret = unpriv && test->result_unpriv != UNDEF ?
test->result_unpriv : test->result;
@@ -3031,6 +4577,55 @@ fail_log:
goto close_fds;
}
+static bool is_admin(void)
+{
+ cap_t caps;
+ cap_flag_value_t sysadmin = CAP_CLEAR;
+ const cap_value_t cap_val = CAP_SYS_ADMIN;
+
+ if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
+ perror("cap_get_flag");
+ return false;
+ }
+ caps = cap_get_proc();
+ if (!caps) {
+ perror("cap_get_proc");
+ return false;
+ }
+ if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
+ perror("cap_get_flag");
+ if (cap_free(caps))
+ perror("cap_free");
+ return (sysadmin == CAP_SET);
+}
+
+static int set_admin(bool admin)
+{
+ cap_t caps;
+ const cap_value_t cap_val = CAP_SYS_ADMIN;
+ int ret = -1;
+
+ caps = cap_get_proc();
+ if (!caps) {
+ perror("cap_get_proc");
+ return -1;
+ }
+ if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
+ admin ? CAP_SET : CAP_CLEAR)) {
+ perror("cap_set_flag");
+ goto out;
+ }
+ if (cap_set_proc(caps)) {
+ perror("cap_set_proc");
+ goto out;
+ }
+ ret = 0;
+out:
+ if (cap_free(caps))
+ perror("cap_free");
+ return ret;
+}
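+/* Note: is_admin() and set_admin() rely on libcap (sys/capability.h), so
+ * the selftest binary is expected to link against -lcap.
+ */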
+
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
int i, passes = 0, errors = 0;
@@ -3041,11 +4636,19 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
/* Program types that are not supported for non-root are
* skipped right away.
*/
- if (unpriv && test->prog_type)
- continue;
+ if (!test->prog_type) {
+ if (!unpriv)
+ set_admin(false);
+ printf("#%d/u %s ", i, test->descr);
+ do_test_single(test, true, &passes, &errors);
+ if (!unpriv)
+ set_admin(true);
+ }
- printf("#%d %s ", i, test->descr);
- do_test_single(test, unpriv, &passes, &errors);
+ if (!unpriv) {
+ printf("#%d/p %s ", i, test->descr);
+ do_test_single(test, false, &passes, &errors);
+ }
}
printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
@@ -3057,7 +4660,7 @@ int main(int argc, char **argv)
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
struct rlimit rlim = { 1 << 20, 1 << 20 };
unsigned int from = 0, to = ARRAY_SIZE(tests);
- bool unpriv = geteuid() != 0;
+ bool unpriv = !is_admin();
if (argc == 3) {
unsigned int l = atoi(argv[argc - 2]);
diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h
index 24bc7ec1be7d..a77da88bf946 100644
--- a/tools/testing/selftests/net/psock_lib.h
+++ b/tools/testing/selftests/net/psock_lib.h
@@ -40,14 +40,39 @@
static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
{
+ /* the filter below checks for all of the following conditions that
+ * are based on the contents of create_payload()
+ * ether type 0x800 and
+ * ip proto udp and
+ * skb->len == DATA_LEN and
+ * udp[38] == 'a' or udp[38] == 'b'
+ * It can be generated from the following bpf_asm input:
+ * ldh [12]
+ * jne #0x800, drop ; ETH_P_IP
+ * ldb [23]
+ * jneq #17, drop ; IPPROTO_UDP
+ * ld len ; ld skb->len
+ * jlt #100, drop ; DATA_LEN
+ * ldb [80]
+ * jeq #97, pass ; DATA_CHAR
+ * jne #98, drop ; DATA_CHAR_1
+ * pass:
+ * ret #-1
+ * drop:
+ * ret #0
+ */
struct sock_filter bpf_filter[] = {
- { 0x80, 0, 0, 0x00000000 }, /* LD pktlen */
- { 0x35, 0, 4, DATA_LEN }, /* JGE DATA_LEN [f goto nomatch]*/
- { 0x30, 0, 0, 0x00000050 }, /* LD ip[80] */
- { 0x15, 1, 0, DATA_CHAR }, /* JEQ DATA_CHAR [t goto match]*/
- { 0x15, 0, 1, DATA_CHAR_1}, /* JEQ DATA_CHAR_1 [t goto match]*/
- { 0x06, 0, 0, 0x00000060 }, /* RET match */
- { 0x06, 0, 0, 0x00000000 }, /* RET no match */
+ { 0x28, 0, 0, 0x0000000c },
+ { 0x15, 0, 8, 0x00000800 },
+ { 0x30, 0, 0, 0x00000017 },
+ { 0x15, 0, 6, 0x00000011 },
+ { 0x80, 0, 0, 0000000000 },
+ { 0x35, 0, 4, 0x00000064 },
+ { 0x30, 0, 0, 0x00000050 },
+ { 0x15, 1, 0, 0x00000061 },
+ { 0x15, 0, 1, 0x00000062 },
+ { 0x06, 0, 0, 0xffffffff },
+ { 0x06, 0, 0, 0000000000 },
};
struct sock_fprog bpf_prog;
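[annotation] The new filter array is the literal assembly of the bpf_asm listing in the comment above it; as before, pair_udp_setfilter() wraps such an array in a sock_fprog and attaches it with SO_ATTACH_FILTER. A sketch of that attach step:

	/* Sketch: attaching a classic BPF sock_filter array like the one
	 * above; fd is the packet socket, len the number of instructions.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/socket.h>
	#include <linux/filter.h>

	static void attach_filter(int fd, struct sock_filter *insns,
				  unsigned short len)
	{
		struct sock_fprog prog = {
			.len	= len,
			.filter	= insns,
		};

		if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			       &prog, sizeof(prog))) {
			perror("setsockopt SO_ATTACH_FILTER");
			exit(1);
		}
	}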
diff --git a/tools/testing/selftests/net/psock_tpacket.c b/tools/testing/selftests/net/psock_tpacket.c
index 24adf709bd9d..7f6cd9fdacf3 100644
--- a/tools/testing/selftests/net/psock_tpacket.c
+++ b/tools/testing/selftests/net/psock_tpacket.c
@@ -110,7 +110,7 @@ static unsigned int total_packets, total_bytes;
static int pfsocket(int ver)
{
- int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+ int ret, sock = socket(PF_PACKET, SOCK_RAW, 0);
if (sock == -1) {
perror("socket");
exit(1);
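[annotation] Opening the socket with protocol 0 instead of htons(ETH_P_ALL) keeps it deaf until it is explicitly bound; together with moving pair_udp_setfilter() into bind_ring() further down, this closes the window in which unfiltered traffic could land in the ring between socket() and the filter attach. A sketch of the create-then-bind order, assuming the loopback device as in the test:

	/* Sketch: a packet socket that receives nothing until bound. */
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/if_packet.h>
	#include <linux/if_ether.h>
	#include <net/if.h>
	#include <arpa/inet.h>

	static int open_bound_psock(void)
	{
		struct sockaddr_ll ll;
		int sock = socket(PF_PACKET, SOCK_RAW, 0); /* proto 0: no rx yet */

		if (sock == -1)
			return -1;

		/* ... attach the filter and set up the ring here ... */

		memset(&ll, 0, sizeof(ll));
		ll.sll_family	= PF_PACKET;
		ll.sll_protocol	= htons(ETH_P_ALL); /* traffic starts here */
		ll.sll_ifindex	= if_nametoindex("lo");
		if (bind(sock, (struct sockaddr *)&ll, sizeof(ll)) == -1)
			return -1;
		return sock;
	}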
@@ -239,7 +239,6 @@ static void walk_v1_v2_rx(int sock, struct ring *ring)
bug_on(ring->type != PACKET_RX_RING);
pair_udp_open(udp_sock, PORT_BASE);
- pair_udp_setfilter(sock);
memset(&pfd, 0, sizeof(pfd));
pfd.fd = sock;
@@ -311,20 +310,33 @@ static inline void __v2_tx_user_ready(struct tpacket2_hdr *hdr)
__sync_synchronize();
}
-static inline int __v1_v2_tx_kernel_ready(void *base, int version)
+static inline int __v3_tx_kernel_ready(struct tpacket3_hdr *hdr)
+{
+ return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING));
+}
+
+static inline void __v3_tx_user_ready(struct tpacket3_hdr *hdr)
+{
+ hdr->tp_status = TP_STATUS_SEND_REQUEST;
+ __sync_synchronize();
+}
+
+static inline int __tx_kernel_ready(void *base, int version)
{
switch (version) {
case TPACKET_V1:
return __v1_tx_kernel_ready(base);
case TPACKET_V2:
return __v2_tx_kernel_ready(base);
+ case TPACKET_V3:
+ return __v3_tx_kernel_ready(base);
default:
bug_on(1);
return 0;
}
}
-static inline void __v1_v2_tx_user_ready(void *base, int version)
+static inline void __tx_user_ready(void *base, int version)
{
switch (version) {
case TPACKET_V1:
@@ -333,6 +345,9 @@ static inline void __v1_v2_tx_user_ready(void *base, int version)
case TPACKET_V2:
__v2_tx_user_ready(base);
break;
+ case TPACKET_V3:
+ __v3_tx_user_ready(base);
+ break;
}
}
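[annotation] The V3 helpers reuse the V1/V2 TX ownership protocol: a frame is writable by user space while tp_status carries neither TP_STATUS_SEND_REQUEST nor TP_STATUS_SENDING; user space fills it, flips the status, and kicks the kernel with a zero-length send(). A minimal sketch of that handshake for one frame (busy-waiting only for brevity; the test polls instead):

	/* Sketch: hand one TPACKET_V3 TX frame to the kernel. 'hdr' points
	 * into the mmap()ed ring; the status store followed by the barrier
	 * mirrors the patch's __v3_tx_user_ready().
	 */
	#include <sys/socket.h>
	#include <linux/if_packet.h>

	static void send_one(int sock, struct tpacket3_hdr *hdr)
	{
		/* the frame is ours once the kernel clears both busy bits */
		while (hdr->tp_status & (TP_STATUS_SEND_REQUEST |
					 TP_STATUS_SENDING))
			;

		/* ... write tp_len/tp_snaplen and the payload here ... */

		hdr->tp_status = TP_STATUS_SEND_REQUEST;
		__sync_synchronize();

		send(sock, NULL, 0, 0);	/* flush all pending TX frames */
	}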
@@ -348,7 +363,22 @@ static void __v1_v2_set_packet_loss_discard(int sock)
}
}
-static void walk_v1_v2_tx(int sock, struct ring *ring)
+static inline void *get_next_frame(struct ring *ring, int n)
+{
+ uint8_t *f0 = ring->rd[0].iov_base;
+
+ switch (ring->version) {
+ case TPACKET_V1:
+ case TPACKET_V2:
+ return ring->rd[n].iov_base;
+ case TPACKET_V3:
+ return f0 + (n * ring->req3.tp_frame_size);
+ default:
+ bug_on(1);
+ }
+}
+
+static void walk_tx(int sock, struct ring *ring)
{
struct pollfd pfd;
int rcv_sock, ret;
@@ -360,9 +390,19 @@ static void walk_v1_v2_tx(int sock, struct ring *ring)
.sll_family = PF_PACKET,
.sll_halen = ETH_ALEN,
};
+ int nframes;
+
+ /* TPACKET_V{1,2} sets up the ring->rd* related variables based
+ * on frames (e.g., rd_num is tp_frame_nr) whereas V3 sets these
+ * up based on blocks (e.g, rd_num is tp_block_nr)
+ */
+ if (ring->version <= TPACKET_V2)
+ nframes = ring->rd_num;
+ else
+ nframes = ring->req3.tp_frame_nr;
bug_on(ring->type != PACKET_TX_RING);
- bug_on(ring->rd_num < NUM_PACKETS);
+ bug_on(nframes < NUM_PACKETS);
rcv_sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
if (rcv_sock == -1) {
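[annotation] As the comment in the hunk notes, V1/V2 account the ring in frames (rd_num == tp_frame_nr) while V3 accounts it in blocks (rd_num == tp_block_nr), so walk_tx() derives the frame count from req3, and get_next_frame() can index frames off the first block only because the whole ring is one contiguous mapping. The arithmetic, as a sketch:

	/* Sketch: TPACKET_V3 frame accounting, valid when the ring was
	 * mapped as one contiguous region so frames sit back to back.
	 */
	#include <stdint.h>
	#include <linux/if_packet.h>

	static unsigned int v3_nframes(const struct tpacket_req3 *req3)
	{
		/* what the test programs into tp_frame_nr at setup time */
		return req3->tp_block_nr *
		       (req3->tp_block_size / req3->tp_frame_size);
	}

	static void *v3_frame(void *ring_base, const struct tpacket_req3 *req3,
			      unsigned int n)
	{
		return (uint8_t *)ring_base + (size_t)n * req3->tp_frame_size;
	}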
@@ -388,10 +428,11 @@ static void walk_v1_v2_tx(int sock, struct ring *ring)
create_payload(packet, &packet_len);
while (total_packets > 0) {
- while (__v1_v2_tx_kernel_ready(ring->rd[frame_num].iov_base,
- ring->version) &&
+ void *next = get_next_frame(ring, frame_num);
+
+ while (__tx_kernel_ready(next, ring->version) &&
total_packets > 0) {
- ppd.raw = ring->rd[frame_num].iov_base;
+ ppd.raw = next;
switch (ring->version) {
case TPACKET_V1:
@@ -413,14 +454,27 @@ static void walk_v1_v2_tx(int sock, struct ring *ring)
packet_len);
total_bytes += ppd.v2->tp_h.tp_snaplen;
break;
+ case TPACKET_V3: {
+ struct tpacket3_hdr *tx = next;
+
+ tx->tp_snaplen = packet_len;
+ tx->tp_len = packet_len;
+ tx->tp_next_offset = 0;
+
+ memcpy((uint8_t *)tx + TPACKET3_HDRLEN -
+ sizeof(struct sockaddr_ll), packet,
+ packet_len);
+ total_bytes += tx->tp_snaplen;
+ break;
+ }
}
status_bar_update();
total_packets--;
- __v1_v2_tx_user_ready(ppd.raw, ring->version);
+ __tx_user_ready(next, ring->version);
- frame_num = (frame_num + 1) % ring->rd_num;
+ frame_num = (frame_num + 1) % nframes;
}
poll(&pfd, 1, 1);
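[annotation] For the V3 TX case the payload starts at TPACKET3_HDRLEN - sizeof(struct sockaddr_ll): if_packet.h defines TPACKET3_HDRLEN as the aligned header size plus a sockaddr_ll, and TX frames carry no link-layer address. The fill step from the hunk above, as a standalone sketch:

	/* Sketch: fill one TPACKET_V3 TX frame; 'tx' points at the frame
	 * header inside the ring, 'pkt' at a prebuilt packet of 'len' bytes.
	 */
	#include <stdint.h>
	#include <string.h>
	#include <linux/if_packet.h>

	static void fill_v3_tx(struct tpacket3_hdr *tx, const void *pkt,
			       unsigned int len)
	{
		tx->tp_len	   = len;	/* wire length */
		tx->tp_snaplen	   = len;	/* same as tp_len on TX */
		tx->tp_next_offset = 0;		/* one packet per frame here */

		memcpy((uint8_t *)tx + TPACKET3_HDRLEN -
		       sizeof(struct sockaddr_ll), pkt, len);
	}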
@@ -460,7 +514,7 @@ static void walk_v1_v2(int sock, struct ring *ring)
if (ring->type == PACKET_RX_RING)
walk_v1_v2_rx(sock, ring);
else
- walk_v1_v2_tx(sock, ring);
+ walk_tx(sock, ring);
}
static uint64_t __v3_prev_block_seq_num = 0;
@@ -546,7 +600,6 @@ static void walk_v3_rx(int sock, struct ring *ring)
bug_on(ring->type != PACKET_RX_RING);
pair_udp_open(udp_sock, PORT_BASE);
- pair_udp_setfilter(sock);
memset(&pfd, 0, sizeof(pfd));
pfd.fd = sock;
@@ -583,7 +636,7 @@ static void walk_v3(int sock, struct ring *ring)
if (ring->type == PACKET_RX_RING)
walk_v3_rx(sock, ring);
else
- bug_on(1);
+ walk_tx(sock, ring);
}
static void __v1_v2_fill(struct ring *ring, unsigned int blocks)
@@ -602,12 +655,13 @@ static void __v1_v2_fill(struct ring *ring, unsigned int blocks)
ring->flen = ring->req.tp_frame_size;
}
-static void __v3_fill(struct ring *ring, unsigned int blocks)
+static void __v3_fill(struct ring *ring, unsigned int blocks, int type)
{
- ring->req3.tp_retire_blk_tov = 64;
- ring->req3.tp_sizeof_priv = 0;
- ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
-
+ if (type == PACKET_RX_RING) {
+ ring->req3.tp_retire_blk_tov = 64;
+ ring->req3.tp_sizeof_priv = 0;
+ ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
+ }
ring->req3.tp_block_size = getpagesize() << 2;
ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7;
ring->req3.tp_block_nr = blocks;
@@ -641,7 +695,7 @@ static void setup_ring(int sock, struct ring *ring, int version, int type)
break;
case TPACKET_V3:
- __v3_fill(ring, blocks);
+ __v3_fill(ring, blocks, type);
ret = setsockopt(sock, SOL_PACKET, type, &ring->req3,
sizeof(ring->req3));
break;
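[annotation] __v3_fill() now leaves the retire timeout, private area and RXHASH request at zero for TX rings; those tpacket_req3 members are RX-only, and a V3 PACKET_TX_RING setup is expected to fail if they are set. A sketch of the full V3 TX bring-up this enables, mirroring setup_ring() and the test's ring mapping (the helper name is hypothetical):

	/* Sketch: TPACKET_V3 TX ring bring-up with the RX-only req3 fields
	 * kept at zero. Returns the mapped ring base or NULL on failure.
	 */
	#include <string.h>
	#include <unistd.h>
	#include <sys/mman.h>
	#include <sys/socket.h>
	#include <linux/if_packet.h>

	static void *setup_v3_tx(int sock, struct tpacket_req3 *req3,
				 unsigned int blocks)
	{
		int ver = TPACKET_V3;
		void *ring;

		if (setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver,
			       sizeof(ver)))
			return NULL;

		memset(req3, 0, sizeof(*req3));	/* RX-only fields stay zero */
		req3->tp_block_size = getpagesize() << 2;
		req3->tp_frame_size = TPACKET_ALIGNMENT << 7;
		req3->tp_block_nr   = blocks;
		req3->tp_frame_nr   = req3->tp_block_size /
				      req3->tp_frame_size * blocks;

		if (setsockopt(sock, SOL_PACKET, PACKET_TX_RING, req3,
			       sizeof(*req3)))
			return NULL;

		ring = mmap(NULL, (size_t)req3->tp_block_size * blocks,
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_LOCKED | MAP_POPULATE, sock, 0);
		return ring == MAP_FAILED ? NULL : ring;
	}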
@@ -685,6 +739,8 @@ static void bind_ring(int sock, struct ring *ring)
{
int ret;
+ pair_udp_setfilter(sock);
+
ring->ll.sll_family = PF_PACKET;
ring->ll.sll_protocol = htons(ETH_P_ALL);
ring->ll.sll_ifindex = if_nametoindex("lo");
@@ -796,6 +852,7 @@ int main(void)
ret |= test_tpacket(TPACKET_V2, PACKET_TX_RING);
ret |= test_tpacket(TPACKET_V3, PACKET_RX_RING);
+ ret |= test_tpacket(TPACKET_V3, PACKET_TX_RING);
if (ret)
return 1;