author    Kumar Kartikeya Dwivedi <memxor@gmail.com>    2022-11-18 02:56:06 +0100
committer Alexei Starovoitov <ast@kernel.org>    2022-11-18 04:22:14 +0100
commit    8cab76ec634995e59a8b6346bf8b835ab7fad3a3 (patch)
tree      68d38b6a23af0b784c94ce63191328078aeb2321 /kernel/bpf/helpers.c
parent    bpf: Permit NULL checking pointer with non-zero fixed offset (diff)
bpf: Introduce single ownership BPF linked list API
Add a linked list API for use in BPF programs, where it expects protection from the bpf_spin_lock in the same allocation as the bpf_list_head. For now, only one bpf_spin_lock can be present, hence that is assumed to be the one protecting the bpf_list_head.

The following functions are added to kick things off:

// Add node to beginning of list
void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node);

// Add node to end of list
void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node);

// Remove node at beginning of list and return it
struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head);

// Remove node at end of list and return it
struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head);

The lock protecting the bpf_list_head needs to be taken for all operations. The verifier ensures that the required lock is always held, and that only the correct lock is taken for these operations. These checks are made statically by relying on the reg->id preserved for registers pointing into regions that hold both the bpf_spin_lock and the objects it protects. The comment over check_reg_allocation_locked in this change describes the logic in detail.

Note that bpf_list_push_front and bpf_list_push_back are meant to consume the object containing the node passed as the first argument; however, that mechanism is intended to not release the ref_obj_id until bpf_spin_unlock is called. Nothing is done about this in this commit; the next commit introduces the logic to handle that case, so it is left as is for now.

bpf_list_pop_front and bpf_list_pop_back delete the first or last item of the list respectively, and return a pointer to the element at its list_node offset. The user can then use a container_of-style macro to get the actual entry type. The verifier, however, statically knows the actual type, so the safety properties are still preserved.

With these additions, programs can now manage their own linked lists and store their objects in them.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20221118015614.2013203-17-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
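[Editor's note: for reference, a rough sketch of how a BPF program might drive this API under a held bpf_spin_lock. The struct names, the __contains() tag, the __ksym externs, container_of, and the bpf_obj_new()/bpf_obj_drop() wrappers follow the conventions of the selftests' helper headers added later in this series; they are illustrative assumptions, not something this patch itself provides.]

  /* Sketch only: illustrative BPF program usage, not part of this patch. */
  struct elem {
          long data;
          struct bpf_list_node node;
  };

  /* The spin lock and the list head must live in the same allocation; a map
   * value is used here. __contains(elem, node) is assumed to expand to the
   * BTF decl tag telling the verifier which type the list holds and at which
   * member the node sits.
   */
  struct map_value {
          struct bpf_spin_lock lock;
          struct bpf_list_head head __contains(elem, node);
  };

  extern void bpf_list_push_front(struct bpf_list_head *head,
                                  struct bpf_list_node *node) __ksym;
  extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;

  static int use_list(struct map_value *v)
  {
          struct bpf_list_node *n;
          struct elem *e;

          e = bpf_obj_new(typeof(*e));
          if (!e)
                  return 0;

          /* The lock in the same allocation must be held around every list
           * operation; the verifier enforces this statically.
           */
          bpf_spin_lock(&v->lock);
          bpf_list_push_front(&v->head, &e->node);
          bpf_spin_unlock(&v->lock);

          bpf_spin_lock(&v->lock);
          n = bpf_list_pop_front(&v->head);
          bpf_spin_unlock(&v->lock);
          if (!n)
                  return 0;

          /* Recover the entry type from the returned list_node pointer. */
          e = container_of(n, struct elem, node);
          bpf_obj_drop(e);
          return 0;
  }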
Diffstat (limited to 'kernel/bpf/helpers.c')
-rw-r--r--    kernel/bpf/helpers.c    55
1 file changed, 54 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 71d803ca0c1d..212e791d7452 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1780,6 +1780,50 @@ void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
	bpf_mem_free(&bpf_global_ma, p);
}
+static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head, bool tail)
+{
+	struct list_head *n = (void *)node, *h = (void *)head;
+
+	if (unlikely(!h->next))
+		INIT_LIST_HEAD(h);
+	if (unlikely(!n->next))
+		INIT_LIST_HEAD(n);
+	tail ? list_add_tail(n, h) : list_add(n, h);
+}
+
+void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
+{
+	return __bpf_list_add(node, head, false);
+}
+
+void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
+{
+	return __bpf_list_add(node, head, true);
+}
+
+static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
+{
+	struct list_head *n, *h = (void *)head;
+
+	if (unlikely(!h->next))
+		INIT_LIST_HEAD(h);
+	if (list_empty(h))
+		return NULL;
+	n = tail ? h->prev : h->next;
+	list_del_init(n);
+	return (struct bpf_list_node *)n;
+}
+
+struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
+{
+	return __bpf_list_del(head, false);
+}
+
+struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
+{
+	return __bpf_list_del(head, true);
+}
+
__diag_pop();
BTF_SET8_START(generic_btf_ids)
@@ -1788,6 +1832,10 @@ BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
#endif
BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_list_push_front)
+BTF_ID_FLAGS(func, bpf_list_push_back)
+BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
BTF_SET8_END(generic_btf_ids)
static const struct btf_kfunc_id_set generic_kfunc_set = {
@@ -1797,7 +1845,12 @@ static const struct btf_kfunc_id_set generic_kfunc_set = {
static int __init kfunc_init(void)
{
-	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
+	int ret;
+
+	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
+	if (ret)
+		return ret;
+	return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
}
late_initcall(kfunc_init);
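[Editor's note: regarding the "container_of style macro" the commit message mentions, a minimal offsetof-based form like the one below, shown here as an assumption rather than anything this patch adds, is enough to recover the entry type from the bpf_list_node pointer returned by the pop kfuncs.]

  /* Sketch only: minimal container_of, without the kernel's extra type checks. */
  #define container_of(ptr, type, member)                         \
          ({                                                      \
                  void *__mptr = (void *)(ptr);                   \
                  ((type *)(__mptr - offsetof(type, member)));    \
          })

  /* Example: given a struct bpf_list_node *n popped from a list whose
   * elements embed the node as ".node":
   *     struct elem *e = container_of(n, struct elem, node);
   */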