author      Jakub Sitnicki <jakub@cloudflare.com>      2020-05-31 10:28:36 +0200
committer   Alexei Starovoitov <ast@kernel.org>        2020-06-02 00:21:02 +0200
commit      a3fd7ceee05431d2c51ed86c6cae015d236a51f0 (patch)
tree        239fb6cb80a26ab38befd59faa7d4354d5eac190 /net/core
parent      flow_dissector: Pull locking up from prog attach callback (diff)
net: Introduce netns_bpf for BPF programs attached to netns
In order to:
 (1) attach more than one BPF program type to netns, or
 (2) support attaching BPF programs to netns with bpf_link, or
 (3) support multi-prog attach points for netns
we will need to keep more state per netns than a single pointer like we
have now for BPF flow dissector program.

Prepare for the above by extracting netns_bpf that is part of struct net,
for storing all state related to BPF programs attached to netns. Turn
flow dissector callbacks for querying/attaching/detaching a program into
generic ones that operate on netns_bpf. Next patch will move the generic
callbacks into their own module.

This is similar to how it is organized for cgroup with cgroup_bpf.

Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Cc: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/bpf/20200531082846.2117903-3-jakub@cloudflare.com
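The hunks below index per-netns programs as net->bpf.progs[type] and map the
uapi attach type with to_netns_bpf_attach_type(); those definitions live in
the new <linux/bpf-netns.h> header, which is outside net/core and therefore
not part of this diffstat. A minimal sketch of what that container and helper
could look like, inferred purely from how the diff uses them (names and values
here are assumptions, not the authoritative header contents):

/* Sketch only: mirrors the identifiers used by the hunks below. */
enum netns_bpf_attach_type {
	NETNS_BPF_INVALID = -1,
	NETNS_BPF_FLOW_DISSECTOR = 0,
	MAX_NETNS_BPF_ATTACH_TYPE
};

static inline enum netns_bpf_attach_type
to_netns_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	case BPF_FLOW_DISSECTOR:
		return NETNS_BPF_FLOW_DISSECTOR;
	default:
		return NETNS_BPF_INVALID;
	}
}

/* Embedded in struct net as net->bpf, replacing the old
 * flow_dissector_prog pointer.
 */
struct netns_bpf {
	struct bpf_prog __rcu *progs[MAX_NETNS_BPF_ATTACH_TYPE];
};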
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/flow_dissector.c   105
1 file changed, 69 insertions(+), 36 deletions(-)
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index b64a44a083fd..6c1b8e43d611 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -31,8 +31,10 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_labels.h>
#endif
+#include <linux/bpf-netns.h>
-static DEFINE_MUTEX(flow_dissector_mutex);
+/* Protects updates to netns_bpf */
+DEFINE_MUTEX(netns_bpf_mutex);
static void dissector_set_key(struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id)
@@ -70,23 +72,28 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
}
EXPORT_SYMBOL(skb_flow_dissector_init);
-int skb_flow_dissector_prog_query(const union bpf_attr *attr,
- union bpf_attr __user *uattr)
+int netns_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
{
__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
u32 prog_id, prog_cnt = 0, flags = 0;
+ enum netns_bpf_attach_type type;
struct bpf_prog *attached;
struct net *net;
if (attr->query.query_flags)
return -EINVAL;
+ type = to_netns_bpf_attach_type(attr->query.attach_type);
+ if (type < 0)
+ return -EINVAL;
+
net = get_net_ns_by_fd(attr->query.target_fd);
if (IS_ERR(net))
return PTR_ERR(net);
rcu_read_lock();
- attached = rcu_dereference(net->flow_dissector_prog);
+ attached = rcu_dereference(net->bpf.progs[type]);
if (attached) {
prog_cnt = 1;
prog_id = attached->aux->id;
@@ -112,6 +119,7 @@ int skb_flow_dissector_prog_query(const union bpf_attr *attr,
static int flow_dissector_bpf_prog_attach(struct net *net,
struct bpf_prog *prog)
{
+ enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
struct bpf_prog *attached;
if (net == &init_net) {
@@ -125,74 +133,97 @@ static int flow_dissector_bpf_prog_attach(struct net *net,
for_each_net(ns) {
if (ns == &init_net)
continue;
- if (rcu_access_pointer(ns->flow_dissector_prog))
+ if (rcu_access_pointer(ns->bpf.progs[type]))
return -EEXIST;
}
} else {
/* Make sure root flow dissector is not attached
* when attaching to the non-root namespace.
*/
- if (rcu_access_pointer(init_net.flow_dissector_prog))
+ if (rcu_access_pointer(init_net.bpf.progs[type]))
return -EEXIST;
}
- attached = rcu_dereference_protected(net->flow_dissector_prog,
- lockdep_is_held(&flow_dissector_mutex));
+ attached = rcu_dereference_protected(net->bpf.progs[type],
+ lockdep_is_held(&netns_bpf_mutex));
if (attached == prog)
/* The same program cannot be attached twice */
return -EINVAL;
- rcu_assign_pointer(net->flow_dissector_prog, prog);
+ rcu_assign_pointer(net->bpf.progs[type], prog);
if (attached)
bpf_prog_put(attached);
return 0;
}
-int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
- struct bpf_prog *prog)
+int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
+ enum netns_bpf_attach_type type;
+ struct net *net;
int ret;
- mutex_lock(&flow_dissector_mutex);
- ret = flow_dissector_bpf_prog_attach(current->nsproxy->net_ns, prog);
- mutex_unlock(&flow_dissector_mutex);
+ type = to_netns_bpf_attach_type(attr->attach_type);
+ if (type < 0)
+ return -EINVAL;
+
+ net = current->nsproxy->net_ns;
+ mutex_lock(&netns_bpf_mutex);
+ switch (type) {
+ case NETNS_BPF_FLOW_DISSECTOR:
+ ret = flow_dissector_bpf_prog_attach(net, prog);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&netns_bpf_mutex);
return ret;
}
-static int flow_dissector_bpf_prog_detach(struct net *net)
+/* Must be called with netns_bpf_mutex held. */
+static int __netns_bpf_prog_detach(struct net *net,
+ enum netns_bpf_attach_type type)
{
struct bpf_prog *attached;
- mutex_lock(&flow_dissector_mutex);
- attached = rcu_dereference_protected(net->flow_dissector_prog,
- lockdep_is_held(&flow_dissector_mutex));
- if (!attached) {
- mutex_unlock(&flow_dissector_mutex);
+ attached = rcu_dereference_protected(net->bpf.progs[type],
+ lockdep_is_held(&netns_bpf_mutex));
+ if (!attached)
return -ENOENT;
- }
- RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
+ RCU_INIT_POINTER(net->bpf.progs[type], NULL);
bpf_prog_put(attached);
- mutex_unlock(&flow_dissector_mutex);
return 0;
}
-int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
+int netns_bpf_prog_detach(const union bpf_attr *attr)
{
- return flow_dissector_bpf_prog_detach(current->nsproxy->net_ns);
+ enum netns_bpf_attach_type type;
+ int ret;
+
+ type = to_netns_bpf_attach_type(attr->attach_type);
+ if (type < 0)
+ return -EINVAL;
+
+ mutex_lock(&netns_bpf_mutex);
+ ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type);
+ mutex_unlock(&netns_bpf_mutex);
+
+ return ret;
}
-static void __net_exit flow_dissector_pernet_pre_exit(struct net *net)
+static void __net_exit netns_bpf_pernet_pre_exit(struct net *net)
{
- /* We're not racing with attach/detach because there are no
- * references to netns left when pre_exit gets called.
- */
- if (rcu_access_pointer(net->flow_dissector_prog))
- flow_dissector_bpf_prog_detach(net);
+ enum netns_bpf_attach_type type;
+
+ mutex_lock(&netns_bpf_mutex);
+ for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++)
+ __netns_bpf_prog_detach(net, type);
+ mutex_unlock(&netns_bpf_mutex);
}
-static struct pernet_operations flow_dissector_pernet_ops __net_initdata = {
- .pre_exit = flow_dissector_pernet_pre_exit,
+static struct pernet_operations netns_bpf_pernet_ops __net_initdata = {
+ .pre_exit = netns_bpf_pernet_pre_exit,
};
/**
@@ -1044,11 +1075,13 @@ bool __skb_flow_dissect(const struct net *net,
WARN_ON_ONCE(!net);
if (net) {
+ enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
+
rcu_read_lock();
- attached = rcu_dereference(init_net.flow_dissector_prog);
+ attached = rcu_dereference(init_net.bpf.progs[type]);
if (!attached)
- attached = rcu_dereference(net->flow_dissector_prog);
+ attached = rcu_dereference(net->bpf.progs[type]);
if (attached) {
struct bpf_flow_keys flow_keys;
@@ -1870,6 +1903,6 @@ static int __init init_default_flow_dissectors(void)
flow_keys_basic_dissector_keys,
ARRAY_SIZE(flow_keys_basic_dissector_keys));
- return register_pernet_subsys(&flow_dissector_pernet_ops);
+ return register_pernet_subsys(&netns_bpf_pernet_ops);
}
core_initcall(init_default_flow_dissectors);
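For context, a hedged user-space sketch of how the renamed query path
(netns_bpf_prog_query) is reached: query targets a netns fd, as the
get_net_ns_by_fd() call above shows, while attach operates on the caller's
own netns (current->nsproxy->net_ns). The example uses libbpf's low-level
bpf_prog_query() wrapper; the netns path and the minimal error handling are
illustrative assumptions, not part of this patch.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

int query_flow_dissector(void)
{
	__u32 prog_ids[1] = {}, prog_cnt = 1;
	int netns_fd, err;

	/* The kernel resolves this fd via get_net_ns_by_fd(). */
	netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (netns_fd < 0)
		return -errno;

	err = bpf_prog_query(netns_fd, BPF_FLOW_DISSECTOR, 0, NULL,
			     prog_ids, &prog_cnt);
	close(netns_fd);
	if (err)
		return err;

	printf("flow dissector progs attached: %u\n", prog_cnt);
	return 0;
}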