summaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorJakub Kicinski <jakub.kicinski@netronome.com>2018-07-17 19:53:25 +0200
committerDaniel Borkmann <daniel@iogearbox.net>2018-07-18 15:10:34 +0200
commit602144c224604f1cbff02ee2d1cf46825269ecbd (patch)
tree8de28d85871da8fd3d49865568fccfca8ab2cbd0 /kernel
parentbpf: offload: aggregate offloads per-device (diff)
downloadlinux-602144c224604f1cbff02ee2d1cf46825269ecbd.tar.xz
linux-602144c224604f1cbff02ee2d1cf46825269ecbd.zip
bpf: offload: keep the offload state per-ASIC
Create a higher-level entity to represent a device/ASIC to allow programs and maps to be shared between device ports. The extra work is required to make sure we don't destroy BPF objects as soon as the netdev for which they were loaded gets destroyed, as other ports may still be using them. When netdev goes away all of its BPF objects will be moved to other netdevs of the device, and only destroyed when last netdev is unregistered. Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com> Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/bpf/offload.c81
1 file changed, 64 insertions(+), 17 deletions(-)
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index cd64a26807aa..925575f64ff1 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -32,11 +32,17 @@
*/
static DECLARE_RWSEM(bpf_devs_lock);
/* One per device/ASIC: ties together all netdev ports of the device so
 * offloaded programs/maps can outlive any single port's netdev.
 */
struct bpf_offload_dev {
	struct list_head netdevs;	/* bpf_offload_netdev::offdev_netdevs entries */
};

/* Per-netdev offload state; entries live in the offdevs rhashtable
 * (removed via &ondev->l in the unregister path).
 */
struct bpf_offload_netdev {
	struct rhash_head l;		/* node in the offdevs rhashtable */
	struct net_device *netdev;	/* presumably the hash key — offdevs_params not visible here, confirm */
	struct bpf_offload_dev *offdev;	/* owning device/ASIC */
	struct list_head progs;		/* bpf_prog_offload::offloads bound to this netdev */
	struct list_head maps;		/* bpf_offloaded_map::offloads bound to this netdev */
	struct list_head offdev_netdevs; /* node in offdev->netdevs */
};
static const struct rhashtable_params offdevs_params = {
@@ -526,25 +532,18 @@ bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
return ret;
}
-int bpf_offload_dev_netdev_register(struct net_device *netdev)
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+ struct net_device *netdev)
{
struct bpf_offload_netdev *ondev;
int err;
- down_write(&bpf_devs_lock);
- if (!offdevs_inited) {
- err = rhashtable_init(&offdevs, &offdevs_params);
- if (err)
- return err;
- offdevs_inited = true;
- }
- up_write(&bpf_devs_lock);
-
ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
if (!ondev)
return -ENOMEM;
ondev->netdev = netdev;
+ ondev->offdev = offdev;
INIT_LIST_HEAD(&ondev->progs);
INIT_LIST_HEAD(&ondev->maps);
@@ -555,6 +554,7 @@ int bpf_offload_dev_netdev_register(struct net_device *netdev)
goto err_unlock_free;
}
+ list_add(&ondev->offdev_netdevs, &offdev->netdevs);
up_write(&bpf_devs_lock);
return 0;
@@ -565,11 +565,12 @@ err_unlock_free:
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);
-void bpf_offload_dev_netdev_unregister(struct net_device *netdev)
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+ struct net_device *netdev)
{
+ struct bpf_offload_netdev *ondev, *altdev;
struct bpf_offloaded_map *offmap, *mtmp;
struct bpf_prog_offload *offload, *ptmp;
- struct bpf_offload_netdev *ondev;
ASSERT_RTNL();
@@ -579,11 +580,26 @@ void bpf_offload_dev_netdev_unregister(struct net_device *netdev)
goto unlock;
WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
-
- list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
- __bpf_prog_offload_destroy(offload->prog);
- list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
- __bpf_map_offload_destroy(offmap);
+ list_del(&ondev->offdev_netdevs);
+
+ /* Try to move the objects to another netdev of the device */
+ altdev = list_first_entry_or_null(&offdev->netdevs,
+ struct bpf_offload_netdev,
+ offdev_netdevs);
+ if (altdev) {
+ list_for_each_entry(offload, &ondev->progs, offloads)
+ offload->netdev = altdev->netdev;
+ list_splice_init(&ondev->progs, &altdev->progs);
+
+ list_for_each_entry(offmap, &ondev->maps, offloads)
+ offmap->netdev = altdev->netdev;
+ list_splice_init(&ondev->maps, &altdev->maps);
+ } else {
+ list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
+ __bpf_prog_offload_destroy(offload->prog);
+ list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
+ __bpf_map_offload_destroy(offmap);
+ }
WARN_ON(!list_empty(&ondev->progs));
WARN_ON(!list_empty(&ondev->maps));
@@ -592,3 +608,34 @@ unlock:
up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
+
+struct bpf_offload_dev *bpf_offload_dev_create(void)
+{
+ struct bpf_offload_dev *offdev;
+ int err;
+
+ down_write(&bpf_devs_lock);
+ if (!offdevs_inited) {
+ err = rhashtable_init(&offdevs, &offdevs_params);
+ if (err)
+ return ERR_PTR(err);
+ offdevs_inited = true;
+ }
+ up_write(&bpf_devs_lock);
+
+ offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
+ if (!offdev)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&offdev->netdevs);
+
+ return offdev;
+}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_create);
+
/* Free a device/ASIC-level offload object created by
 * bpf_offload_dev_create().  All ports must already have been removed
 * via bpf_offload_dev_netdev_unregister(); a non-empty netdevs list
 * here indicates a driver teardown-ordering bug, hence the WARN.
 */
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);