author     Pablo Neira Ayuso <pablo@netfilter.org>   2020-05-29 02:25:35 +0200
committer  David S. Miller <davem@davemloft.net>     2020-06-01 20:41:12 +0200
commit     1fac52da5942c58dd3e337fd7c5a550925ca752e (patch)
tree       7f13f7aa52d4285ef434c6857f34deda4061de74 /net
parent     netfilter: nf_flowtable: expose nf_flow_table_gc_cleanup() (diff)
download   linux-1fac52da5942c58dd3e337fd7c5a550925ca752e.tar.xz
           linux-1fac52da5942c58dd3e337fd7c5a550925ca752e.zip
net: flow_offload: consolidate indirect flow_block infrastructure
Tunnel devices provide no dev->netdev_ops->ndo_setup_tc(...) interface, and the tunnel device and route control plane does not provide an obvious way to relate tunnel and physical devices.

This patch allows drivers to register a tunnel device offload handler for the tc and netfilter frontends through flow_indr_dev_register() and flow_indr_dev_unregister().

The frontend calls flow_indr_dev_setup_offload(), which iterates over the list of drivers that offer tunnel device hardware offload support and sets up the flow block for this tunnel device.

If the driver module is removed, the indirect flow_block ends up with a stale callback reference. The module removal path triggers dev_shutdown() to remove the qdisc and the flow_blocks for the physical devices. However, this does not help tunnel devices, where the relation between the physical and the tunnel device is not explicit. This patch introduces a cleanup callback that is invoked when the driver module is removed to clean up the tunnel device flow_block.

This patch defines struct flow_block_indr and uses it from flow_block_cb to store the information that the frontend requires to perform the flow_block_cb cleanup on module removal.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
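As an illustration of the driver side of this API, here is a minimal sketch; it is not part of this patch, and foo_setup_tc_block_cb, foo_indr_setup_cb, foo_priv and the foo_ module hooks are hypothetical names. The idea is that a driver registers an indirect bind callback with flow_indr_dev_register(); when that callback is invoked for a tunnel device it allocates a flow_block_cb and adds it to bo->cb_list, and on module removal it calls flow_indr_dev_unregister() with the same setup callback so the core can match and notify the stale indirect blocks through indr.cleanup.

    #include <linux/module.h>
    #include <net/flow_offload.h>

    /* Hypothetical driver-side sketch -- not part of this patch. */

    /* flow_setup_cb_t: handles the actual offload requests for the block. */
    static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                     void *cb_priv)
    {
            return -EOPNOTSUPP;     /* rule programming elided */
    }

    /* flow_indr_block_bind_cb_t: called from flow_indr_dev_setup_offload(). */
    static int foo_indr_setup_cb(struct net_device *dev, void *cb_priv,
                                 enum tc_setup_type type,
                                 struct flow_block_offload *bo)
    {
            struct flow_block_cb *block_cb;

            if (type != TC_SETUP_BLOCK)
                    return -EOPNOTSUPP;

            switch (bo->command) {
            case FLOW_BLOCK_BIND:
                    block_cb = flow_block_cb_alloc(foo_setup_tc_block_cb,
                                                   dev, cb_priv, NULL);
                    if (IS_ERR(block_cb))
                            return PTR_ERR(block_cb);
                    flow_block_cb_add(block_cb, bo);
                    return 0;
            case FLOW_BLOCK_UNBIND:
                    block_cb = flow_block_cb_lookup(bo->block,
                                                    foo_setup_tc_block_cb, dev);
                    if (!block_cb)
                            return -ENOENT;
                    flow_block_cb_remove(block_cb, bo);
                    return 0;
            default:
                    return -EOPNOTSUPP;
            }
    }

    static void *foo_priv;  /* hypothetical driver-private context */

    static int __init foo_init(void)
    {
            /* Offer tunnel device offload to the tc/netfilter frontends. */
            return flow_indr_dev_register(foo_indr_setup_cb, foo_priv);
    }

    static void __exit foo_exit(void)
    {
            /* Drops the registration; the core then invokes the frontend's
             * indr.cleanup for any flow_block_cb still bound indirectly.
             */
            flow_indr_dev_unregister(foo_indr_setup_cb, foo_priv,
                                     foo_setup_tc_block_cb);
    }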
Diffstat (limited to 'net')
-rw-r--r--  net/core/flow_offload.c  157
1 file changed, 157 insertions(+), 0 deletions(-)
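On the frontend side, a hedged sketch of how tc or netfilter might offer a tunnel device block through flow_indr_dev_setup_offload(); foo_indr_cleanup(), foo_frontend_bind() and the frontend_ctx argument are placeholders, not code from this patch. The cleanup function is stored in block_cb->indr.cleanup and is only run by flow_block_indr_notify() if the offloading driver unregisters while the block is still bound; a return value of -EOPNOTSUPP means no registered driver attached a callback for this device.

    #include <net/flow_offload.h>

    /* Hypothetical frontend-side sketch -- not part of this patch. */
    static void foo_indr_cleanup(struct flow_block_cb *block_cb)
    {
            /* The offloading driver went away: drop the frontend's
             * bookkeeping for this stale block callback.
             */
            list_del(&block_cb->list);
            flow_block_cb_free(block_cb);
    }

    static int foo_frontend_bind(struct net_device *dev,
                                 struct flow_block *block, void *frontend_ctx)
    {
            struct flow_block_offload bo = {};
            int err;

            bo.command     = FLOW_BLOCK_BIND;
            bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
            bo.block       = block;
            INIT_LIST_HEAD(&bo.cb_list);

            err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, frontend_ctx,
                                              &bo, foo_indr_cleanup);
            if (err == -EOPNOTSUPP)
                    return err;     /* no driver offers offload for this device */

            /* ... process bo.cb_list as for a directly bound block ... */
            return 0;
    }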
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index e64941c526b1..8cd7da2586ae 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -317,6 +317,163 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
+static DEFINE_MUTEX(flow_indr_block_lock);
+static LIST_HEAD(flow_block_indr_list);
+static LIST_HEAD(flow_block_indr_dev_list);
+
+struct flow_indr_dev {
+ struct list_head list;
+ flow_indr_block_bind_cb_t *cb;
+ void *cb_priv;
+ refcount_t refcnt;
+ struct rcu_head rcu;
+};
+
+static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
+ void *cb_priv)
+{
+ struct flow_indr_dev *indr_dev;
+
+ indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
+ if (!indr_dev)
+ return NULL;
+
+ indr_dev->cb = cb;
+ indr_dev->cb_priv = cb_priv;
+ refcount_set(&indr_dev->refcnt, 1);
+
+ return indr_dev;
+}
+
+int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
+{
+ struct flow_indr_dev *indr_dev;
+
+ mutex_lock(&flow_indr_block_lock);
+ list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
+ if (indr_dev->cb == cb &&
+ indr_dev->cb_priv == cb_priv) {
+ refcount_inc(&indr_dev->refcnt);
+ mutex_unlock(&flow_indr_block_lock);
+ return 0;
+ }
+ }
+
+ indr_dev = flow_indr_dev_alloc(cb, cb_priv);
+ if (!indr_dev) {
+ mutex_unlock(&flow_indr_block_lock);
+ return -ENOMEM;
+ }
+
+ list_add(&indr_dev->list, &flow_block_indr_dev_list);
+ mutex_unlock(&flow_indr_block_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(flow_indr_dev_register);
+
+static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv,
+ struct list_head *cleanup_list)
+{
+ struct flow_block_cb *this, *next;
+
+ list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
+ if (this->cb == setup_cb &&
+ this->cb_priv == cb_priv) {
+ list_move(&this->indr.list, cleanup_list);
+ return;
+ }
+ }
+}
+
+static void flow_block_indr_notify(struct list_head *cleanup_list)
+{
+ struct flow_block_cb *this, *next;
+
+ list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
+ list_del(&this->indr.list);
+ this->indr.cleanup(this);
+ }
+}
+
+void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
+ flow_setup_cb_t *setup_cb)
+{
+ struct flow_indr_dev *this, *next, *indr_dev = NULL;
+ LIST_HEAD(cleanup_list);
+
+ mutex_lock(&flow_indr_block_lock);
+ list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
+ if (this->cb == cb &&
+ this->cb_priv == cb_priv &&
+ refcount_dec_and_test(&this->refcnt)) {
+ indr_dev = this;
+ list_del(&indr_dev->list);
+ break;
+ }
+ }
+
+ if (!indr_dev) {
+ mutex_unlock(&flow_indr_block_lock);
+ return;
+ }
+
+ __flow_block_indr_cleanup(setup_cb, cb_priv, &cleanup_list);
+ mutex_unlock(&flow_indr_block_lock);
+
+ flow_block_indr_notify(&cleanup_list);
+ kfree(indr_dev);
+}
+EXPORT_SYMBOL(flow_indr_dev_unregister);
+
+static void flow_block_indr_init(struct flow_block_cb *flow_block,
+ struct flow_block_offload *bo,
+ struct net_device *dev, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
+{
+ flow_block->indr.binder_type = bo->binder_type;
+ flow_block->indr.data = data;
+ flow_block->indr.dev = dev;
+ flow_block->indr.cleanup = cleanup;
+}
+
+static void __flow_block_indr_binding(struct flow_block_offload *bo,
+ struct net_device *dev, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
+{
+ struct flow_block_cb *block_cb;
+
+ list_for_each_entry(block_cb, &bo->cb_list, list) {
+ switch (bo->command) {
+ case FLOW_BLOCK_BIND:
+ flow_block_indr_init(block_cb, bo, dev, data, cleanup);
+ list_add(&block_cb->indr.list, &flow_block_indr_list);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ list_del(&block_cb->indr.list);
+ break;
+ }
+ }
+}
+
+int flow_indr_dev_setup_offload(struct net_device *dev,
+ enum tc_setup_type type, void *data,
+ struct flow_block_offload *bo,
+ void (*cleanup)(struct flow_block_cb *block_cb))
+{
+ struct flow_indr_dev *this;
+
+ mutex_lock(&flow_indr_block_lock);
+ list_for_each_entry(this, &flow_block_indr_dev_list, list)
+ this->cb(dev, this->cb_priv, type, bo);
+
+ __flow_block_indr_binding(bo, dev, data, cleanup);
+ mutex_unlock(&flow_indr_block_lock);
+
+ return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
+}
+EXPORT_SYMBOL(flow_indr_dev_setup_offload);
+
static LIST_HEAD(block_cb_list);
static struct rhashtable indr_setup_block_ht;