author     Cong Wang <xiyou.wangcong@gmail.com>      2018-05-24 00:26:53 +0200
committer  David S. Miller <davem@davemloft.net>     2018-05-25 04:56:15 +0200
commit     aaa908ffbee18a65529b716efb346a626e81559a (patch)
tree       0ff6902780f9a964ac1b8fe8ea6a2bb7d7898cc7 /net/sched/cls_flower.c
parent     Merge branch 'Mirroring-tests-involving-VLAN' (diff)
net_sched: switch to rcu_work
Commit 05f0fe6b74db ("RCU, workqueue: Implement rcu_work") introduced new APIs for dispatching work from an RCU callback. Now we can simply switch the tc filters over to the new APIs, which gets rid of a lot of duplicated code.

Cc: Tejun Heo <tj@kernel.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/cls_flower.c')
-rw-r--r--   net/sched/cls_flower.c   40
1 file changed, 10 insertions(+), 30 deletions(-)
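To make the pattern in the diff below easier to follow, here is a minimal, self-contained sketch of the rcu_work API added by commit 05f0fe6b74db that this patch switches to. It is not part of the patch; my_obj, my_obj_free_work() and my_obj_release() are hypothetical names used only for illustration.

/*
 * Sketch of the rcu_work pattern: the object embeds a struct rcu_work,
 * the work handler recovers the object via to_rcu_work() + container_of(),
 * and the free is queued with queue_rcu_work() so it runs in process
 * context only after an RCU grace period has elapsed.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_obj {				/* hypothetical example object */
	int data;
	struct rcu_work rwork;		/* replaces the work_struct/rcu_head union */
};

static void my_obj_free_work(struct work_struct *work)
{
	struct my_obj *obj = container_of(to_rcu_work(work),
					  struct my_obj, rwork);

	kfree(obj);			/* sleepable context, grace period already over */
}

static void my_obj_release(struct my_obj *obj)
{
	INIT_RCU_WORK(&obj->rwork, my_obj_free_work);
	queue_rcu_work(system_wq, &obj->rwork);
}

The same split is visible in the hunks below: the union of work_struct and rcu_head becomes a single struct rcu_work, the work handlers switch to to_rcu_work(), and the separate call_rcu() trampolines disappear.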
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index eacaaf803914..4e74508515f4 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -73,10 +73,7 @@ struct fl_flow_mask {
struct cls_fl_head {
struct rhashtable ht;
struct list_head masks;
- union {
- struct work_struct work;
- struct rcu_head rcu;
- };
+ struct rcu_work rwork;
struct idr handle_idr;
};
@@ -90,10 +87,7 @@ struct cls_fl_filter {
struct list_head list;
u32 handle;
u32 flags;
- union {
- struct work_struct work;
- struct rcu_head rcu;
- };
+ struct rcu_work rwork;
struct net_device *hw_dev;
};
@@ -235,21 +229,14 @@ static void __fl_destroy_filter(struct cls_fl_filter *f)
static void fl_destroy_filter_work(struct work_struct *work)
{
- struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);
+ struct cls_fl_filter *f = container_of(to_rcu_work(work),
+ struct cls_fl_filter, rwork);
rtnl_lock();
__fl_destroy_filter(f);
rtnl_unlock();
}
-static void fl_destroy_filter(struct rcu_head *head)
-{
- struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
-
- INIT_WORK(&f->work, fl_destroy_filter_work);
- tcf_queue_work(&f->work);
-}
-
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
struct netlink_ext_ack *extack)
{
@@ -327,7 +314,7 @@ static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
fl_hw_destroy_filter(tp, f, extack);
tcf_unbind_filter(tp, &f->res);
if (async)
- call_rcu(&f->rcu, fl_destroy_filter);
+ tcf_queue_work(&f->rwork, fl_destroy_filter_work);
else
__fl_destroy_filter(f);
@@ -336,20 +323,13 @@ static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
static void fl_destroy_sleepable(struct work_struct *work)
{
- struct cls_fl_head *head = container_of(work, struct cls_fl_head,
- work);
+ struct cls_fl_head *head = container_of(to_rcu_work(work),
+ struct cls_fl_head,
+ rwork);
kfree(head);
module_put(THIS_MODULE);
}
-static void fl_destroy_rcu(struct rcu_head *rcu)
-{
- struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);
-
- INIT_WORK(&head->work, fl_destroy_sleepable);
- schedule_work(&head->work);
-}
-
static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
@@ -365,7 +345,7 @@ static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
idr_destroy(&head->handle_idr);
__module_get(THIS_MODULE);
- call_rcu(&head->rcu, fl_destroy_rcu);
+ tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}
static void *fl_get(struct tcf_proto *tp, u32 handle)
@@ -1036,7 +1016,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
list_replace_rcu(&fold->list, &fnew->list);
tcf_unbind_filter(tp, &fold->res);
tcf_exts_get_net(&fold->exts);
- call_rcu(&fold->rcu, fl_destroy_filter);
+ tcf_queue_work(&fold->rwork, fl_destroy_filter_work);
} else {
list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
}
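For reference, the call sites above rely on the companion tcf_queue_work() helper in net/sched/cls_api.c, which is changed by the same patch but is outside this diffstat-limited view. Its exact body is not shown here; the following is an assumption of its likely shape based on the rcu_work API and the existing tc_filter_wq workqueue, not a quote from the patch.

/* Assumed shape of the helper used above; the real definition lives in
 * net/sched/cls_api.c and may differ in detail (e.g. return type).
 */
void tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	queue_rcu_work(tc_filter_wq, rwork);	/* run func after an RCU grace period */
}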