diff options
author | Yotam Gigi <yotamg@mellanox.com> | 2016-07-21 12:03:12 +0200 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-07-25 08:11:59 +0200 |
commit | b87f7936a93246804cf70e7e2e0568799c948bb1 (patch) | |
tree | 9cc6fdbd50cb59f39c24997f7f4578a879d8b5bd /net/sched/cls_matchall.c | |
parent | net/sched: introduce Match-all classifier (diff) | |
download | linux-b87f7936a93246804cf70e7e2e0568799c948bb1.tar.xz linux-b87f7936a93246804cf70e7e2e0568799c948bb1.zip |
net/sched: Add match-all classifier hw offloading.
Following the work that has been done on offloading classifiers like u32
and flower, match-all classifier hw offloading is now possible, provided
the interface supports tc offloading.
To control the offloading, two tc flags have been introduced: skip_sw and
skip_hw. Typical usage:
tc filter add dev eth25 parent ffff: \
matchall skip_sw \
action mirred egress mirror \
dev eth27
Signed-off-by: Yotam Gigi <yotamg@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to '')
-rw-r--r-- | net/sched/cls_matchall.c | 76 |
1 file changed, 73 insertions, 3 deletions
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 8a6b4de7a99a..25927b6c4436 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -21,6 +21,7 @@ struct cls_mall_filter { struct tcf_result res; u32 handle; struct rcu_head rcu; + u32 flags; }; struct cls_mall_head { @@ -34,6 +35,9 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct cls_mall_head *head = rcu_dereference_bh(tp->root); struct cls_mall_filter *f = head->filter; + if (tc_skip_sw(f->flags)) + return -1; + return tcf_exts_exec(skb, &f->exts, res); } @@ -55,18 +59,61 @@ static void mall_destroy_filter(struct rcu_head *head) struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu); tcf_exts_destroy(&f->exts); + kfree(f); } +static int mall_replace_hw_filter(struct tcf_proto *tp, + struct cls_mall_filter *f, + unsigned long cookie) +{ + struct net_device *dev = tp->q->dev_queue->dev; + struct tc_to_netdev offload; + struct tc_cls_matchall_offload mall_offload = {0}; + + offload.type = TC_SETUP_MATCHALL; + offload.cls_mall = &mall_offload; + offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; + offload.cls_mall->exts = &f->exts; + offload.cls_mall->cookie = cookie; + + return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, + &offload); +} + +static void mall_destroy_hw_filter(struct tcf_proto *tp, + struct cls_mall_filter *f, + unsigned long cookie) +{ + struct net_device *dev = tp->q->dev_queue->dev; + struct tc_to_netdev offload; + struct tc_cls_matchall_offload mall_offload = {0}; + + offload.type = TC_SETUP_MATCHALL; + offload.cls_mall = &mall_offload; + offload.cls_mall->command = TC_CLSMATCHALL_DESTROY; + offload.cls_mall->exts = NULL; + offload.cls_mall->cookie = cookie; + + dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, + &offload); +} + static bool mall_destroy(struct tcf_proto *tp, bool force) { struct cls_mall_head *head = rtnl_dereference(tp->root); + struct net_device *dev = 
tp->q->dev_queue->dev; + struct cls_mall_filter *f = head->filter; - if (!force && head->filter) + if (!force && f) return false; - if (head->filter) - call_rcu(&head->filter->rcu, mall_destroy_filter); + if (f) { + if (tc_should_offload(dev, tp, f->flags)) + mall_destroy_hw_filter(tp, f, (unsigned long) f); + + call_rcu(&f->rcu, mall_destroy_filter); + } RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); return true; @@ -117,8 +164,10 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, { struct cls_mall_head *head = rtnl_dereference(tp->root); struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg; + struct net_device *dev = tp->q->dev_queue->dev; struct cls_mall_filter *f; struct nlattr *tb[TCA_MATCHALL_MAX + 1]; + u32 flags = 0; int err; if (!tca[TCA_OPTIONS]) @@ -135,6 +184,12 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, if (err < 0) return err; + if (tb[TCA_MATCHALL_FLAGS]) { + flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]); + if (!tc_flags_valid(flags)) + return -EINVAL; + } + f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) return -ENOBUFS; @@ -144,11 +199,22 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, if (!handle) handle = 1; f->handle = handle; + f->flags = flags; err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); if (err) goto errout; + if (tc_should_offload(dev, tp, flags)) { + err = mall_replace_hw_filter(tp, f, (unsigned long) f); + if (err) { + if (tc_skip_sw(flags)) + goto errout; + else + err = 0; + } + } + *arg = (unsigned long) f; rcu_assign_pointer(head->filter, f); @@ -163,6 +229,10 @@ static int mall_delete(struct tcf_proto *tp, unsigned long arg) { struct cls_mall_head *head = rtnl_dereference(tp->root); struct cls_mall_filter *f = (struct cls_mall_filter *) arg; + struct net_device *dev = tp->q->dev_queue->dev; + + if (tc_should_offload(dev, tp, f->flags)) + mall_destroy_hw_filter(tp, f, (unsigned long) f); RCU_INIT_POINTER(head->filter, NULL); 
tcf_unbind_filter(tp, &f->res); |