author    Davide Caratti <dcaratti@redhat.com>  2018-07-11 16:04:50 +0200
committer David S. Miller <davem@davemloft.net> 2018-07-12 23:54:12 +0200
commit    c749cdda9089eb1fdb6a9ab98f945124d12f2595 (patch)
tree      29ecf38f5595632596fd00c7f8b47c52e176aed6 /net/sched/act_skbedit.c
parent    net/sched: skbedit: use per-cpu counters (diff)
net/sched: act_skbedit: don't use spinlock in the data path
Use RCU instead of spin_{,un}lock_bh to protect concurrent read/write
access to the act_skbedit configuration. This reduces the effects of
contention in the data path when multiple readers are present.

Signed-off-by: Davide Caratti <dcaratti@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
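The conversion follows the standard RCU publish/subscribe pattern for action
parameters: the data path dereferences a params pointer inside an RCU
read-side critical section, while the RTNL-serialized control path publishes
a freshly allocated params struct and frees the old one after a grace period.
The following is a rough, self-contained sketch of that pattern, not code
taken from the patch: struct demo_params, struct demo_action, DEMO_F_PRIORITY
and the two functions are hypothetical stand-ins for the real tcf_skbedit
structures, but the RCU primitives are the same ones used in the diff below.

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical configuration blob, standing in for tcf_skbedit_params. */
struct demo_params {
	u32 flags;
	u32 priority;
	struct rcu_head rcu;
};

/* Hypothetical action, standing in for tcf_skbedit. */
struct demo_action {
	struct demo_params __rcu *params;	/* RCU-protected configuration */
};

#define DEMO_F_PRIORITY 0x1

/* Data path (reader): no spinlock, only an RCU read-side critical section. */
static u32 demo_read_priority(struct demo_action *a)
{
	struct demo_params *p;
	u32 prio = 0;

	rcu_read_lock();
	p = rcu_dereference(a->params);
	if (p->flags & DEMO_F_PRIORITY)
		prio = p->priority;
	rcu_read_unlock();

	return prio;
}

/* Control path (writer, serialized by RTNL): publish a freshly allocated
 * params struct, then free the old one after a grace period.
 */
static int demo_update(struct demo_action *a, u32 flags, u32 priority)
{
	struct demo_params *params_new, *params_old;

	ASSERT_RTNL();

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (!params_new)
		return -ENOMEM;

	params_new->flags = flags;
	params_new->priority = priority;

	params_old = rtnl_dereference(a->params);
	rcu_assign_pointer(a->params, params_new);
	if (params_old)
		kfree_rcu(params_old, rcu);

	return 0;
}

With this split, readers never block writers and pay no lock-acquisition
cost; the writer bears the allocation and the deferred free, which matches
the patch's goal of reducing contention in the data path.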
Diffstat (limited to 'net/sched/act_skbedit.c')
-rw-r--r--  net/sched/act_skbedit.c | 107
1 file changed, 68 insertions(+), 39 deletions(-)
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 8651b5bd6b59..da56e6938c9e 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -37,14 +37,19 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_skbedit *d = to_skbedit(a);
+ struct tcf_skbedit_params *params;
+ int action;
tcf_lastuse_update(&d->tcf_tm);
bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
- spin_lock(&d->tcf_lock);
- if (d->flags & SKBEDIT_F_PRIORITY)
- skb->priority = d->priority;
- if (d->flags & SKBEDIT_F_INHERITDSFIELD) {
+ rcu_read_lock();
+ params = rcu_dereference(d->params);
+ action = READ_ONCE(d->tcf_action);
+
+ if (params->flags & SKBEDIT_F_PRIORITY)
+ skb->priority = params->priority;
+ if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
int wlen = skb_network_offset(skb);
switch (tc_skb_protocol(skb)) {
@@ -63,23 +68,23 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
break;
}
}
- if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
- skb->dev->real_num_tx_queues > d->queue_mapping)
- skb_set_queue_mapping(skb, d->queue_mapping);
- if (d->flags & SKBEDIT_F_MARK) {
- skb->mark &= ~d->mask;
- skb->mark |= d->mark & d->mask;
+ if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
+ skb->dev->real_num_tx_queues > params->queue_mapping)
+ skb_set_queue_mapping(skb, params->queue_mapping);
+ if (params->flags & SKBEDIT_F_MARK) {
+ skb->mark &= ~params->mask;
+ skb->mark |= params->mark & params->mask;
}
- if (d->flags & SKBEDIT_F_PTYPE)
- skb->pkt_type = d->ptype;
-
- spin_unlock(&d->tcf_lock);
- return d->tcf_action;
+ if (params->flags & SKBEDIT_F_PTYPE)
+ skb->pkt_type = params->ptype;
+unlock:
+ rcu_read_unlock();
+ return action;
err:
- spin_unlock(&d->tcf_lock);
qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
- return TC_ACT_SHOT;
+ action = TC_ACT_SHOT;
+ goto unlock;
}
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
@@ -98,6 +103,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, skbedit_net_id);
+ struct tcf_skbedit_params *params_old, *params_new;
struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
struct tc_skbedit *parm;
struct tcf_skbedit *d;
@@ -185,25 +191,34 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
}
}
- spin_lock_bh(&d->tcf_lock);
+ ASSERT_RTNL();
- d->flags = flags;
+ params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+ if (unlikely(!params_new)) {
+ if (ret == ACT_P_CREATED)
+ tcf_idr_release(*a, bind);
+ return -ENOMEM;
+ }
+
+ params_new->flags = flags;
if (flags & SKBEDIT_F_PRIORITY)
- d->priority = *priority;
+ params_new->priority = *priority;
if (flags & SKBEDIT_F_QUEUE_MAPPING)
- d->queue_mapping = *queue_mapping;
+ params_new->queue_mapping = *queue_mapping;
if (flags & SKBEDIT_F_MARK)
- d->mark = *mark;
+ params_new->mark = *mark;
if (flags & SKBEDIT_F_PTYPE)
- d->ptype = *ptype;
+ params_new->ptype = *ptype;
/* default behaviour is to use all the bits */
- d->mask = 0xffffffff;
+ params_new->mask = 0xffffffff;
if (flags & SKBEDIT_F_MASK)
- d->mask = *mask;
+ params_new->mask = *mask;
d->tcf_action = parm->action;
-
- spin_unlock_bh(&d->tcf_lock);
+ params_old = rtnl_dereference(d->params);
+ rcu_assign_pointer(d->params, params_new);
+ if (params_old)
+ kfree_rcu(params_old, rcu);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
@@ -215,33 +230,36 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_skbedit *d = to_skbedit(a);
+ struct tcf_skbedit_params *params;
struct tc_skbedit opt = {
.index = d->tcf_index,
.refcnt = refcount_read(&d->tcf_refcnt) - ref,
.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
.action = d->tcf_action,
};
- struct tcf_t t;
u64 pure_flags = 0;
+ struct tcf_t t;
+
+ params = rtnl_dereference(d->params);
if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
- if ((d->flags & SKBEDIT_F_PRIORITY) &&
- nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority))
+ if ((params->flags & SKBEDIT_F_PRIORITY) &&
+ nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
goto nla_put_failure;
- if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
- nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping))
+ if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
+ nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
goto nla_put_failure;
- if ((d->flags & SKBEDIT_F_MARK) &&
- nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark))
+ if ((params->flags & SKBEDIT_F_MARK) &&
+ nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
goto nla_put_failure;
- if ((d->flags & SKBEDIT_F_PTYPE) &&
- nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype))
+ if ((params->flags & SKBEDIT_F_PTYPE) &&
+ nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
goto nla_put_failure;
- if ((d->flags & SKBEDIT_F_MASK) &&
- nla_put_u32(skb, TCA_SKBEDIT_MASK, d->mask))
+ if ((params->flags & SKBEDIT_F_MASK) &&
+ nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
goto nla_put_failure;
- if (d->flags & SKBEDIT_F_INHERITDSFIELD)
+ if (params->flags & SKBEDIT_F_INHERITDSFIELD)
pure_flags |= SKBEDIT_F_INHERITDSFIELD;
if (pure_flags != 0 &&
nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
@@ -257,6 +275,16 @@ nla_put_failure:
return -1;
}
+static void tcf_skbedit_cleanup(struct tc_action *a)
+{
+ struct tcf_skbedit *d = to_skbedit(a);
+ struct tcf_skbedit_params *params;
+
+ params = rcu_dereference_protected(d->params, 1);
+ if (params)
+ kfree_rcu(params, rcu);
+}
+
static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
struct netlink_callback *cb, int type,
const struct tc_action_ops *ops,
@@ -289,6 +317,7 @@ static struct tc_action_ops act_skbedit_ops = {
.act = tcf_skbedit,
.dump = tcf_skbedit_dump,
.init = tcf_skbedit_init,
+ .cleanup = tcf_skbedit_cleanup,
.walk = tcf_skbedit_walker,
.lookup = tcf_skbedit_search,
.delete = tcf_skbedit_delete,