author     Vlad Buslov <vladbu@mellanox.com>       2018-08-10 19:51:49 +0200
committer  David S. Miller <davem@davemloft.net>   2018-08-11 21:37:09 +0200
commit     c8814552fe51358f5fc46bc1c4aa4bb68454f4eb (patch)
tree       71af6a057faa64a3c469f9491320e50df0697486 /net
parent     net: sched: act_simple: remove dependency on rtnl lock (diff)
download   linux-c8814552fe51358f5fc46bc1c4aa4bb68454f4eb.tar.xz
           linux-c8814552fe51358f5fc46bc1c4aa4bb68454f4eb.zip
net: sched: act_skbmod: remove dependency on rtnl lock
Move read of skbmod_p rcu pointer to be protected by tcf spinlock. Use tcf
spinlock to protect private skbmod data from concurrent modification during
dump.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
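For illustration only, here is a minimal userspace analogue of the dump-side
pattern this patch introduces (hypothetical names; a pthread spinlock stands
in for d->tcf_lock and a plain pointer for the RCU-managed d->skbmod_p): the
dump path takes the per-action lock, copies the action field and the params
it needs while holding it, and then releases the lock, so it no longer
depends on the global rtnl lock.

/* Hypothetical sketch, not the kernel code: a per-object spinlock protects
 * both the scalar state and the params pointer that the dump path reads,
 * mirroring what the patch does with d->tcf_lock and d->skbmod_p.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct params {                        /* stands in for tcf_skbmod_params */
	unsigned long flags;
};

struct action {                        /* stands in for struct tcf_skbmod */
	pthread_spinlock_t lock;       /* stands in for d->tcf_lock */
	int tcf_action;
	struct params *params;         /* stands in for d->skbmod_p */
};

/* Dump-side reader: copy everything it needs while holding the lock. */
static void dump_action(struct action *a)
{
	int act;
	unsigned long flags;

	pthread_spin_lock(&a->lock);
	act = a->tcf_action;           /* scalar read under the lock */
	flags = a->params->flags;      /* pointer deref also under the lock */
	pthread_spin_unlock(&a->lock);

	printf("action=%d flags=%#lx\n", act, flags);
}

int main(void)
{
	struct params p = { .flags = 0x3 };
	struct action a = { .tcf_action = 1, .params = &p };

	pthread_spin_init(&a.lock, PTHREAD_PROCESS_PRIVATE);
	dump_action(&a);
	pthread_spin_destroy(&a.lock);
	return 0;
}

In the kernel patch itself the reader additionally uses
rcu_dereference_protected() with lockdep_is_held(&d->tcf_lock) so lockdep can
verify that the lock really is held at that point.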
Diffstat (limited to 'net')
-rw-r--r--	net/sched/act_skbmod.c	14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index c437c6d51a71..e9c86ade3b40 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -156,7 +156,6 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 
 	d = to_skbmod(*a);
 
-	ASSERT_RTNL();
 	p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
 	if (unlikely(!p)) {
 		tcf_idr_release(*a, bind);
@@ -166,10 +165,10 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 	p->flags = lflags;
 	d->tcf_action = parm->action;
 
-	p_old = rtnl_dereference(d->skbmod_p);
-
 	if (ovr)
 		spin_lock_bh(&d->tcf_lock);
+	/* Protected by tcf_lock if overwriting existing action. */
+	p_old = rcu_dereference_protected(d->skbmod_p, 1);
 
 	if (lflags & SKBMOD_F_DMAC)
 		ether_addr_copy(p->eth_dst, daddr);
@@ -205,15 +204,18 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
 {
 	struct tcf_skbmod *d = to_skbmod(a);
 	unsigned char *b = skb_tail_pointer(skb);
-	struct tcf_skbmod_params *p = rtnl_dereference(d->skbmod_p);
+	struct tcf_skbmod_params *p;
 	struct tc_skbmod opt = {
 		.index = d->tcf_index,
 		.refcnt = refcount_read(&d->tcf_refcnt) - ref,
 		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
-		.action = d->tcf_action,
 	};
 	struct tcf_t t;
 
+	spin_lock_bh(&d->tcf_lock);
+	opt.action = d->tcf_action;
+	p = rcu_dereference_protected(d->skbmod_p,
+				      lockdep_is_held(&d->tcf_lock));
 	opt.flags = p->flags;
 	if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
@@ -231,8 +233,10 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
 	if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
 		goto nla_put_failure;
 
+	spin_unlock_bh(&d->tcf_lock);
 	return skb->len;
 nla_put_failure:
+	spin_unlock_bh(&d->tcf_lock);
 	nlmsg_trim(skb, b);
 	return -1;
 }
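
As a side note on the init-path hunk: the writer only takes the per-action
lock when overwriting an existing action (ovr), presumably because a freshly
created action is not yet reachable by concurrent readers, which is why the
patch can pass a constant 1 as the condition to rcu_dereference_protected().
A hypothetical userspace sketch of that conditional-locking shape (made-up
names, pthread spinlock again standing in for d->tcf_lock):

/* Hypothetical sketch, not the kernel code: lock only when overwriting an
 * existing action; a brand-new object is still private to this thread.
 * Build with: cc -pthread sketch2.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct params { unsigned long flags; };

struct action {
	pthread_spinlock_t lock;
	struct params *params;
};

static void install_params(struct action *a, struct params *new, bool ovr)
{
	struct params *old;

	if (ovr)
		pthread_spin_lock(&a->lock);

	/* Protected by the lock if overwriting an existing action;
	 * otherwise nobody else can reach 'a' yet.
	 */
	old = a->params;
	a->params = new;

	if (ovr)
		pthread_spin_unlock(&a->lock);

	/* A real RCU user would defer this until readers are done. */
	free(old);
}

int main(void)
{
	struct action a = { .params = NULL };
	struct params *first = calloc(1, sizeof(*first));
	struct params *second = calloc(1, sizeof(*second));

	pthread_spin_init(&a.lock, PTHREAD_PROCESS_PRIVATE);
	install_params(&a, first, false);  /* newly created: no lock needed */
	install_params(&a, second, true);  /* overwrite: take the lock */
	free(a.params);
	pthread_spin_destroy(&a.lock);
	return 0;
}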