author | Herbert Xu <herbert@gondor.apana.org.au> | 2016-11-28 12:22:12 +0100
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2016-11-30 01:48:38 +0100
commit | 707693c8a498697aa8db240b93eb76ec62e30892 (patch) |
tree | a9ee8933c07cfc86afc74436d39a26c8beaff683 /net/netlink |
parent | net/sched: pedit: make sure that offset is valid (diff) |
netlink: Call cb->done from a worker thread
The cb->done interface expects to be called in process context.
This was broken by the netlink RCU conversion. This patch fixes
it by adding a worker struct to make the cb->done call where
necessary.
Fixes: 21e4902aea80 ("netlink: Lockless lookup with RCU grace...")
Reported-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
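
For background on why ->done must not run from the RCU release path, here is a minimal, hypothetical sketch of a netlink dump whose done handler takes a sleeping lock and therefore requires process context. The my_* names and my_dump_lock are illustrative only; they are not part of af_netlink or of this patch.

```c
/*
 * Hypothetical example (not kernel code, not part of this patch): a dump
 * whose ->done handler takes a mutex, i.e. it must run in process context.
 */
#include <linux/mutex.h>
#include <linux/netlink.h>

static DEFINE_MUTEX(my_dump_lock);	/* sleeping lock: process context only */

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* fill skb with one batch of dump data; return 0 when the dump is done */
	return 0;
}

static int my_done(struct netlink_callback *cb)
{
	mutex_lock(&my_dump_lock);	/* sleeping: illegal from an RCU callback */
	/* drop per-dump state here */
	mutex_unlock(&my_dump_lock);
	return 0;
}

/*
 * Start the dump; netlink later invokes ->done either from the normal dump
 * path or, with this patch, from the worker when the socket is torn down
 * via the RCU path -- in both cases in process context.
 */
static int my_start_dump(struct sock *nlsk, struct sk_buff *skb,
			 struct nlmsghdr *nlh)
{
	struct netlink_dump_control c = {
		.dump = my_dump,
		.done = my_done,
	};

	return netlink_dump_start(nlsk, skb, nlh, &c);
}
```

Note the design choice in the patch below: the work item is scheduled only when cb_running is set and a done handler exists, so the common teardown case stays synchronous and pays no workqueue cost.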
Diffstat (limited to 'net/netlink')
-rw-r--r-- | net/netlink/af_netlink.c | 27 |
-rw-r--r-- | net/netlink/af_netlink.h | 2 |
2 files changed, 25 insertions, 4 deletions
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 62bea4591054..602e5ebe9db3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -322,14 +322,11 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	sk_mem_charge(sk, skb->truesize);
 }
 
-static void netlink_sock_destruct(struct sock *sk)
+static void __netlink_sock_destruct(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
 	if (nlk->cb_running) {
-		if (nlk->cb.done)
-			nlk->cb.done(&nlk->cb);
-
 		module_put(nlk->cb.module);
 		kfree_skb(nlk->cb.skb);
 	}
@@ -346,6 +343,28 @@ static void netlink_sock_destruct(struct sock *sk)
 	WARN_ON(nlk_sk(sk)->groups);
 }
 
+static void netlink_sock_destruct_work(struct work_struct *work)
+{
+	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
+						work);
+
+	nlk->cb.done(&nlk->cb);
+	__netlink_sock_destruct(&nlk->sk);
+}
+
+static void netlink_sock_destruct(struct sock *sk)
+{
+	struct netlink_sock *nlk = nlk_sk(sk);
+
+	if (nlk->cb_running && nlk->cb.done) {
+		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
+		schedule_work(&nlk->work);
+		return;
+	}
+
+	__netlink_sock_destruct(sk);
+}
+
 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
  * SMP. Look, when several writers sleep and reader wakes them up, all but one
  * immediately hit write lock and grab all the cpus. Exclusive sleep solves
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 3cfd6cc60504..4fdb38318977 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -3,6 +3,7 @@
 
 #include <linux/rhashtable.h>
 #include <linux/atomic.h>
+#include <linux/workqueue.h>
 #include <net/sock.h>
 
 #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -33,6 +34,7 @@ struct netlink_sock {
 
 	struct rhash_head	node;
 	struct rcu_head		rcu;
+	struct work_struct	work;
 };
 
 static inline struct netlink_sock *nlk_sk(struct sock *sk)