author    Liping Zhang <liping.zhang@spreadtrum.com>  2016-07-03 07:18:45 +0200
committer Pablo Neira Ayuso <pablo@netfilter.org>     2016-07-11 11:44:34 +0200
commit    8786a9716d028083f56f944996883f7d1a05919e (patch)
tree      7c2691ec8ade17c7e90ff1d8a3a99bc23f35861e
parent    netfilter: cttimeout: unlink timeout obj again when hash resize happen (diff)
netfilter: nf_ct_helper: unlink helper again when hash resize happen
From: Liping Zhang <liping.zhang@spreadtrum.com>

Similar to ctnl_untimeout, when a hash resize happens we should restart and
do the unhelp from bucket 0 again, since the resize rehashes entries and may
move them into buckets that the loop has already visited.

Signed-off-by: Liping Zhang <liping.zhang@spreadtrum.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
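[Editor's note] For context, below is a minimal user-space sketch of the restart-on-resize pattern this patch applies. It is an illustration only, under assumed names: table_size, NUM_LOCKS, locks, buckets, scrub_bucket and scrub_all are hypothetical stand-ins, not the kernel's conntrack API, and pthread mutexes stand in for the conntrack bucket spinlocks.

/*
 * Sketch of the restart-on-resize scan pattern.  All identifiers here are
 * hypothetical; they only mirror the shape of the kernel code below.
 */
#include <pthread.h>
#include <stdio.h>

#define NUM_LOCKS   4
#define MAX_BUCKETS 16

static unsigned int table_size = 8;        /* may change under a concurrent resize */
static pthread_mutex_t locks[NUM_LOCKS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static int buckets[MAX_BUCKETS];           /* stand-in for the hash chains */

static void scrub_bucket(unsigned int i)
{
	buckets[i] = 0;                    /* stand-in for unhelp() on each entry */
}

static void scrub_all(void)
{
	unsigned int last_size, i;

restart:
	/* Snapshot the table size; a concurrent resize invalidates the scan. */
	last_size = table_size;
	for (i = 0; i < last_size; i++) {
		pthread_mutex_t *lock = &locks[i % NUM_LOCKS];

		pthread_mutex_lock(lock);
		if (last_size != table_size) {
			/*
			 * The table was resized under us: entries may have been
			 * rehashed into buckets we already passed, so drop the
			 * lock and rescan from bucket 0.
			 */
			pthread_mutex_unlock(lock);
			goto restart;
		}
		scrub_bucket(i);
		pthread_mutex_unlock(lock);
	}
}

int main(void)
{
	scrub_all();
	printf("scrubbed %u buckets\n", table_size);
	return 0;
}

The design point is the same as in the patch: comparing the snapshotted size against the live size while holding the per-bucket lock is what detects a resize, and restarting from bucket 0 is what guarantees no rehashed entry is missed.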
-rw-r--r--  net/netfilter/nf_conntrack_helper.c  19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 3a1a88b9bafa..a4294e949cdc 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -409,6 +409,8 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 	struct nf_conntrack_expect *exp;
 	const struct hlist_node *next;
 	const struct hlist_nulls_node *nn;
+	unsigned int last_hsize;
+	spinlock_t *lock;
 	struct net *net;
 	unsigned int i;
 
@@ -446,13 +448,18 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 	rtnl_unlock();
 
 	local_bh_disable();
-	for (i = 0; i < nf_conntrack_htable_size; i++) {
-		nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
-		if (i < nf_conntrack_htable_size) {
-			hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode)
-				unhelp(h, me);
+restart:
+	last_hsize = nf_conntrack_htable_size;
+	for (i = 0; i < last_hsize; i++) {
+		lock = &nf_conntrack_locks[i % CONNTRACK_LOCKS];
+		nf_conntrack_lock(lock);
+		if (last_hsize != nf_conntrack_htable_size) {
+			spin_unlock(lock);
+			goto restart;
 		}
-		spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+		hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode)
+			unhelp(h, me);
+		spin_unlock(lock);
 	}
 	local_bh_enable();
 }