author     Martin KaFai Lau <kafai@fb.com>          2015-09-15 23:30:09 +0200
committer  David S. Miller <davem@davemloft.net>    2015-09-15 23:53:05 +0200
commit     70da5b5c532f0ec8aa76b4f46158da5f010f34b3 (patch)
tree       11b8e0054461b9788d669215416655f112545670 /net/ipv6
parent     ipv6: Avoid double dst_free (diff)
ipv6: Replace spinlock with seqlock and rcu in ip6_tunnel
This patch uses a seqlock to ensure consistency between idst->dst and idst->cookie. It also makes dst freeing from the fib tree undergo an RCU grace period.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--  net/ipv6/ip6_fib.c    |  9
-rw-r--r--  net/ipv6/ip6_tunnel.c | 51
2 files changed, 34 insertions(+), 26 deletions(-)
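Before the diff itself, here is a minimal kernel-style sketch of the write side of the pattern the description refers to: the cached pointer and its cookie are updated together under a seqlock, and the pointer is published with rcu_assign_pointer() so lockless readers can sample a consistent pair. The structure and function names (my_dst_cache, my_dst_cache_set) are illustrative only; the real ip6_tnl_dst layout is defined outside this diff, and the cookie value below is a placeholder for what the patch derives via rt6_get_cookie().

#include <linux/types.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>
#include <net/dst.h>

/* Hypothetical per-cpu cache slot; the patch's ip6_tnl_dst is analogous. */
struct my_dst_cache {
	seqlock_t lock;			/* orders dst/cookie updates for readers */
	struct dst_entry __rcu *dst;	/* RCU-published cached route */
	u32 cookie;			/* validity cookie paired with dst */
};

/* Writer: replace the cached dst and its cookie as one consistent update. */
static void my_dst_cache_set(struct my_dst_cache *c, struct dst_entry *dst)
{
	write_seqlock_bh(&c->lock);
	/* Dereference is safe here: the seqlock's internal spinlock is held. */
	dst_release(rcu_dereference_protected(c->dst,
					      lockdep_is_held(&c->lock.lock)));
	if (dst) {
		dst_hold(dst);
		c->cookie = 1;	/* placeholder; real code uses rt6_get_cookie() */
	} else {
		c->cookie = 0;
	}
	rcu_assign_pointer(c->dst, dst);
	write_sequnlock_bh(&c->lock);
}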
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e68350bf838b..8a9ec01f4d01 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -155,6 +155,11 @@ static void node_free(struct fib6_node *fn)
 	kmem_cache_free(fib6_node_kmem, fn);
 }
+static void rt6_rcu_free(struct rt6_info *rt)
+{
+	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
+}
+
 static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 {
 	int cpu;
@@ -169,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 		ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
 		pcpu_rt = *ppcpu_rt;
 		if (pcpu_rt) {
-			dst_free(&pcpu_rt->dst);
+			rt6_rcu_free(pcpu_rt);
 			*ppcpu_rt = NULL;
 		}
 	}
@@ -181,7 +186,7 @@ static void rt6_release(struct rt6_info *rt)
 {
 	if (atomic_dec_and_test(&rt->rt6i_ref)) {
 		rt6_free_pcpu(rt);
-		dst_free(&rt->dst);
+		rt6_rcu_free(rt);
 	}
 }
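The fib change above defers the final free to an RCU callback, so a reader that fetched the route under rcu_read_lock() cannot have it freed out from under it before the grace period ends. A generic sketch of the same call_rcu() deferred-free pattern follows, using a hypothetical object and a plain kfree() callback rather than the fib code's dst_rcu_free():

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-protected object; rt6_info keeps its rcu_head in dst. */
struct my_item {
	int value;
	struct rcu_head rcu;	/* storage for the deferred-free callback */
};

static void my_item_rcu_free(struct rcu_head *head)
{
	/* Runs only after all pre-existing RCU readers have finished. */
	kfree(container_of(head, struct my_item, rcu));
}

static void my_item_release(struct my_item *item)
{
	/* Unpublish the pointer first (e.g. rcu_assign_pointer(slot, NULL)),
	 * then defer the actual free past the current grace period.
	 */
	call_rcu(&item->rcu, my_item_rcu_free);
}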
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 851cf6d1eb45..983f0d20f96d 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -126,45 +126,48 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
  * Locking : hash tables are protected by RCU and RTNL
  */
-static void __ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
-				      struct dst_entry *dst)
+static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
+				    struct dst_entry *dst)
 {
-	dst_release(idst->dst);
+	write_seqlock_bh(&idst->lock);
+	dst_release(rcu_dereference_protected(
+			    idst->dst,
+			    lockdep_is_held(&idst->lock.lock)));
 	if (dst) {
 		dst_hold(dst);
 		idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
 	} else {
 		idst->cookie = 0;
 	}
-	idst->dst = dst;
-}
-
-static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
-				    struct dst_entry *dst)
-{
-
-	spin_lock_bh(&idst->lock);
-	__ip6_tnl_per_cpu_dst_set(idst, dst);
-	spin_unlock_bh(&idst->lock);
+	rcu_assign_pointer(idst->dst, dst);
+	write_sequnlock_bh(&idst->lock);
 }
 struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
 {
 	struct ip6_tnl_dst *idst;
 	struct dst_entry *dst;
+	unsigned int seq;
+	u32 cookie;
 	idst = raw_cpu_ptr(t->dst_cache);
-	spin_lock_bh(&idst->lock);
-	dst = idst->dst;
-	if (dst) {
-		if (!dst->obsolete || dst->ops->check(dst, idst->cookie)) {
-			dst_hold(idst->dst);
-		} else {
-			__ip6_tnl_per_cpu_dst_set(idst, NULL);
-			dst = NULL;
-		}
+
+	rcu_read_lock();
+	do {
+		seq = read_seqbegin(&idst->lock);
+		dst = rcu_dereference(idst->dst);
+		cookie = idst->cookie;
+	} while (read_seqretry(&idst->lock, seq));
+
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
+	rcu_read_unlock();
+
+	if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
+		ip6_tnl_per_cpu_dst_set(idst, NULL);
+		dst_release(dst);
+		dst = NULL;
 	}
-	spin_unlock_bh(&idst->lock);
 	return dst;
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
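This is the read side that pairs with the writer sketched earlier: sample the pointer and cookie inside a seqlock retry loop so they come from the same update, then keep the entry only if a reference can still be taken with atomic_inc_not_zero(). Again a sketch, reusing the hypothetical my_dst_cache and my_dst_cache_set() from above:

/* Reader: lockless lookup of the cached dst, paired with my_dst_cache_set(). */
static struct dst_entry *my_dst_cache_get(struct my_dst_cache *c, u32 *cookie)
{
	struct dst_entry *dst;
	unsigned int seq;

	rcu_read_lock();
	do {
		/* Retry until dst and cookie were read from the same update. */
		seq = read_seqbegin(&c->lock);
		dst = rcu_dereference(c->dst);
		*cookie = c->cookie;
	} while (read_seqretry(&c->lock, seq));

	/* The entry may already be dying; keep it only if its refcount
	 * has not yet dropped to zero.
	 */
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	rcu_read_unlock();

	return dst;
}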
@@ -204,7 +207,7 @@ int ip6_tnl_dst_init(struct ip6_tnl *t)
 		return -ENOMEM;
 	for_each_possible_cpu(i)
-		spin_lock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
+		seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
 	return 0;
 }
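The init hunk only swaps spin_lock_init() for seqlock_init(). For completeness, a sketch of how such a per-cpu cache could be allocated and initialized, again using the hypothetical my_dst_cache rather than the tunnel code's actual allocation path:

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Allocate one cache slot per possible CPU and initialize each seqlock.
 * alloc_percpu() zeroes the memory, so dst starts NULL and cookie starts 0.
 */
static struct my_dst_cache __percpu *my_dst_cache_init(void)
{
	struct my_dst_cache __percpu *cache;
	int cpu;

	cache = alloc_percpu(struct my_dst_cache);
	if (!cache)
		return NULL;

	for_each_possible_cpu(cpu)
		seqlock_init(&per_cpu_ptr(cache, cpu)->lock);

	return cache;
}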