author     David S. Miller <davem@davemloft.net>  2012-06-10 09:24:21 +0200
committer  David S. Miller <davem@davemloft.net>  2012-06-11 11:09:10 +0200
commit     b48c80ece973e9eddb042f6685b482b261ff0d47 (patch)
tree       30262edc9721ac70e423bcc8d51f558d2962b539
parent     ipv4: Kill ip_rt_frag_needed(). (diff)
download   linux-b48c80ece973e9eddb042f6685b482b261ff0d47.tar.xz
           linux-b48c80ece973e9eddb042f6685b482b261ff0d47.zip
inet: Add family scope inetpeer flushes.
This implementation can deal with having many inetpeer roots, which is a
necessary prerequisite for per-FIB table rooted peer tables.

Each family (AF_INET, AF_INET6) has a sequence number which we bump when
we get a family invalidation request.

Each peer lookup cheaply checks whether the flush sequence of the root we
are using is out of date, and if so flushes it and updates the sequence
number.

Signed-off-by: David S. Miller <davem@davemloft.net>
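For readers skimming the commit, the following is a minimal, self-contained
userspace C sketch of the lazy per-family flush pattern the message describes.
The peer_base struct, the stub invalidate_tree(), main() and the plain C11
atomics are illustrative stand-ins, not the kernel API; the real implementation
is in the diff below.

/* Sketch of "bump a per-family counter, flush lazily on next lookup".
 * Everything here is a simplified stand-in for the kernel code. */
#include <stdatomic.h>
#include <stdio.h>
#include <sys/socket.h>          /* AF_INET, AF_INET6 */

struct peer_base {
        unsigned int flush_seq;  /* last family sequence this root saw */
        int total;               /* stand-in for the number of peers */
};

static atomic_uint v4_seq, v6_seq;   /* bumped on a family-wide flush */

static atomic_uint *seq_ptr(int family)
{
        return family == AF_INET ? &v4_seq : &v6_seq;
}

static void invalidate_tree(struct peer_base *base)
{
        base->total = 0;         /* stand-in for freeing every peer node */
}

/* Called at the start of every lookup: a cheap compare, flush only if stale. */
static void flush_check(struct peer_base *base, int family)
{
        unsigned int cur = atomic_load(seq_ptr(family));

        if (base->flush_seq != cur) {
                invalidate_tree(base);
                base->flush_seq = cur;
        }
}

/* Family-wide invalidation: just bump the counter, no tree walk here. */
static void invalidate_family(int family)
{
        atomic_fetch_add(seq_ptr(family), 1);
}

int main(void)
{
        /* ~0U guarantees the first lookup sees a mismatch and flushes once. */
        struct peer_base base = { .flush_seq = ~0U, .total = 3 };

        flush_check(&base, AF_INET);    /* stale: flushes, records seq 0 */
        invalidate_family(AF_INET);     /* O(1); work deferred to next lookup */
        flush_check(&base, AF_INET);    /* sees the bumped sequence, flushes */
        printf("total after flushes: %d\n", base.total);
        return 0;
}

The point of the pattern is that invalidation stays O(1) regardless of how many
roots exist; each root pays for its own flush the next time it is actually used.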
-rw-r--r--  include/net/inetpeer.h |  2
-rw-r--r--  net/ipv4/inetpeer.c    | 28
-rw-r--r--  net/ipv4/route.c       |  2
3 files changed, 31 insertions(+), 1 deletion(-)
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index d432489e7109..e15c0862a686 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -68,6 +68,7 @@ struct inet_peer {
 struct inet_peer_base {
        struct inet_peer __rcu  *root;
        seqlock_t               lock;
+       u32                     flush_seq;
        int                     total;
 };

@@ -168,6 +169,7 @@ extern void inet_putpeer(struct inet_peer *p);
 extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

 extern void inetpeer_invalidate_tree(struct inet_peer_base *);
+extern void inetpeer_invalidate_family(int family);

 /*
  * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index e4cba56a5349..cac02ad1425d 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -86,10 +86,36 @@ void inet_peer_base_init(struct inet_peer_base *bp)
 {
        bp->root = peer_avl_empty_rcu;
        seqlock_init(&bp->lock);
+       bp->flush_seq = ~0U;
        bp->total = 0;
 }
 EXPORT_SYMBOL_GPL(inet_peer_base_init);

+static atomic_t v4_seq = ATOMIC_INIT(0);
+static atomic_t v6_seq = ATOMIC_INIT(0);
+
+static atomic_t *inetpeer_seq_ptr(int family)
+{
+       return (family == AF_INET ? &v4_seq : &v6_seq);
+}
+
+static inline void flush_check(struct inet_peer_base *base, int family)
+{
+       atomic_t *fp = inetpeer_seq_ptr(family);
+
+       if (unlikely(base->flush_seq != atomic_read(fp))) {
+               inetpeer_invalidate_tree(base);
+               base->flush_seq = atomic_read(fp);
+       }
+}
+
+void inetpeer_invalidate_family(int family)
+{
+       atomic_t *fp = inetpeer_seq_ptr(family);
+
+       atomic_inc(fp);
+}
+
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

 /* Exported for sysctl_net_ipv4. */
@@ -437,6 +463,8 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
        unsigned int sequence;
        int invalidated, gccnt = 0;

+       flush_check(base, daddr->family);
+
        /* Attempt a lockless lookup first.
         * Because of a concurrent writer, we might not find an existing entry.
         */
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4f5834c4a667..456a9470fb54 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -935,7 +935,7 @@ static void rt_cache_invalidate(struct net *net)
        get_random_bytes(&shuffle, sizeof(shuffle));
        atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
-       inetpeer_invalidate_tree(net->ipv4.peers);
+       inetpeer_invalidate_family(AF_INET);
 }

 /*