author    David S. Miller <davem@davemloft.net>    2016-05-04 22:35:31 +0200
committer David S. Miller <davem@davemloft.net>    2016-05-04 22:35:31 +0200
commit    32b583a0cb9b757d68c44f2819fa6ccf95dbb953 (patch)
tree      9715bb206a1dee466c854ce86e164aadef105aba /net/core
parent    net: fix infoleak in rtnetlink (diff)
parent    vti: Add pmtu handling to vti_xmit. (diff)
download  linux-32b583a0cb9b757d68c44f2819fa6ccf95dbb953.tar.xz
          linux-32b583a0cb9b757d68c44f2819fa6ccf95dbb953.zip
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec
Steffen Klassert says:

====================
pull request (net): ipsec 2016-05-04

1) The flowcache can hit an OOM condition if too many entries are in the
   gc_list. Fix this by counting the entries in the gc_list and refusing
   new allocations if the value is too high.

2) The inner headers are invalid after an xfrm transformation, so reset the
   skb encapsulation field to ensure nobody tries to access the inner
   headers. Otherwise tunnel devices stacked on top of xfrm may build the
   outer headers based on wrong information.

3) Add pmtu handling to vti; we need it to report pmtu information for
   locally generated packets.

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
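Fix 1) is a plain backpressure scheme: every cache entry queued for deferred garbage collection bumps a counter, the GC worker decrements it as entries are freed, and the allocation path refuses to create new entries while that backlog exceeds a watermark. The fragment below is only a standalone userspace sketch of that idea, not the kernel code; the names, the threshold, and the non-atomic counter are invented for illustration (the real change, using the per-netns atomic_t flow_cache_gc_count, is in the net/core/flow.c diff further down).

/*
 * Standalone illustration of the backpressure idea in 1), not kernel code:
 * count the entries queued for garbage collection and refuse new
 * allocations while that backlog exceeds a watermark.  All names and the
 * threshold are invented for the example; the single-threaded int stands
 * in for the atomic_t flow_cache_gc_count used by the actual patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define GC_HIGH_WATERMARK 4	/* illustrative threshold only */

static int gc_pending;		/* entries queued for GC, not yet freed */

static void queue_for_gc(void)	/* called when an entry is retired */
{
	gc_pending++;
}

static void gc_reap_one(void)	/* "GC worker" frees one queued entry */
{
	if (gc_pending > 0)
		gc_pending--;
}

static void *cache_alloc_entry(void)
{
	/* Refuse new entries while the GC backlog is too large; this mirrors
	 * the ERR_PTR(-ENOBUFS) return added to flow_cache_lookup(). */
	if (gc_pending > GC_HIGH_WATERMARK)
		return NULL;
	return malloc(32);
}

int main(void)
{
	void *e;
	int i;

	for (i = 0; i < 6; i++)		/* build up a GC backlog */
		queue_for_gc();

	e = cache_alloc_entry();
	printf("alloc under backlog: %s\n", e ? "ok" : "refused");	/* refused */
	free(e);

	while (gc_pending)		/* drain the backlog */
		gc_reap_one();

	e = cache_alloc_entry();
	printf("alloc after GC:      %s\n", e ? "ok" : "refused");	/* ok */
	free(e);
	return 0;
}

The point of refusing the allocation, rather than letting the backlog grow without bound, is exactly the OOM condition described above.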
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/flow.c	14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/net/core/flow.c b/net/core/flow.c
index 1033725be40b..3937b1b68d5b 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -92,8 +92,11 @@ static void flow_cache_gc_task(struct work_struct *work)
 	list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
 	spin_unlock_bh(&xfrm->flow_cache_gc_lock);
 
-	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
 		flow_entry_kill(fce, xfrm);
+		atomic_dec(&xfrm->flow_cache_gc_count);
+		WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
+	}
 }
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
@@ -101,6 +104,7 @@ static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
 				     struct netns_xfrm *xfrm)
 {
 	if (deleted) {
+		atomic_add(deleted, &xfrm->flow_cache_gc_count);
 		fcp->hash_count -= deleted;
 		spin_lock_bh(&xfrm->flow_cache_gc_lock);
 		list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
@@ -232,6 +236,13 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 		if (fcp->hash_count > fc->high_watermark)
 			flow_cache_shrink(fc, fcp);
 
+		if (fcp->hash_count > 2 * fc->high_watermark ||
+		    atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
+			atomic_inc(&net->xfrm.flow_cache_genid);
+			flo = ERR_PTR(-ENOBUFS);
+			goto ret_object;
+		}
+
 		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
 			fle->net = net;
@@ -446,6 +457,7 @@ int flow_cache_init(struct net *net)
 	INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
 	INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
 	mutex_init(&net->xfrm.flow_flush_sem);
+	atomic_set(&net->xfrm.flow_cache_gc_count, 0);
 
 	fc->hash_shift = 10;
 	fc->low_watermark = 2 * flow_cache_hash_size(fc);
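The refusal in the flow_cache_lookup() hunk is delivered through the kernel's ERR_PTR()/IS_ERR() convention from <linux/err.h>: the error code is encoded into the returned pointer itself, so callers have to test the result with IS_ERR() before using it. Below is a minimal userspace re-creation of that convention, only to show how a caller can tell a real object apart from an -ENOBUFS refusal; apart from the errno value, everything here is invented for the example and uses only standard C.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace re-creation of the ERR_PTR()/IS_ERR() idea: small negative errno
 * values live at the very top of the address space, where no real allocation
 * can be, so a single pointer return can carry either an object or an error.
 */
#define MAX_ERRNO 4095

static void *err_ptr(long error)	{ return (void *)(intptr_t)error; }
static long ptr_err(const void *ptr)	{ return (long)(intptr_t)ptr; }
static int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Toy lookup that refuses with -ENOBUFS while the (fake) cache is under pressure. */
static void *toy_lookup(int under_pressure)
{
	if (under_pressure)
		return err_ptr(-ENOBUFS);
	return malloc(16);
}

int main(void)
{
	void *obj = toy_lookup(1);

	if (is_err(obj))
		printf("lookup refused, error %ld\n", ptr_err(obj));

	obj = toy_lookup(0);
	if (!is_err(obj))
		printf("lookup returned an object\n");
	free(obj);
	return 0;
}

In the patched kernel code the same test on the caller's side decides whether a cached flow object can be used or the lookup has to be resolved without one.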