author	Eric Dumazet <edumazet@google.com>	2014-08-28 05:49:34 +0200
committer	David S. Miller <davem@davemloft.net>	2014-08-30 05:28:23 +0200
commit	d9b2938aabf757da2d40153489b251d4fc3fdd18 (patch)
tree	6916bac985be5ffe9d282a94cd55bf2a754bdf53 /net
parent	Merge branch 'mlx4-net' (diff)
net: attempt a single high order allocation
In commit ed98df3361f0 ("net: use __GFP_NORETRY for high order allocations") we tried to address one issue caused by order-3 allocations.

We still observe high latencies and system overhead in situations where compaction is not successful.

Instead of trying order-3, order-2, and order-1, do a single order-3 best effort and immediately fall back to plain order-0. This mimics the slub strategy of falling back to the slab minimum order when the high-order allocation used for performance fails.

Order-3 allocations give a performance boost only if they can be done without a recurring and expensive memory scan.

Quoting David:

The page allocator relies on synchronous (sync light) memory compaction after direct reclaim for allocations that don't retry, and deferred compaction doesn't work with this strategy because the allocation order is always decreasing from the previous failed attempt. This means sync light compaction will always be encountered if memory cannot be defragmented or reclaimed several times during the skb_page_frag_refill() iteration.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
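For illustration only (not part of the commit), the following is a minimal userspace C sketch of the strategy the patch adopts: one best-effort high-order allocation, then an immediate order-0 fallback. page_frag_refill(), FRAG_PAGE_ORDER, and the malloc() calls are stand-ins assumed for the sketch; the kernel code uses skb_page_frag_refill(), SKB_FRAG_PAGE_ORDER, and alloc_pages().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096
#define FRAG_PAGE_ORDER 3	/* order-3 = 8 contiguous pages (32 KB) */

struct page_frag {
	void *page;
	size_t size;
	size_t offset;
};

/* One best-effort order-3 attempt, then straight to order-0; the old
 * kernel loop instead walked order-3 -> order-2 -> order-1 -> order-0. */
static bool page_frag_refill(struct page_frag *pfrag)
{
	pfrag->offset = 0;

	pfrag->page = malloc(PAGE_SIZE << FRAG_PAGE_ORDER);
	if (pfrag->page) {
		pfrag->size = PAGE_SIZE << FRAG_PAGE_ORDER;
		return true;
	}

	pfrag->page = malloc(PAGE_SIZE);	/* plain order-0 fallback */
	if (pfrag->page) {
		pfrag->size = PAGE_SIZE;
		return true;
	}
	return false;
}

int main(void)
{
	struct page_frag frag = { 0 };

	if (page_frag_refill(&frag))
		printf("refilled %zu bytes\n", frag.size);
	free(frag.page);
	return 0;
}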
Diffstat (limited to 'net')
-rw-r--r--	net/core/sock.c	30
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 2714811afbd8..29870571c42f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1822,6 +1822,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 							   order);
 				if (page)
 					goto fill_page;
+				/* Do not retry other high order allocations */
+				order = 1;
+				max_page_order = 0;
 			}
 			order--;
 		}
@@ -1869,10 +1872,8 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
  * no guarantee that allocations succeed. Therefore, @sz MUST be
  * less or equal than PAGE_SIZE.
  */
-bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
 {
-	int order;
-
 	if (pfrag->page) {
 		if (atomic_read(&pfrag->page->_count) == 1) {
 			pfrag->offset = 0;
@@ -1883,20 +1884,21 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
 		put_page(pfrag->page);
 	}
 
-	order = SKB_FRAG_PAGE_ORDER;
-	do {
-		gfp_t gfp = prio;
-
-		if (order)
-			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
-		pfrag->page = alloc_pages(gfp, order);
+	pfrag->offset = 0;
+	if (SKB_FRAG_PAGE_ORDER) {
+		pfrag->page = alloc_pages(gfp | __GFP_COMP |
+					  __GFP_NOWARN | __GFP_NORETRY,
+					  SKB_FRAG_PAGE_ORDER);
 		if (likely(pfrag->page)) {
-			pfrag->offset = 0;
-			pfrag->size = PAGE_SIZE << order;
+			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
 			return true;
 		}
-	} while (--order >= 0);
-
+	}
+	pfrag->page = alloc_page(gfp);
+	if (likely(pfrag->page)) {
+		pfrag->size = PAGE_SIZE;
+		return true;
+	}
 	return false;
 }
 EXPORT_SYMBOL(skb_page_frag_refill);
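As context for the unchanged fast path above (the page is reused when its refcount drops back to 1, and callers consume it by advancing pfrag->offset), here is a hedged userspace sketch of how a caller might carve sz-byte chunks out of a refilled frag. frag_alloc() is a hypothetical helper invented for this sketch, not a kernel API; the sz <= PAGE_SIZE contract mirrors the @sz comment in the diff, so the order-0 fallback can always satisfy a request after a refill.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

struct page_frag {
	void *page;
	size_t size;	/* total bytes in the current page run */
	size_t offset;	/* bytes already handed out */
};

/* Hypothetical caller-side helper: hand out sz bytes from the frag,
 * advancing offset; returns NULL when the frag is exhausted and the
 * caller should refill before retrying. */
static void *frag_alloc(struct page_frag *pfrag, size_t sz)
{
	void *p;

	if (pfrag->offset + sz > pfrag->size)
		return NULL;
	p = (char *)pfrag->page + pfrag->offset;
	pfrag->offset += sz;
	return p;
}

int main(void)
{
	struct page_frag frag = { malloc(PAGE_SIZE), PAGE_SIZE, 0 };
	void *a = frag_alloc(&frag, 1500);	/* e.g. one MTU-sized chunk */
	void *b = frag_alloc(&frag, 1500);

	printf("a=%p b=%p remaining=%zu\n", a, b, frag.size - frag.offset);
	free(frag.page);
	return 0;
}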