author     Eric Dumazet <eric.dumazet@gmail.com>    2011-12-21 08:11:44 +0100
committer  David S. Miller <davem@davemloft.net>    2011-12-23 08:15:14 +0100
commit     0fd7bac6b6157eed6cf0cb86a1e88ba29e57c033 (patch)
tree       bcc24e9c63587bc1e8e15ad60654de9c6f72883e /net/packet
parent     rps: fix insufficient bounds checking in store_rps_dev_flow_table_cnt() (diff)
net: relax rcvbuf limits
skb->truesize might be big even for a small packet. It's even bigger after commit 87fb4b7b533 (net: more accurate skb truesize) and with a big MTU.

We should allow queueing at least one packet per receiver, even with a low RCVBUF setting.

Reported-by: Michal Simek <monstr@monstr.eu>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
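To make the effect of the relaxed check concrete, here is a minimal standalone C sketch of the before/after admission logic. The struct sock_sketch type and the may_queue_old()/may_queue_new() helpers are hypothetical illustrations, not kernel code; only the two comparison expressions mirror the patch below.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the few socket fields involved. */
struct sock_sketch {
	_Atomic int rmem_alloc;  /* bytes queued, accounted by skb->truesize */
	int rcvbuf;              /* receive buffer limit (SO_RCVBUF) */
};

/* Old check: rejects whenever queued bytes plus the new packet's
 * truesize would reach rcvbuf, so a single skb whose truesize
 * exceeds rcvbuf can never be queued, even on an empty socket. */
static bool may_queue_old(struct sock_sketch *sk, unsigned int truesize)
{
	return atomic_load(&sk->rmem_alloc) + truesize <
	       (unsigned int)sk->rcvbuf;
}

/* New check: rejects only once at least rcvbuf bytes are already
 * queued, so every receiver can hold at least one packet. */
static bool may_queue_new(struct sock_sketch *sk, unsigned int truesize)
{
	(void)truesize;  /* the incoming packet's size no longer matters */
	return atomic_load(&sk->rmem_alloc) < sk->rcvbuf;
}

int main(void)
{
	/* An empty socket with a small RCVBUF facing one big-truesize skb. */
	struct sock_sketch sk = { .rcvbuf = 2048 };
	unsigned int truesize = 4096;

	printf("old check: %s\n", may_queue_old(&sk, truesize) ? "queue" : "drop");
	printf("new check: %s\n", may_queue_new(&sk, truesize) ? "queue" : "drop");
	return 0;
}

With rcvbuf = 2048 and truesize = 4096, the old check drops the packet even though the queue is empty, while the new one accepts it; that is exactly the scenario the commit message describes.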
Diffstat (limited to 'net/packet')
 net/packet/af_packet.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 82a6f34d39d0..3891702b81df 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (snaplen > res)
 		snaplen = res;
 
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf)
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		goto drop_n_acct;
 
 	if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (po->tp_version <= TPACKET_V2) {
 		if (macoff + snaplen > po->rx_ring.frame_size) {
 			if (po->copy_thresh &&
-			    atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-			    < (unsigned)sk->sk_rcvbuf) {
+			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 				if (skb_shared(skb)) {
 					copy_skb = skb_clone(skb, GFP_ATOMIC);
 				} else {