author	Giuseppe CAVALLARO <peppe.cavallaro@st.com>	2011-10-18 03:39:55 +0200
committer	David S. Miller <davem@davemloft.net>	2011-10-20 01:24:18 +0200
commit	45db81e1590c82ddc735ccd33f8adab02528b3e3
tree	c3438feab991a27b0d1a696c8118d5d28f414e03 /drivers/net/ethernet/stmicro
parent	stmmac: add CHAINED descriptor mode support (V4)
stmmac: limit max_mtu in case of 4KiB and use __netdev_alloc_skb (V2)
The problem with using a big MTU around 4096 bytes is that you end up allocating (4096 + NET_SKB_PAD + NET_IP_ALIGN + sizeof(struct skb_shared_info)) bytes -> 8192 bytes, i.e. order-1 pages.

It's better to limit the MTU to SKB_MAX_HEAD(NET_SKB_PAD), to have no more than one page per skb.

The patch also changes the netdev_alloc_skb_ip_align() call done in init_dma_desc_rings() to a variant that allows GFP_KERNEL allocations, so the driver can load even under memory pressure.

Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
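For illustration only (not part of the patch), the size arithmetic behind the change can be sketched as a small standalone C program. The macros below mirror the SKB_DATA_ALIGN()/SKB_WITH_OVERHEAD()/SKB_MAX_HEAD() helpers from <linux/skbuff.h> of this era, but the constants (4 KiB pages, 64-byte cache lines, NET_SKB_PAD of 64, NET_IP_ALIGN of 2, an approximate sizeof(struct skb_shared_info)) are assumed values, not taken from a real kernel build:

/* Sketch only: approximates the skbuff.h helpers to show why a 4096-byte
 * RX buffer ends up on an order-1 page and what SKB_MAX_HEAD() caps the
 * MTU to. All constants below are assumptions, not real kernel values.
 */
#include <stdio.h>

#define PAGE_SIZE        4096u
#define SMP_CACHE_BYTES  64u
#define NET_SKB_PAD      64u   /* assumed: max(32, L1_CACHE_BYTES) */
#define NET_IP_ALIGN     2u
#define SHINFO_SIZE      320u  /* assumed approx. sizeof(struct skb_shared_info) */

#define SKB_DATA_ALIGN(x)    (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(x) ((x) - SKB_DATA_ALIGN(SHINFO_SIZE))
#define SKB_MAX_HEAD(x)      SKB_WITH_OVERHEAD(PAGE_SIZE - (x))

int main(void)
{
	/* Bytes behind one RX skb for a 4096-byte buffer, as in the changelog:
	 * 4096 + NET_SKB_PAD + NET_IP_ALIGN + sizeof(struct skb_shared_info).
	 * This already exceeds PAGE_SIZE, so the allocation falls back to an
	 * order-1 (8192-byte) chunk for every RX descriptor.
	 */
	unsigned int per_skb = 4096u + NET_SKB_PAD + NET_IP_ALIGN + SHINFO_SIZE;

	/* Largest buffer that still fits in a single page once the head pad
	 * and the shared-info tail are subtracted; this is the cap the patch
	 * uses for max_mtu on non-enhanced-descriptor chips.
	 */
	unsigned int cap = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);

	printf("bytes behind a 4096-byte RX buffer: %u (exceeds a %u-byte page)\n",
	       per_skb, PAGE_SIZE);
	printf("SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN) = %u\n", cap);
	return 0;
}

With these assumed values the program prints roughly 4482 bytes per RX skb against a cap of about 3710 bytes; the exact numbers depend on the architecture and kernel configuration, which the SKB_MAX_HEAD() cap in the patch accounts for automatically.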
Diffstat (limited to 'drivers/net/ethernet/stmicro')
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
1 file changed, 4 insertions, 2 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5eccd996cde0..aeaa15b451de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -474,11 +474,13 @@ static void init_dma_desc_rings(struct net_device *dev)
 	for (i = 0; i < rxsize; i++) {
 		struct dma_desc *p = priv->dma_rx + i;
-		skb = netdev_alloc_skb_ip_align(dev, bfsize);
+		skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN,
+					 GFP_KERNEL);
 		if (unlikely(skb == NULL)) {
 			pr_err("%s: Rx init fails; skb is NULL\n", __func__);
 			break;
 		}
+		skb_reserve(skb, NET_IP_ALIGN);
 		priv->rx_skbuff[i] = skb;
 		priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
 						bfsize, DMA_FROM_DEVICE);
@@ -1401,7 +1403,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 	if (priv->plat->enh_desc)
 		max_mtu = JUMBO_LEN;
 	else
-		max_mtu = BUF_SIZE_4KiB;
+		max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
 	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
 		pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);