author | stephen hemminger <shemminger@vyatta.com> | 2010-02-01 14:45:41 +0100
committer | David S. Miller <davem@davemloft.net> | 2010-02-04 05:32:28 +0100
commit | 3fbd9187d004149fb8a98c9cb51ef9f4a4f66aca (patch)
tree | e307be7f7f8b9c6b41359c98399ea5a356da8b65
parent | net: CONFIG_COMPAT redux (diff)
download | linux-3fbd9187d004149fb8a98c9cb51ef9f4a4f66aca.tar.xz linux-3fbd9187d004149fb8a98c9cb51ef9f4a4f66aca.zip
sky2: handle receive DMA mapping failures
If mapping a receive buffer failed, it was possible to get stuck with an
unmapped receive buffer in the DMA ring.
This would be an extremely rare condition, because the driver had just
released the mapping for the last receive and should therefore be able to
obtain another mapping (in soft-irq context).
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
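
The receive_new() change below reorders the refill: the replacement entry is fully allocated and DMA-mapped before the live ring slot is touched, so a mapping failure simply drops the packet and leaves the old, still-mapped buffer in place. A minimal user-space sketch of that ordering (the ring_entry type and the alloc_and_map/unmap_and_free helpers are hypothetical stand-ins, not the driver's API):

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Hypothetical stand-in for rx_ring_info: one buffer plus its "mapping". */
struct ring_entry {
	void *buf;   /* stands in for re->skb */
	long  dma;   /* stands in for re->data_addr; 0 means unmapped */
};

static bool alloc_and_map(struct ring_entry *e)
{
	e->buf = malloc(4096);
	if (!e->buf)
		return false;        /* allocation or mapping failed */
	e->dma = (long)e->buf;       /* pretend this is the DMA mapping */
	return true;
}

static void unmap_and_free(struct ring_entry *e)
{
	e->dma = 0;
	free(e->buf);
	e->buf = NULL;
}

/*
 * Same ordering as the patched receive_new(): build the replacement entry
 * completely first; only when both allocation and mapping succeed is the
 * ring slot unmapped and overwritten.  On failure the slot is untouched,
 * so it can never be left holding an unmapped buffer.
 */
static void *take_filled_buffer(struct ring_entry *slot)
{
	struct ring_entry nre;
	void *done;

	if (!alloc_and_map(&nre))
		return NULL;         /* drop this packet, keep the old mapping */

	done = slot->buf;            /* hand the filled buffer to the caller */
	slot->dma = 0;               /* stands in for sky2_rx_unmap_skb() */
	*slot = nre;                 /* slot now owns the fresh, mapped buffer */
	return done;
}

int main(void)
{
	struct ring_entry slot;
	void *pkt;

	if (!alloc_and_map(&slot))
		return 1;

	pkt = take_filled_buffer(&slot);
	if (pkt) {
		printf("received %p, slot refilled with %p\n", pkt, slot.buf);
		free(pkt);
	}
	unmap_and_free(&slot);
	return 0;
}
```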
-rw-r--r-- | drivers/net/sky2.c | 61
1 file changed, 42 insertions, 19 deletions
```diff
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index a6ddfc1a9cb2..72c92495a9b3 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1103,18 +1103,39 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
 	int i;
 
 	re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
-	if (unlikely(pci_dma_mapping_error(pdev, re->data_addr)))
-		return -EIO;
+	if (pci_dma_mapping_error(pdev, re->data_addr))
+		goto mapping_error;
 
 	pci_unmap_len_set(re, data_size, size);
 
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-		re->frag_addr[i] = pci_map_page(pdev,
-						skb_shinfo(skb)->frags[i].page,
-						skb_shinfo(skb)->frags[i].page_offset,
-						skb_shinfo(skb)->frags[i].size,
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		re->frag_addr[i] = pci_map_page(pdev, frag->page,
+						frag->page_offset,
+						frag->size,
 						PCI_DMA_FROMDEVICE);
+
+		if (pci_dma_mapping_error(pdev, re->frag_addr[i]))
+			goto map_page_error;
+	}
 	return 0;
+
+map_page_error:
+	while (--i >= 0) {
+		pci_unmap_page(pdev, re->frag_addr[i],
+			       skb_shinfo(skb)->frags[i].size,
+			       PCI_DMA_FROMDEVICE);
+	}
+
+	pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+			 PCI_DMA_FROMDEVICE);
+
+mapping_error:
+	if (net_ratelimit())
+		dev_warn(&pdev->dev, "%s: rx mapping error\n",
+			 skb->dev->name);
+	return -EIO;
 }
 
 static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
@@ -2306,30 +2327,32 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
 				   struct rx_ring_info *re,
 				   unsigned int length)
 {
-	struct sk_buff *skb, *nskb;
+	struct sk_buff *skb;
+	struct rx_ring_info nre;
 	unsigned hdr_space = sky2->rx_data_size;
 
-	/* Don't be tricky about reusing pages (yet) */
-	nskb = sky2_rx_alloc(sky2);
-	if (unlikely(!nskb))
-		return NULL;
+	nre.skb = sky2_rx_alloc(sky2);
+	if (unlikely(!nre.skb))
+		goto nobuf;
+
+	if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
+		goto nomap;
 
 	skb = re->skb;
 	sky2_rx_unmap_skb(sky2->hw->pdev, re);
-
 	prefetch(skb->data);
-	re->skb = nskb;
-	if (sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space)) {
-		dev_kfree_skb(nskb);
-		re->skb = skb;
-		return NULL;
-	}
+	*re = nre;
 
 	if (skb_shinfo(skb)->nr_frags)
 		skb_put_frags(skb, hdr_space, length);
 	else
 		skb_put(skb, length);
 	return skb;
+
+nomap:
+	dev_kfree_skb(nre.skb);
+nobuf:
+	return NULL;
 }
 
 /*
```
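
For the sky2_rx_map_skb() hunk, the design choice is the classic goto-based unwind: map the linear part, then each fragment in order, and on the first fragment failure walk back over the fragments already mapped (in reverse) and finally drop the linear mapping before reporting the error. A rough user-space sketch of that shape, with hypothetical map_one/unmap_one helpers standing in for the PCI DMA calls:

```c
#include <stdlib.h>

#define MAX_FRAGS 4

/* Hypothetical stand-in for rx_ring_info's mapping state. */
struct rx_map {
	void *data_map;              /* stands in for re->data_addr */
	void *frag_map[MAX_FRAGS];   /* stands in for re->frag_addr[] */
};

static void *map_one(size_t len) { return malloc(len); }  /* may fail (NULL) */
static void  unmap_one(void *m)  { free(m); }

/*
 * Same shape as the patched sky2_rx_map_skb(): a failure at any step
 * unwinds every mapping made so far, in reverse order, so the caller
 * never sees a half-mapped entry.
 */
static int map_rx_entry(struct rx_map *re, size_t size, int nr_frags)
{
	int i;

	re->data_map = map_one(size);
	if (!re->data_map)
		goto mapping_error;

	for (i = 0; i < nr_frags; i++) {
		re->frag_map[i] = map_one(4096);
		if (!re->frag_map[i])
			goto map_page_error;
	}
	return 0;

map_page_error:
	while (--i >= 0)             /* undo the fragments already mapped */
		unmap_one(re->frag_map[i]);

	unmap_one(re->data_map);     /* and the linear mapping */

mapping_error:
	return -1;                   /* the driver returns -EIO and warns here */
}

int main(void)
{
	struct rx_map re;

	if (map_rx_entry(&re, 1536, 2) == 0) {
		unmap_one(re.frag_map[1]);
		unmap_one(re.frag_map[0]);
		unmap_one(re.data_map);
	}
	return 0;
}
```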