author      Soren Brinkmann <soren.brinkmann@xilinx.com>    2014-05-05 00:43:02 +0200
committer   David S. Miller <davem@davemloft.net>           2014-05-05 23:11:18 +0200
commit      c8ea5a22bd3b27d68ec2f95483ce8bfe7f114933 (patch)
tree        28ab536723891f977119ac51d2b1dbc00416a2ae /drivers
parent      net: macb: Remove 'unlikely' optimization (diff)
download    linux-c8ea5a22bd3b27d68ec2f95483ce8bfe7f114933.tar.xz
            linux-c8ea5a22bd3b27d68ec2f95483ce8bfe7f114933.zip
net: macb: Fix race between HW and driver
Under "heavy" RX load, the driver cannot handle the descriptors fast enough. In detail, when a descriptor is consumed, its used flag is cleared and once the RX budget is consumed all descriptors with a cleared used flag are prepared to receive more data. Under load though, the HW may constantly receive more data and use those descriptors with a cleared used flag before they are actually prepared for next usage. The head and tail pointers into the RX-ring should always be valid and we can omit clearing and checking of the used flag. Signed-off-by: Soren Brinkmann <soren.brinkmann@xilinx.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/cadence/macb.c	10	----------
1 file changed, 0 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3e13aa31548a..e9daa072ebb4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -599,25 +599,16 @@ static void gem_rx_refill(struct macb *bp)
 {
 	unsigned int entry;
 	struct sk_buff *skb;
-	struct macb_dma_desc *desc;
 	dma_addr_t paddr;
 
 	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
-		u32 addr, ctrl;
-
 		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
-		desc = &bp->rx_ring[entry];
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		addr = desc->addr;
-		ctrl = desc->ctrl;
 		bp->rx_prepared_head++;
 
-		if ((addr & MACB_BIT(RX_USED)))
-			continue;
-
 		if (bp->rx_skbuff[entry] == NULL) {
 			/* allocate sk_buff for this free entry in ring */
 			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
@@ -698,7 +689,6 @@ static int gem_rx(struct macb *bp, int budget)
 		if (!(addr & MACB_BIT(RX_USED)))
 			break;
 
-		desc->addr &= ~MACB_BIT(RX_USED);
 		bp->rx_tail++;
 		count++;