author     Ayaz Abdulla <aabdulla@nvidia.com>      2009-01-09 12:03:44 +0100
committer  David S. Miller <davem@davemloft.net>   2009-01-11 09:07:32 +0100
commit     001eb84bbf7205f8cc541a75364a6a0892b5d0a2 (patch)
tree       bcd1265262417bd61b6f4b37fada2fca80c13b48 /drivers/net/forcedeth.c
parent     netdev: missing validate_address hooks (diff)
forcedeth: xmit lock fix
This patch fixes a potential race condition between the xmit thread and the
xmit completion thread. The calculation of empty tx descriptors is not
performed under the lock, so the completion thread can finish all pending
tx work between the check and netif_stop_queue(), after which the stop flag
is still set. This leaves the tx queue stopped with no one left to wake it.
Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
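The window described above can be modeled outside the kernel. The sketch below is a minimal userspace C/pthreads model of the race and of the patched ordering, not the driver code itself: empty_slots, queue_stopped, xmit_racy(), xmit_fixed() and tx_done() are hypothetical names that only loosely mirror nv_get_empty_tx_slots(), netif_stop_queue()/netif_wake_queue(), np->tx_stop and the xmit/completion paths in forcedeth.

/*
 * Userspace model of the race fixed by this patch (not driver code):
 * an "xmit" side checks the free-slot count and may stop the queue,
 * while a "completion" side frees slots and wakes the queue. If the
 * free-slot check happens outside the lock, the completion side can
 * drain everything between the check and the stop, leaving the queue
 * stopped with nobody left to wake it.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int empty_slots;        /* models free tx descriptors          */
static bool queue_stopped;     /* models netif_stop_queue()/wake      */

/* Old ordering: slot check performed outside the lock. */
static void xmit_racy(int entries)
{
	int slots = empty_slots;      /* read without holding the lock  */
	/* the completion side may free every slot right here, and the
	 * stop below is then never undone                              */
	if (slots <= entries) {
		pthread_mutex_lock(&lock);
		queue_stopped = true;
		pthread_mutex_unlock(&lock);
	}
}

/* Patched ordering: check and stop both happen under the lock. */
static void xmit_fixed(int entries)
{
	pthread_mutex_lock(&lock);
	if (empty_slots <= entries) {
		queue_stopped = true;
		pthread_mutex_unlock(&lock);
		return;
	}
	pthread_mutex_unlock(&lock);
}

/* Completion side: frees slots and wakes the queue under the same lock. */
static void tx_done(int freed)
{
	pthread_mutex_lock(&lock);
	empty_slots += freed;
	if (queue_stopped)
		queue_stopped = false;    /* models netif_wake_queue()  */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	/* Single-threaded walk-through; the comments above mark where a
	 * completion thread could interleave in the real driver.       */
	empty_slots = 1;
	xmit_racy(4);                 /* too few slots: queue stops     */
	printf("racy path:  stopped=%d\n", queue_stopped);
	tx_done(8);                   /* completion frees slots, wakes  */
	printf("after wake: stopped=%d\n", queue_stopped);

	empty_slots = 1;
	xmit_fixed(4);                /* patched ordering, same outcome */
	printf("fixed path: stopped=%d\n", queue_stopped);
	return 0;
}

Because the completion side takes the same lock before freeing slots and waking the queue, moving the free-slot check inside the lock in the xmit path guarantees the check and the stop are atomic with respect to completions, which is exactly what the diff below does in both nv_start_xmit() and nv_start_xmit_optimized().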
Diffstat
-rw-r--r--   drivers/net/forcedeth.c   6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5b68dc20168d..6905ec9467df 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2096,14 +2096,15 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
+	spin_lock_irqsave(&np->lock, flags);
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
+	spin_unlock_irqrestore(&np->lock, flags);
 
 	start_tx = put_tx = np->put_tx.orig;
 
@@ -2214,14 +2215,15 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
+	spin_lock_irqsave(&np->lock, flags);
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
+	spin_unlock_irqrestore(&np->lock, flags);
 
 	start_tx = put_tx = np->put_tx.ex;
 	start_tx_ctx = np->put_tx_ctx;