-rw-r--r--  drivers/net/ixgb/ixgb.h         |  1
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c |  1
-rw-r--r--  drivers/net/ixgb/ixgb_hw.c      |  3
-rw-r--r--  drivers/net/ixgb/ixgb_main.c    | 57
4 files changed, 55 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 50ffe90488ff..f4aba4355b19 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -171,6 +171,7 @@ struct ixgb_adapter {
 
 	/* TX */
 	struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
+	unsigned int restart_queue;
 	unsigned long timeo_start;
 	uint32_t tx_cmd_type;
 	uint64_t hw_csum_tx_good;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index cd22523fb035..82c044d6e08a 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -79,6 +79,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
 	{"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
 	{"tx_deferred_ok", IXGB_STAT(stats.dc)},
 	{"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
+	{"tx_restart_queue", IXGB_STAT(restart_queue) },
 	{"rx_long_length_errors", IXGB_STAT(stats.roc)},
 	{"rx_short_length_errors", IXGB_STAT(stats.ruc)},
 #ifdef NETIF_F_TSO
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 02089b64e42c..ecbf45861c68 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -399,8 +399,9 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
 	/* Zero out the other 15 receive addresses. */
 	DEBUGOUT("Clearing RAR[1-15]\n");
 	for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
-		IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+		/* Write high reg first to disable the AV bit first */
 		IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+		IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
 	}
 
 	return;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index e628126c9c49..a083a9189230 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION	"1.0.117-k2"DRIVERNAPI
+#define DRV_VERSION	"1.0.126-k2"DRIVERNAPI
 char ixgb_driver_version[] = DRV_VERSION;
 static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -1287,6 +1287,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	struct ixgb_buffer *buffer_info;
 	int len = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
 
@@ -1298,6 +1299,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	while(len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, IXGB_MAX_DATA_PER_TXD);
+		/* Workaround for premature desc write-backs
+		 * in TSO mode.  Append 4-byte sentinel desc */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+
 		buffer_info->length = size;
 		WARN_ON(buffer_info->dma != 0);
 		buffer_info->dma =
@@ -1324,6 +1330,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		while(len) {
 			buffer_info = &tx_ring->buffer_info[i];
 			size = min(len, IXGB_MAX_DATA_PER_TXD);
+
+			/* Workaround for premature desc write-backs
+			 * in TSO mode.  Append 4-byte sentinel desc */
+			if (unlikely(mss && !nr_frags && size == len
+			             && size > 8))
+				size -= 4;
+
 			buffer_info->length = size;
 			buffer_info->dma =
 				pci_map_page(adapter->pdev,
@@ -1398,11 +1411,43 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 	IXGB_WRITE_REG(&adapter->hw, TDT, i);
 }
 
+static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
+{
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+
+	netif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_start_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
+static int ixgb_maybe_stop_tx(struct net_device *netdev,
+                              struct ixgb_desc_ring *tx_ring, int size)
+{
+	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __ixgb_maybe_stop_tx(netdev, size);
+}
+
+
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
 			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->date */ + \
+	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
+	+ 1 /* one more needed for sentinel TSO workaround */
 
 static int
 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1430,7 +1475,8 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	spin_lock_irqsave(&adapter->tx_lock, flags);
 #endif
 
-	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
+	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
+	                                DESC_NEEDED))) {
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		return NETDEV_TX_BUSY;
@@ -1468,8 +1514,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 #ifdef NETIF_F_LLTX
 	/* Make sure there is space in the ring for the next send. */
-	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
-		netif_stop_queue(netdev);
+	ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
 
 	spin_unlock_irqrestore(&adapter->tx_lock, flags);
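
Note (not part of the patch): the core of the ixgb_main.c change is the stop-then-recheck pattern. The transmit path stops the queue, issues a full memory barrier, and only then re-reads the free-descriptor count, so a cleanup running on another CPU cannot free descriptors in the window between the check and the stop without the stop path noticing. Below is a minimal userspace sketch of that ordering using C11 atomics in place of netif_stop_queue()/smp_mb(). All names here (ring_unused, maybe_stop_tx, clean_tx_irq, RING_SIZE, the DESC_NEEDED value) are invented for illustration, and the cleanup-side wake-up is a generic assumption about the matching driver path, not something shown in this diff.

/* Illustrative userspace analogue of the stop/barrier/recheck shape of
 * __ixgb_maybe_stop_tx().  Build with: cc -std=c11 sketch.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE   256
#define DESC_NEEDED  42              /* stand-in for the worst-case macro */

static atomic_uint ring_used;        /* descriptors currently in flight */
static atomic_bool queue_stopped;

static unsigned int ring_unused(void)
{
	return RING_SIZE - atomic_load(&ring_used);
}

/* Producer side: 0 means the caller may queue a packet, -1 means busy. */
static int maybe_stop_tx(unsigned int needed)
{
	if (ring_unused() >= needed)
		return 0;

	atomic_store(&queue_stopped, true);
	/* Full barrier: the re-read below must happen after the stop is
	 * visible, pairing with the barrier in the cleanup path. */
	atomic_thread_fence(memory_order_seq_cst);

	if (ring_unused() < needed)
		return -1;               /* still full: stay stopped */

	atomic_store(&queue_stopped, false);   /* a reprieve */
	return 0;
}

/* Consumer (cleanup) side: release descriptors, then wake the producer. */
static void clean_tx_irq(unsigned int completed)
{
	atomic_fetch_sub(&ring_used, completed);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&queue_stopped) && ring_unused() >= DESC_NEEDED)
		atomic_store(&queue_stopped, false);
}

int main(void)
{
	atomic_store(&ring_used, RING_SIZE - 10);        /* nearly full */
	printf("maybe_stop_tx -> %d\n", maybe_stop_tx(DESC_NEEDED));
	clean_tx_irq(100);                               /* free some room */
	printf("maybe_stop_tx -> %d\n", maybe_stop_tx(DESC_NEEDED));
	return 0;
}

Without the barrier between stopping and re-reading, the re-read could be ordered ahead of the stop and miss room freed by the other side, leaving the queue stopped indefinitely; the patch's restart_queue counter (exposed as the tx_restart_queue ethtool stat) counts how often the "reprieve" branch rescues such a near-miss.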