author | Jens Axboe <jens.axboe@oracle.com> | 2010-05-21 21:27:26 +0200 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2010-05-21 21:27:26 +0200 |
commit | ee9a3607fb03e804ddf624544105f4e34260c380 (patch) | |
tree | ce41b6e0fa10982a306f6c142a92dbf3c9961284 /drivers/net/e1000/e1000_main.c | |
parent | pipe: set lower and upper limit on max pages in the pipe page array (diff) | |
parent | Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jac... (diff) | |
download | linux-ee9a3607fb03e804ddf624544105f4e34260c380.tar.xz linux-ee9a3607fb03e804ddf624544105f4e34260c380.zip |
Merge branch 'master' into for-2.6.35
Conflicts:
	fs/ext3/fsync.c
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r-- | drivers/net/e1000/e1000_main.c | 429 |
1 file changed, 225 insertions, 204 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index b15ece26ed84..ebdea0891665 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -31,7 +31,7 @@ char e1000_driver_name[] = "e1000"; static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; -#define DRV_VERSION "7.3.21-k5-NAPI" +#define DRV_VERSION "7.3.21-k6-NAPI" const char e1000_driver_version[] = DRV_VERSION; static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; @@ -214,6 +214,17 @@ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); /** + * e1000_get_hw_dev - return device + * used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** * e1000_init_module - Driver Registration Routine * * e1000_init_module is the first routine called when the driver is @@ -223,18 +234,17 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static int __init e1000_init_module(void) { int ret; - printk(KERN_INFO "%s - version %s\n", - e1000_driver_string, e1000_driver_version); + pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version); - printk(KERN_INFO "%s\n", e1000_copyright); + pr_info("%s\n", e1000_copyright); ret = pci_register_driver(&e1000_driver); if (copybreak != COPYBREAK_DEFAULT) { if (copybreak == 0) - printk(KERN_INFO "e1000: copybreak disabled\n"); + pr_info("copybreak disabled\n"); else - printk(KERN_INFO "e1000: copybreak enabled for " - "packets <= %u bytes\n", copybreak); + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); } return ret; } @@ -265,8 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter) err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, netdev); if (err) { - DPRINTK(PROBE, ERR, - "Unable to allocate interrupt Error: %d\n", err); + e_err("Unable to allocate interrupt Error: %d\n", err); } return err; @@ -648,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter) ew32(WUC, 0); if (e1000_init_hw(hw)) - DPRINTK(PROBE, ERR, "Hardware Error\n"); + e_err("Hardware Error\n"); e1000_update_mng_vlan(adapter); /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ @@ -689,8 +698,7 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter) data = kmalloc(eeprom.len, GFP_KERNEL); if (!data) { - printk(KERN_ERR "Unable to allocate memory to dump EEPROM" - " data\n"); + pr_err("Unable to allocate memory to dump EEPROM data\n"); return; } @@ -702,30 +710,25 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter) csum_new += data[i] + (data[i + 1] << 8); csum_new = EEPROM_SUM - csum_new; - printk(KERN_ERR "/*********************/\n"); - printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old); - printk(KERN_ERR "Calculated : 0x%04x\n", csum_new); + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); - printk(KERN_ERR "Offset Values\n"); - printk(KERN_ERR "======== ======\n"); + pr_err("Offset Values\n"); + pr_err("======== ======\n"); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); - printk(KERN_ERR "Include this output when contacting your support " - "provider.\n"); - printk(KERN_ERR "This is not a software error! Something bad " - "happened to your hardware or\n"); - printk(KERN_ERR "EEPROM image. 
Ignoring this " - "problem could result in further problems,\n"); - printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n"); - printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, " - "which is invalid\n"); - printk(KERN_ERR "and requires you to set the proper MAC " - "address manually before continuing\n"); - printk(KERN_ERR "to enable this network device.\n"); - printk(KERN_ERR "Please inspect the EEPROM dump and report the issue " - "to your hardware vendor\n"); - printk(KERN_ERR "or Intel Customer Support.\n"); - printk(KERN_ERR "/*********************/\n"); + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); kfree(data); } @@ -823,16 +826,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (err) return err; - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); if (err) { - E1000_ERR("No usable DMA configuration, " - "aborting\n"); + pr_err("No usable DMA config, aborting\n"); goto err_dma; } } @@ -922,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* initialize eeprom parameters */ if (e1000_init_eeprom_params(hw)) { - E1000_ERR("EEPROM initialization failed\n"); + e_err("EEPROM initialization failed\n"); goto err_eeprom; } @@ -933,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* make sure the EEPROM is good */ if (e1000_validate_eeprom_checksum(hw) < 0) { - DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); + e_err("The EEPROM Checksum Is Not Valid\n"); e1000_dump_eeprom(adapter); /* * set MAC address to all zeroes to invalidate and temporary @@ -947,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev, } else { /* copy the MAC address out of the EEPROM */ if (e1000_read_mac_addr(hw)) - DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); + e_err("EEPROM Read Error\n"); } /* don't block initalization here due to bad MAC address */ memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->perm_addr)) - DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); + e_err("Invalid MAC Address\n"); e1000_get_bus_info(hw); @@ -1035,8 +1038,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev, adapter->wol = adapter->eeprom_wol; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if 
(err) + goto err_register; + /* print bus type/speed/width info */ - DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", + e_info("(PCI%s:%s:%s) ", ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" : (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" : @@ -1044,20 +1055,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev, (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"), ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit")); - printk("%pM\n", netdev->dev_addr); - - /* reset the hardware with the new settings */ - e1000_reset(adapter); - - strcpy(netdev->name, "eth%d"); - err = register_netdev(netdev); - if (err) - goto err_register; + e_info("%pM\n", netdev->dev_addr); /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); + e_info("Intel(R) PRO/1000 Network Connection\n"); cards_found++; return 0; @@ -1157,7 +1160,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) /* identify the MAC */ if (e1000_set_mac_type(hw)) { - DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); + e_err("Unknown MAC Type\n"); return -EIO; } @@ -1190,7 +1193,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) adapter->num_rx_queues = 1; if (e1000_alloc_queues(adapter)) { - DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); + e_err("Unable to allocate memory for queues\n"); return -ENOMEM; } @@ -1384,8 +1387,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, size = sizeof(struct e1000_buffer) * txdr->count; txdr->buffer_info = vmalloc(size); if (!txdr->buffer_info) { - DPRINTK(PROBE, ERR, - "Unable to allocate memory for the transmit descriptor ring\n"); + e_err("Unable to allocate memory for the Tx descriptor ring\n"); return -ENOMEM; } memset(txdr->buffer_info, 0, size); @@ -1395,12 +1397,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, txdr->size = txdr->count * sizeof(struct e1000_tx_desc); txdr->size = ALIGN(txdr->size, 4096); - txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); if (!txdr->desc) { setup_tx_desc_die: vfree(txdr->buffer_info); - DPRINTK(PROBE, ERR, - "Unable to allocate memory for the transmit descriptor ring\n"); + e_err("Unable to allocate memory for the Tx descriptor ring\n"); return -ENOMEM; } @@ -1408,29 +1410,32 @@ setup_tx_desc_die: if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { void *olddesc = txdr->desc; dma_addr_t olddma = txdr->dma; - DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes " - "at %p\n", txdr->size, txdr->desc); + e_err("txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); /* Try again, without freeing the previous */ - txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); /* Failed allocation, critical failure */ if (!txdr->desc) { - pci_free_consistent(pdev, txdr->size, olddesc, olddma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); goto setup_tx_desc_die; } if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { /* give up */ - pci_free_consistent(pdev, txdr->size, txdr->desc, - txdr->dma); - pci_free_consistent(pdev, txdr->size, olddesc, olddma); - DPRINTK(PROBE, ERR, - "Unable to allocate aligned memory " - "for the transmit descriptor ring\n"); + 
dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err("Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); vfree(txdr->buffer_info); return -ENOMEM; } else { /* Free old allocation, new allocation was successful */ - pci_free_consistent(pdev, txdr->size, olddesc, olddma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); } } memset(txdr->desc, 0, txdr->size); @@ -1456,8 +1461,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); if (err) { - DPRINTK(PROBE, ERR, - "Allocation for Tx Queue %u failed\n", i); + e_err("Allocation for Tx Queue %u failed\n", i); for (i-- ; i >= 0; i--) e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); @@ -1577,8 +1581,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, size = sizeof(struct e1000_buffer) * rxdr->count; rxdr->buffer_info = vmalloc(size); if (!rxdr->buffer_info) { - DPRINTK(PROBE, ERR, - "Unable to allocate memory for the receive descriptor ring\n"); + e_err("Unable to allocate memory for the Rx descriptor ring\n"); return -ENOMEM; } memset(rxdr->buffer_info, 0, size); @@ -1590,11 +1593,11 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, rxdr->size = rxdr->count * desc_len; rxdr->size = ALIGN(rxdr->size, 4096); - rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); if (!rxdr->desc) { - DPRINTK(PROBE, ERR, - "Unable to allocate memory for the receive descriptor ring\n"); + e_err("Unable to allocate memory for the Rx descriptor ring\n"); setup_rx_desc_die: vfree(rxdr->buffer_info); return -ENOMEM; @@ -1604,31 +1607,33 @@ setup_rx_desc_die: if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { void *olddesc = rxdr->desc; dma_addr_t olddma = rxdr->dma; - DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes " - "at %p\n", rxdr->size, rxdr->desc); + e_err("rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); /* Try again, without freeing the previous */ - rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); /* Failed allocation, critical failure */ if (!rxdr->desc) { - pci_free_consistent(pdev, rxdr->size, olddesc, olddma); - DPRINTK(PROBE, ERR, - "Unable to allocate memory " - "for the receive descriptor ring\n"); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err("Unable to allocate memory for the Rx descriptor " + "ring\n"); goto setup_rx_desc_die; } if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { /* give up */ - pci_free_consistent(pdev, rxdr->size, rxdr->desc, - rxdr->dma); - pci_free_consistent(pdev, rxdr->size, olddesc, olddma); - DPRINTK(PROBE, ERR, - "Unable to allocate aligned memory " - "for the receive descriptor ring\n"); + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err("Unable to allocate aligned memory for the Rx " + "descriptor ring\n"); goto setup_rx_desc_die; } else { /* Free old allocation, new allocation was successful */ - pci_free_consistent(pdev, rxdr->size, olddesc, olddma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); } } memset(rxdr->desc, 0, rxdr->size); @@ -1655,8 +1660,7 
@@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); if (err) { - DPRINTK(PROBE, ERR, - "Allocation for Rx Queue %u failed\n", i); + e_err("Allocation for Rx Queue %u failed\n", i); for (i-- ; i >= 0; i--) e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); @@ -1804,7 +1808,8 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter, vfree(tx_ring->buffer_info); tx_ring->buffer_info = NULL; - pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); tx_ring->desc = NULL; } @@ -1829,12 +1834,12 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, { if (buffer_info->dma) { if (buffer_info->mapped_as_page) - pci_unmap_page(adapter->pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_TODEVICE); + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); else - pci_unmap_single(adapter->pdev, buffer_info->dma, + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, buffer_info->length, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); buffer_info->dma = 0; } if (buffer_info->skb) { @@ -1912,7 +1917,8 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter, vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; - pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); rx_ring->desc = NULL; } @@ -1952,14 +1958,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, buffer_info = &rx_ring->buffer_info[i]; if (buffer_info->dma && adapter->clean_rx == e1000_clean_rx_irq) { - pci_unmap_single(pdev, buffer_info->dma, + dma_unmap_single(&pdev->dev, buffer_info->dma, buffer_info->length, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); } else if (buffer_info->dma && adapter->clean_rx == e1000_clean_jumbo_rx_irq) { - pci_unmap_page(pdev, buffer_info->dma, - buffer_info->length, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_FROM_DEVICE); } buffer_info->dma = 0; @@ -2098,7 +2104,6 @@ static void e1000_set_rx_mode(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; bool use_uc = false; - struct dev_addr_list *mc_ptr; u32 rctl; u32 hash_value; int i, rar_entries = E1000_RAR_ENTRIES; @@ -2106,7 +2111,7 @@ static void e1000_set_rx_mode(struct net_device *netdev) u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); if (!mcarray) { - DPRINTK(PROBE, ERR, "memory allocation failed\n"); + e_err("memory allocation failed\n"); return; } @@ -2156,19 +2161,17 @@ static void e1000_set_rx_mode(struct net_device *netdev) e1000_rar_set(hw, ha->addr, i++); } - WARN_ON(i == rar_entries); - - netdev_for_each_mc_addr(mc_ptr, netdev) { + netdev_for_each_mc_addr(ha, netdev) { if (i == rar_entries) { /* load any remaining addresses into the hash table */ u32 hash_reg, hash_bit, mta; - hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr); + hash_value = e1000_hash_mc_addr(hw, ha->addr); hash_reg = (hash_value >> 5) & 0x7F; hash_bit = hash_value & 0x1F; mta = (1 << hash_bit); mcarray[hash_reg] |= mta; } else { - e1000_rar_set(hw, mc_ptr->da_addr, i++); + e1000_rar_set(hw, ha->addr, i++); } } @@ -2302,16 +2305,16 @@ static void e1000_watchdog(unsigned long data) &adapter->link_duplex); ctrl = er32(CTRL); - printk(KERN_INFO "e1000: %s NIC 
Link is Up %d Mbps %s, " - "Flow Control: %s\n", - netdev->name, - adapter->link_speed, - adapter->link_duplex == FULL_DUPLEX ? - "Full Duplex" : "Half Duplex", - ((ctrl & E1000_CTRL_TFCE) && (ctrl & - E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & - E1000_CTRL_RFCE) ? "RX" : ((ctrl & - E1000_CTRL_TFCE) ? "TX" : "None" ))); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; @@ -2341,8 +2344,8 @@ static void e1000_watchdog(unsigned long data) if (netif_carrier_ok(netdev)) { adapter->link_speed = 0; adapter->link_duplex = 0; - printk(KERN_INFO "e1000: %s NIC Link is Down\n", - netdev->name); + pr_info("%s NIC Link is Down\n", + netdev->name); netif_carrier_off(netdev); if (!test_bit(__E1000_DOWN, &adapter->flags)) @@ -2381,6 +2384,22 @@ link_up: } } + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* + * Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + /* Cause software interrupt to ensure rx ring is cleaned */ ew32(ICS, E1000_ICS_RXDMT0); @@ -2525,8 +2544,6 @@ set_itr_now: adapter->itr = new_itr; ew32(ITR, 1000000000 / (new_itr * 256)); } - - return; } #define E1000_TX_FLAGS_CSUM 0x00000001 @@ -2632,8 +2649,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, break; default: if (unlikely(net_ratelimit())) - DPRINTK(DRV, WARNING, - "checksum_partial proto=%x!\n", skb->protocol); + e_warn("checksum_partial proto=%x!\n", skb->protocol); break; } @@ -2715,9 +2731,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter, /* set time_stamp *before* dma to help avoid a possible race */ buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = false; - buffer_info->dma = pci_map_single(pdev, skb->data + offset, - size, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, buffer_info->dma)) + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; buffer_info->next_to_watch = i; @@ -2761,10 +2778,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter, buffer_info->length = size; buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = true; - buffer_info->dma = pci_map_page(pdev, frag->page, + buffer_info->dma = dma_map_page(&pdev->dev, frag->page, offset, size, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, buffer_info->dma)) + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; buffer_info->next_to_watch = i; @@ -2930,7 +2947,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; - unsigned int len = skb->len - skb->data_len; + unsigned int len = skb_headlen(skb); unsigned int nr_frags; unsigned 
int mss; int count = 0; @@ -2976,12 +2993,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* fall through */ pull_size = min((unsigned int)4, skb->data_len); if (!__pskb_pull_tail(skb, pull_size)) { - DPRINTK(DRV, ERR, - "__pskb_pull_tail failed.\n"); + e_err("__pskb_pull_tail failed.\n"); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - len = skb->len - skb->data_len; + len = skb_headlen(skb); break; default: /* do nothing */ @@ -3125,7 +3141,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { - DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); + e_err("Invalid MTU setting\n"); return -EINVAL; } @@ -3133,7 +3149,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) switch (hw->mac_type) { case e1000_undefined ... e1000_82542_rev2_1: if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { - DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); + e_err("Jumbo Frames not supported.\n"); return -EINVAL; } break; @@ -3171,8 +3187,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; - printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n", - netdev->name, netdev->mtu, new_mtu); + pr_info("%s changing MTU from %d to %d\n", + netdev->name, netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) @@ -3485,17 +3501,17 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, !(er32(STATUS) & E1000_STATUS_TXOFF)) { /* detected Tx unit hang */ - DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" - " Tx Queue <%lu>\n" - " TDH <%x>\n" - " TDT <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "buffer_info[next_to_clean]\n" - " time_stamp <%lx>\n" - " next_to_watch <%x>\n" - " jiffies <%lx>\n" - " next_to_watch.status <%x>\n", + e_err("Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", (unsigned long)((tx_ring - adapter->tx_ring) / sizeof(struct e1000_tx_ring)), readl(hw->hw_addr + tx_ring->tdh), @@ -3635,8 +3651,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, cleaned = true; cleaned_count++; - pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&pdev->dev, buffer_info->dma, + buffer_info->length, DMA_FROM_DEVICE); buffer_info->dma = 0; length = le16_to_cpu(rx_desc->length); @@ -3734,7 +3750,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* eth type trans needs skb->data to point to something */ if (!pskb_may_pull(skb, ETH_HLEN)) { - DPRINTK(DRV, ERR, "pskb_may_pull failed.\n"); + e_err("pskb_may_pull failed.\n"); dev_kfree_skb(skb); goto next_desc; } @@ -3769,6 +3785,31 @@ next_desc: return cleaned; } +/* + * this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static void e1000_check_copybreak(struct net_device *netdev, + struct e1000_buffer *buffer_info, + u32 length, struct sk_buff **skb) +{ + struct sk_buff *new_skb; + + if (length > copybreak) + return; + + new_skb = netdev_alloc_skb_ip_align(netdev, length); + if (!new_skb) + return; + + skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, + (*skb)->data - NET_IP_ALIGN, + length + NET_IP_ALIGN); + /* 
save the skb in buffer_info as good */ + buffer_info->skb = *skb; + *skb = new_skb; +} + /** * e1000_clean_rx_irq - Send received data up the network stack; legacy * @adapter: board private structure @@ -3818,8 +3859,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, cleaned = true; cleaned_count++; - pci_unmap_single(pdev, buffer_info->dma, buffer_info->length, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, buffer_info->dma, + buffer_info->length, DMA_FROM_DEVICE); buffer_info->dma = 0; length = le16_to_cpu(rx_desc->length); @@ -3834,8 +3875,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (adapter->discarding) { /* All receives must fit into a single buffer */ - E1000_DBG("%s: Receive packet consumed multiple" - " buffers\n", netdev->name); + e_info("Receive packet consumed multiple buffers\n"); /* recycle */ buffer_info->skb = skb; if (status & E1000_RXD_STAT_EOP) @@ -3868,26 +3908,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, total_rx_bytes += length; total_rx_packets++; - /* code added for copybreak, this should improve - * performance for small packets with large amounts - * of reassembly being done in the stack */ - if (length < copybreak) { - struct sk_buff *new_skb = - netdev_alloc_skb_ip_align(netdev, length); - if (new_skb) { - skb_copy_to_linear_data_offset(new_skb, - -NET_IP_ALIGN, - (skb->data - - NET_IP_ALIGN), - (length + - NET_IP_ALIGN)); - /* save the skb in buffer_info as good */ - buffer_info->skb = skb; - skb = new_skb; - } - /* else just continue with the old one */ - } - /* end copybreak code */ + e1000_check_copybreak(netdev, buffer_info, length, &skb); + skb_put(skb, length); /* Receive Checksum Offload */ @@ -3965,8 +3987,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, /* Fix for errata 23, can't cross 64kB boundary */ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { struct sk_buff *oldskb = skb; - DPRINTK(PROBE, ERR, "skb align check failed: %u bytes " - "at %p\n", bufsz, skb->data); + e_err("skb align check failed: %u bytes at %p\n", + bufsz, skb->data); /* Try again, without freeing the previous */ skb = netdev_alloc_skb_ip_align(netdev, bufsz); /* Failed allocation, critical failure */ @@ -3999,11 +4021,11 @@ check_page: } if (!buffer_info->dma) { - buffer_info->dma = pci_map_page(pdev, + buffer_info->dma = dma_map_page(&pdev->dev, buffer_info->page, 0, - buffer_info->length, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(pdev, buffer_info->dma)) { + buffer_info->length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { put_page(buffer_info->page); dev_kfree_skb(skb); buffer_info->page = NULL; @@ -4074,8 +4096,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, /* Fix for errata 23, can't cross 64kB boundary */ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { struct sk_buff *oldskb = skb; - DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " - "at %p\n", bufsz, skb->data); + e_err("skb align check failed: %u bytes at %p\n", + bufsz, skb->data); /* Try again, without freeing the previous */ skb = netdev_alloc_skb_ip_align(netdev, bufsz); /* Failed allocation, critical failure */ @@ -4099,11 +4121,11 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, buffer_info->skb = skb; buffer_info->length = adapter->rx_buffer_len; map_skb: - buffer_info->dma = pci_map_single(pdev, + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, buffer_info->length, - PCI_DMA_FROMDEVICE); - if 
(pci_dma_mapping_error(pdev, buffer_info->dma)) { + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { dev_kfree_skb(skb); buffer_info->skb = NULL; buffer_info->dma = 0; @@ -4120,16 +4142,15 @@ map_skb: if (!e1000_check_64k_bound(adapter, (void *)(unsigned long)buffer_info->dma, adapter->rx_buffer_len)) { - DPRINTK(RX_ERR, ERR, - "dma align check failed: %u bytes at %p\n", - adapter->rx_buffer_len, - (void *)(unsigned long)buffer_info->dma); + e_err("dma align check failed: %u bytes at %p\n", + adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); dev_kfree_skb(skb); buffer_info->skb = NULL; - pci_unmap_single(pdev, buffer_info->dma, + dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_buffer_len, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); buffer_info->dma = 0; adapter->alloc_rx_buff_failed++; @@ -4335,7 +4356,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw) int ret_val = pci_set_mwi(adapter->pdev); if (ret_val) - DPRINTK(PROBE, ERR, "Error in setting MWI\n"); + e_err("Error in setting MWI\n"); } void e1000_pci_clear_mwi(struct e1000_hw *hw) @@ -4466,7 +4487,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) /* Fiber NICs only allow 1000 gbps Full duplex */ if ((hw->media_type == e1000_media_type_fiber) && spddplx != (SPEED_1000 + DUPLEX_FULL)) { - DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); + e_err("Unsupported Speed/Duplex configuration\n"); return -EINVAL; } @@ -4489,7 +4510,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: - DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); + e_err("Unsupported Speed/Duplex configuration\n"); return -EINVAL; } return 0; @@ -4612,7 +4633,7 @@ static int e1000_resume(struct pci_dev *pdev) else err = pci_enable_device_mem(pdev); if (err) { - printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n"); + pr_err("Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); @@ -4715,7 +4736,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) else err = pci_enable_device_mem(pdev); if (err) { - printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n"); + pr_err("Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); @@ -4746,7 +4767,7 @@ static void e1000_io_resume(struct pci_dev *pdev) if (netif_running(netdev)) { if (e1000_up(adapter)) { - printk("e1000: can't bring device back up after reset\n"); + pr_info("can't bring device back up after reset\n"); return; } } |
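
Because the patch body above is dense, a few illustrative sketches of the conversions it performs follow. First, the logging change: the hard-coded "e1000: " prefixes disappear from the pr_info()/pr_err() strings because those macros pass their format through pr_fmt(). The snippet below is a minimal sketch of that convention only; the actual pr_fmt()/e_err()/e_info() definitions the driver uses live in headers outside this file and are not part of this diff.

```c
/*
 * Illustrative sketch: defining pr_fmt() once, before the first include
 * that uses it, prefixes every pr_*() message with the module name, so
 * the individual format strings no longer need "e1000: ".
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
	/* Emits "e1000: copybreak disabled" when built as the e1000 module. */
	pr_info("copybreak disabled\n");
	return 0;
}
module_init(example_init);
```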
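The probe-path change from pci_set_dma_mask()/pci_set_consistent_dma_mask() to the generic DMA API follows the pattern sketched below: try a 64-bit mask first, fall back to 32-bit, then allocate descriptor memory with dma_alloc_coherent(). This is a minimal sketch around a bare struct pci_dev, not the driver's real probe routine; the 4096-byte size and the immediate free are placeholders for the ring handling.

```c
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

static int example_setup_dma(struct pci_dev *pdev)
{
	dma_addr_t ring_dma;
	void *ring;
	int err;

	/* Prefer 64-bit DMA; fall back to 32-bit, as e1000_probe() now does. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!err)
			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			return err;	/* no usable DMA configuration */
	}

	/* dma_alloc_coherent() replaces pci_alloc_consistent(). */
	ring = dma_alloc_coherent(&pdev->dev, 4096, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program the ring base into the NIC, run, then on teardown: */
	dma_free_coherent(&pdev->dev, 4096, ring, ring_dma);
	return 0;
}
```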
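For per-packet buffers the patch swaps pci_map_single()/pci_map_page() for dma_map_single()/dma_map_page(), checking the result with dma_mapping_error(). A stripped-down sketch of that mapping lifecycle follows; 'dev', 'data', and 'len' are illustrative parameters, not the driver's buffer_info bookkeeping.

```c
#include <linux/dma-mapping.h>

static int example_xmit_buffer(struct device *dev, void *data, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;	/* e1000_tx_map() unwinds earlier mappings here */

	/* ... write 'dma' and 'len' into a Tx descriptor, kick the hardware ... */

	/* On the Tx-complete path the mapping is torn down again: */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
```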
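The multicast-filter rewrite drops struct dev_addr_list in favour of struct netdev_hw_addr, so unicast and multicast entries are walked the same way through ha->addr. A hedged sketch, with example_add_filter() as a hypothetical stand-in for e1000_rar_set() and the hash-table fallback:

```c
#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Hypothetical stand-in for programming a receive-address or hash filter. */
static void example_add_filter(const unsigned char *addr)
{
	pr_info("programming filter for %pM\n", addr);
}

static void example_sync_mc_filters(struct net_device *netdev)
{
	struct netdev_hw_addr *ha;

	/* netdev_for_each_mc_addr() replaces the old dev_addr_list walk. */
	netdev_for_each_mc_addr(ha, netdev)
		example_add_filter(ha->addr);
}
```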
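Finally, the watchdog gains a "simple mode" Interrupt Throttle Rate calculation: symmetric Tx/Rx traffic lands near 2000 interrupts/s, completely one-sided traffic near 8000, and mixed loads in between. To make the arithmetic concrete, the tiny standalone C program below evaluates the same formula with made-up byte counters; it is only a sanity check of the math, not driver code.

```c
#include <stdio.h>

int main(void)
{
	unsigned int gotcl = 50000000;	/* Tx bytes this interval (invented) */
	unsigned int gorcl = 10000000;	/* Rx bytes this interval (invented) */

	unsigned int goc = (gotcl + gorcl) / 10000;
	unsigned int dif = (gotcl > gorcl ? gotcl - gorcl : gorcl - gotcl) / 10000;
	unsigned int itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

	/* The ITR register holds the interrupt interval in 256 ns units. */
	printf("itr = %u ints/sec -> ITR register = %u\n",
	       itr, 1000000000 / (itr * 256));
	return 0;
}
```

With these example counters goc is 6000, dif is 4000, so itr works out to 6000 interrupts/s and the programmed register value to about 651, squarely between the 2000 and 8000 endpoints.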