Diffstat (limited to 'drivers/net/e1000e/netdev.c')
-rw-r--r-- | drivers/net/e1000e/netdev.c | 499
1 file changed, 430 insertions, 69 deletions
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index b81c4237b5d3..abd492b7336d 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -55,9 +55,11 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_82571] = &e1000_82571_info, [board_82572] = &e1000_82572_info, [board_82573] = &e1000_82573_info, + [board_82574] = &e1000_82574_info, [board_80003es2lan] = &e1000_es2_info, [board_ich8lan] = &e1000_ich8_info, [board_ich9lan] = &e1000_ich9_info, + [board_ich10lan] = &e1000_ich10_info, }; #ifdef DEBUG @@ -1187,8 +1189,8 @@ static irqreturn_t e1000_intr(int irq, void *data) struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 rctl, icr = er32(ICR); + if (!icr) return IRQ_NONE; /* Not our interrupt */ @@ -1244,6 +1246,263 @@ static irqreturn_t e1000_intr(int irq, void *data) return IRQ_HANDLED; } +static irqreturn_t e1000_msix_other(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (!(icr & E1000_ICR_INT_ASSERTED)) { + ew32(IMS, E1000_IMS_OTHER); + return IRQ_NONE; + } + + if (icr & adapter->eiac_mask) + ew32(ICS, (icr & adapter->eiac_mask)); + + if (icr & E1000_ICR_OTHER) { + if (!(icr & E1000_ICR_LSC)) + goto no_link_interrupt; + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + +no_link_interrupt: + ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); + + return IRQ_HANDLED; +} + + +static irqreturn_t e1000_intr_msix_tx(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + + + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + + if (!e1000_clean_tx_irq(adapter)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(ICS, tx_ring->ims_val); + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_intr_msix_rx(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Write the ITR value calculated at the end of the + * previous interrupt. + */ + if (adapter->rx_ring->set_itr) { + writel(1000000000 / (adapter->rx_ring->itr_val * 256), + adapter->hw.hw_addr + adapter->rx_ring->itr_register); + adapter->rx_ring->set_itr = 0; + } + + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __netif_rx_schedule(netdev, &adapter->napi); + } + return IRQ_HANDLED; +} + +/** + * e1000_configure_msix - Configure MSI-X hardware + * + * e1000_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. 
+ **/ +static void e1000_configure_msix(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_ring *tx_ring = adapter->tx_ring; + int vector = 0; + u32 ctrl_ext, ivar = 0; + + adapter->eiac_mask = 0; + + /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ + if (hw->mac.type == e1000_82574) { + u32 rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_ACK_DIS; + ew32(RFCTL, rfctl); + } + +#define E1000_IVAR_INT_ALLOC_VALID 0x8 + /* Configure Rx vector */ + rx_ring->ims_val = E1000_IMS_RXQ0; + adapter->eiac_mask |= rx_ring->ims_val; + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + hw->hw_addr + rx_ring->itr_register); + else + writel(1, hw->hw_addr + rx_ring->itr_register); + ivar = E1000_IVAR_INT_ALLOC_VALID | vector; + + /* Configure Tx vector */ + tx_ring->ims_val = E1000_IMS_TXQ0; + vector++; + if (tx_ring->itr_val) + writel(1000000000 / (tx_ring->itr_val * 256), + hw->hw_addr + tx_ring->itr_register); + else + writel(1, hw->hw_addr + tx_ring->itr_register); + adapter->eiac_mask |= tx_ring->ims_val; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); + + /* set vector for Other Causes, e.g. link changes */ + vector++; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + hw->hw_addr + E1000_EITR_82574(vector)); + else + writel(1, hw->hw_addr + E1000_EITR_82574(vector)); + + /* Cause Tx interrupts on every write back */ + ivar |= (1 << 31); + + ew32(IVAR, ivar); + + /* enable MSI-X PBA support */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask Other interrupts upon ICR read */ +#define E1000_EIAC_MASK_82574 0x01F00000 + ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); + ctrl_ext |= E1000_CTRL_EXT_EIAME; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~FLAG_MSI_ENABLED; + } + + return; +} + +/** + * e1000e_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) +{ + int err; + int numvecs, i; + + + switch (adapter->int_mode) { + case E1000E_INT_MODE_MSIX: + if (adapter->flags & FLAG_HAS_MSIX) { + numvecs = 3; /* RxQ0, TxQ0 and other */ + adapter->msix_entries = kcalloc(numvecs, + sizeof(struct msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix(adapter->pdev, + adapter->msix_entries, + numvecs); + if (err == 0) + return; + } + /* MSI-X failed, so fall through and try MSI */ + e_err("Failed to initialize MSI-X interrupts. " + "Falling back to MSI interrupts.\n"); + e1000e_reset_interrupt_capability(adapter); + } + adapter->int_mode = E1000E_INT_MODE_MSI; + /* Fall through */ + case E1000E_INT_MODE_MSI: + if (!pci_enable_msi(adapter->pdev)) { + adapter->flags |= FLAG_MSI_ENABLED; + } else { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_err("Failed to initialize MSI interrupts. 
Falling " + "back to legacy interrupts.\n"); + } + /* Fall through */ + case E1000E_INT_MODE_LEGACY: + /* Don't do anything; this is the system default */ + break; + } + + return; +} + +/** + * e1000_request_msix - Initialize MSI-X interrupts + * + * e1000_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + **/ +static int e1000_request_msix(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0, vector = 0; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + sprintf(adapter->rx_ring->name, "%s-rx0", netdev->name); + else + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + &e1000_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); + if (err) + goto out; + adapter->rx_ring->itr_register = E1000_EITR_82574(vector); + adapter->rx_ring->itr_val = adapter->itr; + vector++; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + sprintf(adapter->tx_ring->name, "%s-tx0", netdev->name); + else + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + &e1000_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); + if (err) + goto out; + adapter->tx_ring->itr_register = E1000_EITR_82574(vector); + adapter->tx_ring->itr_val = adapter->itr; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + &e1000_msix_other, 0, netdev->name, netdev); + if (err) + goto out; + + e1000_configure_msix(adapter); + return 0; +out: + return err; +} + /** * e1000_request_irq - initialize interrupts * @@ -1253,29 +1512,33 @@ static irqreturn_t e1000_intr(int irq, void *data) static int e1000_request_irq(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int irq_flags = IRQF_SHARED; int err; - if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) { - err = pci_enable_msi(adapter->pdev); - if (!err) { - adapter->flags |= FLAG_MSI_ENABLED; - irq_flags = 0; - } + if (adapter->msix_entries) { + err = e1000_request_msix(adapter); + if (!err) + return err; + /* fall back to MSI */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_MSI; + e1000e_set_interrupt_capability(adapter); } + if (adapter->flags & FLAG_MSI_ENABLED) { + err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0, + netdev->name, netdev); + if (!err) + return err; - err = request_irq(adapter->pdev->irq, - ((adapter->flags & FLAG_MSI_ENABLED) ? 
- &e1000_intr_msi : &e1000_intr), - irq_flags, netdev->name, netdev); - if (err) { - if (adapter->flags & FLAG_MSI_ENABLED) { - pci_disable_msi(adapter->pdev); - adapter->flags &= ~FLAG_MSI_ENABLED; - } - e_err("Unable to allocate interrupt, Error: %d\n", err); + /* fall back to legacy interrupt */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; } + err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED, + netdev->name, netdev); + if (err) + e_err("Unable to allocate interrupt, Error: %d\n", err); + return err; } @@ -1283,11 +1546,21 @@ static void e1000_free_irq(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - free_irq(adapter->pdev->irq, netdev); - if (adapter->flags & FLAG_MSI_ENABLED) { - pci_disable_msi(adapter->pdev); - adapter->flags &= ~FLAG_MSI_ENABLED; + if (adapter->msix_entries) { + int vector = 0; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + /* Other Causes interrupt vector */ + free_irq(adapter->msix_entries[vector].vector, netdev); + return; } + + free_irq(adapter->pdev->irq, netdev); } /** @@ -1298,6 +1571,8 @@ static void e1000_irq_disable(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; ew32(IMC, ~0); + if (adapter->msix_entries) + ew32(EIAC_82574, 0); e1e_flush(); synchronize_irq(adapter->pdev->irq); } @@ -1309,7 +1584,12 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - ew32(IMS, IMS_ENABLE_MASK); + if (adapter->msix_entries) { + ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); + ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + } else { + ew32(IMS, IMS_ENABLE_MASK); + } e1e_flush(); } @@ -1559,9 +1839,8 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter) * traffic pattern. Constants in this function were computed * based on theoretical maximum wire speed and thresholds were set based * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. - * this functionality is controlled by the InterruptThrottleRate module - * parameter (see e1000_param.c) + * while increasing bulk throughput. This functionality is controlled + * by the InterruptThrottleRate module parameter. 
**/ static unsigned int e1000_update_itr(struct e1000_adapter *adapter, u16 itr_setting, int packets, @@ -1669,11 +1948,37 @@ set_itr_now: min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; adapter->itr = new_itr; - ew32(ITR, 1000000000 / (new_itr * 256)); + adapter->rx_ring->itr_val = new_itr; + if (adapter->msix_entries) + adapter->rx_ring->set_itr = 1; + else + ew32(ITR, 1000000000 / (new_itr * 256)); } } /** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->tx_ring) + goto err; + + adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->rx_ring) + goto err; + + return 0; +err: + e_err("Unable to allocate memory for queues\n"); + kfree(adapter->rx_ring); + kfree(adapter->tx_ring); + return -ENOMEM; +} + +/** * e1000_clean - NAPI Rx polling callback * @napi: struct associated with this polling callback * @budget: amount of packets driver is allowed to process this poll @@ -1681,12 +1986,17 @@ set_itr_now: static int e1000_clean(struct napi_struct *napi, int budget) { struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); + struct e1000_hw *hw = &adapter->hw; struct net_device *poll_dev = adapter->netdev; int tx_cleaned = 0, work_done = 0; /* Must NOT use netdev_priv macro here. */ adapter = poll_dev->priv; + if (adapter->msix_entries && + !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) + goto clean_rx; + /* * e1000_clean is called per-cpu. This lock protects * tx_ring from being cleaned by multiple cpus @@ -1698,6 +2008,7 @@ static int e1000_clean(struct napi_struct *napi, int budget) spin_unlock(&adapter->tx_queue_lock); } +clean_rx: adapter->clean_rx(adapter, &work_done, budget); if (tx_cleaned) @@ -1708,7 +2019,10 @@ static int e1000_clean(struct napi_struct *napi, int budget) if (adapter->itr_setting & 3) e1000_set_itr(adapter); netif_rx_complete(poll_dev, napi); - e1000_irq_enable(adapter); + if (adapter->msix_entries) + ew32(IMS, adapter->rx_ring->ims_val); + else + e1000_irq_enable(adapter); } return work_done; @@ -2504,6 +2818,8 @@ int e1000e_up(struct e1000_adapter *adapter) clear_bit(__E1000_DOWN, &adapter->state); napi_enable(&adapter->napi); + if (adapter->msix_entries) + e1000_configure_msix(adapter); e1000_irq_enable(adapter); /* fire a link change interrupt to start the watchdog */ @@ -2587,13 +2903,10 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); - if (!adapter->tx_ring) - goto err; + e1000e_set_interrupt_capability(adapter); - adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); - if (!adapter->rx_ring) - goto err; + if (e1000_alloc_queues(adapter)) + return -ENOMEM; spin_lock_init(&adapter->tx_queue_lock); @@ -2602,12 +2915,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) set_bit(__E1000_DOWN, &adapter->state); return 0; - -err: - e_err("Unable to allocate memory for queues\n"); - kfree(adapter->rx_ring); - kfree(adapter->tx_ring); - return -ENOMEM; } /** @@ -2649,6 +2956,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) /* free the real vector and request a test handler */ e1000_free_irq(adapter); + 
e1000e_reset_interrupt_capability(adapter); /* Assume that the test fails, if it succeeds then the test * MSI irq handler will unset this flag */ @@ -2679,6 +2987,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) rmb(); if (adapter->flags & FLAG_MSI_TEST_FAILED) { + adapter->int_mode = E1000E_INT_MODE_LEGACY; err = -EIO; e_info("MSI interrupt test failed!\n"); } @@ -2692,7 +3001,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) /* okay so the test worked, restore settings */ e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); msi_test_failed: - /* restore the original vector, even if it failed */ + e1000e_set_interrupt_capability(adapter); e1000_request_irq(adapter); return err; } @@ -2802,7 +3111,7 @@ static int e1000_open(struct net_device *netdev) * ignore e1000e MSI messages, which means we need to test our MSI * interrupt now */ - { + if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { err = e1000_test_msi(adapter); if (err) { e_err("Interrupt allocation failed\n"); @@ -2997,7 +3306,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.algnerrc += er32(ALGNERRC); adapter->stats.rxerrc += er32(RXERRC); - adapter->stats.tncrs += er32(TNCRS); + if (hw->mac.type != e1000_82574) + adapter->stats.tncrs += er32(TNCRS); adapter->stats.cexterr += er32(CEXTERR); adapter->stats.tsctc += er32(TSCTC); adapter->stats.tsctfc += er32(TSCTFC); @@ -3193,6 +3503,27 @@ static void e1000_watchdog_task(struct work_struct *work) &adapter->link_duplex); e1000_print_link_info(adapter); /* + * On supported PHYs, check for duplex mismatch only + * if link has autonegotiated at 10/100 half + */ + if ((hw->phy.type == e1000_phy_igp_3 || + hw->phy.type == e1000_phy_bm) && + (hw->mac.autoneg == true) && + (adapter->link_speed == SPEED_10 || + adapter->link_speed == SPEED_100) && + (adapter->link_duplex == HALF_DUPLEX)) { + u16 autoneg_exp; + + e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); + + if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) + e_info("Autonegotiated half duplex but" + " link partner cannot autoneg. 
" + " Try forcing full duplex if " + "link gets many collisions.\n"); + } + + /* * tweak tx_queue_len according to speed/duplex * and adjust the timeout factor */ @@ -3307,7 +3638,10 @@ link_up: } /* Cause software interrupt to ensure Rx ring is cleaned */ - ew32(ICS, E1000_ICS_RXDMT0); + if (adapter->msix_entries) + ew32(ICS, adapter->rx_ring->ims_val); + else + ew32(ICS, E1000_ICS_RXDMT0); /* Force detection of hung controller every watchdog period */ adapter->detect_tx_hung = 1; @@ -3415,34 +3749,50 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) struct e1000_buffer *buffer_info; unsigned int i; u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - css = skb_transport_offset(skb); - - i = tx_ring->next_to_use; - buffer_info = &tx_ring->buffer_info[i]; - context_desc = E1000_CONTEXT_DESC(*tx_ring, i); - - context_desc->lower_setup.ip_config = 0; - context_desc->upper_setup.tcp_fields.tucss = css; - context_desc->upper_setup.tcp_fields.tucso = - css + skb->csum_offset; - context_desc->upper_setup.tcp_fields.tucse = 0; - context_desc->tcp_seg_setup.data = 0; - context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; - buffer_info->time_stamp = jiffies; - buffer_info->next_to_watch = i; + switch (skb->protocol) { + case __constant_htons(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case __constant_htons(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn("checksum_partial proto=%x!\n", skb->protocol); + break; + } - i++; - if (i == tx_ring->count) - i = 0; - tx_ring->next_to_use = i; + css = skb_transport_offset(skb); - return 1; - } + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; - return 0; + return 1; } #define E1000_MAX_PER_TXD 8192 @@ -4024,6 +4374,7 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) e1000e_down(adapter); e1000_free_irq(adapter); } + e1000e_reset_interrupt_capability(adapter); retval = pci_save_state(pdev); if (retval) @@ -4150,6 +4501,7 @@ static int e1000_resume(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); + e1000e_set_interrupt_capability(adapter); if (netif_running(netdev)) { err = e1000_request_irq(adapter); if (err) @@ -4327,13 +4679,15 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter) ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); if (!(le16_to_cpu(buf) & (1 << 0))) { /* Deep Smart Power Down (DSPD) */ - e_warn("Warning: detected DSPD enabled in EEPROM\n"); + dev_warn(&adapter->pdev->dev, + "Warning: detected DSPD enabled in EEPROM\n"); } ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf); if (le16_to_cpu(buf) & (3 << 2)) { /* ASPM enable */ - e_warn("Warning: detected ASPM enabled in EEPROM\n"); + 
dev_warn(&adapter->pdev->dev, + "Warning: detected ASPM enabled in EEPROM\n"); } } @@ -4702,6 +5056,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev) if (!e1000_check_reset_block(&adapter->hw)) e1000_phy_hw_reset(&adapter->hw); + e1000e_reset_interrupt_capability(adapter); kfree(adapter->tx_ring); kfree(adapter->rx_ring); @@ -4743,6 +5098,8 @@ static struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), board_80003es2lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), @@ -4765,6 +5122,7 @@ static struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, @@ -4773,6 +5131,9 @@ static struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, + { } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); |
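
The interrupt setup introduced above negotiates MSI-X first, then MSI, then legacy INTx. Below is a minimal user-space sketch of that fall-through ladder in e1000e_set_interrupt_capability(); the mock_pci_enable_msix()/mock_pci_enable_msi() helpers are hypothetical stand-ins for the real PCI calls, and only the ordering and fallback behaviour mirror the patch.

/*
 * Illustrative sketch of the MSI-X -> MSI -> legacy fallback ladder added in
 * e1000e_set_interrupt_capability().  The mock_pci_* calls are stand-ins
 * (0 on success, negative on failure), not the real kernel API.
 */
#include <stdio.h>

enum int_mode { INT_MODE_LEGACY, INT_MODE_MSI, INT_MODE_MSIX };

/* Mocked capability probes: flip the return values to exercise each path. */
static int mock_pci_enable_msix(int nvec) { (void)nvec; return -1; /* pretend MSI-X is unavailable */ }
static int mock_pci_enable_msi(void)      { return 0;              /* pretend MSI works */ }

static enum int_mode set_interrupt_capability(enum int_mode requested)
{
	switch (requested) {
	case INT_MODE_MSIX:
		/* RxQ0, TxQ0 and "other causes" -> three vectors, as in the patch */
		if (mock_pci_enable_msix(3) == 0)
			return INT_MODE_MSIX;
		fprintf(stderr, "MSI-X failed, falling back to MSI\n");
		/* fall through */
	case INT_MODE_MSI:
		if (mock_pci_enable_msi() == 0)
			return INT_MODE_MSI;
		fprintf(stderr, "MSI failed, falling back to legacy\n");
		/* fall through */
	case INT_MODE_LEGACY:
	default:
		return INT_MODE_LEGACY;
	}
}

int main(void)
{
	static const char *names[] = { "legacy", "MSI", "MSI-X" };
	printf("negotiated mode: %s\n", names[set_interrupt_capability(INT_MODE_MSIX)]);
	return 0;
}

The switch intentionally falls through so a failed mode degrades to the next one without repeating the request logic, which is the same structure the patch uses.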
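
Several hunks write 1000000000 / (itr_val * 256) into the ITR/EITR registers. The sketch below, a standalone illustration rather than driver code, shows that conversion from a target interrupt rate in interrupts per second to the register value, which expresses the inter-interrupt gap in 256 ns units; the sample rates are made up for demonstration.

/*
 * Interrupt-throttle conversion used throughout the patch: the register
 * write itself is replaced by a printf here.
 */
#include <stdio.h>

static unsigned itr_to_reg(unsigned ints_per_sec)
{
	if (ints_per_sec == 0)
		return 1;	/* the patch writes 1 when throttling is disabled */
	return 1000000000u / (ints_per_sec * 256u);
}

int main(void)
{
	unsigned rates[] = { 8000, 20000, 70000 };	/* illustrative target rates */
	for (unsigned i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%u ints/s -> ITR/EITR value %u (in 256 ns units)\n",
		       rates[i], itr_to_reg(rates[i]));
	return 0;
}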
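
e1000_configure_msix() packs one interrupt cause per byte into the 82574 IVAR register: the vector number OR'd with the 0x8 "allocation valid" flag for Rx queue 0, for Tx queue 0 (shifted left by 8) and for other causes (shifted left by 16), plus bit 31 to request a Tx interrupt on every descriptor write-back. A small sketch of that packing, with local defines standing in for E1000_IVAR_INT_ALLOC_VALID and the register write omitted:

/*
 * Sketch of the 82574 IVAR field packing as done in e1000_configure_msix().
 * Standalone illustration only; the field layout follows the shifts in the
 * diff, and the defines below mirror the driver's constants by assumption.
 */
#include <stdio.h>

#define IVAR_INT_ALLOC_VALID	0x8u
#define IVAR_TX_INT_EVERY_WB	(1u << 31)

static unsigned pack_ivar(unsigned rx_vec, unsigned tx_vec, unsigned other_vec)
{
	unsigned ivar = 0;

	ivar |= (IVAR_INT_ALLOC_VALID | rx_vec);		/* RxQ0  -> bits 7:0   */
	ivar |= (IVAR_INT_ALLOC_VALID | tx_vec) << 8;		/* TxQ0  -> bits 15:8  */
	ivar |= (IVAR_INT_ALLOC_VALID | other_vec) << 16;	/* other -> bits 23:16 */
	ivar |= IVAR_TX_INT_EVERY_WB;	/* Tx interrupt on every write-back */
	return ivar;
}

int main(void)
{
	/* vectors 0/1/2 match the Rx, Tx, "other causes" order in the patch */
	printf("IVAR = 0x%08x\n", pack_ivar(0, 1, 2));
	return 0;
}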