Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--  drivers/net/igb/igb_main.c | 28
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 022794e579c7..a50db5398fa5 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -206,10 +206,11 @@ static int __init igb_init_module(void)
global_quad_port_a = 0;
- ret = pci_register_driver(&igb_driver);
#ifdef CONFIG_IGB_DCA
dca_register_notify(&dca_notifier);
#endif
+
+ ret = pci_register_driver(&igb_driver);
return ret;
}
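
After this hunk, igb_init_module() registers the DCA notifier before the PCI driver, so the notifier is already in place by the time any adapter is probed. A sketch of how the function reads with the hunk applied (only the lines visible in the hunk are assumed; the declaration of ret and the rationale comment are added here for illustration):

    static int __init igb_init_module(void)
    {
    	int ret;	/* declaration assumed; not shown in the hunk */

    	global_quad_port_a = 0;

    #ifdef CONFIG_IGB_DCA
    	/* register for DCA provider events before the PCI driver below,
    	 * so the notifier is in place before any device is probed
    	 * (comment added here for illustration) */
    	dca_register_notify(&dca_notifier);
    #endif

    	ret = pci_register_driver(&igb_driver);
    	return ret;
    }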
@@ -1156,11 +1157,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
/* set flags */
switch (hw->mac.type) {
- case e1000_82576:
case e1000_82575:
- adapter->flags |= IGB_FLAG_HAS_DCA;
adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
break;
+ case e1000_82576:
default:
break;
}
@@ -1310,8 +1310,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
goto err_register;
#ifdef CONFIG_IGB_DCA
- if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
- (dca_add_requester(&pdev->dev) == 0)) {
+ if (dca_add_requester(&pdev->dev) == 0) {
adapter->flags |= IGB_FLAG_DCA_ENABLED;
dev_info(&pdev->dev, "DCA enabled\n");
/* Always use CB2 mode, difference is masked
@@ -1457,8 +1456,8 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
/* Number of supported queues. */
/* Having more queues than CPUs doesn't make sense. */
- adapter->num_rx_queues = min((u32)IGB_MAX_RX_QUEUES, (u32)num_online_cpus());
- adapter->num_tx_queues = min(IGB_MAX_TX_QUEUES, num_online_cpus());
+ adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+ adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
/* This call may decrease the number of queues depending on
* interrupt mode. */
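
The kernel's min() macro warns when its two arguments have different types, which is why the old code needed explicit (u32) casts; min_t() takes the target type as its first argument and casts both operands itself. A minimal, self-contained illustration of the idea (the macro below is a simplified stand-in, not the kernel's exact definition, and the values are made up):

    #include <stdio.h>

    /* simplified stand-in for the kernel's min_t(); the real macro in
     * include/linux/kernel.h also avoids evaluating its arguments twice */
    #define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

    int main(void)
    {
    	unsigned int online_cpus = 8;   /* assumed value, stands in for num_online_cpus() */
    	int max_rx_queues = 4;          /* assumed value, stands in for IGB_MAX_RX_QUEUES */

    	/* both operands are converted to the named type before comparing,
    	 * so there is no signed/unsigned mismatch as with plain min() */
    	unsigned int rx_queues = min_t(unsigned int, max_rx_queues, online_cpus);

    	printf("%u\n", rx_queues);      /* prints 4 */
    	return 0;
    }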
@@ -1835,11 +1834,11 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
rctl |= E1000_RCTL_SECRC;
/*
- * disable store bad packets, long packet enable, and clear size bits.
+ * disable store bad packets and clear size bits.
*/
- rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_LPE | E1000_RCTL_SZ_256);
+ rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
- if (adapter->netdev->mtu > ETH_DATA_LEN)
+ /* enable LPE to prevent packets larger than max_frame_size */
rctl |= E1000_RCTL_LPE;
/* Setup buffer sizes */
@@ -1865,7 +1864,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
*/
/* allocations using alloc_page take too long for regular MTU
* so only enable packet split for jumbo frames */
- if (rctl & E1000_RCTL_LPE) {
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
srrctl |= adapter->rx_ps_hdr_size <<
E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
@@ -3473,19 +3472,16 @@ static int __igb_notify_dca(struct device *dev, void *data)
struct e1000_hw *hw = &adapter->hw;
unsigned long event = *(unsigned long *)data;
- if (!(adapter->flags & IGB_FLAG_HAS_DCA))
- goto out;
-
switch (event) {
case DCA_PROVIDER_ADD:
/* if already enabled, don't do it again */
if (adapter->flags & IGB_FLAG_DCA_ENABLED)
break;
- adapter->flags |= IGB_FLAG_DCA_ENABLED;
/* Always use CB2 mode, difference is masked
* in the CB driver. */
wr32(E1000_DCA_CTRL, 2);
if (dca_add_requester(dev) == 0) {
+ adapter->flags |= IGB_FLAG_DCA_ENABLED;
dev_info(&adapter->pdev->dev, "DCA enabled\n");
igb_setup_dca(adapter);
break;
@@ -3502,7 +3498,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
}
break;
}
-out:
+
return 0;
}
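
Assembled from the added and context lines of the last hunk, the DCA_PROVIDER_ADD branch ends up reading roughly as below; the point of the change is that IGB_FLAG_DCA_ENABLED is set only once dca_add_requester() has actually succeeded (a sketch only, not a verbatim copy of the resulting file):

    	case DCA_PROVIDER_ADD:
    		/* if already enabled, don't do it again */
    		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
    			break;
    		/* Always use CB2 mode, difference is masked
    		 * in the CB driver. */
    		wr32(E1000_DCA_CTRL, 2);
    		if (dca_add_requester(dev) == 0) {
    			adapter->flags |= IGB_FLAG_DCA_ENABLED;
    			dev_info(&adapter->pdev->dev, "DCA enabled\n");
    			igb_setup_dca(adapter);
    			break;
    		}
    		/* ... failure handling lies outside the lines shown in the hunk */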