Diffstat (limited to 'drivers/net/igb')
 drivers/net/igb/e1000_82575.c   |  572
 drivers/net/igb/e1000_82575.h   |   32
 drivers/net/igb/e1000_defines.h |   50
 drivers/net/igb/e1000_hw.h      |   22
 drivers/net/igb/e1000_mac.c     |  100
 drivers/net/igb/e1000_mbx.c     |   82
 drivers/net/igb/e1000_mbx.h     |   10
 drivers/net/igb/e1000_nvm.c     |   36
 drivers/net/igb/e1000_phy.c     |  453
 drivers/net/igb/e1000_phy.h     |   37
 drivers/net/igb/e1000_regs.h    |   80
 drivers/net/igb/igb.h           |  149
 drivers/net/igb/igb_ethtool.c   |  747
 drivers/net/igb/igb_main.c      | 3450
14 files changed, 3526 insertions, 2294 deletions
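
Before the diff body: one of the new helpers added below, igb_rxpbs_adjust_82580(), converts the raw RXPBS register field into an actual packet-buffer size via a lookup table (the patch documents the mapping as 0x0..0x7 -> 36 72 144 1 2 4 8 16 and 0x8..0xA -> 35 70 140, higher values reserved). The following stand-alone C sketch, which is not part of the patch, just reproduces that lookup so the mapping can be checked in isolation; the interpretation of the table entries as buffer sizes in KB is an assumption for illustration only.

/* Stand-alone sketch of the 82580 RXPBS table lookup described in the patch. */
#include <stdio.h>

static const unsigned short rxpbs_table_82580[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
#define RXPBS_TABLE_SIZE \
	(sizeof(rxpbs_table_82580) / sizeof(rxpbs_table_82580[0]))

/* Map the raw register field to a table entry; out-of-range values yield 0. */
static unsigned short rxpbs_adjust(unsigned int data)
{
	return (data < RXPBS_TABLE_SIZE) ? rxpbs_table_82580[data] : 0;
}

int main(void)
{
	/* e.g. a register field of 0x2 maps to 144 (KB, presumably) */
	printf("RXPBS 0x2 -> %u\n", rxpbs_adjust(0x2));
	return 0;
}

The in-kernel version uses the same table but defines the size with a sizeof division; ARRAY_SIZE() from the kernel headers would be the more idiomatic equivalent.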
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index f8f5772557ce..e8e9e9194a88 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -46,7 +46,10 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *); static s32 igb_init_hw_82575(struct e1000_hw *); static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); +static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); +static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); static s32 igb_reset_hw_82575(struct e1000_hw *); +static s32 igb_reset_hw_82580(struct e1000_hw *); static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); static s32 igb_setup_copper_link_82575(struct e1000_hw *); static s32 igb_setup_serdes_link_82575(struct e1000_hw *); @@ -62,6 +65,12 @@ static s32 igb_reset_init_script_82575(struct e1000_hw *); static s32 igb_read_mac_addr_82575(struct e1000_hw *); static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); +static const u16 e1000_82580_rxpbs_table[] = + { 36, 72, 144, 1, 2, 4, 8, 16, + 35, 70, 140 }; +#define E1000_82580_RXPBS_TABLE_SIZE \ + (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) + static s32 igb_get_invariants_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; @@ -81,12 +90,20 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) break; case E1000_DEV_ID_82576: case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: case E1000_DEV_ID_82576_FIBER: case E1000_DEV_ID_82576_SERDES: case E1000_DEV_ID_82576_QUAD_COPPER: case E1000_DEV_ID_82576_SERDES_QUAD: mac->type = e1000_82576; break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + mac->type = e1000_82580; + break; default: return -E1000_ERR_MAC_INIT; break; @@ -109,6 +126,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) dev_spec->sgmii_active = true; ctrl_ext |= E1000_CTRL_I2C_ENA; break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: hw->phy.media_type = e1000_media_type_internal_serdes; ctrl_ext |= E1000_CTRL_I2C_ENA; @@ -120,12 +138,26 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) wr32(E1000_CTRL_EXT, ctrl_ext); + /* + * if using i2c make certain the MDICNFG register is cleared to prevent + * communications from being misrouted to the mdic registers + */ + if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580)) + wr32(E1000_MDICNFG, 0); + /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES_82575; if (mac->type == e1000_82576) mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + if (mac->type == e1000_82580) + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + /* reset */ + if (mac->type == e1000_82580) + mac->ops.reset_hw = igb_reset_hw_82580; + else + mac->ops.reset_hw = igb_reset_hw_82575; /* Set if part includes ASF firmware */ mac->asf_firmware_present = true; /* Set if manageability features are enabled. 
*/ @@ -193,6 +225,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) phy->ops.reset = igb_phy_hw_reset_sgmii_82575; phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; + } else if (hw->mac.type == e1000_82580) { + phy->ops.reset = igb_phy_hw_reset; + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; } else { phy->ops.reset = igb_phy_hw_reset; phy->ops.read_reg = igb_read_phy_reg_igp; @@ -224,6 +260,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; break; + case I82580_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; + phy->ops.get_cable_length = igb_get_cable_length_82580; + phy->ops.get_phy_info = igb_get_phy_info_82580; + break; default: return -E1000_ERR_PHY; } @@ -240,9 +282,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) **/ static s32 igb_acquire_phy_82575(struct e1000_hw *hw) { - u16 mask; + u16 mask = E1000_SWFW_PHY0_SM; - mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; return igb_acquire_swfw_sync_82575(hw, mask); } @@ -256,9 +299,11 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw) **/ static void igb_release_phy_82575(struct e1000_hw *hw) { - u16 mask; + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; - mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; igb_release_swfw_sync_82575(hw, mask); } @@ -274,45 +319,23 @@ static void igb_release_phy_82575(struct e1000_hw *hw) static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 *data) { - struct e1000_phy_info *phy = &hw->phy; - u32 i, i2ccmd = 0; + s32 ret_val = -E1000_ERR_PARAM; if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { hw_dbg("PHY Address %u is out of range\n", offset); - return -E1000_ERR_PARAM; + goto out; } - /* - * Set up Op-code, Phy Address, and register address in the I2CCMD - * register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. - */ - i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | - (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | - (E1000_I2CCMD_OPCODE_READ)); - - wr32(E1000_I2CCMD, i2ccmd); + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; - /* Poll the ready bit to see if the I2C read completed */ - for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { - udelay(50); - i2ccmd = rd32(E1000_I2CCMD); - if (i2ccmd & E1000_I2CCMD_READY) - break; - } - if (!(i2ccmd & E1000_I2CCMD_READY)) { - hw_dbg("I2CCMD Read did not complete\n"); - return -E1000_ERR_PHY; - } - if (i2ccmd & E1000_I2CCMD_ERROR) { - hw_dbg("I2CCMD Error bit set\n"); - return -E1000_ERR_PHY; - } + ret_val = igb_read_phy_reg_i2c(hw, offset, data); - /* Need to byte-swap the 16-bit value. 
*/ - *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + hw->phy.ops.release(hw); - return 0; +out: + return ret_val; } /** @@ -327,47 +350,24 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 data) { - struct e1000_phy_info *phy = &hw->phy; - u32 i, i2ccmd = 0; - u16 phy_data_swapped; + s32 ret_val = -E1000_ERR_PARAM; + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { hw_dbg("PHY Address %d is out of range\n", offset); - return -E1000_ERR_PARAM; + goto out; } - /* Swap the data bytes for the I2C interface */ - phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; - /* - * Set up Op-code, Phy Address, and register address in the I2CCMD - * register. The MAC will take care of interfacing with the - * PHY to retrieve the desired data. - */ - i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | - (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | - E1000_I2CCMD_OPCODE_WRITE | - phy_data_swapped); - - wr32(E1000_I2CCMD, i2ccmd); - - /* Poll the ready bit to see if the I2C read completed */ - for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { - udelay(50); - i2ccmd = rd32(E1000_I2CCMD); - if (i2ccmd & E1000_I2CCMD_READY) - break; - } - if (!(i2ccmd & E1000_I2CCMD_READY)) { - hw_dbg("I2CCMD Write did not complete\n"); - return -E1000_ERR_PHY; - } - if (i2ccmd & E1000_I2CCMD_ERROR) { - hw_dbg("I2CCMD Error bit set\n"); - return -E1000_ERR_PHY; - } + ret_val = igb_write_phy_reg_i2c(hw, offset, data); - return 0; + hw->phy.ops.release(hw); + +out: + return ret_val; } /** @@ -676,6 +676,10 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) if (hw->bus.func == 1) mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; while (timeout) { if (rd32(E1000_EEMNGCTL) & mask) @@ -706,9 +710,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw) s32 ret_val; u16 speed, duplex; - /* SGMII link check is done through the PCS register. */ - if ((hw->phy.media_type != e1000_media_type_copper) || - (igb_sgmii_active_82575(hw))) { + if (hw->phy.media_type != e1000_media_type_copper) { ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, &duplex); /* @@ -723,6 +725,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw) return ret_val; } + /** * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex * @hw: pointer to the HW structure @@ -788,13 +791,27 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) { u32 reg; + u16 eeprom_data = 0; if (hw->phy.media_type != e1000_media_type_internal_serdes || igb_sgmii_active_82575(hw)) return; - /* if the management interface is not enabled, then power down */ - if (!igb_enable_mng_pass_thru(hw)) { + if (hw->bus.func == E1000_FUNC_0) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + else if (hw->mac.type == e1000_82580) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &eeprom_data); + else if (hw->bus.func == E1000_FUNC_1) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + + /* + * If APM is not enabled in the EEPROM and management interface is + * not enabled, then power down. 
+ */ + if (!(eeprom_data & E1000_NVM_APME_82575) && + !igb_enable_mng_pass_thru(hw)) { /* Disable PCS to turn off link */ reg = rd32(E1000_PCS_CFG0); reg &= ~E1000_PCS_CFG_PCS_EN; @@ -908,6 +925,11 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) for (i = 0; i < mac->mta_reg_count; i++) array_wr32(E1000_MTA, i, 0); + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(E1000_UTA, i, 0); + /* Setup link and flow control */ ret_val = igb_setup_link(hw); @@ -934,7 +956,6 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; - bool link; ctrl = rd32(E1000_CTRL); ctrl |= E1000_CTRL_SLU; @@ -946,6 +967,9 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) goto out; if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ + msleep(300); + ret_val = hw->phy.ops.reset(hw); if (ret_val) { hw_dbg("Error resetting the PHY.\n"); @@ -959,6 +983,9 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) case e1000_phy_igp_3: ret_val = igb_copper_link_setup_igp(hw); break; + case e1000_phy_82580: + ret_val = igb_copper_link_setup_82580(hw); + break; default: ret_val = -E1000_ERR_PHY; break; @@ -967,57 +994,24 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) if (ret_val) goto out; - if (hw->mac.autoneg) { - /* - * Setup autoneg and flow control advertisement - * and perform autonegotiation. - */ - ret_val = igb_copper_link_autoneg(hw); - if (ret_val) - goto out; - } else { - /* - * PHY will be set to 10H, 10F, 100H or 100F - * depending on user settings. - */ - hw_dbg("Forcing Speed and Duplex\n"); - ret_val = hw->phy.ops.force_speed_duplex(hw); - if (ret_val) { - hw_dbg("Error Forcing Speed and Duplex\n"); - goto out; - } - } - - /* - * Check link status. Wait up to 100 microseconds for link to become - * valid. - */ - ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); - if (ret_val) - goto out; - - if (link) { - hw_dbg("Valid link established!!!\n"); - /* Config the MAC and PHY after link is up */ - igb_config_collision_dist(hw); - ret_val = igb_config_fc_after_link_up(hw); - } else { - hw_dbg("Unable to establish link!!!\n"); - } - + ret_val = igb_setup_copper_link(hw); out: return ret_val; } /** - * igb_setup_serdes_link_82575 - Setup link for fiber/serdes + * igb_setup_serdes_link_82575 - Setup link for serdes * @hw: pointer to the HW structure * - * Configures speed and duplex for fiber and serdes links. + * Configure the physical coding sub-layer (PCS) link. The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. 
**/ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) { - u32 ctrl_reg, reg; + u32 ctrl_ext, ctrl_reg, reg; + bool pcs_autoneg; if ((hw->phy.media_type != e1000_media_type_internal_serdes) && !igb_sgmii_active_82575(hw)) @@ -1032,9 +1026,9 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); /* power on the sfp cage if present */ - reg = rd32(E1000_CTRL_EXT); - reg &= ~E1000_CTRL_EXT_SDP3_DATA; - wr32(E1000_CTRL_EXT, reg); + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, ctrl_ext); ctrl_reg = rd32(E1000_CTRL); ctrl_reg |= E1000_CTRL_SLU; @@ -1051,15 +1045,31 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) reg = rd32(E1000_PCS_LCTL); - if (igb_sgmii_active_82575(hw)) { - /* allow time for SFP cage to power up phy */ - msleep(300); + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; - /* AN time out should be disabled for SGMII mode */ + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); - } else { + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + default: + /* + * non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; } wr32(E1000_CTRL, ctrl_reg); @@ -1070,7 +1080,6 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) * mode that will be compatible with older link partners and switches. * However, both are supported by the hardware and some drivers/tools. 
*/ - reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); @@ -1080,25 +1089,18 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) */ reg |= E1000_PCS_LCTL_FORCE_FCTRL; - /* - * we always set sgmii to autoneg since it is the phy that will be - * forcing the link and the serdes is just a go-between - */ - if (hw->mac.autoneg || igb_sgmii_active_82575(hw)) { + if (pcs_autoneg) { /* Set PCS register for autoneg */ - reg |= E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ - E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ - E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ - E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ - hw_dbg("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg); + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); } else { - /* Set PCS register for forced speed */ - reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ - E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ - E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ - E1000_PCS_LCTL_FSD | /* Force Speed */ - E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ - hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD | /* Force Speed */ + E1000_PCS_LCTL_FORCE_LINK | /* Force Link */ + E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */ + + hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); } wr32(E1000_PCS_LCTL, reg); @@ -1167,9 +1169,18 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) { s32 ret_val = 0; - if (igb_check_alt_mac_addr(hw)) - ret_val = igb_read_mac_addr(hw); + /* + * If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. 
+ */ + ret_val = igb_check_alt_mac_addr(hw); + if (ret_val) + goto out; + + ret_val = igb_read_mac_addr(hw); +out: return ret_val; } @@ -1181,61 +1192,59 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) **/ static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) { - u32 temp; - igb_clear_hw_cntrs_base(hw); - temp = rd32(E1000_PRC64); - temp = rd32(E1000_PRC127); - temp = rd32(E1000_PRC255); - temp = rd32(E1000_PRC511); - temp = rd32(E1000_PRC1023); - temp = rd32(E1000_PRC1522); - temp = rd32(E1000_PTC64); - temp = rd32(E1000_PTC127); - temp = rd32(E1000_PTC255); - temp = rd32(E1000_PTC511); - temp = rd32(E1000_PTC1023); - temp = rd32(E1000_PTC1522); - - temp = rd32(E1000_ALGNERRC); - temp = rd32(E1000_RXERRC); - temp = rd32(E1000_TNCRS); - temp = rd32(E1000_CEXTERR); - temp = rd32(E1000_TSCTC); - temp = rd32(E1000_TSCTFC); - - temp = rd32(E1000_MGTPRC); - temp = rd32(E1000_MGTPDC); - temp = rd32(E1000_MGTPTC); - - temp = rd32(E1000_IAC); - temp = rd32(E1000_ICRXOC); - - temp = rd32(E1000_ICRXPTC); - temp = rd32(E1000_ICRXATC); - temp = rd32(E1000_ICTXPTC); - temp = rd32(E1000_ICTXATC); - temp = rd32(E1000_ICTXQEC); - temp = rd32(E1000_ICTXQMTC); - temp = rd32(E1000_ICRXDMTC); - - temp = rd32(E1000_CBTMPC); - temp = rd32(E1000_HTDPMC); - temp = rd32(E1000_CBRMPC); - temp = rd32(E1000_RPTHC); - temp = rd32(E1000_HGPTC); - temp = rd32(E1000_HTCBDPC); - temp = rd32(E1000_HGORCL); - temp = rd32(E1000_HGORCH); - temp = rd32(E1000_HGOTCL); - temp = rd32(E1000_HGOTCH); - temp = rd32(E1000_LENERRS); + rd32(E1000_PRC64); + rd32(E1000_PRC127); + rd32(E1000_PRC255); + rd32(E1000_PRC511); + rd32(E1000_PRC1023); + rd32(E1000_PRC1522); + rd32(E1000_PTC64); + rd32(E1000_PTC127); + rd32(E1000_PTC255); + rd32(E1000_PTC511); + rd32(E1000_PTC1023); + rd32(E1000_PTC1522); + + rd32(E1000_ALGNERRC); + rd32(E1000_RXERRC); + rd32(E1000_TNCRS); + rd32(E1000_CEXTERR); + rd32(E1000_TSCTC); + rd32(E1000_TSCTFC); + + rd32(E1000_MGTPRC); + rd32(E1000_MGTPDC); + rd32(E1000_MGTPTC); + + rd32(E1000_IAC); + rd32(E1000_ICRXOC); + + rd32(E1000_ICRXPTC); + rd32(E1000_ICRXATC); + rd32(E1000_ICTXPTC); + rd32(E1000_ICTXATC); + rd32(E1000_ICTXQEC); + rd32(E1000_ICTXQMTC); + rd32(E1000_ICRXDMTC); + + rd32(E1000_CBTMPC); + rd32(E1000_HTDPMC); + rd32(E1000_CBRMPC); + rd32(E1000_RPTHC); + rd32(E1000_HGPTC); + rd32(E1000_HTCBDPC); + rd32(E1000_HGORCL); + rd32(E1000_HGORCH); + rd32(E1000_HGOTCL); + rd32(E1000_HGOTCH); + rd32(E1000_LENERRS); /* This register should not be read in copper configurations */ if (hw->phy.media_type == e1000_media_type_internal_serdes || igb_sgmii_active_82575(hw)) - temp = rd32(E1000_SCVPC); + rd32(E1000_SCVPC); } /** @@ -1400,8 +1409,183 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) wr32(E1000_VT_CTL, vt_ctl); } +/** + * igb_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + u32 mdicnfg = 0; + s32 ret_val; + + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* + * We config the phy address in MDICNFG register now. Same bits + * as before. The values in MDIC can be written but will be + * ignored. 
This allows us to call the old function after + * configuring the PHY address in the new register + */ + mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT); + wr32(E1000_MDICNFG, mdicnfg); + + ret_val = igb_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +{ + u32 mdicnfg = 0; + s32 ret_val; + + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* + * We config the phy address in MDICNFG register now. Same bits + * as before. The values in MDIC can be written but will be + * ignored. This allows us to call the old function after + * configuring the PHY address in the new register + */ + mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT); + wr32(E1000_MDICNFG, mdicnfg); + + ret_val = igb_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. + **/ +static s32 igb_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl, icr; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + + hw->dev_spec._82575.global_device_reset = false; + + /* Get current control state. */ + ctrl = rd32(E1000_CTRL); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + msleep(10); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && + igb_acquire_swfw_sync_82575(hw, swmbsw_mask)) + global_device_reset = false; + + if (global_device_reset && + !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + wr32(E1000_CTRL, ctrl); + + /* Add delay to insure DEV_RST has time to complete */ + if (global_device_reset) + msleep(5); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) + igb_reset_init_script_82575(hw); + + /* clear global device reset status bit */ + wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. 
*/ + wr32(E1000_IMC, 0xffffffff); + icr = rd32(E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + /* Release semaphore */ + if (global_device_reset) + igb_release_swfw_sync_82575(hw, swmbsw_mask); + + return ret_val; +} + +/** + * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 igb_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < E1000_82580_RXPBS_TABLE_SIZE) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + static struct e1000_mac_operations e1000_mac_ops_82575 = { - .reset_hw = igb_reset_hw_82575, .init_hw = igb_init_hw_82575, .check_for_link = igb_check_for_link_82575, .rar_set = igb_rar_set, diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h index ebd146fd4e15..d51c9927c819 100644 --- a/drivers/net/igb/e1000_82575.h +++ b/drivers/net/igb/e1000_82575.h @@ -38,6 +38,11 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); #define E1000_RAR_ENTRIES_82575 16 #define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 + +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 /* SRRCTL bit definitions */ #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ @@ -66,6 +71,8 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); E1000_EICR_RX_QUEUE3) /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ /* Receive Descriptor - Advanced */ union e1000_adv_rx_desc { @@ -98,6 +105,7 @@ union e1000_adv_rx_desc { #define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 #define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ /* Transmit Descriptor - Advanced */ union e1000_adv_tx_desc { @@ -167,6 +175,18 @@ struct e1000_adv_tx_context_desc { #define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE (1 << 26) +#define E1000_ETQF_1588 (1 << 30) + +/* FTQF register bit definitions */ +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 #define MAX_NUM_VFS 8 #define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ @@ -202,9 +222,21 @@ struct e1000_adv_tx_context_desc { #define E1000_IOVCTL 0x05BBC #define E1000_IOVCTL_REUSE_VFQ 0x00000001 +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + #define ALL_QUEUES 0xFFFF +/* RX packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); void 
igb_vmdq_set_replication_pf(struct e1000_hw *, bool); +u16 igb_rxpbs_adjust_82580(u32 data); #endif diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index cb916833f303..6e036ae3138f 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h @@ -49,6 +49,7 @@ #define E1000_CTRL_EXT_PFRSTD 0x00004000 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 #define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 #define E1000_CTRL_EXT_EIAME 0x01000000 #define E1000_CTRL_EXT_IRCA 0x00000001 @@ -329,6 +330,7 @@ #define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ #define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ #define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ /* If this bit asserted, the driver should claim the interrupt */ #define E1000_ICR_INT_ASSERTED 0x80000000 /* LAN connected device generates an interrupt */ @@ -370,6 +372,7 @@ #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ #define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ /* Extended Interrupt Mask Set */ @@ -378,6 +381,7 @@ /* Interrupt Cause Set */ #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ /* Extended Interrupt Cause Set */ @@ -435,6 +439,39 @@ /* Flow Control */ #define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */ + +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 + /* PCI Express Control */ #define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 #define E1000_GCR_CMPL_TMOUT_10ms 
0x00001000 @@ -524,8 +561,12 @@ #define NVM_ALT_MAC_ADDR_PTR 0x0037 #define NVM_CHECKSUM_REG 0x003F -#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ -#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) /* Mask bits for fields in Word 0x0f of the NVM */ #define NVM_WORD0F_PAUSE_MASK 0x3000 @@ -592,6 +633,7 @@ */ #define M88E1111_I_PHY_ID 0x01410CC0 #define IGP03E1000_E_PHY_ID 0x02A80390 +#define I82580_I_PHY_ID 0x015403A0 #define M88_VENDOR 0x0141 /* M88E1000 Specific Registers */ @@ -678,4 +720,8 @@ #define E1000_VFTA_ENTRY_MASK 0x7F #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F +/* DMA Coalescing register fields */ +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based + on DMA coal */ + #endif diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h index 119869b1124d..dbaeb5f5e0c7 100644 --- a/drivers/net/igb/e1000_hw.h +++ b/drivers/net/igb/e1000_hw.h @@ -42,20 +42,35 @@ struct e1000_hw; #define E1000_DEV_ID_82576_SERDES 0x10E7 #define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 #define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 #define E1000_DEV_ID_82576_SERDES_QUAD 0x150D #define E1000_DEV_ID_82575EB_COPPER 0x10A7 #define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 #define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 #define E1000_REVISION_2 2 #define E1000_REVISION_4 4 +#define E1000_FUNC_0 0 #define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 enum e1000_mac_type { e1000_undefined = 0, e1000_82575, e1000_82576, + e1000_82580, e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ }; @@ -70,7 +85,6 @@ enum e1000_nvm_type { e1000_nvm_unknown = 0, e1000_nvm_none, e1000_nvm_eeprom_spi, - e1000_nvm_eeprom_microwire, e1000_nvm_flash_hw, e1000_nvm_flash_sw }; @@ -79,8 +93,6 @@ enum e1000_nvm_override { e1000_nvm_override_none = 0, e1000_nvm_override_spi_small, e1000_nvm_override_spi_large, - e1000_nvm_override_microwire_small, - e1000_nvm_override_microwire_large }; enum e1000_phy_type { @@ -92,6 +104,7 @@ enum e1000_phy_type { e1000_phy_gg82563, e1000_phy_igp_3, e1000_phy_ife, + e1000_phy_82580, }; enum e1000_bus_type { @@ -288,6 +301,7 @@ struct e1000_mac_operations { struct e1000_phy_operations { s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); s32 (*check_reset_block)(struct e1000_hw *); s32 (*force_speed_duplex)(struct e1000_hw *); s32 (*get_cfg_done)(struct e1000_hw *hw); @@ -339,6 +353,7 @@ struct e1000_mac_info { u16 ifs_ratio; u16 ifs_step_size; u16 mta_reg_count; + u16 uta_reg_count; /* Maximum size of the MTA register table in all supported adapters */ #define MAX_MTA_REG 128 @@ -463,6 +478,7 @@ struct e1000_mbx_info { struct e1000_dev_spec_82575 { bool sgmii_active; + bool global_device_reset; }; struct e1000_hw { diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index 7d76bb085e10..2ad358a240bf 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c @@ -185,13 +185,12 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) } if (nvm_alt_mac_addr_offset == 0xFFFF) { - ret_val = -(E1000_NOT_IMPLEMENTED); + /* There is no Alternate MAC Address */ goto out; } if (hw->bus.func == E1000_FUNC_1) - nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16); - + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; for (i = 0; i < ETH_ALEN; i += 2) { offset = nvm_alt_mac_addr_offset + (i >> 1); ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); @@ -206,14 +205,16 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) /* if multicast bit is set, the alternate address will not be used */ if (alt_mac_addr[0] & 0x01) { - ret_val = -(E1000_NOT_IMPLEMENTED); + hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); goto out; } - for (i = 0; i < ETH_ALEN; i++) - hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i]; - - hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0); + /* + * We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); out: return ret_val; @@ -246,8 +247,15 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) if (rar_low || rar_high) rar_high |= E1000_RAH_AV; + /* + * Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. 
+ */ wr32(E1000_RAL(index), rar_low); + wrfl(); wr32(E1000_RAH(index), rar_high); + wrfl(); } /** @@ -399,45 +407,43 @@ void igb_update_mc_addr_list(struct e1000_hw *hw, **/ void igb_clear_hw_cntrs_base(struct e1000_hw *hw) { - u32 temp; - - temp = rd32(E1000_CRCERRS); - temp = rd32(E1000_SYMERRS); - temp = rd32(E1000_MPC); - temp = rd32(E1000_SCC); - temp = rd32(E1000_ECOL); - temp = rd32(E1000_MCC); - temp = rd32(E1000_LATECOL); - temp = rd32(E1000_COLC); - temp = rd32(E1000_DC); - temp = rd32(E1000_SEC); - temp = rd32(E1000_RLEC); - temp = rd32(E1000_XONRXC); - temp = rd32(E1000_XONTXC); - temp = rd32(E1000_XOFFRXC); - temp = rd32(E1000_XOFFTXC); - temp = rd32(E1000_FCRUC); - temp = rd32(E1000_GPRC); - temp = rd32(E1000_BPRC); - temp = rd32(E1000_MPRC); - temp = rd32(E1000_GPTC); - temp = rd32(E1000_GORCL); - temp = rd32(E1000_GORCH); - temp = rd32(E1000_GOTCL); - temp = rd32(E1000_GOTCH); - temp = rd32(E1000_RNBC); - temp = rd32(E1000_RUC); - temp = rd32(E1000_RFC); - temp = rd32(E1000_ROC); - temp = rd32(E1000_RJC); - temp = rd32(E1000_TORL); - temp = rd32(E1000_TORH); - temp = rd32(E1000_TOTL); - temp = rd32(E1000_TOTH); - temp = rd32(E1000_TPR); - temp = rd32(E1000_TPT); - temp = rd32(E1000_MPTC); - temp = rd32(E1000_BPTC); + rd32(E1000_CRCERRS); + rd32(E1000_SYMERRS); + rd32(E1000_MPC); + rd32(E1000_SCC); + rd32(E1000_ECOL); + rd32(E1000_MCC); + rd32(E1000_LATECOL); + rd32(E1000_COLC); + rd32(E1000_DC); + rd32(E1000_SEC); + rd32(E1000_RLEC); + rd32(E1000_XONRXC); + rd32(E1000_XONTXC); + rd32(E1000_XOFFRXC); + rd32(E1000_XOFFTXC); + rd32(E1000_FCRUC); + rd32(E1000_GPRC); + rd32(E1000_BPRC); + rd32(E1000_MPRC); + rd32(E1000_GPTC); + rd32(E1000_GORCL); + rd32(E1000_GORCH); + rd32(E1000_GOTCL); + rd32(E1000_GOTCH); + rd32(E1000_RNBC); + rd32(E1000_RUC); + rd32(E1000_RFC); + rd32(E1000_ROC); + rd32(E1000_RJC); + rd32(E1000_TORL); + rd32(E1000_TORH); + rd32(E1000_TOTL); + rd32(E1000_TOTH); + rd32(E1000_TPR); + rd32(E1000_TPT); + rd32(E1000_MPTC); + rd32(E1000_BPTC); } /** diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c index ed9058eca45c..c474cdb70047 100644 --- a/drivers/net/igb/e1000_mbx.c +++ b/drivers/net/igb/e1000_mbx.c @@ -143,12 +143,16 @@ static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) if (!countdown || !mbx->ops.check_for_msg) goto out; - while (mbx->ops.check_for_msg(hw, mbx_id)) { + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { countdown--; if (!countdown) break; udelay(mbx->usec_delay); } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; out: return countdown ? 0 : -E1000_ERR_MBX; } @@ -168,12 +172,16 @@ static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) if (!countdown || !mbx->ops.check_for_ack) goto out; - while (mbx->ops.check_for_ack(hw, mbx_id)) { + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { countdown--; if (!countdown) break; udelay(mbx->usec_delay); } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; out: return countdown ? 
0 : -E1000_ERR_MBX; } @@ -217,12 +225,13 @@ out: static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; - s32 ret_val = 0; + s32 ret_val = -E1000_ERR_MBX; - if (!mbx->ops.write) + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) goto out; - /* send msg*/ + /* send msg */ ret_val = mbx->ops.write(hw, msg, size, mbx_id); /* if msg sent wait until we receive an ack */ @@ -305,6 +314,30 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) } /** + * igb_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + + + /* Take ownership of the buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + ret_val = 0; + + return ret_val; +} + +/** * igb_write_mbx_pf - Places a message in the mailbox * @hw: pointer to the HW structure * @msg: The message buffer @@ -316,27 +349,17 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, u16 vf_number) { - u32 p2v_mailbox; - s32 ret_val = 0; + s32 ret_val; u16 i; - /* Take ownership of the buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); - - /* Make sure we have ownership now... */ - p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); - if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) { - /* failed to grab ownership */ - ret_val = -E1000_ERR_MBX; + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) goto out_no_write; - } - /* - * flush any ack or msg which may already be in the queue - * as they are likely the result of an error - */ - igb_check_for_ack_pf(hw, vf_number); + /* flush msg and acks as we are overwriting the message buffer */ igb_check_for_msg_pf(hw, vf_number); + igb_check_for_ack_pf(hw, vf_number); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) @@ -367,20 +390,13 @@ out_no_write: static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, u16 vf_number) { - u32 p2v_mailbox; - s32 ret_val = 0; + s32 ret_val; u16 i; - /* Take ownership of the buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); - - /* Make sure we have ownership now... 
*/ - p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); - if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) { - /* failed to grab ownership */ - ret_val = -E1000_ERR_MBX; + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) goto out_no_read; - } /* copy the message to the mailbox memory buffer */ for (i = 0; i < size; i++) @@ -392,8 +408,6 @@ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, /* update stats */ hw->mbx.stats.msgs_rx++; - ret_val = 0; - out_no_read: return ret_val; } diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h index ebc02ea3f198..bb112fb6c3a1 100644 --- a/drivers/net/igb/e1000_mbx.h +++ b/drivers/net/igb/e1000_mbx.h @@ -58,10 +58,12 @@ #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) #define E1000_VF_RESET 0x01 /* VF requests reset */ -#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ -#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c index a88bfe2f1e8f..d83b77fa4038 100644 --- a/drivers/net/igb/e1000_nvm.c +++ b/drivers/net/igb/e1000_nvm.c @@ -78,9 +78,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) u32 mask; mask = 0x01 << (count - 1); - if (nvm->type == e1000_nvm_eeprom_microwire) - eecd &= ~E1000_EECD_DO; - else if (nvm->type == e1000_nvm_eeprom_spi) + if (nvm->type == e1000_nvm_eeprom_spi) eecd |= E1000_EECD_DO; do { @@ -220,22 +218,7 @@ static void igb_standby_nvm(struct e1000_hw *hw) struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = rd32(E1000_EECD); - if (nvm->type == e1000_nvm_eeprom_microwire) { - eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); - wr32(E1000_EECD, eecd); - wrfl(); - udelay(nvm->delay_usec); - - igb_raise_eec_clk(hw, &eecd); - - /* Select EEPROM */ - eecd |= E1000_EECD_CS; - wr32(E1000_EECD, eecd); - wrfl(); - udelay(nvm->delay_usec); - - igb_lower_eec_clk(hw, &eecd); - } else if (nvm->type == e1000_nvm_eeprom_spi) { + if (nvm->type == e1000_nvm_eeprom_spi) { /* Toggle CS to flush commands */ eecd |= E1000_EECD_CS; wr32(E1000_EECD, eecd); @@ -263,12 +246,6 @@ static void e1000_stop_nvm(struct e1000_hw *hw) /* Pull CS high */ eecd |= E1000_EECD_CS; igb_lower_eec_clk(hw, &eecd); - } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) { - /* CS on Microcwire is active-high */ - eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); - wr32(E1000_EECD, eecd); - igb_raise_eec_clk(hw, &eecd); - igb_lower_eec_clk(hw, &eecd); } } @@ -304,14 +281,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) u8 spi_stat_reg; - if (nvm->type == e1000_nvm_eeprom_microwire) { - /* Clear SK and DI */ - eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); - wr32(E1000_EECD, eecd); - /* Set CS */ - eecd |= E1000_EECD_CS; - wr32(E1000_EECD, eecd); - } else if (nvm->type == e1000_nvm_eeprom_spi) { + if (nvm->type == e1000_nvm_eeprom_spi) 
{ /* Clear SK and CS */ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); wr32(E1000_EECD, eecd); diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c index ee460600e74b..5c9d73e9bb8d 100644 --- a/drivers/net/igb/e1000_phy.c +++ b/drivers/net/igb/e1000_phy.c @@ -39,6 +39,9 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_m88_cable_length_table) / \ + sizeof(e1000_m88_cable_length_table[0])) static const u16 e1000_igp_2_cable_length_table[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, @@ -109,7 +112,10 @@ out: **/ static s32 igb_phy_reset_dsp(struct e1000_hw *hw) { - s32 ret_val; + s32 ret_val = 0; + + if (!(hw->phy.ops.write_reg)) + goto out; ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); if (ret_val) @@ -130,7 +136,7 @@ out: * Reads the MDI control regsiter in the PHY at offset and stores the * information read to data. **/ -static s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) { struct e1000_phy_info *phy = &hw->phy; u32 i, mdic = 0; @@ -188,7 +194,7 @@ out: * * Writes data to MDI control register in the PHY at offset. **/ -static s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) { struct e1000_phy_info *phy = &hw->phy; u32 i, mdic = 0; @@ -239,6 +245,103 @@ out: } /** + * igb_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + + /* + * Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return 0; +} + +/** + * igb_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. 
+ **/ +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* + * Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** * igb_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read @@ -318,6 +421,57 @@ out: } /** + * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 igb_copper_link_setup_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + if (phy->type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); + if (ret_val) + goto out; + + phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; + + ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); + if (ret_val) + goto out; + + /* Set number of link attempts before downshift */ + ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data); + if (ret_val) + goto out; + phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK; + ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data); + +out: + return ret_val; +} + +/** * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link * @hw: pointer to the HW structure * @@ -572,7 +726,7 @@ out: * and restart the negotiation process between the link partner. If * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. **/ -s32 igb_copper_link_autoneg(struct e1000_hw *hw) +static s32 igb_copper_link_autoneg(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; @@ -796,6 +950,65 @@ out: } /** + * igb_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 igb_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + + if (hw->mac.autoneg) { + /* + * Setup autoneg and flow control advertisement and perform + * autonegotiation. 
+ */ + ret_val = igb_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* + * PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* + * Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igb_phy_has_link(hw, + COPPER_LINK_UP_LIMIT, + 10, + &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igb_config_collision_dist(hw); + ret_val = igb_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY * @hw: pointer to the HW structure * @@ -903,22 +1116,19 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) igb_phy_force_speed_duplex_setup(hw, &phy_data); - /* Reset the phy to commit changes. */ - phy_data |= MII_CR_RESET; - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); if (ret_val) goto out; - udelay(1); + /* Reset the phy to commit changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; if (phy->autoneg_wait_to_complete) { hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); - ret_val = igb_phy_has_link(hw, - PHY_FORCE_LIMIT, - 100000, - &link); + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) goto out; @@ -928,8 +1138,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) * Reset the DSP and cross our fingers. */ ret_val = phy->ops.write_reg(hw, - M88E1000_PHY_PAGE_SELECT, - 0x001d); + M88E1000_PHY_PAGE_SELECT, + 0x001d); if (ret_val) goto out; ret_val = igb_phy_reset_dsp(hw); @@ -939,7 +1149,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) /* Try once more */ ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, - 100000, &link); + 100000, &link); if (ret_val) goto out; } @@ -1051,9 +1261,12 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; + s32 ret_val = 0; u16 data; + if (!(hw->phy.ops.read_reg)) + goto out; + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); if (ret_val) goto out; @@ -1288,8 +1501,14 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, * it across the board. */ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) - break; + if (ret_val) { + /* + * If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + udelay(usec_interval); + } ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); if (ret_val) break; @@ -1333,8 +1552,13 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw) index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + phy->min_cable_length = e1000_m88_cable_length_table[index]; - phy->max_cable_length = e1000_m88_cable_length_table[index+1]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; @@ -1715,3 +1939,194 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw) return 0; } +/** + * igb_check_polarity_82580 - Checks the polarity. 
+ * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +static s32 igb_check_polarity_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* + * Clear Auto-Crossover to force MDI manually. 82580 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX; + phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + if (ret_val) + goto out; + + hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); + + ret_val = igb_phy_has_link(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_82580 - Retrieve I82580 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_82580(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; + + if ((data & I82580_PHY_STATUS2_SPEED_MASK) == + I82580_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? 
e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_82580 - Determine cable length for 82580 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. + **/ +s32 igb_get_cable_length_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + + ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + goto out; + + length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> + I82580_DSTATUS_CABLE_LENGTH_SHIFT; + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + ret_val = -E1000_ERR_PHY; + + phy->cable_length = length; + +out: + return ret_val; +} diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h index ebe4b616db8a..555eb54bb6ed 100644 --- a/drivers/net/igb/e1000_phy.h +++ b/drivers/net/igb/e1000_phy.h @@ -43,7 +43,6 @@ enum e1000_smart_speed { s32 igb_check_downshift(struct e1000_hw *hw); s32 igb_check_reset_block(struct e1000_hw *hw); -s32 igb_copper_link_autoneg(struct e1000_hw *hw); s32 igb_copper_link_setup_igp(struct e1000_hw *hw); s32 igb_copper_link_setup_m88(struct e1000_hw *hw); s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); @@ -57,10 +56,19 @@ s32 igb_phy_sw_reset(struct e1000_hw *hw); s32 igb_phy_hw_reset(struct e1000_hw *hw); s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 igb_setup_copper_link(struct e1000_hw *hw); s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, u32 usec_interval, bool *success); s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_copper_link_setup_82580(struct e1000_hw *hw); +s32 igb_get_phy_info_82580(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); +s32 igb_get_cable_length_82580(struct e1000_hw *hw); /* IGP01E1000 Specific Registers */ #define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ @@ -75,6 +83,33 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw); #define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ #define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define I82580_ADDR_REG 16 +#define I82580_CFG_REG 22 +#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +#define I82580_CTRL_REG 23 +#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) + +/* 82580 specific PHY registers */ +#define I82580_PHY_CTRL_2 18 +#define I82580_PHY_LBK_CTRL 19 +#define I82580_PHY_STATUS_2 26 +#define I82580_PHY_DIAG_STATUS 31 + +/* I82580 PHY Status 2 */ +#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82580_PHY_STATUS2_MDIX 0x0800 +#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 +#define 
I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 + +/* I82580 PHY Control 2 */ +#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400 +#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 + +/* I82580 PHY Diagnostics Status */ +#define I82580_DSTATUS_CABLE_LENGTH 0x03FC +#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 /* Enable flexible speed on link-up */ #define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ #define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h index 345d1442d6d6..dd4e6ffd29f5 100644 --- a/drivers/net/igb/e1000_regs.h +++ b/drivers/net/igb/e1000_regs.h @@ -34,6 +34,7 @@ #define E1000_EERD 0x00014 /* EEPROM Read - RW */ #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ #define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ #define E1000_SCTL 0x00024 /* SerDes Control - RW */ #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ @@ -76,59 +77,20 @@ #define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ /* IEEE 1588 TIMESYNCH */ -#define E1000_TSYNCTXCTL 0x0B614 -#define E1000_TSYNCTXCTL_VALID (1<<0) -#define E1000_TSYNCTXCTL_ENABLED (1<<4) -#define E1000_TSYNCRXCTL 0x0B620 -#define E1000_TSYNCRXCTL_VALID (1<<0) -#define E1000_TSYNCRXCTL_ENABLED (1<<4) -enum { - E1000_TSYNCRXCTL_TYPE_L2_V2 = 0, - E1000_TSYNCRXCTL_TYPE_L4_V1 = (1<<1), - E1000_TSYNCRXCTL_TYPE_L2_L4_V2 = (1<<2), - E1000_TSYNCRXCTL_TYPE_ALL = (1<<3), - E1000_TSYNCRXCTL_TYPE_EVENT_V2 = (1<<3) | (1<<1), -}; -#define E1000_TSYNCRXCFG 0x05F50 -enum { - E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE = 0<<0, - E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE = 1<<0, - E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE = 2<<0, - E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE = 3<<0, - E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE = 4<<0, - - E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE = 0<<8, - E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE = 1<<8, - E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE = 2<<8, - E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE = 3<<8, - E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE = 8<<8, - E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE = 9<<8, - E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE = 0xA<<8, - E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE = 0xB<<8, - E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE = 0xC<<8, - E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE = 0xD<<8, -}; -#define E1000_SYSTIML 0x0B600 -#define E1000_SYSTIMH 0x0B604 -#define E1000_TIMINCA 0x0B608 - -#define E1000_RXMTRL 0x0B634 -#define E1000_RXSTMPL 0x0B624 -#define E1000_RXSTMPH 0x0B628 -#define E1000_RXSATRL 0x0B62C -#define E1000_RXSATRH 0x0B630 - -#define E1000_TXSTMPL 0x0B618 -#define E1000_TXSTMPH 0x0B61C - -#define E1000_ETQF0 0x05CB0 -#define E1000_ETQF1 0x05CB4 -#define E1000_ETQF2 0x05CB8 -#define E1000_ETQF3 0x05CBC -#define E1000_ETQF4 0x05CC0 -#define E1000_ETQF5 0x05CC4 -#define E1000_ETQF6 0x05CC8 -#define E1000_ETQF7 0x05CCC +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO 
*/ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ /* Filtering Registers */ #define E1000_SAQF(_n) (0x5980 + 4 * (_n)) @@ -143,7 +105,9 @@ enum { #define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) + /* Split and Replication RX Control - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ /* * Convenience macros * @@ -288,10 +252,17 @@ enum { #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ #define E1000_RA 0x05400 /* Receive Address - RW Array */ #define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ (0x054E0 + ((_i - 16) * 8))) #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ (0x054E4 + ((_i - 16) * 8))) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) #define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ #define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ #define E1000_WUC 0x05800 /* Wakeup Control - RW */ @@ -331,6 +302,7 @@ enum { #define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ #define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ #define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ #define E1000_IOVTCL 0x05BBC /* IOV Control Register */ /* These act per VF so an array friendly macro is used */ #define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) @@ -348,4 +320,6 @@ enum { #define array_rd32(reg, offset) \ (readl(hw->hw_addr + reg + ((offset) << 2))) +/* DMA Coalescing registers */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ #endif diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index 7126fea26fec..b1c1eb88893f 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -55,12 +55,14 @@ struct igb_adapter; #define IGB_DEFAULT_ITR 3 /* dynamic */ #define IGB_MAX_ITR_USECS 10000 #define IGB_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_Q_VECTORS 8 /* Transmit and receive queues */ -#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \ - (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4) -#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES -#define IGB_ABS_MAX_TX_QUEUES 4 +#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \ + (hw->mac.type > e1000_82575 ? 
8 : 4)) +#define IGB_ABS_MAX_TX_QUEUES 8 +#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES #define IGB_MAX_VF_MC_ENTRIES 30 #define IGB_MAX_VF_FUNCTIONS 8 @@ -71,9 +73,14 @@ struct vf_data_storage { u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; u16 vlans_enabled; - bool clear_to_send; + u32 flags; + unsigned long last_nack; }; +#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ +#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ +#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ + /* RX descriptor control thresholds. * PTHRESH - MAC will consider prefetch if it has fewer than this number of * descriptors available in its onboard memory. @@ -85,17 +92,19 @@ struct vf_data_storage { * descriptors until either it has this many to write back, or the * ITR timer expires. */ -#define IGB_RX_PTHRESH 16 +#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8) #define IGB_RX_HTHRESH 8 #define IGB_RX_WTHRESH 1 +#define IGB_TX_PTHRESH 8 +#define IGB_TX_HTHRESH 1 +#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ + adapter->msix_entries) ? 0 : 16) /* this is the size past which hardware will drop packets when setting LPE=0 */ #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 /* Supported Rx Buffer Sizes */ #define IGB_RXBUFFER_128 128 /* Used for packet split */ -#define IGB_RXBUFFER_256 256 /* Used for packet split */ -#define IGB_RXBUFFER_512 512 #define IGB_RXBUFFER_1024 1024 #define IGB_RXBUFFER_2048 2048 #define IGB_RXBUFFER_16384 16384 @@ -128,12 +137,13 @@ struct igb_buffer { unsigned long time_stamp; u16 length; u16 next_to_watch; + u16 mapped_as_page; }; /* RX */ struct { struct page *page; - u64 page_dma; - unsigned int page_offset; + dma_addr_t page_dma; + u16 page_offset; }; }; }; @@ -141,36 +151,55 @@ struct igb_buffer { struct igb_tx_queue_stats { u64 packets; u64 bytes; + u64 restart_queue; }; struct igb_rx_queue_stats { u64 packets; u64 bytes; u64 drops; + u64 csum_err; + u64 alloc_failed; }; -struct igb_ring { +struct igb_q_vector { struct igb_adapter *adapter; /* backlink */ - void *desc; /* descriptor ring memory */ - dma_addr_t dma; /* phys address of the ring */ - unsigned int size; /* length of desc. ring in bytes */ - unsigned int count; /* number of desc. in the ring */ - u16 next_to_use; - u16 next_to_clean; - u16 head; - u16 tail; - struct igb_buffer *buffer_info; /* array of buffer info structs */ + struct igb_ring *rx_ring; + struct igb_ring *tx_ring; + struct napi_struct napi; u32 eims_value; - u32 itr_val; - u16 itr_register; u16 cpu; - u16 queue_index; - u16 reg_idx; + u16 itr_val; + u8 set_itr; + u8 itr_shift; + void __iomem *itr_register; + + char name[IFNAMSIZ + 9]; +}; + +struct igb_ring { + struct igb_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct pci_dev *pdev; /* pci device for dma mapping */ + dma_addr_t dma; /* phys address of the ring */ + void *desc; /* descriptor ring memory */ + unsigned int size; /* length of desc. ring in bytes */ + u16 count; /* number of desc. 
in the ring */ + u16 next_to_use; + u16 next_to_clean; + u8 queue_index; + u8 reg_idx; + void __iomem *head; + void __iomem *tail; + struct igb_buffer *buffer_info; /* array of buffer info structs */ + unsigned int total_bytes; unsigned int total_packets; + u32 flags; + union { /* TX */ struct { @@ -180,16 +209,18 @@ struct igb_ring { /* RX */ struct { struct igb_rx_queue_stats rx_stats; - u64 rx_queue_drops; - struct napi_struct napi; - int set_itr; - struct igb_ring *buddy; + u32 rx_buffer_len; }; }; - - char name[IFNAMSIZ + 5]; }; +#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */ +#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */ + +#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */ + +#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS) + #define E1000_RX_DESC_ADV(R, i) \ (&(((union e1000_adv_rx_desc *)((R).desc))[i])) #define E1000_TX_DESC_ADV(R, i) \ @@ -197,6 +228,15 @@ struct igb_ring { #define E1000_TX_CTXTDESC_ADV(R, i) \ (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i])) +/* igb_desc_unused - calculate if we have unused descriptors */ +static inline int igb_desc_unused(struct igb_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + /* board specific private data structure */ struct igb_adapter { @@ -205,18 +245,14 @@ struct igb_adapter { struct vlan_group *vlgrp; u16 mng_vlan_id; u32 bd_number; - u32 rx_buffer_len; u32 wol; u32 en_mng_pt; u16 link_speed; u16 link_duplex; - unsigned int total_tx_bytes; - unsigned int total_tx_packets; - unsigned int total_rx_bytes; - unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ - u32 itr; - u32 itr_setting; + u32 rx_itr_setting; + u32 tx_itr_setting; u16 tx_itr; u16 rx_itr; @@ -229,13 +265,7 @@ struct igb_adapter { /* TX */ struct igb_ring *tx_ring; /* One per active queue */ - unsigned int restart_queue; unsigned long tx_queue_len; - u32 txd_cmd; - u32 gotc; - u64 gotc_old; - u64 tpt_old; - u64 colc_old; u32 tx_timeout_count; /* RX */ @@ -243,20 +273,12 @@ struct igb_adapter { int num_tx_queues; int num_rx_queues; - u64 hw_csum_err; - u64 hw_csum_good; - u32 alloc_rx_buff_failed; - u32 gorc; - u64 gorc_old; - u16 rx_ps_hdr_size; u32 max_frame_size; u32 min_frame_size; /* OS defined structs */ struct net_device *netdev; - struct napi_struct napi; struct pci_dev *pdev; - struct net_device_stats net_stats; struct cyclecounter cycles; struct timecounter clock; struct timecompare compare; @@ -273,6 +295,9 @@ struct igb_adapter { struct igb_ring test_rx_ring; int msg_enable; + + unsigned int num_q_vectors; + struct igb_q_vector *q_vector[MAX_Q_VECTORS]; struct msix_entry *msix_entries; u32 eims_enable_mask; u32 eims_other; @@ -283,18 +308,20 @@ struct igb_adapter { u32 eeprom_wol; struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES]; - unsigned int tx_ring_count; - unsigned int rx_ring_count; + u16 tx_ring_count; + u16 rx_ring_count; unsigned int vfs_allocated_count; struct vf_data_storage *vf_data; + u32 rss_queues; }; #define IGB_FLAG_HAS_MSI (1 << 0) #define IGB_FLAG_DCA_ENABLED (1 << 1) #define IGB_FLAG_QUAD_PORT_A (1 << 2) -#define IGB_FLAG_NEED_CTX_IDX (1 << 3) -#define IGB_FLAG_RX_CSUM_DISABLED (1 << 4) +#define IGB_FLAG_QUEUE_PAIRS (1 << 3) +#define IGB_82576_TSYNC_SHIFT 19 +#define IGB_82580_TSYNC_SHIFT 24 enum e1000_state_t { __IGB_TESTING, __IGB_RESETTING, @@ -314,10 +341,18 @@ extern void 
igb_down(struct igb_adapter *); extern void igb_reinit_locked(struct igb_adapter *); extern void igb_reset(struct igb_adapter *); extern int igb_set_spd_dplx(struct igb_adapter *, u16); -extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *); -extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *); +extern int igb_setup_tx_resources(struct igb_ring *); +extern int igb_setup_rx_resources(struct igb_ring *); extern void igb_free_tx_resources(struct igb_ring *); extern void igb_free_rx_resources(struct igb_ring *); +extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); +extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +extern void igb_setup_tctl(struct igb_adapter *); +extern void igb_setup_rctl(struct igb_adapter *); +extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *); +extern void igb_unmap_and_free_tx_resource(struct igb_ring *, + struct igb_buffer *); +extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int); extern void igb_update_stats(struct igb_adapter *); extern void igb_set_ethtool_ops(struct net_device *); diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index b243ed3b0c36..ac9d5272650d 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -44,78 +44,94 @@ struct igb_stats { int stat_offset; }; -#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \ - offsetof(struct igb_adapter, m) +#define IGB_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ + .stat_offset = offsetof(struct igb_adapter, _stat) \ +} static const struct igb_stats igb_gstrings_stats[] = { - { "rx_packets", IGB_STAT(stats.gprc) }, - { "tx_packets", IGB_STAT(stats.gptc) }, - { "rx_bytes", IGB_STAT(stats.gorc) }, - { "tx_bytes", IGB_STAT(stats.gotc) }, - { "rx_broadcast", IGB_STAT(stats.bprc) }, - { "tx_broadcast", IGB_STAT(stats.bptc) }, - { "rx_multicast", IGB_STAT(stats.mprc) }, - { "tx_multicast", IGB_STAT(stats.mptc) }, - { "rx_errors", IGB_STAT(net_stats.rx_errors) }, - { "tx_errors", IGB_STAT(net_stats.tx_errors) }, - { "tx_dropped", IGB_STAT(net_stats.tx_dropped) }, - { "multicast", IGB_STAT(stats.mprc) }, - { "collisions", IGB_STAT(stats.colc) }, - { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) }, - { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) }, - { "rx_crc_errors", IGB_STAT(stats.crcerrs) }, - { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) }, - { "rx_no_buffer_count", IGB_STAT(stats.rnbc) }, - { "rx_queue_drop_packet_count", IGB_STAT(net_stats.rx_fifo_errors) }, - { "rx_missed_errors", IGB_STAT(stats.mpc) }, - { "tx_aborted_errors", IGB_STAT(stats.ecol) }, - { "tx_carrier_errors", IGB_STAT(stats.tncrs) }, - { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) }, - { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) }, - { "tx_window_errors", IGB_STAT(stats.latecol) }, - { "tx_abort_late_coll", IGB_STAT(stats.latecol) }, - { "tx_deferred_ok", IGB_STAT(stats.dc) }, - { "tx_single_coll_ok", IGB_STAT(stats.scc) }, - { "tx_multi_coll_ok", IGB_STAT(stats.mcc) }, - { "tx_timeout_count", IGB_STAT(tx_timeout_count) }, - { "tx_restart_queue", IGB_STAT(restart_queue) }, - { "rx_long_length_errors", IGB_STAT(stats.roc) }, - { "rx_short_length_errors", IGB_STAT(stats.ruc) }, - { "rx_align_errors", IGB_STAT(stats.algnerrc) }, - { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) }, - { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) }, - { 
"rx_flow_control_xon", IGB_STAT(stats.xonrxc) }, - { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) }, - { "tx_flow_control_xon", IGB_STAT(stats.xontxc) }, - { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) }, - { "rx_long_byte_count", IGB_STAT(stats.gorc) }, - { "rx_csum_offload_good", IGB_STAT(hw_csum_good) }, - { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) }, - { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) }, - { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) }, - { "tx_smbus", IGB_STAT(stats.mgptc) }, - { "rx_smbus", IGB_STAT(stats.mgprc) }, - { "dropped_smbus", IGB_STAT(stats.mgpdc) }, + IGB_STAT("rx_packets", stats.gprc), + IGB_STAT("tx_packets", stats.gptc), + IGB_STAT("rx_bytes", stats.gorc), + IGB_STAT("tx_bytes", stats.gotc), + IGB_STAT("rx_broadcast", stats.bprc), + IGB_STAT("tx_broadcast", stats.bptc), + IGB_STAT("rx_multicast", stats.mprc), + IGB_STAT("tx_multicast", stats.mptc), + IGB_STAT("multicast", stats.mprc), + IGB_STAT("collisions", stats.colc), + IGB_STAT("rx_crc_errors", stats.crcerrs), + IGB_STAT("rx_no_buffer_count", stats.rnbc), + IGB_STAT("rx_missed_errors", stats.mpc), + IGB_STAT("tx_aborted_errors", stats.ecol), + IGB_STAT("tx_carrier_errors", stats.tncrs), + IGB_STAT("tx_window_errors", stats.latecol), + IGB_STAT("tx_abort_late_coll", stats.latecol), + IGB_STAT("tx_deferred_ok", stats.dc), + IGB_STAT("tx_single_coll_ok", stats.scc), + IGB_STAT("tx_multi_coll_ok", stats.mcc), + IGB_STAT("tx_timeout_count", tx_timeout_count), + IGB_STAT("rx_long_length_errors", stats.roc), + IGB_STAT("rx_short_length_errors", stats.ruc), + IGB_STAT("rx_align_errors", stats.algnerrc), + IGB_STAT("tx_tcp_seg_good", stats.tsctc), + IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGB_STAT("rx_flow_control_xon", stats.xonrxc), + IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGB_STAT("tx_flow_control_xon", stats.xontxc), + IGB_STAT("tx_flow_control_xoff", stats.xofftxc), + IGB_STAT("rx_long_byte_count", stats.gorc), + IGB_STAT("tx_dma_out_of_sync", stats.doosync), + IGB_STAT("tx_smbus", stats.mgptc), + IGB_STAT("rx_smbus", stats.mgprc), + IGB_STAT("dropped_smbus", stats.mgpdc), +}; + +#define IGB_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} +static const struct igb_stats igb_gstrings_net_stats[] = { + IGB_NETDEV_STAT(rx_errors), + IGB_NETDEV_STAT(tx_errors), + IGB_NETDEV_STAT(tx_dropped), + IGB_NETDEV_STAT(rx_length_errors), + IGB_NETDEV_STAT(rx_over_errors), + IGB_NETDEV_STAT(rx_frame_errors), + IGB_NETDEV_STAT(rx_fifo_errors), + IGB_NETDEV_STAT(tx_fifo_errors), + IGB_NETDEV_STAT(tx_heartbeat_errors) }; -#define IGB_QUEUE_STATS_LEN \ - (((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues)* \ - (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \ - ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \ - (sizeof(struct igb_tx_queue_stats) / sizeof(u64)))) #define IGB_GLOBAL_STATS_LEN \ - sizeof(igb_gstrings_stats) / sizeof(struct igb_stats) -#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN) + (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) +#define IGB_NETDEV_STATS_LEN \ + (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) +#define IGB_RX_QUEUE_STATS_LEN \ + (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) +#define IGB_TX_QUEUE_STATS_LEN \ + (sizeof(struct igb_tx_queue_stats) / sizeof(u64)) +#define IGB_QUEUE_STATS_LEN \ + ((((struct igb_adapter 
*)netdev_priv(netdev))->num_rx_queues * \ + IGB_RX_QUEUE_STATS_LEN) + \ + (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGB_TX_QUEUE_STATS_LEN)) +#define IGB_STATS_LEN \ + (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) + static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Eeprom test (offline)", "Interrupt test (offline)", "Loopback test (offline)", "Link test (on/offline)" }; -#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN +#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + u32 status; if (hw->phy.media_type == e1000_media_type_copper) { @@ -150,17 +166,20 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->transceiver = XCVR_INTERNAL; - if (rd32(E1000_STATUS) & E1000_STATUS_LU) { + status = rd32(E1000_STATUS); - adapter->hw.mac.ops.get_speed_and_duplex(hw, - &adapter->link_speed, - &adapter->link_duplex); - ecmd->speed = adapter->link_speed; + if (status & E1000_STATUS_LU) { - /* unfortunately FULL_DUPLEX != DUPLEX_FULL - * and HALF_DUPLEX != DUPLEX_HALF */ + if ((status & E1000_STATUS_SPEED_1000) || + hw->phy.media_type != e1000_media_type_copper) + ecmd->speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + ecmd->speed = SPEED_100; + else + ecmd->speed = SPEED_10; - if (adapter->link_duplex == FULL_DUPLEX) + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != e1000_media_type_copper) ecmd->duplex = DUPLEX_FULL; else ecmd->duplex = DUPLEX_HALF; @@ -251,8 +270,9 @@ static int igb_set_pauseparam(struct net_device *netdev, if (netif_running(adapter->netdev)) { igb_down(adapter); igb_up(adapter); - } else + } else { igb_reset(adapter); + } } else { if (pause->rx_pause && pause->tx_pause) hw->fc.requested_mode = e1000_fc_full; @@ -276,17 +296,20 @@ static int igb_set_pauseparam(struct net_device *netdev, static u32 igb_get_rx_csum(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); - return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED); + return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM); } static int igb_set_rx_csum(struct net_device *netdev, u32 data) { struct igb_adapter *adapter = netdev_priv(netdev); + int i; - if (data) - adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED; - else - adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED; + for (i = 0; i < adapter->num_rx_queues; i++) { + if (data) + adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM; + else + adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM; + } return 0; } @@ -302,7 +325,7 @@ static int igb_set_tx_csum(struct net_device *netdev, u32 data) if (data) { netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - if (adapter->hw.mac.type == e1000_82576) + if (adapter->hw.mac.type >= e1000_82576) netdev->features |= NETIF_F_SCTP_CSUM; } else { netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -496,19 +519,10 @@ static void igb_get_regs(struct net_device *netdev, regs_buff[119] = adapter->stats.scvpc; regs_buff[120] = adapter->stats.hrmpc; - /* These should probably be added to e1000_regs.h instead */ - #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4)) - #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) - #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) - #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) - #define E1000_FFMT_REG(_i) (0x09000 + ((_i) 
* 8)) - #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) - #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) - for (i = 0; i < 4; i++) regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); for (i = 0; i < 4; i++) - regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i)); + regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); for (i = 0; i < 4; i++) regs_buff[129 + i] = rd32(E1000_RDBAL(i)); for (i = 0; i < 4; i++) @@ -733,17 +747,17 @@ static int igb_set_ringparam(struct net_device *netdev, struct igb_adapter *adapter = netdev_priv(netdev); struct igb_ring *temp_ring; int i, err = 0; - u32 new_rx_count, new_tx_count; + u16 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD); - new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD); + new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); - new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD); - new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD); + new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring_count) && @@ -788,7 +802,7 @@ static int igb_set_ringparam(struct net_device *netdev, for (i = 0; i < adapter->num_tx_queues; i++) { temp_ring[i].count = new_tx_count; - err = igb_setup_tx_resources(adapter, &temp_ring[i]); + err = igb_setup_tx_resources(&temp_ring[i]); if (err) { while (i) { i--; @@ -813,7 +827,7 @@ static int igb_set_ringparam(struct net_device *netdev, for (i = 0; i < adapter->num_rx_queues; i++) { temp_ring[i].count = new_rx_count; - err = igb_setup_rx_resources(adapter, &temp_ring[i]); + err = igb_setup_rx_resources(&temp_ring[i]); if (err) { while (i) { i--; @@ -867,6 +881,49 @@ struct igb_reg_test { #define TABLE64_TEST_LO 5 #define TABLE64_TEST_HI 6 +/* 82580 reg test */ +static struct igb_reg_test reg_test_82580[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* RDH is read-only for 82580, only test RDT. 
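/*
 * Illustrative sketch, not part of the patch: roughly how one entry of the
 * 82580 register test table above is expected to be consumed.  An entry such
 * as { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF } asks
 * the test to walk four RDT registers spaced 0x100 apart and run the pattern
 * test on each with the given read and write masks.  The struct field names
 * and the reg_pattern_test() parameter order are assumptions inferred from
 * the table layout and the helper shown further down; the function name is
 * hypothetical.
 */
static void igb_run_reg_test_entry_sketch(struct igb_adapter *adapter,
					  u64 *data, struct igb_reg_test *t)
{
	u32 i;

	for (i = 0; i < t->array_len; i++) {
		if (t->test_type == PATTERN_TEST)
			reg_pattern_test(adapter, data,
					 t->reg + (i * t->reg_offset),
					 t->mask, t->write);
	}
}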
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + /* 82576 reg test */ static struct igb_reg_test reg_test_82576[] = { { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, @@ -944,7 +1001,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, { struct e1000_hw *hw = &adapter->hw; u32 pat, val; - u32 _test[] = + static const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { wr32(reg, (_test[pat] & write)); @@ -957,6 +1014,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, return 1; } } + return 0; } @@ -974,6 +1032,7 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, *data = reg; return 1; } + return 0; } @@ -996,14 +1055,18 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) u32 value, before, after; u32 i, toggle; - toggle = 0x7FFFF3FF; - switch (adapter->hw.mac.type) { + case e1000_82580: + test = reg_test_82580; + toggle = 0x7FEFF3FF; + break; case e1000_82576: test = reg_test_82576; + toggle = 0x7FFFF3FF; break; default: test = reg_test_82575; + toggle = 0x7FFFF3FF; break; } @@ -1081,8 +1144,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) *data = 0; /* Read and add up the contents of the EEPROM */ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { - if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) - < 0) { + if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) { *data = 1; break; } @@ -1098,8 +1160,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) static irqreturn_t igb_test_intr(int irq, void *data) { - struct net_device *netdev = (struct net_device *) data; - struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_adapter *adapter = (struct igb_adapter *) data; struct e1000_hw *hw = &adapter->hw; adapter->test_icr |= rd32(E1000_ICR); @@ -1117,38 +1178,45 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) *data = 0; /* Hook up test interrupt handler just for this test */ - if (adapter->msix_entries) - /* NOTE: we don't 
test MSI-X interrupts here, yet */ - return 0; - - if (adapter->flags & IGB_FLAG_HAS_MSI) { + if (adapter->msix_entries) { + if (request_irq(adapter->msix_entries[0].vector, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { shared_int = false; - if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { + if (request_irq(irq, + igb_test_intr, 0, netdev->name, adapter)) { *data = 1; return -1; } - } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED, - netdev->name, netdev)) { + } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, + netdev->name, adapter)) { shared_int = false; - } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, - netdev->name, netdev)) { + } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, + netdev->name, adapter)) { *data = 1; return -1; } dev_info(&adapter->pdev->dev, "testing %s interrupt\n", (shared_int ? "shared" : "unshared")); + /* Disable all the interrupts */ - wr32(E1000_IMC, 0xFFFFFFFF); + wr32(E1000_IMC, ~0); msleep(10); /* Define all writable bits for ICS */ - switch(hw->mac.type) { + switch (hw->mac.type) { case e1000_82575: ics_mask = 0x37F47EDD; break; case e1000_82576: ics_mask = 0x77D4FBFD; break; + case e1000_82580: + ics_mask = 0x77DCFED5; + break; default: ics_mask = 0x7FFFFFFF; break; @@ -1232,190 +1300,61 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) msleep(10); /* Unhook test interrupt handler */ - free_irq(irq, netdev); + if (adapter->msix_entries) + free_irq(adapter->msix_entries[0].vector, adapter); + else + free_irq(irq, adapter); return *data; } static void igb_free_desc_rings(struct igb_adapter *adapter) { - struct igb_ring *tx_ring = &adapter->test_tx_ring; - struct igb_ring *rx_ring = &adapter->test_rx_ring; - struct pci_dev *pdev = adapter->pdev; - int i; - - if (tx_ring->desc && tx_ring->buffer_info) { - for (i = 0; i < tx_ring->count; i++) { - struct igb_buffer *buf = &(tx_ring->buffer_info[i]); - if (buf->dma) - pci_unmap_single(pdev, buf->dma, buf->length, - PCI_DMA_TODEVICE); - if (buf->skb) - dev_kfree_skb(buf->skb); - } - } - - if (rx_ring->desc && rx_ring->buffer_info) { - for (i = 0; i < rx_ring->count; i++) { - struct igb_buffer *buf = &(rx_ring->buffer_info[i]); - if (buf->dma) - pci_unmap_single(pdev, buf->dma, - IGB_RXBUFFER_2048, - PCI_DMA_FROMDEVICE); - if (buf->skb) - dev_kfree_skb(buf->skb); - } - } - - if (tx_ring->desc) { - pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, - tx_ring->dma); - tx_ring->desc = NULL; - } - if (rx_ring->desc) { - pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, - rx_ring->dma); - rx_ring->desc = NULL; - } - - kfree(tx_ring->buffer_info); - tx_ring->buffer_info = NULL; - kfree(rx_ring->buffer_info); - rx_ring->buffer_info = NULL; - - return; + igb_free_tx_resources(&adapter->test_tx_ring); + igb_free_rx_resources(&adapter->test_rx_ring); } static int igb_setup_desc_rings(struct igb_adapter *adapter) { - struct e1000_hw *hw = &adapter->hw; struct igb_ring *tx_ring = &adapter->test_tx_ring; struct igb_ring *rx_ring = &adapter->test_rx_ring; - struct pci_dev *pdev = adapter->pdev; - struct igb_buffer *buffer_info; - u32 rctl; - int i, ret_val; + struct e1000_hw *hw = &adapter->hw; + int ret_val; /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IGB_DEFAULT_TXD; + tx_ring->pdev = adapter->pdev; + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->vfs_allocated_count; - if (!tx_ring->count) - tx_ring->count = 
IGB_DEFAULT_TXD; - - tx_ring->buffer_info = kcalloc(tx_ring->count, - sizeof(struct igb_buffer), - GFP_KERNEL); - if (!tx_ring->buffer_info) { + if (igb_setup_tx_resources(tx_ring)) { ret_val = 1; goto err_nomem; } - tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); - tx_ring->size = ALIGN(tx_ring->size, 4096); - tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, - &tx_ring->dma); - if (!tx_ring->desc) { - ret_val = 2; - goto err_nomem; - } - tx_ring->next_to_use = tx_ring->next_to_clean = 0; - - wr32(E1000_TDBAL(0), - ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); - wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32)); - wr32(E1000_TDLEN(0), - tx_ring->count * sizeof(union e1000_adv_tx_desc)); - wr32(E1000_TDH(0), 0); - wr32(E1000_TDT(0), 0); - wr32(E1000_TCTL, - E1000_TCTL_PSP | E1000_TCTL_EN | - E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | - E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); - - for (i = 0; i < tx_ring->count; i++) { - union e1000_adv_tx_desc *tx_desc; - struct sk_buff *skb; - unsigned int size = 1024; - - tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); - skb = alloc_skb(size, GFP_KERNEL); - if (!skb) { - ret_val = 3; - goto err_nomem; - } - skb_put(skb, size); - buffer_info = &tx_ring->buffer_info[i]; - buffer_info->skb = skb; - buffer_info->length = skb->len; - buffer_info->dma = pci_map_single(pdev, skb->data, skb->len, - PCI_DMA_TODEVICE); - tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); - tx_desc->read.olinfo_status = cpu_to_le32(skb->len) << - E1000_ADVTXD_PAYLEN_SHIFT; - tx_desc->read.cmd_type_len = cpu_to_le32(skb->len); - tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP | - E1000_TXD_CMD_IFCS | - E1000_TXD_CMD_RS | - E1000_ADVTXD_DTYP_DATA | - E1000_ADVTXD_DCMD_DEXT); - } + igb_setup_tctl(adapter); + igb_configure_tx_ring(adapter, tx_ring); /* Setup Rx descriptor ring and Rx buffers */ - - if (!rx_ring->count) - rx_ring->count = IGB_DEFAULT_RXD; - - rx_ring->buffer_info = kcalloc(rx_ring->count, - sizeof(struct igb_buffer), - GFP_KERNEL); - if (!rx_ring->buffer_info) { - ret_val = 4; + rx_ring->count = IGB_DEFAULT_RXD; + rx_ring->pdev = adapter->pdev; + rx_ring->netdev = adapter->netdev; + rx_ring->rx_buffer_len = IGB_RXBUFFER_2048; + rx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_rx_resources(rx_ring)) { + ret_val = 3; goto err_nomem; } - rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); - rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, - &rx_ring->dma); - if (!rx_ring->desc) { - ret_val = 5; - goto err_nomem; - } - rx_ring->next_to_use = rx_ring->next_to_clean = 0; + /* set the default queue to queue 0 of PF */ + wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); - rctl = rd32(E1000_RCTL); - wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); - wr32(E1000_RDBAL(0), - ((u64) rx_ring->dma & 0xFFFFFFFF)); - wr32(E1000_RDBAH(0), - ((u64) rx_ring->dma >> 32)); - wr32(E1000_RDLEN(0), rx_ring->size); - wr32(E1000_RDH(0), 0); - wr32(E1000_RDT(0), 0); - rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); - rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | - (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); - wr32(E1000_RCTL, rctl); - wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF); - - for (i = 0; i < rx_ring->count; i++) { - union e1000_adv_rx_desc *rx_desc; - struct sk_buff *skb; - - buffer_info = &rx_ring->buffer_info[i]; - rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); - skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN, - GFP_KERNEL); - if (!skb) { - ret_val = 6; - goto 
err_nomem; - } - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->skb = skb; - buffer_info->dma = pci_map_single(pdev, skb->data, - IGB_RXBUFFER_2048, - PCI_DMA_FROMDEVICE); - rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); - memset(skb->data, 0x00, skb->len); - } + /* enable receive ring */ + igb_setup_rctl(adapter); + igb_configure_rx_ring(adapter, rx_ring); + + igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring)); return 0; @@ -1449,6 +1388,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); /* autoneg off */ igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else if (hw->phy.type == e1000_phy_82580) { + /* enable MII loopback */ + igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); } ctrl_reg = rd32(E1000_CTRL); @@ -1491,7 +1433,10 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 reg; - if (hw->phy.media_type == e1000_media_type_internal_serdes) { + reg = rd32(E1000_CTRL_EXT); + + /* use CTRL_EXT to identify link type as SGMII can appear as copper */ + if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { reg = rd32(E1000_RCTL); reg |= E1000_RCTL_LBM_TCVR; wr32(E1000_RCTL, reg); @@ -1522,11 +1467,9 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter) wr32(E1000_PCS_LCTL, reg); return 0; - } else if (hw->phy.media_type == e1000_media_type_copper) { - return igb_set_phy_loopback(adapter); } - return 7; + return igb_set_phy_loopback(adapter); } static void igb_loopback_cleanup(struct igb_adapter *adapter) @@ -1552,35 +1495,99 @@ static void igb_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) { memset(skb->data, 0xFF, frame_size); - frame_size &= ~1; - memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); + frame_size /= 2; + memset(&skb->data[frame_size], 0xAA, frame_size - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); } static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) { - frame_size &= ~1; - if (*(skb->data + 3) == 0xFF) - if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) + frame_size /= 2; + if (*(skb->data + 3) == 0xFF) { + if ((*(skb->data + frame_size + 10) == 0xBE) && + (*(skb->data + frame_size + 12) == 0xAF)) { return 0; + } + } return 13; } +static int igb_clean_test_rings(struct igb_ring *rx_ring, + struct igb_ring *tx_ring, + unsigned int size) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_buffer *buffer_info; + int rx_ntc, tx_ntc, count = 0; + u32 staterr; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + while (staterr & E1000_RXD_STAT_DD) { + /* check rx buffer */ + buffer_info = &rx_ring->buffer_info[rx_ntc]; + + /* unmap rx buffer, will be remapped by alloc_rx_buffers */ + pci_unmap_single(rx_ring->pdev, + buffer_info->dma, + rx_ring->rx_buffer_len, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + + /* verify contents of skb */ + if (!igb_check_lbtest_frame(buffer_info->skb, size)) + count++; + + /* unmap buffer on tx side */ + buffer_info = &tx_ring->buffer_info[tx_ntc]; + igb_unmap_and_free_tx_resource(tx_ring, buffer_info); + + /* increment rx/tx next to clean counters */ + rx_ntc++; + 
if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + + /* re-map buffers to ring, store next to clean values */ + igb_alloc_rx_buffers_adv(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + static int igb_run_loopback_test(struct igb_adapter *adapter) { - struct e1000_hw *hw = &adapter->hw; struct igb_ring *tx_ring = &adapter->test_tx_ring; struct igb_ring *rx_ring = &adapter->test_rx_ring; - struct pci_dev *pdev = adapter->pdev; - int i, j, k, l, lc, good_cnt; - int ret_val = 0; - unsigned long time; + int i, j, lc, good_cnt, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; - wr32(E1000_RDT(0), rx_ring->count - 1); + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; - /* Calculate the loop count based on the largest descriptor ring + /* place data into test skb */ + igb_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring * The idea is to wrap the largest ring a number of times using 64 * send/receive pairs during each loop */ @@ -1590,50 +1597,36 @@ static int igb_run_loopback_test(struct igb_adapter *adapter) else lc = ((rx_ring->count / 64) * 2) + 1; - k = l = 0; for (j = 0; j <= lc; j++) { /* loop count loop */ - for (i = 0; i < 64; i++) { /* send the packets */ - igb_create_lbtest_frame(tx_ring->buffer_info[k].skb, - 1024); - pci_dma_sync_single_for_device(pdev, - tx_ring->buffer_info[k].dma, - tx_ring->buffer_info[k].length, - PCI_DMA_TODEVICE); - k++; - if (k == tx_ring->count) - k = 0; - } - wr32(E1000_TDT(0), k); - msleep(200); - time = jiffies; /* set the start time for the receive */ + /* reset count of good packets */ good_cnt = 0; - do { /* receive the sent packets */ - pci_dma_sync_single_for_cpu(pdev, - rx_ring->buffer_info[l].dma, - IGB_RXBUFFER_2048, - PCI_DMA_FROMDEVICE); - - ret_val = igb_check_lbtest_frame( - rx_ring->buffer_info[l].skb, 1024); - if (!ret_val) + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring); + if (tx_ret_val == NETDEV_TX_OK) good_cnt++; - l++; - if (l == rx_ring->count) - l = 0; - /* time + 20 msecs (200 msecs on 2.4) is more than - * enough time to complete the receives, if it's - * exceeded, break and error off - */ - } while (good_cnt < 64 && jiffies < (time + 20)); + } + if (good_cnt != 64) { - ret_val = 13; /* ret_val is the same as mis-compare */ + ret_val = 12; break; } - if (jiffies >= (time + 20)) { - ret_val = 14; /* error code for time out error */ + + /* allow 200 milliseconds for packets to go from tx to rx */ + msleep(200); + + good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; break; } } /* end loop count loop */ + + /* free the original skb */ + kfree_skb(skb); + return ret_val; } @@ -1686,8 +1679,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data) if (hw->mac.autoneg) msleep(4000); - if (!(rd32(E1000_STATUS) & - E1000_STATUS_LU)) + if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) *data = 1; } return *data; @@ -1869,7 +1861,6 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) adapter->wol |= E1000_WUFC_BC; if (wol->wolopts & WAKE_MAGIC) adapter->wol |= 
E1000_WUFC_MAG; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); return 0; @@ -1882,12 +1873,19 @@ static int igb_phys_id(struct net_device *netdev, u32 data) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + unsigned long timeout; - if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) - data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); + timeout = data * 1000; + + /* + * msleep_interruptable only accepts unsigned int so we are limited + * in how long a duration we can wait + */ + if (!timeout || timeout > UINT_MAX) + timeout = UINT_MAX; igb_blink_led(hw); - msleep_interruptible(data * 1000); + msleep_interruptible(timeout); igb_led_off(hw); clear_bit(IGB_LED_ON, &adapter->led_status); @@ -1900,7 +1898,6 @@ static int igb_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; int i; if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || @@ -1909,17 +1906,39 @@ static int igb_set_coalesce(struct net_device *netdev, (ec->rx_coalesce_usecs == 2)) return -EINVAL; + if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->tx_coalesce_usecs > 3) && + (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->tx_coalesce_usecs == 2)) + return -EINVAL; + + if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + /* convert to rate of irq's per second */ - if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { - adapter->itr_setting = ec->rx_coalesce_usecs; - adapter->itr = IGB_START_ITR; - } else { - adapter->itr_setting = ec->rx_coalesce_usecs << 2; - adapter->itr = adapter->itr_setting; - } + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; - for (i = 0; i < adapter->num_rx_queues; i++) - wr32(adapter->rx_ring[i].itr_register, adapter->itr); + /* convert to rate of irq's per second */ + if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + if (q_vector->rx_ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; + q_vector->set_itr = 1; + } return 0; } @@ -1929,15 +1948,21 @@ static int igb_get_coalesce(struct net_device *netdev, { struct igb_adapter *adapter = netdev_priv(netdev); - if (adapter->itr_setting <= 3) - ec->rx_coalesce_usecs = adapter->itr_setting; + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; else - ec->rx_coalesce_usecs = adapter->itr_setting >> 2; + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } return 0; } - static int igb_nway_reset(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -1962,31 +1987,32 @@ static void igb_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct igb_adapter *adapter = 
netdev_priv(netdev); + struct net_device_stats *net_stats = &netdev->stats; u64 *queue_stat; - int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64); - int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64); - int j; - int i; + int i, j, k; + char *p; igb_update_stats(adapter); + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { - char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset; + p = (char *)adapter + igb_gstrings_stats[i].stat_offset; data[i] = (igb_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } + for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; + data[i] = (igb_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } for (j = 0; j < adapter->num_tx_queues; j++) { - int k; queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats; - for (k = 0; k < stat_count_tx; k++) - data[i + k] = queue_stat[k]; - i += k; + for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++) + data[i] = queue_stat[k]; } for (j = 0; j < adapter->num_rx_queues; j++) { - int k; queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; - for (k = 0; k < stat_count_rx; k++) - data[i + k] = queue_stat[k]; - i += k; + for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++) + data[i] = queue_stat[k]; } } @@ -2007,11 +2033,18 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) { + memcpy(p, igb_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } for (i = 0; i < adapter->num_tx_queues; i++) { sprintf(p, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_restart", i); + p += ETH_GSTRING_LEN; } for (i = 0; i < adapter->num_rx_queues; i++) { sprintf(p, "rx_queue_%u_packets", i); @@ -2020,6 +2053,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_drops", i); p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_csum_err", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_alloc_failed", i); + p += ETH_GSTRING_LEN; } /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ break; diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 714c3a4a44ef..16349ba68736 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -49,7 +49,7 @@ #endif #include "igb.h" -#define DRV_VERSION "1.3.16-k2" +#define DRV_VERSION "2.1.0-k2" char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = @@ -61,8 +61,14 @@ static const struct e1000_info *igb_info_tbl[] = { }; static struct pci_device_id igb_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, @@ -81,6 +87,7 @@ 
static int igb_setup_all_tx_resources(struct igb_adapter *); static int igb_setup_all_rx_resources(struct igb_adapter *); static void igb_free_all_tx_resources(struct igb_adapter *); static void igb_free_all_rx_resources(struct igb_adapter *); +static void igb_setup_mrqc(struct igb_adapter *); void igb_update_stats(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); static void __devexit igb_remove(struct pci_dev *pdev); @@ -89,7 +96,6 @@ static int igb_open(struct net_device *); static int igb_close(struct net_device *); static void igb_configure_tx(struct igb_adapter *); static void igb_configure_rx(struct igb_adapter *); -static void igb_setup_rctl(struct igb_adapter *); static void igb_clean_all_tx_rings(struct igb_adapter *); static void igb_clean_all_rx_rings(struct igb_adapter *); static void igb_clean_tx_ring(struct igb_ring *); @@ -98,28 +104,22 @@ static void igb_set_rx_mode(struct net_device *); static void igb_update_phy_info(unsigned long); static void igb_watchdog(unsigned long); static void igb_watchdog_task(struct work_struct *); -static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, - struct net_device *, - struct igb_ring *); -static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, - struct net_device *); +static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *); static struct net_device_stats *igb_get_stats(struct net_device *); static int igb_change_mtu(struct net_device *, int); static int igb_set_mac(struct net_device *, void *); +static void igb_set_uta(struct igb_adapter *adapter); static irqreturn_t igb_intr(int irq, void *); static irqreturn_t igb_intr_msi(int irq, void *); static irqreturn_t igb_msix_other(int irq, void *); -static irqreturn_t igb_msix_rx(int irq, void *); -static irqreturn_t igb_msix_tx(int irq, void *); +static irqreturn_t igb_msix_ring(int irq, void *); #ifdef CONFIG_IGB_DCA -static void igb_update_rx_dca(struct igb_ring *); -static void igb_update_tx_dca(struct igb_ring *); +static void igb_update_dca(struct igb_q_vector *); static void igb_setup_dca(struct igb_adapter *); #endif /* CONFIG_IGB_DCA */ -static bool igb_clean_tx_irq(struct igb_ring *); +static bool igb_clean_tx_irq(struct igb_q_vector *); static int igb_poll(struct napi_struct *, int); -static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); -static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); +static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); @@ -127,57 +127,13 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *); static void igb_vlan_rx_add_vid(struct net_device *, u16); static void igb_vlan_rx_kill_vid(struct net_device *, u16); static void igb_restore_vlan(struct igb_adapter *); +static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); static void igb_ping_all_vfs(struct igb_adapter *); static void igb_msg_task(struct igb_adapter *); -static int igb_rcv_msg_from_vf(struct igb_adapter *, u32); -static inline void igb_set_rah_pool(struct e1000_hw *, int , int); static void igb_vmm_control(struct igb_adapter *); -static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *); +static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); static void igb_restore_vf_multicasts(struct igb_adapter *adapter); -static inline void igb_set_vmolr(struct 
e1000_hw *hw, int vfn) -{ - u32 reg_data; - - reg_data = rd32(E1000_VMOLR(vfn)); - reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */ - E1000_VMOLR_ROPE | /* Accept packets matched in UTA */ - E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */ - E1000_VMOLR_AUPE | /* Accept untagged packets */ - E1000_VMOLR_STRVLAN; /* Strip vlan tags */ - wr32(E1000_VMOLR(vfn), reg_data); -} - -static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, - int vfn) -{ - struct e1000_hw *hw = &adapter->hw; - u32 vmolr; - - /* if it isn't the PF check to see if VFs are enabled and - * increase the size to support vlan tags */ - if (vfn < adapter->vfs_allocated_count && - adapter->vf_data[vfn].vlans_enabled) - size += VLAN_TAG_SIZE; - - vmolr = rd32(E1000_VMOLR(vfn)); - vmolr &= ~E1000_VMOLR_RLPML_MASK; - vmolr |= size | E1000_VMOLR_LPE; - wr32(E1000_VMOLR(vfn), vmolr); - - return 0; -} - -static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry) -{ - u32 reg_data; - - reg_data = rd32(E1000_RAH(entry)); - reg_data &= ~E1000_RAH_POOL_MASK; - reg_data |= E1000_RAH_POOL_1 << pool;; - wr32(E1000_RAH(entry), reg_data); -} - #ifdef CONFIG_PM static int igb_suspend(struct pci_dev *, pm_message_t); static int igb_resume(struct pci_dev *); @@ -228,46 +184,12 @@ static struct pci_driver igb_driver = { .err_handler = &igb_err_handler }; -static int global_quad_port_a; /* global quad port a indication */ - MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); /** - * Scale the NIC clock cycle by a large factor so that - * relatively small clock corrections can be added or - * substracted at each clock tick. The drawbacks of a - * large factor are a) that the clock register overflows - * more quickly (not such a big deal) and b) that the - * increment per tick has to fit into 24 bits. - * - * Note that - * TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * - * IGB_TSYNC_SCALE - * TIMINCA += TIMINCA * adjustment [ppm] / 1e9 - * - * The base scale factor is intentionally a power of two - * so that the division in %struct timecounter can be done with - * a shift. - */ -#define IGB_TSYNC_SHIFT (19) -#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT) - -/** - * The duration of one clock cycle of the NIC. - * - * @todo This hard-coded value is part of the specification and might change - * in future hardware revisions. Add revision check. - */ -#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16 - -#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24) -# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA -#endif - -/** * igb_read_clock - read raw cycle counter (to be used by time counter) */ static cycle_t igb_read_clock(const struct cyclecounter *tc) @@ -275,11 +197,21 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc) struct igb_adapter *adapter = container_of(tc, struct igb_adapter, cycles); struct e1000_hw *hw = &adapter->hw; - u64 stamp; + u64 stamp = 0; + int shift = 0; - stamp = rd32(E1000_SYSTIML); - stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL; + /* + * The timestamp latches on lowest register read. For the 82580 + * the lowest register is SYSTIMR instead of SYSTIML. However we never + * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it. 
+ */ + if (hw->mac.type == e1000_82580) { + stamp = rd32(E1000_SYSTIMR) >> 8; + shift = IGB_82580_TSYNC_SHIFT; + } + stamp |= (u64)rd32(E1000_SYSTIML) << shift; + stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32); return stamp; } @@ -320,17 +252,6 @@ static char *igb_get_time_str(struct igb_adapter *adapter, #endif /** - * igb_desc_unused - calculate if we have unused descriptors - **/ -static int igb_desc_unused(struct igb_ring *ring) -{ - if (ring->next_to_clean > ring->next_to_use) - return ring->next_to_clean - ring->next_to_use - 1; - - return ring->count + ring->next_to_clean - ring->next_to_use - 1; -} - -/** * igb_init_module - Driver Registration Routine * * igb_init_module is the first routine called when the driver is @@ -344,12 +265,9 @@ static int __init igb_init_module(void) printk(KERN_INFO "%s\n", igb_copyright); - global_quad_port_a = 0; - #ifdef CONFIG_IGB_DCA dca_register_notify(&dca_notifier); #endif - ret = pci_register_driver(&igb_driver); return ret; } @@ -382,8 +300,8 @@ module_exit(igb_exit_module); **/ static void igb_cache_ring_register(struct igb_adapter *adapter) { - int i; - unsigned int rbase_offset = adapter->vfs_allocated_count; + int i = 0, j = 0; + u32 rbase_offset = adapter->vfs_allocated_count; switch (adapter->hw.mac.type) { case e1000_82576: @@ -392,23 +310,37 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) * In order to avoid collision we start at the first free queue * and continue consuming queues in the same sequence */ - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i].reg_idx = rbase_offset + - Q_IDX_82576(i); - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i].reg_idx = rbase_offset + - Q_IDX_82576(i); - break; + if (adapter->vfs_allocated_count) { + for (; i < adapter->rss_queues; i++) + adapter->rx_ring[i].reg_idx = rbase_offset + + Q_IDX_82576(i); + for (; j < adapter->rss_queues; j++) + adapter->tx_ring[j].reg_idx = rbase_offset + + Q_IDX_82576(j); + } case e1000_82575: + case e1000_82580: default: - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i].reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i].reg_idx = i; + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i].reg_idx = rbase_offset + i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j].reg_idx = rbase_offset + j; break; } } +static void igb_free_queues(struct igb_adapter *adapter) +{ + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + adapter->tx_ring = NULL; + adapter->rx_ring = NULL; + + adapter->num_rx_queues = 0; + adapter->num_tx_queues = 0; +} + /** * igb_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize @@ -423,59 +355,61 @@ static int igb_alloc_queues(struct igb_adapter *adapter) adapter->tx_ring = kcalloc(adapter->num_tx_queues, sizeof(struct igb_ring), GFP_KERNEL); if (!adapter->tx_ring) - return -ENOMEM; + goto err; adapter->rx_ring = kcalloc(adapter->num_rx_queues, sizeof(struct igb_ring), GFP_KERNEL); - if (!adapter->rx_ring) { - kfree(adapter->tx_ring); - return -ENOMEM; - } - - adapter->rx_ring->buddy = adapter->tx_ring; + if (!adapter->rx_ring) + goto err; for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *ring = &(adapter->tx_ring[i]); ring->count = adapter->tx_ring_count; - ring->adapter = adapter; ring->queue_index = i; + ring->pdev = adapter->pdev; + ring->netdev = adapter->netdev; + /* For 82575, context index must be unique per ring. 
*/ + if (adapter->hw.mac.type == e1000_82575) + ring->flags = IGB_RING_FLAG_TX_CTX_IDX; } + for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = &(adapter->rx_ring[i]); ring->count = adapter->rx_ring_count; - ring->adapter = adapter; ring->queue_index = i; - ring->itr_register = E1000_ITR; - - /* set a default napi handler for each rx_ring */ - netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64); + ring->pdev = adapter->pdev; + ring->netdev = adapter->netdev; + ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */ + /* set flag indicating ring supports SCTP checksum offload */ + if (adapter->hw.mac.type >= e1000_82576) + ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM; } igb_cache_ring_register(adapter); - return 0; -} - -static void igb_free_queues(struct igb_adapter *adapter) -{ - int i; - for (i = 0; i < adapter->num_rx_queues; i++) - netif_napi_del(&adapter->rx_ring[i].napi); + return 0; - adapter->num_rx_queues = 0; - adapter->num_tx_queues = 0; +err: + igb_free_queues(adapter); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); + return -ENOMEM; } #define IGB_N0_QUEUE -1 -static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, - int tx_queue, int msix_vector) +static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) { u32 msixbm = 0; + struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; u32 ivar, index; + int rx_queue = IGB_N0_QUEUE; + int tx_queue = IGB_N0_QUEUE; + + if (q_vector->rx_ring) + rx_queue = q_vector->rx_ring->reg_idx; + if (q_vector->tx_ring) + tx_queue = q_vector->tx_ring->reg_idx; switch (hw->mac.type) { case e1000_82575: @@ -483,16 +417,12 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, bitmask for the EICR/EIMS/EIMC registers. To assign one or more queues to a vector, we write the appropriate bits into the MSIXBM register for that vector. */ - if (rx_queue > IGB_N0_QUEUE) { + if (rx_queue > IGB_N0_QUEUE) msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; - adapter->rx_ring[rx_queue].eims_value = msixbm; - } - if (tx_queue > IGB_N0_QUEUE) { + if (tx_queue > IGB_N0_QUEUE) msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; - adapter->tx_ring[tx_queue].eims_value = - E1000_EICR_TX_QUEUE0 << tx_queue; - } array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); + q_vector->eims_value = msixbm; break; case e1000_82576: /* 82576 uses a table-based method for assigning vectors. @@ -500,7 +430,40 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, a vector number along with a "valid" bit. Sadly, the layout of the table is somewhat counterintuitive. 
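/*
 * Illustrative worked example (not part of the patch) of the IVAR0 layout
 * described above: each 32-bit IVAR0 entry holds four 8-bit vector fields
 * and is indexed by (queue & 0x7). Rx queue 3 therefore lands in the low
 * byte of entry 3, Rx queue 11 in the third byte of that same entry, and
 * the matching Tx queues use the second and fourth bytes. Assigning MSI-X
 * vector 5 to Rx queue 11 thus sets bits 23:16 of IVAR0[3] to
 * (5 | E1000_IVAR_VALID), exactly as the code below does.
 */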
*/ if (rx_queue > IGB_N0_QUEUE) { - index = (rx_queue >> 1) + adapter->vfs_allocated_count; + index = (rx_queue & 0x7); + ivar = array_rd32(E1000_IVAR0, index); + if (rx_queue < 8) { + /* vector goes into low byte of register */ + ivar = ivar & 0xFFFFFF00; + ivar |= msix_vector | E1000_IVAR_VALID; + } else { + /* vector goes into third byte of register */ + ivar = ivar & 0xFF00FFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 16; + } + array_wr32(E1000_IVAR0, index, ivar); + } + if (tx_queue > IGB_N0_QUEUE) { + index = (tx_queue & 0x7); + ivar = array_rd32(E1000_IVAR0, index); + if (tx_queue < 8) { + /* vector goes into second byte of register */ + ivar = ivar & 0xFFFF00FF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 8; + } else { + /* vector goes into high byte of register */ + ivar = ivar & 0x00FFFFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 24; + } + array_wr32(E1000_IVAR0, index, ivar); + } + q_vector->eims_value = 1 << msix_vector; + break; + case e1000_82580: + /* 82580 uses the same table-based approach as 82576 but has fewer + entries as a result we carry over for queues greater than 4. */ + if (rx_queue > IGB_N0_QUEUE) { + index = (rx_queue >> 1); ivar = array_rd32(E1000_IVAR0, index); if (rx_queue & 0x1) { /* vector goes into third byte of register */ @@ -511,11 +474,10 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, ivar = ivar & 0xFFFFFF00; ivar |= msix_vector | E1000_IVAR_VALID; } - adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector; array_wr32(E1000_IVAR0, index, ivar); } if (tx_queue > IGB_N0_QUEUE) { - index = (tx_queue >> 1) + adapter->vfs_allocated_count; + index = (tx_queue >> 1); ivar = array_rd32(E1000_IVAR0, index); if (tx_queue & 0x1) { /* vector goes into high byte of register */ @@ -526,9 +488,9 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, ivar = ivar & 0xFFFF00FF; ivar |= (msix_vector | E1000_IVAR_VALID) << 8; } - adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector; array_wr32(E1000_IVAR0, index, ivar); } + q_vector->eims_value = 1 << msix_vector; break; default: BUG(); @@ -549,43 +511,10 @@ static void igb_configure_msix(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; adapter->eims_enable_mask = 0; - if (hw->mac.type == e1000_82576) - /* Turn on MSI-X capability first, or our settings - * won't stick. And it will take days to debug. */ - wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | - E1000_GPIE_PBA | E1000_GPIE_EIAME | - E1000_GPIE_NSICR); - - for (i = 0; i < adapter->num_tx_queues; i++) { - struct igb_ring *tx_ring = &adapter->tx_ring[i]; - igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++); - adapter->eims_enable_mask |= tx_ring->eims_value; - if (tx_ring->itr_val) - writel(tx_ring->itr_val, - hw->hw_addr + tx_ring->itr_register); - else - writel(1, hw->hw_addr + tx_ring->itr_register); - } - - for (i = 0; i < adapter->num_rx_queues; i++) { - struct igb_ring *rx_ring = &adapter->rx_ring[i]; - rx_ring->buddy = NULL; - igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++); - adapter->eims_enable_mask |= rx_ring->eims_value; - if (rx_ring->itr_val) - writel(rx_ring->itr_val, - hw->hw_addr + rx_ring->itr_register); - else - writel(1, hw->hw_addr + rx_ring->itr_register); - } - /* set vector for other causes, i.e. 
link changes */ switch (hw->mac.type) { case e1000_82575: - array_wr32(E1000_MSIXBM(0), vector++, - E1000_EIMS_OTHER); - tmp = rd32(E1000_CTRL_EXT); /* enable MSI-X PBA support*/ tmp |= E1000_CTRL_EXT_PBA_CLR; @@ -595,22 +524,41 @@ static void igb_configure_msix(struct igb_adapter *adapter) tmp |= E1000_CTRL_EXT_IRCA; wr32(E1000_CTRL_EXT, tmp); - adapter->eims_enable_mask |= E1000_EIMS_OTHER; + + /* enable msix_other interrupt */ + array_wr32(E1000_MSIXBM(0), vector++, + E1000_EIMS_OTHER); adapter->eims_other = E1000_EIMS_OTHER; break; case e1000_82576: + case e1000_82580: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. */ + wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_GPIE_PBA | E1000_GPIE_EIAME | + E1000_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = 1 << vector; tmp = (vector++ | E1000_IVAR_VALID) << 8; - wr32(E1000_IVAR_MISC, tmp); - adapter->eims_enable_mask = (1 << (vector)) - 1; - adapter->eims_other = 1 << (vector - 1); + wr32(E1000_IVAR_MISC, tmp); break; default: /* do nothing, since nothing else supports MSI-X */ break; } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + igb_assign_vector(q_vector, vector++); + adapter->eims_enable_mask |= q_vector->eims_value; + } + wrfl(); } @@ -623,43 +571,40 @@ static void igb_configure_msix(struct igb_adapter *adapter) static int igb_request_msix(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; int i, err = 0, vector = 0; - vector = 0; - - for (i = 0; i < adapter->num_tx_queues; i++) { - struct igb_ring *ring = &(adapter->tx_ring[i]); - sprintf(ring->name, "%s-tx-%d", netdev->name, i); - err = request_irq(adapter->msix_entries[vector].vector, - &igb_msix_tx, 0, ring->name, - &(adapter->tx_ring[i])); - if (err) - goto out; - ring->itr_register = E1000_EITR(0) + (vector << 2); - ring->itr_val = 976; /* ~4000 ints/sec */ - vector++; - } - for (i = 0; i < adapter->num_rx_queues; i++) { - struct igb_ring *ring = &(adapter->rx_ring[i]); - if (strlen(netdev->name) < (IFNAMSIZ - 5)) - sprintf(ring->name, "%s-rx-%d", netdev->name, i); + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_other, 0, netdev->name, adapter); + if (err) + goto out; + vector++; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); + + if (q_vector->rx_ring && q_vector->tx_ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx_ring->queue_index); + else if (q_vector->tx_ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx_ring->queue_index); + else if (q_vector->rx_ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx_ring->queue_index); else - memcpy(ring->name, netdev->name, IFNAMSIZ); + sprintf(q_vector->name, "%s-unused", netdev->name); + err = request_irq(adapter->msix_entries[vector].vector, - &igb_msix_rx, 0, ring->name, - &(adapter->rx_ring[i])); + igb_msix_ring, 0, q_vector->name, + q_vector); if (err) goto out; - ring->itr_register = E1000_EITR(0) + (vector << 2); - ring->itr_val = adapter->itr; vector++; } - err = request_irq(adapter->msix_entries[vector].vector, - &igb_msix_other, 0, netdev->name, netdev); - if (err) - goto out; - igb_configure_msix(adapter); return 0; out: @@ -672,11 
+617,44 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter) pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; - } else if (adapter->flags & IGB_FLAG_HAS_MSI) + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { pci_disable_msi(adapter->pdev); - return; + } +} + +/** + * igb_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void igb_free_q_vectors(struct igb_adapter *adapter) +{ + int v_idx; + + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + adapter->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + kfree(q_vector); + } + adapter->num_q_vectors = 0; } +/** + * igb_clear_interrupt_scheme - reset the device to a state of no interrupts + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) +{ + igb_free_queues(adapter); + igb_free_q_vectors(adapter); + igb_reset_interrupt_capability(adapter); +} /** * igb_set_interrupt_capability - set MSI or MSI-X if supported @@ -690,11 +668,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) int numvecs, i; /* Number of supported queues. */ - /* Having more queues than CPUs doesn't make sense. */ - adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); - adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus()); + adapter->num_rx_queues = adapter->rss_queues; + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every rx queue */ + numvecs = adapter->num_rx_queues; + + /* if tx handler is separate add 1 for every tx queue */ + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; - numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1; + /* add 1 vector for link status interrupts */ + numvecs++; adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) @@ -728,8 +716,12 @@ msi_only: dev_info(&adapter->pdev->dev, "IOV Disabled\n"); } #endif + adapter->vfs_allocated_count = 0; + adapter->rss_queues = 1; + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; if (!pci_enable_msi(adapter->pdev)) adapter->flags |= IGB_FLAG_HAS_MSI; out: @@ -739,6 +731,143 @@ out: } /** + * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int igb_alloc_q_vectors(struct igb_adapter *adapter) +{ + struct igb_q_vector *q_vector; + struct e1000_hw *hw = &adapter->hw; + int v_idx; + + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL); + if (!q_vector) + goto err_out; + q_vector->adapter = adapter; + q_vector->itr_shift = (hw->mac.type == e1000_82575) ?
16 : 0; + q_vector->itr_register = hw->hw_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + q_vector->set_itr = 1; + netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); + adapter->q_vector[v_idx] = q_vector; + } + return 0; + +err_out: + while (v_idx) { + v_idx--; + q_vector = adapter->q_vector[v_idx]; + netif_napi_del(&q_vector->napi); + kfree(q_vector); + adapter->q_vector[v_idx] = NULL; + } + return -ENOMEM; +} + +static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, + int ring_idx, int v_idx) +{ + struct igb_q_vector *q_vector; + + q_vector = adapter->q_vector[v_idx]; + q_vector->rx_ring = &adapter->rx_ring[ring_idx]; + q_vector->rx_ring->q_vector = q_vector; + q_vector->itr_val = adapter->rx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; +} + +static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, + int ring_idx, int v_idx) +{ + struct igb_q_vector *q_vector; + + q_vector = adapter->q_vector[v_idx]; + q_vector->tx_ring = &adapter->tx_ring[ring_idx]; + q_vector->tx_ring->q_vector = q_vector; + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; +} + +/** + * igb_map_ring_to_vector - maps allocated queues to vectors + * + * This function maps the recently allocated queues to vectors. + **/ +static int igb_map_ring_to_vector(struct igb_adapter *adapter) +{ + int i; + int v_idx = 0; + + if ((adapter->num_q_vectors < adapter->num_rx_queues) || + (adapter->num_q_vectors < adapter->num_tx_queues)) + return -ENOMEM; + + if (adapter->num_q_vectors >= + (adapter->num_rx_queues + adapter->num_tx_queues)) { + for (i = 0; i < adapter->num_rx_queues; i++) + igb_map_rx_ring_to_vector(adapter, i, v_idx++); + for (i = 0; i < adapter->num_tx_queues; i++) + igb_map_tx_ring_to_vector(adapter, i, v_idx++); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) { + if (i < adapter->num_tx_queues) + igb_map_tx_ring_to_vector(adapter, i, v_idx); + igb_map_rx_ring_to_vector(adapter, i, v_idx++); + } + for (; i < adapter->num_tx_queues; i++) + igb_map_tx_ring_to_vector(adapter, i, v_idx++); + } + return 0; +} + +/** + * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * + * This function initializes the interrupts and allocates all of the queues. 
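/*
 * Illustrative trace (not part of the patch), assuming 4 Rx and 4 Tx rings:
 * with 8 q_vectors the first branch of igb_map_ring_to_vector() above gives
 * every ring its own vector (rx0-rx3 on q_vector[0..3], tx0-tx3 on
 * q_vector[4..7]); with only 4 q_vectors the second branch pairs them, so
 * rx_ring[i] and tx_ring[i] both hang off q_vector[i] and a single MSI-X
 * vector services both directions of that queue pair.
 */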
+ **/ +static int igb_init_interrupt_scheme(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + + igb_set_interrupt_capability(adapter); + + err = igb_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + err = igb_alloc_queues(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + err = igb_map_ring_to_vector(adapter); + if (err) { + dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n"); + goto err_map_queues; + } + + + return 0; +err_map_queues: + igb_free_queues(adapter); +err_alloc_queues: + igb_free_q_vectors(adapter); +err_alloc_q_vectors: + igb_reset_interrupt_capability(adapter); + return err; +} + +/** * igb_request_irq - initialize interrupts * * Attempts to configure interrupts using the best available @@ -747,6 +876,7 @@ out: static int igb_request_irq(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; int err = 0; @@ -755,19 +885,38 @@ static int igb_request_irq(struct igb_adapter *adapter) if (!err) goto request_done; /* fall back to MSI */ - igb_reset_interrupt_capability(adapter); + igb_clear_interrupt_scheme(adapter); if (!pci_enable_msi(adapter->pdev)) adapter->flags |= IGB_FLAG_HAS_MSI; igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); + adapter->num_tx_queues = 1; adapter->num_rx_queues = 1; - igb_alloc_queues(adapter); + adapter->num_q_vectors = 1; + err = igb_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, + "Unable to allocate memory for vectors\n"); + goto request_done; + } + err = igb_alloc_queues(adapter); + if (err) { + dev_err(&pdev->dev, + "Unable to allocate memory for queues\n"); + igb_free_q_vectors(adapter); + goto request_done; + } + igb_setup_all_tx_resources(adapter); + igb_setup_all_rx_resources(adapter); } else { switch (hw->mac.type) { case e1000_82575: wr32(E1000_MSIXBM(0), - (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER)); + (E1000_EICR_RX_QUEUE0 | + E1000_EICR_TX_QUEUE0 | + E1000_EIMS_OTHER)); break; + case e1000_82580: case e1000_82576: wr32(E1000_IVAR0, E1000_IVAR_VALID); break; @@ -777,17 +926,18 @@ static int igb_request_irq(struct igb_adapter *adapter) } if (adapter->flags & IGB_FLAG_HAS_MSI) { - err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0, - netdev->name, netdev); + err = request_irq(adapter->pdev->irq, igb_intr_msi, 0, + netdev->name, adapter); if (!err) goto request_done; + /* fall back to legacy interrupts */ igb_reset_interrupt_capability(adapter); adapter->flags &= ~IGB_FLAG_HAS_MSI; } - err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, - netdev->name, netdev); + err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED, + netdev->name, adapter); if (err) dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", @@ -799,23 +949,19 @@ request_done: static void igb_free_irq(struct igb_adapter *adapter) { - struct net_device *netdev = adapter->netdev; - if (adapter->msix_entries) { int vector = 0, i; - for (i = 0; i < adapter->num_tx_queues; i++) - free_irq(adapter->msix_entries[vector++].vector, - &(adapter->tx_ring[i])); - for (i = 0; i < adapter->num_rx_queues; i++) - free_irq(adapter->msix_entries[vector++].vector, - &(adapter->rx_ring[i])); + free_irq(adapter->msix_entries[vector++].vector, adapter); - free_irq(adapter->msix_entries[vector++].vector, netdev); - return; + for (i = 0; i < 
adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + free_irq(adapter->msix_entries[vector++].vector, + q_vector); + } + } else { + free_irq(adapter->pdev->irq, adapter); } - - free_irq(adapter->pdev->irq, netdev); } /** @@ -826,6 +972,11 @@ static void igb_irq_disable(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + /* + * we need to be careful when disabling interrupts. The VFs are also + * mapped into these registers and so clearing the bits can cause + * issues on the VF drivers so we only need to clear what we set + */ if (adapter->msix_entries) { u32 regval = rd32(E1000_EIAM); wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); @@ -849,41 +1000,47 @@ static void igb_irq_enable(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; if (adapter->msix_entries) { + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC; u32 regval = rd32(E1000_EIAC); wr32(E1000_EIAC, regval | adapter->eims_enable_mask); regval = rd32(E1000_EIAM); wr32(E1000_EIAM, regval | adapter->eims_enable_mask); wr32(E1000_EIMS, adapter->eims_enable_mask); - if (adapter->vfs_allocated_count) + if (adapter->vfs_allocated_count) { wr32(E1000_MBVFIMR, 0xFF); - wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB | - E1000_IMS_DOUTSYNC)); + ims |= E1000_IMS_VMMB; + } + if (adapter->hw.mac.type == e1000_82580) + ims |= E1000_IMS_DRSTA; + + wr32(E1000_IMS, ims); } else { - wr32(E1000_IMS, IMS_ENABLE_MASK); - wr32(E1000_IAM, IMS_ENABLE_MASK); + wr32(E1000_IMS, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + wr32(E1000_IAM, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); } } static void igb_update_mng_vlan(struct igb_adapter *adapter) { - struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; u16 vid = adapter->hw.mng_cookie.vlan_id; u16 old_vid = adapter->mng_vlan_id; - if (adapter->vlgrp) { - if (!vlan_group_get_device(adapter->vlgrp, vid)) { - if (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { - igb_vlan_rx_add_vid(netdev, vid); - adapter->mng_vlan_id = vid; - } else - adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; - if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && - (vid != old_vid) && - !vlan_group_get_device(adapter->vlgrp, old_vid)) - igb_vlan_rx_kill_vid(netdev, old_vid); - } else - adapter->mng_vlan_id = vid; + if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + /* add VID to filter table */ + igb_vfta_set(hw, vid, true); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; + } + + if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && + (vid != old_vid) && + !vlan_group_get_device(adapter->vlgrp, old_vid)) { + /* remove VID from filter table */ + igb_vfta_set(hw, old_vid, false); } } @@ -907,7 +1064,6 @@ static void igb_release_hw_control(struct igb_adapter *adapter) ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); } - /** * igb_get_hw_control - get control of the h/w from f/w * @adapter: address of board private structure @@ -942,8 +1098,11 @@ static void igb_configure(struct igb_adapter *adapter) igb_restore_vlan(adapter); - igb_configure_tx(adapter); + igb_setup_tctl(adapter); + igb_setup_mrqc(adapter); igb_setup_rctl(adapter); + + igb_configure_tx(adapter); igb_configure_rx(adapter); igb_rx_fifo_flush_82575(&adapter->hw); @@ -965,7 +1124,6 @@ static void igb_configure(struct igb_adapter *adapter) * igb_up - Open the interface and prepare it to handle traffic * @adapter: board private structure **/ - int igb_up(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -976,30 +1134,37 @@ int 
igb_up(struct igb_adapter *adapter) clear_bit(__IGB_DOWN, &adapter->state); - for (i = 0; i < adapter->num_rx_queues; i++) - napi_enable(&adapter->rx_ring[i].napi); + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + napi_enable(&q_vector->napi); + } if (adapter->msix_entries) igb_configure_msix(adapter); - igb_vmm_control(adapter); - igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); - igb_set_vmolr(hw, adapter->vfs_allocated_count); - /* Clear any pending interrupts. */ rd32(E1000_ICR); igb_irq_enable(adapter); + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + netif_tx_start_all_queues(adapter->netdev); - /* Fire a link change interrupt to start the watchdog. */ - wr32(E1000_ICS, E1000_ICS_LSC); + /* start the watchdog. */ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + return 0; } void igb_down(struct igb_adapter *adapter) { - struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; u32 tctl, rctl; int i; @@ -1022,8 +1187,10 @@ void igb_down(struct igb_adapter *adapter) wrfl(); msleep(10); - for (i = 0; i < adapter->num_rx_queues; i++) - napi_disable(&adapter->rx_ring[i].napi); + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + napi_disable(&q_vector->napi); + } igb_irq_disable(adapter); @@ -1062,6 +1229,7 @@ void igb_reinit_locked(struct igb_adapter *adapter) void igb_reset(struct igb_adapter *adapter) { + struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; struct e1000_fc_info *fc = &hw->fc; @@ -1072,8 +1240,13 @@ void igb_reset(struct igb_adapter *adapter) * To take effect CTRL.RST is required. 
*/ switch (mac->type) { + case e1000_82580: + pba = rd32(E1000_RXPBS); + pba = igb_rxpbs_adjust_82580(pba); + break; case e1000_82576: - pba = E1000_PBA_64K; + pba = rd32(E1000_RXPBS); + pba &= E1000_RXPBS_SIZE_MASK_82576; break; case e1000_82575: default: @@ -1148,10 +1321,10 @@ void igb_reset(struct igb_adapter *adapter) if (adapter->vfs_allocated_count) { int i; for (i = 0 ; i < adapter->vfs_allocated_count; i++) - adapter->vf_data[i].clear_to_send = false; + adapter->vf_data[i].flags = 0; /* ping all the active vfs to let them know we are going down */ - igb_ping_all_vfs(adapter); + igb_ping_all_vfs(adapter); /* disable transmits and receives */ wr32(E1000_VFRE, 0); @@ -1159,23 +1332,28 @@ void igb_reset(struct igb_adapter *adapter) } /* Allow time for pending master requests to run */ - adapter->hw.mac.ops.reset_hw(&adapter->hw); + hw->mac.ops.reset_hw(hw); wr32(E1000_WUC, 0); - if (adapter->hw.mac.ops.init_hw(&adapter->hw)) - dev_err(&adapter->pdev->dev, "Hardware Error\n"); + if (hw->mac.ops.init_hw(hw)) + dev_err(&pdev->dev, "Hardware Error\n"); + if (hw->mac.type == e1000_82580) { + u32 reg = rd32(E1000_PCIEMISC); + wr32(E1000_PCIEMISC, + reg & ~E1000_PCIEMISC_LX_DECISION); + } igb_update_mng_vlan(adapter); /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); - igb_reset_adaptive(&adapter->hw); - igb_get_phy_info(&adapter->hw); + igb_reset_adaptive(hw); + igb_get_phy_info(hw); } static const struct net_device_ops igb_netdev_ops = { - .ndo_open = igb_open, + .ndo_open = igb_open, .ndo_stop = igb_close, .ndo_start_xmit = igb_xmit_frame_adv, .ndo_get_stats = igb_get_stats, @@ -1211,10 +1389,11 @@ static int __devinit igb_probe(struct pci_dev *pdev, struct net_device *netdev; struct igb_adapter *adapter; struct e1000_hw *hw; + u16 eeprom_data = 0; + static int global_quad_port_a; /* global quad port a indication */ const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; unsigned long mmio_start, mmio_len; int err, pci_using_dac; - u16 eeprom_data = 0; u16 eeprom_apme_mask = IGB_EEPROM_APME; u32 part_num; @@ -1291,8 +1470,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; - /* setup the private structure */ - hw->back = adapter; /* Copy the default MAC, PHY and NVM function pointers */ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); @@ -1302,46 +1479,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (err) goto err_sw_init; -#ifdef CONFIG_PCI_IOV - /* since iov functionality isn't critical to base device function we - * can accept failure. If it fails we don't allow iov to be enabled */ - if (hw->mac.type == e1000_82576) { - /* 82576 supports a maximum of 7 VFs in addition to the PF */ - unsigned int num_vfs = (max_vfs > 7) ? 
7 : max_vfs; - int i; - unsigned char mac_addr[ETH_ALEN]; - - if (num_vfs) { - adapter->vf_data = kcalloc(num_vfs, - sizeof(struct vf_data_storage), - GFP_KERNEL); - if (!adapter->vf_data) { - dev_err(&pdev->dev, - "Could not allocate VF private data - " - "IOV enable failed\n"); - } else { - err = pci_enable_sriov(pdev, num_vfs); - if (!err) { - adapter->vfs_allocated_count = num_vfs; - dev_info(&pdev->dev, - "%d vfs allocated\n", - num_vfs); - for (i = 0; - i < adapter->vfs_allocated_count; - i++) { - random_ether_addr(mac_addr); - igb_set_vf_mac(adapter, i, - mac_addr); - } - } else { - kfree(adapter->vf_data); - adapter->vf_data = NULL; - } - } - } - } - -#endif /* setup the private structure */ err = igb_sw_init(adapter); if (err) @@ -1349,16 +1486,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, igb_get_bus_info_pcie(hw); - /* set flags */ - switch (hw->mac.type) { - case e1000_82575: - adapter->flags |= IGB_FLAG_NEED_CTX_IDX; - break; - case e1000_82576: - default: - break; - } - hw->phy.autoneg_wait_to_complete = false; hw->mac.adaptive_ifs = true; @@ -1382,7 +1509,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_IPV6_CSUM; netdev->features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO6; - netdev->features |= NETIF_F_GRO; netdev->vlan_features |= NETIF_F_TSO; @@ -1394,10 +1520,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - if (adapter->hw.mac.type == e1000_82576) + if (hw->mac.type >= e1000_82576) netdev->features |= NETIF_F_SCTP_CSUM; - adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); + adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); /* before reading the NVM, reset the controller to put the device in a * known good starting state */ @@ -1439,9 +1565,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, hw->fc.requested_mode = e1000_fc_default; hw->fc.current_mode = e1000_fc_default; - adapter->itr_setting = IGB_DEFAULT_ITR; - adapter->itr = IGB_START_ITR; - igb_validate_mdi_setting(hw); /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, @@ -1450,6 +1573,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (hw->bus.func == 0) hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + else if (hw->mac.type == e1000_82580) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &eeprom_data); else if (hw->bus.func == 1) hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); @@ -1508,66 +1635,14 @@ static int __devinit igb_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "DCA enabled\n"); igb_setup_dca(adapter); } -#endif - - /* - * Initialize hardware timer: we keep it running just in case - * that some program needs it later on. - */ - memset(&adapter->cycles, 0, sizeof(adapter->cycles)); - adapter->cycles.read = igb_read_clock; - adapter->cycles.mask = CLOCKSOURCE_MASK(64); - adapter->cycles.mult = 1; - adapter->cycles.shift = IGB_TSYNC_SHIFT; - wr32(E1000_TIMINCA, - (1<<24) | - IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE); -#if 0 - /* - * Avoid rollover while we initialize by resetting the time counter. - */ - wr32(E1000_SYSTIML, 0x00000000); - wr32(E1000_SYSTIMH, 0x00000000); -#else - /* - * Set registers so that rollover occurs soon to test this. 
- */ - wr32(E1000_SYSTIML, 0x00000000); - wr32(E1000_SYSTIMH, 0xFF800000); -#endif - wrfl(); - timecounter_init(&adapter->clock, - &adapter->cycles, - ktime_to_ns(ktime_get_real())); - /* - * Synchronize our NIC clock against system wall clock. NIC - * time stamp reading requires ~3us per sample, each sample - * was pretty stable even under load => only require 10 - * samples for each offset comparison. - */ - memset(&adapter->compare, 0, sizeof(adapter->compare)); - adapter->compare.source = &adapter->clock; - adapter->compare.target = ktime_get_real; - adapter->compare.num_samples = 10; - timecompare_update(&adapter->compare, 0); - -#ifdef DEBUG - { - char buffer[160]; - printk(KERN_DEBUG - "igb: %s: hw %p initialized timer\n", - igb_get_time_str(adapter, buffer), - &adapter->hw); - } #endif - dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); /* print bus type/speed/width info */ dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", netdev->name, - ((hw->bus.speed == e1000_bus_speed_2500) - ? "2.5Gb/s" : "unknown"), + ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : + "unknown"), ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : @@ -1594,15 +1669,14 @@ err_eeprom: if (hw->flash_address) iounmap(hw->flash_address); - - igb_free_queues(adapter); err_sw_init: + igb_clear_interrupt_scheme(adapter); iounmap(hw->hw_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM)); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -1647,12 +1721,10 @@ static void __devexit igb_remove(struct pci_dev *pdev) unregister_netdev(netdev); - if (!igb_check_reset_block(&adapter->hw)) - igb_reset_phy(&adapter->hw); - - igb_reset_interrupt_capability(adapter); + if (!igb_check_reset_block(hw)) + igb_reset_phy(hw); - igb_free_queues(adapter); + igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PCI_IOV /* reclaim resources allocated to VFs */ @@ -1668,11 +1740,12 @@ static void __devexit igb_remove(struct pci_dev *pdev) dev_info(&pdev->dev, "IOV Disabled\n"); } #endif + iounmap(hw->hw_addr); if (hw->flash_address) iounmap(hw->flash_address); - pci_release_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM)); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); free_netdev(netdev); @@ -1682,6 +1755,160 @@ static void __devexit igb_remove(struct pci_dev *pdev) } /** + * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space + * @adapter: board private structure to initialize + * + * This function initializes the vf specific data storage and then attempts to + * allocate the VFs. The reason for ordering it this way is because it is much + * more expensive time wise to disable SR-IOV than it is to allocate and free + * the memory for the VFs.
+ **/ +static void __devinit igb_probe_vfs(struct igb_adapter * adapter) +{ +#ifdef CONFIG_PCI_IOV + struct pci_dev *pdev = adapter->pdev; + + if (adapter->vfs_allocated_count > 7) + adapter->vfs_allocated_count = 7; + + if (adapter->vfs_allocated_count) { + adapter->vf_data = kcalloc(adapter->vfs_allocated_count, + sizeof(struct vf_data_storage), + GFP_KERNEL); + /* if allocation failed then we do not support SR-IOV */ + if (!adapter->vf_data) { + adapter->vfs_allocated_count = 0; + dev_err(&pdev->dev, "Unable to allocate memory for VF " + "Data Storage\n"); + } + } + + if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) { + kfree(adapter->vf_data); + adapter->vf_data = NULL; +#endif /* CONFIG_PCI_IOV */ + adapter->vfs_allocated_count = 0; +#ifdef CONFIG_PCI_IOV + } else { + unsigned char mac_addr[ETH_ALEN]; + int i; + dev_info(&pdev->dev, "%d vfs allocated\n", + adapter->vfs_allocated_count); + for (i = 0; i < adapter->vfs_allocated_count; i++) { + random_ether_addr(mac_addr); + igb_set_vf_mac(adapter, i, mac_addr); + } + } +#endif /* CONFIG_PCI_IOV */ +} + + +/** + * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp + * @adapter: board private structure to initialize + * + * igb_init_hw_timer initializes the function pointer and values for the hw + * timer found in hardware. + **/ +static void igb_init_hw_timer(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + switch (hw->mac.type) { + case e1000_82580: + memset(&adapter->cycles, 0, sizeof(adapter->cycles)); + adapter->cycles.read = igb_read_clock; + adapter->cycles.mask = CLOCKSOURCE_MASK(64); + adapter->cycles.mult = 1; + /* + * The 82580 timesync updates the system timer every 8ns by 8ns + * and the value cannot be shifted. Instead we need to shift + * the registers to generate a 64bit timer value. As a result + * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by + * 24 in order to generate a larger value for synchronization. + */ + adapter->cycles.shift = IGB_82580_TSYNC_SHIFT; + /* disable system timer temporarily by setting bit 31 */ + wr32(E1000_TSAUXC, 0x80000000); + wrfl(); + + /* Set registers so that rollover occurs soon to test this. */ + wr32(E1000_SYSTIMR, 0x00000000); + wr32(E1000_SYSTIML, 0x80000000); + wr32(E1000_SYSTIMH, 0x000000FF); + wrfl(); + + /* enable system timer by clearing bit 31 */ + wr32(E1000_TSAUXC, 0x0); + wrfl(); + + timecounter_init(&adapter->clock, + &adapter->cycles, + ktime_to_ns(ktime_get_real())); + /* + * Synchronize our NIC clock against system wall clock. NIC + * time stamp reading requires ~3us per sample, each sample + * was pretty stable even under load => only require 10 + * samples for each offset comparison. + */ + memset(&adapter->compare, 0, sizeof(adapter->compare)); + adapter->compare.source = &adapter->clock; + adapter->compare.target = ktime_get_real; + adapter->compare.num_samples = 10; + timecompare_update(&adapter->compare, 0); + break; + case e1000_82576: + /* + * Initialize hardware timer: we keep it running just in case + * that some program needs it later on. + */ + memset(&adapter->cycles, 0, sizeof(adapter->cycles)); + adapter->cycles.read = igb_read_clock; + adapter->cycles.mask = CLOCKSOURCE_MASK(64); + adapter->cycles.mult = 1; + /** + * Scale the NIC clock cycle by a large factor so that + * relatively small clock corrections can be added or + * subtracted at each clock tick.
The drawbacks of a large + * factor are a) that the clock register overflows more quickly + * (not such a big deal) and b) that the increment per tick has + * to fit into 24 bits. As a result we need to use a shift of + * 19 so we can fit a value of 16 into the TIMINCA register. + */ + adapter->cycles.shift = IGB_82576_TSYNC_SHIFT; + wr32(E1000_TIMINCA, + (1 << E1000_TIMINCA_16NS_SHIFT) | + (16 << IGB_82576_TSYNC_SHIFT)); + + /* Set registers so that rollover occurs soon to test this. */ + wr32(E1000_SYSTIML, 0x00000000); + wr32(E1000_SYSTIMH, 0xFF800000); + wrfl(); + + timecounter_init(&adapter->clock, + &adapter->cycles, + ktime_to_ns(ktime_get_real())); + /* + * Synchronize our NIC clock against system wall clock. NIC + * time stamp reading requires ~3us per sample, each sample + * was pretty stable even under load => only require 10 + * samples for each offset comparison. + */ + memset(&adapter->compare, 0, sizeof(adapter->compare)); + adapter->compare.source = &adapter->clock; + adapter->compare.target = ktime_get_real; + adapter->compare.num_samples = 10; + timecompare_update(&adapter->compare, 0); + break; + case e1000_82575: + /* 82575 does not support timesync */ + default: + break; + } + +} + +/** * igb_sw_init - Initialize general software structures (struct igb_adapter) * @adapter: board private structure to initialize * @@ -1699,20 +1926,37 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) adapter->tx_ring_count = IGB_DEFAULT_TXD; adapter->rx_ring_count = IGB_DEFAULT_RXD; - adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; - adapter->rx_ps_hdr_size = 0; /* disable packet split */ + adapter->rx_itr_setting = IGB_DEFAULT_ITR; + adapter->tx_itr_setting = IGB_DEFAULT_ITR; + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - /* This call may decrease the number of queues depending on - * interrupt mode. */ - igb_set_interrupt_capability(adapter); +#ifdef CONFIG_PCI_IOV + if (hw->mac.type == e1000_82576) + adapter->vfs_allocated_count = max_vfs; - if (igb_alloc_queues(adapter)) { +#endif /* CONFIG_PCI_IOV */ + adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); + + /* + * if rss_queues > 4 or vfs are going to be allocated with rss_queues + * then we should combine the queues into a queue pair in order to + * conserve interrupts due to limited supply + */ + if ((adapter->rss_queues > 4) || + ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6))) + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + + /* This call may decrease the number of queues */ + if (igb_init_interrupt_scheme(adapter)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } + igb_init_hw_timer(adapter); + igb_probe_vfs(adapter); + /* Explicitly disable IRQ since the NIC can be in any state. */ igb_irq_disable(adapter); @@ -1757,21 +2001,12 @@ static int igb_open(struct net_device *netdev) /* e1000_power_up_phy(adapter); */ - adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; - if ((adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) - igb_update_mng_vlan(adapter); - /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * as soon as we call pci_request_irq, so we have to setup our * clean_rx handler before we do so. 
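/*
 * Illustrative sketch only (not part of the patch), restating the vector
 * accounting from igb_sw_init()/igb_set_interrupt_capability() above. The
 * helper name igb_example_msix_vectors() is hypothetical; it just shows how
 * many MSI-X vectors end up being requested once queue pairing is factored
 * in, e.g. rss_queues = 8 needs 9 vectors with pairing and 17 without.
 */
static int igb_example_msix_vectors(unsigned int rss_queues, bool queue_pairs)
{
	int numvecs = rss_queues;	/* one vector per Rx queue */

	if (!queue_pairs)
		numvecs += rss_queues;	/* separate Tx handlers need their own vectors */

	return numvecs + 1;		/* plus one vector for link/other causes */
}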
*/ igb_configure(adapter); - igb_vmm_control(adapter); - igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); - igb_set_vmolr(hw, adapter->vfs_allocated_count); - err = igb_request_irq(adapter); if (err) goto err_req_irq; @@ -1779,18 +2014,28 @@ static int igb_open(struct net_device *netdev) /* From here on the code is the same as igb_up() */ clear_bit(__IGB_DOWN, &adapter->state); - for (i = 0; i < adapter->num_rx_queues; i++) - napi_enable(&adapter->rx_ring[i].napi); + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + napi_enable(&q_vector->napi); + } /* Clear any pending interrupts. */ rd32(E1000_ICR); igb_irq_enable(adapter); + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + netif_tx_start_all_queues(netdev); - /* Fire a link status change interrupt to start the watchdog. */ - wr32(E1000_ICS, E1000_ICS_LSC); + /* start the watchdog. */ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); return 0; @@ -1829,28 +2074,18 @@ static int igb_close(struct net_device *netdev) igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); - /* kill manageability vlan ID if supported, but not if a vlan with - * the same ID is registered on the host OS (let 8021q kill it) */ - if ((adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && - !(adapter->vlgrp && - vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) - igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); - return 0; } /** * igb_setup_tx_resources - allocate Tx resources (Descriptors) - * @adapter: board private structure * @tx_ring: tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ -int igb_setup_tx_resources(struct igb_adapter *adapter, - struct igb_ring *tx_ring) +int igb_setup_tx_resources(struct igb_ring *tx_ring) { - struct pci_dev *pdev = adapter->pdev; + struct pci_dev *pdev = tx_ring->pdev; int size; size = sizeof(struct igb_buffer) * tx_ring->count; @@ -1863,20 +2098,20 @@ int igb_setup_tx_resources(struct igb_adapter *adapter, tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); - tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, + tx_ring->desc = pci_alloc_consistent(pdev, + tx_ring->size, &tx_ring->dma); if (!tx_ring->desc) goto err; - tx_ring->adapter = adapter; tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; return 0; err: vfree(tx_ring->buffer_info); - dev_err(&adapter->pdev->dev, + dev_err(&pdev->dev, "Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } @@ -1890,13 +2125,13 @@ err: **/ static int igb_setup_all_tx_resources(struct igb_adapter *adapter) { + struct pci_dev *pdev = adapter->pdev; int i, err = 0; - int r_idx; for (i = 0; i < adapter->num_tx_queues; i++) { - err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]); + err = igb_setup_tx_resources(&adapter->tx_ring[i]); if (err) { - dev_err(&adapter->pdev->dev, + dev_err(&pdev->dev, "Allocation for Tx Queue %u failed\n", i); for (i--; i >= 0; i--) igb_free_tx_resources(&adapter->tx_ring[i]); @@ -1904,57 +2139,24 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter) } } - for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { - r_idx = i % adapter->num_tx_queues; + for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) { + int r_idx = i % 
adapter->num_tx_queues; adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; } return err; } /** - * igb_configure_tx - Configure transmit Unit after Reset - * @adapter: board private structure - * - * Configure the Tx unit of the MAC after a reset. + * igb_setup_tctl - configure the transmit control registers + * @adapter: Board private structure **/ -static void igb_configure_tx(struct igb_adapter *adapter) +void igb_setup_tctl(struct igb_adapter *adapter) { - u64 tdba; struct e1000_hw *hw = &adapter->hw; u32 tctl; - u32 txdctl, txctrl; - int i, j; - - for (i = 0; i < adapter->num_tx_queues; i++) { - struct igb_ring *ring = &adapter->tx_ring[i]; - j = ring->reg_idx; - wr32(E1000_TDLEN(j), - ring->count * sizeof(union e1000_adv_tx_desc)); - tdba = ring->dma; - wr32(E1000_TDBAL(j), - tdba & 0x00000000ffffffffULL); - wr32(E1000_TDBAH(j), tdba >> 32); - - ring->head = E1000_TDH(j); - ring->tail = E1000_TDT(j); - writel(0, hw->hw_addr + ring->tail); - writel(0, hw->hw_addr + ring->head); - txdctl = rd32(E1000_TXDCTL(j)); - txdctl |= E1000_TXDCTL_QUEUE_ENABLE; - wr32(E1000_TXDCTL(j), txdctl); - - /* Turn off Relaxed Ordering on head write-backs. The - * writebacks MUST be delivered in order or it will - * completely screw up our bookeeping. - */ - txctrl = rd32(E1000_DCA_TXCTRL(j)); - txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; - wr32(E1000_DCA_TXCTRL(j), txctrl); - } - /* disable queue 0 to prevent tail bump w/o re-configuration */ - if (adapter->vfs_allocated_count) - wr32(E1000_TXDCTL(0), 0); + /* disable queue 0 which is enabled by default on 82575 and 82576 */ + wr32(E1000_TXDCTL(0), 0); /* Program the Transmit Control Register */ tctl = rd32(E1000_TCTL); @@ -1964,9 +2166,6 @@ static void igb_configure_tx(struct igb_adapter *adapter) igb_config_collision_dist(hw); - /* Setup Transmit Descriptor Settings for eop descriptor */ - adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS; - /* Enable transmits */ tctl |= E1000_TCTL_EN; @@ -1974,16 +2173,69 @@ static void igb_configure_tx(struct igb_adapter *adapter) } /** - * igb_setup_rx_resources - allocate Rx resources (Descriptors) + * igb_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + **/ +void igb_configure_tx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txdctl; + u64 tdba = ring->dma; + int reg_idx = ring->reg_idx; + + /* disable the queue */ + txdctl = rd32(E1000_TXDCTL(reg_idx)); + wr32(E1000_TXDCTL(reg_idx), + txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); + wrfl(); + mdelay(10); + + wr32(E1000_TDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_tx_desc)); + wr32(E1000_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(E1000_TDBAH(reg_idx), tdba >> 32); + + ring->head = hw->hw_addr + E1000_TDH(reg_idx); + ring->tail = hw->hw_addr + E1000_TDT(reg_idx); + writel(0, ring->head); + writel(0, ring->tail); + + txdctl |= IGB_TX_PTHRESH; + txdctl |= IGB_TX_HTHRESH << 8; + txdctl |= IGB_TX_WTHRESH << 16; + + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + wr32(E1000_TXDCTL(reg_idx), txdctl); +} + +/** + * igb_configure_tx - Configure transmit Unit after Reset * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
+ **/ +static void igb_configure_tx(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igb_configure_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * igb_setup_rx_resources - allocate Rx resources (Descriptors) * @rx_ring: rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ -int igb_setup_rx_resources(struct igb_adapter *adapter, - struct igb_ring *rx_ring) +int igb_setup_rx_resources(struct igb_ring *rx_ring) { - struct pci_dev *pdev = adapter->pdev; + struct pci_dev *pdev = rx_ring->pdev; int size, desc_len; size = sizeof(struct igb_buffer) * rx_ring->count; @@ -2007,13 +2259,12 @@ int igb_setup_rx_resources(struct igb_adapter *adapter, rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - rx_ring->adapter = adapter; - return 0; err: vfree(rx_ring->buffer_info); - dev_err(&adapter->pdev->dev, "Unable to allocate memory for " + rx_ring->buffer_info = NULL; + dev_err(&pdev->dev, "Unable to allocate memory for " "the receive descriptor ring\n"); return -ENOMEM; } @@ -2027,12 +2278,13 @@ err: **/ static int igb_setup_all_rx_resources(struct igb_adapter *adapter) { + struct pci_dev *pdev = adapter->pdev; int i, err = 0; for (i = 0; i < adapter->num_rx_queues; i++) { - err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]); + err = igb_setup_rx_resources(&adapter->rx_ring[i]); if (err) { - dev_err(&adapter->pdev->dev, + dev_err(&pdev->dev, "Allocation for Rx Queue %u failed\n", i); for (i--; i >= 0; i--) igb_free_rx_resources(&adapter->rx_ring[i]); @@ -2044,15 +2296,122 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter) } /** + * igb_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + **/ +static void igb_setup_mrqc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 j, num_rx_queues, shift = 0, shift2 = 0; + union e1000_reta { + u32 dword; + u8 bytes[4]; + } reta; + static const u8 rsshash[40] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, + 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, + 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, + 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; + + /* Fill out hash function seeds */ + for (j = 0; j < 10; j++) { + u32 rsskey = rsshash[(j * 4)]; + rsskey |= rsshash[(j * 4) + 1] << 8; + rsskey |= rsshash[(j * 4) + 2] << 16; + rsskey |= rsshash[(j * 4) + 3] << 24; + array_wr32(E1000_RSSRK(0), j, rsskey); + } + + num_rx_queues = adapter->rss_queues; + + if (adapter->vfs_allocated_count) { + /* 82575 and 82576 supports 2 RSS queues for VMDq */ + switch (hw->mac.type) { + case e1000_82580: + num_rx_queues = 1; + shift = 0; + break; + case e1000_82576: + shift = 3; + num_rx_queues = 2; + break; + case e1000_82575: + shift = 2; + shift2 = 6; + default: + break; + } + } else { + if (hw->mac.type == e1000_82575) + shift = 6; + } + + for (j = 0; j < (32 * 4); j++) { + reta.bytes[j & 3] = (j % num_rx_queues) << shift; + if (shift2) + reta.bytes[j & 3] |= num_rx_queues << shift2; + if ((j & 3) == 3) + wr32(E1000_RETA(j >> 2), reta.dword); + } + + /* + * Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
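/*
 * Illustrative note (not part of the patch) on the redirection table fill
 * above, assuming rss_queues = 4 and no VFs: the 32 RETA registers hold 128
 * byte-wide entries, so the loop writes the repeating pattern 0,1,2,3,...
 * and the RSS hash then indexes this table to pick one of the four queues.
 * On 82575 the queue number is additionally shifted left by 6 because that
 * part expects the queue index in the upper bits of each entry.
 */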
No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + if (adapter->hw.mac.type >= e1000_82576) + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= E1000_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(E1000_RXCSUM, rxcsum); + + /* If VMDq is enabled then we set the appropriate mode for that, else + * we default to RSS so that an RSS hash is calculated per packet even + * if we are only using one queue */ + if (adapter->vfs_allocated_count) { + if (hw->mac.type > e1000_82575) { + /* Set the default pool for the PF's first queue */ + u32 vtctl = rd32(E1000_VT_CTL); + vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | + E1000_VT_CTL_DISABLE_DEF_POOL); + vtctl |= adapter->vfs_allocated_count << + E1000_VT_CTL_DEFAULT_POOL_SHIFT; + wr32(E1000_VT_CTL, vtctl); + } + if (adapter->rss_queues > 1) + mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; + else + mrqc = E1000_MRQC_ENABLE_VMDQ; + } else { + mrqc = E1000_MRQC_ENABLE_RSS_4Q; + } + igb_vmm_control(adapter); + + mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP); + mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP); + mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP | + E1000_MRQC_RSS_FIELD_IPV6_UDP); + mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); + + wr32(E1000_MRQC, mrqc); +} + +/** * igb_setup_rctl - configure the receive control registers * @adapter: Board private structure **/ -static void igb_setup_rctl(struct igb_adapter *adapter) +void igb_setup_rctl(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 rctl; - u32 srrctl = 0; - int i; rctl = rd32(E1000_RCTL); @@ -2069,75 +2428,45 @@ static void igb_setup_rctl(struct igb_adapter *adapter) */ rctl |= E1000_RCTL_SECRC; - /* - * disable store bad packets and clear size bits. - */ + /* disable store bad packets and clear size bits. */ rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); - /* enable LPE when to prevent packets larger than max_frame_size */ - rctl |= E1000_RCTL_LPE; - - /* Setup buffer sizes */ - switch (adapter->rx_buffer_len) { - case IGB_RXBUFFER_256: - rctl |= E1000_RCTL_SZ_256; - break; - case IGB_RXBUFFER_512: - rctl |= E1000_RCTL_SZ_512; - break; - default: - srrctl = ALIGN(adapter->rx_buffer_len, 1024) - >> E1000_SRRCTL_BSIZEPKT_SHIFT; - break; - } + /* enable LPE to prevent packets larger than max_frame_size */ + rctl |= E1000_RCTL_LPE; - /* 82575 and greater support packet-split where the protocol - * header is placed in skb->data and the packet data is - * placed in pages hanging off of skb_shinfo(skb)->nr_frags. - * In the case of a non-split, skb->data is linearly filled, - * followed by the page buffers. Therefore, skb->data is - * sized to hold the largest protocol header. - */ - /* allocations using alloc_page take too long for regular MTU - * so only enable packet split for jumbo frames */ - if (adapter->netdev->mtu > ETH_DATA_LEN) { - adapter->rx_ps_hdr_size = IGB_RXBUFFER_128; - srrctl |= adapter->rx_ps_hdr_size << - E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; - srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - } else { - adapter->rx_ps_hdr_size = 0; - srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; - } + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(E1000_RXDCTL(0), 0); /* Attention!!! 
For SR-IOV PF driver operations you must enable * queue drop for all VF and PF queues to prevent head of line blocking * if an un-trusted VF does not provide descriptors to hardware. */ if (adapter->vfs_allocated_count) { - u32 vmolr; - /* set all queue drop enable bits */ wr32(E1000_QDE, ALL_QUEUES); - srrctl |= E1000_SRRCTL_DROP_EN; + } - /* disable queue 0 to prevent tail write w/o re-config */ - wr32(E1000_RXDCTL(0), 0); + wr32(E1000_RCTL, rctl); +} - vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count)); - if (rctl & E1000_RCTL_LPE) - vmolr |= E1000_VMOLR_LPE; - if (adapter->num_rx_queues > 1) - vmolr |= E1000_VMOLR_RSSE; - wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr); - } +static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, + int vfn) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; - for (i = 0; i < adapter->num_rx_queues; i++) { - int j = adapter->rx_ring[i].reg_idx; - wr32(E1000_SRRCTL(j), srrctl); - } + /* if it isn't the PF check to see if VFs are enabled and + * increase the size to support vlan tags */ + if (vfn < adapter->vfs_allocated_count && + adapter->vf_data[vfn].vlans_enabled) + size += VLAN_TAG_SIZE; - wr32(E1000_RCTL, rctl); + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= size | E1000_VMOLR_LPE; + wr32(E1000_VMOLR(vfn), vmolr); + + return 0; } /** @@ -2159,33 +2488,107 @@ static void igb_rlpml_set(struct igb_adapter *adapter) * size and set the VMOLR RLPML to the size we need */ if (pf_id) { igb_set_vf_rlpml(adapter, max_frame_size, pf_id); - max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE; + max_frame_size = MAX_JUMBO_FRAME_SIZE; } wr32(E1000_RLPML, max_frame_size); } +static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + /* + * This register exists only on 82576 and newer so if we are older then + * we should exit and do nothing + */ + if (hw->mac.type < e1000_82576) + return; + + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */ + E1000_VMOLR_STRVLAN; /* Strip vlan tags */ + + /* clear all bits that might not be set */ + vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); + + if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ + /* + * for VMDq only allow the VFs and pool 0 to accept broadcast and + * multicast packets + */ + if (vfn <= adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + + wr32(E1000_VMOLR(vfn), vmolr); +} + /** - * igb_configure_vt_default_pool - Configure VT default pool + * igb_configure_rx_ring - Configure a receive ring after Reset * @adapter: board private structure + * @ring: receive ring to be configured * - * Configure the default pool + * Configure the Rx unit of the MAC after a reset. 
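igb_set_vf_rlpml() above rewrites the per-pool long-packet limit: the old RLPML field is cleared, the new size (plus one VLAN tag when the pool has VLANs enabled) is written back, and LPE is kept set so oversized frames are still accepted. A minimal sketch of that read-modify-write follows; the mask and LPE values are stated for illustration only, not copied from e1000_defines.h.

#include <stdint.h>
#include <stdio.h>

#define VMOLR_RLPML_MASK  0x00003FFF   /* long packet max length field (assumed) */
#define VMOLR_LPE         0x00010000   /* accept long packets (assumed)          */
#define VLAN_TAG_SIZE     4

/* Sketch of igb_set_vf_rlpml(): rewrite the per-pool max frame length,
 * growing it by one VLAN tag when the pool has VLANs enabled. */
static uint32_t set_pool_rlpml(uint32_t vmolr, int size, int vlans_enabled)
{
    if (vlans_enabled)
        size += VLAN_TAG_SIZE;

    vmolr &= ~VMOLR_RLPML_MASK;    /* clear the old length field     */
    vmolr |= size | VMOLR_LPE;     /* program new length, allow LPE  */
    return vmolr;
}

int main(void)
{
    uint32_t vmolr = set_pool_rlpml(0, 1522, 1);
    printf("VMOLR = 0x%08x (len=%u)\n", (unsigned)vmolr,
           (unsigned)(vmolr & VMOLR_RLPML_MASK));
    return 0;
}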
**/ -static void igb_configure_vt_default_pool(struct igb_adapter *adapter) +void igb_configure_rx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) { struct e1000_hw *hw = &adapter->hw; - u16 pf_id = adapter->vfs_allocated_count; - u32 vtctl; + u64 rdba = ring->dma; + int reg_idx = ring->reg_idx; + u32 srrctl, rxdctl; + + /* disable the queue */ + rxdctl = rd32(E1000_RXDCTL(reg_idx)); + wr32(E1000_RXDCTL(reg_idx), + rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + + /* Set DMA base address registers */ + wr32(E1000_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(E1000_RDBAH(reg_idx), rdba >> 32); + wr32(E1000_RDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_rx_desc)); + + /* initialize head and tail */ + ring->head = hw->hw_addr + E1000_RDH(reg_idx); + ring->tail = hw->hw_addr + E1000_RDT(reg_idx); + writel(0, ring->head); + writel(0, ring->tail); + + /* set descriptor configuration */ + if (ring->rx_buffer_len < IGB_RXBUFFER_1024) { + srrctl = ALIGN(ring->rx_buffer_len, 64) << + E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; +#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 + srrctl |= IGB_RXBUFFER_16384 >> + E1000_SRRCTL_BSIZEPKT_SHIFT; +#else + srrctl |= (PAGE_SIZE / 2) >> + E1000_SRRCTL_BSIZEPKT_SHIFT; +#endif + srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } else { + srrctl = ALIGN(ring->rx_buffer_len, 1024) >> + E1000_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + } - /* not in sr-iov mode - do nothing */ - if (!pf_id) - return; + wr32(E1000_SRRCTL(reg_idx), srrctl); + + /* set filtering for VMDQ pools */ + igb_set_vmolr(adapter, reg_idx & 0x7); - vtctl = rd32(E1000_VT_CTL); - vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | - E1000_VT_CTL_DISABLE_DEF_POOL); - vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT; - wr32(E1000_VT_CTL, vtctl); + /* enable receive descriptor fetching */ + rxdctl = rd32(E1000_RXDCTL(reg_idx)); + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= IGB_RX_PTHRESH; + rxdctl |= IGB_RX_HTHRESH << 8; + rxdctl |= IGB_RX_WTHRESH << 16; + wr32(E1000_RXDCTL(reg_idx), rxdctl); } /** @@ -2196,112 +2599,19 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter) **/ static void igb_configure_rx(struct igb_adapter *adapter) { - u64 rdba; - struct e1000_hw *hw = &adapter->hw; - u32 rctl, rxcsum; - u32 rxdctl; int i; - /* disable receives while setting up the descriptors */ - rctl = rd32(E1000_RCTL); - wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); - wrfl(); - mdelay(10); + /* set UTA to appropriate mode */ + igb_set_uta(adapter); - if (adapter->itr_setting > 3) - wr32(E1000_ITR, adapter->itr); + /* set the correct pool for the PF default MAC address in entry 0 */ + igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, + adapter->vfs_allocated_count); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ - for (i = 0; i < adapter->num_rx_queues; i++) { - struct igb_ring *ring = &adapter->rx_ring[i]; - int j = ring->reg_idx; - rdba = ring->dma; - wr32(E1000_RDBAL(j), - rdba & 0x00000000ffffffffULL); - wr32(E1000_RDBAH(j), rdba >> 32); - wr32(E1000_RDLEN(j), - ring->count * sizeof(union e1000_adv_rx_desc)); - - ring->head = E1000_RDH(j); - ring->tail = E1000_RDT(j); - writel(0, hw->hw_addr + ring->tail); - writel(0, hw->hw_addr + ring->head); - - rxdctl = rd32(E1000_RXDCTL(j)); - rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; - rxdctl &= 0xFFF00000; - rxdctl |= IGB_RX_PTHRESH; - rxdctl |= IGB_RX_HTHRESH << 8; - rxdctl |= IGB_RX_WTHRESH << 16; - wr32(E1000_RXDCTL(j), rxdctl); - } - - if 
(adapter->num_rx_queues > 1) { - u32 random[10]; - u32 mrqc; - u32 j, shift; - union e1000_reta { - u32 dword; - u8 bytes[4]; - } reta; - - get_random_bytes(&random[0], 40); - - if (hw->mac.type >= e1000_82576) - shift = 0; - else - shift = 6; - for (j = 0; j < (32 * 4); j++) { - reta.bytes[j & 3] = - adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift; - if ((j & 3) == 3) - writel(reta.dword, - hw->hw_addr + E1000_RETA(0) + (j & ~3)); - } - if (adapter->vfs_allocated_count) - mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; - else - mrqc = E1000_MRQC_ENABLE_RSS_4Q; - - /* Fill out hash function seeds */ - for (j = 0; j < 10; j++) - array_wr32(E1000_RSSRK(0), j, random[j]); - - mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | - E1000_MRQC_RSS_FIELD_IPV4_TCP); - mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | - E1000_MRQC_RSS_FIELD_IPV6_TCP); - mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP | - E1000_MRQC_RSS_FIELD_IPV6_UDP); - mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | - E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); - - wr32(E1000_MRQC, mrqc); - } else if (adapter->vfs_allocated_count) { - /* Enable multi-queue for sr-iov */ - wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ); - } - - /* Enable Receive Checksum Offload for TCP and UDP */ - rxcsum = rd32(E1000_RXCSUM); - /* Disable raw packet checksumming */ - rxcsum |= E1000_RXCSUM_PCSD; - - if (adapter->hw.mac.type == e1000_82576) - /* Enable Receive Checksum Offload for SCTP */ - rxcsum |= E1000_RXCSUM_CRCOFL; - - /* Don't need to set TUOFL or IPOFL, they default to 1 */ - wr32(E1000_RXCSUM, rxcsum); - - /* Set the default pool for the PF's first queue */ - igb_configure_vt_default_pool(adapter); - - igb_rlpml_set(adapter); - - /* Enable Receives */ - wr32(E1000_RCTL, rctl); + for (i = 0; i < adapter->num_rx_queues; i++) + igb_configure_rx_ring(adapter, &adapter->rx_ring[i]); } /** @@ -2312,14 +2622,17 @@ static void igb_configure_rx(struct igb_adapter *adapter) **/ void igb_free_tx_resources(struct igb_ring *tx_ring) { - struct pci_dev *pdev = tx_ring->adapter->pdev; - igb_clean_tx_ring(tx_ring); vfree(tx_ring->buffer_info); tx_ring->buffer_info = NULL; - pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + pci_free_consistent(tx_ring->pdev, tx_ring->size, + tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } @@ -2338,18 +2651,30 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) igb_free_tx_resources(&adapter->tx_ring[i]); } -static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter, - struct igb_buffer *buffer_info) +void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring, + struct igb_buffer *buffer_info) { - buffer_info->dma = 0; + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + pci_unmap_page(tx_ring->pdev, + buffer_info->dma, + buffer_info->length, + PCI_DMA_TODEVICE); + else + pci_unmap_single(tx_ring->pdev, + buffer_info->dma, + buffer_info->length, + PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } if (buffer_info->skb) { - skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, - DMA_TO_DEVICE); dev_kfree_skb_any(buffer_info->skb); buffer_info->skb = NULL; } buffer_info->time_stamp = 0; - /* buffer_info must be completely set up in the transmit path */ + buffer_info->length = 0; + buffer_info->next_to_watch = 0; + buffer_info->mapped_as_page = false; } /** @@ -2358,7 +2683,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter, **/ static void igb_clean_tx_ring(struct igb_ring *tx_ring) { - struct igb_adapter *adapter = 
tx_ring->adapter; struct igb_buffer *buffer_info; unsigned long size; unsigned int i; @@ -2369,21 +2693,17 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) for (i = 0; i < tx_ring->count; i++) { buffer_info = &tx_ring->buffer_info[i]; - igb_unmap_and_free_tx_resource(adapter, buffer_info); + igb_unmap_and_free_tx_resource(tx_ring, buffer_info); } size = sizeof(struct igb_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ - memset(tx_ring->desc, 0, tx_ring->size); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - - writel(0, adapter->hw.hw_addr + tx_ring->head); - writel(0, adapter->hw.hw_addr + tx_ring->tail); } /** @@ -2406,14 +2726,17 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter) **/ void igb_free_rx_resources(struct igb_ring *rx_ring) { - struct pci_dev *pdev = rx_ring->adapter->pdev; - igb_clean_rx_ring(rx_ring); vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; - pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + pci_free_consistent(rx_ring->pdev, rx_ring->size, + rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } @@ -2438,26 +2761,21 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) **/ static void igb_clean_rx_ring(struct igb_ring *rx_ring) { - struct igb_adapter *adapter = rx_ring->adapter; struct igb_buffer *buffer_info; - struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; if (!rx_ring->buffer_info) return; + /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; if (buffer_info->dma) { - if (adapter->rx_ps_hdr_size) - pci_unmap_single(pdev, buffer_info->dma, - adapter->rx_ps_hdr_size, - PCI_DMA_FROMDEVICE); - else - pci_unmap_single(pdev, buffer_info->dma, - adapter->rx_buffer_len, - PCI_DMA_FROMDEVICE); + pci_unmap_single(rx_ring->pdev, + buffer_info->dma, + rx_ring->rx_buffer_len, + PCI_DMA_FROMDEVICE); buffer_info->dma = 0; } @@ -2465,14 +2783,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; } + if (buffer_info->page_dma) { + pci_unmap_page(rx_ring->pdev, + buffer_info->page_dma, + PAGE_SIZE / 2, + PCI_DMA_FROMDEVICE); + buffer_info->page_dma = 0; + } if (buffer_info->page) { - if (buffer_info->page_dma) - pci_unmap_page(pdev, buffer_info->page_dma, - PAGE_SIZE / 2, - PCI_DMA_FROMDEVICE); put_page(buffer_info->page); buffer_info->page = NULL; - buffer_info->page_dma = 0; buffer_info->page_offset = 0; } } @@ -2485,9 +2805,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - - writel(0, adapter->hw.hw_addr + rx_ring->head); - writel(0, adapter->hw.hw_addr + rx_ring->tail); } /** @@ -2521,61 +2838,90 @@ static int igb_set_mac(struct net_device *netdev, void *p) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - igb_rar_set(hw, hw->mac.addr, 0); - igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); + /* set the correct pool for the new PF MAC address in entry 0 */ + igb_rar_set_qsel(adapter, hw->mac.addr, 0, + adapter->vfs_allocated_count); return 0; } /** - * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * igb_write_mc_addr_list - write multicast addresses to MTA * @netdev: network interface device structure * - * The set_rx_mode entry point is called whenever the 
unicast or multicast - * address lists or the network interface flags are updated. This routine is - * responsible for configuring the hardware for proper unicast, multicast, - * promiscuous mode, and all-multi behavior. + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA **/ -static void igb_set_rx_mode(struct net_device *netdev) +static int igb_write_mc_addr_list(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - unsigned int rar_entries = hw->mac.rar_entry_count - - (adapter->vfs_allocated_count + 1); struct dev_mc_list *mc_ptr = netdev->mc_list; - u8 *mta_list = NULL; - u32 rctl; + u8 *mta_list; + u32 vmolr = 0; int i; - /* Check for Promiscuous and All Multicast modes */ - rctl = rd32(E1000_RCTL); + if (!netdev->mc_count) { + /* nothing to program, so clear mc list */ + igb_update_mc_addr_list(hw, NULL, 0); + igb_restore_vf_multicasts(adapter); + return 0; + } - if (netdev->flags & IFF_PROMISC) { - rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); - rctl &= ~E1000_RCTL_VFE; - } else { - if (netdev->flags & IFF_ALLMULTI) - rctl |= E1000_RCTL_MPE; - else - rctl &= ~E1000_RCTL_MPE; + mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; - if (netdev->uc.count > rar_entries) - rctl |= E1000_RCTL_UPE; - else - rctl &= ~E1000_RCTL_UPE; - rctl |= E1000_RCTL_VFE; + /* set vmolr receive overflow multicast bit */ + vmolr |= E1000_VMOLR_ROMPE; + + /* The shared function expects a packed array of only addresses. */ + mc_ptr = netdev->mc_list; + + for (i = 0; i < netdev->mc_count; i++) { + if (!mc_ptr) + break; + memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); + mc_ptr = mc_ptr->next; } - wr32(E1000_RCTL, rctl); + igb_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev->mc_count; +} + +/** + * igb_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. 
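igb_write_mc_addr_list() above flattens the netdev multicast list into a packed array of 6-byte addresses before handing it to igb_update_mc_addr_list(), which expects exactly that layout. A small sketch of the packing step, using a simplified mc_entry list as a stand-in for struct dev_mc_list:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

/* Simplified stand-in for struct dev_mc_list (assumption). */
struct mc_entry {
    uint8_t addr[ETH_ALEN];
    struct mc_entry *next;
};

/* Sketch of the packing loop from igb_write_mc_addr_list(): the MTA helper
 * wants a packed array of 6-byte addresses, so the linked list is flattened
 * into one allocation before it is handed down. */
uint8_t *pack_mc_list(const struct mc_entry *list, int count, int *written)
{
    uint8_t *mta = calloc(count, ETH_ALEN);
    int i;

    if (!mta)
        return NULL;

    for (i = 0; i < count && list; i++, list = list->next)
        memcpy(mta + i * ETH_ALEN, list->addr, ETH_ALEN);

    *written = i;      /* number of addresses actually copied */
    return mta;        /* caller frees, mirroring the kfree() above */
}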
+ * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int igb_write_uc_addr_list(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int vfn = adapter->vfs_allocated_count; + unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev->uc.count > rar_entries) + return -ENOMEM; if (netdev->uc.count && rar_entries) { struct netdev_hw_addr *ha; list_for_each_entry(ha, &netdev->uc.list, list) { if (!rar_entries) break; - igb_rar_set(hw, ha->addr, rar_entries); - igb_set_rah_pool(hw, adapter->vfs_allocated_count, - rar_entries); - rar_entries--; + igb_rar_set_qsel(adapter, ha->addr, + rar_entries--, + vfn); + count++; } } /* write the addresses in reverse order to avoid write combining */ @@ -2585,29 +2931,79 @@ static void igb_set_rx_mode(struct net_device *netdev) } wrfl(); - if (!netdev->mc_count) { - /* nothing to program, so clear mc list */ - igb_update_mc_addr_list(hw, NULL, 0); - igb_restore_vf_multicasts(adapter); - return; + return count; +} + +/** + * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void igb_set_rx_mode(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int vfn = adapter->vfs_allocated_count; + u32 rctl, vmolr = 0; + int count; + + /* Check for Promiscuous and All Multicast modes */ + rctl = rd32(E1000_RCTL); + + /* clear the effected bits */ + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else { + /* + * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscous mode so + * that we can at least receive multicast traffic + */ + count = igb_write_mc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else if (count) { + vmolr |= E1000_VMOLR_ROMPE; + } + } + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscous mode + */ + count = igb_write_uc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; + } + rctl |= E1000_RCTL_VFE; } + wr32(E1000_RCTL, rctl); - mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC); - if (!mta_list) { - dev_err(&adapter->pdev->dev, - "failed to allocate multicast filter list\n"); + /* + * In order to support SR-IOV and eventually VMDq it is necessary to set + * the VMOLR to enable the appropriate modes. Without this workaround + * we will have issues with VLAN tag stripping not being done for frames + * that are only arriving because we are the default pool + */ + if (hw->mac.type < e1000_82576) return; - } - /* The shared function expects a packed array of only addresses. 
*/ - for (i = 0; i < netdev->mc_count; i++) { - if (!mc_ptr) - break; - memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); - mc_ptr = mc_ptr->next; - } - igb_update_mc_addr_list(hw, mta_list, i); - kfree(mta_list); + vmolr |= rd32(E1000_VMOLR(vfn)) & + ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); + wr32(E1000_VMOLR(vfn), vmolr); igb_restore_vf_multicasts(adapter); } @@ -2669,37 +3065,33 @@ static void igb_watchdog(unsigned long data) static void igb_watchdog_task(struct work_struct *work) { struct igb_adapter *adapter = container_of(work, - struct igb_adapter, watchdog_task); + struct igb_adapter, + watchdog_task); struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; - struct igb_ring *tx_ring = adapter->tx_ring; u32 link; - u32 eics = 0; int i; link = igb_has_link(adapter); - if ((netif_carrier_ok(netdev)) && link) - goto link_up; - if (link) { if (!netif_carrier_ok(netdev)) { u32 ctrl; - hw->mac.ops.get_speed_and_duplex(&adapter->hw, - &adapter->link_speed, - &adapter->link_duplex); + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); ctrl = rd32(E1000_CTRL); /* Links status message must follow this format */ printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " "Flow Control: %s\n", - netdev->name, - adapter->link_speed, - adapter->link_duplex == FULL_DUPLEX ? + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? "Full Duplex" : "Half Duplex", - ((ctrl & E1000_CTRL_TFCE) && (ctrl & - E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & - E1000_CTRL_RFCE) ? "RX" : ((ctrl & - E1000_CTRL_TFCE) ? "TX" : "None"))); + ((ctrl & E1000_CTRL_TFCE) && + (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" : + ((ctrl & E1000_CTRL_RFCE) ? "RX" : + ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); /* tweak tx_queue_len according to speed/duplex and * adjust the timeout factor */ @@ -2743,46 +3135,40 @@ static void igb_watchdog_task(struct work_struct *work) } } -link_up: igb_update_stats(adapter); + igb_update_adaptive(hw); - hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; - adapter->tpt_old = adapter->stats.tpt; - hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old; - adapter->colc_old = adapter->stats.colc; - - adapter->gorc = adapter->stats.gorc - adapter->gorc_old; - adapter->gorc_old = adapter->stats.gorc; - adapter->gotc = adapter->stats.gotc - adapter->gotc_old; - adapter->gotc_old = adapter->stats.gotc; - - igb_update_adaptive(&adapter->hw); - - if (!netif_carrier_ok(netdev)) { - if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *tx_ring = &adapter->tx_ring[i]; + if (!netif_carrier_ok(netdev)) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). 
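The reworked igb_set_rx_mode() above degrades gracefully: promiscuous mode forces UPE|MPE, otherwise the multicast and unicast lists are written to the MTA and RAR tables and the matching promiscuous bit is set only when a list does not fit (the list-writing helpers return a negative value in that case). A compact sketch of that decision follows; the RCTL bit values are shown for illustration only.

#include <stdint.h>

#define RCTL_UPE  (1u << 3)    /* unicast promiscuous (illustrative)   */
#define RCTL_MPE  (1u << 4)    /* multicast promiscuous (illustrative) */
#define RCTL_VFE  (1u << 18)   /* VLAN filter enable (illustrative)    */

#define FLAG_PROMISC   0x1
#define FLAG_ALLMULTI  0x2

/* Sketch of the receive-mode decision in igb_set_rx_mode(): fall back to the
 * promiscuous bits whenever the exact-filter tables cannot hold the requested
 * address lists (mc_written / uc_written < 0). */
uint32_t rx_mode_bits(unsigned int flags, int mc_written, int uc_written)
{
    uint32_t rctl = 0;

    if (flags & FLAG_PROMISC)
        return RCTL_UPE | RCTL_MPE;

    if ((flags & FLAG_ALLMULTI) || mc_written < 0)
        rctl |= RCTL_MPE;      /* MTA full or all-multi requested */
    if (uc_written < 0)
        rctl |= RCTL_UPE;      /* RAR table too small             */

    return rctl | RCTL_VFE;    /* keep VLAN filtering enabled     */
}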
*/ - adapter->tx_timeout_count++; - schedule_work(&adapter->reset_task); - /* return immediately since reset is imminent */ - return; + if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } } + + /* Force detection of hung controller every watchdog period */ + tx_ring->detect_tx_hung = true; } /* Cause software interrupt to ensure rx ring is cleaned */ if (adapter->msix_entries) { - for (i = 0; i < adapter->num_rx_queues; i++) - eics |= adapter->rx_ring[i].eims_value; + u32 eics = 0; + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + eics |= q_vector->eims_value; + } wr32(E1000_EICS, eics); } else { wr32(E1000_ICS, E1000_ICS_RXDMT0); } - /* Force detection of hung controller every watchdog period */ - tx_ring->detect_tx_hung = true; - /* Reset the timer */ if (!test_bit(__IGB_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, @@ -2796,7 +3182,6 @@ enum latency_range { latency_invalid = 255 }; - /** * igb_update_ring_itr - update the dynamic ITR value based on packet size * @@ -2811,25 +3196,37 @@ enum latency_range { * parameter (see igb_param.c) * NOTE: This function is called only when operating in a multiqueue * receive environment. - * @rx_ring: pointer to ring + * @q_vector: pointer to q_vector **/ -static void igb_update_ring_itr(struct igb_ring *rx_ring) +static void igb_update_ring_itr(struct igb_q_vector *q_vector) { - int new_val = rx_ring->itr_val; + int new_val = q_vector->itr_val; int avg_wire_size = 0; - struct igb_adapter *adapter = rx_ring->adapter; - - if (!rx_ring->total_packets) - goto clear_counts; /* no packets, so don't do anything */ + struct igb_adapter *adapter = q_vector->adapter; /* For non-gigabit speeds, just fix the interrupt rate at 4000 * ints/sec - ITR timer value of 120 ticks. */ if (adapter->link_speed != SPEED_1000) { - new_val = 120; + new_val = 976; goto set_itr_val; } - avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets; + + if (q_vector->rx_ring && q_vector->rx_ring->total_packets) { + struct igb_ring *ring = q_vector->rx_ring; + avg_wire_size = ring->total_bytes / ring->total_packets; + } + + if (q_vector->tx_ring && q_vector->tx_ring->total_packets) { + struct igb_ring *ring = q_vector->tx_ring; + avg_wire_size = max_t(u32, avg_wire_size, + (ring->total_bytes / + ring->total_packets)); + } + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; /* Add 24 bytes to size to account for CRC, preamble, and gap */ avg_wire_size += 24; @@ -2844,13 +3241,19 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring) new_val = avg_wire_size / 2; set_itr_val: - if (new_val != rx_ring->itr_val) { - rx_ring->itr_val = new_val; - rx_ring->set_itr = 1; + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; } clear_counts: - rx_ring->total_bytes = 0; - rx_ring->total_packets = 0; + if (q_vector->rx_ring) { + q_vector->rx_ring->total_bytes = 0; + q_vector->rx_ring->total_packets = 0; + } + if (q_vector->tx_ring) { + q_vector->tx_ring->total_bytes = 0; + q_vector->tx_ring->total_packets = 0; + } } /** @@ -2867,7 +3270,7 @@ clear_counts: * NOTE: These calculations are only valid when operating in a single- * queue environment. 
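igb_update_ring_itr() above now derives the dynamic ITR from both rings of a q_vector: it takes the larger of the Rx and Tx average packet sizes, adds 24 bytes for CRC, preamble and inter-frame gap, and halves the result (the intermediate clamping is elided by the hunk context and is therefore omitted below as well). A standalone sketch of that calculation, assuming the ring_counters layout shown:

#include <stdint.h>

struct ring_counters {
    uint64_t bytes;
    uint64_t packets;
};

/* Sketch of the average-packet-size step in igb_update_ring_itr(): combine
 * the Rx and Tx counters by taking the larger per-ring average, add the
 * 24-byte wire overhead, and derive the interrupt moderation value from it. */
unsigned int estimate_itr(const struct ring_counters *rx,
                          const struct ring_counters *tx)
{
    uint64_t avg_wire_size = 0;

    if (rx && rx->packets)
        avg_wire_size = rx->bytes / rx->packets;
    if (tx && tx->packets) {
        uint64_t tx_avg = tx->bytes / tx->packets;
        if (tx_avg > avg_wire_size)
            avg_wire_size = tx_avg;
    }
    if (!avg_wire_size)
        return 0;                  /* no traffic: leave the ITR alone */

    avg_wire_size += 24;           /* CRC + preamble + inter-frame gap */
    return (unsigned int)(avg_wire_size / 2);
}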
* @adapter: pointer to adapter - * @itr_setting: current adapter->itr + * @itr_setting: current q_vector->itr_val * @packets: the number of packets during this measurement interval * @bytes: the number of bytes during this measurement interval **/ @@ -2919,8 +3322,9 @@ update_itr_done: static void igb_set_itr(struct igb_adapter *adapter) { + struct igb_q_vector *q_vector = adapter->q_vector[0]; u16 current_itr; - u32 new_itr = adapter->itr; + u32 new_itr = q_vector->itr_val; /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ if (adapter->link_speed != SPEED_1000) { @@ -2934,18 +3338,14 @@ static void igb_set_itr(struct igb_adapter *adapter) adapter->rx_ring->total_packets, adapter->rx_ring->total_bytes); - if (adapter->rx_ring->buddy) { - adapter->tx_itr = igb_update_itr(adapter, - adapter->tx_itr, - adapter->tx_ring->total_packets, - adapter->tx_ring->total_bytes); - current_itr = max(adapter->rx_itr, adapter->tx_itr); - } else { - current_itr = adapter->rx_itr; - } + adapter->tx_itr = igb_update_itr(adapter, + adapter->tx_itr, + adapter->tx_ring->total_packets, + adapter->tx_ring->total_bytes); + current_itr = max(adapter->rx_itr, adapter->tx_itr); /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->itr_setting == 3 && current_itr == lowest_latency) + if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency) current_itr = low_latency; switch (current_itr) { @@ -2966,18 +3366,17 @@ static void igb_set_itr(struct igb_adapter *adapter) set_itr_now: adapter->rx_ring->total_bytes = 0; adapter->rx_ring->total_packets = 0; - if (adapter->rx_ring->buddy) { - adapter->rx_ring->buddy->total_bytes = 0; - adapter->rx_ring->buddy->total_packets = 0; - } + adapter->tx_ring->total_bytes = 0; + adapter->tx_ring->total_packets = 0; - if (new_itr != adapter->itr) { + if (new_itr != q_vector->itr_val) { /* this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps when interrupt rate is * increasing */ - new_itr = new_itr > adapter->itr ? - max((new_itr * adapter->itr) / - (new_itr + (adapter->itr >> 2)), new_itr) : + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; /* Don't write the value here; it resets the adapter's * internal timer, and causes us to delay far longer than @@ -2985,25 +3384,22 @@ set_itr_now: * value at the beginning of the next interrupt so the timing * ends up being correct. */ - adapter->itr = new_itr; - adapter->rx_ring->itr_val = new_itr; - adapter->rx_ring->set_itr = 1; + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; } return; } - #define IGB_TX_FLAGS_CSUM 0x00000001 #define IGB_TX_FLAGS_VLAN 0x00000002 #define IGB_TX_FLAGS_TSO 0x00000004 #define IGB_TX_FLAGS_IPV4 0x00000008 -#define IGB_TX_FLAGS_TSTAMP 0x00000010 -#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IGB_TX_FLAGS_VLAN_SHIFT 16 +#define IGB_TX_FLAGS_TSTAMP 0x00000010 +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 -static inline int igb_tso_adv(struct igb_adapter *adapter, - struct igb_ring *tx_ring, +static inline int igb_tso_adv(struct igb_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) { struct e1000_adv_tx_context_desc *context_desc; @@ -3065,8 +3461,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter, mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); /* For 82575, context index must be unique per ring. 
*/ - if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) - mss_l4len_idx |= tx_ring->queue_index << 4; + if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) + mss_l4len_idx |= tx_ring->reg_idx << 4; context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); context_desc->seqnum_seed = 0; @@ -3083,14 +3479,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter, return true; } -static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, - struct igb_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags) +static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags) { struct e1000_adv_tx_context_desc *context_desc; - unsigned int i; + struct pci_dev *pdev = tx_ring->pdev; struct igb_buffer *buffer_info; u32 info = 0, tu_cmd = 0; + unsigned int i; if ((skb->ip_summed == CHECKSUM_PARTIAL) || (tx_flags & IGB_TX_FLAGS_VLAN)) { @@ -3100,6 +3496,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, if (tx_flags & IGB_TX_FLAGS_VLAN) info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); if (skb->ip_summed == CHECKSUM_PARTIAL) info |= skb_network_header_len(skb); @@ -3137,7 +3534,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, break; default: if (unlikely(net_ratelimit())) - dev_warn(&adapter->pdev->dev, + dev_warn(&pdev->dev, "partial checksum but proto=%x!\n", skb->protocol); break; @@ -3146,11 +3543,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); context_desc->seqnum_seed = 0; - if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) + if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) context_desc->mss_l4len_idx = - cpu_to_le32(tx_ring->queue_index << 4); - else - context_desc->mss_l4len_idx = 0; + cpu_to_le32(tx_ring->reg_idx << 4); buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; @@ -3169,32 +3564,27 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, #define IGB_MAX_TXD_PWR 16 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR) -static inline int igb_tx_map_adv(struct igb_adapter *adapter, - struct igb_ring *tx_ring, struct sk_buff *skb, +static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb, unsigned int first) { struct igb_buffer *buffer_info; + struct pci_dev *pdev = tx_ring->pdev; unsigned int len = skb_headlen(skb); unsigned int count = 0, i; unsigned int f; - dma_addr_t *map; i = tx_ring->next_to_use; - if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { - dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); - return 0; - } - - map = skb_shinfo(skb)->dma_maps; - buffer_info = &tx_ring->buffer_info[i]; BUG_ON(len >= IGB_MAX_DATA_PER_TXD); buffer_info->length = len; /* set time_stamp *before* dma to help avoid a possible race */ buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; - buffer_info->dma = skb_shinfo(skb)->dma_head; + buffer_info->dma = pci_map_single(pdev, skb->data, len, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, buffer_info->dma)) + goto dma_error; for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { struct skb_frag_struct *frag; @@ -3211,25 +3601,55 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter, buffer_info->length = len; buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; - buffer_info->dma = map[count]; + buffer_info->mapped_as_page = true; + buffer_info->dma = pci_map_page(pdev, + frag->page, + frag->page_offset, + len, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, 
buffer_info->dma)) + goto dma_error; + count++; } tx_ring->buffer_info[i].skb = skb; tx_ring->buffer_info[first].next_to_watch = i; - return count + 1; + return ++count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + + /* clear timestamp and dma mappings for failed buffer_info mapping */ + buffer_info->dma = 0; + buffer_info->time_stamp = 0; + buffer_info->length = 0; + buffer_info->next_to_watch = 0; + buffer_info->mapped_as_page = false; + count--; + + /* clear timestamp and dma mappings for remaining portion of packet */ + while (count >= 0) { + count--; + i--; + if (i < 0) + i += tx_ring->count; + buffer_info = &tx_ring->buffer_info[i]; + igb_unmap_and_free_tx_resource(tx_ring, buffer_info); + } + + return 0; } -static inline void igb_tx_queue_adv(struct igb_adapter *adapter, - struct igb_ring *tx_ring, +static inline void igb_tx_queue_adv(struct igb_ring *tx_ring, int tx_flags, int count, u32 paylen, u8 hdr_len) { - union e1000_adv_tx_desc *tx_desc = NULL; + union e1000_adv_tx_desc *tx_desc; struct igb_buffer *buffer_info; u32 olinfo_status = 0, cmd_type_len; - unsigned int i; + unsigned int i = tx_ring->next_to_use; cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT); @@ -3254,27 +3674,28 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter, olinfo_status |= E1000_TXD_POPTS_TXSM << 8; } - if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) && - (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO | + if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) && + (tx_flags & (IGB_TX_FLAGS_CSUM | + IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_VLAN))) - olinfo_status |= tx_ring->queue_index << 4; + olinfo_status |= tx_ring->reg_idx << 4; olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); - i = tx_ring->next_to_use; - while (count--) { + do { buffer_info = &tx_ring->buffer_info[i]; tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len | buffer_info->length); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + count--; i++; if (i == tx_ring->count) i = 0; - } + } while (count > 0); - tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); + tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD); /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -3282,16 +3703,15 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter, wmb(); tx_ring->next_to_use = i; - writel(i, adapter->hw.hw_addr + tx_ring->tail); + writel(i, tx_ring->tail); /* we need this if more than one processor can write to our tail * at a time, it syncronizes IO on IA64/Altix systems */ mmiowb(); } -static int __igb_maybe_stop_tx(struct net_device *netdev, - struct igb_ring *tx_ring, int size) +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) { - struct igb_adapter *adapter = netdev_priv(netdev); + struct net_device *netdev = tx_ring->netdev; netif_stop_subqueue(netdev, tx_ring->queue_index); @@ -3307,66 +3727,43 @@ static int __igb_maybe_stop_tx(struct net_device *netdev, /* A reprieve! 
*/ netif_wake_subqueue(netdev, tx_ring->queue_index); - ++adapter->restart_queue; + tx_ring->tx_stats.restart_queue++; return 0; } -static int igb_maybe_stop_tx(struct net_device *netdev, - struct igb_ring *tx_ring, int size) +static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) { if (igb_desc_unused(tx_ring) >= size) return 0; - return __igb_maybe_stop_tx(netdev, tx_ring, size); + return __igb_maybe_stop_tx(tx_ring, size); } -static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, - struct net_device *netdev, - struct igb_ring *tx_ring) +netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, + struct igb_ring *tx_ring) { - struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); unsigned int first; unsigned int tx_flags = 0; u8 hdr_len = 0; - int count = 0; - int tso = 0; - union skb_shared_tx *shtx; - - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - if (skb->len <= 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + int tso = 0, count; + union skb_shared_tx *shtx = skb_tx(skb); /* need: 1 descriptor per page, * + 2 desc gap to keep tail from touching head, * + 1 desc for skb->data, * + 1 desc for context descriptor, * otherwise try next time */ - if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) { + if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) { /* this is a hard error */ return NETDEV_TX_BUSY; } - /* - * TODO: check that there currently is no other packet with - * time stamping in the queue - * - * When doing time stamping, keep the connection to the socket - * a while longer: it is still needed by skb_hwtstamp_tx(), - * called either in igb_tx_hwtstamp() or by our caller when - * doing software time stamping. - */ - shtx = skb_tx(skb); if (unlikely(shtx->hardware)) { shtx->in_progress = 1; tx_flags |= IGB_TX_FLAGS_TSTAMP; } - if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + if (vlan_tx_tag_present(skb) && adapter->vlgrp) { tx_flags |= IGB_TX_FLAGS_VLAN; tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); } @@ -3375,37 +3772,38 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, tx_flags |= IGB_TX_FLAGS_IPV4; first = tx_ring->next_to_use; - tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags, - &hdr_len) : 0; + if (skb_is_gso(skb)) { + tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len); - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } } if (tso) tx_flags |= IGB_TX_FLAGS_TSO; - else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) && + else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) && (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IGB_TX_FLAGS_CSUM; /* - * count reflects descriptors mapped, if 0 then mapping error + * count reflects descriptors mapped, if 0 or less then mapping error * has occured and we need to rewind the descriptor queue */ - count = igb_tx_map_adv(adapter, tx_ring, skb, first); - - if (count) { - igb_tx_queue_adv(adapter, tx_ring, tx_flags, count, - skb->len, hdr_len); - /* Make sure there is space in the ring for the next send. 
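The transmit path above budgets nr_frags + 4 descriptors per frame (one per fragment, one for skb->data, one for a possible context descriptor, and a two-descriptor gap that keeps the tail from touching the head) and stops the subqueue when the ring cannot hold them. A small model of that check follows; desc_unused() here uses the conventional one-slot-kept-empty ring arithmetic and stands in for the driver's own helper.

#include <stdbool.h>

struct tx_ring_model {
    unsigned int count;            /* total descriptors in the ring */
    unsigned int next_to_use;      /* producer index                */
    unsigned int next_to_clean;    /* consumer index                */
};

/* Conventional ring free-space computation (assumption, not the driver's
 * helper): one slot is always left empty to distinguish full from empty. */
unsigned int desc_unused(const struct tx_ring_model *ring)
{
    if (ring->next_to_clean > ring->next_to_use)
        return ring->next_to_clean - ring->next_to_use - 1;
    return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

/* Mirror of the igb_maybe_stop_tx() budget: stop the queue when fewer than
 * nr_frags + 4 descriptors remain free. */
bool must_stop_queue(const struct tx_ring_model *ring, unsigned int nr_frags)
{
    return desc_unused(ring) < nr_frags + 4;
}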
*/ - igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4); - } else { + count = igb_tx_map_adv(tx_ring, skb, first); + if (!count) { dev_kfree_skb_any(skb); tx_ring->buffer_info[first].time_stamp = 0; tx_ring->next_to_use = first; + return NETDEV_TX_OK; } + igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len); + + /* Make sure there is space in the ring for the next send. */ + igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4); + return NETDEV_TX_OK; } @@ -3414,8 +3812,18 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, { struct igb_adapter *adapter = netdev_priv(netdev); struct igb_ring *tx_ring; - int r_idx = 0; + + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1); tx_ring = adapter->multi_tx_table[r_idx]; @@ -3423,7 +3831,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, * to a flow. Right now, performance is impacted slightly negatively * if using multiple tx queues. If the stack breaks away from a * single qdisc implementation, we can look at this again. */ - return igb_xmit_frame_ring_adv(skb, netdev, tx_ring); + return igb_xmit_frame_ring_adv(skb, tx_ring); } /** @@ -3437,6 +3845,10 @@ static void igb_tx_timeout(struct net_device *netdev) /* Do the reset outside of interrupt context */ adapter->tx_timeout_count++; + + if (hw->mac.type == e1000_82580) + hw->dev_spec._82575.global_device_reset = true; + schedule_work(&adapter->reset_task); wr32(E1000_EICS, (adapter->eims_enable_mask & ~adapter->eims_other)); @@ -3459,10 +3871,8 @@ static void igb_reset_task(struct work_struct *work) **/ static struct net_device_stats *igb_get_stats(struct net_device *netdev) { - struct igb_adapter *adapter = netdev_priv(netdev); - /* only return the current stats */ - return &adapter->net_stats; + return &netdev->stats; } /** @@ -3475,16 +3885,17 @@ static struct net_device_stats *igb_get_stats(struct net_device *netdev) static int igb_change_mtu(struct net_device *netdev, int new_mtu) { struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + u32 rx_buffer_len, i; - if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || - (max_frame > MAX_JUMBO_FRAME_SIZE)) { - dev_err(&adapter->pdev->dev, "Invalid MTU setting\n"); + if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { + dev_err(&pdev->dev, "Invalid MTU setting\n"); return -EINVAL; } if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { - dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); + dev_err(&pdev->dev, "MTU > 9216 not supported.\n"); return -EINVAL; } @@ -3493,8 +3904,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) /* igb_down has a dependency on max_frame_size */ adapter->max_frame_size = max_frame; - if (netif_running(netdev)) - igb_down(adapter); /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next @@ -3502,35 +3911,23 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) * i.e. 
RXBUFFER_2048 --> size-4096 slab */ - if (max_frame <= IGB_RXBUFFER_256) - adapter->rx_buffer_len = IGB_RXBUFFER_256; - else if (max_frame <= IGB_RXBUFFER_512) - adapter->rx_buffer_len = IGB_RXBUFFER_512; - else if (max_frame <= IGB_RXBUFFER_1024) - adapter->rx_buffer_len = IGB_RXBUFFER_1024; - else if (max_frame <= IGB_RXBUFFER_2048) - adapter->rx_buffer_len = IGB_RXBUFFER_2048; + if (max_frame <= IGB_RXBUFFER_1024) + rx_buffer_len = IGB_RXBUFFER_1024; + else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE) + rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; else -#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 - adapter->rx_buffer_len = IGB_RXBUFFER_16384; -#else - adapter->rx_buffer_len = PAGE_SIZE / 2; -#endif + rx_buffer_len = IGB_RXBUFFER_128; - /* if sr-iov is enabled we need to force buffer size to 1K or larger */ - if (adapter->vfs_allocated_count && - (adapter->rx_buffer_len < IGB_RXBUFFER_1024)) - adapter->rx_buffer_len = IGB_RXBUFFER_1024; - - /* adjust allocation if LPE protects us, and we aren't using SBP */ - if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || - (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)) - adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + if (netif_running(netdev)) + igb_down(adapter); - dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", + dev_info(&pdev->dev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i].rx_buffer_len = rx_buffer_len; + if (netif_running(netdev)) igb_up(adapter); else @@ -3548,9 +3945,13 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) void igb_update_stats(struct igb_adapter *adapter) { + struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; + u32 rnbc; u16 phy_tmp; + int i; + u64 bytes, packets; #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF @@ -3563,6 +3964,29 @@ void igb_update_stats(struct igb_adapter *adapter) if (pci_channel_offline(pdev)) return; + bytes = 0; + packets = 0; + for (i = 0; i < adapter->num_rx_queues; i++) { + u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; + adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp; + net_stats->rx_fifo_errors += rqdpc_tmp; + bytes += adapter->rx_ring[i].rx_stats.bytes; + packets += adapter->rx_ring[i].rx_stats.packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + bytes += adapter->tx_ring[i].tx_stats.bytes; + packets += adapter->tx_ring[i].tx_stats.packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + + /* read stats registers */ adapter->stats.crcerrs += rd32(E1000_CRCERRS); adapter->stats.gprc += rd32(E1000_GPRC); adapter->stats.gorc += rd32(E1000_GORCL); @@ -3595,7 +4019,9 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.gptc += rd32(E1000_GPTC); adapter->stats.gotc += rd32(E1000_GOTCL); rd32(E1000_GOTCH); /* clear GOTCL */ - adapter->stats.rnbc += rd32(E1000_RNBC); + rnbc = rd32(E1000_RNBC); + adapter->stats.rnbc += rnbc; + net_stats->rx_fifo_errors += rnbc; adapter->stats.ruc += rd32(E1000_RUC); adapter->stats.rfc += rd32(E1000_RFC); adapter->stats.rjc += rd32(E1000_RJC); @@ -3614,7 +4040,6 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.bptc += rd32(E1000_BPTC); /* used for adaptive IFS */ - hw->mac.tx_packet_delta = rd32(E1000_TPT); adapter->stats.tpt += hw->mac.tx_packet_delta; hw->mac.collision_delta = 
rd32(E1000_COLC); @@ -3637,56 +4062,29 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); /* Fill out the OS statistics structure */ - adapter->net_stats.multicast = adapter->stats.mprc; - adapter->net_stats.collisions = adapter->stats.colc; + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; /* Rx Errors */ - if (hw->mac.type != e1000_82575) { - u32 rqdpc_tmp; - u64 rqdpc_total = 0; - int i; - /* Read out drops stats per RX queue. Notice RQDPC (Receive - * Queue Drop Packet Count) stats only gets incremented, if - * the DROP_EN but it set (in the SRRCTL register for that - * queue). If DROP_EN bit is NOT set, then the some what - * equivalent count is stored in RNBC (not per queue basis). - * Also note the drop count is due to lack of available - * descriptors. - */ - for (i = 0; i < adapter->num_rx_queues; i++) { - rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF; - adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp; - rqdpc_total += adapter->rx_ring[i].rx_stats.drops; - } - adapter->net_stats.rx_fifo_errors = rqdpc_total; - } - - /* Note RNBC (Receive No Buffers Count) is an not an exact - * drop count as the hardware FIFO might save the day. Thats - * one of the reason for saving it in rx_fifo_errors, as its - * potentially not a true drop. - */ - adapter->net_stats.rx_fifo_errors += adapter->stats.rnbc; - /* RLEC on some newer hardware can be incorrect so build * our own version based on RUC and ROC */ - adapter->net_stats.rx_errors = adapter->stats.rxerrc + + net_stats->rx_errors = adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; - adapter->net_stats.rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; - adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; - adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; - adapter->net_stats.rx_missed_errors = adapter->stats.mpc; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ - adapter->net_stats.tx_errors = adapter->stats.ecol + - adapter->stats.latecol; - adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; - adapter->net_stats.tx_window_errors = adapter->stats.latecol; - adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; /* Tx Dropped needs to be maintained elsewhere */ @@ -3707,14 +4105,12 @@ void igb_update_stats(struct igb_adapter *adapter) static irqreturn_t igb_msix_other(int irq, void *data) { - struct net_device *netdev = data; - struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_adapter *adapter = data; struct e1000_hw *hw = &adapter->hw; u32 icr = rd32(E1000_ICR); - /* reading ICR causes bit 31 of EICR to be cleared */ - if(icr & E1000_ICR_DOUTSYNC) { + if (icr & E1000_ICR_DOUTSYNC) { /* HW is reporting DMA is out of sync */ adapter->stats.doosync++; } @@ -3730,125 +4126,90 @@ static irqreturn_t igb_msix_other(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB); + if 
(adapter->vfs_allocated_count) + wr32(E1000_IMS, E1000_IMS_LSC | + E1000_IMS_VMMB | + E1000_IMS_DOUTSYNC); + else + wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC); wr32(E1000_EIMS, adapter->eims_other); return IRQ_HANDLED; } -static irqreturn_t igb_msix_tx(int irq, void *data) +static void igb_write_itr(struct igb_q_vector *q_vector) { - struct igb_ring *tx_ring = data; - struct igb_adapter *adapter = tx_ring->adapter; - struct e1000_hw *hw = &adapter->hw; + u32 itr_val = q_vector->itr_val & 0x7FFC; -#ifdef CONFIG_IGB_DCA - if (adapter->flags & IGB_FLAG_DCA_ENABLED) - igb_update_tx_dca(tx_ring); -#endif + if (!q_vector->set_itr) + return; - tx_ring->total_bytes = 0; - tx_ring->total_packets = 0; + if (!itr_val) + itr_val = 0x4; - /* auto mask will automatically reenable the interrupt when we write - * EICS */ - if (!igb_clean_tx_irq(tx_ring)) - /* Ring was not completely cleaned, so fire another interrupt */ - wr32(E1000_EICS, tx_ring->eims_value); + if (q_vector->itr_shift) + itr_val |= itr_val << q_vector->itr_shift; else - wr32(E1000_EIMS, tx_ring->eims_value); + itr_val |= 0x8000000; - return IRQ_HANDLED; -} - -static void igb_write_itr(struct igb_ring *ring) -{ - struct e1000_hw *hw = &ring->adapter->hw; - if ((ring->adapter->itr_setting & 3) && ring->set_itr) { - switch (hw->mac.type) { - case e1000_82576: - wr32(ring->itr_register, ring->itr_val | - 0x80000000); - break; - default: - wr32(ring->itr_register, ring->itr_val | - (ring->itr_val << 16)); - break; - } - ring->set_itr = 0; - } + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; } -static irqreturn_t igb_msix_rx(int irq, void *data) +static irqreturn_t igb_msix_ring(int irq, void *data) { - struct igb_ring *rx_ring = data; - - /* Write the ITR value calculated at the end of the - * previous interrupt. - */ + struct igb_q_vector *q_vector = data; - igb_write_itr(rx_ring); + /* Write the ITR value calculated from the previous interrupt. 
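igb_write_itr() above composes the value written to the interrupt throttle register from the q_vector state: the stored ITR is masked down to the field the hardware accepts, a floor of 0x4 avoids writing zero, and the value is either mirrored into a second field via itr_shift or combined with the flag constant shown in the hunk. A sketch of that composition; the constants are copied from the hunk and their exact hardware meaning is treated as an assumption.

#include <stdint.h>

/* Sketch of the value composition in igb_write_itr(); the set_itr gating and
 * the MMIO write itself are left out. */
uint32_t compose_itr(uint32_t stored_itr, unsigned int itr_shift)
{
    uint32_t itr_val = stored_itr & 0x7FFC;   /* hardware-aligned field      */

    if (!itr_val)
        itr_val = 0x4;                        /* never write zero            */

    if (itr_shift)
        itr_val |= itr_val << itr_shift;      /* duplicate into second field */
    else
        itr_val |= 0x8000000;                 /* flag bit as per the hunk    */

    return itr_val;                           /* value destined for EITR     */
}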
*/ + igb_write_itr(q_vector); - if (napi_schedule_prep(&rx_ring->napi)) - __napi_schedule(&rx_ring->napi); + napi_schedule(&q_vector->napi); -#ifdef CONFIG_IGB_DCA - if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) - igb_update_rx_dca(rx_ring); -#endif - return IRQ_HANDLED; + return IRQ_HANDLED; } #ifdef CONFIG_IGB_DCA -static void igb_update_rx_dca(struct igb_ring *rx_ring) +static void igb_update_dca(struct igb_q_vector *q_vector) { - u32 dca_rxctrl; - struct igb_adapter *adapter = rx_ring->adapter; + struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; int cpu = get_cpu(); - int q = rx_ring->reg_idx; - if (rx_ring->cpu != cpu) { - dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); - if (hw->mac.type == e1000_82576) { - dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; - dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << - E1000_DCA_RXCTRL_CPUID_SHIFT; + if (q_vector->cpu == cpu) + goto out_no_update; + + if (q_vector->tx_ring) { + int q = q_vector->tx_ring->reg_idx; + u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); + if (hw->mac.type == e1000_82575) { + dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; + dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); } else { + dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; + dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << + E1000_DCA_TXCTRL_CPUID_SHIFT; + } + dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; + wr32(E1000_DCA_TXCTRL(q), dca_txctrl); + } + if (q_vector->rx_ring) { + int q = q_vector->rx_ring->reg_idx; + u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); + if (hw->mac.type == e1000_82575) { dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); + } else { + dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; + dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << + E1000_DCA_RXCTRL_CPUID_SHIFT; } dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); - rx_ring->cpu = cpu; - } - put_cpu(); -} - -static void igb_update_tx_dca(struct igb_ring *tx_ring) -{ - u32 dca_txctrl; - struct igb_adapter *adapter = tx_ring->adapter; - struct e1000_hw *hw = &adapter->hw; - int cpu = get_cpu(); - int q = tx_ring->reg_idx; - - if (tx_ring->cpu != cpu) { - dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); - if (hw->mac.type == e1000_82576) { - dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; - dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << - E1000_DCA_TXCTRL_CPUID_SHIFT; - } else { - dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; - dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); - } - dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; - wr32(E1000_DCA_TXCTRL(q), dca_txctrl); - tx_ring->cpu = cpu; } + q_vector->cpu = cpu; +out_no_update: put_cpu(); } @@ -3863,13 +4224,10 @@ static void igb_setup_dca(struct igb_adapter *adapter) /* Always use CB2 mode, difference is masked in the CB driver. 
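igb_update_dca() above reprograms the per-queue DCA_TXCTRL/DCA_RXCTRL CPU-ID field whenever the servicing CPU changes, using one mask for the 82575 and a different mask plus shift for later MACs. A sketch of that selection follows; the mask and shift values are placeholders, not the driver's defines.

#include <stdint.h>

enum mac_type { MAC_82575, MAC_82576_OR_LATER };

/* Sketch of the per-queue DCA CPU-ID update: the 82575 keeps the CPU tag in
 * the low bits of the control register, while later parts clear a different
 * mask and place the tag in a shifted field. */
uint32_t update_dca_cpuid(uint32_t dca_ctrl, uint32_t cpu_tag, enum mac_type type)
{
    const uint32_t mask_82575 = 0x0000001F;    /* placeholder */
    const uint32_t mask_82576 = 0xFF000000;    /* placeholder */
    const unsigned int shift_82576 = 24;       /* placeholder */

    if (type == MAC_82575) {
        dca_ctrl &= ~mask_82575;
        dca_ctrl |= cpu_tag;                   /* tag lives in the low bits */
    } else {
        dca_ctrl &= ~mask_82576;
        dca_ctrl |= cpu_tag << shift_82576;
    }
    return dca_ctrl;
}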
*/ wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); - for (i = 0; i < adapter->num_tx_queues; i++) { - adapter->tx_ring[i].cpu = -1; - igb_update_tx_dca(&adapter->tx_ring[i]); - } - for (i = 0; i < adapter->num_rx_queues; i++) { - adapter->rx_ring[i].cpu = -1; - igb_update_rx_dca(&adapter->rx_ring[i]); + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + q_vector->cpu = -1; + igb_update_dca(q_vector); } } @@ -3877,6 +4235,7 @@ static int __igb_notify_dca(struct device *dev, void *data) { struct net_device *netdev = dev_get_drvdata(dev); struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; unsigned long event = *(unsigned long *)data; @@ -3885,12 +4244,9 @@ static int __igb_notify_dca(struct device *dev, void *data) /* if already enabled, don't do it again */ if (adapter->flags & IGB_FLAG_DCA_ENABLED) break; - /* Always use CB2 mode, difference is masked - * in the CB driver. */ - wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); if (dca_add_requester(dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; - dev_info(&adapter->pdev->dev, "DCA enabled\n"); + dev_info(&pdev->dev, "DCA enabled\n"); igb_setup_dca(adapter); break; } @@ -3898,9 +4254,9 @@ static int __igb_notify_dca(struct device *dev, void *data) case DCA_PROVIDER_REMOVE: if (adapter->flags & IGB_FLAG_DCA_ENABLED) { /* without this a class_device is left - * hanging around in the sysfs model */ + * hanging around in the sysfs model */ dca_remove_requester(dev); - dev_info(&adapter->pdev->dev, "DCA disabled\n"); + dev_info(&pdev->dev, "DCA disabled\n"); adapter->flags &= ~IGB_FLAG_DCA_ENABLED; wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); } @@ -3930,12 +4286,51 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter) for (i = 0 ; i < adapter->vfs_allocated_count; i++) { ping = E1000_PF_CONTROL_MSG; - if (adapter->vf_data[i].clear_to_send) + if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) ping |= E1000_VT_MSGTYPE_CTS; igb_write_mbx(hw, &ping, 1, i); } } +static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr = rd32(E1000_VMOLR(vf)); + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC | + IGB_VF_FLAG_MULTI_PROMISC); + vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + + if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { + vmolr |= E1000_VMOLR_MPME; + *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; + } else { + /* + * if we have hashes and we are clearing a multicast promisc + * flag we need to write the hashes to the MTA as this step + * was previously skipped + */ + if (vf_data->num_vf_mc_hashes > 30) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + int j; + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + } + + wr32(E1000_VMOLR(vf), vmolr); + + /* there are flags left unprocessed, likely not supported */ + if (*msgbuf & E1000_VT_MSGINFO_MASK) + return -EINVAL; + + return 0; + +} + static int igb_set_vf_multicasts(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { @@ -3944,18 +4339,17 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter, struct vf_data_storage *vf_data = &adapter->vf_data[vf]; int i; - /* only up to 30 hash values supported */ - if (n > 30) - n = 30; - - /* salt away the number of multi cast addresses assigned + /* 
salt away the number of multicast addresses assigned * to this VF for later use to restore when the PF multi cast * list changes */ vf_data->num_vf_mc_hashes = n; - /* VFs are limited to using the MTA hash table for their multicast - * addresses */ + /* only up to 30 hash values supported */ + if (n > 30) + n = 30; + + /* store the hashes for later use */ for (i = 0; i < n; i++) vf_data->vf_mc_hashes[i] = hash_list[i]; @@ -3972,9 +4366,20 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter) int i, j; for (i = 0; i < adapter->vfs_allocated_count; i++) { + u32 vmolr = rd32(E1000_VMOLR(i)); + vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + vf_data = &adapter->vf_data[i]; - for (j = 0; j < vf_data->num_vf_mc_hashes; j++) - igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + + if ((vf_data->num_vf_mc_hashes > 30) || + (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + wr32(E1000_VMOLR(i), vmolr); } } @@ -4012,7 +4417,11 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) struct e1000_hw *hw = &adapter->hw; u32 reg, i; - /* It is an error to call this function when VFs are not enabled */ + /* The vlvf table only exists on 82576 hardware and newer */ + if (hw->mac.type < e1000_82576) + return -1; + + /* we only need to do this if VMDq is enabled */ if (!adapter->vfs_allocated_count) return -1; @@ -4042,16 +4451,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) /* if !enabled we need to set this up in vfta */ if (!(reg & E1000_VLVF_VLANID_ENABLE)) { - /* add VID to filter table, if bit already set - * PF must have added it outside of table */ - if (igb_vfta_set(hw, vid, true)) - reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + - adapter->vfs_allocated_count); + /* add VID to filter table */ + igb_vfta_set(hw, vid, true); reg |= E1000_VLVF_VLANID_ENABLE; } reg &= ~E1000_VLVF_VLANID_MASK; reg |= vid; - wr32(E1000_VLVF(i), reg); /* do not modify RLPML for PF devices */ @@ -4067,8 +4472,8 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) reg |= size; wr32(E1000_VMOLR(vf), reg); } - adapter->vf_data[vf].vlans_enabled++; + adapter->vf_data[vf].vlans_enabled++; return 0; } } else { @@ -4110,15 +4515,14 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) return igb_vlvf_set(adapter, vid, add, vf); } -static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) +static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) { - struct e1000_hw *hw = &adapter->hw; - - /* disable mailbox functionality for vf */ - adapter->vf_data[vf].clear_to_send = false; + /* clear all flags */ + adapter->vf_data[vf].flags = 0; + adapter->vf_data[vf].last_nack = jiffies; /* reset offloads to defaults */ - igb_set_vmolr(hw, vf); + igb_set_vmolr(adapter, vf); /* reset vlans for device */ igb_clear_vf_vfta(adapter, vf); @@ -4130,7 +4534,18 @@ static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) igb_set_rx_mode(adapter->netdev); } -static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) +static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) +{ + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + + /* generate a new mac address as we were hotplug removed/added */ + random_ether_addr(vf_mac); + + /* process remaining 
reset events */ + igb_vf_reset(adapter, vf); +} + +static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; @@ -4139,11 +4554,10 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) u8 *addr = (u8 *)(&msgbuf[1]); /* process all the same items cleared in a function level reset */ - igb_vf_reset_event(adapter, vf); + igb_vf_reset(adapter, vf); /* set vf mac address */ - igb_rar_set(hw, vf_mac, rar_entry); - igb_set_rah_pool(hw, vf, rar_entry); + igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); /* enable transmit and receive for vf */ reg = rd32(E1000_VFTE); @@ -4151,8 +4565,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) reg = rd32(E1000_VFRE); wr32(E1000_VFRE, reg | (1 << vf)); - /* enable mailbox functionality for vf */ - adapter->vf_data[vf].clear_to_send = true; + adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS; /* reply to reset with ack and vf mac address */ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; @@ -4162,66 +4575,45 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) { - unsigned char *addr = (char *)&msg[1]; - int err = -1; + unsigned char *addr = (char *)&msg[1]; + int err = -1; - if (is_valid_ether_addr(addr)) - err = igb_set_vf_mac(adapter, vf, addr); - - return err; + if (is_valid_ether_addr(addr)) + err = igb_set_vf_mac(adapter, vf, addr); + return err; } static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; u32 msg = E1000_VT_MSGTYPE_NACK; /* if device isn't clear to send it shouldn't be reading either */ - if (!adapter->vf_data[vf].clear_to_send) + if (!(vf_data->flags & IGB_VF_FLAG_CTS) && + time_after(jiffies, vf_data->last_nack + (2 * HZ))) { igb_write_mbx(hw, &msg, 1, vf); -} - - -static void igb_msg_task(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u32 vf; - - for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { - /* process any reset requests */ - if (!igb_check_for_rst(hw, vf)) { - adapter->vf_data[vf].clear_to_send = false; - igb_vf_reset_event(adapter, vf); - } - - /* process any messages pending */ - if (!igb_check_for_msg(hw, vf)) - igb_rcv_msg_from_vf(adapter, vf); - - /* process any acks */ - if (!igb_check_for_ack(hw, vf)) - igb_rcv_ack_from_vf(adapter, vf); - + vf_data->last_nack = jiffies; } } -static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) +static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) { - u32 mbx_size = E1000_VFMAILBOX_SIZE; - u32 msgbuf[mbx_size]; + struct pci_dev *pdev = adapter->pdev; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; s32 retval; - retval = igb_read_mbx(hw, msgbuf, mbx_size, vf); + retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); if (retval) - dev_err(&adapter->pdev->dev, - "Error receiving message from VF\n"); + dev_err(&pdev->dev, "Error receiving message from VF\n"); /* this is a message we already processed, do nothing */ if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) - return retval; + return; /* * until the vf completes a reset it should not be @@ -4230,20 +4622,25 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) if (msgbuf[0] == E1000_VF_RESET) { 
igb_vf_reset_msg(adapter, vf); - - return retval; + return; } - if (!adapter->vf_data[vf].clear_to_send) { - msgbuf[0] |= E1000_VT_MSGTYPE_NACK; - igb_write_mbx(hw, msgbuf, 1, vf); - return retval; + if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { + msgbuf[0] = E1000_VT_MSGTYPE_NACK; + if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) { + igb_write_mbx(hw, msgbuf, 1, vf); + vf_data->last_nack = jiffies; + } + return; } switch ((msgbuf[0] & 0xFFFF)) { case E1000_VF_SET_MAC_ADDR: retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); break; + case E1000_VF_SET_PROMISC: + retval = igb_set_vf_promisc(adapter, msgbuf, vf); + break; case E1000_VF_SET_MULTICAST: retval = igb_set_vf_multicasts(adapter, msgbuf, vf); break; @@ -4254,7 +4651,7 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) retval = igb_set_vf_vlan(adapter, msgbuf, vf); break; default: - dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); + dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); retval = -1; break; } @@ -4268,8 +4665,53 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) msgbuf[0] |= E1000_VT_MSGTYPE_CTS; igb_write_mbx(hw, msgbuf, 1, vf); +} - return retval; +static void igb_msg_task(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vf; + + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { + /* process any reset requests */ + if (!igb_check_for_rst(hw, vf)) + igb_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!igb_check_for_msg(hw, vf)) + igb_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!igb_check_for_ack(hw, vf)) + igb_rcv_ack_from_vf(adapter, vf); + } +} + +/** + * igb_set_uta - Set unicast filter table address + * @adapter: board private structure + * + * The unicast table address is a register array of 32-bit registers. 
+ * The table is meant to be used in a way similar to how the MTA is used + * however due to certain limitations in the hardware it is necessary to + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscous + * enable bit to allow vlan tag stripping when promiscous mode is enabled + **/ +static void igb_set_uta(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* The UTA table only exists on 82576 hardware and newer */ + if (hw->mac.type < e1000_82576) + return; + + /* we only need to do this if VMDq is enabled */ + if (!adapter->vfs_allocated_count) + return; + + for (i = 0; i < hw->mac.uta_reg_count; i++) + array_wr32(E1000_UTA, i, ~0); } /** @@ -4279,15 +4721,15 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) **/ static irqreturn_t igb_intr_msi(int irq, void *data) { - struct net_device *netdev = data; - struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; struct e1000_hw *hw = &adapter->hw; /* read ICR disables interrupts using IAM */ u32 icr = rd32(E1000_ICR); - igb_write_itr(adapter->rx_ring); + igb_write_itr(q_vector); - if(icr & E1000_ICR_DOUTSYNC) { + if (icr & E1000_ICR_DOUTSYNC) { /* HW is reporting DMA is out of sync */ adapter->stats.doosync++; } @@ -4298,7 +4740,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - napi_schedule(&adapter->rx_ring[0].napi); + napi_schedule(&q_vector->napi); return IRQ_HANDLED; } @@ -4310,8 +4752,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data) **/ static irqreturn_t igb_intr(int irq, void *data) { - struct net_device *netdev = data; - struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; struct e1000_hw *hw = &adapter->hw; /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No * need for the IMC write */ @@ -4319,14 +4761,14 @@ static irqreturn_t igb_intr(int irq, void *data) if (!icr) return IRQ_NONE; /* Not our interrupt */ - igb_write_itr(adapter->rx_ring); + igb_write_itr(q_vector); /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is * not set, then the adapter didn't send an interrupt */ if (!(icr & E1000_ICR_INT_ASSERTED)) return IRQ_NONE; - if(icr & E1000_ICR_DOUTSYNC) { + if (icr & E1000_ICR_DOUTSYNC) { /* HW is reporting DMA is out of sync */ adapter->stats.doosync++; } @@ -4338,26 +4780,27 @@ static irqreturn_t igb_intr(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - napi_schedule(&adapter->rx_ring[0].napi); + napi_schedule(&q_vector->napi); return IRQ_HANDLED; } -static inline void igb_rx_irq_enable(struct igb_ring *rx_ring) +static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector) { - struct igb_adapter *adapter = rx_ring->adapter; + struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; - if (adapter->itr_setting & 3) { - if (adapter->num_rx_queues == 1) + if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) { + if (!adapter->msix_entries) igb_set_itr(adapter); else - igb_update_ring_itr(rx_ring); + igb_update_ring_itr(q_vector); } if (!test_bit(__IGB_DOWN, &adapter->state)) { if (adapter->msix_entries) - wr32(E1000_EIMS, rx_ring->eims_value); + wr32(E1000_EIMS, q_vector->eims_value); else igb_irq_enable(adapter); } @@ -4370,76 +4813,101 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring) **/ static int igb_poll(struct napi_struct *napi, int budget) { - struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi); - int work_done = 0; + struct igb_q_vector *q_vector = container_of(napi, + struct igb_q_vector, + napi); + int tx_clean_complete = 1, work_done = 0; #ifdef CONFIG_IGB_DCA - if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) - igb_update_rx_dca(rx_ring); + if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) + igb_update_dca(q_vector); #endif - igb_clean_rx_irq_adv(rx_ring, &work_done, budget); + if (q_vector->tx_ring) + tx_clean_complete = igb_clean_tx_irq(q_vector); - if (rx_ring->buddy) { -#ifdef CONFIG_IGB_DCA - if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) - igb_update_tx_dca(rx_ring->buddy); -#endif - if (!igb_clean_tx_irq(rx_ring->buddy)) - work_done = budget; - } + if (q_vector->rx_ring) + igb_clean_rx_irq_adv(q_vector, &work_done, budget); + + if (!tx_clean_complete) + work_done = budget; /* If not enough Rx work done, exit the polling mode */ if (work_done < budget) { napi_complete(napi); - igb_rx_irq_enable(rx_ring); + igb_ring_irq_enable(q_vector); } return work_done; } /** - * igb_hwtstamp - utility function which checks for TX time stamp + * igb_systim_to_hwtstamp - convert system time value to hw timestamp * @adapter: board private structure + * @shhwtstamps: timestamp structure to update + * @regval: unsigned 64bit system time value. + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions + */ +static void igb_systim_to_hwtstamp(struct igb_adapter *adapter, + struct skb_shared_hwtstamps *shhwtstamps, + u64 regval) +{ + u64 ns; + + /* + * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to + * 24 to match clock shift we setup earlier. 
+ */ + if (adapter->hw.mac.type == e1000_82580) + regval <<= IGB_82580_TSYNC_SHIFT; + + ns = timecounter_cyc2time(&adapter->clock, regval); + timecompare_update(&adapter->compare, ns); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(ns); + shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns); +} + +/** + * igb_tx_hwtstamp - utility function which checks for TX time stamp + * @q_vector: pointer to q_vector containing needed info * @skb: packet that was just sent * * If we were asked to do hardware stamping and such a time stamp is * available, then it must have been for this skb here because we only * allow only one such packet into the queue. */ -static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb) +static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) { + struct igb_adapter *adapter = q_vector->adapter; union skb_shared_tx *shtx = skb_tx(skb); struct e1000_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval; - if (unlikely(shtx->hardware)) { - u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID; - if (valid) { - u64 regval = rd32(E1000_TXSTMPL); - u64 ns; - struct skb_shared_hwtstamps shhwtstamps; - - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - regval |= (u64)rd32(E1000_TXSTMPH) << 32; - ns = timecounter_cyc2time(&adapter->clock, - regval); - timecompare_update(&adapter->compare, ns); - shhwtstamps.hwtstamp = ns_to_ktime(ns); - shhwtstamps.syststamp = - timecompare_transform(&adapter->compare, ns); - skb_tstamp_tx(skb, &shhwtstamps); - } - } + /* if skb does not support hw timestamp or TX stamp not valid exit */ + if (likely(!shtx->hardware) || + !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) + return; + + regval = rd32(E1000_TXSTMPL); + regval |= (u64)rd32(E1000_TXSTMPH) << 32; + + igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(skb, &shhwtstamps); } /** * igb_clean_tx_irq - Reclaim resources after transmit completes - * @adapter: board private structure + * @q_vector: pointer to q_vector containing needed info * returns true if ring is completely cleaned **/ -static bool igb_clean_tx_irq(struct igb_ring *tx_ring) +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) { - struct igb_adapter *adapter = tx_ring->adapter; - struct net_device *netdev = adapter->netdev; + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *tx_ring = q_vector->tx_ring; + struct net_device *netdev = tx_ring->netdev; struct e1000_hw *hw = &adapter->hw; struct igb_buffer *buffer_info; struct sk_buff *skb; @@ -4470,10 +4938,10 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) total_packets += segs; total_bytes += bytecount; - igb_tx_hwtstamp(adapter, skb); + igb_tx_hwtstamp(q_vector, skb); } - igb_unmap_and_free_tx_resource(adapter, buffer_info); + igb_unmap_and_free_tx_resource(tx_ring, buffer_info); tx_desc->wb.status = 0; i++; @@ -4496,7 +4964,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && !(test_bit(__IGB_DOWN, &adapter->state))) { netif_wake_subqueue(netdev, tx_ring->queue_index); - ++adapter->restart_queue; + tx_ring->tx_stats.restart_queue++; } } @@ -4506,12 +4974,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) tx_ring->detect_tx_hung = false; if (tx_ring->buffer_info[i].time_stamp && time_after(jiffies, tx_ring->buffer_info[i].time_stamp + - (adapter->tx_timeout_factor * HZ)) - && !(rd32(E1000_STATUS) & - 
E1000_STATUS_TXOFF)) { + (adapter->tx_timeout_factor * HZ)) && + !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { /* detected Tx unit hang */ - dev_err(&adapter->pdev->dev, + dev_err(&tx_ring->pdev->dev, "Detected Tx Unit Hang\n" " Tx Queue <%d>\n" " TDH <%x>\n" @@ -4524,11 +4991,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) " jiffies <%lx>\n" " desc.status <%x>\n", tx_ring->queue_index, - readl(adapter->hw.hw_addr + tx_ring->head), - readl(adapter->hw.hw_addr + tx_ring->tail), + readl(tx_ring->head), + readl(tx_ring->tail), tx_ring->next_to_use, tx_ring->next_to_clean, - tx_ring->buffer_info[i].time_stamp, + tx_ring->buffer_info[eop].time_stamp, eop, jiffies, eop_desc->wb.status); @@ -4539,43 +5006,38 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) tx_ring->total_packets += total_packets; tx_ring->tx_stats.bytes += total_bytes; tx_ring->tx_stats.packets += total_packets; - adapter->net_stats.tx_bytes += total_bytes; - adapter->net_stats.tx_packets += total_packets; return (count < tx_ring->count); } /** * igb_receive_skb - helper function to handle rx indications - * @ring: pointer to receive ring receving this packet - * @status: descriptor status field as written by hardware - * @rx_desc: receive descriptor containing vlan and type information. - * @skb: pointer to sk_buff to be indicated to stack + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + * @vlan_tag: vlan tag for packet **/ -static void igb_receive_skb(struct igb_ring *ring, u8 status, - union e1000_adv_rx_desc * rx_desc, - struct sk_buff *skb) -{ - struct igb_adapter * adapter = ring->adapter; - bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); - - skb_record_rx_queue(skb, ring->queue_index); - if (vlan_extracted) - vlan_gro_receive(&ring->napi, adapter->vlgrp, - le16_to_cpu(rx_desc->wb.upper.vlan), - skb); +static void igb_receive_skb(struct igb_q_vector *q_vector, + struct sk_buff *skb, + u16 vlan_tag) +{ + struct igb_adapter *adapter = q_vector->adapter; + + if (vlan_tag) + vlan_gro_receive(&q_vector->napi, adapter->vlgrp, + vlan_tag, skb); else - napi_gro_receive(&ring->napi, skb); + napi_gro_receive(&q_vector->napi, skb); } -static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, +static inline void igb_rx_checksum_adv(struct igb_ring *ring, u32 status_err, struct sk_buff *skb) { skb->ip_summed = CHECKSUM_NONE; /* Ignore Checksum bit is set or checksum is disabled through ethtool */ - if ((status_err & E1000_RXD_STAT_IXSM) || - (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED)) + if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) || + (status_err & E1000_RXD_STAT_IXSM)) return; + /* TCP/UDP checksum error bit is set */ if (status_err & (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { @@ -4584,9 +5046,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) * packets, (aka let the stack check the crc32c) */ - if (!((adapter->hw.mac.type == e1000_82576) && - (skb->len == 60))) - adapter->hw_csum_err++; + if ((skb->len == 60) && + (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) + ring->rx_stats.csum_err++; + /* let the stack verify checksum errors */ return; } @@ -4594,11 +5057,38 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) skb->ip_summed = CHECKSUM_UNNECESSARY; - dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err); - adapter->hw_csum_good++; + 
dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err); } -static inline u16 igb_get_hlen(struct igb_adapter *adapter, +static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, + struct sk_buff *skb) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + u64 regval; + + /* + * If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register + * values must belong to this one here and therefore we don't need to + * compare any of the additional attributes stored for it. + * + * If nothing went wrong, then it should have a skb_shared_tx that we + * can turn into a skb_shared_hwtstamps. + */ + if (likely(!(staterr & E1000_RXDADV_STAT_TS))) + return; + if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + regval = rd32(E1000_RXSTMPL); + regval |= (u64)rd32(E1000_RXSTMPH) << 32; + + igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); +} +static inline u16 igb_get_hlen(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc) { /* HW will not DMA in data larger than the given buffer, even if it @@ -4607,27 +5097,28 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter, */ u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; - if (hlen > adapter->rx_ps_hdr_size) - hlen = adapter->rx_ps_hdr_size; + if (hlen > rx_ring->rx_buffer_len) + hlen = rx_ring->rx_buffer_len; return hlen; } -static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, - int *work_done, int budget) +static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, + int *work_done, int budget) { - struct igb_adapter *adapter = rx_ring->adapter; - struct net_device *netdev = adapter->netdev; - struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; + struct igb_ring *rx_ring = q_vector->rx_ring; + struct net_device *netdev = rx_ring->netdev; + struct pci_dev *pdev = rx_ring->pdev; union e1000_adv_rx_desc *rx_desc , *next_rxd; struct igb_buffer *buffer_info , *next_buffer; struct sk_buff *skb; bool cleaned = false; int cleaned_count = 0; + int current_node = numa_node_id(); unsigned int total_bytes = 0, total_packets = 0; unsigned int i; u32 staterr; u16 length; + u16 vlan_tag; i = rx_ring->next_to_clean; buffer_info = &rx_ring->buffer_info[i]; @@ -4646,6 +5137,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, i++; if (i == rx_ring->count) i = 0; + next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); prefetch(next_rxd); next_buffer = &rx_ring->buffer_info[i]; @@ -4654,23 +5146,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, cleaned = true; cleaned_count++; - /* this is the fast path for the non-packet split case */ - if (!adapter->rx_ps_hdr_size) { - pci_unmap_single(pdev, buffer_info->dma, - adapter->rx_buffer_len, - PCI_DMA_FROMDEVICE); - buffer_info->dma = 0; - skb_put(skb, length); - goto send_up; - } - if (buffer_info->dma) { - u16 hlen = igb_get_hlen(adapter, rx_desc); pci_unmap_single(pdev, buffer_info->dma, - adapter->rx_ps_hdr_size, + rx_ring->rx_buffer_len, PCI_DMA_FROMDEVICE); buffer_info->dma = 0; - skb_put(skb, hlen); + if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) { + skb_put(skb, length); + goto send_up; + } + skb_put(skb, igb_get_hlen(rx_ring, rx_desc)); } if (length) { @@ -4683,15 
+5168,14 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, buffer_info->page_offset, length); - if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || - (page_count(buffer_info->page) != 1)) + if ((page_count(buffer_info->page) != 1) || + (page_to_nid(buffer_info->page) != current_node)) buffer_info->page = NULL; else get_page(buffer_info->page); skb->len += length; skb->data_len += length; - skb->truesize += length; } @@ -4703,60 +5187,24 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, goto next_desc; } send_up: - /* - * If this bit is set, then the RX registers contain - * the time stamp. No other packet will be time - * stamped until we read these registers, so read the - * registers to make them available again. Because - * only one packet can be time stamped at a time, we - * know that the register values must belong to this - * one here and therefore we don't need to compare - * any of the additional attributes stored for it. - * - * If nothing went wrong, then it should have a - * skb_shared_tx that we can turn into a - * skb_shared_hwtstamps. - * - * TODO: can time stamping be triggered (thus locking - * the registers) without the packet reaching this point - * here? In that case RX time stamping would get stuck. - * - * TODO: in "time stamp all packets" mode this bit is - * not set. Need a global flag for this mode and then - * always read the registers. Cannot be done without - * a race condition. - */ - if (unlikely(staterr & E1000_RXD_STAT_TS)) { - u64 regval; - u64 ns; - struct skb_shared_hwtstamps *shhwtstamps = - skb_hwtstamps(skb); - - WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID), - "igb: no RX time stamp available for time stamped packet"); - regval = rd32(E1000_RXSTMPL); - regval |= (u64)rd32(E1000_RXSTMPH) << 32; - ns = timecounter_cyc2time(&adapter->clock, regval); - timecompare_update(&adapter->compare, ns); - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - shhwtstamps->hwtstamp = ns_to_ktime(ns); - shhwtstamps->syststamp = - timecompare_transform(&adapter->compare, ns); - } - if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { dev_kfree_skb_irq(skb); goto next_desc; } + igb_rx_hwtstamp(q_vector, staterr, skb); total_bytes += skb->len; total_packets++; - igb_rx_checksum_adv(adapter, staterr, skb); + igb_rx_checksum_adv(rx_ring, staterr, skb); skb->protocol = eth_type_trans(skb, netdev); + skb_record_rx_queue(skb, rx_ring->queue_index); - igb_receive_skb(rx_ring, staterr, rx_desc, skb); + vlan_tag = ((staterr & E1000_RXD_STAT_VP) ? 
+ le16_to_cpu(rx_desc->wb.upper.vlan) : 0); + + igb_receive_skb(q_vector, skb, vlan_tag); next_desc: rx_desc->wb.upper.status_error = 0; @@ -4783,8 +5231,6 @@ next_desc: rx_ring->total_bytes += total_bytes; rx_ring->rx_stats.packets += total_packets; rx_ring->rx_stats.bytes += total_bytes; - adapter->net_stats.rx_bytes += total_bytes; - adapter->net_stats.rx_packets += total_packets; return cleaned; } @@ -4792,12 +5238,9 @@ next_desc: * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split * @adapter: address of board private structure **/ -static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, - int cleaned_count) +void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count) { - struct igb_adapter *adapter = rx_ring->adapter; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = rx_ring->netdev; union e1000_adv_rx_desc *rx_desc; struct igb_buffer *buffer_info; struct sk_buff *skb; @@ -4807,19 +5250,16 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; - if (adapter->rx_ps_hdr_size) - bufsz = adapter->rx_ps_hdr_size; - else - bufsz = adapter->rx_buffer_len; + bufsz = rx_ring->rx_buffer_len; while (cleaned_count--) { rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); - if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { + if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) { if (!buffer_info->page) { - buffer_info->page = alloc_page(GFP_ATOMIC); + buffer_info->page = netdev_alloc_page(netdev); if (!buffer_info->page) { - adapter->alloc_rx_buff_failed++; + rx_ring->rx_stats.alloc_failed++; goto no_buffers; } buffer_info->page_offset = 0; @@ -4827,39 +5267,48 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, buffer_info->page_offset ^= PAGE_SIZE / 2; } buffer_info->page_dma = - pci_map_page(pdev, buffer_info->page, + pci_map_page(rx_ring->pdev, buffer_info->page, buffer_info->page_offset, PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(rx_ring->pdev, + buffer_info->page_dma)) { + buffer_info->page_dma = 0; + rx_ring->rx_stats.alloc_failed++; + goto no_buffers; + } } - if (!buffer_info->skb) { - skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); + skb = buffer_info->skb; + if (!skb) { + skb = netdev_alloc_skb_ip_align(netdev, bufsz); if (!skb) { - adapter->alloc_rx_buff_failed++; + rx_ring->rx_stats.alloc_failed++; goto no_buffers; } - /* Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->skb = skb; - buffer_info->dma = pci_map_single(pdev, skb->data, + } + if (!buffer_info->dma) { + buffer_info->dma = pci_map_single(rx_ring->pdev, + skb->data, bufsz, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(rx_ring->pdev, + buffer_info->dma)) { + buffer_info->dma = 0; + rx_ring->rx_stats.alloc_failed++; + goto no_buffers; + } } /* Refresh the desc even if buffer_addrs didn't change because * each write-back erases this info. 
*/ - if (adapter->rx_ps_hdr_size) { + if (bufsz < IGB_RXBUFFER_1024) { rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->page_dma); rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); } else { - rx_desc->read.pkt_addr = - cpu_to_le64(buffer_info->dma); + rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); rx_desc->read.hdr_addr = 0; } @@ -4882,7 +5331,7 @@ no_buffers: * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); - writel(i, adapter->hw.hw_addr + rx_ring->tail); + writel(i, rx_ring->tail); } } @@ -4941,13 +5390,11 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct hwtstamp_config config; - u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED; - u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED; - u32 tsync_rx_ctl_type = 0; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; u32 tsync_rx_cfg = 0; - int is_l4 = 0; - int is_l2 = 0; - short port = 319; /* PTP */ + bool is_l4 = false; + bool is_l2 = false; u32 regval; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) @@ -4959,10 +5406,8 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, switch (config.tx_type) { case HWTSTAMP_TX_OFF: - tsync_tx_ctl_bit = 0; - break; + tsync_tx_ctl = 0; case HWTSTAMP_TX_ON: - tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED; break; default: return -ERANGE; @@ -4970,7 +5415,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: - tsync_rx_ctl_bit = 0; + tsync_rx_ctl = 0; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: @@ -4981,86 +5426,97 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, * possible to time stamp both Sync and Delay_Req messages * => fall back to time stamping all packets */ - tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; config.rx_filter = HWTSTAMP_FILTER_ALL; break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; - is_l4 = 1; + is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; - is_l4 = 1; + is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; - is_l2 = 1; - is_l4 = 1; + is_l2 = true; + is_l4 = true; config.rx_filter = HWTSTAMP_FILTER_SOME; break; case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; - is_l2 = 1; - is_l4 = 1; + is_l2 = true; + is_l4 = true; config.rx_filter = HWTSTAMP_FILTER_SOME; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - is_l2 = 1; + is_l2 = true; break; default: return -ERANGE; } + if (hw->mac.type == e1000_82575) { + if 
(tsync_rx_ctl | tsync_tx_ctl) + return -EINVAL; + return 0; + } + /* enable/disable TX */ regval = rd32(E1000_TSYNCTXCTL); - regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit; + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; wr32(E1000_TSYNCTXCTL, regval); - /* enable/disable RX, define which PTP packets are time stamped */ + /* enable/disable RX */ regval = rd32(E1000_TSYNCRXCTL); - regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit; - regval = (regval & ~0xE) | tsync_rx_ctl_type; + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; wr32(E1000_TSYNCRXCTL, regval); - wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); - /* - * Ethertype Filter Queue Filter[0][15:0] = 0x88F7 - * (Ethertype to filter on) - * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter) - * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping) - */ - wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0); - - /* L4 Queue Filter[0]: only filter by source and destination port */ - wr32(E1000_SPQF0, htons(port)); - wr32(E1000_IMIREXT(0), is_l4 ? - ((1<<12) | (1<<19) /* bypass size and control flags */) : 0); - wr32(E1000_IMIR(0), is_l4 ? - (htons(port) - | (0<<16) /* immediate interrupt disabled */ - | 0 /* (1<<17) bit cleared: do not bypass - destination port check */) - : 0); - wr32(E1000_FTQF0, is_l4 ? - (0x11 /* UDP */ - | (1<<15) /* VF not compared */ - | (1<<27) /* Enable Timestamping */ - | (7<<28) /* only source port filter enabled, - source/target address and protocol - masked */) - : ((1<<15) | (15<<28) /* all mask bits set = filter not - enabled */)); + /* define which PTP packets are time stamped */ + wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); + /* define ethertype filter for timestamped packets */ + if (is_l2) + wr32(E1000_ETQF(3), + (E1000_ETQF_FILTER_ENABLE | /* enable filter */ + E1000_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(E1000_ETQF(3), 0); + +#define PTP_PORT 319 + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IPPROTO_UDP /* UDP */ + | E1000_FTQF_VF_BP /* VF not compared */ + | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ + | E1000_FTQF_MASK); /* mask all inputs */ + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ + + wr32(E1000_IMIR(3), htons(PTP_PORT)); + wr32(E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + if (hw->mac.type == e1000_82576) { + /* enable source port check */ + wr32(E1000_SPQF(3), htons(PTP_PORT)); + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + } + wr32(E1000_FTQF(3), ftqf); + } else { + wr32(E1000_FTQF(3), E1000_FTQF_MASK); + } wrfl(); adapter->hwtstamp_config = config; @@ -5137,21 +5593,15 @@ static void igb_vlan_rx_register(struct net_device *netdev, ctrl |= E1000_CTRL_VME; wr32(E1000_CTRL, ctrl); - /* enable VLAN receive filtering */ + /* Disable CFI check */ rctl = rd32(E1000_RCTL); rctl &= ~E1000_RCTL_CFIEN; wr32(E1000_RCTL, rctl); - igb_update_mng_vlan(adapter); } else { /* disable VLAN tag insert/strip */ ctrl = rd32(E1000_CTRL); ctrl &= ~E1000_CTRL_VME; wr32(E1000_CTRL, ctrl); - - if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) { - igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); - adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; - } } igb_rlpml_set(adapter); @@ -5166,16 +5616,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; - if ((hw->mng_cookie.status & 
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && - (vid == adapter->mng_vlan_id)) - return; - - /* add vid to vlvf if sr-iov is enabled, - * if that fails add directly to filter table */ - if (igb_vlvf_set(adapter, vid, true, pf_id)) - igb_vfta_set(hw, vid, true); + /* attempt to add filter to vlvf array */ + igb_vlvf_set(adapter, vid, true, pf_id); + /* add the filter since PF can receive vlans w/o entry in vlvf */ + igb_vfta_set(hw, vid, true); } static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) @@ -5183,6 +5628,7 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; + s32 err; igb_irq_disable(adapter); vlan_group_set_device(adapter->vlgrp, vid, NULL); @@ -5190,17 +5636,11 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) if (!test_bit(__IGB_DOWN, &adapter->state)) igb_irq_enable(adapter); - if ((adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && - (vid == adapter->mng_vlan_id)) { - /* release control to f/w */ - igb_release_hw_control(adapter); - return; - } + /* remove vlan from VLVF table array */ + err = igb_vlvf_set(adapter, vid, false, pf_id); - /* remove vid from vlvf if sr-iov is enabled, - * if not in vlvf remove from vfta */ - if (igb_vlvf_set(adapter, vid, false, pf_id)) + /* if vid was not present in VLVF just remove it from table */ + if (err) igb_vfta_set(hw, vid, false); } @@ -5220,6 +5660,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter) int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) { + struct pci_dev *pdev = adapter->pdev; struct e1000_mac_info *mac = &adapter->hw.mac; mac->autoneg = 0; @@ -5243,8 +5684,7 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: - dev_err(&adapter->pdev->dev, - "Unsupported Speed/Duplex configuration\n"); + dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); return -EINVAL; } return 0; @@ -5266,9 +5706,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) if (netif_running(netdev)) igb_close(netdev); - igb_reset_interrupt_capability(adapter); - - igb_free_queues(adapter); + igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -5300,7 +5738,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) wr32(E1000_CTRL, ctrl); /* Allow time for pending master requests to run */ - igb_disable_pcie_master(&adapter->hw); + igb_disable_pcie_master(hw); wr32(E1000_WUC, E1000_WUC_PME_EN); wr32(E1000_WUFC, wufc); @@ -5363,9 +5801,7 @@ static int igb_resume(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); - igb_set_interrupt_capability(adapter); - - if (igb_alloc_queues(adapter)) { + if (igb_init_interrupt_scheme(adapter)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } @@ -5417,22 +5853,16 @@ static void igb_netpoll(struct net_device *netdev) int i; if (!adapter->msix_entries) { + struct igb_q_vector *q_vector = adapter->q_vector[0]; igb_irq_disable(adapter); - napi_schedule(&adapter->rx_ring[0].napi); + napi_schedule(&q_vector->napi); return; } - for (i = 0; i < adapter->num_tx_queues; i++) { - struct igb_ring *tx_ring = &adapter->tx_ring[i]; - wr32(E1000_EIMC, tx_ring->eims_value); - igb_clean_tx_irq(tx_ring); - wr32(E1000_EIMS, tx_ring->eims_value); - } - - for (i = 0; i < 
adapter->num_rx_queues; i++) { - struct igb_ring *rx_ring = &adapter->rx_ring[i]; - wr32(E1000_EIMC, rx_ring->eims_value); - napi_schedule(&rx_ring->napi); + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + wr32(E1000_EIMC, q_vector->eims_value); + napi_schedule(&q_vector->napi); } } #endif /* CONFIG_NET_POLL_CONTROLLER */ @@ -5532,6 +5962,33 @@ static void igb_io_resume(struct pci_dev *pdev) igb_get_hw_control(adapter); } +static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, + u8 qsel) +{ + u32 rar_low, rar_high; + struct e1000_hw *hw = &adapter->hw; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + + if (hw->mac.type == e1000_82575) + rar_high |= E1000_RAH_POOL_1 * qsel; + else + rar_high |= E1000_RAH_POOL_1 << qsel; + + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + static int igb_set_vf_mac(struct igb_adapter *adapter, int vf, unsigned char *mac_addr) { @@ -5542,8 +5999,7 @@ static int igb_set_vf_mac(struct igb_adapter *adapter, memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); - igb_rar_set(hw, mac_addr, rar_entry); - igb_set_rah_pool(hw, vf, rar_entry); + igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf); return 0; } @@ -5551,19 +6007,29 @@ static int igb_set_vf_mac(struct igb_adapter *adapter, static void igb_vmm_control(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 reg_data; + u32 reg; - if (!adapter->vfs_allocated_count) + /* replication is not supported for 82575 */ + if (hw->mac.type == e1000_82575) return; - /* VF's need PF reset indication before they - * can send/receive mail */ - reg_data = rd32(E1000_CTRL_EXT); - reg_data |= E1000_CTRL_EXT_PFRSTD; - wr32(E1000_CTRL_EXT, reg_data); + /* enable replication vlan tag stripping */ + reg = rd32(E1000_RPLOLR); + reg |= E1000_RPLOLR_STRVLAN; + wr32(E1000_RPLOLR, reg); - igb_vmdq_set_loopback_pf(hw, true); - igb_vmdq_set_replication_pf(hw, true); + /* notify HW that the MAC is adding vlan tags */ + reg = rd32(E1000_DTXCTL); + reg |= E1000_DTXCTL_VLAN_ADDED; + wr32(E1000_DTXCTL, reg); + + if (adapter->vfs_allocated_count) { + igb_vmdq_set_loopback_pf(hw, true); + igb_vmdq_set_replication_pf(hw, true); + } else { + igb_vmdq_set_loopback_pf(hw, false); + igb_vmdq_set_replication_pf(hw, false); + } } /* igb_main.c */ |
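
A minimal standalone sketch of the RAL/RAH packing performed by the new igb_rar_set_qsel() in the hunk above; it assumes example values for the "address valid" and first pool-select bits (EX_RAH_AV, EX_RAH_POOL_1) and uses a hypothetical helper name, mirroring only the little-endian byte packing and the 82575 (multiply) versus 82576-and-newer (shift) pool encoding, not the actual register writes.

#include <stdint.h>
#include <stdio.h>

#define EX_RAH_AV      0x80000000u  /* assumed: RAH "address valid" bit */
#define EX_RAH_POOL_1  0x00040000u  /* assumed: first pool-select bit in RAH */

/* Pack a 6-byte MAC address plus pool select the way igb_rar_set_qsel() does. */
static void ex_pack_rar(const uint8_t addr[6], uint8_t qsel, int is_82575,
			uint32_t *rar_low, uint32_t *rar_high)
{
	/* bytes go in little endian, reversing network (big endian) order */
	*rar_low = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
		   ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	*rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

	/* mark the entry valid */
	*rar_high |= EX_RAH_AV;

	/* 82575 scales the pool bit, 82576 and newer shift one bit per pool */
	if (is_82575)
		*rar_high |= EX_RAH_POOL_1 * qsel;
	else
		*rar_high |= EX_RAH_POOL_1 << qsel;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint32_t lo, hi;

	/* e.g. pool/VF 2 on an 82576: pool-select bit 2 ends up set in RAH */
	ex_pack_rar(mac, 2, 0, &lo, &hi);
	printf("RAL = 0x%08x, RAH = 0x%08x\n", lo, hi);
	return 0;
}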