| author | David S. Miller <davem@davemloft.net> | 2014-01-06 23:37:45 +0100 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2014-01-06 23:37:45 +0100 |
| commit | 56a4342dfe3145cd66f766adccb28fd9b571606d (patch) | |
| tree | d1593764488ff8cbb0b83cb9ae35fd968bf81760 /drivers/net | |
| parent | net_sched: act: action flushing missaccounting (diff) | |
| parent | bridge: use spin_lock_bh() in br_multicast_set_hash_max (diff) | |
| download | linux-56a4342dfe3145cd66f766adccb28fd9b571606d.tar.xz, linux-56a4342dfe3145cd66f766adccb28fd9b571606d.zip | |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
    drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
    net/ipv6/ip6_tunnel.c
    net/ipv6/ip6_vti.c
The ipv6 tunnel conflicts are between statistics bug fixes and the
consolidation into generic sw per-cpu net stats.
The qlogic conflict is between a queue counting bug fix and the
addition of multiple MAC address support.
Signed-off-by: David S. Miller <davem@davemloft.net>
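For context on the first set of conflicts, the sketch below illustrates the generic software per-cpu net stats pattern that the ipv6 tunnel code was being consolidated into: each CPU updates only its own counters, and a u64_stats_sync sequence keeps 64-bit reads consistent on 32-bit hosts. This is a minimal illustration assuming the kernel APIs of that era, not code from this merge; the struct mirrors pcpu_sw_netstats from <linux/netdevice.h>, while the type and helper names used here (sw_netstats_example, sw_netstats_rx_add) are hypothetical.

```c
/* Minimal sketch of the generic sw per-cpu net stats pattern.  The
 * struct layout mirrors pcpu_sw_netstats; the type and helper names
 * are illustrative only, not taken from this merge.
 */
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct sw_netstats_example {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

/* Hot-path update: touch only this CPU's counters, no shared lock. */
static void sw_netstats_rx_add(struct sw_netstats_example __percpu *stats,
			       unsigned int len)
{
	struct sw_netstats_example *tstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}
```

A reader aggregating these counters would walk for_each_possible_cpu() and wrap each per-CPU read in u64_stats_fetch_begin()/u64_stats_fetch_retry() so torn 64-bit values are retried.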
Diffstat (limited to 'drivers/net')
58 files changed, 875 insertions, 346 deletions
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 81559b2dedad..539e24a1c86c 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2209,20 +2209,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave) port = &(SLAVE_AD_INFO(slave).port); - // if slave is null, the whole port is not initialized + /* if slave is null, the whole port is not initialized */ if (!port->slave) { pr_warning("Warning: %s: speed changed for uninitialized port on %s\n", slave->bond->dev->name, slave->dev->name); return; } + __get_state_machine_lock(port); + port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; port->actor_oper_port_key = port->actor_admin_port_key |= (__get_link_speed(port) << 1); pr_debug("Port %d changed speed\n", port->actor_port_number); - // there is no need to reselect a new aggregator, just signal the - // state machines to reinitialize + /* there is no need to reselect a new aggregator, just signal the + * state machines to reinitialize + */ port->sm_vars |= AD_PORT_BEGIN; + + __release_state_machine_lock(port); } /** @@ -2237,20 +2242,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) port = &(SLAVE_AD_INFO(slave).port); - // if slave is null, the whole port is not initialized + /* if slave is null, the whole port is not initialized */ if (!port->slave) { pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n", slave->bond->dev->name, slave->dev->name); return; } + __get_state_machine_lock(port); + port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; port->actor_oper_port_key = port->actor_admin_port_key |= __get_duplex(port); pr_debug("Port %d changed duplex\n", port->actor_port_number); - // there is no need to reselect a new aggregator, just signal the - // state machines to reinitialize + /* there is no need to reselect a new aggregator, just signal the + * state machines to reinitialize + */ port->sm_vars |= AD_PORT_BEGIN; + + __release_state_machine_lock(port); } /** @@ -2266,15 +2276,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) port = &(SLAVE_AD_INFO(slave).port); - // if slave is null, the whole port is not initialized + /* if slave is null, the whole port is not initialized */ if (!port->slave) { pr_warning("Warning: %s: link status changed for uninitialized port on %s\n", slave->bond->dev->name, slave->dev->name); return; } - // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed) - // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report + __get_state_machine_lock(port); + /* on link down we are zeroing duplex and speed since + * some of the adaptors(ce1000.lan) report full duplex/speed + * instead of N/A(duplex) / 0(speed). + * + * on link up we are forcing recheck on the duplex and speed since + * some of he adaptors(ce1000.lan) report. 
+ */ if (link == BOND_LINK_UP) { port->is_enabled = true; port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; @@ -2290,10 +2306,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) port->actor_oper_port_key = (port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS); } - //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN"))); - // there is no need to reselect a new aggregator, just signal the - // state machines to reinitialize + pr_debug("Port %d changed link status to %s", + port->actor_port_number, + (link == BOND_LINK_UP) ? "UP" : "DOWN"); + /* there is no need to reselect a new aggregator, just signal the + * state machines to reinitialize + */ port->sm_vars |= AD_PORT_BEGIN; + + __release_state_machine_lock(port); } /* diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index eedf2a5fc2be..eeecc29cf5b7 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -555,6 +555,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) /* Make sure pointer to data buffer is set */ wmb(); + skb_tx_timestamp(skb); + *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); /* Increment index to point to the next BD */ @@ -569,8 +571,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) arc_reg_set(priv, R_STATUS, TXPL_MASK); - skb_tx_timestamp(skb); - return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index a36a760ada28..29801750f239 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) * Mask some pcie error bits */ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); - pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); - data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); - pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); + if (pos) { + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); + data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); + pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); + } /* clear error status */ pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_NFED | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index dad67905f4e2..eb105abcf0e7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1250,7 +1250,10 @@ struct bnx2x_slowpath { * Therefore, if they would have been defined in the same union, * data can get corrupted. */ - struct afex_vif_list_ramrod_data func_afex_rdata; + union { + struct afex_vif_list_ramrod_data viflist_data; + struct function_update_data func_update; + } func_afex_rdata; /* used by dmae command executer */ struct dmae_command dmae[MAX_DMAE_C]; @@ -2501,4 +2504,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp); #define MCPR_SCRATCH_BASE(bp) \ (CHIP_IS_E1x(bp) ? 
MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) +#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX)) + #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 20dcc02431ca..11fc79585491 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, bnx2x_warpcore_enable_AN_KR2(phy, params, vars); } else { + /* Enable Auto-Detect to support 1G over CL37 as well */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); + + /* Force cl48 sync_status LOW to avoid getting stuck in CL73 + * parallel-detect loop when CL73 and CL37 are enabled. + */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800); + bnx2x_set_aer_mmd(params, phy); + bnx2x_disable_kr2(params, vars, phy); } @@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, *edc_mode = EDC_MODE_ACTIVE_DAC; else check_limiting_mode = 1; - } else if (copper_module_type & - SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { + } else { + *edc_mode = EDC_MODE_PASSIVE_DAC; + /* Even in case PASSIVE_DAC indication is not set, + * treat it as a passive DAC cable, since some cables + * don't have this indication. + */ + if (copper_module_type & + SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { DP(NETIF_MSG_LINK, "Passive Copper cable detected\n"); - *edc_mode = - EDC_MODE_PASSIVE_DAC; - } else { - DP(NETIF_MSG_LINK, - "Unknown copper-cable-type 0x%x !!!\n", - copper_module_type); - return -EINVAL; + } else { + DP(NETIF_MSG_LINK, + "Unknown copper-cable-type\n"); + } } break; } @@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, (1<<11)); if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || - (phy->req_line_speed == SPEED_1000)) { + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == SPEED_1000)) { an_1000_val |= (1<<8); autoneg_val |= (1<<9 | 1<<12); if (phy->req_duplex == DUPLEX_FULL) @@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, 0x09, &an_1000_val); - /* Set 100 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { - an_10_100_val |= (1<<7); - /* Enable autoneg and restart autoneg for legacy speeds */ - autoneg_val |= (1<<9 | 1<<12); - - if (phy->req_duplex == DUPLEX_FULL) - an_10_100_val |= (1<<8); - DP(NETIF_MSG_LINK, "Advertising 100M\n"); - } - - /* Set 10 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { - an_10_100_val |= (1<<5); - autoneg_val |= (1<<9 | 1<<12); - if (phy->req_duplex == DUPLEX_FULL) + /* Advertise 10/100 link speed */ + if (phy->req_line_speed == SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { + an_10_100_val |= (1<<5); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) { an_10_100_val |= (1<<6); - DP(NETIF_MSG_LINK, 
"Advertising 10M\n"); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + an_10_100_val |= (1<<7); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + an_10_100_val |= (1<<8); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); + } } /* Only 10/100 are allowed to work in FORCE mode */ @@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params, DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, old_status, status); + /* Do not touch the link in case physical link down */ + if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) + return 1; + /* a. Update shmem->link_status accordingly * b. Update link_vars->link_up */ @@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, */ not_kr2_device = (((base_page & 0x8000) == 0) || (((base_page & 0x8000) && - ((next_page & 0xe0) == 0x2)))); + ((next_page & 0xe0) == 0x20)))); /* In case KR2 is already disabled, check if we need to re-enable it */ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index d3748bf3ac7b..18498fed520b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -11517,9 +11517,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } } - /* adjust igu_sb_cnt to MF for E1x */ - if (CHIP_IS_E1x(bp) && IS_MF(bp)) - bp->igu_sb_cnt /= E1HVN_MAX; + /* adjust igu_sb_cnt to MF for E1H */ + if (CHIP_IS_E1H(bp) && IS_MF(bp)) + bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); /* port info */ bnx2x_get_port_hwinfo(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 08f8047188e9..2beb5430b876 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -7180,6 +7180,7 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca #define MDIO_WC_REG_RX2_PCI_CTRL 0x80da #define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea +#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 #define MDIO_WC_REG_XGXS_STATUS3 0x8129 #define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index babf7b954ae6..98cccd487fc2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2036,6 +2036,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, struct bnx2x_vlan_mac_ramrod_params p; struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; + unsigned long flags; int read_lock; int rc = 0; @@ -2044,8 +2045,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, spin_lock_bh(&exeq->lock); list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { - if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == - *vlan_mac_flags) { + flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags; + if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == + BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { rc = exeq->remove(bp, exeq->owner, exeq_pos); if (rc) { BNX2X_ERR("Failed to remove command\n"); @@ -2078,7 +2080,9 @@ 
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, return read_lock; list_for_each_entry(pos, &o->head, link) { - if (pos->vlan_mac_flags == *vlan_mac_flags) { + flags = pos->vlan_mac_flags; + if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == + BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { p.user_req.vlan_mac_flags = pos->vlan_mac_flags; memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); rc = bnx2x_config_vlan_mac(bp, &p); @@ -4380,8 +4384,11 @@ int bnx2x_config_rss(struct bnx2x *bp, struct bnx2x_raw_obj *r = &o->raw; /* Do nothing if only driver cleanup was requested */ - if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n", + p->ramrod_flags); return 0; + } r->set_pending(r); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 658f4e33abf9..6a53c15c85a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -266,6 +266,13 @@ enum { BNX2X_DONT_CONSUME_CAM_CREDIT, BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, }; +/* When looking for matching filters, some flags are not interesting */ +#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \ + 1 << BNX2X_ETH_MAC | \ + 1 << BNX2X_ISCSI_ETH_MAC | \ + 1 << BNX2X_NETQ_ETH_MAC) +#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \ + ((flags) & BNX2X_VLAN_MAC_CMP_MASK) struct bnx2x_vlan_mac_ramrod_params { /* Object to run the command from */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 6fe52d301dfe..31ab924600c1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1213,6 +1213,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf) /* next state */ vfop->state = BNX2X_VFOP_RXMODE_DONE; + /* record the accept flags in vfdb so hypervisor can modify them + * if necessary + */ + bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) = + ramrod->rx_accept_flags; vfop->rc = bnx2x_config_rx_mode(bp, ramrod); bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); op_err: @@ -1228,39 +1233,43 @@ op_pending: return; } +static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, + struct bnx2x_rx_mode_ramrod_params *ramrod, + struct bnx2x_virtf *vf, + unsigned long accept_flags) +{ + struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); + + memset(ramrod, 0, sizeof(*ramrod)); + ramrod->cid = vfq->cid; + ramrod->cl_id = vfq_cl_id(vf, vfq); + ramrod->rx_mode_obj = &bp->rx_mode_obj; + ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); + ramrod->rx_accept_flags = accept_flags; + ramrod->tx_accept_flags = accept_flags; + ramrod->pstate = &vf->filter_state; + ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; + + set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); + set_bit(RAMROD_RX, &ramrod->ramrod_flags); + set_bit(RAMROD_TX, &ramrod->ramrod_flags); + + ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); + ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); +} + int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vfop_cmd *cmd, int qid, unsigned long accept_flags) { - struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); if (vfop) { struct bnx2x_rx_mode_ramrod_params *ramrod = &vf->op_params.rx_mode; - memset(ramrod, 0, sizeof(*ramrod)); - - /* Prepare ramrod parameters */ - ramrod->cid = vfq->cid; - ramrod->cl_id 
= vfq_cl_id(vf, vfq); - ramrod->rx_mode_obj = &bp->rx_mode_obj; - ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); - - ramrod->rx_accept_flags = accept_flags; - ramrod->tx_accept_flags = accept_flags; - ramrod->pstate = &vf->filter_state; - ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; - - set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); - set_bit(RAMROD_RX, &ramrod->ramrod_flags); - set_bit(RAMROD_TX, &ramrod->ramrod_flags); - - ramrod->rdata = - bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); - ramrod->rdata_mapping = - bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); + bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags); bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, bnx2x_vfop_rxmode, cmd->done); @@ -3213,13 +3222,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp) bnx2x_iov_static_resc(bp, vf); } - /* prepare msix vectors in VF configuration space */ + /* prepare msix vectors in VF configuration space - the value in the + * PCI configuration space should be the index of the last entry, + * namely one less than the actual size of the table + */ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, - num_vf_queues); + num_vf_queues - 1); DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", - vf_idx, num_vf_queues); + vf_idx, num_vf_queues - 1); } bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); @@ -3447,10 +3459,18 @@ out: int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) { + struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_vlan_mac_ramrod_params ramrod_param; + struct bnx2x_queue_update_params *update_params; + struct pf_vf_bulletin_content *bulletin = NULL; + struct bnx2x_rx_mode_ramrod_params rx_ramrod; struct bnx2x *bp = netdev_priv(dev); - int rc, q_logical_state; + struct bnx2x_vlan_mac_obj *vlan_obj; + unsigned long vlan_mac_flags = 0; + unsigned long ramrod_flags = 0; struct bnx2x_virtf *vf = NULL; - struct pf_vf_bulletin_content *bulletin = NULL; + unsigned long accept_flags; + int rc; /* sanity and init */ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); @@ -3468,104 +3488,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) /* update PF's copy of the VF's bulletin. No point in posting the vlan * to the VF since it doesn't have anything to do with it. But it useful * to store it here in case the VF is not up yet and we can only - * configure the vlan later when it does. + * configure the vlan later when it does. Treat vlan id 0 as remove the + * Host tag. */ - bulletin->valid_bitmap |= 1 << VLAN_VALID; + if (vlan > 0) + bulletin->valid_bitmap |= 1 << VLAN_VALID; + else + bulletin->valid_bitmap &= ~(1 << VLAN_VALID); bulletin->vlan = vlan; /* is vf initialized and queue set up? 
*/ - q_logical_state = - bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); - if (vf->state == VF_ENABLED && - q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { - /* configure the vlan in device on this vf's queue */ - unsigned long ramrod_flags = 0; - unsigned long vlan_mac_flags = 0; - struct bnx2x_vlan_mac_obj *vlan_obj = - &bnx2x_leading_vfq(vf, vlan_obj); - struct bnx2x_vlan_mac_ramrod_params ramrod_param; - struct bnx2x_queue_state_params q_params = {NULL}; - struct bnx2x_queue_update_params *update_params; + if (vf->state != VF_ENABLED || + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != + BNX2X_Q_LOGICAL_STATE_ACTIVE) + return rc; - rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); - if (rc) - return rc; - memset(&ramrod_param, 0, sizeof(ramrod_param)); + /* configure the vlan in device on this vf's queue */ + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); + rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); + if (rc) + return rc; - /* must lock vfpf channel to protect against vf flows */ - bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + /* must lock vfpf channel to protect against vf flows */ + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); - /* remove existing vlans */ - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, - &ramrod_flags); - if (rc) { - BNX2X_ERR("failed to delete vlans\n"); - rc = -EINVAL; - goto out; - } + /* remove existing vlans */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc) { + BNX2X_ERR("failed to delete vlans\n"); + rc = -EINVAL; + goto out; + } + + /* need to remove/add the VF's accept_any_vlan bit */ + accept_flags = bnx2x_leading_vfq(vf, accept_flags); + if (vlan) + clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + else + set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + + bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, + accept_flags); + bnx2x_leading_vfq(vf, accept_flags) = accept_flags; + bnx2x_config_rx_mode(bp, &rx_ramrod); + + /* configure the new vlan to device */ + memset(&ramrod_param, 0, sizeof(ramrod_param)); + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + ramrod_param.vlan_mac_obj = vlan_obj; + ramrod_param.ramrod_flags = ramrod_flags; + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod_param.user_req.vlan_mac_flags); + ramrod_param.user_req.u.vlan.vlan = vlan; + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + if (rc) { + BNX2X_ERR("failed to configure vlan\n"); + rc = -EINVAL; + goto out; + } - /* send queue update ramrod to configure default vlan and silent - * vlan removal + /* send queue update ramrod to configure default vlan and silent + * vlan removal + */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); + update_params = &q_params.params.update; + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + &update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + &update_params->update_flags); + if (vlan == 0) { + /* if vlan is 0 then we want to leave the VF traffic + * untagged, and leave the incoming traffic untouched + * (i.e. do not remove any vlan tags). 
*/ - __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - q_params.cmd = BNX2X_Q_CMD_UPDATE; - q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); - update_params = &q_params.params.update; - __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + } else { + /* configure default vlan to vf queue and set silent + * vlan removal (the vf remains unaware of this vlan). + */ + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &update_params->update_flags); - __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &update_params->update_flags); + update_params->def_vlan = vlan; + update_params->silent_removal_value = + vlan & VLAN_VID_MASK; + update_params->silent_removal_mask = VLAN_VID_MASK; + } - if (vlan == 0) { - /* if vlan is 0 then we want to leave the VF traffic - * untagged, and leave the incoming traffic untouched - * (i.e. do not remove any vlan tags). - */ - __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, - &update_params->update_flags); - __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, - &update_params->update_flags); - } else { - /* configure the new vlan to device */ - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - ramrod_param.vlan_mac_obj = vlan_obj; - ramrod_param.ramrod_flags = ramrod_flags; - ramrod_param.user_req.u.vlan.vlan = vlan; - ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; - rc = bnx2x_config_vlan_mac(bp, &ramrod_param); - if (rc) { - BNX2X_ERR("failed to configure vlan\n"); - rc = -EINVAL; - goto out; - } - - /* configure default vlan to vf queue and set silent - * vlan removal (the vf remains unaware of this vlan). - */ - update_params = &q_params.params.update; - __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, - &update_params->update_flags); - __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, - &update_params->update_flags); - update_params->def_vlan = vlan; - } + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure default VLAN\n"); + goto out; + } - /* Update the Queue state */ - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) { - BNX2X_ERR("Failed to configure default VLAN\n"); - goto out; - } - /* clear the flag indicating that this VF needs its vlan - * (will only be set if the HV configured the Vlan before vf was - * up and we were called because the VF came up later - */ + /* clear the flag indicating that this VF needs its vlan + * (will only be set if the HV configured the Vlan before vf was + * up and we were called because the VF came up later + */ out: - vf->cfg_flags &= ~VF_CFG_VLAN; - bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); - } + vf->cfg_flags &= ~VF_CFG_VLAN; + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + return rc; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index a5c84a7d454c..d72ab7e24de0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -74,6 +74,7 @@ struct bnx2x_vf_queue { /* VLANs object */ struct bnx2x_vlan_mac_obj vlan_obj; atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ + unsigned long accept_flags; /* last accept flags configured */ /* Queue Slow-path State object */ struct bnx2x_queue_sp_obj sp_obj; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 
e5f7985a372c..1b1ad31b4553 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) return -EINVAL; } - BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); + DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg); *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; @@ -1610,6 +1610,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { unsigned long accept = 0; + struct pf_vf_bulletin_content *bulletin = + BP_VF_BULLETIN(bp, vf->index); /* covert VF-PF if mask to bnx2x accept flags */ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) @@ -1629,9 +1631,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); /* A packet arriving the vf's mac should be accepted - * with any vlan + * with any vlan, unless a vlan has already been + * configured. */ - __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); + if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); /* set rx-mode */ rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, @@ -1722,6 +1726,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, goto response; } } + /* if vlan was set by hypervisor we don't allow guest to config vlan */ + if (bulletin->valid_bitmap & 1 << VLAN_VALID) { + int i; + + /* search for vlan filters */ + for (i = 0; i < filters->n_mac_vlan_filters; i++) { + if (filters->filters[i].flags & + VFPF_Q_FILTER_VLAN_TAG_VALID) { + BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", + vf->abs_vfid); + vf->op_rc = -EPERM; + goto response; + } + } + } /* verify vf_qid */ if (filters->vf_qid > vf_rxq_count(vf)) @@ -1817,6 +1836,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; /* flags handled individually for backward/forward compatability */ + vf_op_params->rss_flags = 0; + vf_op_params->ramrod_flags = 0; + if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index d88ef551dfcd..c37e9f27ff6d 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7638,7 +7638,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) { u32 base = (u32) mapping & 0xffffffff; - return (base > 0xffffdcc0) && (base + len + 8 < base); + return base + len + 8 < base; } /* Test for TSO DMA buffers that cross into regions which are within MSS bytes diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 17fe50b91523..b97e35c33d17 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -228,6 +228,25 @@ struct tp_params { uint32_t dack_re; /* DACK timer resolution */ unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ + + u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ + u32 ingress_config; /* cached TP_INGRESS_CONFIG */ + + /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. 
This is a + * subset of the set of fields which may be present in the Compressed + * Filter Tuple portion of filters and TCP TCB connections. The + * fields which are present are controlled by the TP_VLAN_PRI_MAP. + * Since a variable number of fields may or may not be present, their + * shifted field positions within the Compressed Filter Tuple may + * vary, or not even be present if the field isn't selected in + * TP_VLAN_PRI_MAP. Since some of these fields are needed in various + * places we store their offsets here, or a -1 if the field isn't + * present. + */ + int vlan_shift; + int vnic_shift; + int port_shift; + int protocol_shift; }; struct vpd_params { @@ -925,6 +944,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); +int t4_init_tp_params(struct adapter *adap); +int t4_filter_field_shift(const struct adapter *adap, int filter_sel); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); void t4_fatal_err(struct adapter *adapter); int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index d6b12e035a7d..fff02ed1295e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) if (stid >= 0) { t->stid_tab[stid].data = data; stid += t->stid_base; - t->stids_in_use++; + /* IPv6 requires max of 520 bits or 16 cells in TCAM + * This is equivalent to 4 TIDs. With CLIP enabled it + * needs 2 TIDs. + */ + if (family == PF_INET) + t->stids_in_use++; + else + t->stids_in_use += 4; } spin_unlock_bh(&t->stid_lock); return stid; @@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data) } if (stid >= 0) { t->stid_tab[stid].data = data; - stid += t->stid_base; + stid -= t->nstids; + stid += t->sftid_base; t->stids_in_use++; } spin_unlock_bh(&t->stid_lock); @@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid); */ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) { - stid -= t->stid_base; + /* Is it a server filter TID? 
*/ + if (t->nsftids && (stid >= t->sftid_base)) { + stid -= t->sftid_base; + stid += t->nstids; + } else { + stid -= t->stid_base; + } + spin_lock_bh(&t->stid_lock); if (family == PF_INET) __clear_bit(stid, t->stid_bmap); else bitmap_release_region(t->stid_bmap, stid, 2); t->stid_tab[stid].data = NULL; - t->stids_in_use--; + if (family == PF_INET) + t->stids_in_use--; + else + t->stids_in_use -= 4; spin_unlock_bh(&t->stid_lock); } EXPORT_SYMBOL(cxgb4_free_stid); @@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t) size_t size; unsigned int stid_bmap_size; unsigned int natids = t->natids; + struct adapter *adap = container_of(t, struct adapter, tids); stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); size = t->ntids * sizeof(*t->tid_tab) + @@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t) t->afree = t->atid_tab; } bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); + /* Reserve stid 0 for T4/T5 adapters */ + if (!t->stid_base && + (is_t4(adap->params.chip) || is_t5(adap->params.chip))) + __set_bit(0, t->stid_bmap); + return 0; } @@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> (adap->fn * 4)); - lli.filt_mode = adap->filter_mode; + lli.filt_mode = adap->params.tp.vlan_pri_map; /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ for (i = 0; i < NCHAN; i++) lli.tx_modq[i] = i; @@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, adap = netdev2adap(dev); /* Adjust stid to correct filter index */ - stid -= adap->tids.nstids; + stid -= adap->tids.sftid_base; stid += adap->tids.nftids; /* Check to make sure the filter requested is writable ... @@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, f->fs.val.lip[i] = val[i]; f->fs.mask.lip[i] = ~0; } - if (adap->filter_mode & F_PORT) { + if (adap->params.tp.vlan_pri_map & F_PORT) { f->fs.val.iport = port; f->fs.mask.iport = mask; } } + if (adap->params.tp.vlan_pri_map & F_PROTOCOL) { + f->fs.val.proto = IPPROTO_TCP; + f->fs.mask.proto = ~0; + } + f->fs.dirsteer = 1; f->fs.iq = queue; /* Mark filter as locked */ @@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, adap = netdev2adap(dev); /* Adjust stid to correct filter index */ - stid -= adap->tids.nstids; + stid -= adap->tids.sftid_base; stid += adap->tids.nftids; f = &adap->tids.ftid_tab[stid]; @@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap) enum dev_state state; u32 params[7], val[7]; struct fw_caps_config_cmd caps_cmd; - int reset = 1, j; + int reset = 1; /* * Contact FW, advertising Master capability (and potentially forcing @@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap) /* * These are finalized by FW initialization, load their values now. 
*/ - v = t4_read_reg(adap, TP_TIMER_RESOLUTION); - adap->params.tp.tre = TIMERRESOLUTION_GET(v); - adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); t4_read_mtu_tbl(adap, adap->params.mtus, NULL); t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, adap->params.b_wnd); - /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ - for (j = 0; j < NCHAN; j++) - adap->params.tp.tx_modq[j] = j; - - t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, - &adap->filter_mode, 1, - TP_VLAN_PRI_MAP); - + t4_init_tp_params(adap); adap->flags |= FW_OK; return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 6f21f2451c30..4dd0a82533e4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) { - stid -= t->stid_base; + /* Is it a server filter TID? */ + if (t->nsftids && (stid >= t->sftid_base)) { + stid -= t->sftid_base; + stid += t->nstids; + } else { + stid -= t->stid_base; + } + return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 29878098101e..cb05be905def 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -45,6 +45,7 @@ #include "l2t.h" #include "t4_msg.h" #include "t4fw_api.h" +#include "t4_regs.h" #define VLAN_NONE 0xfff @@ -411,6 +412,40 @@ done: } EXPORT_SYMBOL(cxgb4_l2t_get); +u64 cxgb4_select_ntuple(struct net_device *dev, + const struct l2t_entry *l2t) +{ + struct adapter *adap = netdev2adap(dev); + struct tp_params *tp = &adap->params.tp; + u64 ntuple = 0; + + /* Initialize each of the fields which we care about which are present + * in the Compressed Filter Tuple. + */ + if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE) + ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift; + + if (tp->port_shift >= 0) + ntuple |= (u64)l2t->lport << tp->port_shift; + + if (tp->protocol_shift >= 0) + ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift; + + if (tp->vnic_shift >= 0) { + u32 viid = cxgb4_port_viid(dev); + u32 vf = FW_VIID_VIN_GET(viid); + u32 pf = FW_VIID_PFN_GET(viid); + u32 vld = FW_VIID_VIVLD_GET(viid); + + ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | + V_FT_VNID_ID_PF(pf) | + V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift; + } + + return ntuple; +} +EXPORT_SYMBOL(cxgb4_select_ntuple); + /* * Called when address resolution fails for an L2T entry to handle packets * on the arpq head. 
If a packet specifies a failure handler it is invoked, diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h index 108c0f1fce1c..85eb5c71358d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h @@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, const struct net_device *physdev, unsigned int priority); - +u64 cxgb4_select_ntuple(struct net_device *dev, + const struct l2t_entry *l2t); void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 42745438c1e0..47ffa64fcf19 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2583,7 +2583,7 @@ static int t4_sge_init_soft(struct adapter *adap) #undef READ_FL_BUF if (fl_small_pg != PAGE_SIZE || - (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg || + (fl_large_pg != 0 && (fl_large_pg < fl_small_pg || (fl_large_pg & (fl_large_pg-1)) != 0))) { dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", fl_small_pg, fl_large_pg); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 9903a66b7bad..a3964753935c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3682,6 +3682,109 @@ int t4_prep_adapter(struct adapter *adapter) return 0; } +/** + * t4_init_tp_params - initialize adap->params.tp + * @adap: the adapter + * + * Initialize various fields of the adapter's TP Parameters structure. + */ +int t4_init_tp_params(struct adapter *adap) +{ + int chan; + u32 v; + + v = t4_read_reg(adap, TP_TIMER_RESOLUTION); + adap->params.tp.tre = TIMERRESOLUTION_GET(v); + adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); + + /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ + for (chan = 0; chan < NCHAN; chan++) + adap->params.tp.tx_modq[chan] = chan; + + /* Cache the adapter's Compressed Filter Mode and global Incress + * Configuration. + */ + t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, + &adap->params.tp.vlan_pri_map, 1, + TP_VLAN_PRI_MAP); + t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, + &adap->params.tp.ingress_config, 1, + TP_INGRESS_CONFIG); + + /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field + * shift positions of several elements of the Compressed Filter Tuple + * for this adapter which we need frequently ... + */ + adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN); + adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); + adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT); + adap->params.tp.protocol_shift = t4_filter_field_shift(adap, + F_PROTOCOL); + + /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID + * represents the presense of an Outer VLAN instead of a VNIC ID. + */ + if ((adap->params.tp.ingress_config & F_VNIC) == 0) + adap->params.tp.vnic_shift = -1; + + return 0; +} + +/** + * t4_filter_field_shift - calculate filter field shift + * @adap: the adapter + * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) + * + * Return the shift position of a filter field within the Compressed + * Filter Tuple. 
The filter field is specified via its selection bit + * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN. + */ +int t4_filter_field_shift(const struct adapter *adap, int filter_sel) +{ + unsigned int filter_mode = adap->params.tp.vlan_pri_map; + unsigned int sel; + int field_shift; + + if ((filter_mode & filter_sel) == 0) + return -1; + + for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { + switch (filter_mode & sel) { + case F_FCOE: + field_shift += W_FT_FCOE; + break; + case F_PORT: + field_shift += W_FT_PORT; + break; + case F_VNIC_ID: + field_shift += W_FT_VNIC_ID; + break; + case F_VLAN: + field_shift += W_FT_VLAN; + break; + case F_TOS: + field_shift += W_FT_TOS; + break; + case F_PROTOCOL: + field_shift += W_FT_PROTOCOL; + break; + case F_ETHERTYPE: + field_shift += W_FT_ETHERTYPE; + break; + case F_MACMATCH: + field_shift += W_FT_MACMATCH; + break; + case F_MPSHITTYPE: + field_shift += W_FT_MPSHITTYPE; + break; + case F_FRAGMENTATION: + field_shift += W_FT_FRAGMENTATION; + break; + } + } + return field_shift; +} + int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) { u8 addr[6]; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 0a8205d69d2c..4082522d8140 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1171,10 +1171,50 @@ #define A_TP_TX_SCHED_PCMD 0x25 +#define S_VNIC 11 +#define V_VNIC(x) ((x) << S_VNIC) +#define F_VNIC V_VNIC(1U) + +#define S_FRAGMENTATION 9 +#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION) +#define F_FRAGMENTATION V_FRAGMENTATION(1U) + +#define S_MPSHITTYPE 8 +#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE) +#define F_MPSHITTYPE V_MPSHITTYPE(1U) + +#define S_MACMATCH 7 +#define V_MACMATCH(x) ((x) << S_MACMATCH) +#define F_MACMATCH V_MACMATCH(1U) + +#define S_ETHERTYPE 6 +#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE) +#define F_ETHERTYPE V_ETHERTYPE(1U) + +#define S_PROTOCOL 5 +#define V_PROTOCOL(x) ((x) << S_PROTOCOL) +#define F_PROTOCOL V_PROTOCOL(1U) + +#define S_TOS 4 +#define V_TOS(x) ((x) << S_TOS) +#define F_TOS V_TOS(1U) + +#define S_VLAN 3 +#define V_VLAN(x) ((x) << S_VLAN) +#define F_VLAN V_VLAN(1U) + +#define S_VNIC_ID 2 +#define V_VNIC_ID(x) ((x) << S_VNIC_ID) +#define F_VNIC_ID V_VNIC_ID(1U) + #define S_PORT 1 #define V_PORT(x) ((x) << S_PORT) #define F_PORT V_PORT(1U) +#define S_FCOE 0 +#define V_FCOE(x) ((x) << S_FCOE) +#define F_FCOE V_FCOE(1U) + #define NUM_MPS_CLS_SRAM_L_INSTANCES 336 #define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 @@ -1213,4 +1253,37 @@ #define V_CHIPID(x) ((x) << S_CHIPID) #define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) +/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the + * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP + * selects for a particular field being present. These fields, when present + * in the Compressed Filter Tuple, have the following widths in bits. + */ +#define W_FT_FCOE 1 +#define W_FT_PORT 3 +#define W_FT_VNIC_ID 17 +#define W_FT_VLAN 17 +#define W_FT_TOS 8 +#define W_FT_PROTOCOL 8 +#define W_FT_ETHERTYPE 16 +#define W_FT_MACMATCH 9 +#define W_FT_MPSHITTYPE 3 +#define W_FT_FRAGMENTATION 1 + +/* Some of the Compressed Filter Tuple fields have internal structure. These + * bit shifts/masks describe those structures. 
All shifts are relative to the + * base position of the fields within the Compressed Filter Tuple + */ +#define S_FT_VLAN_VLD 16 +#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD) +#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U) + +#define S_FT_VNID_ID_VF 0 +#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF) + +#define S_FT_VNID_ID_PF 7 +#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF) + +#define S_FT_VNID_ID_VLD 16 +#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD) + #endif /* __T4_REGS_H */ diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 5878df619b53..4ccaf9af6fc9 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev) #define BE3_MAX_RSS_QS 16 #define BE3_MAX_TX_QS 16 #define BE3_MAX_EVT_QS 16 +#define BE3_SRIOV_MAX_EVT_QS 8 #define MAX_RX_QS 32 #define MAX_EVT_QS 32 @@ -480,7 +481,7 @@ struct be_adapter { struct list_head entry; u32 flash_status; - struct completion flash_compl; + struct completion et_cmd_compl; struct be_resources res; /* resources available for the func */ u16 num_vfs; /* Number of VFs provisioned by PF */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index e0e8bc1ef14c..94c35c8d799d 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter, subsystem = resp_hdr->subsystem; } + if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && + subsystem == CMD_SUBSYSTEM_LOWLEVEL) { + complete(&adapter->et_cmd_compl); + return 0; + } + if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || (opcode == OPCODE_COMMON_WRITE_OBJECT)) && (subsystem == CMD_SUBSYSTEM_COMMON)) { adapter->flash_status = compl_status; - complete(&adapter->flash_compl); + complete(&adapter->et_cmd_compl); } if (compl_status == MCC_STATUS_SUCCESS) { @@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, 0x3ea83c02, 0x4a110304}; int status; + if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) + return 0; + if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; @@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, be_mcc_notify(adapter); spin_unlock_bh(&adapter->mcc_lock); - if (!wait_for_completion_timeout(&adapter->flash_compl, + if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(60000))) status = -1; else @@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, be_mcc_notify(adapter); spin_unlock_bh(&adapter->mcc_lock); - if (!wait_for_completion_timeout(&adapter->flash_compl, - msecs_to_jiffies(40000))) + if (!wait_for_completion_timeout(&adapter->et_cmd_compl, + msecs_to_jiffies(40000))) status = -1; else status = adapter->flash_status; @@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, { struct be_mcc_wrb *wrb; struct be_cmd_req_loopback_test *req; + struct be_cmd_resp_loopback_test *resp; int status; spin_lock_bh(&adapter->mcc_lock); @@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); - req->hdr.timeout = cpu_to_le32(4); + req->hdr.timeout = cpu_to_le32(15); req->pattern = cpu_to_le64(pattern); req->src_port = cpu_to_le32(port_num); 
req->dest_port = cpu_to_le32(port_num); @@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, req->num_pkts = cpu_to_le32(num_pkts); req->loopback_type = cpu_to_le32(loopback_type); - status = be_mcc_notify_wait(adapter); - if (!status) { - struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); - status = le32_to_cpu(resp->status); - } + be_mcc_notify(adapter); + + spin_unlock_bh(&adapter->mcc_lock); + wait_for_completion(&adapter->et_cmd_compl); + resp = embedded_payload(wrb); + status = le32_to_cpu(resp->status); + + return status; err: spin_unlock_bh(&adapter->mcc_lock); return status; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index b5c238aa6861..3acf137b5784 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2744,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter) if (!BEx_chip(adapter)) adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | RSS_ENABLE_UDP_IPV6; + } else { + /* Disable RSS, if only default RX Q is created */ + adapter->rss_flags = RSS_ENABLE_NONE; + } - rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, - 128); - if (rc) { - adapter->rss_flags = 0; - return rc; - } + rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, + 128); + if (rc) { + adapter->rss_flags = RSS_ENABLE_NONE; + return rc; } /* First time posting */ @@ -3124,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter, { struct pci_dev *pdev = adapter->pdev; bool use_sriov = false; + int max_vfs; - if (BE3_chip(adapter) && sriov_want(adapter)) { - int max_vfs; + max_vfs = pci_sriov_get_totalvfs(pdev); - max_vfs = pci_sriov_get_totalvfs(pdev); + if (BE3_chip(adapter) && sriov_want(adapter)) { res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; use_sriov = res->max_vfs; } @@ -3159,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter, BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; res->max_rx_qs = res->max_rss_qs + 1; - res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1; + if (be_physfn(adapter)) + res->max_evt_qs = (max_vfs > 0) ? + BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS; + else + res->max_evt_qs = 1; res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) @@ -4205,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter) spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); - init_completion(&adapter->flash_compl); + init_completion(&adapter->et_cmd_compl); pci_save_state(adapter->pdev); return 0; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 05cd81aa9813..6530177d53e7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -428,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* If this was the last BD in the ring, start at the beginning again. 
*/ bdp = fec_enet_get_nextdesc(bdp, fep); + skb_tx_timestamp(skb); + fep->cur_tx = bdp; if (fep->cur_tx == fep->dirty_tx) @@ -436,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* Trigger transmission start */ writel(0, fep->hwp + FEC_X_DES_ACTIVE); - skb_tx_timestamp(skb); - return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 895450e9bb3c..ff2d806eaef7 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) e1000_release_phy_80003es2lan(hw); /* Disable IBIST slave mode (far-end loopback) */ - e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - &kum_reg_data); + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + if (ret_val) + return ret_val; kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, kum_reg_data); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 051d1583e211..d6570b2d5a6b 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6184,7 +6184,7 @@ static int __e1000_resume(struct pci_dev *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM static int e1000_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -6203,7 +6203,7 @@ static int e1000_resume(struct device *dev) return __e1000_resume(pdev); } -#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ #ifdef CONFIG_PM_RUNTIME static int e1000_runtime_suspend(struct device *dev) diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index da2be59505c0..20e71f4ca426 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, * it across the board. */ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); - if (ret_val) + if (ret_val) { /* If the first read fails, another entity may have * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. 
*/ - udelay(usec_interval); + if (usec_interval >= 1000) + msleep(usec_interval / 1000); + else + udelay(usec_interval); + } ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; if (phy_status & BMSR_LSTATUS) break; if (usec_interval >= 1000) - mdelay(usec_interval / 1000); + msleep(usec_interval / 1000); else udelay(usec_interval); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 9ce07f3ef62d..359f6e60320d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev) { struct ixgbe_adapter *adapter = pci_get_drvdata(dev); int err; +#ifdef CONFIG_PCI_IOV u32 current_flags = adapter->flags; +#endif err = ixgbe_disable_sriov(adapter); diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 7354960b583b..c4eeb69a5bee 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus) if (time_is_before_jiffies(end)) ++timedout; } else { + /* wait_event_timeout does not guarantee a delay of at + * least one whole jiffie, so timeout must be no less + * than two. + */ + if (timeout < 2) + timeout = 2; wait_event_timeout(dev->smi_busy_wait, orion_mdio_smi_is_done(dev), timeout); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 3010abb55fbd..32058614151a 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -1602,13 +1602,13 @@ netxen_process_lro(struct netxen_adapter *adapter, u32 seq_number; u8 vhdr_len = 0; - if (unlikely(ring > adapter->max_rds_rings)) + if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = netxen_get_lro_sts_refhandle(sts_data0); - if (unlikely(index > rds_ring->num_desc)) + if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 4afdef0cc175..35d48766d842 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -493,6 +493,7 @@ struct qlcnic_hardware_context { struct qlcnic_mailbox *mailbox; u8 extend_lb_time; u8 phys_port_id[ETH_ALEN]; + u8 lb_mode; }; struct qlcnic_adapter_stats { @@ -584,6 +585,8 @@ struct qlcnic_host_tx_ring { dma_addr_t phys_addr; dma_addr_t hw_cons_phys_addr; struct netdev_queue *txq; + /* Lock to protect Tx descriptors cleanup */ + spinlock_t tx_clean_lock; } ____cacheline_internodealigned_in_smp; /* @@ -815,6 +818,7 @@ struct qlcnic_mac_vlan_list { #define QLCNIC_ILB_MODE 0x1 #define QLCNIC_ELB_MODE 0x2 +#define QLCNIC_LB_MODE_MASK 0x3 #define QLCNIC_LINKEVENT 0x1 #define QLCNIC_LB_RESPONSE 0x2 @@ -1100,7 +1104,6 @@ struct qlcnic_adapter { struct qlcnic_filter_hash rx_fhash; struct list_head vf_mc_list; - spinlock_t tx_clean_lock; spinlock_t mac_learn_lock; /* spinlock for catching rcv filters for eswitch traffic */ spinlock_t rx_mac_learn_lock; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index b3fd1605773e..03eb2ad9611a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1685,12 +1685,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) } } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); - /* Make sure carrier is off and queue is stopped during loopback */ - if (netif_running(netdev)) { - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - } - ret = qlcnic_do_lb_test(adapter, mode); qlcnic_83xx_clear_lb_mode(adapter, mode); @@ -2122,6 +2116,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, ahw->link_autoneg = MSB(MSW(data[3])); ahw->module_type = MSB(LSW(data[3])); ahw->has_link_events = 1; + ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK; qlcnic_advert_link_change(adapter, link_status); } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index e9c21e5d0ca9..c4262c23ed7c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, struct qlcnic_skb_frag *buffrag; int i, j; + spin_lock(&tx_ring->tx_clean_lock); + cmd_buf = tx_ring->cmd_buf_arr; for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; @@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, } cmd_buf++; } + + spin_unlock(&tx_ring->tx_clean_lock); } void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index a215e0f69335..6373f6022486 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -689,6 +689,10 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) adapter->ahw->linkup = 0; netif_carrier_off(netdev); } else if (!adapter->ahw->linkup && linkup) { + /* Do not advertise Link up if the port is in loopback mode */ + if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) + return; + netdev_info(netdev, "NIC Link is up\n"); adapter->ahw->linkup = 1; netif_carrier_on(netdev); @@ -778,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, struct net_device *netdev = adapter->netdev; struct qlcnic_skb_frag *frag; - if (!spin_trylock(&adapter->tx_clean_lock)) + if (!spin_trylock(&tx_ring->tx_clean_lock)) return 1; sw_consumer = tx_ring->sw_consumer; @@ -807,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, break; } + tx_ring->sw_consumer = sw_consumer; + if (count && netif_running(netdev)) { - tx_ring->sw_consumer = sw_consumer; smp_mb(); if (netif_tx_queue_stopped(tx_ring->txq) && netif_carrier_ok(netdev)) { @@ -834,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, */ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); done = (sw_consumer == hw_consumer); - spin_unlock(&adapter->tx_clean_lock); + + spin_unlock(&tx_ring->tx_clean_lock); return done; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index d131ec1321e8..eeec83a0e664 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1757,7 +1757,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) if (qlcnic_sriov_vf_check(adapter)) qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); smp_mb(); - spin_lock(&adapter->tx_clean_lock); 
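A reduced userspace analogue of the locking change in the qlcnic hunks above and below: the single adapter-wide tx_clean_lock becomes one lock per Tx ring, so cleaning one ring no longer serializes against the others. pthread mutexes stand in for the kernel spinlocks, and the structures are trimmed to what the example needs.

/* Illustrative only: per-ring locking instead of one adapter-wide lock. */
#include <pthread.h>
#include <stdio.h>

#define NUM_TX_RINGS 4

struct tx_ring {
	int sw_consumer;
	/* Per-ring lock: protects descriptor cleanup for this ring only,
	 * where the old code held adapter->tx_clean_lock around all rings.
	 */
	pthread_mutex_t tx_clean_lock;
};

struct adapter {
	struct tx_ring tx_ring[NUM_TX_RINGS];
};

static void release_tx_buffers(struct tx_ring *ring)
{
	pthread_mutex_lock(&ring->tx_clean_lock);
	ring->sw_consumer = 0;		/* reset this ring only */
	pthread_mutex_unlock(&ring->tx_clean_lock);
}

int main(void)
{
	struct adapter ad;
	int i;

	for (i = 0; i < NUM_TX_RINGS; i++)
		pthread_mutex_init(&ad.tx_ring[i].tx_clean_lock, NULL);

	for (i = 0; i < NUM_TX_RINGS; i++)
		release_tx_buffers(&ad.tx_ring[i]);

	printf("released buffers on %d rings\n", NUM_TX_RINGS);
	return 0;
}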
netif_carrier_off(netdev); adapter->ahw->linkup = 0; netif_tx_disable(netdev); @@ -1778,7 +1777,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) for (ring = 0; ring < adapter->drv_tx_rings; ring++) qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); - spin_unlock(&adapter->tx_clean_lock); } /* Usage: During suspend and firmware recovery module */ @@ -2173,6 +2171,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, } memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); tx_ring->cmd_buf_arr = cmd_buf_arr; + spin_lock_init(&tx_ring->tx_clean_lock); } if (qlcnic_83xx_check(adapter) || @@ -2300,7 +2299,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) rwlock_init(&adapter->ahw->crb_lock); mutex_init(&adapter->ahw->mem_lock); - spin_lock_init(&adapter->tx_clean_lock); INIT_LIST_HEAD(&adapter->mac_list); qlcnic_register_dcb(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 98b621fb1227..d14d9a139eef 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -81,9 +81,12 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, if (qlcnic_83xx_pf_check(adapter)) num_macs = 1; + info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; + if (adapter->ahw->pci_func == func) { info->min_tx_bw = 0; info->max_tx_bw = MAX_BW; + temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs; info->max_rx_ucast_mac_filters = temp; temp = res->num_tx_mac_filters - num_macs * num_vfs; @@ -92,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, temp = res->num_rx_mcast_mac_filters - temp; info->max_rx_mcast_mac_filters = temp; + info->max_tx_ques = res->num_tx_queues - sriov->num_vfs; } else { id = qlcnic_sriov_func_to_index(adapter, func); if (id < 0) @@ -99,10 +103,13 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, vp = sriov->vf_info[id].vp; info->min_tx_bw = vp->min_tx_bw; info->max_tx_bw = vp->max_tx_bw; + info->max_rx_ucast_mac_filters = num_macs; info->max_tx_mac_filters = num_macs; temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC; info->max_rx_mcast_mac_filters = temp; + + info->max_tx_ques = QLCNIC_SINGLE_RING; } info->max_rx_ip_addr = res->num_destip / max; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 216141028125..b8e3a4ce24b0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; - if (netif_msg_hw(priv)) { - if (priv->dma_cap.time_stamp) { - pr_debug("IEEE 1588-2002 Time Stamp supported\n"); - priv->adv_ts = 0; - } - if (priv->dma_cap.atime_stamp && priv->extend_desc) { - pr_debug - ("IEEE 1588-2008 Advanced Time Stamp supported\n"); - priv->adv_ts = 1; - } - } + priv->adv_ts = 0; + if (priv->dma_cap.atime_stamp && priv->extend_desc) + priv->adv_ts = 1; + + if (netif_msg_hw(priv) && priv->dma_cap.time_stamp) + pr_debug("IEEE 1588-2002 Time Stamp supported\n"); + + if (netif_msg_hw(priv) && priv->adv_ts) + pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n"); priv->hw->ptp = &stmmac_ptp; priv->hwts_tx_en = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b8b0eeed0f92..7680581ebe12 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) priv->hw->ptp->config_addend(priv->ioaddr, addend); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; } @@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 243fffbe18e8..e8bb77d25d98 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, /* set speed_in input in case RMII mode is used in 100Mbps */ if (phy->speed == 100) mac_control |= BIT(15); + else if (phy->speed == 10) + mac_control |= BIT(18); /* In Band mode */ *link = true; } else { @@ -2126,7 +2128,7 @@ static int cpsw_probe(struct platform_device *pdev) while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { for (i = res->start; i <= res->end; i++) { if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, - dev_name(priv->dev), priv)) { + dev_name(&pdev->dev), priv)) { dev_err(priv->dev, "error attaching irq\n"); goto clean_ale_ret; } diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 3169252613fa..5d78c1d08abd 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case HDLCDRVCTL_CALIBRATE: if(!capable(CAP_SYS_RAWIO)) return -EPERM; + if (bi.data.calibrate > INT_MAX / s->par.bitrate) + return -EINVAL; s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; return 0; diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 1971411574db..61dd2447e1bb 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; case SIOCYAMGCFG: + memset(&yi, 0, sizeof(yi)); yi.cfg.mask = 0xffffffff; yi.cfg.iobase = yp->iobase; yi.cfg.irq = yp->irq; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f80bd0c90f1e..7756118c2f0a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -260,9 +260,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, struct sk_buff *skb; net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; - if (!net) { - netdev_err(net, "got receive callback but net device" - " not initialized yet\n"); + if (!net || net->reg_state != NETREG_REGISTERED) { packet->status = NVSP_STAT_FAIL; return 0; } @@ -434,19 +432,11 @@ static int netvsc_probe(struct hv_device *dev, SET_ETHTOOL_OPS(net, ðtool_ops); SET_NETDEV_DEV(net, &dev->device); - ret = register_netdev(net); - if (ret != 0) { - pr_err("Unable to register netdev.\n"); - free_netdev(net); - goto out; - } - /* Notify the netvsc driver of the new device */ device_info.ring_size = ring_size; ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - unregister_netdev(net); free_netdev(net); hv_set_drvdata(dev, 
NULL); return ret; @@ -455,7 +445,13 @@ static int netvsc_probe(struct hv_device *dev, netif_carrier_on(net); -out: + ret = register_netdev(net); + if (ret != 0) { + pr_err("Unable to register netdev.\n"); + rndis_filter_device_remove(dev); + free_netdev(net); + } + return ret; } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 94198366de7f..09ababe54a5b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -689,8 +689,19 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, netdev_features_t features) { struct macvlan_dev *vlan = netdev_priv(dev); + netdev_features_t mask; - return features & (vlan->set_features | ~MACVLAN_FEATURES); + features |= NETIF_F_ALL_FOR_ALL; + features &= (vlan->set_features | ~MACVLAN_FEATURES); + mask = features; + + features = netdev_increment_features(vlan->lowerdev->features, + features, + mask); + if (!vlan->fwd_priv) + features |= NETIF_F_LLTX; + + return features; } static const struct ethtool_ops macvlan_ethtool_ops = { @@ -1009,9 +1020,8 @@ static int macvlan_device_event(struct notifier_block *unused, break; case NETDEV_FEAT_CHANGE: list_for_each_entry(vlan, &port->vlans, list) { - vlan->dev->features = dev->features & MACVLAN_FEATURES; vlan->dev->gso_max_size = dev->gso_max_size; - netdev_features_change(vlan->dev); + netdev_update_features(vlan->dev); } break; case NETDEV_UNREGISTER: diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 19da5ab615bd..76e8936ab9e4 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -533,10 +533,8 @@ phy_err: int phy_start_interrupts(struct phy_device *phydev) { atomic_set(&phydev->irq_disable, 0); - if (request_irq(phydev->irq, phy_interrupt, - IRQF_SHARED, - "phy_interrupt", - phydev) < 0) { + if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt", + phydev) < 0) { pr_warn("%s: Can't get IRQ %d (PHY)\n", phydev->bus->name, phydev->irq); phydev->irq = PHY_POLL; diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 85e4a01670f0..47b0f732b0b1 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM module will be called cdc_mbim. config USB_NET_DM9601 - tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" + tristate "Davicom DM96xx based USB 10/100 ethernet devices" depends on USB_USBNET select CRC32 help - This option adds support for Davicom DM9601 based USB 1.1 - 10/100 Ethernet adapters. + This option adds support for Davicom DM9601/DM9620/DM9621A + based USB 10/100 Ethernet adapters. config USB_NET_SR9700 tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices" diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index c6867f926cff..14aa48fa8d7e 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -1,5 +1,5 @@ /* - * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices + * Davicom DM96xx USB 10/100Mbps ethernet devices * * Peter Korsgaard <jacmet@sunsite.dk> * @@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->ethtool_ops = &dm9601_ethtool_ops; dev->net->hard_header_len += DM_TX_OVERHEAD; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; - dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; + + /* dm9620/21a require room for 4 byte padding, even in dm9601 + * mode, so we need +1 to be able to receive full size + * ethernet frames. 
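The dm9601 tx_fixup change that follows applies a simple padding rule for the DM962x errata: grow the total USB transfer until it is even and not an exact multiple of the bulk maxpacket size. A standalone sketch of that arithmetic, assuming the driver's 2-byte length header and an illustrative 512-byte maxpacket:

/* Illustrative only: the DM962x tx padding rule. DM_TX_OVERHEAD matches
 * the driver's 2-byte length header; 512 is an assumed bulk maxpacket.
 */
#include <stdio.h>

#define DM_TX_OVERHEAD 2

static unsigned int dm_tx_pad(unsigned int skb_len, unsigned int maxpacket)
{
	unsigned int len = skb_len + DM_TX_OVERHEAD;

	/* Avoid odd total lengths and exact multiples of maxpacket, which
	 * can desynchronize the tx FIFO after a bulk transfer retry.
	 */
	while ((len & 1) || !(len % maxpacket))
		len++;

	len -= DM_TX_OVERHEAD;	/* header doesn't count toward the length */
	return len - skb_len;	/* bytes of zero padding to append */
}

int main(void)
{
	unsigned int lens[] = { 60, 510, 511, 512, 1514 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("skb len %4u -> pad %u\n", lens[i],
		       dm_tx_pad(lens[i], 512));
	return 0;
}

For a 510-byte frame, for example, the 2-byte header would bring the transfer to exactly 512 bytes, so two padding bytes are appended and the transfer goes out as 514 bytes.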
+ */ + dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1; dev->mii.dev = dev->net; dev->mii.mdio_read = dm9601_mdio_read; @@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb) static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { - int len; + int len, pad; /* format: b1: packet length low @@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, b3..n: packet data */ - len = skb->len; + len = skb->len + DM_TX_OVERHEAD; + + /* workaround for dm962x errata with tx fifo getting out of + * sync if a USB bulk transfer retry happens right after a + * packet with odd / maxpacket length by adding up to 3 bytes + * padding. + */ + while ((len & 1) || !(len % dev->maxpacket)) + len++; - if (skb_headroom(skb) < DM_TX_OVERHEAD) { + len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */ + pad = len - skb->len; + + if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) { struct sk_buff *skb2; - skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags); + skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags); dev_kfree_skb_any(skb); skb = skb2; if (!skb) @@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, __skb_push(skb, DM_TX_OVERHEAD); - /* usbnet adds padding if length is a multiple of packet size - if so, adjust length value in header */ - if ((skb->len % dev->maxpacket) == 0) - len++; + if (pad) { + memset(skb->data + skb->len, 0, pad); + __skb_put(skb, pad); + } skb->data[0] = len; skb->data[1] = len >> 8; @@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev) } static const struct driver_info dm9601_info = { - .description = "Davicom DM9601 USB Ethernet", + .description = "Davicom DM96xx USB 10/100 Ethernet", .flags = FLAG_ETHER | FLAG_LINK_INTR, .bind = dm9601_bind, .rx_fixup = dm9601_rx_fixup, @@ -594,6 +610,10 @@ static const struct usb_device_id products[] = { USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */ .driver_info = (unsigned long)&dm9601_info, }, + { + USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */ + .driver_info = (unsigned long)&dm9601_info, + }, {}, // END }; @@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = { module_usb_driver(dm9601_driver); MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>"); -MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices"); +MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 86292e6aaf49..1a482344b3f5 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -185,7 +185,6 @@ enum rx_ctrl_state{ #define BM_REQUEST_TYPE (0xa1) #define B_NOTIFICATION (0x20) #define W_VALUE (0x0) -#define W_INDEX (0x2) #define W_LENGTH (0x2) #define B_OVERRUN (0x1<<6) @@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb) struct uart_icount *icount; struct hso_serial_state_notification *serial_state_notification; struct usb_device *usb; + int if_num; /* Sanity checks */ if (!serial) @@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb) handle_usb_error(status, __func__, serial->parent); return; } + + /* tiocmget is only supported on HSO_PORT_MODEM */ tiocmget = serial->tiocmget; if (!tiocmget) return; + BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM); + usb = serial->parent->usb; + if_num = 
serial->parent->interface->altsetting->desc.bInterfaceNumber; + + /* wIndex should be the USB interface number of the port to which the + * notification applies, which should always be the Modem port. + */ serial_state_notification = &tiocmget->serial_state_notification; if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || serial_state_notification->bNotification != B_NOTIFICATION || le16_to_cpu(serial_state_notification->wValue) != W_VALUE || - le16_to_cpu(serial_state_notification->wIndex) != W_INDEX || + le16_to_cpu(serial_state_notification->wIndex) != if_num || le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { dev_warn(&usb->dev, "hso received invalid serial state notification\n"); diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index aea68bc33583..36ff0019aa32 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -116,7 +116,6 @@ enum { struct mcs7830_data { u8 multi_filter[8]; u8 config; - u8 link_counter; }; static const char driver_name[] = "MOSCHIP usb-ethernet driver"; @@ -560,26 +559,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb) { u8 *buf = urb->transfer_buffer; bool link, link_changed; - struct mcs7830_data *data = mcs7830_get_data(dev); if (urb->actual_length < 16) return; - link = !(buf[1] & 0x20); + link = !(buf[1] == 0x20); link_changed = netif_carrier_ok(dev->net) != link; if (link_changed) { - data->link_counter++; - /* - track link state 20 times to guard against erroneous - link state changes reported sometimes by the chip - */ - if (data->link_counter > 20) { - data->link_counter = 0; - usbnet_link_change(dev, link, 0); - netdev_dbg(dev->net, "Link Status is: %d\n", link); - } - } else - data->link_counter = 0; + usbnet_link_change(dev, link, 0); + netdev_dbg(dev->net, "Link Status is: %d\n", link); + } } static const struct driver_info moschip_info = { diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c51a98867a40..7b172408cff0 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1788,16 +1788,17 @@ static int virtnet_restore(struct virtio_device *vdev) if (err) return err; - if (netif_running(vi->dev)) + if (netif_running(vi->dev)) { + for (i = 0; i < vi->curr_queue_pairs; i++) + if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); + for (i = 0; i < vi->max_queue_pairs; i++) virtnet_napi_enable(&vi->rq[i]); + } netif_device_attach(vi->dev); - for (i = 0; i < vi->curr_queue_pairs; i++) - if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); - mutex_lock(&vi->config_lock); vi->config_enable = true; mutex_unlock(&vi->config_lock); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ab2e92eec949..481f85d604a4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2466,7 +2466,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, /* update header length based on lower device */ dev->hard_header_len = lowerdev->hard_header_len + (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); - } + } else if (use_ipv6) + vxlan->flags |= VXLAN_F_IPV6; if (data[IFLA_VXLAN_TOS]) vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index 857ede3a999c..741b38ddcb37 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -77,9 +77,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, mask2 |= ATH9K_INT_CST; if (isr2 & AR_ISR_S2_TSFOOR) mask2 |= ATH9K_INT_TSFOOR; + + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { + REG_WRITE(ah, AR_ISR_S2, isr2); + isr &= ~AR_ISR_BCNMISC; + } } - isr = REG_READ(ah, AR_ISR_RAC); + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) + isr = REG_READ(ah, AR_ISR_RAC); + if (isr == 0xffffffff) { *masked = 0; return false; @@ -98,11 +105,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, *masked |= ATH9K_INT_TX; - s0_s = REG_READ(ah, AR_ISR_S0_S); + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) { + s0_s = REG_READ(ah, AR_ISR_S0_S); + s1_s = REG_READ(ah, AR_ISR_S1_S); + } else { + s0_s = REG_READ(ah, AR_ISR_S0); + REG_WRITE(ah, AR_ISR_S0, s0_s); + s1_s = REG_READ(ah, AR_ISR_S1); + REG_WRITE(ah, AR_ISR_S1, s1_s); + + isr &= ~(AR_ISR_TXOK | + AR_ISR_TXDESC | + AR_ISR_TXERR | + AR_ISR_TXEOL); + } + ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); - - s1_s = REG_READ(ah, AR_ISR_S1_S); ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); } @@ -115,13 +134,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, *masked |= mask2; } - if (AR_SREV_9100(ah)) - return true; - - if (isr & AR_ISR_GENTMR) { + if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) { u32 s5_s; - s5_s = REG_READ(ah, AR_ISR_S5_S); + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) { + s5_s = REG_READ(ah, AR_ISR_S5_S); + } else { + s5_s = REG_READ(ah, AR_ISR_S5); + } + ah->intr_gen_timer_trigger = MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); @@ -134,8 +155,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, if ((s5_s & AR_ISR_S5_TIM_TIMER) && !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) *masked |= ATH9K_INT_TIM_TIMER; + + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { + REG_WRITE(ah, AR_ISR_S5, s5_s); + isr &= ~AR_ISR_GENTMR; + } } + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { + REG_WRITE(ah, AR_ISR, isr); + REG_READ(ah, AR_ISR); + } + + if (AR_SREV_9100(ah)) + return true; + if (sync_cause) { if (sync_cause_p) *sync_cause_p = sync_cause; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 9a2657fdd9cc..608d739d1378 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif) struct ath9k_vif_iter_data *iter_data = data; int i; - for (i = 0; i < ETH_ALEN; i++) - iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); + if (iter_data->hw_macaddr != NULL) { + for (i = 0; i < ETH_ALEN; i++) + iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); + } else { + iter_data->hw_macaddr = mac; + } } -static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, +static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv, struct ieee80211_vif *vif) { struct ath_common *common = 
ath9k_hw_common(priv->ah); struct ath9k_vif_iter_data iter_data; /* - * Use the hardware MAC address as reference, the hardware uses it - * together with the BSSID mask when matching addresses. + * Pick the MAC address of the first interface as the new hardware + * MAC address. The hardware will use it together with the BSSID mask + * when matching addresses. */ - iter_data.hw_macaddr = common->macaddr; + iter_data.hw_macaddr = NULL; memset(&iter_data.mask, 0xff, ETH_ALEN); if (vif) @@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, ath9k_htc_bssid_iter, &iter_data); memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); + + if (iter_data.hw_macaddr) + memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN); + ath_hw_setbssidmask(common); } @@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, goto out; } - ath9k_htc_set_bssid_mask(priv, vif); + ath9k_htc_set_mac_bssid_mask(priv, vif); priv->vif_slot |= (1 << avp->index); priv->nvifs++; @@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, ath9k_htc_set_opmode(priv); - ath9k_htc_set_bssid_mask(priv, vif); + ath9k_htc_set_mac_bssid_mask(priv, vif); /* * Stop ANI only if there are no associated station interfaces. diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 173a889f9dbb..21b764ba6400 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -994,8 +994,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(ah); /* - * Use the hardware MAC address as reference, the hardware uses it - * together with the BSSID mask when matching addresses. + * Pick the MAC address of the first interface as the new hardware + * MAC address. The hardware will use it together with the BSSID mask + * when matching addresses. */ memset(iter_data, 0, sizeof(*iter_data)); memset(&iter_data->mask, 0xff, ETH_ALEN); diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 8707d1a94995..d7aa165fe677 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -738,6 +738,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) }; int index = rtlpci->rx_ring[rx_queue_idx].idx; + if (rtlpci->driver_is_goingto_unload) + return; /*RX NORMAL PKT */ while (count--) { /*rx descriptor */ @@ -1634,6 +1636,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) */ set_hal_stop(rtlhal); + rtlpci->driver_is_goingto_unload = true; rtlpriv->cfg->ops->disable_interrupt(hw); cancel_work_sync(&rtlpriv->works.lps_change_work); @@ -1651,7 +1654,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) ppsc->rfchange_inprogress = true; spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); - rtlpci->driver_is_goingto_unload = true; rtlpriv->cfg->ops->hw_disable(hw); /* some things are not needed if firmware not available */ if (!rtlpriv->max_fw_size) diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index ba30a6d9fefa..c955fc39d69a 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -101,6 +101,13 @@ struct xenvif_rx_meta { #define MAX_PENDING_REQS 256 +/* It's possible for an skb to have a maximal number of frags + * but still be less than MAX_BUFFER_OFFSET in size. Thus the + * worst-case number of copy operations is MAX_SKB_FRAGS per + * ring slot. 
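To put numbers on the sizing comment above, here is a small sketch assuming the common 4 KiB-page values of MAX_SKB_FRAGS = 17 and a 256-entry RX ring; the real constants come from the kernel and Xen headers.

/* Illustrative only: old vs. new worst-case grant-copy array sizing. */
#include <stdio.h>

#define MAX_SKB_FRAGS		17	/* assumed: typical 4 KiB-page value */
#define XEN_NETIF_RX_RING_SIZE	256	/* assumed: typical ring size */

int main(void)
{
	/* Old bound: each head/fragment page needs at most 2 copy ops
	 * because it can straddle two frontend buffers.
	 */
	unsigned int old_ops = 2 * XEN_NETIF_RX_RING_SIZE;

	/* New bound: an skb with a maximal number of frags can still fit
	 * in one ring slot, so each slot may need MAX_SKB_FRAGS copy ops.
	 */
	unsigned int new_ops = MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE;

	printf("old grant_copy_op[] entries: %u\n", old_ops);
	printf("MAX_GRANT_COPY_OPS:          %u\n", new_ops);
	return 0;
}

With the larger bound the grant-copy array is no longer embedded in struct xenvif but allocated separately with vmalloc() in xenvif_alloc(), as the interface.c hunk below shows.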
+ */ +#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE) + struct xenvif { /* Unique identifier for this interface. */ domid_t domid; @@ -141,13 +148,13 @@ struct xenvif { */ bool rx_event; - /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each - * head/fragment page uses 2 copy operations because it - * straddles two buffers in the frontend. - */ - struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; - struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; + /* This array is allocated seperately as it is large */ + struct gnttab_copy *grant_copy_op; + /* We create one meta structure per ring request we consume, so + * the maximum number is the same as the ring size. + */ + struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE]; u8 fe_dev_addr[6]; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 1dcb9606e6e0..b9de31ea7fc4 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -34,6 +34,7 @@ #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> +#include <linux/vmalloc.h> #include <xen/events.h> #include <asm/xen/hypercall.h> @@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, SET_NETDEV_DEV(dev, parent); vif = netdev_priv(dev); + + vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) * + MAX_GRANT_COPY_OPS); + if (vif->grant_copy_op == NULL) { + pr_warn("Could not allocate grant copy space for %s\n", name); + free_netdev(dev); + return ERR_PTR(-ENOMEM); + } + vif->domid = domid; vif->handle = handle; vif->can_sg = 1; @@ -488,6 +498,7 @@ void xenvif_free(struct xenvif *vif) unregister_netdev(vif->dev); + vfree(vif->grant_copy_op); free_netdev(vif->dev); module_put(THIS_MODULE); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 611aebee4583..4f81ac0e2f0a 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -524,7 +524,7 @@ static void xenvif_rx_action(struct xenvif *vif) if (!npo.copy_prod) goto done; - BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); + BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); while ((skb = __skb_dequeue(&rxq)) != NULL) { @@ -1108,8 +1108,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, goto out; if (!skb_partial_csum_set(skb, off, - offsetof(struct tcphdr, check))) + offsetof(struct tcphdr, check))) { + err = -EPROTO; goto out; + } if (recalculate_partial_csum) tcp_hdr(skb)->check = @@ -1126,8 +1128,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, goto out; if (!skb_partial_csum_set(skb, off, - offsetof(struct udphdr, check))) + offsetof(struct udphdr, check))) { + err = -EPROTO; goto out; + } if (recalculate_partial_csum) udp_hdr(skb)->check = @@ -1249,8 +1253,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, goto out; if (!skb_partial_csum_set(skb, off, - offsetof(struct tcphdr, check))) + offsetof(struct tcphdr, check))) { + err = -EPROTO; goto out; + } if (recalculate_partial_csum) tcp_hdr(skb)->check = @@ -1267,8 +1273,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, goto out; if (!skb_partial_csum_set(skb, off, - offsetof(struct udphdr, check))) + offsetof(struct udphdr, check))) { + err = -EPROTO; goto out; + } if (recalculate_partial_csum) udp_hdr(skb)->check = |
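Finally, the checksum_setup hunks just above make the failure path of skb_partial_csum_set() report -EPROTO explicitly instead of falling through to the exit label with whatever err already held. A reduced sketch of that error-propagation pattern, with maybe_pull() and csum_set() as hypothetical stand-ins:

/* Illustrative only: set the error code on every failing branch before
 * jumping to the exit label, so a later failure cannot be masked by an
 * earlier success.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool maybe_pull(void) { return true;  }	/* earlier step succeeds */
static bool csum_set(void)   { return false; }	/* this step fails */

static int checksum_setup(void)
{
	int err = -EPROTO;		/* default: nothing parsed yet */

	if (!maybe_pull())
		goto out;
	err = 0;			/* the pull step succeeded */

	if (!csum_set()) {
		err = -EPROTO;		/* report the failure explicitly */
		goto out;
	}
out:
	return err;
}

int main(void)
{
	printf("checksum_setup() = %d\n", checksum_setup());
	return 0;
}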