Diffstat (limited to 'drivers/net')
134 files changed, 1241 insertions, 727 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 004919aea5fb..f88cb097b022 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5053,15 +5053,19 @@ int bond_create(struct net *net, const char *name) bond_dev->rtnl_link_ops = &bond_link_ops; res = register_netdevice(bond_dev); + if (res < 0) { + free_netdev(bond_dev); + rtnl_unlock(); + + return res; + } netif_carrier_off(bond_dev); bond_work_init_all(bond); rtnl_unlock(); - if (res < 0) - free_netdev(bond_dev); - return res; + return 0; } static int __net_init bond_net_init(struct net *net) diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index b43b51646b11..f0f9138e967f 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -456,11 +456,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev, return err; err = register_netdevice(bond_dev); - - netif_carrier_off(bond_dev); if (!err) { struct bonding *bond = netdev_priv(bond_dev); + netif_carrier_off(bond_dev); bond_work_init_all(bond); } diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 47d65b77caf7..7c17b0f705ec 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1268,6 +1268,9 @@ static int ksz8795_switch_init(struct ksz_device *dev) return -ENOMEM; } + /* set the real number of ports */ + dev->ds->num_ports = dev->port_cnt; + return 0; } diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 9a51b8a4de5d..4a9239b2c2e4 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -974,23 +974,6 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port, PORT_MIRROR_SNIFFER, false); } -static void ksz9477_phy_setup(struct ksz_device *dev, int port, - struct phy_device *phy) -{ - /* Only apply to port with PHY. */ - if (port >= dev->phy_port_cnt) - return; - - /* The MAC actually cannot run in 1000 half-duplex mode. */ - phy_remove_link_mode(phy, - ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - /* PHY does not support gigabit. */ - if (!(dev->features & GBIT_SUPPORT)) - phy_remove_link_mode(phy, - ETHTOOL_LINK_MODE_1000baseT_Full_BIT); -} - static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data) { bool gbit; @@ -1588,6 +1571,9 @@ static int ksz9477_switch_init(struct ksz_device *dev) return -ENOMEM; } + /* set the real number of ports */ + dev->ds->num_ports = dev->port_cnt; + return 0; } @@ -1600,7 +1586,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .get_port_addr = ksz9477_get_port_addr, .cfg_port_member = ksz9477_cfg_port_member, .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table, - .phy_setup = ksz9477_phy_setup, .port_setup = ksz9477_port_setup, .r_mib_cnt = ksz9477_r_mib_cnt, .r_mib_pkt = ksz9477_r_mib_pkt, @@ -1614,7 +1599,29 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { int ksz9477_switch_register(struct ksz_device *dev) { - return ksz_switch_register(dev, &ksz9477_dev_ops); + int ret, i; + struct phy_device *phydev; + + ret = ksz_switch_register(dev, &ksz9477_dev_ops); + if (ret) + return ret; + + for (i = 0; i < dev->phy_port_cnt; ++i) { + if (!dsa_is_user_port(dev->ds, i)) + continue; + + phydev = dsa_to_port(dev->ds, i)->slave->phydev; + + /* The MAC actually cannot run in 1000 half-duplex mode. */ + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + /* PHY does not support gigabit. 
*/ + if (!(dev->features & GBIT_SUPPORT)) + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT); + } + return ret; } EXPORT_SYMBOL(ksz9477_switch_register); diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index 7d050fab0889..7951f52d860d 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -79,6 +79,7 @@ MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id); static const struct of_device_id ksz9477_dt_ids[] = { { .compatible = "microchip,ksz9477" }, { .compatible = "microchip,ksz9897" }, + { .compatible = "microchip,ksz9893" }, { .compatible = "microchip,ksz9567" }, {}, }; diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index fd1d6676ae4f..7b6c0dce7536 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -358,8 +358,6 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) /* setup slave port */ dev->dev_ops->port_setup(dev, port, false); - if (dev->dev_ops->phy_setup) - dev->dev_ops->phy_setup(dev, port, phy); /* port_stp_state_set() will be called after to enable the port so * there is no need to do anything. diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index f2c9bb68fd33..7d11dd32ec0d 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -119,8 +119,6 @@ struct ksz_dev_ops { u32 (*get_port_addr)(int port, int offset); void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); - void (*phy_setup)(struct ksz_device *dev, int port, - struct phy_device *phy); void (*port_cleanup)(struct ksz_device *dev, int port); void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 7627ea61e0ea..fee16c947c2e 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -664,8 +664,11 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, const struct phylink_link_state *state) { struct mv88e6xxx_chip *chip = ds->priv; + struct mv88e6xxx_port *p; int err; + p = &chip->ports[port]; + /* FIXME: is this the correct test? If we're in fixed mode on an * internal port, why should we process this any different from * PHY mode? On the other hand, the port may be automedia between @@ -675,10 +678,14 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, return; mv88e6xxx_reg_lock(chip); - /* FIXME: should we force the link down here - but if we do, how - * do we restore the link force/unforce state? The driver layering - * gets in the way. + /* In inband mode, the link may come up at any time while the link + * is not forced down. Force the link down while we reconfigure the + * interface mode. 
*/ + if (mode == MLO_AN_INBAND && p->interface != state->interface && + chip->info->ops->port_set_link) + chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN); + err = mv88e6xxx_port_config_interface(chip, port, state->interface); if (err && err != -EOPNOTSUPP) goto err_unlock; @@ -691,6 +698,15 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, if (err > 0) err = 0; + /* Undo the forced down state above after completing configuration + * irrespective of its state on entry, which allows the link to come up. + */ + if (mode == MLO_AN_INBAND && p->interface != state->interface && + chip->info->ops->port_set_link) + chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); + + p->interface = state->interface; + err_unlock: mv88e6xxx_reg_unlock(chip); diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index e5430cf2ad71..6476524e8239 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -232,6 +232,7 @@ struct mv88e6xxx_port { u64 atu_full_violation; u64 vtu_member_violation; u64 vtu_miss_violation; + phy_interface_t interface; u8 cmode; bool mirror_ingress; bool mirror_egress; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index ed5b465bc664..992fedbe4ce3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -64,6 +64,7 @@ struct aq_hw_caps_s { u8 rx_rings; bool flow_control; bool is_64_dma; + u32 quirks; u32 priv_data_len; }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 4435c6374f7e..7c7bf6bf163f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -415,6 +415,15 @@ int aq_nic_init(struct aq_nic_s *self) self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) { self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX; err = aq_phy_init(self->aq_hw); + + /* Disable the PTP on NICs where it's known to cause datapath + * problems. + * Ideally this should have been done by PHY provisioning, but + * many units have been shipped with enabled PTP block already. + */ + if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP) + if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX) + aq_phy_disable_ptp(self->aq_hw); } for (i = 0U; i < self->aq_vecs; i++) { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 2ab003065e62..439ce9692dac 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -81,6 +81,8 @@ struct aq_nic_cfg_s { #define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U #define AQ_NIC_FLAG_ERR_HW 0x80000000U +#define AQ_NIC_QUIRK_BAD_PTP BIT(0) + #define AQ_NIC_WOL_MODES (WAKE_MAGIC |\ WAKE_PHY) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c index 51ae921e3e1f..949ac2351701 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c @@ -1,10 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only -/* aQuantia Corporation Network Driver - * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2018-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. 
*/ #include "aq_phy.h" +#define HW_ATL_PTP_DISABLE_MSK BIT(10) + bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw) { int err = 0; @@ -145,3 +149,24 @@ bool aq_phy_init(struct aq_hw_s *aq_hw) return true; } + +void aq_phy_disable_ptp(struct aq_hw_s *aq_hw) +{ + static const u16 ptp_registers[] = { + 0x031e, + 0x031d, + 0x031c, + 0x031b, + }; + u16 val; + int i; + + for (i = 0; i < ARRAY_SIZE(ptp_registers); i++) { + val = aq_phy_read_reg(aq_hw, MDIO_MMD_VEND1, + ptp_registers[i]); + + aq_phy_write_reg(aq_hw, MDIO_MMD_VEND1, + ptp_registers[i], + val & ~HW_ATL_PTP_DISABLE_MSK); + } +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h index 84b72ad04a4a..86cc1ee836e2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* aQuantia Corporation Network Driver - * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2018-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ #ifndef AQ_PHY_H @@ -29,4 +31,6 @@ bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw); bool aq_phy_init(struct aq_hw_s *aq_hw); +void aq_phy_disable_ptp(struct aq_hw_s *aq_hw); + #endif /* AQ_PHY_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 14d79f70cad7..2125bc20ab6a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -93,6 +93,25 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = { AQ_NIC_RATE_100M, }; +const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = { + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_5G | + AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, + .quirks = AQ_NIC_QUIRK_BAD_PTP, +}; + +const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = { + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, + .quirks = AQ_NIC_QUIRK_BAD_PTP, +}; + static int hw_atl_b0_hw_reset(struct aq_hw_s *self) { int err = 0; @@ -354,8 +373,13 @@ static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self) /* WSP, if min_rate is set for at least one TC. * RR otherwise. + * + * NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't + * overwrite it here in that case. */ - hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); + if (!nic_cfg->is_ptp) + hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); + /* Data TC Arbiter takes precedence over Descriptor TC Arbiter, * leave Descriptor TC Arbiter as RR. 
*/ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h index 30f468f2084d..16091af17980 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -18,17 +18,15 @@ extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100; extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107; extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108; extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109; - -#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108 -#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109 +extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc111; +extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc112; #define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100 #define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107 #define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108 #define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109 - -#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108 -#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109 +#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc111 +#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc112 extern const struct aq_hw_ops hw_atl_ops_b0; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 3c8e8047ea1e..d775b23025c1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -1700,7 +1700,7 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location, for (i = 0; i < 4; ++i) aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location + i), - ipv6_src[i]); + ipv6_src[3 - i]); } void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, @@ -1711,7 +1711,7 @@ void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, for (i = 0; i < 4; ++i) aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location + i), - ipv6_dest[i]); + ipv6_dest[3 - i]); } u32 hw_atl_sem_ram_get(struct aq_hw_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 06220792daf1..7430ff025134 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -1360,7 +1360,7 @@ */ /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */ -#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4) +#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4) /* Bitmask for bitfield l3_da0[1F:0] */ #define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu /* Inverted bitmask for bitfield l3_da0[1F:0] */ diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 112edbd30823..38cce66ef212 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -556,7 +556,8 @@ static int ag71xx_mdio_probe(struct ag71xx *ag) ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio"); if (IS_ERR(ag->mdio_reset)) { netif_err(ag, probe, ndev, "Failed to get reset mdio.\n"); - return PTR_ERR(ag->mdio_reset); + err = PTR_ERR(ag->mdio_reset); + goto mdio_err_put_clk; } mii_bus->name = "ag71xx_mdio"; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6a884df44612..7463a1847ceb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ 
b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3418,7 +3418,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp) */ void bnxt_set_ring_params(struct bnxt *bp) { - u32 ring_size, rx_size, rx_space; + u32 ring_size, rx_size, rx_space, max_rx_cmpl; u32 agg_factor = 0, agg_ring_size = 0; /* 8 for CRC and VLAN */ @@ -3474,7 +3474,15 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; - ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; + max_rx_cmpl = bp->rx_ring_size; + /* MAX TPA needs to be added because TPA_START completions are + * immediately recycled, so the TPA completions are not bound by + * the RX ring size. + */ + if (bp->flags & BNXT_FLAG_TPA) + max_rx_cmpl += bp->max_tpa; + /* RX and TPA completions are 32-byte, all others are 16-byte */ + ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; bp->cp_ring_size = ring_size; bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); @@ -10385,15 +10393,15 @@ static void bnxt_sp_task(struct work_struct *work) &bp->sp_event)) bnxt_hwrm_phy_qcaps(bp); - if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, - &bp->sp_event)) - bnxt_init_ethtool_link_settings(bp); - rc = bnxt_update_link(bp, true); - mutex_unlock(&bp->link_lock); if (rc) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", rc); + + if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, + &bp->sp_event)) + bnxt_init_ethtool_link_settings(bp); + mutex_unlock(&bp->link_lock); } if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { int rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 6b88143af5ea..b4aa56dc4f9f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1765,8 +1765,11 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (epause->tx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; - if (netif_running(dev)) + if (netif_running(dev)) { + mutex_lock(&bp->link_lock); rc = bnxt_hwrm_set_pause(bp); + mutex_unlock(&bp->link_lock); + } return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 3a9a51f7063a..392e32c7122a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -396,6 +396,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp) } } + bp->pf.active_vfs = 0; kfree(bp->pf.vf); bp->pf.vf = NULL; } @@ -835,7 +836,6 @@ void bnxt_sriov_disable(struct bnxt *bp) bnxt_free_vf_resources(bp); - bp->pf.active_vfs = 0; /* Reclaim all resources for the PF. 
*/ rtnl_lock(); bnxt_restore_pf_fw_resources(bp); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index af924a8b885f..e471b14fc6e9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -543,14 +543,14 @@ static int bcmgenet_hfb_validate_mask(void *mask, size_t size) #define VALIDATE_MASK(x) \ bcmgenet_hfb_validate_mask(&(x), sizeof(x)) -static int bcmgenet_hfb_insert_data(u32 *f, int offset, - void *val, void *mask, size_t size) +static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index, + u32 offset, void *val, void *mask, + size_t size) { - int index; - u32 tmp; + u32 index, tmp; - index = offset / 2; - tmp = f[index]; + index = f_index * priv->hw_params->hfb_filter_size + offset / 2; + tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32)); while (size--) { if (offset++ & 1) { @@ -567,9 +567,10 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset, tmp |= 0x10000; break; } - f[index++] = tmp; + bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32)); if (size) - tmp = f[index]; + tmp = bcmgenet_hfb_readl(priv, + index * sizeof(u32)); } else { tmp &= ~0xCFF00; tmp |= (*(unsigned char *)val++) << 8; @@ -585,44 +586,26 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset, break; } if (!size) - f[index] = tmp; + bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32)); } } return 0; } -static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data, - u32 f_length, u32 rx_queue, int f_index) -{ - u32 base = f_index * priv->hw_params->hfb_filter_size; - int i; - - for (i = 0; i < f_length; i++) - bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32)); - - bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); - bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); -} - -static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, - struct bcmgenet_rxnfc_rule *rule) +static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, + struct bcmgenet_rxnfc_rule *rule) { struct ethtool_rx_flow_spec *fs = &rule->fs; - int err = 0, offset = 0, f_length = 0; + u32 offset = 0, f_length = 0, f; u8 val_8, mask_8; __be16 val_16; u16 mask_16; size_t size; - u32 *f_data; - - f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32), - GFP_KERNEL); - if (!f_data) - return -ENOMEM; + f = fs->location; if (fs->flow_type & FLOW_MAC_EXT) { - bcmgenet_hfb_insert_data(f_data, 0, + bcmgenet_hfb_insert_data(priv, f, 0, &fs->h_ext.h_dest, &fs->m_ext.h_dest, sizeof(fs->h_ext.h_dest)); } @@ -630,11 +613,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, if (fs->flow_type & FLOW_EXT) { if (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci) { - bcmgenet_hfb_insert_data(f_data, 12, + bcmgenet_hfb_insert_data(priv, f, 12, &fs->h_ext.vlan_etype, &fs->m_ext.vlan_etype, sizeof(fs->h_ext.vlan_etype)); - bcmgenet_hfb_insert_data(f_data, 14, + bcmgenet_hfb_insert_data(priv, f, 14, &fs->h_ext.vlan_tci, &fs->m_ext.vlan_tci, sizeof(fs->h_ext.vlan_tci)); @@ -646,15 +629,15 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { case ETHER_FLOW: f_length += DIV_ROUND_UP(ETH_HLEN, 2); - bcmgenet_hfb_insert_data(f_data, 0, + bcmgenet_hfb_insert_data(priv, f, 0, &fs->h_u.ether_spec.h_dest, &fs->m_u.ether_spec.h_dest, sizeof(fs->h_u.ether_spec.h_dest)); - bcmgenet_hfb_insert_data(f_data, ETH_ALEN, + bcmgenet_hfb_insert_data(priv, f, ETH_ALEN, 
&fs->h_u.ether_spec.h_source, &fs->m_u.ether_spec.h_source, sizeof(fs->h_u.ether_spec.h_source)); - bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset, + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, &fs->h_u.ether_spec.h_proto, &fs->m_u.ether_spec.h_proto, sizeof(fs->h_u.ether_spec.h_proto)); @@ -664,21 +647,21 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, /* Specify IP Ether Type */ val_16 = htons(ETH_P_IP); mask_16 = 0xFFFF; - bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset, + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, &val_16, &mask_16, sizeof(val_16)); - bcmgenet_hfb_insert_data(f_data, 15 + offset, + bcmgenet_hfb_insert_data(priv, f, 15 + offset, &fs->h_u.usr_ip4_spec.tos, &fs->m_u.usr_ip4_spec.tos, sizeof(fs->h_u.usr_ip4_spec.tos)); - bcmgenet_hfb_insert_data(f_data, 23 + offset, + bcmgenet_hfb_insert_data(priv, f, 23 + offset, &fs->h_u.usr_ip4_spec.proto, &fs->m_u.usr_ip4_spec.proto, sizeof(fs->h_u.usr_ip4_spec.proto)); - bcmgenet_hfb_insert_data(f_data, 26 + offset, + bcmgenet_hfb_insert_data(priv, f, 26 + offset, &fs->h_u.usr_ip4_spec.ip4src, &fs->m_u.usr_ip4_spec.ip4src, sizeof(fs->h_u.usr_ip4_spec.ip4src)); - bcmgenet_hfb_insert_data(f_data, 30 + offset, + bcmgenet_hfb_insert_data(priv, f, 30 + offset, &fs->h_u.usr_ip4_spec.ip4dst, &fs->m_u.usr_ip4_spec.ip4dst, sizeof(fs->h_u.usr_ip4_spec.ip4dst)); @@ -688,11 +671,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, /* Only supports 20 byte IPv4 header */ val_8 = 0x45; mask_8 = 0xFF; - bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset, + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset, &val_8, &mask_8, sizeof(val_8)); size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); - bcmgenet_hfb_insert_data(f_data, + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + 20 + offset, &fs->h_u.usr_ip4_spec.l4_4_bytes, &fs->m_u.usr_ip4_spec.l4_4_bytes, @@ -701,34 +684,42 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, break; } + bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length); if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) { /* Ring 0 flows can be handled by the default Descriptor Ring * We'll map them to ring 0, but don't enable the filter */ - bcmgenet_hfb_set_filter(priv, f_data, f_length, 0, - fs->location); + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0); rule->state = BCMGENET_RXNFC_STATE_DISABLED; } else { /* Other Rx rings are direct mapped here */ - bcmgenet_hfb_set_filter(priv, f_data, f_length, - fs->ring_cookie, fs->location); - bcmgenet_hfb_enable_filter(priv, fs->location); + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, + fs->ring_cookie); + bcmgenet_hfb_enable_filter(priv, f); rule->state = BCMGENET_RXNFC_STATE_ENABLED; } - - kfree(f_data); - - return err; } /* bcmgenet_hfb_clear * * Clear Hardware Filter Block and disable all filtering. 
*/ +static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 base, i; + + base = f_index * priv->hw_params->hfb_filter_size; + for (i = 0; i < priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32)); +} + static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) { u32 i; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); @@ -740,19 +731,18 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); - for (i = 0; i < priv->hw_params->hfb_filter_cnt * - priv->hw_params->hfb_filter_size; i++) - bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); + for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) + bcmgenet_hfb_clear_filter(priv, i); } static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) { int i; + INIT_LIST_HEAD(&priv->rxnfc_list); if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) return; - INIT_LIST_HEAD(&priv->rxnfc_list); for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; @@ -1437,18 +1427,15 @@ static int bcmgenet_insert_flow(struct net_device *dev, loc_rule = &priv->rxnfc_rules[cmd->fs.location]; if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) bcmgenet_hfb_disable_filter(priv, cmd->fs.location); - if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) + if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { list_del(&loc_rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; memcpy(&loc_rule->fs, &cmd->fs, sizeof(struct ethtool_rx_flow_spec)); - err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); - if (err) { - netdev_err(dev, "rxnfc: Could not install rule (%d)\n", - err); - return err; - } + bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); list_add_tail(&loc_rule->list, &priv->rxnfc_list); @@ -1473,8 +1460,10 @@ static int bcmgenet_delete_flow(struct net_device *dev, if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) bcmgenet_hfb_disable_filter(priv, cmd->fs.location); - if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { list_del(&rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } rule->state = BCMGENET_RXNFC_STATE_UNUSED; memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); @@ -3999,7 +3988,7 @@ static int bcmgenet_probe(struct platform_device *pdev) if (err) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) - goto err; + goto err_clk_disable; /* Mii wait queue */ init_waitqueue_head(&priv->wq); @@ -4011,14 +4000,14 @@ static int bcmgenet_probe(struct platform_device *pdev) if (IS_ERR(priv->clk_wol)) { dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); err = PTR_ERR(priv->clk_wol); - goto err; + goto err_clk_disable; } priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); if (IS_ERR(priv->clk_eee)) { dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); err = PTR_ERR(priv->clk_eee); - goto err; + goto err_clk_disable; } /* If this is an internal GPHY, power it on now, before UniMAC is @@ -4129,8 +4118,9 @@ static int bcmgenet_resume(struct device *d) { struct net_device *dev = dev_get_drvdata(d); struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; 
unsigned long dma_ctrl; - u32 offset, reg; + u32 reg; int ret; if (!netif_running(dev)) @@ -4161,10 +4151,11 @@ static int bcmgenet_resume(struct device *d) bcmgenet_set_hw_addr(priv, dev->dev_addr); - offset = HFB_FLT_ENABLE_V3PLUS; - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset); - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32)); - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL); + /* Restore hardware filters */ + bcmgenet_hfb_clear(priv); + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + bcmgenet_hfb_create_rxnfc_filter(priv, rule); if (priv->internal_phy) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); @@ -4208,7 +4199,6 @@ static int bcmgenet_suspend(struct device *d) { struct net_device *dev = dev_get_drvdata(d); struct bcmgenet_priv *priv = netdev_priv(dev); - u32 offset; if (!netif_running(dev)) return 0; @@ -4220,11 +4210,7 @@ static int bcmgenet_suspend(struct device *d) if (!device_may_wakeup(d)) phy_suspend(dev->phydev); - /* Preserve filter state and disable filtering */ - priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); - offset = HFB_FLT_ENABLE_V3PLUS; - priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset); - priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); + /* Disable filtering */ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); return 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index a12cb59298f4..f6ca01da141d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -696,7 +696,6 @@ struct bcmgenet_priv { u32 wolopts; u8 sopass[SOPASS_MAX]; bool wol_active; - u32 hfb_en[3]; struct bcmgenet_mib_counters mib; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 4ea6a26b04f7..1c86eddb1b51 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -217,20 +217,28 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, priv->wol_active = 0; clk_disable_unprepare(priv->clk_wol); + priv->crc_fwd_en = 0; /* Disable Magic Packet Detection */ - reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); - reg &= ~(MPD_EN | MPD_PW_EN); - bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (!(reg & MPD_EN)) + return; /* already reset so skip the rest */ + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } /* Disable WAKE_FILTER Detection */ - reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); - reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); - bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + if (priv->wolopts & WAKE_FILTER) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (!(reg & RBUF_ACPI_EN)) + return; /* already reset so skip the rest */ + reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } /* Disable CRC Forward */ reg = bcmgenet_umac_readl(priv, UMAC_CMD); reg &= ~CMD_CRC_FWD; bcmgenet_umac_writel(priv, reg, UMAC_CMD); - priv->crc_fwd_en = 0; } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 52582e8ed90e..2213e6ab8151 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -2821,11 +2821,13 @@ static void macb_get_wol(struct net_device 
*netdev, struct ethtool_wolinfo *wol) { struct macb *bp = netdev_priv(netdev); - wol->supported = 0; - wol->wolopts = 0; - - if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) + if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { phylink_ethtool_get_wol(bp->phylink, wol); + wol->supported |= WAKE_MAGIC; + + if (bp->wol & MACB_WOL_ENABLED) + wol->wolopts |= WAKE_MAGIC; + } } static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -2833,9 +2835,13 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) struct macb *bp = netdev_priv(netdev); int ret; + /* Pass the order to phylink layer */ ret = phylink_ethtool_set_wol(bp->phylink, wol); - if (!ret) - return 0; + /* Don't manage WoL on MAC if handled by the PHY + * or if there's a failure in talking to the PHY + */ + if (!ret || ret != -EOPNOTSUPP) + return ret; if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || (wol->wolopts & ~WAKE_MAGIC)) @@ -3730,7 +3736,7 @@ static int macb_init(struct platform_device *pdev) if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { val = 0; - if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(bp->phy_interface)) val = GEM_BIT(RGMII); else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) @@ -4422,7 +4428,7 @@ static int macb_probe(struct platform_device *pdev) bp->wol = 0; if (of_get_property(np, "magic-packet", NULL)) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; - device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); spin_lock_init(&bp->lock); @@ -4598,10 +4604,10 @@ static int __maybe_unused macb_suspend(struct device *dev) bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); } - netif_carrier_off(netdev); if (bp->ptp_info) bp->ptp_info->ptp_remove(netdev); - pm_runtime_force_suspend(dev); + if (!device_may_wakeup(dev)) + pm_runtime_force_suspend(dev); return 0; } @@ -4616,7 +4622,8 @@ static int __maybe_unused macb_resume(struct device *dev) if (!netif_running(netdev)) return 0; - pm_runtime_force_resume(dev); + if (!device_may_wakeup(dev)) + pm_runtime_force_resume(dev); if (bp->wol & MACB_WOL_ENABLED) { macb_writel(bp, IDR, MACB_BIT(WOL)); @@ -4654,7 +4661,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if (!(device_may_wakeup(&bp->dev->dev))) { + if (!(device_may_wakeup(dev))) { clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); @@ -4670,7 +4677,7 @@ static int __maybe_unused macb_runtime_resume(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if (!(device_may_wakeup(&bp->dev->dev))) { + if (!(device_may_wakeup(dev))) { clk_prepare_enable(bp->pclk); clk_prepare_enable(bp->hclk); clk_prepare_enable(bp->tx_clk); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 7a7f61a8cdf4..d02d346629b3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1112,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family) struct in_addr *addr; addr = (struct in_addr *)ipmask; - if (ntohl(addr->s_addr) == 0xffffffff) + if (addr->s_addr == htonl(0xffffffff)) return true; } else if (family == AF_INET6) { struct in6_addr *addr6; addr6 = (struct in6_addr *)ipmask; - 
if (ntohl(addr6->s6_addr32[0]) == 0xffffffff && - ntohl(addr6->s6_addr32[1]) == 0xffffffff && - ntohl(addr6->s6_addr32[2]) == 0xffffffff && - ntohl(addr6->s6_addr32[3]) == 0xffffffff) + if (addr6->s6_addr32[0] == htonl(0xffffffff) && + addr6->s6_addr32[1] == htonl(0xffffffff) && + addr6->s6_addr32[2] == htonl(0xffffffff) && + addr6->s6_addr32[3] == htonl(0xffffffff)) return true; } return false; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 32a45dc51ed7..92eee66cbc84 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2938,6 +2938,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb, txq_info = adap->sge.uld_txq_info[tx_uld_type]; if (unlikely(!txq_info)) { WARN_ON(true); + kfree_skb(skb); return NET_XMIT_DROP; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 1aa6dc10dc0b..ad522f822cc2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3493,7 +3493,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, drv_fw = &fw_info->fw_hdr; /* Read the header of the firmware on the card */ - ret = -t4_read_flash(adap, FLASH_FW_START, + ret = t4_read_flash(adap, FLASH_FW_START, sizeof(*card_fw) / sizeof(uint32_t), (uint32_t *)card_fw, 1); if (ret == 0) { @@ -3522,8 +3522,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, should_install_fs_fw(adap, card_fw_usable, be32_to_cpu(fs_fw->fw_ver), be32_to_cpu(card_fw->fw_ver))) { - ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, - fw_size, 0); + ret = t4_fw_upgrade(adap, adap->mbox, fw_data, + fw_size, 0); if (ret != 0) { dev_err(adap->pdev_dev, "failed to install firmware: %d\n", ret); @@ -3554,7 +3554,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); - ret = EINVAL; + ret = -EINVAL; goto bye; } diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 2972244e6eb0..43570f4911ea 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2938,7 +2938,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) DMA_BIT_MASK(40)); if (err) { netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n"); - return err; + goto free_netdev; } /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index f150cd454fa4..0998ceb1a26e 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -3632,7 +3632,7 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); dpmac_dev = fsl_mc_get_endpoint(dpni_dev); - if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) + if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) return 0; if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io)) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 96831f49925c..22105d09bc89 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c 
@@ -266,7 +266,7 @@ static irqreturn_t enetc_msix(int irq, void *data) /* disable interrupts */ enetc_wr_reg(v->rbier, 0); - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0); napi_schedule_irqoff(&v->napi); @@ -302,7 +302,7 @@ static int enetc_poll(struct napi_struct *napi, int budget) /* enable interrupts */ enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE); - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), ENETC_TBIER_TXTIE); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 4fac57dbb3c8..7a9675bd36e8 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -906,6 +906,7 @@ static int enetc_pf_probe(struct pci_dev *pdev, return 0; err_reg_netdev: + enetc_mdio_remove(pf); enetc_of_put_phy(priv); enetc_free_msix(priv); err_alloc_msix: diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index a6cdd5b61921..832a2175636d 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -525,11 +525,6 @@ struct fec_enet_private { unsigned int total_tx_ring_size; unsigned int total_rx_ring_size; - unsigned long work_tx; - unsigned long work_rx; - unsigned long work_ts; - unsigned long work_mdio; - struct platform_device *pdev; int dev_id; @@ -595,6 +590,7 @@ struct fec_enet_private { void fec_ptp_init(struct platform_device *pdev, int irq_idx); void fec_ptp_stop(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); +void fec_ptp_disable_hwts(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2d0d313ee7c5..cc7fbfc09354 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -75,8 +75,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define DRIVER_NAME "fec" -#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0)) - /* Pause frame feild and FIFO threshold */ #define FEC_ENET_FCE (1 << 5) #define FEC_ENET_RSEM_V 0x84 @@ -1248,8 +1246,6 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) fep = netdev_priv(ndev); - queue_id = FEC_ENET_GET_QUQUE(queue_id); - txq = fep->tx_queue[queue_id]; /* get next bdp of dirty_tx */ nq = netdev_get_tx_queue(ndev, queue_id); @@ -1298,8 +1294,13 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) ndev->stats.tx_bytes += skb->len; } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && - fep->bufdesc_ex) { + /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who + * are to time stamp the packet, so we still need to check time + * stamping enabled flag. 
+ */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS && + fep->hwts_tx_en) && + fep->bufdesc_ex) { struct skb_shared_hwtstamps shhwtstamps; struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; @@ -1340,17 +1341,14 @@ skb_done: writel(0, txq->bd.reg_desc_active); } -static void -fec_enet_tx(struct net_device *ndev) +static void fec_enet_tx(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - u16 queue_id; - /* First process class A queue, then Class B and Best Effort queue */ - for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { - clear_bit(queue_id, &fep->work_tx); - fec_enet_tx_queue(ndev, queue_id); - } - return; + int i; + + /* Make sure that AVB queues are processed first. */ + for (i = fep->num_tx_queues - 1; i >= 0; i--) + fec_enet_tx_queue(ndev, i); } static int @@ -1426,7 +1424,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) #ifdef CONFIG_M532x flush_cache_all(); #endif - queue_id = FEC_ENET_GET_QUQUE(queue_id); rxq = fep->rx_queue[queue_id]; /* First, grab all of the stats for the incoming packet. @@ -1550,6 +1547,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) htons(ETH_P_8021Q), vlan_tag); + skb_record_rx_queue(skb, queue_id); napi_gro_receive(&fep->napi, skb); if (is_copybreak) { @@ -1595,48 +1593,30 @@ rx_processing_done: return pkt_received; } -static int -fec_enet_rx(struct net_device *ndev, int budget) +static int fec_enet_rx(struct net_device *ndev, int budget) { - int pkt_received = 0; - u16 queue_id; struct fec_enet_private *fep = netdev_priv(ndev); + int i, done = 0; - for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { - int ret; + /* Make sure that AVB queues are processed first. */ + for (i = fep->num_rx_queues - 1; i >= 0; i--) + done += fec_enet_rx_queue(ndev, budget - done, i); - ret = fec_enet_rx_queue(ndev, - budget - pkt_received, queue_id); - - if (ret < budget - pkt_received) - clear_bit(queue_id, &fep->work_rx); - - pkt_received += ret; - } - return pkt_received; + return done; } -static bool -fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) +static bool fec_enet_collect_events(struct fec_enet_private *fep) { - if (int_events == 0) - return false; + uint int_events; + + int_events = readl(fep->hwp + FEC_IEVENT); - if (int_events & FEC_ENET_RXF_0) - fep->work_rx |= (1 << 2); - if (int_events & FEC_ENET_RXF_1) - fep->work_rx |= (1 << 0); - if (int_events & FEC_ENET_RXF_2) - fep->work_rx |= (1 << 1); + /* Don't clear MDIO events, we poll for those */ + int_events &= ~FEC_ENET_MII; - if (int_events & FEC_ENET_TXF_0) - fep->work_tx |= (1 << 2); - if (int_events & FEC_ENET_TXF_1) - fep->work_tx |= (1 << 0); - if (int_events & FEC_ENET_TXF_2) - fep->work_tx |= (1 << 1); + writel(int_events, fep->hwp + FEC_IEVENT); - return true; + return int_events != 0; } static irqreturn_t @@ -1644,18 +1624,9 @@ fec_enet_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct fec_enet_private *fep = netdev_priv(ndev); - uint int_events; irqreturn_t ret = IRQ_NONE; - int_events = readl(fep->hwp + FEC_IEVENT); - - /* Don't clear MDIO events, we poll for those */ - int_events &= ~FEC_ENET_MII; - - writel(int_events, fep->hwp + FEC_IEVENT); - fec_enet_collect_events(fep, int_events); - - if ((fep->work_tx || fep->work_rx) && fep->link) { + if (fec_enet_collect_events(fep) && fep->link) { ret = IRQ_HANDLED; if (napi_schedule_prep(&fep->napi)) { @@ -1672,17 +1643,19 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int 
budget) { struct net_device *ndev = napi->dev; struct fec_enet_private *fep = netdev_priv(ndev); - int pkts; + int done = 0; - pkts = fec_enet_rx(ndev, budget); - - fec_enet_tx(ndev); + do { + done += fec_enet_rx(ndev, budget - done); + fec_enet_tx(ndev); + } while ((done < budget) && fec_enet_collect_events(fep)); - if (pkts < budget) { - napi_complete_done(napi, pkts); + if (done < budget) { + napi_complete_done(napi, done); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } - return pkts; + + return done; } /* ------------------------------------------------------------------------- */ @@ -2755,10 +2728,16 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) return -ENODEV; if (fep->bufdesc_ex) { - if (cmd == SIOCSHWTSTAMP) - return fec_ptp_set(ndev, rq); - if (cmd == SIOCGHWTSTAMP) - return fec_ptp_get(ndev, rq); + bool use_fec_hwts = !phy_has_hwtstamp(phydev); + + if (cmd == SIOCSHWTSTAMP) { + if (use_fec_hwts) + return fec_ptp_set(ndev, rq); + fec_ptp_disable_hwts(ndev); + } else if (cmd == SIOCGHWTSTAMP) { + if (use_fec_hwts) + return fec_ptp_get(ndev, rq); + } } return phy_mii_ioctl(phydev, rq, cmd); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 945643c02615..f8a592c96beb 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -452,6 +452,18 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } +/** + * fec_ptp_disable_hwts - disable hardware time stamping + * @ndev: pointer to net_device + */ +void fec_ptp_disable_hwts(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + fep->hwts_tx_en = 0; + fep->hwts_rx_en = 0; +} + int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) { struct fec_enet_private *fep = netdev_priv(ndev); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index b3c69e9038ea..b513b8c5c3b5 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -779,8 +779,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) + if (!IS_ERR(mac_addr)) { ether_addr_copy(dev->dev_addr, mac_addr); + } else { + eth_hw_addr_random(dev); + dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); + } if (model && !strcasecmp(model, "TSEC")) priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index d041cac9a487..088550db2de7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -77,6 +77,7 @@ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num) enum hns_desc_type { + DESC_TYPE_UNKNOWN, DESC_TYPE_SKB, DESC_TYPE_FRAGLIST_SKB, DESC_TYPE_PAGE, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b14f2abc2425..33c481d11116 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1118,12 +1118,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, return -ENOMEM; } + desc_cb->priv = priv; desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; if (likely(size <= HNS3_MAX_BD_SIZE)) { - desc_cb->priv = priv; - desc_cb->dma = dma; - desc_cb->type = type; desc->addr = 
cpu_to_le64(dma); desc->tx.send_size = cpu_to_le16(size); desc->tx.bdtp_fe_sc_vld_ra_ri = @@ -1135,18 +1135,11 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, } frag_buf_num = hns3_tx_bd_count(size); - sizeoflast = size & HNS3_TX_LAST_SIZE_M; + sizeoflast = size % HNS3_MAX_BD_SIZE; sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ - desc_cb->priv = priv; - desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; - desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB || - type == DESC_TYPE_SKB) && !k) ? - type : DESC_TYPE_PAGE; - /* now, fill the descriptor */ desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? @@ -1158,7 +1151,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, /* move ring pointer to next */ ring_ptr_move_fw(ring, next_to_use); - desc_cb = &ring->desc_cb[ring->next_to_use]; desc = &ring->desc[ring->next_to_use]; } @@ -1346,6 +1338,10 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) unsigned int i; for (i = 0; i < ring->desc_num; i++) { + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + + memset(desc, 0, sizeof(*desc)); + /* check if this is where we started */ if (ring->next_to_use == next_to_use_orig) break; @@ -1353,6 +1349,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) /* rollback one */ ring_ptr_move_bw(ring, next_to_use); + if (!ring->desc_cb[ring->next_to_use].dma) + continue; + /* unmap the descriptor dma address */ if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB || ring->desc_cb[ring->next_to_use].type == @@ -1369,6 +1368,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) ring->desc_cb[ring->next_to_use].length = 0; ring->desc_cb[ring->next_to_use].dma = 0; + ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; } } @@ -4127,9 +4127,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_put_ring_config(priv); - hns3_dbg_uninit(handle); - out_netdev_free: + hns3_dbg_uninit(handle); free_netdev(netdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 66cd4395f781..a8776620acbc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -165,8 +165,6 @@ enum hns3_nic_state { #define HNS3_TXD_MSS_S 0 #define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) -#define HNS3_TX_LAST_SIZE_M 0xffff - #define HNS3_VECTOR_TX_IRQ BIT_ULL(0) #define HNS3_VECTOR_RX_IRQ BIT_ULL(1) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 6b1545f982aa..2622e04e8eed 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -180,18 +180,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, { struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; unsigned char *packet = skb->data; + u32 len = skb_headlen(skb); u32 i; - for (i = 0; i < skb->len; i++) + len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE); + + for (i = 0; i < len; i++) if (packet[i] != (unsigned char)(i & 0xff)) break; /* The packet is correctly received */ - if (i == skb->len) + if (i == HNS3_NIC_LB_TEST_PACKET_SIZE) 
tqp_vector->rx_group.total_packets++; else print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, - skb->data, skb->len, true); + skb->data, len, true); dev_kfree_skb_any(skb); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 96bfad52630d..bb4a6327035d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2673,11 +2673,10 @@ void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) delay_time); } -static int hclge_get_mac_link_status(struct hclge_dev *hdev) +static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) { struct hclge_link_status_cmd *req; struct hclge_desc desc; - int link_status; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); @@ -2689,33 +2688,25 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev) } req = (struct hclge_link_status_cmd *)desc.data; - link_status = req->status & HCLGE_LINK_STATUS_UP_M; + *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? + HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; - return !!link_status; + return 0; } -static int hclge_get_mac_phy_link(struct hclge_dev *hdev) +static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) { - unsigned int mac_state; - int link_stat; + struct phy_device *phydev = hdev->hw.mac.phydev; + + *link_status = HCLGE_LINK_STATUS_DOWN; if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) return 0; - mac_state = hclge_get_mac_link_status(hdev); - - if (hdev->hw.mac.phydev) { - if (hdev->hw.mac.phydev->state == PHY_RUNNING) - link_stat = mac_state & - hdev->hw.mac.phydev->link; - else - link_stat = 0; - - } else { - link_stat = mac_state; - } + if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) + return 0; - return !!link_stat; + return hclge_get_mac_link_status(hdev, link_status); } static void hclge_update_link_status(struct hclge_dev *hdev) @@ -2725,6 +2716,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev) struct hnae3_handle *rhandle; struct hnae3_handle *handle; int state; + int ret; int i; if (!client) @@ -2733,7 +2725,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev) if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) return; - state = hclge_get_mac_phy_link(hdev); + ret = hclge_get_mac_phy_link(hdev, &state); + if (ret) { + clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); + return; + } + if (state != hdev->hw.mac.link) { for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { handle = &hdev->vport[i].nic; @@ -6524,14 +6521,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) { #define HCLGE_MAC_LINK_STATUS_NUM 100 + int link_status; int i = 0; int ret; do { - ret = hclge_get_mac_link_status(hdev); - if (ret < 0) + ret = hclge_get_mac_link_status(hdev, &link_status); + if (ret) return ret; - else if (ret == link_ret) + if (link_status == link_ret) return 0; msleep(HCLGE_LINK_STATUS_MS); @@ -6542,9 +6540,6 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, bool is_phy) { -#define HCLGE_LINK_STATUS_DOWN 0 -#define HCLGE_LINK_STATUS_UP 1 - int link_ret; link_ret = en ? 
HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; @@ -9859,7 +9854,7 @@ retry: set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); hdev->reset_type = HNAE3_FLR_RESET; ret = hclge_reset_prepare(hdev); - if (ret) { + if (ret || hdev->reset_pending) { dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", ret); if (hdev->reset_pending || diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 46e6e0fef3ba..9bbdd4557c27 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -317,6 +317,9 @@ enum hclge_link_fail_code { HCLGE_LF_XSFP_ABSENT, }; +#define HCLGE_LINK_STATUS_DOWN 0 +#define HCLGE_LINK_STATUS_UP 1 + #define HCLGE_PG_NUM 4 #define HCLGE_SCH_MODE_SP 0 #define HCLGE_SCH_MODE_DWRR 1 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 1b9578d0bd80..a10b022d1951 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1793,6 +1793,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to assert VF reset, ret = %d\n", ret); + return ret; + } hdev->rst_stats.vf_func_rst_cnt++; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 0245da02efbb..b735bc537508 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -814,6 +814,8 @@ err_aeqs_init: err_init_msix: err_pfhwdev_alloc: hinic_free_hwif(hwif); + if (err > 0) + err = -EIO; return ERR_PTR(err); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index c33eb1147055..e0f5a81d8620 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -370,48 +370,89 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, MSG_NOT_RESP, timeout); } -/** - * mgmt_recv_msg_handler - handler for message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @recv_msg: received message details - **/ -static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_recv_msg *recv_msg) +static void recv_mgmt_msg_work_handler(struct work_struct *work) { - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - u8 *buf_out = recv_msg->buf_out; + struct hinic_mgmt_msg_handle_work *mgmt_work = + container_of(work, struct hinic_mgmt_msg_handle_work, work); + struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt; + struct pci_dev *pdev = pf_to_mgmt->hwif->pdev; + u8 *buf_out = pf_to_mgmt->mgmt_ack_buf; struct hinic_mgmt_cb *mgmt_cb; unsigned long cb_state; u16 out_size = 0; - if (recv_msg->mod >= HINIC_MOD_MAX) { + memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); + + if (mgmt_work->mod >= HINIC_MOD_MAX) { dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n", - recv_msg->mod); + mgmt_work->mod); + kfree(mgmt_work->msg); + kfree(mgmt_work); return; } - mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod]; + mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod]; cb_state = cmpxchg(&mgmt_cb->state, HINIC_MGMT_CB_ENABLED, 
HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING); if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb)) - mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd, - recv_msg->msg, recv_msg->msg_len, + mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd, + mgmt_work->msg, mgmt_work->msg_len, buf_out, &out_size); else dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n", - recv_msg->mod, recv_msg->cmd); + mgmt_work->mod, mgmt_work->cmd); mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING; - if (!recv_msg->async_mgmt_to_pf) + if (!mgmt_work->async_mgmt_to_pf) /* MGMT sent sync msg, send the response */ - msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd, + msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd, buf_out, out_size, MGMT_RESP, - recv_msg->msg_id); + mgmt_work->msg_id); + + kfree(mgmt_work->msg); + kfree(mgmt_work); +} + +/** + * mgmt_recv_msg_handler - handler for message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg) +{ + struct hinic_mgmt_msg_handle_work *mgmt_work = NULL; + struct pci_dev *pdev = pf_to_mgmt->hwif->pdev; + + mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); + if (!mgmt_work) { + dev_err(&pdev->dev, "Allocate mgmt work memory failed\n"); + return; + } + + if (recv_msg->msg_len) { + mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); + if (!mgmt_work->msg) { + dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n"); + kfree(mgmt_work); + return; + } + } + + mgmt_work->pf_to_mgmt = pf_to_mgmt; + mgmt_work->msg_len = recv_msg->msg_len; + memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len); + mgmt_work->msg_id = recv_msg->msg_id; + mgmt_work->mod = recv_msg->mod; + mgmt_work->cmd = recv_msg->cmd; + mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf; + + INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler); + queue_work(pf_to_mgmt->workq, &mgmt_work->work); } /** @@ -546,6 +587,12 @@ static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt) if (!pf_to_mgmt->sync_msg_buf) return -ENOMEM; + pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev, + MAX_PF_MGMT_BUF_SIZE, + GFP_KERNEL); + if (!pf_to_mgmt->mgmt_ack_buf) + return -ENOMEM; + return 0; } @@ -571,6 +618,11 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, return 0; sema_init(&pf_to_mgmt->sync_msg_lock, 1); + pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt"); + if (!pf_to_mgmt->workq) { + dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n"); + return -ENOMEM; + } pf_to_mgmt->sync_msg_id = 0; err = alloc_msg_buf(pf_to_mgmt); @@ -605,4 +657,5 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt) hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU); hinic_api_cmd_free(pf_to_mgmt->cmd_chain); + destroy_workqueue(pf_to_mgmt->workq); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index c2b142c08b0e..a824fbda59db 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -119,6 +119,7 @@ struct hinic_pf_to_mgmt { struct semaphore sync_msg_lock; u16 sync_msg_id; u8 *sync_msg_buf; + void *mgmt_ack_buf; struct hinic_recv_msg recv_resp_msg_from_mgmt; struct hinic_recv_msg recv_msg_from_mgmt; @@ -126,6 +127,21 @@ struct hinic_pf_to_mgmt { struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX]; + + struct 
workqueue_struct *workq; +}; + +struct hinic_mgmt_msg_handle_work { + struct work_struct work; + struct hinic_pf_to_mgmt *pf_to_mgmt; + + void *msg; + u16 msg_len; + + enum hinic_mod_type mod; + u8 cmd; + u16 msg_id; + int async_mgmt_to_pf; }; void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index c639e3a29302..7d5d9d34f4e4 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3959,7 +3959,7 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, /* When at 2.5G, the link partner can send frames with shortened * preambles. */ - if (state->speed == SPEED_2500) + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; if (pp->phy_interface != state->interface) { diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 241f00716979..fe54764caea9 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -203,7 +203,7 @@ io_error: static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) { - u16 v; + u16 v = 0; __gm_phy_read(hw, port, reg, &v); return v; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h index 7be6b2d36b60..9976de8b9047 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h @@ -29,6 +29,7 @@ struct mlx5e_dcbx { bool manual_buffer; u32 cable_len; u32 xoff; + u16 port_buff_cell_sz; }; #define MLX5E_MAX_DSCP (64) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 2a8950b3056f..3cf3e35053f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -78,11 +78,26 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_400GAUI_8] = 400000, }; +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev) +{ + struct mlx5e_port_eth_proto eproto; + int err; + + if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet)) + return true; + + err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto); + if (err) + return false; + + return !!eproto.cap; +} + static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, const u32 **arr, u32 *size, bool force_legacy) { - bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev); *size = ext ? 
ARRAY_SIZE(mlx5e_ext_link_speed) : ARRAY_SIZE(mlx5e_link_speed); @@ -177,7 +192,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) bool ext; int err; - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = mlx5e_ptys_ext_supported(mdev); err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) goto out; @@ -205,7 +220,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) int err; int i; - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = mlx5e_ptys_ext_supported(mdev); err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index a2ddd446dd59..7a7defe60792 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, bool force_legacy); - +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev); int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index ae99fac08b53..673f1c82d381 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -34,6 +34,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, struct mlx5e_port_buffer *port_buffer) { + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5_core_dev *mdev = priv->mdev; int sz = MLX5_ST_SZ_BYTES(pbmc_reg); u32 total_used = 0; @@ -57,11 +58,11 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, port_buffer->buffer[i].epsb = MLX5_GET(bufferx_reg, buffer, epsb); port_buffer->buffer[i].size = - MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz; port_buffer->buffer[i].xon = - MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz; port_buffer->buffer[i].xoff = - MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz; total_used += port_buffer->buffer[i].size; mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i, @@ -73,7 +74,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, } port_buffer->port_buffer_size = - MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz; port_buffer->spare_buffer_size = port_buffer->port_buffer_size - total_used; @@ -88,9 +89,9 @@ out: static int port_set_buffer(struct mlx5e_priv *priv, struct mlx5e_port_buffer *port_buffer) { + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5_core_dev *mdev = priv->mdev; int sz = MLX5_ST_SZ_BYTES(pbmc_reg); - void *buffer; void *in; int err; int i; @@ -104,16 +105,18 @@ static int port_set_buffer(struct mlx5e_priv *priv, goto out; for (i = 0; i < MLX5E_MAX_BUFFER; i++) { - buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); - - MLX5_SET(bufferx_reg, 
buffer, size, - port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT); - MLX5_SET(bufferx_reg, buffer, lossy, - port_buffer->buffer[i].lossy); - MLX5_SET(bufferx_reg, buffer, xoff_threshold, - port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT); - MLX5_SET(bufferx_reg, buffer, xon_threshold, - port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT); + void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); + u64 size = port_buffer->buffer[i].size; + u64 xoff = port_buffer->buffer[i].xoff; + u64 xon = port_buffer->buffer[i].xon; + + do_div(size, port_buff_cell_sz); + do_div(xoff, port_buff_cell_sz); + do_div(xon, port_buff_cell_sz); + MLX5_SET(bufferx_reg, buffer, size, size); + MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy); + MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff); + MLX5_SET(bufferx_reg, buffer, xon_threshold, xon); } err = mlx5e_port_set_pbmc(mdev, in); @@ -143,7 +146,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) } static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, - u32 xoff, unsigned int max_mtu) + u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz) { int i; @@ -155,7 +158,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, } if (port_buffer->buffer[i].size < - (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) { + (xoff + max_mtu + port_buff_cell_sz)) { pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n", i, port_buffer->buffer[i].size); return -ENOMEM; @@ -175,6 +178,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, * @pfc_en: <input> current pfc configuration * @buffer: <input> current prio to buffer mapping * @xoff: <input> xoff value + * @port_buff_cell_sz: <input> port buffer cell_size * @port_buffer: <output> port receive buffer configuration * @change: <output> * @@ -189,7 +193,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, * sets change to true if buffer configuration was modified. 
*/ static int update_buffer_lossy(unsigned int max_mtu, - u8 pfc_en, u8 *buffer, u32 xoff, + u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz, struct mlx5e_port_buffer *port_buffer, bool *change) { @@ -225,7 +229,7 @@ static int update_buffer_lossy(unsigned int max_mtu, } if (changed) { - err = update_xoff_threshold(port_buffer, xoff, max_mtu); + err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; @@ -262,6 +266,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, u32 *buffer_size, u8 *prio2buffer) { + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5e_port_buffer port_buffer; u32 xoff = calculate_xoff(priv, mtu); bool update_prio2buffer = false; @@ -282,7 +287,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; } @@ -292,7 +297,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, + err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz, &port_buffer, &update_buffer); if (err) return err; @@ -304,7 +309,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz, xoff, &port_buffer, &update_buffer); if (err) return err; @@ -329,7 +334,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, return -EINVAL; update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; } @@ -337,7 +342,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, /* Need to update buffer configuration if xoff value is changed */ if (!update_buffer && xoff != priv->dcbx.xoff) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h index 34f55b81a0de..80af7a5ac604 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h @@ -36,7 +36,6 @@ #include "port.h" #define MLX5E_MAX_BUFFER 8 -#define MLX5E_BUFFER_CELL_SHIFT 7 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */ #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c index baa162432e75..c3d167fa944c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c @@ -6,7 +6,6 @@ #include <linux/rculist.h> #include <linux/rtnetlink.h> #include <linux/workqueue.h> -#include <linux/rwlock.h> #include <linux/spinlock.h> #include <linux/notifier.h> #include <net/netevent.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 430025550fad..aad1c29b23db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ 
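The port_buffer hunks above stop assuming a fixed 128-byte cell (the removed MLX5E_BUFFER_CELL_SHIFT) and scale by the cell size queried from the device instead, multiplying on the read path and dividing via do_div() on the write path. A small standalone sketch of that conversion; the 128 and 144 values are examples and the helper names are not the driver's:

#include <stdint.h>
#include <stdio.h>

static uint32_t bytes_to_cells(uint64_t bytes, uint16_t cell_sz)
{
	return bytes / cell_sz;			/* mirrors the do_div() on write */
}

static uint64_t cells_to_bytes(uint32_t cells, uint16_t cell_sz)
{
	return (uint64_t)cells * cell_sz;	/* mirrors the read path */
}

int main(void)
{
	uint16_t cell_sz = 128;		/* a device may report e.g. 144 instead */
	uint64_t xoff_bytes = 37376;
	uint32_t cells = bytes_to_cells(xoff_bytes, cell_sz);

	printf("xoff %llu bytes -> %u cells -> %llu bytes programmed\n",
	       (unsigned long long)xoff_bytes, cells,
	       (unsigned long long)cells_to_bytes(cells, cell_sz));
	return 0;
}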
b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1097,6 +1097,7 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg) struct mlx5_ct_entry *entry = ptr; mlx5_tc_ct_entry_del_rules(ct_priv, entry); + kfree(entry); } static void diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index bc102d094bbd..d20243d6a032 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1217,6 +1217,24 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv) return 0; } +#define MLX5E_BUFFER_CELL_SHIFT 7 + +static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {}; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return (1 << MLX5E_BUFFER_CELL_SHIFT); + + if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), + MLX5_REG_SBCAM, 0, 0)) + return (1 << MLX5E_BUFFER_CELL_SHIFT); + + return MLX5_GET(sbcam_reg, out, cap_cell_size); +} + void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) { struct mlx5e_dcbx *dcbx = &priv->dcbx; @@ -1234,6 +1252,7 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) priv->dcbx.cap |= DCB_CAP_DCBX_HOST; + priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv); priv->dcbx.manual_buffer = false; priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index ec5658bbe3c5..c2464c349117 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -200,7 +200,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev, struct ptys2ethtool_config **arr, u32 *size) { - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = mlx5e_ptys_ext_supported(mdev); *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; *size = ext ? 
ARRAY_SIZE(ptys2ext_ethtool_table) : @@ -883,7 +883,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, struct ethtool_link_ksettings *link_ksettings) { unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = mlx5e_ptys_ext_supported(mdev); ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); } @@ -913,7 +913,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, __func__, err); goto err_query_regs; } - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_capability); eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, @@ -1066,7 +1066,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, autoneg = link_ksettings->base.autoneg; speed = link_ksettings->base.speed; - ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext_supported = mlx5e_ptys_ext_supported(mdev); ext = ext_requested(autoneg, adver, ext_supported); if (!ext_supported && ext) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a836a02a2116..081f15074cac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3104,9 +3104,6 @@ int mlx5e_open(struct net_device *netdev) mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); - if (mlx5_vxlan_allowed(priv->mdev->vxlan)) - udp_tunnel_get_rx_info(netdev); - return err; } @@ -5121,6 +5118,10 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) if (err) goto err_destroy_flow_steering; +#ifdef CONFIG_MLX5_EN_ARFS + priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev); +#endif + return 0; err_destroy_flow_steering: @@ -5202,6 +5203,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) rtnl_lock(); if (netif_running(netdev)) mlx5e_open(netdev); + if (mlx5_vxlan_allowed(priv->mdev->vxlan)) + udp_tunnel_get_rx_info(netdev); netif_device_attach(netdev); rtnl_unlock(); } @@ -5216,6 +5219,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) rtnl_lock(); if (netif_running(priv->netdev)) mlx5e_close(priv->netdev); + if (mlx5_vxlan_allowed(priv->mdev->vxlan)) + udp_tunnel_drop_rx_info(priv->netdev); netif_device_detach(priv->netdev); rtnl_unlock(); @@ -5288,10 +5293,6 @@ int mlx5e_netdev_init(struct net_device *netdev, /* netdev init */ netif_carrier_off(netdev); -#ifdef CONFIG_MLX5_EN_ARFS - netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev); -#endif - return 0; err_free_cpumask: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7fc84f58e28a..cc8412151ca0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -4670,9 +4670,10 @@ static bool is_flow_rule_duplicate_allowed(struct net_device *dev, struct mlx5e_rep_priv *rpriv) { /* Offloaded flow rule is allowed to duplicate on non-uplink representor - * sharing tc block with other slaves of a lag device. + * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this + * function is called from NIC mode. 
*/ - return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK; + return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK; } int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, @@ -4686,13 +4687,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, rcu_read_lock(); flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); - rcu_read_unlock(); if (flow) { /* Same flow rule offloaded to non-uplink representor sharing tc block, * just return 0. */ if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev) - goto out; + goto rcu_unlock; NL_SET_ERR_MSG_MOD(extack, "flow cookie already exists, ignoring"); @@ -4700,8 +4700,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, "flow cookie %lx already exists, ignoring\n", f->cookie); err = -EEXIST; - goto out; + goto rcu_unlock; } +rcu_unlock: + rcu_read_unlock(); + if (flow) + goto out; trace_mlx5e_configure_flower(f); err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index 5dc335e621c5..b68976b378b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -217,7 +217,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, } /* Create ingress allow rule */ - memset(spec, 0, sizeof(*spec)); spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 9f829e68fc73..e4186e84b3ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) return 0; } -static int mlx5_eeprom_page(int offset) +static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num, + u8 *module_id) +{ + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; + u32 out[MLX5_ST_SZ_DW(mcia_reg)]; + int err, status; + u8 *ptr; + + MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW); + MLX5_SET(mcia_reg, in, module, module_num); + MLX5_SET(mcia_reg, in, device_address, 0); + MLX5_SET(mcia_reg, in, page_number, 0); + MLX5_SET(mcia_reg, in, size, 1); + MLX5_SET(mcia_reg, in, l, 0); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MCIA, 0, 0); + if (err) + return err; + + status = MLX5_GET(mcia_reg, out, status); + if (status) { + mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + + *module_id = ptr[0]; + + return 0; +} + +static int mlx5_qsfp_eeprom_page(u16 offset) { if (offset < MLX5_EEPROM_PAGE_LENGTH) /* Addresses between 0-255 - page 00 */ @@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset) MLX5_EEPROM_HIGH_PAGE_LENGTH); } -static int mlx5_eeprom_high_page_offset(int page_num) +static int mlx5_qsfp_eeprom_high_page_offset(int page_num) { if (!page_num) /* Page 0 always start from low page */ return 0; @@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num) return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH; } +static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) 
+{ + *i2c_addr = MLX5_I2C_ADDR_LOW; + *page_num = mlx5_qsfp_eeprom_page(*offset); + *offset -= mlx5_qsfp_eeprom_high_page_offset(*page_num); +} + +static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = MLX5_I2C_ADDR_LOW; + *page_num = 0; + + if (*offset < MLX5_EEPROM_PAGE_LENGTH) + return; + + *i2c_addr = MLX5_I2C_ADDR_HIGH; + *offset -= MLX5_EEPROM_PAGE_LENGTH; +} + int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, u16 offset, u16 size, u8 *data) { - int module_num, page_num, status, err; + int module_num, status, err, page_num = 0; + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; u32 out[MLX5_ST_SZ_DW(mcia_reg)]; - u32 in[MLX5_ST_SZ_DW(mcia_reg)]; - u16 i2c_addr; - void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + u16 i2c_addr = 0; + u8 module_id; + void *ptr; err = mlx5_query_module_num(dev, &module_num); if (err) return err; - memset(in, 0, sizeof(in)); - size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); - - /* Get the page number related to the given offset */ - page_num = mlx5_eeprom_page(offset); + err = mlx5_query_module_id(dev, module_num, &module_id); + if (err) + return err; - /* Set the right offset according to the page number, - * For page_num > 0, relative offset is always >= 128 (high page). - */ - offset -= mlx5_eeprom_high_page_offset(page_num); + switch (module_id) { + case MLX5_MODULE_ID_SFP: + mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + break; + case MLX5_MODULE_ID_QSFP: + case MLX5_MODULE_ID_QSFP_PLUS: + case MLX5_MODULE_ID_QSFP28: + mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + break; + default: + mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; + } if (offset + size > MLX5_EEPROM_PAGE_LENGTH) /* Cross pages read, read until offset 256 in low page */ size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; - i2c_addr = MLX5_I2C_ADDR_LOW; + size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); MLX5_SET(mcia_reg, in, l, 0); MLX5_SET(mcia_reg, in, module, module_num); @@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, return -EIO; } + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); memcpy(data, ptr, size); return size; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e9ccd333f61d..d6d6fe64887b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -710,7 +710,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); if (err) - return err; + goto err_trap_register; err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core); if (err) @@ -722,6 +722,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err_emad_trap_set: mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); +err_trap_register: destroy_workqueue(mlxsw_core->emad_wq); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 08215fed193d..a7d86df7123f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -45,7 +45,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, static int mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, u16 offset, u16 size, void *data, - unsigned int *p_read_size) + bool qsfp, unsigned int *p_read_size) { char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE]; char 
mcia_pl[MLXSW_REG_MCIA_LEN]; @@ -54,6 +54,10 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, int status; int err; + /* MCIA register accepts buffer size <= 48. Page of size 128 should be + * read by chunks of size 48, 48, 32. Align the size of the last chunk + * to avoid reading after the end of the page. + */ size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE); if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH && @@ -63,18 +67,25 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW; if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) { - page = MLXSW_REG_MCIA_PAGE_GET(offset); - offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page; - /* When reading upper pages 1, 2 and 3 the offset starts at - * 128. Please refer to "QSFP+ Memory Map" figure in SFF-8436 - * specification for graphical depiction. - * MCIA register accepts buffer size <= 48. Page of size 128 - * should be read by chunks of size 48, 48, 32. Align the size - * of the last chunk to avoid reading after the end of the - * page. - */ - if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) - size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset; + if (qsfp) { + /* When reading upper pages 1, 2 and 3 the offset + * starts at 128. Please refer to "QSFP+ Memory Map" + * figure in SFF-8436 specification for graphical + * depiction. + */ + page = MLXSW_REG_MCIA_PAGE_GET(offset); + offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page; + if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) + size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset; + } else { + /* When reading upper pages 1, 2 and 3 the offset + * starts at 0 and I2C high address is used. Please refer + * refer to "Memory Organization" figure in SFF-8472 + * specification for graphical depiction. + */ + i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH; + offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH; + } } mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr); @@ -166,7 +177,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module, int err; err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset, - module_info, &read_size); + module_info, false, &read_size); if (err) return err; @@ -197,7 +208,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module, /* Verify if transceiver provides diagnostic monitoring page */ err = mlxsw_env_query_module_eeprom(mlxsw_core, module, SFP_DIAGMON, 1, &diag_mon, - &read_size); + false, &read_size); if (err) return err; @@ -225,17 +236,22 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev, int offset = ee->offset; unsigned int read_size; int i = 0; + bool qsfp; int err; if (!ee->len) return -EINVAL; memset(data, 0, ee->len); + /* Validate module identifier value. 
*/ + err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp); + if (err) + return err; while (i < ee->len) { err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset, ee->len - i, data + i, - &read_size); + qsfp, &read_size); if (err) { netdev_err(netdev, "Eeprom query failed\n"); return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index fd0e97de44e7..c04ec1a92826 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1414,23 +1414,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, u16 num_pages; int err; - mutex_init(&mlxsw_pci->cmd.lock); - init_waitqueue_head(&mlxsw_pci->cmd.wait); - mlxsw_pci->core = mlxsw_core; mbox = mlxsw_cmd_mbox_alloc(); if (!mbox) return -ENOMEM; - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); - if (err) - goto mbox_put; - - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); - if (err) - goto err_out_mbox_alloc; - err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); if (err) goto err_sw_reset; @@ -1537,9 +1526,6 @@ err_query_fw: mlxsw_pci_free_irq_vectors(mlxsw_pci); err_alloc_irq: err_sw_reset: - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); -err_out_mbox_alloc: - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); mbox_put: mlxsw_cmd_mbox_free(mbox); return err; @@ -1553,8 +1539,6 @@ static void mlxsw_pci_fini(void *bus_priv) mlxsw_pci_aqs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); mlxsw_pci_free_irq_vectors(mlxsw_pci); - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); } static struct mlxsw_pci_queue * @@ -1776,6 +1760,37 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, }; +static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci) +{ + int err; + + mutex_init(&mlxsw_pci->cmd.lock); + init_waitqueue_head(&mlxsw_pci->cmd.wait); + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); + if (err) + goto err_in_mbox_alloc; + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + if (err) + goto err_out_mbox_alloc; + + return 0; + +err_out_mbox_alloc: + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); +err_in_mbox_alloc: + mutex_destroy(&mlxsw_pci->cmd.lock); + return err; +} + +static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci) +{ + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); + mutex_destroy(&mlxsw_pci->cmd.lock); +} + static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const char *driver_name = pdev->driver->name; @@ -1831,6 +1846,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->pdev = pdev; pci_set_drvdata(pdev, mlxsw_pci); + err = mlxsw_pci_cmd_init(mlxsw_pci); + if (err) + goto err_pci_cmd_init; + mlxsw_pci->bus_info.device_kind = driver_name; mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; @@ -1848,6 +1867,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_bus_device_register: + mlxsw_pci_cmd_fini(mlxsw_pci); +err_pci_cmd_init: iounmap(mlxsw_pci->hw_addr); err_ioremap: err_pci_resource_len_check: @@ -1865,6 +1886,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); 
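Both the mlx5 and mlxsw hunks above split module EEPROM addressing by module type: QSFP keeps the low I2C address and maps offsets past the 256-byte lower area onto 128-byte upper pages, while SFP stays on page 0 and switches to the high I2C address. A standalone sketch of the two mappings, assuming the usual 0x50/0x51 module addresses (the structure and helper names are illustrative):

#include <stdio.h>

#define EEPROM_LOW_LEN	256	/* lower memory area, SFF-8436/SFF-8472 */
#define EEPROM_UP_LEN	128	/* size of each QSFP upper page */
#define I2C_ADDR_LOW	0x50	/* A0h, assumed value */
#define I2C_ADDR_HIGH	0x51	/* A2h, assumed value */

struct eeprom_params {
	unsigned int i2c_addr;
	int page;
	unsigned int offset;
};

static struct eeprom_params qsfp_params(unsigned int offset)
{
	struct eeprom_params p = { I2C_ADDR_LOW, 0, offset };

	if (offset >= EEPROM_LOW_LEN) {
		/* upper pages 1..3 are all accessed at offsets 128..255 */
		p.page = (offset - EEPROM_LOW_LEN) / EEPROM_UP_LEN + 1;
		p.offset = offset - p.page * EEPROM_UP_LEN;
	}
	return p;
}

static struct eeprom_params sfp_params(unsigned int offset)
{
	struct eeprom_params p = { I2C_ADDR_LOW, 0, offset };

	if (offset >= EEPROM_LOW_LEN) {
		/* SFF-8472 keeps page 0 but moves to the A2h address */
		p.i2c_addr = I2C_ADDR_HIGH;
		p.offset = offset - EEPROM_LOW_LEN;
	}
	return p;
}

int main(void)
{
	struct eeprom_params q = qsfp_params(300), s = sfp_params(300);

	printf("QSFP offset 300 -> addr 0x%x page %d offset %u\n",
	       q.i2c_addr, q.page, q.offset);
	printf("SFP  offset 300 -> addr 0x%x page %d offset %u\n",
	       s.i2c_addr, s.page, s.offset);
	return 0;
}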
mlxsw_core_bus_device_unregister(mlxsw_pci->core, false); + mlxsw_pci_cmd_fini(mlxsw_pci); iounmap(mlxsw_pci->hw_addr); pci_release_regions(mlxsw_pci->pdev); pci_disable_device(mlxsw_pci->pdev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 770de0222e7b..019ed503aadf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6262,7 +6262,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, } fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); - if (WARN_ON(!fib_work)) + if (!fib_work) return NOTIFY_BAD; fib_work->mlxsw_sp = router->mlxsw_sp; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 9b63574b6202..b5f1849fd280 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -98,7 +98,7 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) { struct sk_buff **skb_ptr = NULL; struct sk_buff **temp; -#define NR_SKB_COMPLETED 128 +#define NR_SKB_COMPLETED 16 struct sk_buff *completed[NR_SKB_COMPLETED]; int more; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index f7e3ce3de04d..095561924bdc 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -103,15 +103,18 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct ionic_lif *lif = netdev_priv(netdev); + unsigned int offset; unsigned int size; regs->version = IONIC_DEV_CMD_REG_VERSION; + offset = 0; size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32); - memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size); + memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size); + offset += size; size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32); - memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size); + memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size); } static int ionic_get_link_ksettings(struct net_device *netdev, @@ -468,12 +471,18 @@ static void ionic_get_ringparam(struct net_device *netdev, ring->rx_pending = lif->nrxq_descs; } +static void ionic_set_ringsize(struct ionic_lif *lif, void *arg) +{ + struct ethtool_ringparam *ring = arg; + + lif->ntxq_descs = ring->tx_pending; + lif->nrxq_descs = ring->rx_pending; +} + static int ionic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ionic_lif *lif = netdev_priv(netdev); - bool running; - int err; if (ring->rx_mini_pending || ring->rx_jumbo_pending) { netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n"); @@ -491,22 +500,7 @@ static int ionic_set_ringparam(struct net_device *netdev, ring->rx_pending == lif->nrxq_descs) return 0; - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); - if (err) - return err; - - running = test_bit(IONIC_LIF_F_UP, lif->state); - if (running) - ionic_stop(netdev); - - lif->ntxq_descs = ring->tx_pending; - lif->nrxq_descs = ring->rx_pending; - - if (running) - ionic_open(netdev); - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); - - return 0; + return ionic_reset_queues(lif, ionic_set_ringsize, ring); } static void ionic_get_channels(struct net_device *netdev, @@ -521,12 +515,17 @@ static void ionic_get_channels(struct net_device *netdev, ch->combined_count = lif->nxqs; } +static void 
ionic_set_queuecount(struct ionic_lif *lif, void *arg) +{ + struct ethtool_channels *ch = arg; + + lif->nxqs = ch->combined_count; +} + static int ionic_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct ionic_lif *lif = netdev_priv(netdev); - bool running; - int err; if (!ch->combined_count || ch->other_count || ch->rx_count || ch->tx_count) @@ -535,21 +534,7 @@ static int ionic_set_channels(struct net_device *netdev, if (ch->combined_count == lif->nxqs) return 0; - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); - if (err) - return err; - - running = test_bit(IONIC_LIF_F_UP, lif->state); - if (running) - ionic_stop(netdev); - - lif->nxqs = ch->combined_count; - - if (running) - ionic_open(netdev); - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); - - return 0; + return ionic_reset_queues(lif, ionic_set_queuecount, ch); } static u32 ionic_get_priv_flags(struct net_device *netdev) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index aaa00edd9d5b..5fd31ba56937 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -96,8 +96,7 @@ static void ionic_link_status_check(struct ionic_lif *lif) u16 link_status; bool link_up; - if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) || - test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state)) + if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) return; link_status = le16_to_cpu(lif->info->status.link_status); @@ -114,16 +113,22 @@ static void ionic_link_status_check(struct ionic_lif *lif) netif_carrier_on(netdev); } - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) + if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { + mutex_lock(&lif->queue_lock); ionic_start_queues(lif); + mutex_unlock(&lif->queue_lock); + } } else { if (netif_carrier_ok(netdev)) { netdev_info(netdev, "Link down\n"); netif_carrier_off(netdev); } - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) + if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { + mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); + mutex_unlock(&lif->queue_lock); + } } clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); @@ -863,8 +868,7 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) if (f) return 0; - netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr, - ctx.comp.rx_filter_add.filter_id); + netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); err = ionic_adminq_post_wait(lif, &ctx); @@ -893,6 +897,9 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) return -ENOENT; } + netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", + addr, f->filter_id); + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); spin_unlock_bh(&lif->rx_filters.lock); @@ -901,9 +908,6 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) if (err && err != -EEXIST) return err; - netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, - ctx.cmd.rx_filter_del.filter_id); - return 0; } @@ -1313,7 +1317,7 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu) return err; netdev->mtu = new_mtu; - err = ionic_reset_queues(lif); + err = ionic_reset_queues(lif, NULL, NULL); return err; } @@ -1325,7 +1329,7 @@ static void ionic_tx_timeout_work(struct work_struct *ws) netdev_info(lif->netdev, "Tx Timeout recovery\n"); 
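The ionic_set_ringparam/ionic_set_channels rework above funnels every "stop, change config, restart" sequence through ionic_reset_queues() with a callback, serialized by the new queue_lock instead of the IONIC_LIF_F_QUEUE_RESET bit. A minimal userspace sketch of that pattern; the struct, mutex and helper names below are stand-ins, not the driver's API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef void (*reset_cb)(void *lif, void *arg);

struct fake_lif {
	pthread_mutex_t queue_lock;
	bool running;
	int ntxq_descs;
};

static void set_ringsize(void *lif, void *arg)
{
	((struct fake_lif *)lif)->ntxq_descs = *(int *)arg;
}

static void reset_queues(struct fake_lif *lif, reset_cb cb, void *arg)
{
	pthread_mutex_lock(&lif->queue_lock);

	bool was_running = lif->running;

	if (was_running)
		lif->running = false;	/* stands in for ionic_stop() */
	if (cb)
		cb(lif, arg);		/* apply the config change while down */
	if (was_running)
		lif->running = true;	/* stands in for ionic_open() */

	pthread_mutex_unlock(&lif->queue_lock);
}

int main(void)
{
	struct fake_lif lif = { .running = true, .ntxq_descs = 1024 };
	int descs = 4096;

	pthread_mutex_init(&lif.queue_lock, NULL);
	reset_queues(&lif, set_ringsize, &descs);
	printf("ntxq_descs now %d, running=%d\n", lif.ntxq_descs, lif.running);
	pthread_mutex_destroy(&lif.queue_lock);
	return 0;
}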
rtnl_lock(); - ionic_reset_queues(lif); + ionic_reset_queues(lif, NULL, NULL); rtnl_unlock(); } @@ -1351,13 +1355,11 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, }; int err; + netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid); err = ionic_adminq_post_wait(lif, &ctx); if (err) return err; - netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid, - ctx.comp.rx_filter_add.filter_id); - return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); } @@ -1382,8 +1384,8 @@ static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, return -ENOENT; } - netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid, - le32_to_cpu(ctx.cmd.rx_filter_del.filter_id)); + netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", + vid, f->filter_id); ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); @@ -1673,6 +1675,14 @@ int ionic_open(struct net_device *netdev) if (err) goto err_out; + err = netif_set_real_num_tx_queues(netdev, lif->nxqs); + if (err) + goto err_txrx_deinit; + + err = netif_set_real_num_rx_queues(netdev, lif->nxqs); + if (err) + goto err_txrx_deinit; + /* don't start the queues until we have link */ if (netif_carrier_ok(netdev)) { err = ionic_start_queues(lif); @@ -1980,26 +1990,28 @@ static const struct net_device_ops ionic_netdev_ops = { .ndo_get_vf_stats = ionic_get_vf_stats, }; -int ionic_reset_queues(struct ionic_lif *lif) +int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg) { bool running; int err = 0; - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); - if (err) - return err; - + mutex_lock(&lif->queue_lock); running = netif_running(lif->netdev); if (running) { netif_device_detach(lif->netdev); err = ionic_stop(lif->netdev); + if (err) + return err; } - if (!err && running) { - ionic_open(lif->netdev); + + if (cb) + cb(lif, arg); + + if (running) { + err = ionic_open(lif->netdev); netif_device_attach(lif->netdev); } - - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); + mutex_unlock(&lif->queue_lock); return err; } @@ -2146,7 +2158,9 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif) if (test_bit(IONIC_LIF_F_UP, lif->state)) { dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); + mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); + mutex_unlock(&lif->queue_lock); } if (netif_running(lif->netdev)) { @@ -2265,15 +2279,15 @@ static void ionic_lif_deinit(struct ionic_lif *lif) cancel_work_sync(&lif->deferred.work); cancel_work_sync(&lif->tx_timeout_work); ionic_rx_filters_deinit(lif); + if (lif->netdev->features & NETIF_F_RXHASH) + ionic_lif_rss_deinit(lif); } - if (lif->netdev->features & NETIF_F_RXHASH) - ionic_lif_rss_deinit(lif); - napi_disable(&lif->adminqcq->napi); ionic_lif_qcq_deinit(lif, lif->notifyqcq); ionic_lif_qcq_deinit(lif, lif->adminqcq); + mutex_destroy(&lif->queue_lock); ionic_lif_reset(lif); } @@ -2450,6 +2464,7 @@ static int ionic_lif_init(struct ionic_lif *lif) return err; lif->hw_index = le16_to_cpu(comp.hw_index); + mutex_init(&lif->queue_lock); /* now that we have the hw_index we can figure out our doorbell page */ lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index c3428034a17b..8dc2c5d77424 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -135,7 +135,6 @@ enum ionic_lif_state_flags { IONIC_LIF_F_SW_DEBUG_STATS, 
IONIC_LIF_F_UP, IONIC_LIF_F_LINK_CHECK_REQUESTED, - IONIC_LIF_F_QUEUE_RESET, IONIC_LIF_F_FW_RESET, /* leave this as last */ @@ -165,6 +164,7 @@ struct ionic_lif { unsigned int hw_index; unsigned int kern_pid; u64 __iomem *kern_dbpage; + struct mutex queue_lock; /* lock for queue structures */ spinlock_t adminq_lock; /* lock for AdminQ operations */ struct ionic_qcq *adminqcq; struct ionic_qcq *notifyqcq; @@ -213,12 +213,6 @@ struct ionic_lif { #define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q) #define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q) -/* return 0 if successfully set the bit, else non-zero */ -static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname) -{ - return wait_on_bit_lock(lif->state, bitname, TASK_INTERRUPTIBLE); -} - static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs) { u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult); @@ -248,6 +242,8 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units) return (units * div) / mult; } +typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg); + void ionic_link_status_check_request(struct ionic_lif *lif); void ionic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *ns); @@ -267,7 +263,7 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types, int ionic_open(struct net_device *netdev); int ionic_stop(struct net_device *netdev); -int ionic_reset_queues(struct ionic_lif *lif); +int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg); static inline void debug_stats_txq_post(struct ionic_qcq *qcq, struct ionic_txq_desc *desc, bool dbell) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index 80eeb7696e01..cd0076fc3044 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -21,13 +21,16 @@ void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f) void ionic_rx_filter_replay(struct ionic_lif *lif) { struct ionic_rx_filter_add_cmd *ac; + struct hlist_head new_id_list; struct ionic_admin_ctx ctx; struct ionic_rx_filter *f; struct hlist_head *head; struct hlist_node *tmp; + unsigned int key; unsigned int i; int err; + INIT_HLIST_HEAD(&new_id_list); ac = &ctx.cmd.rx_filter_add; for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { @@ -58,9 +61,30 @@ void ionic_rx_filter_replay(struct ionic_lif *lif) ac->mac.addr); break; } + spin_lock_bh(&lif->rx_filters.lock); + ionic_rx_filter_free(lif, f); + spin_unlock_bh(&lif->rx_filters.lock); + + continue; } + + /* remove from old id list, save new id in tmp list */ + spin_lock_bh(&lif->rx_filters.lock); + hlist_del(&f->by_id); + spin_unlock_bh(&lif->rx_filters.lock); + f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id); + hlist_add_head(&f->by_id, &new_id_list); } } + + /* rebuild the by_id hash lists with the new filter ids */ + spin_lock_bh(&lif->rx_filters.lock); + hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) { + key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK; + head = &lif->rx_filters.by_id[key]; + hlist_add_head(&f->by_id, head); + } + spin_unlock_bh(&lif->rx_filters.lock); } int ionic_rx_filters_init(struct ionic_lif *lif) @@ -69,10 +93,12 @@ int ionic_rx_filters_init(struct ionic_lif *lif) spin_lock_init(&lif->rx_filters.lock); + spin_lock_bh(&lif->rx_filters.lock); for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]); INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]); } + 
spin_unlock_bh(&lif->rx_filters.lock); return 0; } @@ -84,11 +110,13 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif) struct hlist_node *tmp; unsigned int i; + spin_lock_bh(&lif->rx_filters.lock); for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { head = &lif->rx_filters.by_id[i]; hlist_for_each_entry_safe(f, tmp, head, by_id) ionic_rx_filter_free(lif, f); } + spin_unlock_bh(&lif->rx_filters.lock); } int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, @@ -124,6 +152,7 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); f->rxq_index = rxq_index; memcpy(&f->cmd, ac, sizeof(f->cmd)); + netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id); INIT_HLIST_NODE(&f->by_hash); INIT_HLIST_NODE(&f->by_id); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index b7f900c11834..85eb8f276a37 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -161,12 +161,6 @@ static void ionic_rx_clean(struct ionic_queue *q, return; } - /* no packet processing while resetting */ - if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) { - stats->dropped++; - return; - } - stats->pkts++; stats->bytes += le16_to_cpu(comp->len); diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index a49743d56b9c..6c2f9ff4a53e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -876,6 +876,8 @@ struct qed_dev { struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM]; u8 engine_for_debug; bool disable_ilt_dump; + bool dbg_bin_dump; + DECLARE_HASHTABLE(connections, 10); const struct firmware *firmware; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 08ba9d54ab63..d13ec88313c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2008,8 +2008,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, enum protocol_type proto; if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { - DP_NOTICE(p_hwfn, - "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 81e8fbe4a05b..3b9bbafafe68 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -7506,6 +7506,12 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->print_dbg_data) qed_dbg_print_feature(text_buf, text_size_bytes); + /* Just return the original binary buffer if requested */ + if (p_hwfn->cdev->dbg_bin_dump) { + vfree(text_buf); + return DBG_STATUS_OK; + } + /* Free the old dump_buf and point the dump_buf to the newly allocagted * and formatted text buffer. 
*/ @@ -7733,7 +7739,9 @@ int qed_dbg_mcp_trace_size(struct qed_dev *cdev) #define REGDUMP_HEADER_SIZE_SHIFT 0 #define REGDUMP_HEADER_SIZE_MASK 0xffffff #define REGDUMP_HEADER_FEATURE_SHIFT 24 -#define REGDUMP_HEADER_FEATURE_MASK 0x3f +#define REGDUMP_HEADER_FEATURE_MASK 0x1f +#define REGDUMP_HEADER_BIN_DUMP_SHIFT 29 +#define REGDUMP_HEADER_BIN_DUMP_MASK 0x1 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30 #define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1 #define REGDUMP_HEADER_ENGINE_SHIFT 31 @@ -7771,6 +7779,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev, feature, feature_size); SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature); + SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1); SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine); SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine); @@ -7794,6 +7803,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) omit_engine = 1; mutex_lock(&qed_dbg_lock); + cdev->dbg_bin_dump = true; org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { @@ -7931,6 +7941,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc); } + /* Re-populate nvm attribute info */ + qed_mcp_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_populate(p_hwfn); + /* nvm cfg1 */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + @@ -7993,6 +8007,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc); } + cdev->dbg_bin_dump = false; mutex_unlock(&qed_dbg_lock); return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3aa51374e727..dbdac983ccde 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3102,7 +3102,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) } /* Log and clear previous pglue_b errors if such exist */ - qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); + qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); /* Enable the PF's internal FID_enable in the PXP */ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, @@ -4472,12 +4472,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return 0; } -static void qed_nvm_info_free(struct qed_hwfn *p_hwfn) -{ - kfree(p_hwfn->nvm_info.image_att); - p_hwfn->nvm_info.image_att = NULL; -} - static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_regview, void __iomem *p_doorbells, @@ -4562,7 +4556,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, return rc; err3: if (IS_LEAD_HWFN(p_hwfn)) - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); err2: if (IS_LEAD_HWFN(p_hwfn)) qed_iov_free_hw_info(p_hwfn->cdev); @@ -4623,7 +4617,7 @@ int qed_hw_prepare(struct qed_dev *cdev, if (rc) { if (IS_PF(cdev)) { qed_init_free(p_hwfn); - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); qed_mcp_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); } @@ -4657,7 +4651,7 @@ void qed_hw_remove(struct qed_dev *cdev) qed_iov_free_hw_info(cdev); - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); } static void qed_chain_free_next_ptr(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index b7b974f0ef21..5eec1fc6229d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -257,9 +257,10 @@ out: #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) 
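In the qed_debug changes above, the regdump header narrows the feature field to five bits so bit 29 can carry the new BIN_DUMP flag next to OMIT_ENGINE and ENGINE. A standalone sketch of that packing, assuming the bit positions from the REGDUMP_HEADER_* defines (the helper itself is illustrative, not the driver's SET_FIELD):

#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE_MASK		0xffffffu	/* bits 0..23: dump size */
#define HDR_FEATURE_SHIFT	24		/* bits 24..28: feature id */
#define HDR_FEATURE_MASK	0x1fu
#define HDR_BIN_DUMP_SHIFT	29
#define HDR_OMIT_ENGINE_SHIFT	30
#define HDR_ENGINE_SHIFT	31

static uint32_t regdump_header(uint32_t size, uint32_t feature,
			       int bin_dump, int omit_engine, int engine)
{
	return (size & HDR_SIZE_MASK) |
	       ((feature & HDR_FEATURE_MASK) << HDR_FEATURE_SHIFT) |
	       ((uint32_t)!!bin_dump << HDR_BIN_DUMP_SHIFT) |
	       ((uint32_t)!!omit_engine << HDR_OMIT_ENGINE_SHIFT) |
	       ((uint32_t)!!engine << HDR_ENGINE_SHIFT);
}

int main(void)
{
	/* feature 3, 1 MiB of dump data, raw binary dump, engine 1 */
	printf("header = 0x%08x\n", regdump_header(1u << 20, 3, 1, 0, 1));
	return 0;
}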
#define PGLUE_ATTENTION_ILT_VALID (1 << 23) -int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init) { + char msg[256]; u32 tmp; tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); @@ -273,22 +274,23 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); - DP_NOTICE(p_hwfn, - "Illegal write by chip to [%08x:%08x] blocked.\n" - "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" - "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", - addr_hi, addr_lo, details, - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), - GET_FIELD(details, - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, - tmp, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); + snprintf(msg, sizeof(msg), + "Illegal write by chip to [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID), + tmp, + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN)); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); @@ -321,8 +323,14 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); - if (tmp & PGLUE_ATTENTION_ICPL_VALID) - DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); + if (tmp & PGLUE_ATTENTION_ICPL_VALID) { + snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); + } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); if (tmp & PGLUE_ATTENTION_ZLR_VALID) { @@ -361,7 +369,7 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) { - return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); + return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); } static int qed_fw_assertion(struct qed_hwfn *p_hwfn) @@ -1193,7 +1201,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) index, attn_bits, attn_acks, asserted_bits, deasserted_bits, p_sb_attn_sw->known_attn); } else if (asserted_bits == 0x100) { - DP_INFO(p_hwfn, "MFW indication via attention\n"); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication via attention\n"); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "MFW indication [deassertion]\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index e09db3386367..110169e90121 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -442,7 +442,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) -int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, - struct 
qed_ptt *p_ptt); +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 9624616806e7..0fd4520d0666 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3280,6 +3280,13 @@ err0: return rc; } +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->nvm_info.image_att); + p_hwfn->nvm_info.image_att = NULL; + p_hwfn->nvm_info.valid = false; +} + int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 5750b4c5ef63..12a705ed4bac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1221,6 +1221,13 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); /** + * @brief Delete nvm info shadow in the given hardware function + * + * @param p_hwfn + */ +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); + +/** * @brief Get the engine affinity configuration. * * @param p_hwfn diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 40efe60eff8d..fcdecddb2812 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -47,15 +47,23 @@ static int rmnet_unregister_real_device(struct net_device *real_dev) return 0; } -static int rmnet_register_real_device(struct net_device *real_dev) +static int rmnet_register_real_device(struct net_device *real_dev, + struct netlink_ext_ack *extack) { struct rmnet_port *port; int rc, entry; ASSERT_RTNL(); - if (rmnet_is_real_dev_registered(real_dev)) + if (rmnet_is_real_dev_registered(real_dev)) { + port = rmnet_get_port_rtnl(real_dev); + if (port->rmnet_mode != RMNET_EPMODE_VND) { + NL_SET_ERR_MSG_MOD(extack, "bridge device already exists"); + return -EINVAL; + } + return 0; + } port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) @@ -133,7 +141,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); - err = rmnet_register_real_device(real_dev); + err = rmnet_register_real_device(real_dev, extack); if (err) goto err0; @@ -422,7 +430,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, } if (port->rmnet_mode != RMNET_EPMODE_VND) { - NL_SET_ERR_MSG_MOD(extack, "bridge device already exists"); + NL_SET_ERR_MSG_MOD(extack, "more than one bridge dev attached"); return -EINVAL; } @@ -433,7 +441,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, return -EBUSY; } - err = rmnet_register_real_device(slave_dev); + err = rmnet_register_real_device(slave_dev, extack); if (err) return -EBUSY; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index a442bcf64b9c..99f7aae102ce 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1450,6 +1450,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) struct ravb_private *priv = container_of(work, struct ravb_private, work); struct net_device *ndev = priv->ndev; + int error; netif_tx_stop_all_queues(ndev); @@ -1458,15 +1459,36 @@ static void ravb_tx_timeout_work(struct work_struct *work) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ - 
ravb_stop_dma(ndev); + if (ravb_stop_dma(ndev)) { + /* If ravb_stop_dma() fails, the hardware is still operating + * for TX and/or RX. So, this should not call the following + * functions because ravb_dmac_init() is possible to fail too. + * Also, this should not retry ravb_stop_dma() again and again + * here because it's possible to wait forever. So, this just + * re-enables the TX and RX and skip the following + * re-initialization procedure. + */ + ravb_rcv_snd_enable(ndev); + goto out; + } ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); /* Device init */ - ravb_dmac_init(ndev); + error = ravb_dmac_init(ndev); + if (error) { + /* If ravb_dmac_init() fails, descriptors are freed. So, this + * should return here to avoid re-enabling the TX and RX in + * ravb_emac_init(). + */ + netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", + __func__, error); + return; + } ravb_emac_init(ndev); +out: /* Initialise PTP Clock driver */ if (priv->chip_id == RCAR_GEN2) ravb_ptp_init(ndev, priv->pdev); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 90410f9d3b1a..1c4fea9c3ec4 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2274,7 +2274,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, "power", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Optional reset GPIO configured? Minimum 100 ns reset needed @@ -2283,7 +2283,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, "reset", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Need to wait for optional EEPROM to load, max 750 us according diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index f2638446b62e..81b554dd7221 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1191,7 +1191,7 @@ static int ave_init(struct net_device *ndev) ret = regmap_update_bits(priv->regmap, SG_ETPINMODE, priv->pinmode_mask, priv->pinmode_val); if (ret) - return ret; + goto out_reset_assert; ave_global_reset(ndev); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 1492648247d9..6d778bc3d012 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1850,7 +1850,8 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common) port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE; port->ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_HW_CSUM; + NETIF_F_HW_CSUM | + NETIF_F_HW_TC; port->ndev->features = port->ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; port->ndev->vlan_features |= NETIF_F_SG; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 4661ef865807..dec52b763d50 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1615,11 +1615,11 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct geneve_dev *geneve = netdev_priv(dev); + enum ifla_geneve_df df = geneve->df; struct geneve_sock *gs4, *gs6; struct ip_tunnel_info info; bool metadata; bool use_udp6_rx_checksums; - enum ifla_geneve_df df; bool ttl_inherit; int err; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 2a6ec5394966..a4b3fce69ecd 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1242,7 
+1242,7 @@ static int rr_open(struct net_device *dev) rrpriv->info = NULL; } if (rrpriv->rx_ctrl) { - pci_free_consistent(pdev, sizeof(struct ring_ctrl), + pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl), rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); rrpriv->rx_ctrl = NULL; } diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 5a37514e4234..c11f32f644db 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -4,7 +4,7 @@ * * Copyright 2009-2017 Analog Devices Inc. * - * http://www.analog.com/ADF7242 + * https://www.analog.com/ADF7242 */ #include <linux/kernel.h> @@ -1262,7 +1262,7 @@ static int adf7242_probe(struct spi_device *spi) WQ_MEM_RECLAIM); if (unlikely(!lp->wqueue)) { ret = -ENOMEM; - goto err_hw_init; + goto err_alloc_wq; } ret = adf7242_hw_init(lp); @@ -1294,6 +1294,8 @@ static int adf7242_probe(struct spi_device *spi) return ret; err_hw_init: + destroy_workqueue(lp->wqueue); +err_alloc_wq: mutex_destroy(&lp->bmux); ieee802154_free_hw(lp->hw); diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 55226b264e3c..ac7e5a04c8ac 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -500,6 +500,13 @@ static int gsi_channel_stop_command(struct gsi_channel *channel) int ret; state = gsi_channel_state(channel); + + /* Channel could have entered STOPPED state since last call + * if it timed out. If so, we're done. + */ + if (state == GSI_CHANNEL_STATE_STOPPED) + return 0; + if (state != GSI_CHANNEL_STATE_STARTED && state != GSI_CHANNEL_STATE_STOP_IN_PROC) return -EINVAL; @@ -789,20 +796,11 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id) int gsi_channel_stop(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; - enum gsi_channel_state state; u32 retries; int ret; gsi_channel_freeze(channel); - /* Channel could have entered STOPPED state since last call if the - * STOP command timed out. We won't stop a channel if stopping it - * was successful previously (so we still want the freeze above). - */ - state = gsi_channel_state(channel); - if (state == GSI_CHANNEL_STATE_STOPPED) - return 0; - /* RX channels might require a little time to enter STOPPED state */ retries = channel->toward_ipa ? 
0 : GSI_CHANNEL_STOP_RX_RETRIES; diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index c9ab865e7290..d92dd3f09b73 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -586,6 +586,21 @@ u32 ipa_cmd_tag_process_count(void) return 4; } +void ipa_cmd_tag_process(struct ipa *ipa) +{ + u32 count = ipa_cmd_tag_process_count(); + struct gsi_trans *trans; + + trans = ipa_cmd_trans_alloc(ipa, count); + if (trans) { + ipa_cmd_tag_process_add(trans); + gsi_trans_commit_wait(trans); + } else { + dev_err(&ipa->pdev->dev, + "error allocating %u entry tag transaction\n", count); + } +} + static struct ipa_cmd_info * ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count) { diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index e440aa69c8b5..1a646e0264a0 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -172,6 +172,14 @@ void ipa_cmd_tag_process_add(struct gsi_trans *trans); u32 ipa_cmd_tag_process_count(void); /** + * ipa_cmd_tag_process() - Perform a tag process + * + * @Return: The number of elements to allocate in a transaction + * to hold tag process commands + */ +void ipa_cmd_tag_process(struct ipa *ipa); + +/** * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint * @ipa: IPA pointer * @tre_count: Number of elements in the transaction diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c index 52d4b84e0dac..de2768d71ab5 100644 --- a/drivers/net/ipa/ipa_data-sdm845.c +++ b/drivers/net/ipa/ipa_data-sdm845.c @@ -44,7 +44,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .seq_type = IPA_SEQ_INVALID, .config = { - .checksum = true, .aggregation = true, .status_enable = true, .rx = { diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 9f50d0d11704..9e58e495d373 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1450,6 +1450,8 @@ void ipa_endpoint_suspend(struct ipa *ipa) if (ipa->modem_netdev) ipa_modem_suspend(ipa->modem_netdev); + ipa_cmd_tag_process(ipa); + ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); } diff --git a/drivers/net/ipa/ipa_gsi.c b/drivers/net/ipa/ipa_gsi.c index dc4a5c2196ae..d323adb03383 100644 --- a/drivers/net/ipa/ipa_gsi.c +++ b/drivers/net/ipa/ipa_gsi.c @@ -6,6 +6,7 @@ #include <linux/types.h> +#include "ipa_gsi.h" #include "gsi_trans.h" #include "ipa.h" #include "ipa_endpoint.h" diff --git a/drivers/net/ipa/ipa_gsi.h b/drivers/net/ipa/ipa_gsi.h index 3cf18600c68e..0a40f3dc55fc 100644 --- a/drivers/net/ipa/ipa_gsi.h +++ b/drivers/net/ipa/ipa_gsi.h @@ -8,7 +8,9 @@ #include <linux/types.h> +struct gsi; struct gsi_trans; +struct ipa_gsi_endpoint_data; /** * ipa_gsi_trans_complete() - GSI transaction completion callback diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 03a1d0e55964..73413371e3d3 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -119,7 +119,7 @@ struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = { sizeof_field(struct ipa_driver_init_complete_rsp, rsp), .tlv_type = 0x02, - .elem_size = offsetof(struct ipa_driver_init_complete_rsp, + .offset = offsetof(struct ipa_driver_init_complete_rsp, rsp), .ei_array = qmi_response_type_v01_ei, }, @@ -137,7 +137,7 @@ struct qmi_elem_info ipa_init_complete_ind_ei[] = { sizeof_field(struct ipa_init_complete_ind, status), .tlv_type = 0x02, - 
.elem_size = offsetof(struct ipa_init_complete_ind, + .offset = offsetof(struct ipa_init_complete_ind, status), .ei_array = qmi_response_type_v01_ei, }, @@ -218,7 +218,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { sizeof_field(struct ipa_init_modem_driver_req, platform_type_valid), .tlv_type = 0x10, - .elem_size = offsetof(struct ipa_init_modem_driver_req, + .offset = offsetof(struct ipa_init_modem_driver_req, platform_type_valid), }, { diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index e56547bfdac9..9159846b8b93 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -4052,9 +4052,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, return err; netdev_lockdep_set_classes(dev); - lockdep_set_class_and_subclass(&dev->addr_list_lock, - &macsec_netdev_addr_lock_key, - dev->lower_level); + lockdep_set_class(&dev->addr_list_lock, + &macsec_netdev_addr_lock_key); err = netdev_upper_dev_link(real_dev, dev, extack); if (err < 0) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 6a6cc9f75307..4942f6112e51 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -880,9 +880,8 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; static void macvlan_set_lockdep_class(struct net_device *dev) { netdev_lockdep_set_classes(dev); - lockdep_set_class_and_subclass(&dev->addr_list_lock, - &macvlan_netdev_addr_lock_key, - dev->lower_level); + lockdep_set_class(&dev->addr_list_lock, + &macvlan_netdev_addr_lock_key); } static int macvlan_init(struct net_device *dev) diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 2908e0a0d6e1..23950e7a0f81 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -302,7 +302,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) rtnl_lock(); err = nsim_bpf_init(ns); if (err) - goto err_free_netdev; + goto err_rtnl_unlock; nsim_ipsec_init(ns); @@ -316,8 +316,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) err_ipsec_teardown: nsim_ipsec_teardown(ns); nsim_bpf_uninit(ns); +err_rtnl_unlock: rtnl_unlock(); -err_free_netdev: free_netdev(dev); return ERR_PTR(err); } diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index ecbd5e0d685c..acb0aae60755 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1260,6 +1260,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V1; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: @@ -1267,6 +1268,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: @@ -1274,6 +1276,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L2; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: @@ -1281,6 +1284,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2; dp83640->version = PTP_CLASS_V2; + 
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: return -ERANGE; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 858b012074bd..7adeb91bd368 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -62,6 +62,7 @@ #include <net/rtnetlink.h> #include <net/sock.h> #include <net/xdp.h> +#include <net/ip_tunnels.h> #include <linux/seq_file.h> #include <linux/uio.h> #include <linux/skb_array.h> @@ -1351,6 +1352,7 @@ static void tun_net_init(struct net_device *dev) switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: dev->netdev_ops = &tun_netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; /* Point-to-Point TUN Device */ dev->hard_header_len = 0; diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 4e514f5d7c6c..fd3a04d98dc1 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -187,6 +187,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); if (ret < ETH_ALEN) { netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); + ret = -EIO; goto free; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index bb8c34d746ab..5f123a8cf68e 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1390,8 +1390,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) unsigned long flags; if (old) - hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n", - tty->termios.c_cflag, old->c_cflag); + hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n", + (unsigned int)tty->termios.c_cflag, + (unsigned int)old->c_cflag); /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 31b1d4b959f6..07c42c0719f5 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1370,6 +1370,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 3cf4dc3433f9..bb4ccbda031a 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1287,11 +1287,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) /* Init all registers */ ret = smsc95xx_reset(dev); + if (ret) + goto free_pdata; /* detect device revision as different features may be available */ ret = smsc95xx_read_reg(dev, ID_REV, &val); if (ret < 0) - return ret; + goto free_pdata; + val >>= 16; pdata->chip_id = val; pdata->mdix_ctrl = get_mdix_status(dev->net); @@ -1317,6 +1320,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); return 0; + +free_pdata: + kfree(pdata); + return ret; } static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index c84536b03aa8..f70336bb6f52 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -71,8 +71,10 @@ 
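The hdlc_x25, lapbether and x25_asy hunks that follow all apply the same rule: a *_data_indication() receive hook owns the skb it is handed, so an skb_cow() failure must free the buffer before returning NET_RX_DROP, and the one-byte pseudo header is pushed only after the cow succeeds. A minimal sketch of that pattern, with a hypothetical my_data_indication() standing in for the drivers' actual callbacks:

#include <linux/if_x25.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/x25device.h>

/* Hypothetical receive hook: consumes the skb on success and on failure. */
static int my_data_indication(struct net_device *dev, struct sk_buff *skb)
{
	unsigned char *ptr;

	/* Ensure one writable byte of headroom; on failure we still own the
	 * skb, so drop it explicitly instead of leaking it.
	 */
	if (skb_cow(skb, 1)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Push the X.25 pseudo header only once the cow has succeeded. */
	ptr = skb_push(skb, 1);
	*ptr = X25_IFACE_DATA;

	skb->protocol = x25_type_trans(skb, dev);
	return netif_rx(skb);
}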
static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) { unsigned char *ptr; - if (skb_cow(skb, 1)) + if (skb_cow(skb, 1)) { + kfree_skb(skb); return NET_RX_DROP; + } skb_push(skb, 1); skb_reset_network_header(skb); diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index e30d91a38cfb..b2868433718f 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -128,10 +128,12 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) { unsigned char *ptr; - skb_push(skb, 1); - - if (skb_cow(skb, 1)) + if (skb_cow(skb, 1)) { + kfree_skb(skb); return NET_RX_DROP; + } + + skb_push(skb, 1); ptr = skb->data; *ptr = X25_IFACE_DATA; @@ -303,7 +305,6 @@ static void lapbeth_setup(struct net_device *dev) dev->netdev_ops = &lapbeth_netdev_ops; dev->needs_free_netdev = true; dev->type = ARPHRD_X25; - dev->hard_header_len = 3; dev->mtu = 1000; dev->addr_len = 0; } @@ -324,6 +325,14 @@ static int lapbeth_new_device(struct net_device *dev) if (!ndev) goto out; + /* When transmitting data: + * first this driver removes a pseudo header of 1 byte, + * then the lapb module prepends an LAPB header of at most 3 bytes, + * then this driver prepends a length field of 2 bytes, + * then the underlying Ethernet device prepends its own header. + */ + ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len; + lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 69773d228ec1..84640a0c13f3 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl) netif_wake_queue(sl->dev); } -/* Send one completely decapsulated IP datagram to the IP layer. */ +/* Send an LAPB frame to the LAPB module to process. */ static void x25_asy_bump(struct x25_asy *sl) { @@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl) count = sl->rcount; dev->stats.rx_bytes += count; - skb = dev_alloc_skb(count+1); + skb = dev_alloc_skb(count); if (skb == NULL) { netdev_warn(sl->dev, "memory squeeze, dropping packet\n"); dev->stats.rx_dropped++; return; } - skb_push(skb, 1); /* LAPB internal control */ skb_put_data(skb, sl->rbuff, count); skb->protocol = x25_type_trans(skb, sl->dev); err = lapb_data_received(skb->dev, skb); @@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl) kfree_skb(skb); printk(KERN_DEBUG "x25_asy: data received err - %d\n", err); } else { - netif_rx(skb); dev->stats.rx_packets++; } } @@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb, */ /* - * Called when I frame data arrives. We did the work above - throw it - * at the net layer. + * Called when I frame data arrive. We add a pseudo header for upper + * layers and pass it to upper layers. 
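Several earlier hunks in this set (smc91x, sni_ave, adf7242, smsc95xx) converge on the same probe/bind rule: once a resource has been allocated, every later failure must branch to a label that releases it rather than returning directly. A compact sketch of that unwind shape, with my_attach(), my_reset() and my_read_id() as hypothetical helpers rather than any driver's real API:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_ctx {
	u32 chip_id;
};

int my_reset(struct my_ctx *ctx);		/* hypothetical helper */
int my_read_id(struct my_ctx *ctx, u32 *val);	/* hypothetical helper */

/* Hypothetical attach: any failure after the allocation unwinds through
 * free_ctx so the context is never leaked.
 */
static int my_attach(struct my_ctx **out)
{
	struct my_ctx *ctx;
	u32 val;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ret = my_reset(ctx);
	if (ret)
		goto free_ctx;

	ret = my_read_id(ctx, &val);
	if (ret < 0)
		goto free_ctx;

	ctx->chip_id = val >> 16;
	*out = ctx;
	return 0;

free_ctx:
	kfree(ctx);
	return ret;
}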
*/ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) { + if (skb_cow(skb, 1)) { + kfree_skb(skb); + return NET_RX_DROP; + } + skb_push(skb, 1); + skb->data[0] = X25_IFACE_DATA; + + skb->protocol = x25_type_trans(skb, dev); + return netif_rx(skb); } @@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) switch (s) { case X25_END: if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - sl->rcount > 2) + sl->rcount >= 2) x25_asy_bump(sl); clear_bit(SLF_ESCAPE, &sl->flags); sl->rcount = 0; diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index a8f151b1b5fa..c9f65e96ccb0 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -262,6 +262,7 @@ static void wg_setup(struct net_device *dev) max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); dev->netdev_ops = &netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; dev->hard_header_len = 0; dev->addr_len = 0; dev->needed_headroom = DATA_PACKET_HEAD_ROOM; diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index c58df439dbbe..dfb674e03076 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -11,6 +11,7 @@ #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <net/ip_tunnels.h> struct wg_device; struct wg_peer; @@ -65,25 +66,9 @@ struct packet_cb { #define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) -/* Returns either the correct skb->protocol value, or 0 if invalid. */ -static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) -{ - if (skb_network_header(skb) >= skb->head && - (skb_network_header(skb) + sizeof(struct iphdr)) <= - skb_tail_pointer(skb) && - ip_hdr(skb)->version == 4) - return htons(ETH_P_IP); - if (skb_network_header(skb) >= skb->head && - (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= - skb_tail_pointer(skb) && - ipv6_hdr(skb)->version == 6) - return htons(ETH_P_IPV6); - return 0; -} - static inline bool wg_check_packet_protocol(struct sk_buff *skb) { - __be16 real_protocol = wg_examine_packet_protocol(skb); + __be16 real_protocol = ip_tunnel_parse_protocol(skb); return real_protocol && skb->protocol == real_protocol; } diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 9b2ab6fc91cd..2c9551ea6dc7 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -387,7 +387,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer, */ skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ~0; /* All levels */ - skb->protocol = wg_examine_packet_protocol(skb); + skb->protocol = ip_tunnel_parse_protocol(skb); if (skb->protocol == htons(ETH_P_IP)) { len = ntohs(ip_hdr(skb)->tot_len); if (unlikely(len < sizeof(struct iphdr))) diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 342a7e58018a..05a61975c83f 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -820,7 +820,7 @@ err_free_irq: ath10k_ahb_release_irq_legacy(ar); err_free_pipes: - ath10k_pci_free_pipes(ar); + ath10k_pci_release_resource(ar); err_resource_deinit: ath10k_ahb_resource_deinit(ar); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 1d941d53fdc9..cfde7791291a 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3473,6 +3473,28 @@ int 
ath10k_pci_setup_resource(struct ath10k *ar) timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); + ar_pci->attr = kmemdup(pci_host_ce_config_wlan, + sizeof(pci_host_ce_config_wlan), + GFP_KERNEL); + if (!ar_pci->attr) + return -ENOMEM; + + ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan, + sizeof(pci_target_ce_config_wlan), + GFP_KERNEL); + if (!ar_pci->pipe_config) { + ret = -ENOMEM; + goto err_free_attr; + } + + ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan, + sizeof(pci_target_service_to_ce_map_wlan), + GFP_KERNEL); + if (!ar_pci->serv_to_pipe) { + ret = -ENOMEM; + goto err_free_pipe_config; + } + if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) ath10k_pci_override_ce_config(ar); @@ -3480,18 +3502,31 @@ int ath10k_pci_setup_resource(struct ath10k *ar) if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", ret); - return ret; + goto err_free_serv_to_pipe; } return 0; + +err_free_serv_to_pipe: + kfree(ar_pci->serv_to_pipe); +err_free_pipe_config: + kfree(ar_pci->pipe_config); +err_free_attr: + kfree(ar_pci->attr); + return ret; } void ath10k_pci_release_resource(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + ath10k_pci_rx_retry_sync(ar); netif_napi_del(&ar->napi); ath10k_pci_ce_deinit(ar); ath10k_pci_free_pipes(ar); + kfree(ar_pci->attr); + kfree(ar_pci->pipe_config); + kfree(ar_pci->serv_to_pipe); } static const struct ath10k_bus_ops ath10k_pci_bus_ops = { @@ -3601,30 +3636,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0); - ar_pci->attr = kmemdup(pci_host_ce_config_wlan, - sizeof(pci_host_ce_config_wlan), - GFP_KERNEL); - if (!ar_pci->attr) { - ret = -ENOMEM; - goto err_free; - } - - ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan, - sizeof(pci_target_ce_config_wlan), - GFP_KERNEL); - if (!ar_pci->pipe_config) { - ret = -ENOMEM; - goto err_free; - } - - ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan, - sizeof(pci_target_service_to_ce_map_wlan), - GFP_KERNEL); - if (!ar_pci->serv_to_pipe) { - ret = -ENOMEM; - goto err_free; - } - ret = ath10k_pci_setup_resource(ar); if (ret) { ath10k_err(ar, "failed to setup resource: %d\n", ret); @@ -3705,10 +3716,9 @@ err_unsupported: err_free_irq: ath10k_pci_free_irq(ar); - ath10k_pci_rx_retry_sync(ar); err_deinit_irq: - ath10k_pci_deinit_irq(ar); + ath10k_pci_release_resource(ar); err_sleep: ath10k_pci_sleep_sync(ar); @@ -3720,29 +3730,18 @@ err_free_pipes: err_core_destroy: ath10k_core_destroy(ar); -err_free: - kfree(ar_pci->attr); - kfree(ar_pci->pipe_config); - kfree(ar_pci->serv_to_pipe); - return ret; } static void ath10k_pci_remove(struct pci_dev *pdev) { struct ath10k *ar = pci_get_drvdata(pdev); - struct ath10k_pci *ar_pci; ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); if (!ar) return; - ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci) - return; - ath10k_core_unregister(ar); ath10k_pci_free_irq(ar); ath10k_pci_deinit_irq(ar); @@ -3750,9 +3749,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev) ath10k_pci_sleep_sync(ar); ath10k_pci_release(ar); ath10k_core_destroy(ar); - kfree(ar_pci->attr); - kfree(ar_pci->pipe_config); - kfree(ar_pci->serv_to_pipe); } MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 4ed21dad6a8e..3f563e02d17d 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -733,11 +733,13 @@ static void 
ath9k_hif_usb_reg_in_cb(struct urb *urb) return; } + rx_buf->skb = nskb; + usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), nskb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, nskb, 1); + ath9k_hif_usb_reg_in_cb, rx_buf, 1); } resubmit: diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 7987a288917b..27116c7d3f4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -271,6 +271,8 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, { struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data; u32 tp = le32_to_cpu(trig->time_point); + struct iwl_ucode_tlv *dup = NULL; + int ret; if (le32_to_cpu(tlv->length) < sizeof(*trig)) return -EINVAL; @@ -283,10 +285,20 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, return -EINVAL; } - if (!le32_to_cpu(trig->occurrences)) + if (!le32_to_cpu(trig->occurrences)) { + dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length), + GFP_KERNEL); + if (!dup) + return -ENOMEM; + trig = (void *)dup->data; trig->occurrences = cpu_to_le32(-1); + tlv = dup; + } + + ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); + kfree(dup); - return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); + return ret; } static int (*dbg_tlv_alloc[])(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index fee01cbbd3ac..27977992fd7f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1189,17 +1189,15 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) iwl_mvm_change_queue_tid(mvm, i); + rcu_read_unlock(); + if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); - if (ret) { - rcu_read_unlock(); + if (ret) return ret; - } } - rcu_read_unlock(); - return free_queue; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 65d65c6baf4c..e02bafb8921f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -582,6 +582,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), + IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), + IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index dfe625a53c63..3d7db6ffb599 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -301,6 +301,7 @@ struct mt76_hw_cap { #define MT_DRV_TX_ALIGNED4_SKBS BIT(1) #define MT_DRV_SW_RX_AIRTIME BIT(2) #define MT_DRV_RX_DMA_HDR BIT(3) +#define MT_DRV_HW_MGMT_TXQ BIT(4) struct mt76_driver_ops { u32 drv_flags; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 26cb711b465f..83dfa6da4761 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -642,8 +642,10 @@ mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7603_dev *dev = hw->priv; + mutex_lock(&dev->mt76.mutex); dev->coverage_class = max_t(s16, coverage_class, 0); mt7603_mac_set_timing(dev); + mutex_unlock(&dev->mt76.mutex); } static void mt7603_tx(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index fd3ef483a87c..d06afcf46d67 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -234,10 +234,11 @@ mt7615_queues_acq(struct seq_file *s, void *data) int i; for (i = 0; i < 16; i++) { - int j, acs = i / 4, index = i % 4; + int j, wmm_idx = i % MT7615_MAX_WMM_SETS; + int acs = i / MT7615_MAX_WMM_SETS; u32 ctrl, val, qlen = 0; - val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index)); + val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx)); ctrl = BIT(31) | BIT(15) | (acs << 8); for (j = 0; j < 32; j++) { @@ -245,11 +246,11 @@ mt7615_queues_acq(struct seq_file *s, void *data) continue; mt76_wr(dev, MT_PLE_FL_Q0_CTRL, - ctrl | (j + (index << 5))); + ctrl | (j + (wmm_idx << 5))); qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL, GENMASK(11, 0)); } - seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen); + seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen); } return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index 5a124610d4af..e5a965df899a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -36,10 +36,10 @@ static int mt7622_init_tx_queues_multi(struct mt7615_dev *dev) { static const u8 wmm_queue_map[] = { - MT7622_TXQ_AC0, - MT7622_TXQ_AC1, - MT7622_TXQ_AC2, - MT7622_TXQ_AC3, + [IEEE80211_AC_BK] = MT7622_TXQ_AC0, + [IEEE80211_AC_BE] = MT7622_TXQ_AC1, + [IEEE80211_AC_VI] = MT7622_TXQ_AC2, + [IEEE80211_AC_VO] = MT7622_TXQ_AC3, }; int ret; int i; @@ -100,6 +100,7 @@ mt7615_tx_cleanup(struct mt7615_dev *dev) int i; mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false); + mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); if (is_mt7615(&dev->mt76)) { mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); } else { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index edac37e7847b..22e4eabe6578 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -72,8 +72,7 @@ static int mt7615_eeprom_load(struct mt7615_dev *dev, u32 addr) { int ret; - ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE + - MT7615_EEPROM_EXTRA_DATA); + ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_FULL_SIZE); if (ret < 0) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h index 40fed7adc58a..a024dee10362 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h @@ -17,7 +17,7 @@ #define MT7615_EEPROM_TXDPD_SIZE 216 #define MT7615_EEPROM_TXDPD_COUNT (44 + 3) -#define MT7615_EEPROM_EXTRA_DATA (MT7615_EEPROM_TXDPD_OFFSET + \ +#define MT7615_EEPROM_FULL_SIZE (MT7615_EEPROM_TXDPD_OFFSET + \ MT7615_EEPROM_TXDPD_COUNT * \ MT7615_EEPROM_TXDPD_SIZE) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c 
b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 9f1c6ca7a665..d97315ec7265 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -526,22 +526,16 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; - if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { - q_idx = wmm_idx * MT7615_MAX_WMM_SETS + - skb_get_queue_mapping(skb); - p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; - } else if (beacon) { - if (ext_phy) - q_idx = MT_LMAC_BCN1; - else - q_idx = MT_LMAC_BCN0; + if (beacon) { p_fmt = MT_TX_TYPE_FW; + q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0; + } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { + p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; + q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0; } else { - if (ext_phy) - q_idx = MT_LMAC_ALTX1; - else - q_idx = MT_LMAC_ALTX0; p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; + q_idx = wmm_idx * MT7615_MAX_WMM_SETS + + mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); } val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h index f0d4b29a52a2..81608ab656b8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h @@ -124,21 +124,6 @@ enum tx_pkt_type { MT_TX_TYPE_FW, }; -enum tx_pkt_queue_idx { - MT_LMAC_AC00, - MT_LMAC_AC01, - MT_LMAC_AC02, - MT_LMAC_AC03, - MT_LMAC_ALTX0 = 0x10, - MT_LMAC_BMC0, - MT_LMAC_BCN0, - MT_LMAC_PSMP0, - MT_LMAC_ALTX1, - MT_LMAC_BMC1, - MT_LMAC_BCN1, - MT_LMAC_PSMP1, -}; - enum tx_port_idx { MT_TX_PORT_IDX_LMAC, MT_TX_PORT_IDX_MCU diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index c26f99b368d9..beaca8127680 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -397,6 +397,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct mt7615_dev *dev = mt7615_hw_dev(hw); + queue = mt7615_lmac_mapping(dev, queue); queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS; return mt7615_mcu_set_wmm(dev, queue, params); @@ -735,9 +736,12 @@ static void mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt7615_dev *dev = phy->dev; + mutex_lock(&dev->mt76.mutex); phy->coverage_class = max_t(s16, coverage_class, 0); mt7615_mac_set_timing(phy); + mutex_unlock(&dev->mt76.mutex); } static int diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index e670393506f0..2e99845b9c96 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -146,7 +146,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, static const struct mt76_driver_ops drv_ops = { /* txwi_size = txd size + txp size */ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common), - .drv_flags = MT_DRV_TXWI_NO_FREE, + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ, .survey_flags = SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_BSS_RX, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h 
b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index d6176d316bee..3e7d51bf42a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -282,6 +282,21 @@ struct mt7615_dev { struct list_head wd_head; }; +enum tx_pkt_queue_idx { + MT_LMAC_AC00, + MT_LMAC_AC01, + MT_LMAC_AC02, + MT_LMAC_AC03, + MT_LMAC_ALTX0 = 0x10, + MT_LMAC_BMC0, + MT_LMAC_BCN0, + MT_LMAC_PSMP0, + MT_LMAC_ALTX1, + MT_LMAC_BMC1, + MT_LMAC_BCN1, + MT_LMAC_PSMP1, +}; + enum { HW_BSSID_0 = 0x0, HW_BSSID_1, @@ -447,6 +462,21 @@ static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev) return MT7615_WTBL_SIZE; } +static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac) +{ + static const u8 lmac_queue_map[] = { + [IEEE80211_AC_BK] = MT_LMAC_AC00, + [IEEE80211_AC_BE] = MT_LMAC_AC01, + [IEEE80211_AC_VI] = MT_LMAC_AC02, + [IEEE80211_AC_VO] = MT_LMAC_AC03, + }; + + if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map))) + return MT_LMAC_AC01; /* BE */ + + return lmac_queue_map[ac]; +} + void mt7615_dma_reset(struct mt7615_dev *dev); void mt7615_scan_work(struct work_struct *work); void mt7615_roc_work(struct work_struct *work); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index a50077eb24d7..5be6704770ad 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -270,7 +270,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf, { static const struct mt76_driver_ops drv_ops = { .txwi_size = MT_USB_TXD_SIZE, - .drv_flags = MT_DRV_RX_DMA_HDR, + .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ, .tx_prepare_skb = mt7663u_tx_prepare_skb, .tx_complete_skb = mt7663u_tx_complete_skb, .tx_status_data = mt7663u_tx_status_data, @@ -329,25 +329,26 @@ static int mt7663u_probe(struct usb_interface *usb_intf, if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON, FW_STATE_PWR_ON << 1, 500)) { dev_err(dev->mt76.dev, "Timeout for power on\n"); - return -EIO; + ret = -EIO; + goto error; } alloc_queues: ret = mt76u_alloc_mcu_queue(&dev->mt76); if (ret) - goto error; + goto error_free_q; ret = mt76u_alloc_queues(&dev->mt76); if (ret) - goto error; + goto error_free_q; ret = mt7663u_register_device(dev); if (ret) - goto error_freeq; + goto error_free_q; return 0; -error_freeq: +error_free_q: mt76u_queues_deinit(&dev->mt76); error: mt76u_deinit(&dev->mt76); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index cbbe986655fe..5fda6e7b120c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -456,8 +456,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) tasklet_disable(&dev->mt76.tx_tasklet); napi_disable(&dev->mt76.tx_napi); - for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) + mt76_for_each_q_rx(&dev->mt76, i) { napi_disable(&dev->mt76.napi[i]); + } mutex_lock(&dev->mt76.mutex); @@ -515,7 +516,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) tasklet_enable(&dev->mt76.pre_tbtt_tasklet); - for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) { + mt76_for_each_q_rx(&dev->mt76, i) { napi_enable(&dev->mt76.napi[i]); napi_schedule(&dev->mt76.napi[i]); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 0575c259f245..05b5650c56c8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -716,9 +716,12 @@ static void mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_dev *dev = phy->dev; + mutex_lock(&dev->mt76.mutex); phy->coverage_class = max_t(s16, coverage_class, 0); mt7915_mac_set_timing(phy); + mutex_unlock(&dev->mt76.mutex); } static int diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index fca38ea2441f..f10c98aa883c 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -264,6 +264,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, skb_set_queue_mapping(skb, qid); } + if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && + !ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) { + qid = MT_TXQ_PSD; + skb_set_queue_mapping(skb, qid); + } + if (!(wcid->tx_info & MT_WCID_TX_INFO_SET)) ieee80211_get_tx_rates(info->control.vif, sta, skb, info->control.rates, 1); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index fb97ea25b4d4..87382b2f7443 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -1010,17 +1010,18 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac) { if (mt76_chip(dev) == 0x7663) { - static const u8 wmm_queue_map[] = { - [IEEE80211_AC_VO] = 0, - [IEEE80211_AC_VI] = 1, - [IEEE80211_AC_BE] = 2, - [IEEE80211_AC_BK] = 4, + static const u8 lmac_queue_map[] = { + /* ac to lmac mapping */ + [IEEE80211_AC_BK] = 0, + [IEEE80211_AC_BE] = 1, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_VO] = 4, }; - if (WARN_ON(ac >= ARRAY_SIZE(wmm_queue_map))) - return 2; /* BE */ + if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map))) + return 1; /* BE */ - return wmm_queue_map[ac]; + return lmac_queue_map[ac]; } return mt76_ac_to_hwq(ac); @@ -1066,11 +1067,16 @@ static int mt76u_alloc_tx(struct mt76_dev *dev) static void mt76u_free_tx(struct mt76_dev *dev) { - struct mt76_queue *q; - int i, j; + int i; for (i = 0; i < IEEE80211_NUM_ACS; i++) { + struct mt76_queue *q; + int j; + q = dev->q_tx[i].q; + if (!q) + continue; + for (j = 0; j < q->ndesc; j++) usb_free_urb(q->entry[j].urb); } @@ -1078,17 +1084,22 @@ static void mt76u_free_tx(struct mt76_dev *dev) void mt76u_stop_tx(struct mt76_dev *dev) { - struct mt76_queue_entry entry; - struct mt76_queue *q; - int i, j, ret; + int ret; ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy), HZ / 5); if (!ret) { + struct mt76_queue_entry entry; + struct mt76_queue *q; + int i, j; + dev_err(dev->dev, "timed out waiting for pending tx\n"); for (i = 0; i < IEEE80211_NUM_ACS; i++) { q = dev->q_tx[i].q; + if (!q) + continue; + for (j = 0; j < q->ndesc; j++) usb_kill_urb(q->entry[j].urb); } @@ -1100,6 +1111,8 @@ void mt76u_stop_tx(struct mt76_dev *dev) */ for (i = 0; i < IEEE80211_NUM_ACS; i++) { q = dev->q_tx[i].q; + if (!q) + continue; /* Assure we are in sync with killed tasklet. 
*/ spin_lock_bh(&q->lock); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 482c6c8b0fb7..88280057e032 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); +#define XENNET_TIMEOUT (5 * HZ) + static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { @@ -1334,12 +1336,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); - xenbus_switch_state(dev, XenbusStateInitialising); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) != - XenbusStateClosed && - xenbus_read_driver_state(dev->otherend) != - XenbusStateUnknown); + do { + xenbus_switch_state(dev, XenbusStateInitialising); + err = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown, XENNET_TIMEOUT); + } while (!err); + return netdev; exit: @@ -2139,28 +2144,43 @@ static const struct attribute_group xennet_dev_group = { }; #endif /* CONFIG_SYSFS */ -static int xennet_remove(struct xenbus_device *dev) +static void xennet_bus_close(struct xenbus_device *dev) { - struct netfront_info *info = dev_get_drvdata(&dev->dev); - - dev_dbg(&dev->dev, "%s\n", dev->nodename); + int ret; - if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosing); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosing || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosing || + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); + + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosed); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosed || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); - } + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); +} + +static int xennet_remove(struct xenbus_device *dev) +{ + struct netfront_info *info = dev_get_drvdata(&dev->dev); + xennet_bus_close(dev); xennet_disconnect_backend(info); if (info->netdev->reg_state == NETREG_REGISTERED) |
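The xen-netfront changes above replace open-ended wait_event() calls with a loop that re-arms wait_event_timeout() until the backend actually leaves the Closed/Unknown states. A minimal sketch of that retry shape, assuming a hypothetical my_wait_for_backend() helper and the same five-second budget as XENNET_TIMEOUT:

#include <linux/jiffies.h>
#include <linux/wait.h>
#include <xen/xenbus.h>

#define MY_TIMEOUT (5 * HZ)	/* mirrors XENNET_TIMEOUT above */

/* Hypothetical helper: keep nudging the frontend state and re-arming the
 * timeout instead of blocking forever in a single wait_event().
 */
static void my_wait_for_backend(struct xenbus_device *dev,
				wait_queue_head_t *wq)
{
	long ret;

	do {
		xenbus_switch_state(dev, XenbusStateInitialising);
		ret = wait_event_timeout(*wq,
				xenbus_read_driver_state(dev->otherend) !=
					XenbusStateClosed &&
				xenbus_read_driver_state(dev->otherend) !=
					XenbusStateUnknown,
				MY_TIMEOUT);
	} while (!ret);
}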