Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c515.c  4
-rw-r--r--  drivers/net/ethernet/3com/3c589_cs.c  11
-rw-r--r--  drivers/net/ethernet/8390/ne.c  1
-rw-r--r--  drivers/net/ethernet/8390/smc-ultra.c  1
-rw-r--r--  drivers/net/ethernet/8390/wd.c  1
-rw-r--r--  drivers/net/ethernet/amd/lance.c  1
-rw-r--r--  drivers/net/ethernet/amd/pds_core/dev.c  10
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c  12
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c  4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c  9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  42
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c  1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  30
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h  3
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c  5
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c  2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c  16
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c  4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  33
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c  25
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h  8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c  4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h  5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c  5
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h  2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c  15
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_register.h  2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c  5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c  2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h  2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c  5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_gnss.c  72
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_gnss.h  10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c  5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c  20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c  8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c  10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h  9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c  19
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h  1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c  1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c  4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c  3
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  8
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c  12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  2
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c  7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c  7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c  29
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c  4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c  46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c  124
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c  152
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c  19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c  70
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c  53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c  13
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c  10
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c  10
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_ethtool.c  2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nic/main.h  2
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c  1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h  4
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c  24
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c  34
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c  44
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.c  38
-rw-r--r--  drivers/net/ethernet/sfc/ef100_netdev.c  4
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c  2
-rw-r--r--  drivers/net/ethernet/sfc/efx_devlink.c  95
-rw-r--r--  drivers/net/ethernet/sfc/siena/efx_channels.c  2
-rw-r--r--  drivers/net/ethernet/sfc/tc.c  27
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c  3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c  6
-rw-r--r--  drivers/net/ethernet/sun/cassini.c  2
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c  2
109 files changed, 929 insertions(+), 630 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index d2f4358cc550..ba3e7aa1a28f 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -66,8 +66,10 @@ static int max_interrupt_work = 20;
#include <linux/timer.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
-
#include <linux/uaccess.h>
+
+#include <net/Space.h>
+
#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 82f94b1635bf..5267e9dcd87e 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -195,6 +195,7 @@ static int tc589_probe(struct pcmcia_device *link)
{
struct el3_private *lp;
struct net_device *dev;
+ int ret;
dev_dbg(&link->dev, "3c589_attach()\n");
@@ -218,7 +219,15 @@ static int tc589_probe(struct pcmcia_device *link)
dev->ethtool_ops = &netdev_ethtool_ops;
- return tc589_config(link);
+ ret = tc589_config(link);
+ if (ret)
+ goto err_free_netdev;
+
+ return 0;
+
+err_free_netdev:
+ free_netdev(dev);
+ return ret;
}
static void tc589_detach(struct pcmcia_device *link)
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 0a9118b8be0c..bc9c81dc00fd 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -52,6 +52,7 @@ static const char version2[] =
#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
+#include <net/Space.h>
#include <asm/io.h>
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index 6e62c37c9400..7465650c8078 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -66,6 +66,7 @@ static const char version[] =
#include <linux/isapnp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <net/Space.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 5b00c452bede..119021d41451 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -37,6 +37,7 @@ static const char version[] =
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <net/Space.h>
#include <asm/io.h>
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 8971665a4b2a..6cf38180cc01 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -59,6 +59,7 @@ static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@c
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/bitops.h>
+#include <net/Space.h>
#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index f7c597ea5daf..debe5216fe29 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -68,9 +68,15 @@ bool pdsc_is_fw_running(struct pdsc *pdsc)
bool pdsc_is_fw_good(struct pdsc *pdsc)
{
- u8 gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+ bool fw_running = pdsc_is_fw_running(pdsc);
+ u8 gen;
- return pdsc_is_fw_running(pdsc) && gen == pdsc->fw_generation;
+ /* Make sure to update the cached fw_status by calling
+ * pdsc_is_fw_running() before getting the generation
+ */
+ gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+
+ return fw_running && gen == pdsc->fw_generation;
}
static u8 pdsc_devcmd_status(struct pdsc *pdsc)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 33a9574e9e04..32d2c6fac652 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1329,7 +1329,7 @@ static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
return pdata->phy_if.phy_impl.an_outcome(pdata);
}
-static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
+static bool xgbe_phy_status_result(struct xgbe_prv_data *pdata)
{
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
enum xgbe_mode mode;
@@ -1367,8 +1367,13 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_FULL;
- if (xgbe_set_mode(pdata, mode) && pdata->an_again)
+ if (!xgbe_set_mode(pdata, mode))
+ return false;
+
+ if (pdata->an_again)
xgbe_phy_reconfig_aneg(pdata);
+
+ return true;
}
static void xgbe_phy_status(struct xgbe_prv_data *pdata)
@@ -1398,7 +1403,8 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
return;
}
- xgbe_phy_status_result(pdata);
+ if (xgbe_phy_status_result(pdata))
+ return;
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 38d0cdaf22a5..bf1611cce974 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2531,9 +2531,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->irq0 = platform_get_irq(pdev, 0);
if (!priv->is_lite) {
priv->irq1 = platform_get_irq(pdev, 1);
- priv->wol_irq = platform_get_irq(pdev, 2);
+ priv->wol_irq = platform_get_irq_optional(pdev, 2);
} else {
- priv->wol_irq = platform_get_irq(pdev, 1);
+ priv->wol_irq = platform_get_irq_optional(pdev, 1);
}
if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
ret = -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 637d162bbcfa..1e7a6f1d4223 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14294,11 +14294,16 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK;
- if (netif_running(dev))
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (netif_running(dev)) {
+ if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n");
+ goto done;
+ }
+ }
netif_device_attach(dev);
+done:
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dcd9367f05af..b499bc9c4e06 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -692,7 +692,7 @@ next_tx_int:
__netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
- READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING);
+ READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -2365,6 +2365,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u64 ns;
+ if (!ptp)
+ goto async_event_process_exit;
+
spin_lock_bh(&ptp->ptp_lock);
bnxt_ptp_update_current_time(bp);
ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
@@ -4763,6 +4766,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
continue;
+ if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
+ !bp->ptp_cfg)
+ continue;
__set_bit(bnxt_async_events_arr[i], async_events_bmap);
}
if (bmap && bmap_size) {
@@ -5350,6 +5356,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
return;
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
/* all contexts configured to same hash_type, zero always exists */
req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
resp = hwrm_req_hold(bp, req);
@@ -8812,6 +8819,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
goto err_out;
}
+ if (BNXT_VF(bp))
+ bnxt_hwrm_func_qcfg(bp);
+
rc = bnxt_setup_vnic(bp, 0);
if (rc)
goto err_out;
@@ -11598,6 +11608,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
static void bnxt_fw_health_check(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct pci_dev *pdev = bp->pdev;
u32 val;
if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
@@ -11611,7 +11622,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
}
val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
- if (val == fw_health->last_fw_heartbeat) {
+ if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
fw_health->arrests++;
goto fw_reset;
}
@@ -11619,7 +11630,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
fw_health->last_fw_heartbeat = val;
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- if (val != fw_health->last_fw_reset_cnt) {
+ if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
fw_health->discoveries++;
goto fw_reset;
}
@@ -13025,26 +13036,37 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
#endif /* CONFIG_RFS_ACCEL */
-static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
+static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(netdev);
- struct udp_tunnel_info ti;
unsigned int cmd;
- udp_tunnel_nic_get_port(netdev, table, 0, &ti);
- if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+ if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
else
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
- if (ti.port)
- return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
+ return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
+}
+
+static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct bnxt *bp = netdev_priv(netdev);
+ unsigned int cmd;
+
+ if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+ else
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
- .sync_table = bnxt_udp_tunnel_sync,
+ .set_port = bnxt_udp_tunnel_set_port,
+ .unset_port = bnxt_udp_tunnel_unset_port,
.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 2dd8ee4a6f75..8fd5071d8b09 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -3831,7 +3831,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
}
}
- if (req & BNXT_FW_RESET_AP) {
+ if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
/* This feature is not supported in older firmware versions */
if (bp->hwrm_spec_code >= 0x10803) {
if (!bnxt_firmware_reset_ap(dev)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index e46689128e32..f3886710e778 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -952,6 +952,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
bnxt_ptp_timecounter_init(bp, true);
bnxt_ptp_adjfine_rtc(bp, 0);
}
+ bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
ptp->ptp_info = bnxt_ptp_caps;
if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index f28ffc31df22..2b5761ad2f92 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1272,7 +1272,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
}
}
-static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+ bool tx_lpi_enabled)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
@@ -1292,7 +1293,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
/* Enable EEE and switch to a 27Mhz clock automatically */
reg = bcmgenet_readl(priv->base + off);
- if (enable)
+ if (tx_lpi_enabled)
reg |= TBUF_EEE_EN | TBUF_PM_EN;
else
reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
@@ -1313,6 +1314,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
priv->eee.eee_enabled = enable;
priv->eee.eee_active = enable;
+ priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
@@ -1328,6 +1330,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
e->eee_enabled = p->eee_enabled;
e->eee_active = p->eee_active;
+ e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
@@ -1337,7 +1340,6 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct ethtool_eee *p = &priv->eee;
- int ret = 0;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1348,16 +1350,11 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
p->eee_enabled = e->eee_enabled;
if (!p->eee_enabled) {
- bcmgenet_eee_enable_set(dev, false);
+ bcmgenet_eee_enable_set(dev, false, false);
} else {
- ret = phy_init_eee(dev->phydev, false);
- if (ret) {
- netif_err(priv, hw, dev, "EEE initialization failed\n");
- return ret;
- }
-
+ p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
- bcmgenet_eee_enable_set(dev, true);
+ bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
}
return phy_ethtool_set_eee(dev->phydev, e);
@@ -3450,7 +3447,7 @@ err_clk_disable:
return ret;
}
-static void bcmgenet_netif_stop(struct net_device *dev)
+static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -3465,6 +3462,8 @@ static void bcmgenet_netif_stop(struct net_device *dev)
/* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
+ if (stop_phy)
+ phy_stop(dev->phydev);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@@ -3485,7 +3484,7 @@ static int bcmgenet_close(struct net_device *dev)
netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
- bcmgenet_netif_stop(dev);
+ bcmgenet_netif_stop(dev, false);
/* Really kill the PHY state machine and disconnect from it */
phy_disconnect(dev->phydev);
@@ -4277,9 +4276,6 @@ static int bcmgenet_resume(struct device *d)
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
- if (priv->eee.eee_enabled)
- bcmgenet_eee_enable_set(dev, true);
-
bcmgenet_netif_start(dev);
netif_device_attach(dev);
@@ -4303,7 +4299,7 @@ static int bcmgenet_suspend(struct device *d)
netif_device_detach(dev);
- bcmgenet_netif_stop(dev);
+ bcmgenet_netif_stop(dev, true);
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 946f6e283c4e..1985c0ec4da2 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -703,4 +703,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+ bool tx_lpi_enabled);
+
#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index be042905ada2..c15ed0acdb77 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -87,6 +87,11 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ bcmgenet_eee_enable_set(dev,
+ priv->eee.eee_enabled && priv->eee.eee_active,
+ priv->eee.tx_lpi_enabled);
}
/* setup netdev link state when PHY link status change and
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 06a0c00af99c..276c32c3926a 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -72,6 +72,8 @@
#include <linux/gfp.h>
#include <linux/io.h>
+#include <net/Space.h>
+
#include <asm/irq.h>
#include <linux/atomic.h>
#if ALLOW_DMA
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 3c4fa26f0f9b..9e1b2536e9a9 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1229,7 +1229,13 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
if (!skb)
break;
- rx_byte_cnt += skb->len;
+ /* When set, the outer VLAN header is extracted and reported
+ * in the receive buffer descriptor. So rx_byte_cnt should
+ * add the length of the extracted VLAN header.
+ */
+ if (bd_status & ENETC_RXBD_FLAG_VLAN)
+ rx_byte_cnt += VLAN_HLEN;
+ rx_byte_cnt += skb->len + ETH_HLEN;
rx_frm_cnt++;
napi_gro_receive(napi, skb);
@@ -1565,6 +1571,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
&cleaned_cnt, &xdp_buff);
+ /* When set, the outer VLAN header is extracted and reported
+ * in the receive buffer descriptor. So rx_byte_cnt should
+ * add the length of the extracted VLAN header.
+ */
+ if (bd_status & ENETC_RXBD_FLAG_VLAN)
+ rx_byte_cnt += VLAN_HLEN;
+ rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
+
xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
switch (xdp_act) {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 83c27bbbc6ed..126007ab70f6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -181,8 +181,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
int bw_sum = 0;
u8 bw;
- prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
- prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);
+ prio_top = tc_nums - 1;
+ prio_next = tc_nums - 2;
/* Support highest prio and second prio tc in cbs mode */
if (tc != prio_top && tc != prio_next)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 42ec6ca3bf03..38e5b5abe067 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3798,7 +3798,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free < MAX_SKB_FRAGS + 1) {
netdev_err(fep->netdev, "NOT enough BD for SG!\n");
- xdp_return_frame(frame);
return NETDEV_TX_BUSY;
}
@@ -3835,6 +3834,11 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
index = fec_enet_get_bd_index(last_bdp, &txq->bd);
txq->tx_skbuff[index] = NULL;
+ /* Make sure the updates to rest of the descriptor are performed before
+ * transferring ownership.
+ */
+ dma_wmb();
+
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
*/
@@ -3844,8 +3848,14 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
/* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
+ /* Make sure the update to bdp are performed before txq->bd.cur. */
+ dma_wmb();
+
txq->bd.cur = bdp;
+ /* Trigger transmission start */
+ writel(0, txq->bd.reg_desc_active);
+
return 0;
}
@@ -3874,12 +3884,6 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
sent_frames++;
}
- /* Make sure the update to bdp and tx_skbuff are performed. */
- wmb();
-
- /* Trigger transmission start */
- writel(0, txq->bd.reg_desc_active);
-
__netif_tx_unlock(nq);
return sent_frames;
@@ -4478,9 +4482,11 @@ fec_drv_remove(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
- return ret;
+ dev_err(&pdev->dev,
+ "Failed to resume device in remove callback (%pe)\n",
+ ERR_PTR(ret));
cancel_work_sync(&fep->tx_timeout_work);
fec_ptp_stop(pdev);
@@ -4493,8 +4499,13 @@ fec_drv_remove(struct platform_device *pdev)
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
- clk_disable_unprepare(fep->clk_ahb);
- clk_disable_unprepare(fep->clk_ipg);
+ /* After pm_runtime_get_sync() failed, the clks are still off, so skip
+ * disabling them again.
+ */
+ if (ret >= 0) {
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ }
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index cbbab5b2b402..b85c412683dd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -331,9 +331,25 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
return head == hw->cmq.csq.next_to_use;
}
-static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
+static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
+{
+ static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
+ {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
+ };
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++)
+ if (cmdq_tx_timeout_map[i].opcode == opcode)
+ return cmdq_tx_timeout_map[i].tx_timeout;
+
+ return tx_timeout;
+}
+
+static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode,
bool *is_completed)
{
+ u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode,
+ hw->cmq.tx_timeout);
u32 timeout = 0;
do {
@@ -343,7 +359,7 @@ static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
}
udelay(1);
timeout++;
- } while (timeout < hw->cmq.tx_timeout);
+ } while (timeout < cmdq_tx_timeout);
}
static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
@@ -407,7 +423,8 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
* if multi descriptors to be sent, use the first one to check
*/
if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
- hclge_comm_wait_for_resp(hw, &is_completed);
+ hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode),
+ &is_completed);
if (!is_completed)
ret = -EBADE;
@@ -529,7 +546,7 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
/* Setup Tx write back timeout */
- cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;
+ cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT;
/* Setup queue rings */
ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index de72ecbfd5ad..18f1b4bf362d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -54,7 +54,8 @@
#define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
-#define HCLGE_COMM_CMDQ_TX_TIMEOUT 30000
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000
enum hclge_opcode_type {
/* Generic commands */
@@ -360,6 +361,11 @@ struct hclge_comm_caps_bit_map {
u16 local_bit;
};
+struct hclge_cmdq_tx_timeout_map {
+ u32 opcode;
+ u32 tx_timeout;
+};
+
struct hclge_comm_firmware_compat_cmd {
__le32 compat;
u8 rsv[20];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 4c3e90a1c4d0..d385ffc21876 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -130,7 +130,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tx_bd_queue",
.cmd = HNAE3_DBG_CMD_TX_BD,
.dentry = HNS3_DBG_DENTRY_TX_BD,
- .buf_len = HNS3_DBG_READ_LEN_4MB,
+ .buf_len = HNS3_DBG_READ_LEN_5MB,
.init = hns3_dbg_bd_file_init,
},
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index 97578eabb7d8..4a5ef8a90a10 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -10,6 +10,7 @@
#define HNS3_DBG_READ_LEN_128KB 0x20000
#define HNS3_DBG_READ_LEN_1MB 0x100000
#define HNS3_DBG_READ_LEN_4MB 0x400000
+#define HNS3_DBG_READ_LEN_5MB 0x500000
#define HNS3_DBG_WRITE_LEN 1024
#define HNS3_DBG_DATA_STR_LEN 32
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 4fb5406c1951..2689b108f7df 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -8053,12 +8053,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* If it is not PF reset or FLR, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
- hdev->reset_type != HNAE3_FUNC_RESET &&
- hdev->reset_type != HNAE3_FLR_RESET) {
- hclge_mac_stop_phy(hdev);
- hclge_update_link_status(hdev);
- return;
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
+ HCLGE_PFC_DISABLE);
+ if (hdev->reset_type != HNAE3_FUNC_RESET &&
+ hdev->reset_type != HNAE3_FLR_RESET) {
+ hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
+ return;
+ }
}
hclge_reset_tqp(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 4a33f65190e2..922c0da3660c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -171,8 +171,8 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
- u8 pfc_bitmap)
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ u8 pfc_bitmap)
{
struct hclge_desc desc;
struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 68f28a98e380..dd6f1fd486cf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -164,6 +164,9 @@ struct hclge_bp_to_qs_map_cmd {
u32 rsvd1;
};
+#define HCLGE_PFC_DISABLE 0
+#define HCLGE_PFC_TX_RX_DISABLE 0
+
struct hclge_pfc_en_cmd {
u8 tx_rx_en_bitmap;
u8 pri_en_bitmap;
@@ -235,6 +238,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ u8 pfc_bitmap);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index f24046250341..dd08989a4c7c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1436,7 +1436,10 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
* might happen in case reset assertion was made by PF. Yes, this also
* means we might end up waiting bit more even for VF reset.
*/
- msleep(5000);
+ if (hdev->reset_type == HNAE3_VF_FULL_RESET)
+ msleep(5000);
+ else
+ msleep(500);
return 0;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 9abaff1f2aff..39d0fe76a38f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -525,7 +525,7 @@ void iavf_set_ethtool_ops(struct net_device *netdev);
void iavf_update_stats(struct iavf_adapter *adapter);
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter);
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter);
-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask);
+void iavf_irq_enable_queues(struct iavf_adapter *adapter);
void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 2de4baff4c20..4a66873882d1 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -359,21 +359,18 @@ static void iavf_irq_disable(struct iavf_adapter *adapter)
}
/**
- * iavf_irq_enable_queues - Enable interrupt for specified queues
+ * iavf_irq_enable_queues - Enable interrupt for all queues
* @adapter: board private structure
- * @mask: bitmap of queues to enable
**/
-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
+void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
int i;
for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & BIT(i - 1)) {
- wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
- IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
- IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
- }
+ wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
+ IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
}
}
@@ -387,7 +384,7 @@ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
struct iavf_hw *hw = &adapter->hw;
iavf_misc_irq_enable(adapter);
- iavf_irq_enable_queues(adapter, ~0);
+ iavf_irq_enable_queues(adapter);
if (flush)
iavf_flush(hw);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h
index bf793332fc9d..a19e88898a0b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_register.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_register.h
@@ -40,7 +40,7 @@
#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
-#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */
#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 9afbbdac3590..7c0578b5457b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -2238,11 +2238,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_process_config(adapter);
adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
- /* Request VLAN offload settings */
- if (VLAN_V2_ALLOWED(adapter))
- iavf_set_vlan_offload_features(adapter, 0,
- netdev->features);
-
iavf_set_queue_vlan_tag_loc(adapter);
was_mac_changed = !ether_addr_equal(netdev->dev_addr,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 0157f6e98d3e..eb2dc0983776 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -5160,7 +5160,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
*/
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
- u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc = { 0 };
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 8ba5f935a092..81961a7d6598 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -229,7 +229,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
struct ice_sq_cd *cd);
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
- u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index c6d4926f0fcf..850db8e0e6b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -932,10 +932,9 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
- first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
+ first->vid &= ~VLAN_PRIO_MASK;
/* Mask the lower 3 bits to set the 802.1p priority */
- first->tx_flags |= (skb->priority & 0x7) <<
- ICE_TX_FLAGS_VLAN_PR_S;
+ first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 2ea8a2b11bcd..75c9de675f20 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -16,8 +16,8 @@
* * number of bytes written - success
* * negative - error code
*/
-static unsigned int
-ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
+static int
+ice_gnss_do_write(struct ice_pf *pf, const unsigned char *buf, unsigned int size)
{
struct ice_aqc_link_topo_addr link_topo;
struct ice_hw *hw = &pf->hw;
@@ -72,39 +72,7 @@ err_out:
dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n",
offset, size, err);
- return offset;
-}
-
-/**
- * ice_gnss_write_pending - Write all pending data to internal GNSS
- * @work: GNSS write work structure
- */
-static void ice_gnss_write_pending(struct kthread_work *work)
-{
- struct gnss_serial *gnss = container_of(work, struct gnss_serial,
- write_work);
- struct ice_pf *pf = gnss->back;
-
- if (!pf)
- return;
-
- if (!test_bit(ICE_FLAG_GNSS, pf->flags))
- return;
-
- if (!list_empty(&gnss->queue)) {
- struct gnss_write_buf *write_buf = NULL;
- unsigned int bytes;
-
- write_buf = list_first_entry(&gnss->queue,
- struct gnss_write_buf, queue);
-
- bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size);
- dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes);
-
- list_del(&write_buf->queue);
- kfree(write_buf->buf);
- kfree(write_buf);
- }
+ return err;
}
/**
@@ -128,12 +96,7 @@ static void ice_gnss_read(struct kthread_work *work)
int err = 0;
pf = gnss->back;
- if (!pf) {
- err = -EFAULT;
- goto exit;
- }
-
- if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ if (!pf || !test_bit(ICE_FLAG_GNSS, pf->flags))
return;
hw = &pf->hw;
@@ -191,7 +154,6 @@ free_buf:
free_page((unsigned long)buf);
requeue:
kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, delay);
-exit:
if (err)
dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err);
}
@@ -220,8 +182,6 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
- INIT_LIST_HEAD(&gnss->queue);
- kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
@@ -281,7 +241,6 @@ static void ice_gnss_close(struct gnss_device *gdev)
if (!gnss)
return;
- kthread_cancel_work_sync(&gnss->write_work);
kthread_cancel_delayed_work_sync(&gnss->read_work);
}
@@ -300,10 +259,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
size_t count)
{
struct ice_pf *pf = gnss_get_drvdata(gdev);
- struct gnss_write_buf *write_buf;
struct gnss_serial *gnss;
- unsigned char *cmd_buf;
- int err = count;
/* We cannot write a single byte using our I2C implementation. */
if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
@@ -319,24 +275,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
if (!gnss)
return -ENODEV;
- cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
- if (!cmd_buf)
- return -ENOMEM;
-
- memcpy(cmd_buf, buf, count);
- write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
- if (!write_buf) {
- kfree(cmd_buf);
- return -ENOMEM;
- }
-
- write_buf->buf = cmd_buf;
- write_buf->size = count;
- INIT_LIST_HEAD(&write_buf->queue);
- list_add_tail(&write_buf->queue, &gnss->queue);
- kthread_queue_work(gnss->kworker, &gnss->write_work);
-
- return err;
+ return ice_gnss_do_write(pf, buf, count);
}
static const struct gnss_operations ice_gnss_ops = {
@@ -432,7 +371,6 @@ void ice_gnss_exit(struct ice_pf *pf)
if (pf->gnss_serial) {
struct gnss_serial *gnss = pf->gnss_serial;
- kthread_cancel_work_sync(&gnss->write_work);
kthread_cancel_delayed_work_sync(&gnss->read_work);
kthread_destroy_worker(gnss->kworker);
gnss->kworker = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index b8bb8b63d081..75e567ad7059 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -22,26 +22,16 @@
*/
#define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1)
-struct gnss_write_buf {
- struct list_head queue;
- unsigned int size;
- unsigned char *buf;
-};
-
/**
* struct gnss_serial - data used to initialize GNSS TTY port
* @back: back pointer to PF
* @kworker: kwork thread for handling periodic work
* @read_work: read_work function for handling GNSS reads
- * @write_work: write_work function for handling GNSS writes
- * @queue: write buffers queue
*/
struct gnss_serial {
struct ice_pf *back;
struct kthread_worker *kworker;
struct kthread_delayed_work read_work;
- struct kthread_work write_work;
- struct list_head queue;
};
#if IS_ENABLED(CONFIG_GNSS)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 450317dfcca7..11ae0e41f518 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2745,6 +2745,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+ vsi->stat_offsets_loaded = false;
+
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
@@ -2793,6 +2795,9 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ret = ice_vsi_alloc_ring_stats(vsi);
if (ret)
goto unroll_vector_base;
+
+ vsi->stat_offsets_loaded = false;
+
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index a1f7c8edc22f..42c318ceff61 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4802,9 +4802,13 @@ err_init_pf:
static void ice_deinit_dev(struct ice_pf *pf)
{
ice_free_irq_msix_misc(pf);
- ice_clear_interrupt_scheme(pf);
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
+
+ /* Service task is already stopped, so call reset directly. */
+ ice_reset(&pf->hw, ICE_RESET_PFR);
+ pci_wait_for_pending_transaction(pf->pdev);
+ ice_clear_interrupt_scheme(pf);
}
static void ice_init_features(struct ice_pf *pf)
@@ -5094,10 +5098,6 @@ int ice_load(struct ice_pf *pf)
struct ice_vsi *vsi;
int err;
- err = ice_reset(&pf->hw, ICE_RESET_PFR);
- if (err)
- return err;
-
err = ice_init_dev(pf);
if (err)
return err;
@@ -5354,12 +5354,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_setup_mc_magic_wake(pf);
ice_set_wake(pf);
- /* Issue a PFR as part of the prescribed driver unload flow. Do not
- * do it via ice_schedule_reset() since there is no need to rebuild
- * and the service task is already stopped.
- */
- ice_reset(&pf->hw, ICE_RESET_PFR);
- pci_wait_for_pending_transaction(pdev);
pci_disable_device(pdev);
}
@@ -7056,6 +7050,10 @@ int ice_down(struct ice_vsi *vsi)
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_for_each_xdp_txq(vsi, i)
+ ice_clean_tx_ring(vsi->xdp_rings[i]);
+
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index f1dca59bd844..588ad8696756 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1171,7 +1171,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
if (!vf)
return -EINVAL;
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@@ -1286,7 +1286,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto out_put_vf;
}
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@@ -1340,7 +1340,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
return -EOPNOTSUPP;
}
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@@ -1653,7 +1653,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
if (!vf)
return -EINVAL;
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 4fcf2d07eb85..52d0a126eb61 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1152,11 +1152,11 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
struct xdp_buff *xdp = &rx_ring->xdp;
+ u32 cached_ntc = rx_ring->first_desc;
struct ice_tx_ring *xdp_ring = NULL;
struct bpf_prog *xdp_prog = NULL;
u32 ntc = rx_ring->next_to_clean;
u32 cnt = rx_ring->count;
- u32 cached_ntc = ntc;
u32 xdp_xmit = 0;
u32 cached_ntu;
bool failure;
@@ -1664,8 +1664,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
- td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
- ICE_TX_FLAGS_VLAN_S;
+ td_tag = first->vid;
}
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -1998,7 +1997,7 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
* VLAN offloads exclusively so we only care about the VLAN ID here
*/
if (skb_vlan_tag_present(skb)) {
- first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
+ first->vid = skb_vlan_tag_get(skb);
if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
else
@@ -2388,8 +2387,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
(ICE_TX_CTX_DESC_IL2TAG2 <<
ICE_TXD_CTX_QW1_CMD_S));
- offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
- ICE_TX_FLAGS_VLAN_S;
+ offload.cd_l2tag2 = first->vid;
}
/* set up TSO offload */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index fff0efe28373..166413fc33f4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -127,10 +127,6 @@ static inline int ice_skb_pad(void)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8)
-#define ICE_TX_FLAGS_VLAN_M 0xffff0000
-#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
-#define ICE_TX_FLAGS_VLAN_PR_S 29
-#define ICE_TX_FLAGS_VLAN_S 16
#define ICE_XDP_PASS 0
#define ICE_XDP_CONSUMED BIT(0)
@@ -182,8 +178,9 @@ struct ice_tx_buf {
unsigned int gso_segs;
unsigned int nr_frags; /* used for mbuf XDP */
};
- u32 type:16; /* &ice_tx_buf_type */
- u32 tx_flags:16;
+ u32 tx_flags:12;
+ u32 type:4; /* &ice_tx_buf_type */
+ u32 vid:16;
DEFINE_DMA_UNMAP_LEN(len);
DEFINE_DMA_UNMAP_ADDR(dma);
};
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 89fd6982df09..bf74a2f3a4f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -186,6 +186,25 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
}
/**
+ * ice_check_vf_ready_for_reset - check if VF is ready to be reset
+ * @vf: VF to check if it's ready to be reset
+ *
+ * The purpose of this function is to ensure that the VF is not in reset,
+ * disabled, and is both initialized and active, thus enabling us to safely
+ * initialize another reset.
+ */
+int ice_check_vf_ready_for_reset(struct ice_vf *vf)
+{
+ int ret;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+/**
* ice_trigger_vf_reset - Reset a VF on HW
* @vf: pointer to the VF structure
* @is_vflr: true if VFLR was issued, false if not
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index e3cda6fb71ab..a38ef00a3679 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -215,6 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+int ice_check_vf_ready_for_reset(struct ice_vf *vf);
void ice_set_vf_state_dis(struct ice_vf *vf);
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
void
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 97243c616d5d..f4a524f80b11 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -3955,6 +3955,7 @@ error_handler:
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
ops->reset_vf(vf);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 205d577bdbba..caf91c6f52b4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -426,7 +426,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
- u8 bit_shift = 0;
+ u8 bit_shift = 1;
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
@@ -434,7 +434,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
- while (hash_mask >> bit_shift != 0xFF)
+ while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
bit_shift++;
/* The portion of the address that is used for the hash table
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7d60da1b7bf4..319ed601eaa1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -822,6 +822,8 @@ static int igb_set_eeprom(struct net_device *netdev,
*/
ret_val = hw->nvm.ops.read(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
+ if (ret_val)
+ goto out;
}
/* Device's eeprom is always little-endian, word addressable */
@@ -841,6 +843,7 @@ static int igb_set_eeprom(struct net_device *netdev,
hw->nvm.ops.update(hw);
igb_set_fw_version(adapter);
+out:
kfree(eeprom_buff);
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 58872a4c2540..bb3db387d49c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6947,6 +6947,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
struct e1000_hw *hw = &adapter->hw;
struct ptp_clock_event event;
struct timespec64 ts;
+ unsigned long flags;
if (pin < 0 || pin >= IGB_N_SDP)
return;
@@ -6954,9 +6955,12 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
if (hw->mac.type == e1000_82580 ||
hw->mac.type == e1000_i354 ||
hw->mac.type == e1000_i350) {
- s64 ns = rd32(auxstmpl);
+ u64 ns = rd32(auxstmpl);
- ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32;
+ ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32;
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_cyc2time(&adapter->tc, ns);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
ts = ns_to_timespec64(ns);
} else {
ts.tv_nsec = rd32(auxstmpl);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 1c4676882082..fa764190f270 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -254,6 +254,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
+ /* Zero out the buffer ring */
+ memset(tx_ring->tx_buffer_info, 0,
+ sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -267,7 +274,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
*/
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
- igc_clean_tx_ring(tx_ring);
+ igc_disable_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
@@ -6723,6 +6730,9 @@ static void igc_remove(struct pci_dev *pdev)
igc_ptp_stop(adapter);
+ pci_disable_ptm(pdev);
+ pci_clear_master(pdev);
+
set_bit(__IGC_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5d83c887a3fc..1726297f2e0d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1256,7 +1256,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
ixgbe_desc_unused(tx_ring),
TX_WAKE_THRESHOLD,
- netif_carrier_ok(tx_ring->netdev) &&
+ !netif_carrier_ok(tx_ring->netdev) ||
test_bit(__IXGBE_DOWN, &adapter->state)))
++tx_ring->tx_stats.restart_queue;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index e1853da280f9..43eb6e871351 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -981,6 +981,9 @@ int octep_device_setup(struct octep_device *oct)
oct->mmio[i].hw_addr =
ioremap(pci_resource_start(oct->pdev, i * 2),
pci_resource_len(oct->pdev, i * 2));
+ if (!oct->mmio[i].hw_addr)
+ goto unmap_prev;
+
oct->mmio[i].mapped = 1;
}
@@ -1015,7 +1018,9 @@ int octep_device_setup(struct octep_device *oct)
return 0;
unsupported_dev:
- for (i = 0; i < OCTEP_MMIO_REGIONS; i++)
+ i = OCTEP_MMIO_REGIONS;
+unmap_prev:
+ while (i--)
iounmap(oct->mmio[i].hw_addr);
kfree(oct->conf);
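
The new unmap_prev label relies on i holding the index of the first region that failed to map, so "while (i--)" unwinds exactly the mappings that succeeded, and the old unsupported_dev path simply primes i with the full region count. A generic sketch of the pattern with made-up names and parameters:

	static int example_map_regions(void __iomem **addr, const phys_addr_t *start,
				       const unsigned long *len, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			addr[i] = ioremap(start[i], len[i]);
			if (!addr[i])
				goto unmap_prev;
		}
		return 0;

	unmap_prev:
		while (i--)		/* walks back over successful mappings only */
			iounmap(addr[i]);
		return -ENOMEM;
	}
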
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4ad707e758b9..f01d057ad025 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -1878,7 +1878,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
free_cnt = rvu_rsrc_free_count(&txsch->schq);
}
- if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
+ req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
return NIX_AF_ERR_TLX_ALLOC_FAIL;
/* If contiguous queues are needed, check for availability */
@@ -4080,10 +4081,6 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
{
- /* CN10k supports 72KB FIFO size and max packet size of 64k */
- if (rvu->hw->lbk_bufsize == 0x12000)
- return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
-
return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index 51209119f0f2..9f11c1e40737 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -1164,10 +1164,8 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
{
struct npc_exact_table *table;
u16 *cnt, old_cnt;
- bool promisc;
table = rvu->hw->table;
- promisc = table->promisc_mode[drop_mcam_idx];
cnt = &table->cnt_cmd_rules[drop_mcam_idx];
old_cnt = *cnt;
@@ -1179,16 +1177,13 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
*enable_or_disable_cam = false;
- if (promisc)
- goto done;
-
- /* If all rules are deleted and not already in promisc mode; disable cam */
+ /* If all rules are deleted, disable cam */
if (!*cnt && val < 0) {
*enable_or_disable_cam = true;
goto done;
}
- /* If rule got added and not already in promisc mode; enable cam */
+ /* If rule got added, enable cam */
if (!old_cnt && val > 0) {
*enable_or_disable_cam = true;
goto done;
@@ -1443,7 +1438,6 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
u32 drop_mcam_idx;
bool *promisc;
bool rc;
- u32 cnt;
table = rvu->hw->table;
@@ -1466,17 +1460,8 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
return LMAC_AF_ERR_INVALID_PARAM;
}
*promisc = false;
- cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
mutex_unlock(&table->lock);
- /* If no dmac filter entries configured, disable drop rule */
- if (!cnt)
- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
- else
- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
-
- dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
- __func__, cgx_id, lmac_id, cnt);
return 0;
}
@@ -1494,7 +1479,6 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
u32 drop_mcam_idx;
bool *promisc;
bool rc;
- u32 cnt;
table = rvu->hw->table;
@@ -1517,17 +1501,8 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
return LMAC_AF_ERR_INVALID_PARAM;
}
*promisc = true;
- cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
mutex_unlock(&table->lock);
- /* If no dmac filter entries configured, disable drop rule */
- if (!cnt)
- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
- else
- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
-
- dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
- __func__, cgx_id, lmac_id, cnt);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 7045fedfd73a..7af223b0a37f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -652,9 +652,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
htons(ext->lso_sb - skb_network_offset(skb));
} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
ext->lso_format = pfvf->hw.lso_tsov6_idx;
-
- ipv6_hdr(skb)->payload_len =
- htons(ext->lso_sb - skb_network_offset(skb));
+ ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb));
} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
__be16 l3_proto = vlan_get_protocol(skb);
struct udphdr *udph = udp_hdr(skb);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index a75fd072082c..834c644b67db 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3269,18 +3269,14 @@ static int mtk_open(struct net_device *dev)
eth->dsa_meta[i] = md_dst;
}
} else {
- /* Hardware special tag parsing needs to be disabled if at least
- * one MAC does not use DSA.
+ /* Hardware DSA untagging and VLAN RX offloading need to be
+ * disabled if at least one MAC does not use DSA.
*/
u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
val &= ~MTK_CDMP_STAG_EN;
mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
- val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
- val &= ~MTK_CDMQ_STAG_EN;
- mtk_w32(eth, val, MTK_CDMQ_IG_CTRL);
-
mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d53de39539a8..d532883b42d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1920,9 +1920,10 @@ static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod
static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
u32 syndrome, int err)
{
+ const char *namep = mlx5_command_str(opcode);
struct mlx5_cmd_stats *stats;
- if (!err)
+ if (!err || !(strcmp(namep, "unknown command opcode")))
return;
stats = &dev->cmd.stats[opcode];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index f40497823e65..7c0f2adbea00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -490,7 +490,7 @@ static void poll_trace(struct mlx5_fw_tracer *tracer,
(u64)timestamp_low;
break;
default:
- if (tracer_event->event_id >= tracer->str_db.first_string_trace ||
+ if (tracer_event->event_id >= tracer->str_db.first_string_trace &&
tracer_event->event_id <= tracer->str_db.first_string_trace +
tracer->str_db.num_string_trace) {
tracer_event->type = TRACER_EVENT_TYPE_STRING;
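
The one-character change above matters because with "||" any event_id at or above first_string_trace satisfied the condition, so the upper bound never applied; "&&" restores the intended window. In isolation the check reduces to something like the following (names shortened, illustrative only):

	static bool example_id_in_string_db(u32 id, u32 first, u32 num)
	{
		return id >= first && id <= first + num;
	}
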
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b8987a404d75..8e999f238194 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -327,6 +327,7 @@ struct mlx5e_params {
unsigned int sw_mtu;
int hard_mtu;
bool ptp_rx;
+ __be32 terminate_lkey_be;
};
static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 7ac1ad9c46de..7e8e96cc5cd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -51,7 +51,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
if (err)
goto out;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
port_buffer->buffer[i].lossy =
MLX5_GET(bufferx_reg, buffer, lossy);
@@ -73,14 +73,24 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
port_buffer->buffer[i].lossy);
}
- port_buffer->headroom_size = total_used;
+ port_buffer->internal_buffers_size = 0;
+ for (i = MLX5E_MAX_NETWORK_BUFFER; i < MLX5E_TOTAL_BUFFERS; i++) {
+ buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
+ port_buffer->internal_buffers_size +=
+ MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
+ }
+
port_buffer->port_buffer_size =
MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
- port_buffer->spare_buffer_size =
- port_buffer->port_buffer_size - total_used;
-
- mlx5e_dbg(HW, priv, "total buffer size=%d, spare buffer size=%d\n",
- port_buffer->port_buffer_size,
+ port_buffer->headroom_size = total_used;
+ port_buffer->spare_buffer_size = port_buffer->port_buffer_size -
+ port_buffer->internal_buffers_size -
+ port_buffer->headroom_size;
+
+ mlx5e_dbg(HW, priv,
+ "total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n",
+ port_buffer->port_buffer_size, port_buffer->headroom_size,
+ port_buffer->internal_buffers_size,
port_buffer->spare_buffer_size);
out:
kfree(out);
@@ -206,11 +216,11 @@ static int port_update_pool_cfg(struct mlx5_core_dev *mdev,
if (!MLX5_CAP_GEN(mdev, sbcam_reg))
return 0;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
lossless_buff_count += ((port_buffer->buffer[i].size) &&
(!(port_buffer->buffer[i].lossy)));
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count);
err = mlx5e_port_set_sbcm(mdev, 0, i,
MLX5_INGRESS_DIR,
@@ -293,7 +303,7 @@ static int port_set_buffer(struct mlx5e_priv *priv,
if (err)
goto out;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
u64 size = port_buffer->buffer[i].size;
u64 xoff = port_buffer->buffer[i].xoff;
@@ -351,7 +361,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
{
int i;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
if (port_buffer->buffer[i].lossy) {
port_buffer->buffer[i].xoff = 0;
port_buffer->buffer[i].xon = 0;
@@ -408,7 +418,7 @@ static int update_buffer_lossy(struct mlx5_core_dev *mdev,
int err;
int i;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
prio_count = 0;
lossy_count = 0;
@@ -432,11 +442,11 @@ static int update_buffer_lossy(struct mlx5_core_dev *mdev,
}
if (changed) {
- err = port_update_pool_cfg(mdev, port_buffer);
+ err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
- err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
+ err = port_update_pool_cfg(mdev, port_buffer);
if (err)
return err;
@@ -515,7 +525,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
update_prio2buffer = true;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n",
__func__, i, prio2buffer[i]);
@@ -530,7 +540,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
}
if (change & MLX5E_PORT_BUFFER_SIZE) {
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
if (!port_buffer.buffer[i].lossy && !buffer_size[i]) {
mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n",
@@ -544,7 +554,9 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used);
- if (total_used > port_buffer.port_buffer_size)
+ if (total_used > port_buffer.headroom_size &&
+ (total_used - port_buffer.headroom_size) >
+ port_buffer.spare_buffer_size)
return -EINVAL;
update_buffer = true;
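
With the internal buffers accounted separately, spare_buffer_size = port_buffer_size - internal_buffers_size - headroom_size, and the new check accepts a manual configuration only if whatever exceeds the current headroom still fits in that spare space. A toy acceptance check with illustrative numbers, not driver code:

	static bool example_headroom_request_fits(u32 total_used, u32 headroom,
						  u32 spare)
	{
		/* e.g. headroom = 600, spare = 300 cells: requests up to 900 pass */
		return total_used <= headroom || (total_used - headroom) <= spare;
	}
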
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
index a6ef118de758..f4a19ffbb641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
@@ -35,7 +35,8 @@
#include "en.h"
#include "port.h"
-#define MLX5E_MAX_BUFFER 8
+#define MLX5E_MAX_NETWORK_BUFFER 8
+#define MLX5E_TOTAL_BUFFERS 10
#define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */
#define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
@@ -60,8 +61,9 @@ struct mlx5e_bufferx_reg {
struct mlx5e_port_buffer {
u32 port_buffer_size;
u32 spare_buffer_size;
- u32 headroom_size;
- struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER];
+ u32 headroom_size; /* Buffers 0-7 */
+ u32 internal_buffers_size; /* Buffers 8-9 */
+ struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER];
};
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index eb5abd0e55d9..3cbebfba582b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -175,6 +175,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
/* ensure cq space is freed before enabling more cqes */
wmb();
+ mlx5e_txqsq_wake(&ptpsq->txqsq);
+
return work_done == budget;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index fc923a99b6a4..0380a04c3691 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -84,7 +84,7 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
- struct flow_action *flow_action,
+ struct flow_action *flow_action, int from, int to,
struct mlx5_flow_attr *attr,
enum mlx5_flow_namespace_type ns_type)
{
@@ -96,6 +96,11 @@ mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
priv = parse_state->flow->priv;
flow_action_for_each(i, act, flow_action) {
+ if (i < from)
+ continue;
+ else if (i > to)
+ break;
+
tc_act = mlx5e_tc_act_get(act->id, ns_type);
if (!tc_act || !tc_act->post_parse)
continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index 0e6e1872ac62..d6c12d0ea55b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -112,7 +112,7 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
- struct flow_action *flow_action,
+ struct flow_action *flow_action, int from, int to,
struct mlx5_flow_attr *attr,
enum mlx5_flow_namespace_type ns_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 20c2d2ecaf93..f0c3464f037f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -492,6 +492,19 @@ void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
mlx5e_encap_dealloc(priv, e);
}
+static void mlx5e_encap_put_locked(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ lockdep_assert_held(&esw->offloads.encap_tbl_lock);
+
+ if (!refcount_dec_and_test(&e->refcnt))
+ return;
+ list_del(&e->route_list);
+ hash_del_rcu(&e->encap_hlist);
+ mlx5e_encap_dealloc(priv, e);
+}
+
static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -816,6 +829,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
uintptr_t hash_key;
int err = 0;
+ lockdep_assert_held(&esw->offloads.encap_tbl_lock);
+
parse_attr = attr->parse_attr;
tun_info = parse_attr->tun_info[out_index];
mpls_info = &parse_attr->mpls_info[out_index];
@@ -829,7 +844,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
hash_key = hash_encap_info(&key);
- mutex_lock(&esw->offloads.encap_tbl_lock);
e = mlx5e_encap_get(priv, &key, hash_key);
/* must verify if encap is valid or not */
@@ -840,15 +854,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
goto out_err;
}
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- wait_for_completion(&e->res_ready);
-
- /* Protect against concurrent neigh update. */
- mutex_lock(&esw->offloads.encap_tbl_lock);
- if (e->compl_result < 0) {
- err = -EREMOTEIO;
- goto out_err;
- }
goto attach_flow;
}
@@ -877,15 +882,12 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
INIT_LIST_HEAD(&e->flows);
hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
- mutex_unlock(&esw->offloads.encap_tbl_lock);
if (family == AF_INET)
err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
else if (family == AF_INET6)
err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
- /* Protect against concurrent neigh update. */
- mutex_lock(&esw->offloads.encap_tbl_lock);
complete_all(&e->res_ready);
if (err) {
e->compl_result = err;
@@ -920,18 +922,15 @@ attach_flow:
} else {
flow_flag_set(flow, SLOW);
}
- mutex_unlock(&esw->offloads.encap_tbl_lock);
return err;
out_err:
- mutex_unlock(&esw->offloads.encap_tbl_lock);
if (e)
- mlx5e_encap_put(priv, e);
+ mlx5e_encap_put_locked(priv, e);
return err;
out_err_init:
- mutex_unlock(&esw->offloads.encap_tbl_lock);
kfree(tun_info);
kfree(e);
return err;
@@ -1016,6 +1015,93 @@ out_err:
return err;
}
+int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack,
+ bool *vf_tun)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct net_device *encap_dev = NULL;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *out_priv;
+ struct mlx5_eswitch *esw;
+ int out_index;
+ int err = 0;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return 0;
+
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
+ *vf_tun = false;
+
+ esw = priv->mdev->priv.eswitch;
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ struct net_device *out_dev;
+ int mirred_ifindex;
+
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ mirred_ifindex = parse_attr->mirred_ifindex[out_index];
+ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+ err = -ENODEV;
+ goto out;
+ }
+ err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
+ extack, &encap_dev);
+ dev_put(out_dev);
+ if (err)
+ goto out;
+
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
+ *vf_tun = true;
+
+ out_priv = netdev_priv(encap_dev);
+ rpriv = out_priv->ppriv;
+ esw_attr->dests[out_index].rep = rpriv->rep;
+ esw_attr->dests[out_index].mdev = out_priv->mdev;
+ }
+
+ if (*vf_tun && esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return err;
+}
+
+void mlx5e_tc_tun_encap_dests_unset(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ int out_index;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return;
+
+ esw_attr = attr->esw_attr;
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ mlx5e_detach_encap(flow->priv, flow, attr, out_index);
+ kfree(attr->parse_attr->tun_info[out_index]);
+ }
+}
+
static int cmp_route_info(struct mlx5e_route_key *a,
struct mlx5e_route_key *b)
{
@@ -1369,11 +1455,13 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
list_for_each_entry(flow, encap_flows, tmp_list) {
- struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_attr *attr;
if (!mlx5e_is_offloaded_flow(flow))
continue;
+
+ attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
if (flow_flag_test(flow, SLOW))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
index 8ad273dde40e..5d7d67687cbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
@@ -30,6 +30,15 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
void mlx5e_detach_decap_route(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
+int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack,
+ bool *vf_tun);
+void mlx5e_tc_tun_encap_dests_unset(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
+
struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info);
int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 47381e949f1f..879d698b6119 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -193,6 +193,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
+
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 1f90594499c6..41c396e76457 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -150,10 +150,8 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in) {
- err = -ENOMEM;
- goto out;
- }
+ if (!in)
+ return -ENOMEM;
if (enable_uc_lb)
lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
@@ -171,14 +169,13 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
tirn = tir->tirn;
err = mlx5_core_modify_tir(mdev, tirn, in);
if (err)
- goto out;
+ break;
}
+ mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
-out:
kvfree(in);
if (err)
netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
- mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 89de92d06483..ebee52a8361a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -926,9 +926,10 @@ static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
if (err)
return err;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
- dcb_buffer->total_size = port_buffer.port_buffer_size;
+ dcb_buffer->total_size = port_buffer.port_buffer_size -
+ port_buffer.internal_buffers_size;
return 0;
}
@@ -970,7 +971,7 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
if (err)
return err;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
changed |= MLX5E_PORT_BUFFER_SIZE;
buffer_size = dcb_buffer->buffer_size;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2944691f06ad..a7c526ee5024 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -727,26 +727,6 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
mlx5e_rq_shampo_hd_free(rq);
}
-static __be32 mlx5e_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev)
-{
- u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
- u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
- int res;
-
- if (!MLX5_CAP_GEN(dev, terminate_scatter_list_mkey))
- return MLX5_TERMINATE_SCATTER_LIST_LKEY;
-
- MLX5_SET(query_special_contexts_in, in, opcode,
- MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- res = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
- if (res)
- return MLX5_TERMINATE_SCATTER_LIST_LKEY;
-
- res = MLX5_GET(query_special_contexts_out, out,
- terminate_scatter_list_mkey);
- return cpu_to_be32(res);
-}
-
static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_rq_param *rqp,
@@ -908,7 +888,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
/* check if num_frags is not a pow of two */
if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
wqe->data[f].byte_count = 0;
- wqe->data[f].lkey = mlx5e_get_terminate_scatter_list_mkey(mdev);
+ wqe->data[f].lkey = params->terminate_lkey_be;
wqe->data[f].addr = 0;
}
}
@@ -5007,6 +4987,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ params->terminate_lkey_be = mlx5_core_get_terminate_scatter_list_mkey(mdev);
+
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
@@ -5279,12 +5261,16 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
+ priv->dfs_root = debugfs_create_dir("nic",
+ mlx5_debugfs_get_dev_root(mdev));
+
fs = mlx5e_fs_init(priv->profile, mdev,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state),
priv->dfs_root);
if (!fs) {
err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
+ debugfs_remove_recursive(priv->dfs_root);
return err;
}
priv->fs = fs;
@@ -5305,6 +5291,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
+ debugfs_remove_recursive(priv->dfs_root);
priv->fs = NULL;
}
@@ -5851,8 +5838,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
}
static int
-mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *new_profile, void *new_ppriv)
+mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *new_profile, void *new_ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
@@ -5868,6 +5855,25 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
err = new_profile->init(priv->mdev, priv->netdev);
if (err)
goto priv_cleanup;
+
+ return 0;
+
+priv_cleanup:
+ mlx5e_priv_cleanup(priv);
+ return err;
+}
+
+static int
+mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *new_profile, void *new_ppriv)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
+ if (err)
+ return err;
+
err = mlx5e_attach_netdev(priv);
if (err)
goto profile_cleanup;
@@ -5875,7 +5881,6 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
profile_cleanup:
new_profile->cleanup(priv);
-priv_cleanup:
mlx5e_priv_cleanup(priv);
return err;
}
@@ -5894,6 +5899,12 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
priv->profile->cleanup(priv);
mlx5e_priv_cleanup(priv);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ return -EIO;
+ }
+
err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
if (err) { /* roll back to original profile */
netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
@@ -5955,8 +5966,11 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- if (!netif_device_present(netdev))
+ if (!netif_device_present(netdev)) {
+ if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
+ mlx5e_destroy_mdev_resources(mdev);
return -ENODEV;
+ }
mlx5e_detach_netdev(priv);
mlx5e_destroy_mdev_resources(mdev);
@@ -6002,9 +6016,6 @@ static int mlx5e_probe(struct auxiliary_device *adev,
priv->profile = profile;
priv->ppriv = NULL;
- priv->dfs_root = debugfs_create_dir("nic",
- mlx5_debugfs_get_dev_root(priv->mdev));
-
err = profile->init(mdev, netdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
@@ -6033,7 +6044,6 @@ err_resume:
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
- debugfs_remove_recursive(priv->dfs_root);
mlx5e_destroy_netdev(priv);
err_devlink_port_unregister:
mlx5e_devlink_port_unregister(mlx5e_dev);
@@ -6053,7 +6063,6 @@ static void mlx5e_remove(struct auxiliary_device *adev)
unregister_netdev(priv->netdev);
mlx5e_suspend(adev, state);
priv->profile->cleanup(priv);
- debugfs_remove_recursive(priv->dfs_root);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
mlx5e_destroy_devlink(mlx5e_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 1fc386eccaf8..3e7041bd5705 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
@@ -812,11 +813,15 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ priv->dfs_root = debugfs_create_dir("nic",
+ mlx5_debugfs_get_dev_root(mdev));
+
priv->fs = mlx5e_fs_init(priv->profile, mdev,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state),
priv->dfs_root);
if (!priv->fs) {
netdev_err(priv->netdev, "FS allocation failed\n");
+ debugfs_remove_recursive(priv->dfs_root);
return -ENOMEM;
}
@@ -829,6 +834,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
mlx5e_fs_cleanup(priv->fs);
+ debugfs_remove_recursive(priv->dfs_root);
priv->fs = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 728b82ce4031..8a5a8703f0a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1665,11 +1665,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
struct mlx5e_priv *out_priv, *route_priv;
- struct mlx5_devcom *devcom = NULL;
struct mlx5_core_dev *route_mdev;
struct mlx5_eswitch *esw;
u16 vhca_id;
- int err;
out_priv = netdev_priv(out_dev);
esw = out_priv->mdev->priv.eswitch;
@@ -1678,6 +1676,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
if (mlx5_lag_is_active(out_priv->mdev)) {
+ struct mlx5_devcom *devcom;
+ int err;
+
/* In lag case we may get devices from different eswitch instances.
* If we failed to get vport num, it means, mostly, that we on the wrong
* eswitch.
@@ -1686,101 +1687,16 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
if (err != -ENOENT)
return err;
+ rcu_read_lock();
devcom = out_priv->mdev->priv.devcom;
- esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- if (!esw)
- return -ENODEV;
- }
-
- err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
- if (devcom)
- mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- return err;
-}
-
-static int
-set_encap_dests(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5_flow_attr *attr,
- struct netlink_ext_ack *extack,
- bool *vf_tun)
-{
- struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_esw_flow_attr *esw_attr;
- struct net_device *encap_dev = NULL;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5e_priv *out_priv;
- int out_index;
- int err = 0;
-
- if (!mlx5e_is_eswitch_flow(flow))
- return 0;
-
- parse_attr = attr->parse_attr;
- esw_attr = attr->esw_attr;
- *vf_tun = false;
-
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
- struct net_device *out_dev;
- int mirred_ifindex;
-
- if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
- continue;
-
- mirred_ifindex = parse_attr->mirred_ifindex[out_index];
- out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
- if (!out_dev) {
- NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
- err = -ENODEV;
- goto out;
- }
- err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
- extack, &encap_dev);
- dev_put(out_dev);
- if (err)
- goto out;
+ esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
+ rcu_read_unlock();
- if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
- !esw_attr->dest_int_port)
- *vf_tun = true;
-
- out_priv = netdev_priv(encap_dev);
- rpriv = out_priv->ppriv;
- esw_attr->dests[out_index].rep = rpriv->rep;
- esw_attr->dests[out_index].mdev = out_priv->mdev;
- }
-
- if (*vf_tun && esw_attr->out_count > 1) {
- NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
- err = -EOPNOTSUPP;
- goto out;
+ return err;
}
-out:
- return err;
-}
-
-static void
-clean_encap_dests(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5_flow_attr *attr)
-{
- struct mlx5_esw_flow_attr *esw_attr;
- int out_index;
-
- if (!mlx5e_is_eswitch_flow(flow))
- return;
-
- esw_attr = attr->esw_attr;
-
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
- if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
- continue;
-
- mlx5e_detach_encap(priv, flow, attr, out_index);
- kfree(attr->parse_attr->tun_info[out_index]);
- }
+ return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
}
static int
@@ -1819,7 +1735,7 @@ post_process_attr(struct mlx5e_tc_flow *flow,
if (err)
goto err_out;
- err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
+ err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
if (err)
goto err_out;
@@ -3943,8 +3859,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *prev_attr;
struct flow_action_entry *act;
struct mlx5e_tc_act *tc_act;
+ int err, i, i_split = 0;
bool is_missable;
- int err, i;
ns_type = mlx5e_get_flow_namespace(flow);
list_add(&attr->list, &flow->attrs);
@@ -3985,7 +3901,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
i < flow_action->num_entries - 1)) {
is_missable = tc_act->is_missable ? tc_act->is_missable(act) : false;
- err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr,
+ ns_type);
if (err)
goto out_free_post_acts;
@@ -3995,6 +3912,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
goto out_free_post_acts;
}
+ i_split = i + 1;
list_add(&attr->list, &flow->attrs);
}
@@ -4009,7 +3927,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
}
}
- err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, ns_type);
if (err)
goto out_free_post_acts;
@@ -4323,7 +4241,7 @@ mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *a
if (attr->post_act_handle)
mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);
- clean_encap_dests(flow->priv, flow, attr);
+ mlx5e_tc_tun_encap_dests_unset(flow->priv, flow, attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(counter_dev, attr->counter);
@@ -5301,6 +5219,8 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_action_counter;
}
+ mlx5_esw_offloads_devcom_init(esw);
+
return 0;
err_action_counter:
@@ -5329,7 +5249,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
priv = netdev_priv(rpriv->netdev);
esw = priv->mdev->priv.eswitch;
- mlx5e_tc_clean_fdb_peer_flows(esw);
+ mlx5_esw_offloads_devcom_cleanup(esw);
mlx5e_tc_tun_cleanup(uplink_priv->encap);
@@ -5643,22 +5563,43 @@ bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
0, NULL);
}
+static struct mapping_ctx *
+mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tc_table *tc;
+ struct mlx5_eswitch *esw;
+ struct mapping_ctx *ctx;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ esw = priv->mdev->priv.eswitch;
+ ctx = esw->offloads.reg_c0_obj_pool;
+ } else {
+ tc = mlx5e_fs_get_tc(priv->fs);
+ ctx = tc->mapping;
+ }
+
+ return ctx;
+}
+
int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
u64 act_miss_cookie, u32 *act_miss_mapping)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_mapped_obj mapped_obj = {};
+ struct mlx5_eswitch *esw;
struct mapping_ctx *ctx;
int err;
- ctx = esw->offloads.reg_c0_obj_pool;
-
+ ctx = mlx5e_get_priv_obj_mapping(priv);
mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
mapped_obj.act_miss_cookie = act_miss_cookie;
err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
if (err)
return err;
+ if (!is_mdev_switchdev_mode(priv->mdev))
+ return 0;
+
+ esw = priv->mdev->priv.eswitch;
attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
if (IS_ERR(attr->act_id_restore_rule))
goto err_rule;
@@ -5673,10 +5614,9 @@ err_rule:
void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
u32 act_miss_mapping)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mapping_ctx *ctx;
+ struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv);
- ctx = esw->offloads.reg_c0_obj_pool;
- mlx5_del_flow_rules(attr->act_id_restore_rule);
+ if (is_mdev_switchdev_mode(priv->mdev))
+ mlx5_del_flow_rules(attr->act_id_restore_rule);
mapping_remove(ctx, act_miss_mapping);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index df5e780e8e6a..c7eb6b238c2b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -762,6 +762,17 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t
}
}
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
+{
+ if (netif_tx_queue_stopped(sq->txq) &&
+ mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+ mlx5e_ptpsq_fifo_has_room(sq) &&
+ !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats->wake++;
+ }
+}
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq_stats *stats;
@@ -861,13 +872,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
- if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
- mlx5e_ptpsq_fifo_has_room(sq) &&
- !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
- netif_tx_wake_queue(sq->txq);
- stats->wake++;
- }
+ mlx5e_txqsq_wake(sq);
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index a50bfda18e96..fbb2d963fb7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -161,20 +161,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
}
}
+ /* budget=0 means we may be in IRQ context, do as little as possible */
+ if (unlikely(!budget))
+ goto out;
+
busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
- if (likely(budget)) { /* budget=0 means: don't poll rx rings */
- if (xsk_open)
- work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
+ if (xsk_open)
+ work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
- if (likely(budget - work_done))
- work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
+ if (likely(budget - work_done))
+ work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
- busy |= work_done == budget;
- }
+ busy |= work_done == budget;
mlx5e_poll_ico_cq(&c->icosq.cq);
if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
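
budget == 0 comes from netpoll and may run in hard-IRQ context, so the hunk above bails out right after TX completion work instead of touching the RX and XDP queues. A stripped-down shape of such a poll handler; the example_* helpers are placeholders, not mlx5 functions.

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;
		bool busy;

		busy = example_reap_tx_completions(napi);	/* always safe */

		/* budget == 0: netpoll, possibly IRQ context - skip RX/XDP */
		if (unlikely(!budget))
			goto out;

		work_done = example_poll_rx(napi, budget);
		busy |= (work_done == budget);

	out:
		if (busy)
			return budget;
		if (napi_complete_done(napi, work_done))
			example_rearm_irqs(napi);
		return work_done;
	}
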
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 1c35d721a31d..3db4866d7880 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -824,7 +824,7 @@ static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
ncomp_eqs = table->num_comp_eqs;
cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
if (!cpus)
- ret = -ENOMEM;
+ return -ENOMEM;
i = 0;
rcu_read_lock();
@@ -1104,7 +1104,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = dev->priv.eq_table;
mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
- mlx5_irq_table_destroy(dev);
+ mlx5_irq_table_free_irqs(dev);
mutex_unlock(&table->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 1a042c981713..add6cfa432a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -342,6 +342,7 @@ struct mlx5_eswitch {
u32 large_group_num;
} params;
struct blocking_notifier_head n_head;
+ bool paired[MLX5_MAX_PORTS];
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -369,6 +370,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -767,6 +770,8 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 69215ffb9999..8d19c20d3447 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2742,6 +2742,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
break;
+ if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+ break;
+
err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
if (err)
goto err_out;
@@ -2753,14 +2756,18 @@ static int mlx5_esw_offloads_devcom_event(int event,
if (err)
goto err_pair;
+ esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
+ peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
break;
case ESW_OFFLOADS_DEVCOM_UNPAIR:
- if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+ if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
break;
mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+ esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
+ peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
mlx5_esw_offloads_unpair(peer_esw);
mlx5_esw_offloads_unpair(esw);
mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
@@ -2779,7 +2786,7 @@ err_out:
return err;
}
-static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
@@ -2802,7 +2809,7 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
ESW_OFFLOADS_DEVCOM_PAIR, esw);
}
-static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
@@ -3250,8 +3257,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vports;
- esw_offloads_devcom_init(esw);
-
return 0;
err_vports:
@@ -3292,7 +3297,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
- esw_offloads_devcom_cleanup(esw);
mlx5_eswitch_disable_pf_vf_vports(esw);
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index adefde3ea941..b7d779d08d83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -3,6 +3,7 @@
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
+#include "mlx5_core.h"
static LIST_HEAD(devcom_list);
@@ -13,7 +14,7 @@ static LIST_HEAD(devcom_list);
struct mlx5_devcom_component {
struct {
- void *data;
+ void __rcu *data;
} device[MLX5_DEVCOM_PORTS_SUPPORTED];
mlx5_devcom_event_handler_t handler;
@@ -77,6 +78,7 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
return NULL;
+ mlx5_dev_list_lock();
sguid0 = mlx5_query_nic_system_image_guid(dev);
list_for_each_entry(iter, &devcom_list, list) {
struct mlx5_core_dev *tmp_dev = NULL;
@@ -102,8 +104,10 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
if (!priv) {
priv = mlx5_devcom_list_alloc();
- if (!priv)
- return ERR_PTR(-ENOMEM);
+ if (!priv) {
+ devcom = ERR_PTR(-ENOMEM);
+ goto out;
+ }
idx = 0;
new_priv = true;
@@ -112,13 +116,16 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
priv->devs[idx] = dev;
devcom = mlx5_devcom_alloc(priv, idx);
if (!devcom) {
- kfree(priv);
- return ERR_PTR(-ENOMEM);
+ if (new_priv)
+ kfree(priv);
+ devcom = ERR_PTR(-ENOMEM);
+ goto out;
}
if (new_priv)
list_add(&priv->list, &devcom_list);
-
+out:
+ mlx5_dev_list_unlock();
return devcom;
}
@@ -131,6 +138,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
if (IS_ERR_OR_NULL(devcom))
return;
+ mlx5_dev_list_lock();
priv = devcom->priv;
priv->devs[devcom->idx] = NULL;
@@ -141,10 +149,12 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
break;
if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
- return;
+ goto out;
list_del(&priv->list);
kfree(priv);
+out:
+ mlx5_dev_list_unlock();
}
void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
@@ -162,7 +172,7 @@ void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
down_write(&comp->sem);
comp->handler = handler;
- comp->device[devcom->idx].data = data;
+ rcu_assign_pointer(comp->device[devcom->idx].data, data);
up_write(&comp->sem);
}
@@ -176,8 +186,9 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
down_write(&comp->sem);
- comp->device[devcom->idx].data = NULL;
+ RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL);
up_write(&comp->sem);
+ synchronize_rcu();
}
int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
@@ -193,12 +204,15 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
down_write(&comp->sem);
- for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
- if (i != devcom->idx && comp->device[i].data) {
- err = comp->handler(event, comp->device[i].data,
- event_data);
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
+ void *data = rcu_dereference_protected(comp->device[i].data,
+ lockdep_is_held(&comp->sem));
+
+ if (i != devcom->idx && data) {
+ err = comp->handler(event, data, event_data);
break;
}
+ }
up_write(&comp->sem);
return err;
@@ -213,7 +227,7 @@ void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
WARN_ON(!rwsem_is_locked(&comp->sem));
- comp->paired = paired;
+ WRITE_ONCE(comp->paired, paired);
}
bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
@@ -222,7 +236,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
if (IS_ERR_OR_NULL(devcom))
return false;
- return devcom->priv->components[id].paired;
+ return READ_ONCE(devcom->priv->components[id].paired);
}
void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
@@ -236,7 +250,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
down_read(&comp->sem);
- if (!comp->paired) {
+ if (!READ_ONCE(comp->paired)) {
up_read(&comp->sem);
return NULL;
}
@@ -245,7 +259,29 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
if (i != devcom->idx)
break;
- return comp->device[i].data;
+ return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem));
+}
+
+void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id)
+{
+ struct mlx5_devcom_component *comp;
+ int i;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return NULL;
+
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
+ if (i != devcom->idx)
+ break;
+
+ comp = &devcom->priv->components[id];
+ /* This can change concurrently, however 'data' pointer will remain
+ * valid for the duration of RCU read section.
+ */
+ if (!READ_ONCE(comp->paired))
+ return NULL;
+
+ return rcu_dereference(comp->device[i].data);
}
void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
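
The devcom data pointer is now handled RCU-style: writers publish it under the component semaphore, unregister clears it and waits with synchronize_rcu(), and lock-free readers such as the new mlx5_devcom_get_peer_data_rcu() may only use the pointer inside an RCU read section. A generic sketch of that lifecycle with made-up names; example_use() stands in for whatever work the caller does on the peer object.

	struct example_slot {
		void __rcu *data;
	};

	static int example_use(void *data);	/* placeholder for real work */

	static void example_publish(struct example_slot *slot, void *data)
	{
		/* writer side, serialized by the caller (the semaphore in devcom) */
		rcu_assign_pointer(slot->data, data);
	}

	static int example_peer_call(struct example_slot *slot)
	{
		void *data;
		int err = -ENODEV;

		rcu_read_lock();
		data = rcu_dereference(slot->data);
		if (data)
			err = example_use(data);	/* only inside the read section */
		rcu_read_unlock();

		return err;
	}

	static void example_retire(struct example_slot *slot)
	{
		RCU_INIT_POINTER(slot->data, NULL);
		synchronize_rcu();	/* readers drain before the object is freed */
	}
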
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index 94313c18bb64..9a496f4722da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -41,6 +41,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
+void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id);
void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 995eb2d5ace0..d6ee016deae1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -923,7 +923,6 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
}
mlx5_pci_vsc_init(dev);
- dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
return 0;
err_clr_master:
@@ -1049,7 +1048,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
dev->dm = mlx5_dm_create(dev);
if (IS_ERR(dev->dm))
- mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
+ mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
@@ -1155,6 +1154,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout
goto err_cmd_cleanup;
}
+ dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
mlx5_start_health_poll(dev);
@@ -1802,15 +1802,16 @@ static void remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(dev);
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
- /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
- * fw_reset before unregistering the devlink.
+ /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
+ * devlink notify APIs.
+ * Hence, we must drain them before unregistering the devlink.
*/
mlx5_drain_fw_reset(dev);
+ mlx5_drain_health_wq(dev);
devlink_unregister(devlink);
mlx5_sriov_disable(pdev);
mlx5_thermal_uninit(dev);
mlx5_crdump_disable(dev);
- mlx5_drain_health_wq(dev);
mlx5_uninit_one(dev);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 1d879374acaa..229520405d4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -276,18 +276,6 @@ static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
return pci_num_vf(dev->pdev) ? true : false;
}
-static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
-{
- /* LACP owner conditions:
- * 1) Function is physical.
- * 2) LAG is supported by FW.
- * 3) LAG is managed by driver (currently the only option).
- */
- return MLX5_CAP_GEN(dev, vport_group_manager) &&
- (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
- MLX5_CAP_GEN(dev, lag_master);
-}
-
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev);
static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index efd0c299c5c7..aa403a5ea34e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -15,6 +15,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
+void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 9d735c343a3b..678f0be81375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/qp.h>
#include "mlx5_core.h"
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
@@ -122,3 +123,23 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
return mlx5_cmd_exec_in(dev, destroy_psv, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_psv);
+
+__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ u32 mkey;
+
+ if (!MLX5_CAP_GEN(dev, terminate_scatter_list_mkey))
+ return MLX5_TERMINATE_SCATTER_LIST_LKEY;
+
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ if (mlx5_cmd_exec_inout(dev, query_special_contexts, in, out))
+ return MLX5_TERMINATE_SCATTER_LIST_LKEY;
+
+ mkey = MLX5_GET(query_special_contexts_out, out,
+ terminate_scatter_list_mkey);
+ return cpu_to_be32(mkey);
+}
+EXPORT_SYMBOL(mlx5_core_get_terminate_scatter_list_mkey);
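The new helper above already applies cpu_to_be32() before returning, so a caller can drop the value straight into a wire-format __be32 descriptor field. A minimal sketch of such a caller follows; the struct and function names are hypothetical, not part of the patch or the mlx5 API.

/* Illustrative only: a made-up SGE layout showing where a big-endian
 * terminate-scatter-list mkey would typically land.
 */
#include <linux/types.h>

struct example_sge {
	__be64 addr;
	__be32 byte_count;
	__be32 lkey;
};

static void example_fill_terminate_sge(struct mlx5_core_dev *dev,
					struct example_sge *sge)
{
	sge->addr = 0;
	sge->byte_count = 0;
	/* Already big-endian, no further conversion needed. */
	sge->lkey = mlx5_core_get_terminate_scatter_list_mkey(dev);
}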
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 2245d3b2f393..843da89a9035 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -32,6 +32,7 @@ struct mlx5_irq {
struct mlx5_irq_pool *pool;
int refcount;
struct msi_map map;
+ u32 pool_index;
};
struct mlx5_irq_table {
@@ -132,7 +133,7 @@ static void irq_release(struct mlx5_irq *irq)
struct cpu_rmap *rmap;
#endif
- xa_erase(&pool->irqs, irq->map.index);
+ xa_erase(&pool->irqs, irq->pool_index);
/* free_irq requires that affinity_hint and rmap will be cleared before
* calling it. To satisfy this requirement, we call
* irq_cpu_rmap_remove() to remove the notifier
@@ -140,7 +141,7 @@ static void irq_release(struct mlx5_irq *irq)
irq_update_affinity_hint(irq->map.virq, NULL);
#ifdef CONFIG_RFS_ACCEL
rmap = mlx5_eq_table_get_rmap(pool->dev);
- if (rmap && irq->map.index)
+ if (rmap)
irq_cpu_rmap_remove(rmap, irq->map.virq);
#endif
@@ -231,12 +232,13 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
if (!irq)
return ERR_PTR(-ENOMEM);
if (!i || !pci_msix_can_alloc_dyn(dev->pdev)) {
- /* The vector at index 0 was already allocated.
- * Just get the irq number. If dynamic irq is not supported
- * vectors have also been allocated.
+ /* The vector at index 0 is always statically allocated. If
+ * dynamic irq is not supported all vectors are statically
+ * allocated. In both cases just get the irq number and set
+ * the index.
*/
irq->map.virq = pci_irq_vector(dev->pdev, i);
- irq->map.index = 0;
+ irq->map.index = i;
} else {
irq->map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, af_desc);
if (!irq->map.virq) {
@@ -276,11 +278,11 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
}
irq->pool = pool;
irq->refcount = 1;
- irq->map.index = i;
- err = xa_err(xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL));
+ irq->pool_index = i;
+ err = xa_err(xa_store(&pool->irqs, irq->pool_index, irq, GFP_KERNEL));
if (err) {
mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
- irq->map.index, err);
+ irq->pool_index, err);
goto err_xa;
}
return irq;
@@ -567,13 +569,13 @@ int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
struct mlx5_irq *irq;
int i;
- af_desc.is_managed = 1;
+ af_desc.is_managed = false;
for (i = 0; i < nirqs; i++) {
+ cpumask_clear(&af_desc.mask);
cpumask_set_cpu(cpus[i], &af_desc.mask);
irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap);
if (IS_ERR(irq))
break;
- cpumask_clear(&af_desc.mask);
irqs[i] = irq;
}
@@ -691,6 +693,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table)
irq_pool_free(table->pcif_pool);
}
+static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
+{
+ struct mlx5_irq *irq;
+ unsigned long index;
+
+ xa_for_each(&pool->irqs, index, irq)
+ free_irq(irq->map.virq, &irq->nh);
+}
+
+static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
+{
+ if (table->sf_ctrl_pool) {
+ mlx5_irq_pool_free_irqs(table->sf_comp_pool);
+ mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
+ }
+ mlx5_irq_pool_free_irqs(table->pcif_pool);
+}
+
/* irq_table API */
int mlx5_irq_table_init(struct mlx5_core_dev *dev)
@@ -774,6 +794,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
pci_free_irq_vectors(dev->pdev);
}
+void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_irq_table *table = dev->priv.irq_table;
+
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_irq_pools_free_irqs(table);
+ pci_free_irq_vectors(dev->pdev);
+}
+
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
{
if (table->sf_comp_pool)
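The new free-irqs path above walks each pool's xarray and releases every Linux interrupt with free_irq(). Below is a minimal kernel-style sketch of that walk-and-free idiom, assuming simplified stand-in types rather than the driver's real structures.

#include <linux/xarray.h>
#include <linux/interrupt.h>

struct demo_irq {
	int virq;			/* Linux interrupt number */
	struct notifier_block *nh;	/* used as the dev_id cookie */
};

static void demo_pool_free_irqs(struct xarray *irqs)
{
	struct demo_irq *irq;
	unsigned long index;

	/* Visit every entry still stored in the pool and release its
	 * interrupt; the xarray entries themselves are left in place,
	 * matching the behaviour of mlx5_irq_pool_free_irqs() above.
	 */
	xa_for_each(irqs, index, irq)
		free_irq(irq->virq, irq->nh);
}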
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index e2f26d0bc615..0692363cf80e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -63,6 +63,7 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
+ mlx5_drain_health_wq(sf_dev->mdev);
devlink_unregister(devlink);
mlx5_uninit_one(sf_dev->mdev);
iounmap(sf_dev->mdev->iseg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 3835ba3f4dda..1aa525e509f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -117,6 +117,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
+ caps->roce_caps.fl_rc_qp_when_roce_disabled =
+ MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
if (MLX5_CAP_GEN(mdev, roce)) {
err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
@@ -124,7 +126,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
return err;
caps->roce_caps.roce_en = roce_en;
- caps->roce_caps.fl_rc_qp_when_roce_disabled =
+ caps->roce_caps.fl_rc_qp_when_roce_disabled |=
MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
caps->roce_caps.fl_rc_qp_when_roce_enabled =
MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
index 13e06a6a6b22..d6947fe13d56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
@@ -213,6 +213,8 @@ struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)
}
INIT_LIST_HEAD(&mgr->ptrn_list);
+ mutex_init(&mgr->modify_hdr_mutex);
+
return mgr;
free_mgr:
@@ -237,5 +239,6 @@ void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr)
}
mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool);
+ mutex_destroy(&mgr->modify_hdr_mutex);
kfree(mgr);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 9413aaf51251..e94fbb015efa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -15,7 +15,8 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
{
u32 crc = crc32(0, input_data, length);
- return (__force u32)htonl(crc);
+ return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
+ ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
}
bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
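The open-coded expression above reverses the byte order of the CRC unconditionally, whereas htonl() only swaps on little-endian hosts. A small self-contained check, assuming a GCC/Clang toolchain for __builtin_bswap32(), shows the reversal it performs:

#include <assert.h>
#include <stdint.h>

/* Unconditional byte reversal, as open-coded in dr_ste_crc32_calc();
 * the result no longer depends on host endianness.
 */
static uint32_t swap32(uint32_t v)
{
	return ((v >> 24) & 0xff) | ((v << 8) & 0xff0000) |
	       ((v >> 8) & 0xff00) | ((v << 24) & 0xff000000);
}

int main(void)
{
	assert(swap32(0x12345678u) == 0x78563412u);
	assert(swap32(0x12345678u) == __builtin_bswap32(0x12345678u));
	return 0;
}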
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
index afa3b92a6905..0d5a41a2ae01 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -245,12 +245,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
skb = priv->rx_skb[rx_pi_rem];
- skb_put(skb, datalen);
-
- skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
-
- skb->protocol = eth_type_trans(skb, netdev);
-
/* Alloc another RX SKB for this same index */
rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
&rx_buf_dma, DMA_FROM_DEVICE);
@@ -259,6 +253,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
priv->rx_skb[rx_pi_rem] = rx_skb;
dma_unmap_single(priv->dev, *rx_wqe_addr,
MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
+
+ skb_put(skb, datalen);
+
+ skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
*rx_wqe_addr = rx_buf_dma;
} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
priv->stats.rx_mac_errors++;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 2b6e046e1d10..ee2698698d71 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -1039,6 +1039,16 @@ static int lan966x_reset_switch(struct lan966x *lan966x)
reset_control_reset(switch_reset);
+ /* Don't reinitialize the switch core, if it is already initialized. In
+ * case it is initialized twice, some pointers inside the queue system
+ * in HW will get corrupted and then after a while the queue system gets
+ * full and no traffic is passing through the switch. The issue is seen
+ * when loading and unloading the driver and sending traffic through the
+ * switch.
+ */
+ if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA)
+ return 0;
+
lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
ret = readx_poll_timeout(lan966x_ram_init, lan966x,
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 06d6292e09b3..d907727c7b7a 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1279,8 +1279,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
if (comp_read < 1)
return;
- apc->eth_stats.tx_cqes = comp_read;
-
for (i = 0; i < comp_read; i++) {
struct mana_tx_comp_oob *cqe_oob;
@@ -1363,8 +1361,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
WARN_ON_ONCE(1);
cq->work_done = pkt_transmitted;
-
- apc->eth_stats.tx_cqes -= pkt_transmitted;
}
static void mana_post_pkt_rxq(struct mana_rxq *rxq)
@@ -1626,15 +1622,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
{
struct gdma_comp *comp = cq->gdma_comp_buf;
struct mana_rxq *rxq = cq->rxq;
- struct mana_port_context *apc;
int comp_read, i;
- apc = netdev_priv(rxq->ndev);
-
comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
- apc->eth_stats.rx_cqes = comp_read;
rxq->xdp_flush = false;
for (i = 0; i < comp_read; i++) {
@@ -1646,8 +1638,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
return;
mana_process_rx_cqe(rxq, cq, &comp[i]);
-
- apc->eth_stats.rx_cqes--;
}
if (rxq->xdp_flush)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index a64c81410dc1..0dc78679f620 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -13,11 +13,9 @@ static const struct {
} mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
- {"tx_cqes", offsetof(struct mana_ethtool_stats, tx_cqes)},
{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
tx_cqe_unknown_type)},
- {"rx_cqes", offsetof(struct mana_ethtool_stats, rx_cqes)},
{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
rx_coalesced_err)},
{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.h b/drivers/net/ethernet/netronome/nfp/nic/main.h
index 094374df42b8..38b8b10b03cd 100644
--- a/drivers/net/ethernet/netronome/nfp/nic/main.h
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.h
@@ -8,7 +8,7 @@
#ifdef CONFIG_DCB
/* DCB feature definitions */
-#define NFP_NET_MAX_DSCP 4
+#define NFP_NET_MAX_DSCP 64
#define NFP_NET_MAX_TC IEEE_8021QAZ_MAX_TCS
#define NFP_NET_MAX_PRIO 8
#define NFP_DCB_CFG_STRIDE 256
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 0605d1ee490d..7a549b834e97 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -6138,6 +6138,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
return 0;
out_error:
+ nv_mgmt_release_sema(dev);
if (phystate_orig)
writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
out_freering:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 2edd6bf64a3c..7776d3bdd459 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1903,7 +1903,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
u32 i;
- if (!cdev) {
+ if (!cdev || cdev->recov_in_prog) {
memset(stats, 0, sizeof(*stats));
return;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index f9931ecb7baa..4d83ceebdc49 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -269,6 +269,10 @@ struct qede_dev {
#define QEDE_ERR_WARN 3
struct qede_dump_info dump_info;
+ struct delayed_work periodic_task;
+ unsigned long stats_coal_ticks;
+ u32 stats_coal_usecs;
+ spinlock_t stats_lock; /* lock for vport stats access */
};
enum QEDE_STATE {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 374a86b875a3..95820cf1cd6c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -429,6 +429,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
}
}
+ spin_lock(&edev->stats_lock);
+
for (i = 0; i < QEDE_NUM_STATS; i++) {
if (qede_is_irrelevant_stat(edev, i))
continue;
@@ -438,6 +440,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
buf++;
}
+ spin_unlock(&edev->stats_lock);
+
__qede_unlock(edev);
}
@@ -829,6 +833,7 @@ out:
coal->rx_coalesce_usecs = rx_coal;
coal->tx_coalesce_usecs = tx_coal;
+ coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;
return rc;
}
@@ -842,6 +847,19 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
int i, rc = 0;
u16 rxc, txc;
+ if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
+ edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
+ if (edev->stats_coal_usecs) {
+ edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
+ schedule_delayed_work(&edev->periodic_task, 0);
+
+ DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
+ edev->stats_coal_ticks);
+ } else {
+ cancel_delayed_work_sync(&edev->periodic_task);
+ }
+ }
+
if (!netif_running(dev)) {
DP_INFO(edev, "Interface is down\n");
return -EINVAL;
@@ -2252,7 +2270,8 @@ out:
}
static const struct ethtool_ops qede_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
@@ -2303,7 +2322,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 4c6c685820e3..4b004a728190 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -307,6 +307,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->ops->get_vport_stats(edev->cdev, &stats);
+ spin_lock(&edev->stats_lock);
+
p_common->no_buff_discards = stats.common.no_buff_discards;
p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
p_common->ttl0_discard = stats.common.ttl0_discard;
@@ -404,6 +406,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
p_ah->tx_1519_to_max_byte_packets =
stats.ah.tx_1519_to_max_byte_packets;
}
+
+ spin_unlock(&edev->stats_lock);
}
static void qede_get_stats64(struct net_device *dev,
@@ -412,9 +416,10 @@ static void qede_get_stats64(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
struct qede_stats_common *p_common;
- qede_fill_by_demand_stats(edev);
p_common = &edev->stats.common;
+ spin_lock(&edev->stats_lock);
+
stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
p_common->rx_bcast_pkts;
stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
@@ -434,6 +439,8 @@ static void qede_get_stats64(struct net_device *dev,
stats->collisions = edev->stats.bb.tx_total_collisions;
stats->rx_crc_errors = p_common->rx_crc_errors;
stats->rx_frame_errors = p_common->rx_align_errors;
+
+ spin_unlock(&edev->stats_lock);
}
#ifdef CONFIG_QED_SRIOV
@@ -1063,6 +1070,23 @@ static void qede_unlock(struct qede_dev *edev)
rtnl_unlock();
}
+static void qede_periodic_task(struct work_struct *work)
+{
+ struct qede_dev *edev = container_of(work, struct qede_dev,
+ periodic_task.work);
+
+ qede_fill_by_demand_stats(edev);
+ schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
+}
+
+static void qede_init_periodic_task(struct qede_dev *edev)
+{
+ INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
+ spin_lock_init(&edev->stats_lock);
+ edev->stats_coal_usecs = USEC_PER_SEC;
+ edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
+}
+
static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
@@ -1082,6 +1106,7 @@ static void qede_sp_task(struct work_struct *work)
*/
if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
+ cancel_delayed_work_sync(&edev->periodic_task);
#ifdef CONFIG_QED_SRIOV
/* SRIOV must be disabled outside the lock to avoid a deadlock.
* The recovery of the active VFs is currently not supported.
@@ -1272,6 +1297,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
*/
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
+ qede_init_periodic_task(edev);
rc = register_netdev(edev->ndev);
if (rc) {
@@ -1296,6 +1322,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->rx_copybreak = QEDE_RX_HDR_SIZE;
qede_log_probe(edev);
+
+ /* retain user config (for example - after recovery) */
+ if (edev->stats_coal_usecs)
+ schedule_delayed_work(&edev->periodic_task, 0);
+
return 0;
err4:
@@ -1364,6 +1395,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);
+ cancel_delayed_work_sync(&edev->periodic_task);
edev->ops->common->set_power_state(cdev, PCI_D0);
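The qede changes above add a self-rearming delayed work item that refreshes stats at the configured coalescing interval and is cancelled on recovery and removal. A kernel-style sketch of that pattern follows; the names are illustrative, not the driver's own.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_dev {
	struct delayed_work periodic_task;
	unsigned long interval;	/* in jiffies */
};

static void demo_periodic_task(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev,
					    periodic_task.work);

	/* ... collect stats under the appropriate lock ... */

	/* Rearm for the next interval. */
	schedule_delayed_work(&dev->periodic_task, dev->interval);
}

static void demo_init(struct demo_dev *dev)
{
	INIT_DELAYED_WORK(&dev->periodic_task, demo_periodic_task);
	dev->interval = usecs_to_jiffies(USEC_PER_SEC);
	schedule_delayed_work(&dev->periodic_task, 0);
}

/* On teardown (and before recovery), cancel_delayed_work_sync() must run so
 * the work cannot rearm itself while the device is going away.
 */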
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index a7e376e7e689..4b19803a7dd0 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -616,10 +616,10 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- spinlock_t config25_lock;
- spinlock_t mac_ocp_lock;
+ raw_spinlock_t config25_lock;
+ raw_spinlock_t mac_ocp_lock;
- spinlock_t cfg9346_usage_lock;
+ raw_spinlock_t cfg9346_usage_lock;
int cfg9346_usage_count;
unsigned supports_gmii:1;
@@ -671,20 +671,20 @@ static void rtl_lock_config_regs(struct rtl8169_private *tp)
{
unsigned long flags;
- spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
+ raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
if (!--tp->cfg9346_usage_count)
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
- spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
}
static void rtl_unlock_config_regs(struct rtl8169_private *tp)
{
unsigned long flags;
- spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
+ raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
if (!tp->cfg9346_usage_count++)
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
}
static void rtl_pci_commit(struct rtl8169_private *tp)
@@ -698,10 +698,10 @@ static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set)
unsigned long flags;
u8 val;
- spin_lock_irqsave(&tp->config25_lock, flags);
+ raw_spin_lock_irqsave(&tp->config25_lock, flags);
val = RTL_R8(tp, Config2);
RTL_W8(tp, Config2, (val & ~clear) | set);
- spin_unlock_irqrestore(&tp->config25_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
}
static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
@@ -709,10 +709,10 @@ static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
unsigned long flags;
u8 val;
- spin_lock_irqsave(&tp->config25_lock, flags);
+ raw_spin_lock_irqsave(&tp->config25_lock, flags);
val = RTL_R8(tp, Config5);
RTL_W8(tp, Config5, (val & ~clear) | set);
- spin_unlock_irqrestore(&tp->config25_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
}
static bool rtl_is_8125(struct rtl8169_private *tp)
@@ -899,9 +899,9 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
unsigned long flags;
- spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+ raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
__r8168_mac_ocp_write(tp, reg, data);
- spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
}
static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
@@ -919,9 +919,9 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
unsigned long flags;
u16 val;
- spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+ raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
val = __r8168_mac_ocp_read(tp, reg);
- spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
return val;
}
@@ -932,10 +932,10 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
unsigned long flags;
u16 data;
- spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+ raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
data = __r8168_mac_ocp_read(tp, reg);
__r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
- spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
}
/* Work around a hw issue with RTL8168g PHY, the quirk disables
@@ -1420,14 +1420,14 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
}
- spin_lock_irqsave(&tp->config25_lock, flags);
+ raw_spin_lock_irqsave(&tp->config25_lock, flags);
for (i = 0; i < tmp; i++) {
options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
if (wolopts & cfg[i].opt)
options |= cfg[i].mask;
RTL_W8(tp, cfg[i].reg, options);
}
- spin_unlock_irqrestore(&tp->config25_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
@@ -5179,9 +5179,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->eee_adv = -1;
tp->ocp_base = OCP_STD_PHY_BASE;
- spin_lock_init(&tp->cfg9346_usage_lock);
- spin_lock_init(&tp->config25_lock);
- spin_lock_init(&tp->mac_ocp_lock);
+ raw_spin_lock_init(&tp->cfg9346_usage_lock);
+ raw_spin_lock_init(&tp->config25_lock);
+ raw_spin_lock_init(&tp->mac_ocp_lock);
dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
struct pcpu_sw_netstats);
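The r8169 conversion above switches the register-access locks to raw_spinlock_t, since on PREEMPT_RT a plain spinlock_t may sleep and these paths can run in hard-irq context. A minimal sketch of the raw spinlock idiom, with illustrative names only:

#include <linux/spinlock.h>
#include <linux/io.h>

static DEFINE_RAW_SPINLOCK(demo_reg_lock);

static void demo_reg_modify(void __iomem *reg, u8 clear, u8 set)
{
	unsigned long flags;
	u8 val;

	/* raw_spin_lock_irqsave() never sleeps, even on PREEMPT_RT,
	 * so this read-modify-write is safe from any context.
	 */
	raw_spin_lock_irqsave(&demo_reg_lock, flags);
	val = readb(reg);
	writeb((val & ~clear) | set, reg);
	raw_spin_unlock_irqrestore(&demo_reg_lock, flags);
}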
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 29afaddb598d..fa6d6202b129 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -347,17 +347,6 @@ out:
return -ENOMEM;
}
-static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
-{
- struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
-
- gq->ring_size = TS_RING_SIZE;
- gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
- sizeof(struct rswitch_ts_desc) *
- (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
- return !gq->ts_ring ? -ENOMEM : 0;
-}
-
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
desc->dptrl = cpu_to_le32(lower_32_bits(addr));
@@ -533,6 +522,28 @@ static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
gwca->linkfix_table = NULL;
}
+static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
+{
+ struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+ struct rswitch_ts_desc *desc;
+
+ gq->ring_size = TS_RING_SIZE;
+ gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(struct rswitch_ts_desc) *
+ (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+
+ if (!gq->ts_ring)
+ return -ENOMEM;
+
+ rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
+ desc = &gq->ts_ring[gq->ring_size];
+ desc->desc.die_dt = DT_LINKFIX;
+ rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
+ INIT_LIST_HEAD(&priv->gwca.ts_info_list);
+
+ return 0;
+}
+
static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
struct rswitch_gwca_queue *gq;
@@ -1485,7 +1496,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
netif_stop_subqueue(ndev, 0);
- return ret;
+ return NETDEV_TX_BUSY;
}
if (skb_put_padto(skb, ETH_ZLEN))
@@ -1780,9 +1791,6 @@ static int rswitch_init(struct rswitch_private *priv)
if (err < 0)
goto err_ts_queue_alloc;
- rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
- INIT_LIST_HEAD(&priv->gwca.ts_info_list);
-
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
err = rswitch_device_alloc(priv, i);
if (err < 0) {
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index d916877b5a9a..be395cd8770b 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -378,7 +378,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
efx->net_dev = net_dev;
SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
- net_dev->features |= efx->type->offload_features;
+ /* enable all supported features except rx-fcs and rx-all */
+ net_dev->features |= efx->type->offload_features &
+ ~(NETIF_F_RXFCS | NETIF_F_RXALL);
net_dev->hw_features |= efx->type->offload_features;
net_dev->hw_enc_features |= efx->type->offload_features;
net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index fcea3ea809d7..41b33a75333c 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -301,6 +301,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->tx_channel_offset = 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
@@ -322,6 +323,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
efx->legacy_irq = efx->pci_dev->irq;
}
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
index 381b805659d3..ef9971cbb695 100644
--- a/drivers/net/ethernet/sfc/efx_devlink.c
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -171,9 +171,14 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx,
rc = efx_mcdi_nvram_metadata(efx, partition_type, NULL, version, NULL,
0);
+
+ /* If the partition does not exist, that is not an error. */
+ if (rc == -ENOENT)
+ return 0;
+
if (rc) {
- netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed\n",
- version_name);
+ netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed (rc=%d)\n",
+ version_name, rc);
return rc;
}
@@ -187,36 +192,33 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx,
static int efx_devlink_info_stored_versions(struct efx_nic *efx,
struct devlink_info_req *req)
{
- int rc;
-
- rc = efx_devlink_info_nvram_partition(efx, req,
- NVRAM_PARTITION_TYPE_BUNDLE,
- DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID);
- if (rc)
- return rc;
-
- rc = efx_devlink_info_nvram_partition(efx, req,
- NVRAM_PARTITION_TYPE_MC_FIRMWARE,
- DEVLINK_INFO_VERSION_GENERIC_FW_MGMT);
- if (rc)
- return rc;
-
- rc = efx_devlink_info_nvram_partition(efx, req,
- NVRAM_PARTITION_TYPE_SUC_FIRMWARE,
- EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC);
- if (rc)
- return rc;
-
- rc = efx_devlink_info_nvram_partition(efx, req,
- NVRAM_PARTITION_TYPE_EXPANSION_ROM,
- EFX_DEVLINK_INFO_VERSION_FW_EXPROM);
- if (rc)
- return rc;
+ int err;
- rc = efx_devlink_info_nvram_partition(efx, req,
- NVRAM_PARTITION_TYPE_EXPANSION_UEFI,
- EFX_DEVLINK_INFO_VERSION_FW_UEFI);
- return rc;
+ /* We do not care here about the specific error but just if an error
+ * happened. The specific error will be reported inside the call
+ * through system messages, and if any error happened in any call
+ * below, we report it through extack.
+ */
+ err = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_BUNDLE,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID);
+
+ err |= efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_MC_FIRMWARE,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT);
+
+ err |= efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_SUC_FIRMWARE,
+ EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC);
+
+ err |= efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_EXPANSION_ROM,
+ EFX_DEVLINK_INFO_VERSION_FW_EXPROM);
+
+ err |= efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_EXPANSION_UEFI,
+ EFX_DEVLINK_INFO_VERSION_FW_UEFI);
+ return err;
}
#define EFX_VER_FLAG(_f) \
@@ -587,27 +589,20 @@ static int efx_devlink_info_get(struct devlink *devlink,
{
struct efx_devlink *devlink_private = devlink_priv(devlink);
struct efx_nic *efx = devlink_private->efx;
- int rc;
+ int err;
- /* Several different MCDI commands are used. We report first error
- * through extack returning at that point. Specific error
- * information via system messages.
+ /* Several different MCDI commands are used. We report if errors
+ * happened through extack. Specific error information via system
+ * messages inside the calls.
*/
- rc = efx_devlink_info_board_cfg(efx, req);
- if (rc) {
- NL_SET_ERR_MSG_MOD(extack, "Getting board info failed");
- return rc;
- }
- rc = efx_devlink_info_stored_versions(efx, req);
- if (rc) {
- NL_SET_ERR_MSG_MOD(extack, "Getting stored versions failed");
- return rc;
- }
- rc = efx_devlink_info_running_versions(efx, req);
- if (rc) {
- NL_SET_ERR_MSG_MOD(extack, "Getting running versions failed");
- return rc;
- }
+ err = efx_devlink_info_board_cfg(efx, req);
+
+ err |= efx_devlink_info_stored_versions(efx, req);
+
+ err |= efx_devlink_info_running_versions(efx, req);
+
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Errors when getting device info. Check system messages");
return 0;
}
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index 06ed74994e36..1776f7f8a7a9 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -302,6 +302,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
efx->tx_channel_offset = 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
@@ -323,6 +324,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
efx->legacy_irq = efx->pci_dev->irq;
}
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 0327639a628a..c004443c1d58 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -624,13 +624,12 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
if (!found) { /* We don't care. */
netif_dbg(efx, drv, efx->net_dev,
"Ignoring foreign filter that doesn't egdev us\n");
- rc = -EOPNOTSUPP;
- goto release;
+ return -EOPNOTSUPP;
}
rc = efx_mae_match_check_caps(efx, &match.mask, NULL);
if (rc)
- goto release;
+ return rc;
if (efx_tc_match_is_encap(&match.mask)) {
enum efx_encap_type type;
@@ -639,8 +638,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
if (type == EFX_ENCAP_TYPE_NONE) {
NL_SET_ERR_MSG_MOD(extack,
"Egress encap match on unsupported tunnel device");
- rc = -EOPNOTSUPP;
- goto release;
+ return -EOPNOTSUPP;
}
rc = efx_mae_check_encap_type_supported(efx, type);
@@ -648,25 +646,24 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
NL_SET_ERR_MSG_FMT_MOD(extack,
"Firmware reports no support for %s encap match",
efx_tc_encap_type_name(type));
- goto release;
+ return rc;
}
rc = efx_tc_flower_record_encap_match(efx, &match, type,
extack);
if (rc)
- goto release;
+ return rc;
} else {
/* This is not a tunnel decap rule, ignore it */
netif_dbg(efx, drv, efx->net_dev,
"Ignoring foreign filter without encap match\n");
- rc = -EOPNOTSUPP;
- goto release;
+ return -EOPNOTSUPP;
}
rule = kzalloc(sizeof(*rule), GFP_USER);
if (!rule) {
rc = -ENOMEM;
- goto release;
+ goto out_free;
}
INIT_LIST_HEAD(&rule->acts.list);
rule->cookie = tc->cookie;
@@ -678,7 +675,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
"Ignoring already-offloaded rule (cookie %lx)\n",
tc->cookie);
rc = -EEXIST;
- goto release;
+ goto out_free;
}
act = kzalloc(sizeof(*act), GFP_USER);
@@ -843,6 +840,7 @@ release:
efx_tc_match_action_ht_params);
efx_tc_free_action_set_list(efx, &rule->acts, false);
}
+out_free:
kfree(rule);
if (match.encap)
efx_tc_flower_release_encap_match(efx, match.encap);
@@ -899,8 +897,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
return rc;
if (efx_tc_match_is_encap(&match.mask)) {
NL_SET_ERR_MSG_MOD(extack, "Ingress enc_key matches not supported");
- rc = -EOPNOTSUPP;
- goto release;
+ return -EOPNOTSUPP;
}
if (tc->common.chain_index) {
@@ -924,9 +921,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
- rc = -EEXIST;
NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
- goto release;
+ kfree(rule);
+ return -EEXIST;
}
/* Parse actions */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 16a8c361283b..f07905f00f98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -644,7 +644,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
plat_dat->dump_debug_regs = rgmii_dump;
plat_dat->has_gmac4 = 1;
- plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
+ if (ethqos->has_emac3)
+ plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0fca81507a77..87510951f4e8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3873,7 +3873,6 @@ irq_error:
stmmac_hw_teardown(dev);
init_error:
- free_dma_desc_resources(priv, &priv->dma_conf);
phylink_disconnect_phy(priv->phylink);
init_phy_error:
pm_runtime_put(priv->device);
@@ -3891,6 +3890,9 @@ static int stmmac_open(struct net_device *dev)
return PTR_ERR(dma_conf);
ret = __stmmac_open(dev, dma_conf);
+ if (ret)
+ free_dma_desc_resources(priv, dma_conf);
+
kfree(dma_conf);
return ret;
}
@@ -5633,12 +5635,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
stmmac_release(dev);
ret = __stmmac_open(dev, dma_conf);
- kfree(dma_conf);
if (ret) {
+ free_dma_desc_resources(priv, dma_conf);
+ kfree(dma_conf);
netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
return ret;
}
+ kfree(dma_conf);
+
stmmac_set_rx_mode(dev);
}
@@ -7233,8 +7238,7 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_XSK_ZEROCOPY |
- NETDEV_XDP_ACT_NDO_XMIT;
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
ret = stmmac_tc_init(priv, priv);
if (!ret) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
index 9d4d8c3dad0a..aa6f16d3df64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -117,6 +117,9 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
+ if (!prog)
+ xdp_features_clear_redirect_target(dev);
+
need_update = !!priv->xdp_prog != !!prog;
if (if_running && need_update)
stmmac_xdp_release(dev);
@@ -131,5 +134,8 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
if (if_running && need_update)
stmmac_xdp_open(dev);
+ if (prog)
+ xdp_features_set_redirect_target(dev, false);
+
return 0;
}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 4ef05bad4613..d61dfa250feb 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -5077,6 +5077,8 @@ err_out_iounmap:
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
+ vfree(cp->fw_data);
+
pci_iounmap(pdev, cp->regs);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 11cbcd9e2c72..bebcfd5e6b57 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2068,7 +2068,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
/* Initialize the Serdes PHY for the port */
ret = am65_cpsw_init_serdes_phy(dev, port_np, port);
if (ret)
- return ret;
+ goto of_node_put;
port->slave.mac_only =
of_property_read_bool(port_np, "ti,mac-only");