Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 5
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 7
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 63
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 7
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 14
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 54
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 67
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 58
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devids.h | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 65
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 98
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 184
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 32
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 393
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 30
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 36
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 10
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_dump.c | 323
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 61
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 144
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h | 5
-rw-r--r--  drivers/net/ethernet/jme.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 276
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 55
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 131
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c | 286
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 107
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 253
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 266
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 181
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 71
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 116
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 102
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 228
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/jazzsonic.c | 31
-rw-r--r--  drivers/net/ethernet/natsemi/macsonic.c | 48
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.c | 66
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.h | 2
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 40
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 5
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c | 7
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 213
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 104
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 18
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 25
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 3
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 35
103 files changed, 3293 insertions, 1443 deletions
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 1fb58f9ad80b..a250046b8e18 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1067,18 +1067,14 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_feature_rss_flow_hash_control *hash_key;
struct ena_admin_get_feat_resp get_resp;
int rc;
- hash_key = (ena_dev->rss).hash_key;
-
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
ena_dev->rss.hash_key_dma_addr,
sizeof(ena_dev->rss.hash_key), 0);
if (unlikely(rc)) {
- hash_key = NULL;
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 1dcbc486eca9..b9b4edb913c1 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1416,10 +1416,7 @@ static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
0, IPPROTO_TCP, 0);
first->word1 |= 1 << TPD_IPV4_SHIFT;
} else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
/* LSOv2: the first TPD only provides the packet length */
first->adrl.l.pkt_len = skb->len;
first->word1 |= 1 << TPD_LSO_V2_SHIFT;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 4c0b1f8551dd..0d67b951c0b2 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2025,10 +2025,8 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
"IPV6 tso with zero data??\n");
goto check_sum;
} else
- tcp_hdr(skb)->check = ~csum_ipv6_magic(
- &ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
+
etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
etpd->pkt_len = cpu_to_le32(skb->len);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 01a50a4b2113..d6588502a050 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2504,12 +2504,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
IPPROTO_TCP, 0);
BNAD_UPDATE_CTR(bnad, tso4);
} else {
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-
- ipv6h->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
- IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
BNAD_UPDATE_CTR(bnad, tso6);
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ddf60dc9ad16..3fc858b2c87b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -696,8 +696,7 @@ static void enic_preload_tcp_csum(struct sk_buff *skb)
tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
}
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index fe942de19597..f86283411f4d 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config FSL_ENETC
tristate "ENETC PF driver"
- depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI && PCI_MSI
select FSL_ENETC_MDIO
select PHYLIB
help
@@ -13,7 +13,7 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
- depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI && PCI_MSI
select PHYLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
@@ -23,7 +23,7 @@ config FSL_ENETC_VF
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4432a59904c7..12edd4e358f8 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3793,6 +3793,7 @@ static struct platform_driver fec_driver = {
.name = DRIVER_NAME,
.pm = &fec_pm_ops,
.of_match_table = fec_dt_ids,
+ .suppress_bind_attrs = true,
},
.id_table = fec_devtype,
.probe = fec_probe,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 1d4ffc5f408a..e1d88095a77e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -260,6 +260,8 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump m7 info\n");
dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
dev_info(&h->pdev->dev, "dump mac tnl status\n");
+ dev_info(&h->pdev->dev, "dump loopback\n");
+ dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c03856e63320..3f59a1924390 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -736,7 +736,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
if (ops->get_media_type)
ops->get_media_type(handle, &media_type, &module_type);
- if (cmd->base.duplex != DUPLEX_FULL &&
+ if (cmd->base.duplex == DUPLEX_HALF &&
media_type != HNAE3_MEDIA_TYPE_COPPER) {
netdev_err(netdev,
"only copper port supports half duplex!");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 67fad80035d3..6295cf93c350 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -310,8 +310,9 @@ static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
char *false_buf)
{
if (flag)
- dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
- true_buf);
+ dev_info(&hdev->pdev->dev, "%s(%d): %s weight: %u\n",
+ title_buf, index, true_buf,
+ hdev->tm_info.pg_info[0].tc_dwrr[index]);
else
dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
false_buf);
@@ -339,7 +340,8 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "dump tc\n");
+ dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n",
+ hdev->tm_info.num_tc);
dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
ets_weight->weight_offset);
@@ -1169,6 +1171,57 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
}
}
+static void hclge_dbg_dump_loopback(struct hclge_dev *hdev,
+ const char *cmd_buf)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ struct hclge_config_mac_mode_cmd *req_app;
+ struct hclge_serdes_lb_cmd *req_serdes;
+ struct hclge_desc desc;
+ u8 loopback_en;
+ int ret;
+
+ req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
+ req_serdes = (struct hclge_serdes_lb_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id);
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump app loopback status, ret = %d\n", ret);
+ return;
+ }
+
+ loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
+ HCLGE_MAC_APP_LP_B);
+ dev_info(&hdev->pdev->dev, "app loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump serdes loopback status, ret = %d\n",
+ ret);
+ return;
+ }
+
+ loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ loopback_en = req_serdes->enable &
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
+ dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ if (phydev)
+ dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
+ phydev->loopback_enabled ? "on" : "off");
+}
+
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
* @hdev: pointer to struct hclge_dev
*/
@@ -1269,6 +1322,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG "dump reg"
#define DUMP_TM_MAP "dump tm map"
+#define DUMP_LOOPBACK "dump loopback"
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -1302,6 +1356,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
&cmd_buf[sizeof("dump ncl_config")]);
} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
hclge_dbg_dump_mac_tnl_status(hdev);
+ } else if (strncmp(cmd_buf, DUMP_LOOPBACK,
+ strlen(DUMP_LOOPBACK)) == 0) {
+ hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]);
} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
hclge_dbg_dump_qs_shaper(hdev,
&cmd_buf[sizeof("dump qs shaper")]);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 492bc9446463..51399dbed77a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -824,6 +824,8 @@ static void hclge_get_mac_stat(struct hnae3_handle *handle,
static int hclge_parse_func_status(struct hclge_dev *hdev,
struct hclge_func_status_cmd *status)
{
+#define HCLGE_MAC_ID_MASK 0xF
+
if (!(status->pf_state & HCLGE_PF_STATE_DONE))
return -EINVAL;
@@ -833,6 +835,7 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
else
hdev->flag &= ~HCLGE_FLAG_MAIN;
+ hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index f78cbb4cc85e..71df23d5f1b4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -249,6 +249,7 @@ enum HCLGE_MAC_DUPLEX {
#define QUERY_ACTIVE_SPEED 1
struct hclge_mac {
+ u8 mac_id;
u8 phy_addr;
u8 flag;
u8 media_type; /* port media type, e.g. fibre/copper/backplane */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 2bced34c19ba..f7103356ef56 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2715,11 +2715,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index adce7e319b9e..9e7881db7859 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -897,6 +897,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_cnp:
/* fall through */
case e1000_pch_tgp:
+ case e1000_pch_adp:
mask |= BIT(18);
break;
default:
@@ -1561,6 +1562,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index f556163481cb..b1447221669e 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -97,6 +97,11 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_TGP_I219_LM14 0x15F9
#define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA
#define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4
+#define E1000_DEV_ID_PCH_TGP_I219_V15 0x15F5
+#define E1000_DEV_ID_PCH_ADP_I219_LM16 0x1A1E
+#define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F
+#define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C
+#define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D
#define E1000_REVISION_4 4
@@ -121,6 +126,7 @@ enum e1000_mac_type {
e1000_pch_spt,
e1000_pch_cnp,
e1000_pch_tgp,
+ e1000_pch_adp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index b4135c50e905..735bf25952fc 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -317,6 +317,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -460,6 +461,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -703,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1642,6 +1645,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2095,6 +2099,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3133,6 +3138,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4077,6 +4083,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index e531976f8a67..51512a73fdd0 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1363,7 +1363,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (!(swsm & E1000_SWSM_SMBI))
break;
- usleep_range(50, 100);
+ udelay(100);
i++;
}
@@ -1381,7 +1381,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
- usleep_range(50, 100);
+ udelay(100);
}
if (i == timeout) {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index db4ea58bac82..1e1625122596 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3536,6 +3536,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
break;
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -3807,7 +3808,7 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
tdt = er32(TDT(0));
BUG_ON(tdt != tx_ring->next_to_use);
tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
- tx_desc->buffer_addr = tx_ring->dma;
+ tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma);
tx_desc->lower.data = cpu_to_le32(txd_lower | size);
tx_desc->upper.data = 0;
@@ -4049,6 +4050,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_cnp:
/* fall-through */
case e1000_pch_tgp:
+ case e1000_pch_adp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -5462,10 +5464,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
@@ -7760,6 +7759,11 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index eaa5a0fb99f0..439fda2f5368 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -297,6 +297,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
/* fall-through */
case e1000_pch_tgp:
+ case e1000_pch_adp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index cb10abb14e11..2d51ceaa2c8c 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -212,6 +212,7 @@ enum ice_state {
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
+ __ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
__ICE_STATE_NBITS /* must be last */
};
@@ -340,6 +341,7 @@ enum ice_pf_flags {
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
+ ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -363,6 +365,8 @@ struct ice_pf {
u16 num_vfs_supported; /* num VFs supported for this PF */
u16 num_vf_qps; /* num queue pairs per VF */
u16 num_vf_msix; /* num vectors per VF */
+ /* used to ratelimit the MDD event logging */
+ unsigned long last_printed_mdd_jiffies;
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 6873998cf145..38b6ffb6ad2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1661,6 +1661,13 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[1];
};
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct ice_aqc_event_lan_overflow {
+ __le32 prtdcb_ruptq;
+ __le32 qtx_ctl;
+ u8 reserved[8];
+};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -1730,6 +1737,7 @@ struct ice_aq_desc {
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
+ struct ice_aqc_event_lan_overflow lan_overflow;
} params;
};
@@ -1860,6 +1868,9 @@ enum ice_adminq_opc {
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
+ /* Standalone Commands/Events */
+ ice_aqc_opc_event_lan_overflow = 0x1001,
+
/* debug commands */
ice_aqc_opc_fw_logging = 0xFF09,
ice_aqc_opc_fw_logging_info = 0xFF10,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 81885efadc7a..a19cd6f5436b 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -203,8 +203,7 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
*/
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{
- WARN_ONCE(ice_ring_is_xdp(ring) && tc,
- "XDP ring can't belong to TC other than 0");
+ WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
/* Idea here for calculation is that we subtract the number of queue
* count from TC that ring belongs to from it's absolute queue index
@@ -247,7 +246,6 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*/
switch (vsi->type) {
case ICE_VSI_LB:
- /* fall through */
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
@@ -387,8 +385,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
*/
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
if (vsi->type != ICE_VSI_VF) {
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
QRXFLXP_CNTXT_RXDID_IDX_M;
@@ -399,8 +397,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ } else {
+ regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
+ QRXFLXP_CNTXT_RXDID_PRIO_M |
+ QRXFLXP_CNTXT_TS_M);
}
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -459,17 +461,20 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
}
/**
- * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
+ * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
* @vsi: the VSI being configured
- * @ena: start or stop the Rx rings
- * @rxq_idx: Rx queue index
+ * @ena: start or stop the Rx ring
+ * @rxq_idx: 0-based Rx queue index for the VSI passed in
+ * @wait: wait or don't wait for configuration to finish in hardware
+ *
+ * Return 0 on success and negative on error.
*/
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
+int
+ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
int pf_q = vsi->rxq_map[rxq_idx];
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- int ret = 0;
u32 rx_reg;
rx_reg = rd32(hw, QRX_CTRL(pf_q));
@@ -485,13 +490,30 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
rx_reg &= ~QRX_CTRL_QENA_REQ_M;
wr32(hw, QRX_CTRL(pf_q), rx_reg);
- /* wait for the change to finish */
- ret = ice_pf_rxq_wait(pf, pf_q, ena);
- if (ret)
- dev_err(ice_pf_to_dev(pf), "VSI idx %d Rx ring %d %sable timeout\n",
- vsi->idx, pf_q, (ena ? "en" : "dis"));
+ if (!wait)
+ return 0;
+
+ ice_flush(hw);
+ return ice_pf_rxq_wait(pf, pf_q, ena);
+}
- return ret;
+/**
+ * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
+ * @vsi: the VSI being configured
+ * @ena: true/false to verify Rx ring has been enabled/disabled respectively
+ * @rxq_idx: 0-based Rx queue index for the VSI passed in
+ *
+ * This routine will wait for the given Rx queue of the VSI to reach the
+ * enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
+ * the requested state after multiple retries; else will return 0 in case of
+ * success.
+ */
+int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
+{
+ int pf_q = vsi->rxq_map[rxq_idx];
+ struct ice_pf *pf = vsi->back;
+
+ return ice_pf_rxq_wait(pf, pf_q, ena);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 407995e8e944..44efdb627043 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -8,7 +8,9 @@
int ice_setup_rx_ctx(struct ice_ring *ring);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
+int
+ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
+int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 04d5db0a25bf..1fe54f08f162 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -6,7 +6,7 @@
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
-#define ICE_PF_RESET_WAIT_COUNT 200
+#define ICE_PF_RESET_WAIT_COUNT 300
/**
* ice_set_mac_type - Sets MAC type
@@ -1181,7 +1181,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_release_res:
if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
break;
- /* fall-through */
+ fallthrough;
default:
mutex_lock(&ice_global_cfg_lock_sw);
lock_acquired = true;
@@ -2703,7 +2703,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
break;
}
- /* fall-through */
+ fallthrough;
default:
status = ICE_ERR_PARAM;
goto ice_aq_get_set_rss_lut_exit;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 7108fb41b604..16656b6c3d09 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -63,6 +63,26 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_dcb_get_mode - gets the DCB mode
+ * @port_info: pointer to port info structure
+ * @host: if set it's HOST if not it's MANAGED
+ */
+static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
+{
+ u8 mode;
+
+ if (host)
+ mode = DCB_CAP_DCBX_HOST;
+ else
+ mode = DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
+ return (mode | DCB_CAP_DCBX_VER_CEE);
+ else
+ return (mode | DCB_CAP_DCBX_VER_IEEE);
+}
+
+/**
* ice_dcb_get_num_tc - Get the number of TCs from DCBX config
* @dcbcfg: config to retrieve number of TCs from
*/
@@ -149,6 +169,43 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
}
/**
+ * ice_dcb_bwchk - check if ETS bandwidth input parameters are correct
+ * @pf: pointer to the PF struct
+ * @dcbcfg: pointer to DCB config structure
+ */
+int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg = &dcbcfg->etscfg;
+ u8 num_tc, total_bw = 0;
+ int i;
+
+ /* returns number of contiguous TCs and 1 TC for non-contiguous TCs,
+ * since at least 1 TC has to be configured
+ */
+ num_tc = ice_dcb_get_num_tc(dcbcfg);
+
+ /* no bandwidth checks required if there's only one TC, so assign
+ * all bandwidth to TC0 and return
+ */
+ if (num_tc == 1) {
+ etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
+ return 0;
+ }
+
+ for (i = 0; i < num_tc; i++)
+ total_bw += etscfg->tcbwtable[i];
+
+ if (!total_bw) {
+ etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
+ } else if (total_bw != ICE_TC_MAX_BW) {
+ dev_err(ice_pf_to_dev(pf), "Invalid config, total bandwidth must equal 100\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct
* @new_cfg: DCBX config to apply
@@ -182,6 +239,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
return ret;
}
+ if (ice_dcb_bwchk(pf, new_cfg))
+ return -EINVAL;
+
/* Store old config in case FW config fails */
old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
if (!old_cfg)
@@ -605,14 +665,14 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
ice_cfg_sw_lldp(pf_vsi, false, true);
- pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
return 0;
}
set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
- /* DCBX in FW and LLDP enabled in FW */
- pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
+ /* DCBX/LLDP enabled in FW, set DCBNL mode advertisement */
+ pf->dcbx_cap = ice_dcb_get_mode(port_info, false);
err = ice_dcb_init_cfg(pf, locked);
if (err)
@@ -772,6 +832,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
dev_dbg(dev, "No change detected in DCBX configuration.\n");
+ pf->dcbx_cap = ice_dcb_get_mode(pi, false);
goto out;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index f15e5776f287..37680e815b02 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -20,6 +20,7 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
+int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
void ice_pf_dcb_recfg(struct ice_pf *pf);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index b61aba428adb..c4c12414083a 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -95,14 +95,12 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
}
- /* max_tc is a 1-8 value count of number of TC's, not a 0-7 value
- * for the TC's index number. Add one to value if not zero, and
- * for zero set it to the FW's default value
- */
- if (max_tc)
- max_tc++;
- else
- max_tc = IEEE_8021QAZ_MAX_TCS;
+ if (ice_dcb_bwchk(pf, new_cfg)) {
+ err = -EINVAL;
+ goto ets_out;
+ }
+
+ max_tc = pf->hw.func_caps.common_cap.maxtc;
new_cfg->etscfg.maxtcs = max_tc;
@@ -119,6 +117,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
if (err == ICE_DCB_NO_HW_CHG)
err = ICE_DCB_HW_CHG_RST;
+ets_out:
mutex_unlock(&pf->tc_mutex);
return err;
}
@@ -535,6 +534,30 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
}
/**
+ * ice_dcbnl_set_pg_tc_cfg_rx
+ * @netdev: relevant netdev struct
+ * @prio: corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the PG ID
+ * @bw_pct: BW percentage for corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ *
+ * lldpad requires this function pointer to be non-NULL to complete CEE config.
+ */
+static void
+ice_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
+ int __always_unused prio,
+ u8 __always_unused prio_type,
+ u8 __always_unused pgid,
+ u8 __always_unused bw_pct,
+ u8 __always_unused up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ dev_dbg(ice_pf_to_dev(pf), "Rx TC PG Config Not Supported.\n");
+}
+
+/**
* ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
* @netdev: pointer to netdev struct
* @pgid: the corresponding traffic class
@@ -554,6 +577,23 @@ ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
}
/**
+ * ice_dcbnl_set_pg_bwg_cfg_rx
+ * @netdev: the corresponding netdev
+ * @pgid: corresponding TC
+ * @bw_pct: BW percentage for given TC
+ *
+ * lldpad requires this function pointer to be non-NULL to complete CEE config.
+ */
+static void
+ice_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
+ u8 __always_unused bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ dev_dbg(ice_pf_to_dev(pf), "Rx BWG PG Config Not Supported.\n");
+}
+
+/**
* ice_dcbnl_get_cap - Get DCBX capabilities of adapter
* @netdev: pointer to netdev struct
* @capid: the capability type
@@ -799,6 +839,8 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
.getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
.setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
.setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
+ .setpgtccfgrx = ice_dcbnl_set_pg_tc_cfg_rx,
+ .setpgbwgcfgrx = ice_dcbnl_set_pg_bwg_cfg_rx,
.getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
.getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
.getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index ce63017c56c7..9d8194671f6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -5,12 +5,34 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
+/* Intel(R) Ethernet Connection E823-L for backplane */
+#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
+/* Intel(R) Ethernet Connection E823-L for SFP */
+#define ICE_DEV_ID_E823L_SFP 0x124D
+/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
+/* Intel(R) Ethernet Connection E823-L 1GbE */
+#define ICE_DEV_ID_E823L_1GBE 0x124F
+/* Intel(R) Ethernet Connection E823-L for QSFP */
+#define ICE_DEV_ID_E823L_QSFP 0x151D
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
+/* Intel(R) Ethernet Controller E810-XXV for SFP */
+#define ICE_DEV_ID_E810_XXV_SFP 0x159B
+/* Intel(R) Ethernet Connection E823-C for backplane */
+#define ICE_DEV_ID_E823C_BACKPLANE 0x188A
+/* Intel(R) Ethernet Connection E823-C for QSFP */
+#define ICE_DEV_ID_E823C_QSFP 0x188B
+/* Intel(R) Ethernet Connection E823-C for SFP */
+#define ICE_DEV_ID_E823C_SFP 0x188C
+/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823C_10G_BASE_T 0x188D
+/* Intel(R) Ethernet Connection E823-C 1GbE */
+#define ICE_DEV_ID_E823C_SGMII 0x188E
/* Intel(R) Ethernet Connection E822-C for backplane */
#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
/* Intel(R) Ethernet Connection E822-C for QSFP */
@@ -21,8 +43,8 @@
#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
/* Intel(R) Ethernet Connection E822-C 1GbE */
#define ICE_DEV_ID_E822C_SGMII 0x1894
-/* Intel(R) Ethernet Connection E822-X for backplane */
-#define ICE_DEV_ID_E822X_BACKPLANE 0x1897
+/* Intel(R) Ethernet Connection E822-L for backplane */
+#define ICE_DEV_ID_E822L_BACKPLANE 0x1897
/* Intel(R) Ethernet Connection E822-L for SFP */
#define ICE_DEV_ID_E822L_SFP 0x1898
/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 77c412a7e7a4..ab37dddb225b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -157,6 +157,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+ ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
@@ -462,7 +463,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_setup_rx_ring;
- status = ice_vsi_start_rx_rings(vsi);
+ status = ice_vsi_start_all_rx_rings(vsi);
if (status)
goto err_start_rx_ring;
@@ -494,7 +495,7 @@ static int ice_lbtest_disable_rings(struct ice_vsi *vsi)
netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, status);
- status = ice_vsi_stop_rx_rings(vsi);
+ status = ice_vsi_stop_all_rx_rings(vsi);
if (status)
netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, status);
@@ -672,7 +673,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
if (!test_vsi) {
- netdev_err(netdev, "Failed to create a VSI for the loopback test");
+ netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
return 1;
}
@@ -731,7 +732,7 @@ lbtest_free_frame:
devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_remove_mac(&pf->hw, &tmp_list))
- netdev_err(netdev, "Could not remove MAC filter for the test VSI");
+ netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
free_mac_list:
ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
@@ -744,7 +745,7 @@ lbtest_rings_dis:
lbtest_vsi_close:
test_vsi->netdev = NULL;
if (ice_vsi_release(test_vsi))
- netdev_err(netdev, "Failed to remove the test VSI");
+ netdev_err(netdev, "Failed to remove the test VSI\n");
return ret;
}
@@ -834,7 +835,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
int status = ice_open(netdev);
if (status) {
- dev_err(dev, "Could not open device %s, err %d",
+ dev_err(dev, "Could not open device %s, err %d\n",
pf->int_name, status);
}
}
@@ -1091,7 +1092,6 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
fecparam->active_fec = ETHTOOL_FEC_BASER;
break;
case ICE_AQ_LINK_25G_RS_528_FEC_EN:
- /* fall through */
case ICE_AQ_LINK_25G_RS_544_FEC_EN:
fecparam->active_fec = ETHTOOL_FEC_RS;
break;
@@ -1781,7 +1781,6 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
Asym_Pause);
break;
case ICE_FC_PFC:
- /* fall through */
default:
ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 99208946224c..42bac3ec5526 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -3471,6 +3471,24 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
}
/**
+ * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
+ * @hw: pointer to the HW struct
+ * @idx: the index of the TCAM entry to remove
+ * @chg: the list of change structures to search
+ */
+static void
+ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
+{
+ struct ice_chs_chg *pos, *tmp;
+
+ list_for_each_entry_safe(tmp, pos, chg, list_entry)
+ if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
+ list_del(&tmp->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), tmp);
+ }
+}
+
+/**
* ice_prof_tcam_ena_dis - add enable or disable TCAM change
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -3489,14 +3507,19 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
enum ice_status status;
struct ice_chs_chg *p;
- /* Default: enable means change the low flag bit to don't care */
- u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
- u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
/* if disabling, free the TCAM */
if (!enable) {
- status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx);
+ status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
+
+ /* if we have already created a change for this TCAM entry, then
+ * we need to remove that entry, in order to prevent writing to
+ * a TCAM entry we no longer will have ownership of.
+ */
+ ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
tcam->tcam_idx = 0;
tcam->in_use = 0;
return status;
@@ -3612,11 +3635,12 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @blk: hardware block
* @vsig: the VSIG to which this profile is to be added
* @hdl: the profile handle indicating the profile to add
+ * @rev: true to add entries to the end of the list
* @chg: the change list
*/
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
- struct list_head *chg)
+ bool rev, struct list_head *chg)
{
/* Masks that ignore flags */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -3625,7 +3649,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
- u16 i;
+ u16 vsig_idx, i;
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
@@ -3687,8 +3711,13 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
}
/* add profile to VSIG */
- list_add(&t->list,
- &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
+ vsig_idx = vsig & ICE_VSIG_IDX_M;
+ if (rev)
+ list_add_tail(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
+ else
+ list_add(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
return 0;
@@ -3728,7 +3757,7 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
if (status)
goto err_ice_create_prof_id_vsig;
- status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
+ status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
if (status)
goto err_ice_create_prof_id_vsig;
@@ -3753,11 +3782,13 @@ err_ice_create_prof_id_vsig:
* @blk: hardware block
* @vsi: the initial VSI that will be in VSIG
* @lst: the list of profile that will be added to the VSIG
+ * @new_vsig: return of new VSIG
* @chg: the change list
*/
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
- struct list_head *lst, struct list_head *chg)
+ struct list_head *lst, u16 *new_vsig,
+ struct list_head *chg)
{
struct ice_vsig_prof *t;
enum ice_status status;
@@ -3772,12 +3803,15 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
return status;
list_for_each_entry(t, lst, list) {
+ /* Reverse the order here since we are copying the list */
status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
- chg);
+ true, chg);
if (status)
return status;
}
+ *new_vsig = vsig;
+
return 0;
}
@@ -3899,7 +3933,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* not sharing entries and we can simply add the new
* profile to the VSIG.
*/
- status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
+ status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
+ &chg);
if (status)
goto err_ice_add_prof_id_flow;
@@ -3910,7 +3945,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
} else {
/* No match, so we need a new VSIG */
status = ice_create_vsig_from_lst(hw, blk, vsi,
- &union_lst, &chg);
+ &union_lst, &vsig,
+ &chg);
if (status)
goto err_ice_add_prof_id_flow;
@@ -4076,7 +4112,8 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* new VSIG and TCAM entries
*/
status = ice_create_vsig_from_lst(hw, blk, vsi,
- &copy, &chg);
+ &copy, &vsig,
+ &chg);
if (status)
goto err_ice_rem_prof_id_flow;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6db3d0494127..1d37a9f02c1c 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -85,6 +85,7 @@
#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0)
#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8)
+#define QRXFLXP_CNTXT_TS_M BIT(11)
#define GLGEN_RSTAT 0x000B8188
#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0)
#define GLGEN_RSTCTL 0x000B8180
@@ -217,6 +218,8 @@
#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
+#define GL_MDCK_TX_TDPU 0x00049348
+#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDET_RX 0x00294C00
#define GL_MDET_RX_QNUM_S 0
#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
@@ -286,6 +289,8 @@
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
+#define GLDCB_RTCTQ_RXQNUM_S 0
+#define GLDCB_RTCTQ_RXQNUM_M ICE_M(0x7FF, 0)
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d974e2fa3e63..ff72a6d1c978 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -26,16 +26,26 @@ const char *ice_vsi_type_str(enum ice_vsi_type type)
}
/**
- * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
* @vsi: the VSI being configured
* @ena: start or stop the Rx rings
+ *
+ * First enable/disable all of the Rx rings, flush any remaining writes, and
+ * then verify that they have all been enabled/disabled successfully. This will
+ * let all of the register writes complete when enabling/disabling the Rx rings
+ * before waiting for the change in hardware to complete.
*/
-static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
int i, ret = 0;
+ for (i = 0; i < vsi->num_rxq; i++)
+ ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
+
+ ice_flush(&vsi->back->hw);
+
for (i = 0; i < vsi->num_rxq; i++) {
- ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
+ ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
if (ret)
break;
}
@@ -111,7 +121,6 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
- /* fall through */
case ICE_VSI_LB:
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
@@ -433,7 +442,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
.scatter_count = ICE_MAX_SCATTER_TXQS,
.vsi_map = vsi->txq_map,
.vsi_map_offset = 0,
- .mapping_mode = vsi->tx_mapping_mode
+ .mapping_mode = ICE_VSI_MAP_CONTIG
};
struct ice_qs_cfg rx_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
@@ -443,18 +452,21 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
.scatter_count = ICE_MAX_SCATTER_RXQS,
.vsi_map = vsi->rxq_map,
.vsi_map_offset = 0,
- .mapping_mode = vsi->rx_mapping_mode
+ .mapping_mode = ICE_VSI_MAP_CONTIG
};
- int ret = 0;
-
- vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
- vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
+ int ret;
ret = __ice_vsi_get_qs(&tx_qs_cfg);
- if (!ret)
- ret = __ice_vsi_get_qs(&rx_qs_cfg);
+ if (ret)
+ return ret;
+ vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
- return ret;
+ ret = __ice_vsi_get_qs(&rx_qs_cfg);
+ if (ret)
+ return ret;
+ vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
+
+ return 0;
}
/**
@@ -804,7 +816,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
ctxt->info = vsi->info;
switch (vsi->type) {
case ICE_VSI_LB:
- /* fall through */
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
@@ -1348,7 +1359,9 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
list_add(&tmp->list_entry, &tmp_add_list);
status = ice_add_vlan(&pf->hw, &tmp_add_list);
- if (status) {
+ if (!status) {
+ vsi->num_vlan++;
+ } else {
err = -ENODEV;
dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
vsi->vsi_num);
@@ -1390,10 +1403,12 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
list_add(&list->list_entry, &tmp_add_list);
status = ice_remove_vlan(&pf->hw, &tmp_add_list);
- if (status == ICE_ERR_DOES_NOT_EXIST) {
+ if (!status) {
+ vsi->num_vlan--;
+ } else if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
vid, vsi->vsi_num, status);
- } else if (status) {
+ } else {
dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
vid, vsi->vsi_num, status);
err = -EIO;
@@ -1678,25 +1693,25 @@ out:
}
/**
- * ice_vsi_start_rx_rings - start VSI's Rx rings
- * @vsi: the VSI whose rings are to be started
+ * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
+ * @vsi: the VSI whose rings are to be enabled
*
* Returns 0 on success and a negative value on error
*/
-int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
+int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
{
- return ice_vsi_ctrl_rx_rings(vsi, true);
+ return ice_vsi_ctrl_all_rx_rings(vsi, true);
}
/**
- * ice_vsi_stop_rx_rings - stop VSI's Rx rings
- * @vsi: the VSI
+ * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
+ * @vsi: the VSI whose rings are to be disabled
*
* Returns 0 on success and a negative value on error
*/
-int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
+int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
{
- return ice_vsi_ctrl_rx_rings(vsi, false);
+ return ice_vsi_ctrl_all_rx_rings(vsi, false);
}
/**
@@ -1756,6 +1771,26 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
+ * @vsi: VSI to check whether or not VLAN pruning is enabled.
+ *
+ * returns true if Rx VLAN pruning and Tx VLAN anti-spoof is enabled and false
+ * otherwise.
+ */
+bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
+{
+ u8 rx_pruning = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ u8 tx_pruning = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+
+ if (!vsi)
+ return false;
+
+ return ((vsi->info.sw_flags2 & rx_pruning) &&
+ (vsi->info.sec_flags & tx_pruning));
+}
+
+/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
@@ -2025,6 +2060,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
+ /* Always add VLAN ID 0 switch rule by default. This is needed
+ * in order to allow all untagged and 0 tagged priority traffic
+ * if Rx VLAN pruning is enabled. Also there are cases where we
+ * don't get the call to add VLAN 0 via ice_vlan_rx_add_vid()
+ * so this handles those cases (i.e. adding the PF to a bridge
+ * without the 8021q module loaded).
+ */
+ ret = ice_vsi_add_vlan(vsi, 0);
+ if (ret)
+ goto unroll_clear_rings;
+
ice_vsi_map_rings_to_vectors(vsi);
/* Do not exit if configuring RSS had an issue, at least
@@ -2104,6 +2150,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
return vsi;
+unroll_clear_rings:
+ ice_vsi_clear_rings(vsi);
unroll_vector_base:
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
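The ice_lib.c hunks above make vsi->num_vlan count only successful VLAN add/remove calls and install a VLAN 0 switch rule at VSI setup, so later hunks can key Rx pruning off whether any non-zero VLAN is present. A minimal stand-alone model of that invariant (hypothetical user-space types, not the driver's API):

/* Model of the VLAN bookkeeping introduced above: VLAN 0 is always present,
 * num_vlan tracks successful adds/removes, and Rx pruning is only kept on
 * while a non-zero VLAN is configured. Illustrative sketch only.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_vsi {
	int num_vlan;		/* counts VLAN 0 plus any user VLANs */
	bool pruning_ena;
};

static void model_add_vlan(struct model_vsi *vsi, unsigned int vid)
{
	vsi->num_vlan++;		/* only incremented on a successful add */
	if (vid)			/* VLAN 0 alone never enables pruning */
		vsi->pruning_ena = true;
}

static void model_kill_vlan(struct model_vsi *vsi, unsigned int vid)
{
	if (!vid)
		return;			/* VLAN 0 is never removed */
	vsi->num_vlan--;
	if (vsi->num_vlan == 1)		/* only VLAN 0 left */
		vsi->pruning_ena = false;
}

int main(void)
{
	struct model_vsi vsi = { 0 };

	model_add_vlan(&vsi, 0);	/* default rule added at VSI setup */
	model_add_vlan(&vsi, 100);
	model_kill_vlan(&vsi, 100);
	printf("num_vlan=%d pruning=%d\n", vsi.num_vlan, vsi.pruning_ena);
	return 0;
}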
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index e2c0dadce920..585f1350403f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -30,9 +30,9 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
-int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
+int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
-int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
+int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi);
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
@@ -42,6 +42,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
+bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
+
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5ef28052c0f8..3aa3fc37c70e 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -706,7 +706,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
/* Get FEC mode based on negotiated link info */
switch (vsi->port_info->phy.link_info.fec_info) {
case ICE_AQ_LINK_25G_RS_528_FEC_EN:
- /* fall through */
case ICE_AQ_LINK_25G_RS_544_FEC_EN:
fec = "RS-FEC";
break;
@@ -1029,6 +1028,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ice_handle_link_event(pf, &event))
dev_err(dev, "Could not handle link event\n");
break;
+ case ice_aqc_opc_event_lan_overflow:
+ ice_vf_lan_overflow_event(pf, &event);
+ break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
@@ -1185,20 +1187,28 @@ static void ice_service_timer(struct timer_list *t)
* ice_handle_mdd_event - handle malicious driver detect event
* @pf: pointer to the PF structure
*
- * Called from service task. OICR interrupt handler indicates MDD event
+ * Called from service task. OICR interrupt handler indicates MDD event.
+ * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
+ * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
+ * disable the queue, the PF can be configured to reset the VF using ethtool
+ * private flag mdd-auto-reset-vf.
*/
static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- bool mdd_detected = false;
u32 reg;
int i;
- if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
+ /* Since the VF MDD event logging is rate limited, check if
+ * there are pending MDD events.
+ */
+ ice_print_vfs_mdd_events(pf);
return;
+ }
- /* find what triggered the MDD event */
+ /* find what triggered an MDD event */
reg = rd32(hw, GL_MDET_TX_PQM);
if (reg & GL_MDET_TX_PQM_VALID_M) {
u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
@@ -1214,7 +1224,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
- mdd_detected = true;
}
reg = rd32(hw, GL_MDET_TX_TCLAN);
@@ -1232,7 +1241,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
- mdd_detected = true;
}
reg = rd32(hw, GL_MDET_RX);
@@ -1250,85 +1258,85 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_RX, 0xffffffff);
- mdd_detected = true;
}
- if (mdd_detected) {
- bool pf_mdd_detected = false;
-
- reg = rd32(hw, PF_MDET_TX_PQM);
- if (reg & PF_MDET_TX_PQM_VALID_M) {
- wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
- dev_info(dev, "TX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
+ /* check to see if this PF caused an MDD event */
+ reg = rd32(hw, PF_MDET_TX_PQM);
+ if (reg & PF_MDET_TX_PQM_VALID_M) {
+ wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
+ }
- reg = rd32(hw, PF_MDET_TX_TCLAN);
- if (reg & PF_MDET_TX_TCLAN_VALID_M) {
- wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
- dev_info(dev, "TX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
+ reg = rd32(hw, PF_MDET_TX_TCLAN);
+ if (reg & PF_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
+ }
- reg = rd32(hw, PF_MDET_RX);
- if (reg & PF_MDET_RX_VALID_M) {
- wr32(hw, PF_MDET_RX, 0xFFFF);
- dev_info(dev, "RX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
- /* Queue belongs to the PF initiate a reset */
- if (pf_mdd_detected) {
- set_bit(__ICE_NEEDS_RESTART, pf->state);
- ice_service_task_schedule(pf);
- }
+ reg = rd32(hw, PF_MDET_RX);
+ if (reg & PF_MDET_RX_VALID_M) {
+ wr32(hw, PF_MDET_RX, 0xFFFF);
+ if (netif_msg_rx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
}
- /* check to see if one of the VFs caused the MDD */
+ /* Check to see if one of the VFs caused an MDD event, and then
+ * increment counters and set print pending
+ */
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
- bool vf_mdd_detected = false;
-
reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "RX driver issue detected on VF %d\n",
- i);
- }
-
- if (vf_mdd_detected) {
- vf->num_mdd_events++;
- if (vf->num_mdd_events &&
- vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
- dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
- i, vf->num_mdd_events);
+ vf->mdd_rx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_rx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
+ i);
+
+ /* Since the queue is disabled on VF Rx MDD events, the
+ * PF can be configured to reset the VF through ethtool
+ * private flag mdd-auto-reset-vf.
+ */
+ if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ ice_reset_vf(&pf->vf[i], false);
}
}
+
+ ice_print_vfs_mdd_events(pf);
}
/**
@@ -1916,8 +1924,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
ret = ice_down(vsi);
if (ret) {
- NL_SET_ERR_MSG_MOD(extack,
- "Preparing device for XDP attach failed");
+ NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
return ret;
}
}
@@ -1926,13 +1933,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
vsi->num_xdp_txq = vsi->alloc_txq;
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack,
- "Setting up XDP Tx resources failed");
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack,
- "Freeing XDP Tx resources failed");
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
} else {
ice_vsi_assign_bpf_prog(vsi, prog);
}
@@ -1965,8 +1970,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct ice_vsi *vsi = np->vsi;
if (vsi->type != ICE_VSI_PF) {
- NL_SET_ERR_MSG_MOD(xdp->extack,
- "XDP can be loaded only on PF VSI");
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
return -EINVAL;
}
@@ -1993,6 +1997,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
u32 val;
+ /* Disable anti-spoof detection interrupt to prevent spurious event
+ * interrupts during a function reset. Anti-spoof functionally is
+ * still supported.
+ */
+ val = rd32(hw, GL_MDCK_TX_TDPU);
+ val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
+ wr32(hw, GL_MDCK_TX_TDPU, val);
+
/* clear things first */
wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
rd32(hw, PFINT_OICR); /* read to clear */
@@ -2461,16 +2473,19 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->info.pvid)
return -EINVAL;
- /* Enable VLAN pruning when VLAN 0 is added */
- if (unlikely(!vid)) {
+ /* VLAN 0 is added by default during load/reset */
+ if (!vid)
+ return 0;
+
+ /* Enable VLAN pruning when a VLAN other than 0 is added */
+ if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
ret = ice_cfg_vlan_pruning(vsi, true, false);
if (ret)
return ret;
}
- /* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
- * needed to continue allowing all untagged packets since VLAN prune
- * list is applied to all packets by the switch
+ /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
+ * packets aren't pruned by the device's internal switch on Rx
*/
ret = ice_vsi_add_vlan(vsi, vid);
if (!ret) {
@@ -2500,6 +2515,10 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->info.pvid)
return -EINVAL;
+ /* don't allow removal of VLAN 0 */
+ if (!vid)
+ return 0;
+
/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
* information
*/
@@ -2507,8 +2526,8 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (ret)
return ret;
- /* Disable VLAN pruning when VLAN 0 is removed */
- if (unlikely(!vid))
+ /* Disable pruning when VLAN 0 is the only VLAN rule */
+ if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
ret = ice_cfg_vlan_pruning(vsi, false, false);
vsi->vlan_ena = false;
@@ -2945,7 +2964,6 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
}
break;
case ICE_ERR_BUF_TOO_SHORT:
- /* fall-through */
case ICE_ERR_CFG:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
@@ -2977,7 +2995,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
default:
break;
}
- /* fall-through */
+ fallthrough;
default:
dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
*status);
@@ -3534,15 +3552,26 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
/* required last entry */
{ 0, }
};
@@ -3961,7 +3990,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
* Tx queue group list was configured and the context bits were
* programmed using ice_vsi_cfg_txqs
*/
- err = ice_vsi_start_rx_rings(vsi);
+ err = ice_vsi_start_all_rx_rings(vsi);
if (err)
return err;
@@ -4340,7 +4369,7 @@ int ice_down(struct ice_vsi *vsi)
vsi->vsi_num, tx_err);
}
- rx_err = ice_vsi_stop_rx_rings(vsi);
+ rx_err = ice_vsi_stop_all_rx_rings(vsi);
if (rx_err)
netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, rx_err);
@@ -5027,6 +5056,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
/**
* ice_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: Tx queue
*/
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
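The reworked ice_handle_mdd_event above only counts per-VF events (per direction) and sets __ICE_MDD_VF_PRINT_PENDING; the logging itself is deferred to ice_print_vfs_mdd_events, which runs at most once per second and reports only VFs whose counters moved. A small stand-alone sketch of that count/last_printed gate (plain C, jiffies handling omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mdd_events {
	uint16_t count;		/* total events seen */
	uint16_t last_printed;	/* count value at the time of the last log line */
};

/* Return true (and update last_printed) only when new events arrived since
 * the previous print, mirroring the gate used by ice_print_vfs_mdd_events().
 */
static bool mdd_should_print(struct mdd_events *ev)
{
	if (ev->count == ev->last_printed)
		return false;
	ev->last_printed = ev->count;
	return true;
}

int main(void)
{
	struct mdd_events rx = { 0 };

	rx.count += 3;						/* three events detected */
	printf("print? %d\n", mdd_should_print(&rx));		/* 1 */
	printf("print again? %d\n", mdd_should_print(&rx));	/* 0: nothing new */
	return 0;
}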
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 7525ac50742e..f6e25db22c23 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -289,17 +289,31 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+ switch (hw->device_id) {
/* the following devices do not have boot_cfg_tlv yet */
- if (hw->device_id == ICE_DEV_ID_E822C_BACKPLANE ||
- hw->device_id == ICE_DEV_ID_E822C_QSFP ||
- hw->device_id == ICE_DEV_ID_E822C_10G_BASE_T ||
- hw->device_id == ICE_DEV_ID_E822C_SGMII ||
- hw->device_id == ICE_DEV_ID_E822C_SFP ||
- hw->device_id == ICE_DEV_ID_E822X_BACKPLANE ||
- hw->device_id == ICE_DEV_ID_E822L_SFP ||
- hw->device_id == ICE_DEV_ID_E822L_10G_BASE_T ||
- hw->device_id == ICE_DEV_ID_E822L_SGMII)
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_SGMII:
+ case ICE_DEV_ID_E822C_BACKPLANE:
+ case ICE_DEV_ID_E822C_QSFP:
+ case ICE_DEV_ID_E822C_10G_BASE_T:
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822C_SFP:
+ case ICE_DEV_ID_E822L_BACKPLANE:
+ case ICE_DEV_ID_E822L_SFP:
+ case ICE_DEV_ID_E822L_10G_BASE_T:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_QSFP:
return status;
+ default:
+ break;
+ }
status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
ICE_SR_BOOT_CFG_PTR);
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index d2db0d04e117..554f567476f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -121,9 +121,7 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
break;
case ICE_AQ_LINK_SPEED_40GB:
- /* fall through */
case ICE_AQ_LINK_SPEED_50GB:
- /* fall through */
case ICE_AQ_LINK_SPEED_100GB:
speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 431266081a80..4d96abfd05d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -760,7 +760,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_ETHERTYPE_MAC:
daddr = f_info->l_data.ethertype_mac.mac_addr;
- /* fall-through */
+ fallthrough;
case ICE_SW_LKUP_ETHERTYPE:
off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
@@ -771,7 +771,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_PROMISC_VLAN:
vlan_id = f_info->l_data.mac_vlan.vlan_id;
- /* fall-through */
+ fallthrough;
case ICE_SW_LKUP_PROMISC:
daddr = f_info->l_data.mac_vlan.mac_addr;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 4de61dbedd36..f67e8362958c 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -453,10 +453,10 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fallthrough -- not supported action */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping frame */
+ fallthrough;
case XDP_DROP:
result = ICE_XDP_CONSUMED;
break;
@@ -1188,7 +1188,6 @@ ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
avg_pkt_size + 640);
break;
case ICE_AQ_LINK_SPEED_10GB:
- /* fall through */
default:
itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
avg_pkt_size + 640);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 75c70d432c72..6bedfa4676ae 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -91,6 +91,39 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
}
/**
+ * ice_vf_has_no_qs_ena - check if the VF has no Rx or Tx queues enabled
+ * @vf: the VF to check
+ *
+ * Returns true if the VF has no Rx and no Tx queues enabled and returns false
+ * otherwise
+ */
+static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
+{
+ return (!bitmap_weight(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF) &&
+ !bitmap_weight(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF));
+}
+
+/**
+ * ice_is_vf_link_up - check if the VF's link is up
+ * @vf: VF to check if link is up
+ */
+static bool ice_is_vf_link_up(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (ice_check_vf_init(pf, vf))
+ return false;
+
+ if (ice_vf_has_no_qs_ena(vf))
+ return false;
+ else if (vf->link_forced)
+ return vf->link_up;
+ else
+ return pf->hw.port_info->phy.link_info.link_info &
+ ICE_AQ_LINK_UP;
+}
+
+/**
* ice_vc_notify_vf_link_state - Inform a VF of link status
* @vf: pointer to the VF structure
*
@@ -99,28 +132,16 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
struct virtchnl_pf_event pfe = { 0 };
- struct ice_link_status *ls;
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- ls = &hw->port_info->phy.link_info;
+ struct ice_hw *hw = &vf->pf->hw;
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
- /* Always report link is down if the VF queues aren't enabled */
- if (!vf->num_qs_ena) {
+ if (ice_is_vf_link_up(vf))
+ ice_set_pfe_link(vf, &pfe,
+ hw->port_info->phy.link_info.link_speed, true);
+ else
ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
- } else if (vf->link_forced) {
- u16 link_speed = vf->link_up ?
- ls->link_speed : ICE_AQ_LINK_SPEED_UNKNOWN;
-
- ice_set_pfe_link(vf, &pfe, link_speed, vf->link_up);
- } else {
- ice_set_pfe_link(vf, &pfe, ls->link_speed,
- ls->link_info & ICE_AQ_LINK_UP);
- }
ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
@@ -150,6 +171,11 @@ static void ice_free_vf_res(struct ice_vf *vf)
}
last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
+
+ /* clear VF MDD event information */
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+
/* Disable interrupts so that VF starts in a known state */
for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
@@ -247,7 +273,6 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf)
/* Clear Rx/Tx enabled queues flag */
bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
- vf->num_qs_ena = 0;
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
@@ -263,7 +288,7 @@ static void ice_dis_vf_qs(struct ice_vf *vf)
vsi = pf->vsi[vf->lan_vsi_idx];
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
- ice_vsi_stop_rx_rings(vsi);
+ ice_vsi_stop_all_rx_rings(vsi);
ice_set_vf_state_qs_dis(vf);
}
@@ -407,43 +432,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
}
/**
- * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID
- * @ctxt: the VSI ctxt to fill
- * @vid: the VLAN ID to set as a PVID
- */
-static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
-{
- ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR);
- ctxt->info.pvid = cpu_to_le16(vid);
- ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
-}
-
-/**
- * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID
- * @ctxt: the VSI ctxt to fill
- */
-static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
-{
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
- ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
- ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
-}
-
-/**
* ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
* @vsi: the VSI to update
- * @vid: the VLAN ID to set as a PVID
+ * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
* @enable: true to enable the PVID, false to disable it
*/
-static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
+static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
struct ice_hw *hw = &vsi->back->hw;
+ struct ice_aqc_vsi_props *info;
struct ice_vsi_ctx *ctxt;
enum ice_status status;
int ret = 0;
@@ -453,20 +450,33 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
return -ENOMEM;
ctxt->info = vsi->info;
- if (enable)
- ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
- else
- ice_vsi_kill_pvid_fill_ctxt(ctxt);
+ info = &ctxt->info;
+ if (enable) {
+ info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR;
+ info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ } else {
+ info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
+ ICE_AQ_VSI_VLAN_MODE_ALL;
+ info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ }
+
+ info->pvid = cpu_to_le16(pvid_info);
+ info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n",
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
}
- vsi->info = ctxt->info;
+ vsi->info.vlan_flags = info->vlan_flags;
+ vsi->info.sw_flags2 = info->sw_flags2;
+ vsi->info.pvid = info->pvid;
out:
kfree(ctxt);
return ret;
@@ -533,9 +543,20 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
vf->lan_vsi_num = vsi->vsi_num;
/* Check if a port VLAN existed before and restore it accordingly */
- if (vf->port_vlan_id) {
- ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
- ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
+ if (vf->port_vlan_info) {
+ ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
+ if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK))
+ dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n",
+ vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id);
+ } else {
+ /* set VLAN 0 filter by default when no port VLAN is
+ * enabled. If a port VLAN is enabled we don't want
+ * untagged broadcast/multicast traffic seen on the VF
+ * interface.
+ */
+ if (ice_vsi_add_vlan(vsi, 0))
+ dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n",
+ vf->vf_id);
}
eth_broadcast_addr(broadcast);
@@ -943,17 +964,9 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
/* reallocate VF resources to finish resetting the VSI state */
if (!ice_alloc_vf_res(vf)) {
- struct ice_vsi *vsi;
-
ice_ena_vf_mappings(vf);
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
-
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (ice_vsi_add_vlan(vsi, 0))
- dev_warn(ice_pf_to_dev(pf),
- "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest",
- vf->vf_id);
}
/* Tell the VF driver the reset is done. This needs to be done only
@@ -985,13 +998,13 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
if (vsi->num_vlan) {
status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
rm_promisc);
- } else if (vf->port_vlan_id) {
+ } else if (vf->port_vlan_info) {
if (rm_promisc)
status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_id);
+ vf->port_vlan_info);
else
status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_id);
+ vf->port_vlan_info);
} else {
if (rm_promisc)
status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
@@ -1167,7 +1180,7 @@ static bool ice_is_vf_disabled(struct ice_vf *vf)
*
* Returns true if the VF is reset, false otherwise.
*/
-static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
@@ -1231,7 +1244,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
*/
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (vf->port_vlan_id || vsi->num_vlan)
+ if (vf->port_vlan_info || vsi->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
@@ -1518,6 +1531,72 @@ static void ice_vc_reset_vf(struct ice_vf *vf)
}
/**
+ * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
+ * @pf: PF used to index all VFs
+ * @pfq: queue index relative to the PF's function space
+ *
+ * If no VF owns the pfq, return NULL; otherwise return a pointer to the VF
+ * that owns the pfq.
+ */
+static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
+{
+ int vf_id;
+
+ ice_for_each_vf(pf, vf_id) {
+ struct ice_vf *vf = &pf->vf[vf_id];
+ struct ice_vsi *vsi;
+ u16 rxq_idx;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ ice_for_each_rxq(vsi, rxq_idx)
+ if (vsi->rxq_map[rxq_idx] == pfq)
+ return vf;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_globalq_to_pfq - convert from global queue index to PF space queue index
+ * @pf: PF used for conversion
+ * @globalq: global queue index used to convert to PF space queue index
+ */
+static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
+{
+ return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
+}
+
+/**
+ * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
+ * @pf: PF that the LAN overflow event happened on
+ * @event: structure holding the event information for the LAN overflow event
+ *
+ * Determine if the LAN overflow event was caused by a VF queue. If it was not
+ * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger a
+ * reset on the offending VF.
+ */
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 gldcb_rtctq, queue;
+ struct ice_vf *vf;
+
+ gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+ dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
+
+ /* event returns device global Rx queue number */
+ queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
+ GLDCB_RTCTQ_RXQNUM_S;
+
+ vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
+ if (!vf)
+ return;
+
+ ice_vc_reset_vf(vf);
+}
+
+/**
* ice_vc_send_msg_to_vf - Send message to VF
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
@@ -1981,7 +2060,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
ret = -EIO;
goto out;
@@ -2039,6 +2118,22 @@ error_param:
}
/**
+ * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Return true on successful validation, else false
+ */
+static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(ICE_MAX_BASE_QS_PER_VF) ||
+ vqs->tx_queues >= BIT(ICE_MAX_BASE_QS_PER_VF))
+ return false;
+
+ return true;
+}
+
+/**
* ice_vc_ena_qs_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2065,13 +2160,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (!vqs->rx_queues && !vqs->tx_queues) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
- vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2097,7 +2186,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
if (test_bit(vf_q_id, vf->rxq_ena))
continue;
- if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2105,7 +2194,6 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
}
set_bit(vf_q_id, vf->rxq_ena);
- vf->num_qs_ena++;
}
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2121,7 +2209,6 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
continue;
set_bit(vf_q_id, vf->txq_ena);
- vf->num_qs_ena++;
}
/* Set flag to indicate that queues are enabled */
@@ -2163,7 +2250,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (!vqs->rx_queues && !vqs->tx_queues) {
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2208,13 +2295,22 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
/* Clear enabled queues flag */
clear_bit(vf_q_id, vf->txq_ena);
- vf->num_qs_ena--;
}
}
- if (vqs->rx_queues) {
- q_map = vqs->rx_queues;
+ q_map = vqs->rx_queues;
+ /* speed up Rx queue disable by batching them if possible */
+ if (q_map &&
+ bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF)) {
+ if (ice_vsi_stop_all_rx_rings(vsi)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
+ } else if (q_map) {
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2225,7 +2321,8 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
if (!test_bit(vf_q_id, vf->rxq_ena))
continue;
- if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
+ true)) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2234,12 +2331,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
/* Clear enabled queues flag */
clear_bit(vf_q_id, vf->rxq_ena);
- vf->num_qs_ena--;
}
}
/* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
error_param:
@@ -2733,19 +2829,20 @@ int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto)
{
- u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vsi *vsi;
struct device *dev;
struct ice_vf *vf;
+ u16 vlanprio;
int ret;
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- if (vlan_id > ICE_MAX_VLANID || qos > 7) {
- dev_err(dev, "Invalid VF Parameters\n");
+ if (vlan_id >= VLAN_N_VID || qos > 7) {
+ dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
+ vf_id, vlan_id, qos);
return -EINVAL;
}
@@ -2761,40 +2858,52 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
if (ret)
return ret;
- if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
+ vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
+
+ if (vf->port_vlan_info == vlanprio) {
/* duplicate request, so just return success */
dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
return 0;
}
- /* If PVID, then remove all filters on the old VLAN */
- if (vsi->info.pvid)
- ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
- VLAN_VID_MASK));
-
if (vlan_id || qos) {
+ /* remove VLAN 0 filter set by default when transitioning from
+ * no port VLAN to a port VLAN. No change to old port VLAN on
+ * failure.
+ */
+ ret = ice_vsi_kill_vlan(vsi, 0);
+ if (ret)
+ return ret;
ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
if (ret)
return ret;
} else {
- ice_vsi_manage_pvid(vsi, 0, false);
- vsi->info.pvid = 0;
+ /* add VLAN 0 filter back when transitioning from port VLAN to
+ * no port VLAN. No change to old port VLAN on failure.
+ */
+ ret = ice_vsi_add_vlan(vsi, 0);
+ if (ret)
+ return ret;
+ ret = ice_vsi_manage_pvid(vsi, 0, false);
+ if (ret)
+ return ret;
}
if (vlan_id) {
dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
vlan_id, qos, vf_id);
- /* add new VLAN filter for each MAC */
+ /* add VLAN filter for the port VLAN */
ret = ice_vsi_add_vlan(vsi, vlan_id);
if (ret)
return ret;
}
+ /* remove old port VLAN filter with valid VLAN ID or QoS fields */
+ if (vf->port_vlan_info)
+ ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
- /* The Port VLAN needs to be saved across resets the same as the
- * default LAN MAC address.
- */
- vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+ /* keep port VLAN information persistent on resets */
+ vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
return 0;
}
@@ -2849,7 +2958,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
for (i = 0; i < vfl->num_elements; i++) {
- if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
+ if (vfl->vlan_id[i] >= VLAN_N_VID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "invalid VF VLAN id %d\n",
vfl->vlan_id[i]);
@@ -2911,9 +3020,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- vsi->num_vlan++;
- /* Enable VLAN pruning when VLAN is added */
- if (!vlan_promisc) {
+ /* Enable VLAN pruning when non-zero VLAN is added */
+ if (!vlan_promisc && vid &&
+ !ice_vsi_is_vlan_pruning_ena(vsi)) {
status = ice_cfg_vlan_pruning(vsi, true, false);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2921,7 +3030,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
vid, status);
goto error_param;
}
- } else {
+ } else if (vlan_promisc) {
/* Enable Ucast/Mcast VLAN promiscuous mode */
promisc_m = ICE_PROMISC_VLAN_TX |
ICE_PROMISC_VLAN_RX;
@@ -2965,9 +3074,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- vsi->num_vlan--;
- /* Disable VLAN pruning when the last VLAN is removed */
- if (!vsi->num_vlan)
+ /* Disable VLAN pruning when only VLAN 0 is left */
+ if (vsi->num_vlan == 1 &&
+ ice_vsi_is_vlan_pruning_ena(vsi))
ice_cfg_vlan_pruning(vsi, false, false);
/* Disable Unicast/Multicast VLAN promiscuous mode */
@@ -3246,14 +3355,12 @@ int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi;
struct ice_vf *vf;
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_check_vf_init(pf, vf))
return -EBUSY;
@@ -3262,9 +3369,8 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
/* VF configuration for VLAN and applicable QoS */
- ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
- ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
- ICE_VLAN_PRIORITY_S;
+ ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
+ ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
ivi->trusted = vf->trusted;
ivi->spoofchk = vf->spoofchk;
@@ -3442,3 +3548,52 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
return 0;
}
+
+/**
+ * ice_print_vfs_mdd_events - print VFs malicious driver detect events
+ * @pf: pointer to the PF structure
+ *
+ * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
+ */
+void ice_print_vfs_mdd_events(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int i;
+
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
+ return;
+
+ /* VF MDD event logs are rate limited to one second intervals */
+ if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
+ return;
+
+ pf->last_printed_mdd_jiffies = jiffies;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed =
+ vf->mdd_rx_events.count;
+
+ dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_rx_events.count, hw->pf_id, i,
+ vf->dflt_lan_addr.addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+ }
+
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed =
+ vf->mdd_tx_events.count;
+
+ dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
+ vf->mdd_tx_events.count, hw->pf_id, i,
+ vf->dflt_lan_addr.addr);
+ }
+ }
+}
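ice_vf_lan_overflow_event above walks from the device-global Rx queue number reported in GLDCB_RTCTQ to the owning VF: subtract the function's first Rx queue ID to get a PF-space index, then search each VF VSI's rxq_map. A stand-alone sketch of that lookup, with hypothetical fixed arrays standing in for the driver's VSI structures:

#include <stdio.h>

#define NUM_VFS		2
#define QS_PER_VF	4

/* Illustrative stand-ins for pf->hw.func_caps.common_cap.rxq_first_id and
 * each VF VSI's rxq_map[]; the real driver walks its VSI structures.
 */
static const unsigned int rxq_first_id = 64;
static const unsigned int vf_rxq_map[NUM_VFS][QS_PER_VF] = {
	{ 0, 1, 2, 3 },		/* PF-space queues owned by VF 0 */
	{ 4, 5, 6, 7 },		/* PF-space queues owned by VF 1 */
};

/* Return the VF index owning the PF-space queue, or -1 if none does */
static int vf_from_pfq(unsigned int pfq)
{
	for (int vf = 0; vf < NUM_VFS; vf++)
		for (int q = 0; q < QS_PER_VF; q++)
			if (vf_rxq_map[vf][q] == pfq)
				return vf;
	return -1;
}

int main(void)
{
	unsigned int globalq = 69;			/* as decoded from GLDCB_RTCTQ */
	unsigned int pfq = globalq - rxq_first_id;	/* global -> PF space */

	printf("global %u -> pfq %u -> VF %d\n", globalq, pfq, vf_from_pfq(pfq));
	return 0;
}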
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index ac67982751df..36dad0eba3db 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -5,11 +5,6 @@
#define _ICE_VIRTCHNL_PF_H_
#include "ice.h"
-#define ICE_MAX_VLANID 4095
-#define ICE_VLAN_PRIORITY_S 12
-#define ICE_VLAN_M 0xFFF
-#define ICE_PRIORITY_M 0x7000
-
/* Restrict number of MAC Addr and VLAN that non-trusted VF can program */
#define ICE_MAX_VLAN_PER_VF 8
#define ICE_MAX_MACADDR_PER_VF 12
@@ -61,6 +56,13 @@ enum ice_virtchnl_cap {
ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
};
+/* VF MDD events print structure */
+struct ice_mdd_vf_events {
+ u16 count; /* total count of Rx|Tx events */
+ /* count number of the last printed event */
+ u16 last_printed;
+};
+
/* VF information structure */
struct ice_vf {
struct ice_pf *pf;
@@ -75,7 +77,7 @@ struct ice_vf {
struct virtchnl_ether_addr dflt_lan_addr;
DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF);
DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF);
- u16 port_vlan_id;
+ u16 port_vlan_info; /* Port VLAN ID and QoS */
u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
u8 trusted:1;
u8 spoofchk:1;
@@ -89,14 +91,14 @@ struct ice_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
- u64 num_mdd_events; /* number of MDD events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
u16 num_vf_qs; /* num of queue configured per VF */
- u16 num_qs_ena; /* total num of Tx/Rx queue enabled */
+ struct ice_mdd_vf_events mdd_rx_events;
+ struct ice_mdd_vf_events mdd_tx_events;
};
#ifdef CONFIG_PCI_IOV
@@ -111,6 +113,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
+bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -128,6 +131,9 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_print_vfs_mdd_events(struct ice_pf *pf);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -135,6 +141,8 @@ ice_get_vf_stats(struct net_device *netdev, int vf_id,
#define ice_vc_notify_link_state(pf) do {} while (0)
#define ice_vc_notify_reset(pf) do {} while (0)
#define ice_set_vf_state_qs_dis(vf) do {} while (0)
+#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
+#define ice_print_vfs_mdd_events(pf) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
@@ -143,6 +151,12 @@ ice_reset_all_vfs(struct ice_pf __always_unused *pf,
return true;
}
+static inline bool
+ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr)
+{
+ return true;
+}
+
static inline int
ice_sriov_configure(struct pci_dev __always_unused *pdev,
int __always_unused num_vfs)
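The new port_vlan_info field stores the port VLAN ID and QoS in a single u16 using the standard 802.1Q TCI layout, which is what ice_set_vf_port_vlan packs and ice_get_vf_cfg unpacks. A quick stand-alone check of that packing (mask/shift constants copied from linux/if_vlan.h; illustrative only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* 802.1Q TCI layout, as defined in linux/if_vlan.h */
#define VLAN_VID_MASK	0x0fff
#define VLAN_PRIO_MASK	0xe000
#define VLAN_PRIO_SHIFT	13

int main(void)
{
	uint16_t vlan_id = 100, qos = 5;
	uint16_t port_vlan_info = vlan_id | (uint16_t)(qos << VLAN_PRIO_SHIFT);

	/* unpacking, as done when reporting the config back to ndo_get_vf_config */
	assert((port_vlan_info & VLAN_VID_MASK) == vlan_id);
	assert(((port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT) == qos);
	printf("port_vlan_info=0x%04x\n", port_vlan_info);
	return 0;
}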
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 4d3407bbd4c4..8279db15e870 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -183,7 +183,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
- err = ice_vsi_ctrl_rx_ring(vsi, false, q_idx);
+ err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
@@ -243,7 +243,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
ice_qvec_cfg_msix(vsi, q_vector);
- err = ice_vsi_ctrl_rx_ring(vsi, true, q_idx);
+ err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
goto free_buf;
@@ -457,7 +457,7 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (if_running) {
ret = ice_qp_dis(vsi, qid);
if (ret) {
- netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret);
+ netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
goto xsk_umem_if_up;
}
}
@@ -471,11 +471,11 @@ xsk_umem_if_up:
if (!ret && umem_present)
napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
else if (ret)
- netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret);
+ netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
if (umem_failure) {
- netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d",
+ netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
umem_present ? "en" : "dis", umem_failure);
return umem_failure;
}
@@ -609,7 +609,7 @@ ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
*/
static bool
ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
- bool alloc(struct ice_ring *, struct ice_rx_buf *))
+ bool (*alloc)(struct ice_ring *, struct ice_rx_buf *))
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -816,10 +816,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fallthrough -- not supported action */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping frame */
+ fallthrough;
case XDP_DROP:
result = ICE_XDP_CONSUMED;
break;
@@ -841,8 +841,8 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int xdp_xmit = 0;
+ bool failure = false;
struct xdp_buff xdp;
- bool failure = 0;
xdp.rxq = &rx_ring->xdp_rxq;
@@ -937,6 +937,15 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
+ if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ else
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+
+ return (int)total_rx_packets;
+ }
+
return failure ? budget : (int)total_rx_packets;
}
@@ -988,6 +997,8 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
+ xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return budget > 0 && work_done;
@@ -1063,6 +1074,13 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) {
+ if (xdp_ring->next_to_clean == xdp_ring->next_to_use)
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
+ else
+ xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
+ }
+
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
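The ice_xsk.c hunks above opt into the AF_XDP need_wakeup protocol: the driver sets the flag when a ring has no work left (or buffer allocation failed), telling user space to kick it via poll()/sendto(), and clears it while it can make progress on its own. A tiny stand-alone model of the Tx-side decision, with a boolean standing in for the xsk_*_need_wakeup helpers:

#include <stdbool.h>
#include <stdio.h>

struct model_ring {
	unsigned int next_to_clean;
	unsigned int next_to_use;
	bool need_wakeup;	/* stands in for xsk_set/clear_tx_need_wakeup() */
};

/* Mirror of the Tx clean path: if the ring is fully cleaned the driver has
 * nothing queued, so user space must wake it; otherwise it keeps working.
 */
static void update_need_wakeup(struct model_ring *r)
{
	r->need_wakeup = (r->next_to_clean == r->next_to_use);
}

int main(void)
{
	struct model_ring ring = { .next_to_clean = 4, .next_to_use = 4 };

	update_need_wakeup(&ring);
	printf("need_wakeup=%d\n", ring.need_wakeup);	/* 1: kick via sendto() */

	ring.next_to_use = 8;		/* driver queued more Tx descriptors */
	update_need_wakeup(&ring);
	printf("need_wakeup=%d\n", ring.need_wakeup);	/* 0 */
	return 0;
}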
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 49fb1e1965cd..e3c164c12e10 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_ethtool.o igc_ptp.o
+igc_ethtool.o igc_ptp.o igc_dump.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 52066bdbbad0..0014828eec46 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -42,6 +42,10 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
void igc_update_stats(struct igc_adapter *adapter);
+/* igc_dump declarations */
+void igc_rings_dump(struct igc_adapter *adapter);
+void igc_regs_dump(struct igc_adapter *adapter);
+
extern char igc_driver_name[];
extern char igc_driver_version[];
@@ -53,10 +57,13 @@ extern char igc_driver_version[];
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
+
+/* Flags definitions */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
#define IGC_FLAG_DMAC BIT(4)
#define IGC_FLAG_PTP BIT(8)
+#define IGC_FLAG_WOL_SUPPORTED BIT(8)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
@@ -108,7 +115,7 @@ extern char igc_driver_version[];
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* Transmit and receive latency (for PTP timestamps) */
-/* FIXME: These values were estimated using the ones that i210 has as
+/* FIXME: These values were estimated using the ones that i225 has as
* basis, they seem to provide good numbers with ptp4l/phc2sys, but we
* need to confirm them.
*/
@@ -552,6 +559,7 @@ int igc_erase_filter(struct igc_adapter *adapter,
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
+void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb);
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 58efa7a02c68..4ddccccf42cc 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -16,7 +16,10 @@
/* Wake Up Filter Control */
#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
@@ -259,6 +262,9 @@
#define IGC_GPIE_EIAME 0x40000000
#define IGC_GPIE_PBA 0x80000000
+/* Receive Descriptor bit definitions */
+#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */
+
/* Transmit Descriptor bit definitions */
#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c
new file mode 100644
index 000000000000..657ab50ae296
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_dump.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include "igc.h"
+
+struct igc_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct igc_reg_info igc_reg_info_tbl[] = {
+ /* General Registers */
+ {IGC_CTRL, "CTRL"},
+ {IGC_STATUS, "STATUS"},
+ {IGC_CTRL_EXT, "CTRL_EXT"},
+ {IGC_MDIC, "MDIC"},
+
+ /* Interrupt Registers */
+ {IGC_ICR, "ICR"},
+
+ /* RX Registers */
+ {IGC_RCTL, "RCTL"},
+ {IGC_RDLEN(0), "RDLEN"},
+ {IGC_RDH(0), "RDH"},
+ {IGC_RDT(0), "RDT"},
+ {IGC_RXDCTL(0), "RXDCTL"},
+ {IGC_RDBAL(0), "RDBAL"},
+ {IGC_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {IGC_TCTL, "TCTL"},
+ {IGC_TDBAL(0), "TDBAL"},
+ {IGC_TDBAH(0), "TDBAH"},
+ {IGC_TDLEN(0), "TDLEN"},
+ {IGC_TDH(0), "TDH"},
+ {IGC_TDT(0), "TDT"},
+ {IGC_TXDCTL(0), "TXDCTL"},
+ {IGC_TDFH, "TDFH"},
+ {IGC_TDFT, "TDFT"},
+ {IGC_TDFHS, "TDFHS"},
+ {IGC_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/* igc_regdump - register printout routine */
+static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case IGC_RDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDLEN(n));
+ break;
+ case IGC_RDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDH(n));
+ break;
+ case IGC_RDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDT(n));
+ break;
+ case IGC_RXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RXDCTL(n));
+ break;
+ case IGC_RDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDBAL(n));
+ break;
+ case IGC_RDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDBAH(n));
+ break;
+ case IGC_TDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDBAL(n));
+ break;
+ case IGC_TDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDBAH(n));
+ break;
+ case IGC_TDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDLEN(n));
+ break;
+ case IGC_TDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDH(n));
+ break;
+ case IGC_TDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDT(n));
+ break;
+ case IGC_TXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TXDCTL(n));
+ break;
+ default:
+ pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+ pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+ regs[2], regs[3]);
+}
+
+/* igc_rings_dump - Tx-rings and Rx-rings */
+void igc_rings_dump(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ union igc_adv_tx_desc *tx_desc;
+ union igc_adv_rx_desc *rx_desc;
+ struct igc_ring *tx_ring;
+ struct igc_ring *rx_ring;
+ u32 staterr;
+ u16 i, n;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ pr_info("Device Name state trans_start\n");
+ pr_info("%-15s %016lX %016lX\n", netdev->name,
+ netdev->state, dev_trans_start(netdev));
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ struct igc_tx_buffer *buffer_info;
+
+ tx_ring = adapter->tx_ring[n];
+ buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+
+ pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 38 36 35 32 31 24 15 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ pr_info("------------------------------------\n");
+ pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
+ struct igc_tx_buffer *buffer_info;
+
+ tx_desc = IGC_TX_DESC(tx_ring, i);
+ buffer_info = &tx_ring->tx_buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ next_desc = " NTC/U";
+ else if (i == tx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == tx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+ i, le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp,
+ buffer_info->skb, next_desc);
+
+ if (netif_msg_pktdata(adapter) && buffer_info->skb)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, buffer_info->skb->data,
+ dma_unmap_len(buffer_info, len),
+ true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ pr_info("Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ pr_info(" %5d %5X %5X\n",
+ n, rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 17 16 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ pr_info("------------------------------------\n");
+ pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
+ struct igc_rx_buffer *buffer_info;
+
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ rx_desc = IGC_RX_DESC(rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ if (staterr & IGC_RXD_STAT_DD) {
+ /* Descriptor Done */
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
+ "RWB", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ next_desc);
+ } else {
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
+ "R ", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ next_desc);
+
+ if (netif_msg_pktdata(adapter) &&
+ buffer_info->dma && buffer_info->page) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ page_address
+ (buffer_info->page) +
+ buffer_info->page_offset,
+ igc_rx_bufsz(rx_ring),
+ true);
+ }
+ }
+ }
+ }
+
+exit:
+ return;
+}
+
+/* igc_regs_dump - registers dump */
+void igc_regs_dump(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ struct igc_reg_info *reginfo;
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ pr_info(" Register Name Value\n");
+ for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ igc_regdump(hw, reginfo);
+ }
+}
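
For orientation, a minimal sketch (not part of the patch) of how the two new dump helpers are meant to be used; the patch itself wires them into igc_reset_task() in igc_main.c further below. The example_dump_on_reset() name is hypothetical.

/* Illustrative only: capture adapter state before a reset.  How much is
 * printed is gated by the adapter's msglevel (the netif_msg_* checks
 * inside the helpers).
 */
static void example_dump_on_reset(struct igc_adapter *adapter)
{
	igc_rings_dump(adapter);	/* TX/RX ring summaries and descriptors */
	igc_regs_dump(adapter);		/* registers listed in igc_reg_info_tbl */
}
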
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index ee07011e13e9..69f50b8e2af3 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -308,6 +308,65 @@ static void igc_get_regs(struct net_device *netdev,
regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
}
+static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ wol->wolopts = 0;
+
+ if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
+ return;
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
+
+ /* apply any specific unsupported masks here */
+ switch (adapter->hw.device_id) {
+ default:
+ break;
+ }
+
+ if (adapter->wol & IGC_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & IGC_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & IGC_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & IGC_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & IGC_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
+ return -EOPNOTSUPP;
+
+ if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= IGC_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= IGC_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= IGC_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= IGC_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= IGC_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
static u32 igc_get_msglevel(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -1859,6 +1918,8 @@ static const struct ethtool_ops igc_ethtool_ops = {
.get_drvinfo = igc_get_drvinfo,
.get_regs_len = igc_get_regs_len,
.get_regs = igc_get_regs,
+ .get_wol = igc_get_wol,
+ .set_wol = igc_set_wol,
.get_msglevel = igc_get_msglevel,
.set_msglevel = igc_set_msglevel,
.nway_reset = igc_nway_reset,
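
As a usage note (assumption, not part of the patch): once these hooks are registered, the classic SIOCETHTOOL interface exposes them to user space. query_wol() below is a hypothetical user-space sketch, roughly equivalent to 'ethtool <ifname>'.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Illustrative only: read the WoL capabilities the new get_wol hook reports. */
static int query_wol(const char *ifname)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr = { 0 };
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&wol;

	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (!ret)
		printf("%s: supported=0x%x wolopts=0x%x\n",
		       ifname, wol.supported, wol.wolopts);

	close(fd);
	return ret;
}
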
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index d9d5425fe8d9..69fa1ce1f927 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3546,6 +3546,8 @@ static void igc_reset_task(struct work_struct *work)
adapter = container_of(work, struct igc_adapter, reset_task);
+ igc_rings_dump(adapter);
+ igc_regs_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igc_reinit_locked(adapter);
}
@@ -4029,6 +4031,9 @@ static void igc_watchdog_task(struct work_struct *work)
}
}
if (link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
@@ -4114,6 +4119,8 @@ no_wait:
return;
}
}
+ pm_schedule_suspend(netdev->dev.parent,
+ MSEC_PER_SEC * 5);
/* also check for alternate media here */
} else if (!netif_carrier_ok(netdev) &&
@@ -4337,6 +4344,7 @@ request_done:
static int __igc_open(struct net_device *netdev, bool resuming)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
int err = 0;
int i = 0;
@@ -4348,6 +4356,9 @@ static int __igc_open(struct net_device *netdev, bool resuming)
return -EBUSY;
}
+ if (!resuming)
+ pm_runtime_get_sync(&pdev->dev);
+
netif_carrier_off(netdev);
/* allocate transmit descriptors */
@@ -4386,6 +4397,9 @@ static int __igc_open(struct net_device *netdev, bool resuming)
rd32(IGC_ICR);
igc_irq_enable(adapter);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
+
netif_tx_start_all_queues(netdev);
/* start the watchdog. */
@@ -4404,6 +4418,8 @@ err_setup_rx:
igc_free_all_tx_resources(adapter);
err_setup_tx:
igc_reset(adapter);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
return err;
}
@@ -4428,9 +4444,13 @@ static int igc_open(struct net_device *netdev)
static int __igc_close(struct net_device *netdev, bool suspending)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
+ if (!suspending)
+ pm_runtime_get_sync(&pdev->dev);
+
igc_down(adapter);
igc_release_hw_control(adapter);
@@ -4440,6 +4460,9 @@ static int __igc_close(struct net_device *netdev, bool suspending)
igc_free_all_tx_resources(adapter);
igc_free_all_rx_resources(adapter);
+ if (!suspending)
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
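
The get/put calls added to __igc_open()/__igc_close() follow the usual runtime-PM convention; a generic sketch of the pattern (illustrative, with a hypothetical example_open()):

#include <linux/pm_runtime.h>

/* Illustrative only: hold a runtime-PM reference while touching the
 * hardware; callers on the system resume/suspend path already own a
 * reference, so they skip the get/put pair.
 */
static int example_open(struct device *dev, bool resuming)
{
	int err = 0;

	if (!resuming)
		pm_runtime_get_sync(dev);	/* wake device, bump usage count */

	/* ... allocate rings, enable IRQs, start queues ... */

	if (!resuming)
		pm_runtime_put(dev);		/* allow autosuspend again */

	return err;
}
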
@@ -4766,6 +4789,16 @@ static int igc_probe(struct pci_dev *pdev,
hw->fc.requested_mode = igc_fc_default;
hw->fc.current_mode = igc_fc_default;
+ /* By default, support wake on port A */
+ adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
+
+ /* initialize the wol settings based on the eeprom settings */
+ if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
+ adapter->wol |= IGC_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev,
+ adapter->flags & IGC_FLAG_WOL_SUPPORTED);
+
/* reset the hardware with the new settings */
igc_reset(adapter);
@@ -4792,6 +4825,10 @@ static int igc_probe(struct pci_dev *pdev,
pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+
+ pm_runtime_put_noidle(&pdev->dev);
+
return 0;
err_register:
@@ -4826,6 +4863,8 @@ static void igc_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ pm_runtime_get_noresume(&pdev->dev);
+
igc_ptp_stop(adapter);
set_bit(__IGC_DOWN, &adapter->state);
@@ -4870,6 +4909,8 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (netif_running(netdev))
__igc_close(netdev, true);
+ igc_ptp_suspend(adapter);
+
igc_clear_interrupt_scheme(adapter);
rtnl_unlock();
@@ -5045,6 +5086,108 @@ static void igc_shutdown(struct pci_dev *pdev)
}
}
+/**
+ * igc_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current PCI connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ **/
+static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ igc_down(adapter);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * igc_io_slot_reset - called after the PCI bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. Implementation
+ * resembles the first half of the igc_resume routine.
+ **/
+static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ pci_ers_result_t result;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Could not re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ /* In case of PCI error, adapter loses its HW address
+ * so we should re-assign it here.
+ */
+ hw->hw_addr = adapter->io_addr;
+
+ igc_reset(adapter);
+ wr32(IGC_WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ return result;
+}
+
+/**
+ * igc_io_resume - called when traffic can start to flow again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the igc_resume routine.
+ */
+static void igc_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ if (igc_open(netdev)) {
+ dev_err(&pdev->dev, "igc_open failed after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver.
+ */
+ igc_get_hw_control(adapter);
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers igc_err_handler = {
+ .error_detected = igc_io_error_detected,
+ .slot_reset = igc_io_slot_reset,
+ .resume = igc_io_resume,
+};
+
#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
@@ -5062,6 +5205,7 @@ static struct pci_driver igc_driver = {
.driver.pm = &igc_pm_ops,
#endif
.shutdown = igc_shutdown,
+ .err_handler = &igc_err_handler,
};
/**
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 693506587198..f99c514ad0f4 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -509,7 +509,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
*/
-void igc_ptp_tx_work(struct work_struct *work)
+static void igc_ptp_tx_work(struct work_struct *work)
{
struct igc_adapter *adapter = container_of(work, struct igc_adapter,
ptp_tx_work);
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index c9029b549b90..d4af53a80f11 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -17,6 +17,11 @@
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+#define IGC_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define IGC_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define IGC_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define IGC_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define IGC_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
/* NVM Register Descriptions */
#define IGC_EERD 0x12014 /* EEprom mode read - RW */
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 2e4975572e9f..de3c7ce9353c 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2077,12 +2077,7 @@ jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
IPPROTO_TCP,
0);
} else {
- struct ipv6hdr *ip6h = ipv6_hdr(skb);
-
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
- &ip6h->daddr, 0,
- IPPROTO_TCP,
- 0);
+ tcp_v6_gso_csum_prep(skb);
}
return 0;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 98017e7d5dd0..1c391f63a26f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -341,6 +341,11 @@ enum {
ETHTOOL_STAT_EEE_WAKEUP,
ETHTOOL_STAT_SKB_ALLOC_ERR,
ETHTOOL_STAT_REFILL_ERR,
+ ETHTOOL_XDP_REDIRECT,
+ ETHTOOL_XDP_PASS,
+ ETHTOOL_XDP_DROP,
+ ETHTOOL_XDP_XMIT,
+ ETHTOOL_XDP_TX,
ETHTOOL_MAX_STATS,
};
@@ -354,10 +359,10 @@ struct mvneta_statistic {
#define T_REG_64 64
#define T_SW 1
-#define MVNETA_XDP_PASS BIT(0)
-#define MVNETA_XDP_DROPPED BIT(1)
-#define MVNETA_XDP_TX BIT(2)
-#define MVNETA_XDP_REDIR BIT(3)
+#define MVNETA_XDP_PASS 0
+#define MVNETA_XDP_DROPPED BIT(0)
+#define MVNETA_XDP_TX BIT(1)
+#define MVNETA_XDP_REDIR BIT(2)
static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3000, T_REG_64, "good_octets_received", },
@@ -395,16 +400,38 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
+ { ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
+ { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
+ { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
+ { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
+ { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
};
-struct mvneta_pcpu_stats {
- struct u64_stats_sync syncp;
+struct mvneta_stats {
u64 rx_packets;
u64 rx_bytes;
- u64 rx_dropped;
- u64 rx_errors;
u64 tx_packets;
u64 tx_bytes;
+ /* xdp */
+ u64 xdp_redirect;
+ u64 xdp_pass;
+ u64 xdp_drop;
+ u64 xdp_xmit;
+ u64 xdp_tx;
+};
+
+struct mvneta_ethtool_stats {
+ struct mvneta_stats ps;
+ u64 skb_alloc_error;
+ u64 refill_error;
+};
+
+struct mvneta_pcpu_stats {
+ struct u64_stats_sync syncp;
+
+ struct mvneta_ethtool_stats es;
+ u64 rx_dropped;
+ u64 rx_errors;
};
struct mvneta_pcpu_port {
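
The counters moved into mvneta_pcpu_stats are updated and read with the standard u64_stats_sync sequence; a minimal reader-side sketch (illustrative, mirroring what the mvneta_get_stats64() and ethtool hunks below do), with a hypothetical example_read_rx_packets():

/* Illustrative only: snapshot one CPU's counter consistently (the
 * begin/retry pair matters on 32-bit systems where u64 reads can tear).
 */
static u64 example_read_rx_packets(struct mvneta_pcpu_stats *stats)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		val = stats->es.ps.rx_packets;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

	return val;
}
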
@@ -660,10 +687,6 @@ struct mvneta_rx_queue {
/* pointer to incomplete skb buffer */
struct sk_buff *skb;
int left_size;
-
- /* error counters */
- u32 skb_alloc_err;
- u32 refill_err;
};
static enum cpuhp_state online_hpstate;
@@ -748,12 +771,12 @@ mvneta_get_stats64(struct net_device *dev,
cpu_stats = per_cpu_ptr(pp->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
- rx_packets = cpu_stats->rx_packets;
- rx_bytes = cpu_stats->rx_bytes;
+ rx_packets = cpu_stats->es.ps.rx_packets;
+ rx_bytes = cpu_stats->es.ps.rx_bytes;
rx_dropped = cpu_stats->rx_dropped;
rx_errors = cpu_stats->rx_errors;
- tx_packets = cpu_stats->tx_packets;
- tx_bytes = cpu_stats->tx_bytes;
+ tx_packets = cpu_stats->es.ps.tx_packets;
+ tx_bytes = cpu_stats->es.ps.tx_bytes;
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
@@ -1933,7 +1956,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
if (!data || !(rx_desc->buf_phys_addr))
continue;
- page_pool_put_page(rxq->page_pool, data, false);
+ page_pool_put_full_page(rxq->page_pool, data, false);
}
if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
xdp_rxq_info_unreg(&rxq->xdp_rxq);
@@ -1942,19 +1965,18 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
}
static void
-mvneta_update_stats(struct mvneta_port *pp, u32 pkts,
- u32 len, bool tx)
+mvneta_update_stats(struct mvneta_port *pp,
+ struct mvneta_stats *ps)
{
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
u64_stats_update_begin(&stats->syncp);
- if (tx) {
- stats->tx_packets += pkts;
- stats->tx_bytes += len;
- } else {
- stats->rx_packets += pkts;
- stats->rx_bytes += len;
- }
+ stats->es.ps.rx_packets += ps->rx_packets;
+ stats->es.ps.rx_bytes += ps->rx_bytes;
+ /* xdp */
+ stats->es.ps.xdp_redirect += ps->xdp_redirect;
+ stats->es.ps.xdp_pass += ps->xdp_pass;
+ stats->es.ps.xdp_drop += ps->xdp_drop;
u64_stats_update_end(&stats->syncp);
}
@@ -1969,9 +1991,15 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
rx_desc = rxq->descs + curr_desc;
if (!(rx_desc->buf_phys_addr)) {
if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
+ struct mvneta_pcpu_stats *stats;
+
pr_err("Can't refill queue %d. Done %d from %d\n",
rxq->id, i, rxq->refill_num);
- rxq->refill_err++;
+
+ stats = this_cpu_ptr(pp->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.refill_error++;
+ u64_stats_update_end(&stats->syncp);
break;
}
}
@@ -2021,7 +2049,6 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
tx_desc->buf_phys_addr = dma_addr;
tx_desc->data_size = xdpf->len;
- mvneta_update_stats(pp, 1, xdpf->len, true);
mvneta_txq_inc_put(txq);
txq->pending++;
txq->count++;
@@ -2048,8 +2075,17 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
__netif_tx_lock(nq, cpu);
ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
- if (ret == MVNETA_XDP_TX)
+ if (ret == MVNETA_XDP_TX) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += xdpf->len;
+ stats->es.ps.tx_packets++;
+ stats->es.ps.xdp_tx++;
+ u64_stats_update_end(&stats->syncp);
+
mvneta_txq_pend_desc_add(pp, txq, 0);
+ }
__netif_tx_unlock(nq);
return ret;
@@ -2060,10 +2096,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
struct xdp_frame **frames, u32 flags)
{
struct mvneta_port *pp = netdev_priv(dev);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ int i, nxmit_byte = 0, nxmit = num_frame;
int cpu = smp_processor_id();
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
- int i, drops = 0;
u32 ret;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -2075,9 +2112,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
__netif_tx_lock(nq, cpu);
for (i = 0; i < num_frame; i++) {
ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
- if (ret != MVNETA_XDP_TX) {
+ if (ret == MVNETA_XDP_TX) {
+ nxmit_byte += frames[i]->len;
+ } else {
xdp_return_frame_rx_napi(frames[i]);
- drops++;
+ nxmit--;
}
}
@@ -2085,12 +2124,19 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
mvneta_txq_pend_desc_add(pp, txq, 0);
__netif_tx_unlock(nq);
- return num_frame - drops;
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += nxmit_byte;
+ stats->es.ps.tx_packets += nxmit;
+ stats->es.ps.xdp_xmit += nxmit;
+ u64_stats_update_end(&stats->syncp);
+
+ return nxmit;
}
static int
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
- struct bpf_prog *prog, struct xdp_buff *xdp)
+ struct bpf_prog *prog, struct xdp_buff *xdp,
+ struct mvneta_stats *stats)
{
unsigned int len;
u32 ret, act;
@@ -2100,28 +2146,29 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
switch (act) {
case XDP_PASS:
- ret = MVNETA_XDP_PASS;
- break;
+ stats->xdp_pass++;
+ return MVNETA_XDP_PASS;
case XDP_REDIRECT: {
int err;
err = xdp_do_redirect(pp->dev, xdp, prog);
if (err) {
ret = MVNETA_XDP_DROPPED;
- __page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
} else {
ret = MVNETA_XDP_REDIR;
+ stats->xdp_redirect++;
}
break;
}
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
- __page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2130,13 +2177,16 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
/* fall through */
case XDP_DROP:
- __page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data), len, true);
ret = MVNETA_XDP_DROPPED;
+ stats->xdp_drop++;
break;
}
+ stats->rx_bytes += xdp->data_end - xdp->data;
+ stats->rx_packets++;
+
return ret;
}
@@ -2146,12 +2196,14 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog,
- struct page *page, u32 *xdp_ret)
+ struct page *page,
+ struct mvneta_stats *stats)
{
unsigned char *data = page_address(page);
int data_len = -MVNETA_MH_SIZE, len;
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
+ int ret = 0;
if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2175,17 +2227,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
xdp_set_data_meta_invalid(xdp);
if (xdp_prog) {
- u32 ret;
-
- ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
- if (ret != MVNETA_XDP_PASS) {
- mvneta_update_stats(pp, 1,
- xdp->data_end - xdp->data,
- false);
- rx_desc->buf_phys_addr = 0;
- *xdp_ret |= ret;
- return ret;
- }
+ ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
+ if (ret)
+ goto out;
}
rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
@@ -2193,9 +2237,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
- rxq->skb_alloc_err++;
u64_stats_update_begin(&stats->syncp);
+ stats->es.skb_alloc_error++;
stats->rx_dropped++;
u64_stats_update_end(&stats->syncp);
@@ -2209,9 +2253,11 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
rxq->left_size = rx_desc->data_size - len;
+
+out:
rx_desc->buf_phys_addr = 0;
- return 0;
+ return ret;
}
static void
@@ -2252,12 +2298,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
- int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
+ int rx_proc = 0, rx_todo, refill;
struct net_device *dev = pp->dev;
+ struct mvneta_stats ps = {};
struct bpf_prog *xdp_prog;
struct xdp_buff xdp_buf;
- int rx_todo, refill;
- u32 xdp_ret = 0;
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2290,7 +2335,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
- xdp_prog, page, &xdp_ret);
+ xdp_prog, page, &ps);
if (err)
continue;
} else {
@@ -2314,8 +2359,9 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
rxq->skb = NULL;
continue;
}
- rcvd_pkts++;
- rcvd_bytes += rxq->skb->len;
+
+ ps.rx_bytes += rxq->skb->len;
+ ps.rx_packets++;
/* Linux processing */
rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
@@ -2327,11 +2373,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
rcu_read_unlock();
- if (xdp_ret & MVNETA_XDP_REDIR)
+ if (ps.xdp_redirect)
xdp_do_flush_map();
- if (rcvd_pkts)
- mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
+ if (ps.rx_packets)
+ mvneta_update_stats(pp, &ps);
/* return some buffers to hardware queue, one at a time is too slow */
refill = mvneta_rx_refill_queue(pp, rxq);
@@ -2339,7 +2385,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
- return rcvd_pkts;
+ return ps.rx_packets;
}
/* Main rx processing when using hardware buffer management */
@@ -2423,8 +2469,15 @@ err_drop_frame:
/* Refill processing */
err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
if (err) {
+ struct mvneta_pcpu_stats *stats;
+
netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->refill_err++;
+
+ stats = this_cpu_ptr(pp->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.refill_error++;
+ u64_stats_update_end(&stats->syncp);
+
goto err_drop_frame_ret_pool;
}
@@ -2454,8 +2507,14 @@ err_drop_frame:
napi_gro_receive(napi, skb);
}
- if (rcvd_pkts)
- mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
+ if (rcvd_pkts) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.rx_packets += rcvd_pkts;
+ stats->es.ps.rx_bytes += rcvd_bytes;
+ u64_stats_update_end(&stats->syncp);
+ }
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
@@ -2711,6 +2770,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
netdev_tx_sent_queue(nq, len);
@@ -2724,7 +2784,10 @@ out:
else
txq->pending += frags;
- mvneta_update_stats(pp, 1, len, true);
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += len;
+ stats->es.ps.tx_packets++;
+ u64_stats_update_end(&stats->syncp);
} else {
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
@@ -4420,45 +4483,100 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
}
}
+static void
+mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
+ struct mvneta_ethtool_stats *es)
+{
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct mvneta_pcpu_stats *stats;
+ u64 skb_alloc_error;
+ u64 refill_error;
+ u64 xdp_redirect;
+ u64 xdp_pass;
+ u64 xdp_drop;
+ u64 xdp_xmit;
+ u64 xdp_tx;
+
+ stats = per_cpu_ptr(pp->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ skb_alloc_error = stats->es.skb_alloc_error;
+ refill_error = stats->es.refill_error;
+ xdp_redirect = stats->es.ps.xdp_redirect;
+ xdp_pass = stats->es.ps.xdp_pass;
+ xdp_drop = stats->es.ps.xdp_drop;
+ xdp_xmit = stats->es.ps.xdp_xmit;
+ xdp_tx = stats->es.ps.xdp_tx;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ es->skb_alloc_error += skb_alloc_error;
+ es->refill_error += refill_error;
+ es->ps.xdp_redirect += xdp_redirect;
+ es->ps.xdp_pass += xdp_pass;
+ es->ps.xdp_drop += xdp_drop;
+ es->ps.xdp_xmit += xdp_xmit;
+ es->ps.xdp_tx += xdp_tx;
+ }
+}
+
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
+ struct mvneta_ethtool_stats stats = {};
const struct mvneta_statistic *s;
void __iomem *base = pp->base;
u32 high, low;
u64 val;
int i;
+ mvneta_ethtool_update_pcpu_stats(pp, &stats);
for (i = 0, s = mvneta_statistics;
s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
s++, i++) {
- val = 0;
-
switch (s->type) {
case T_REG_32:
val = readl_relaxed(base + s->offset);
+ pp->ethtool_stats[i] += val;
break;
case T_REG_64:
/* Docs say to read low 32-bit then high */
low = readl_relaxed(base + s->offset);
high = readl_relaxed(base + s->offset + 4);
val = (u64)high << 32 | low;
+ pp->ethtool_stats[i] += val;
break;
case T_SW:
switch (s->offset) {
case ETHTOOL_STAT_EEE_WAKEUP:
val = phylink_get_eee_err(pp->phylink);
+ pp->ethtool_stats[i] += val;
break;
case ETHTOOL_STAT_SKB_ALLOC_ERR:
- val = pp->rxqs[0].skb_alloc_err;
+ pp->ethtool_stats[i] = stats.skb_alloc_error;
break;
case ETHTOOL_STAT_REFILL_ERR:
- val = pp->rxqs[0].refill_err;
+ pp->ethtool_stats[i] = stats.refill_error;
+ break;
+ case ETHTOOL_XDP_REDIRECT:
+ pp->ethtool_stats[i] = stats.ps.xdp_redirect;
+ break;
+ case ETHTOOL_XDP_PASS:
+ pp->ethtool_stats[i] = stats.ps.xdp_pass;
+ break;
+ case ETHTOOL_XDP_DROP:
+ pp->ethtool_stats[i] = stats.ps.xdp_drop;
+ break;
+ case ETHTOOL_XDP_TX:
+ pp->ethtool_stats[i] = stats.ps.xdp_tx;
+ break;
+ case ETHTOOL_XDP_XMIT:
+ pp->ethtool_stats[i] = stats.ps.xdp_xmit;
break;
}
break;
}
-
- pp->ethtool_stats[i] += val;
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 5ca788691911..9f5b7229574e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -113,7 +113,6 @@ int cgx_get_cgxcnt_max(void)
return idmax + 1;
}
-EXPORT_SYMBOL(cgx_get_cgxcnt_max);
int cgx_get_lmac_cnt(void *cgxd)
{
@@ -124,7 +123,6 @@ int cgx_get_lmac_cnt(void *cgxd)
return cgx->lmac_count;
}
-EXPORT_SYMBOL(cgx_get_lmac_cnt);
void *cgx_get_pdata(int cgx_id)
{
@@ -136,7 +134,6 @@ void *cgx_get_pdata(int cgx_id)
}
return NULL;
}
-EXPORT_SYMBOL(cgx_get_pdata);
int cgx_get_cgxid(void *cgxd)
{
@@ -164,7 +161,6 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
*linfo = lmac->link_info;
return 0;
}
-EXPORT_SYMBOL(cgx_get_link_info);
static u64 mac2u64 (u8 *mac_addr)
{
@@ -195,7 +191,6 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_addr_set);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
@@ -205,7 +200,6 @@ u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
-EXPORT_SYMBOL(cgx_lmac_addr_get);
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
@@ -217,7 +211,6 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
return 0;
}
-EXPORT_SYMBOL(cgx_set_pkind);
static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
{
@@ -255,7 +248,6 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
}
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_internal_loopback);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
@@ -289,7 +281,6 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
}
}
-EXPORT_SYMBOL(cgx_lmac_promisc_config);
/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
@@ -318,7 +309,6 @@ void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
}
}
-EXPORT_SYMBOL(cgx_lmac_enadis_rx_pause_fwding);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
@@ -329,7 +319,6 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
-EXPORT_SYMBOL(cgx_get_rx_stats);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
@@ -340,7 +329,6 @@ int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
return 0;
}
-EXPORT_SYMBOL(cgx_get_tx_stats);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
@@ -358,7 +346,6 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
@@ -379,7 +366,6 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return !!(last & DATA_PKT_TX_EN);
}
-EXPORT_SYMBOL(cgx_lmac_tx_enable);
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
@@ -610,7 +596,6 @@ int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
return 0;
}
-EXPORT_SYMBOL(cgx_get_mkex_prfl_info);
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
@@ -676,7 +661,6 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_evh_register);
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
@@ -695,7 +679,6 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_evh_unregister);
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
@@ -769,7 +752,6 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_linkup_start);
static int cgx_lmac_init(struct cgx *cgx)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 51c206f4fe6f..7afb7caad873 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -432,7 +432,7 @@ int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
-int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf);
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 11e5921c55b9..b8e8f337316f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -350,6 +350,18 @@ int rvu_cgx_exit(struct rvu *rvu)
return 0;
}
+/* Most of the CGX configuration is restricted to the mapped PF only,
+ * VFs of the mapped PF and other PFs are not allowed. This function checks
+ * whether a PFFUNC is permitted to do the config or not.
+ */
+static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+{
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return false;
+ return true;
+}
+
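
A sketch of the handler prologue this helper enables (illustrative; example_cgx_handler() is hypothetical, but the pattern matches the conversions in the rest of this file):

/* Illustrative only: reject VFs and PFs that are not mapped to a CGX
 * LMAC before touching LMAC configuration.
 */
static int example_cgx_handler(struct rvu *rvu, u16 pcifunc)
{
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[rvu_get_pf(pcifunc)],
			    &cgx_id, &lmac_id);
	/* ... per-LMAC configuration using cgx_id/lmac_id ... */
	return 0;
}
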
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
u8 cgx_id, lmac_id;
@@ -373,11 +385,8 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -409,8 +418,7 @@ int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
u8 cgx_idx, lmac;
void *cgxd;
- if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
@@ -477,12 +485,8 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -493,16 +497,11 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- u16 pcifunc = req->hdr.pcifunc;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -515,11 +514,8 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -571,11 +567,8 @@ static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index eb5e542424e7..a29e5c7c8cfc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -90,6 +90,26 @@ int rvu_get_nixlf_count(struct rvu *rvu)
return block->lf.max;
}
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (*nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (nix_blkaddr)
+ *nix_blkaddr = blkaddr;
+
+ return 0;
+}
+
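
With the block address made an optional output, every NIX mailbox handler can share the same prologue; a sketch (illustrative, example_nix_handler() is hypothetical):

/* Illustrative only: resolve the caller's NIX LF and block address in
 * one call; pass NULL for the last argument when only the LF is needed.
 */
static int example_nix_handler(struct rvu *rvu, u16 pcifunc)
{
	int nixlf, blkaddr, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;	/* NIX_AF_ERR_AF_LF_INVALID */

	/* ... program per-LF registers at blkaddr for nixlf ... */
	return 0;
}
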
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
INIT_HLIST_HEAD(&list->head);
@@ -1667,13 +1687,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
req->num_regs > MAX_REGS_PER_MBOX_MSG)
return NIX_AF_INVAL_TXSCHQ_CFG;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
if (err)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ return err;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -1767,17 +1783,12 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
if (req->cfg_type) {
err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
@@ -2119,18 +2130,13 @@ static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
- int i, nixlf, blkaddr;
+ int i, nixlf, blkaddr, err;
u64 stats;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
/* Get stats count supported by HW */
stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
@@ -2418,18 +2424,14 @@ int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
struct nix_rss_flowkey_cfg *req,
struct nix_rss_flowkey_cfg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int alg_idx, nixlf, blkaddr;
struct nix_hw *nix_hw;
+ int err;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -2522,19 +2524,15 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
- int blkaddr, nixlf;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
@@ -2567,19 +2565,15 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
bool allmulti = false, disable_promisc = false;
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
- int blkaddr, nixlf;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
if (req->mode & NIX_RX_MODE_PROMISC)
allmulti = false;
@@ -2794,22 +2788,12 @@ free_entry:
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
- u16 pcifunc = req->hdr.pcifunc;
- struct rvu_block *block;
- struct rvu_pfvf *pfvf;
- int nixlf, blkaddr;
+ int nixlf, blkaddr, err;
u64 cfg;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- block = &hw->block[blkaddr];
- nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
/* Set the interface configuration */
@@ -3114,30 +3098,13 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
-int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
-{
- struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
- struct rvu_hwinfo *hw = rvu->hw;
- int blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (*nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- return 0;
-}
-
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int nixlf, err;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
@@ -3152,7 +3119,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
u16 pcifunc = req->hdr.pcifunc;
int nixlf, err;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index d3e06cec8317..e0bb8e12356e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -16,7 +16,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
- diag/fw_tracer.o diag/crdump.o devlink.o
+ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o
#
# Netdev basic
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
new file mode 100644
index 000000000000..17ab7efe693d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "rsc_dump.h"
+#include "lib/mlx5.h"
+
+#define MLX5_SGMT_TYPE(SGMT) MLX5_SGMT_TYPE_##SGMT
+#define MLX5_SGMT_STR_ASSING(SGMT)[MLX5_SGMT_TYPE(SGMT)] = #SGMT
+static const char *const mlx5_rsc_sgmt_name[] = {
+ MLX5_SGMT_STR_ASSING(HW_CQPC),
+ MLX5_SGMT_STR_ASSING(HW_SQPC),
+ MLX5_SGMT_STR_ASSING(HW_RQPC),
+ MLX5_SGMT_STR_ASSING(FULL_SRQC),
+ MLX5_SGMT_STR_ASSING(FULL_CQC),
+ MLX5_SGMT_STR_ASSING(FULL_EQC),
+ MLX5_SGMT_STR_ASSING(FULL_QPC),
+ MLX5_SGMT_STR_ASSING(SND_BUFF),
+ MLX5_SGMT_STR_ASSING(RCV_BUFF),
+ MLX5_SGMT_STR_ASSING(SRQ_BUFF),
+ MLX5_SGMT_STR_ASSING(CQ_BUFF),
+ MLX5_SGMT_STR_ASSING(EQ_BUFF),
+ MLX5_SGMT_STR_ASSING(SX_SLICE),
+ MLX5_SGMT_STR_ASSING(SX_SLICE_ALL),
+ MLX5_SGMT_STR_ASSING(RDB),
+ MLX5_SGMT_STR_ASSING(RX_SLICE_ALL),
+};
+
+struct mlx5_rsc_dump {
+ u32 pdn;
+ struct mlx5_core_mkey mkey;
+ u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
+};
+
+struct mlx5_rsc_dump_cmd {
+ u64 mem_size;
+ u8 cmd[MLX5_ST_SZ_BYTES(resource_dump)];
+};
+
+static int mlx5_rsc_dump_sgmt_get_by_name(char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlx5_rsc_sgmt_name); i++)
+ if (!strcmp(name, mlx5_rsc_sgmt_name[i]))
+ return i;
+
+ return -EINVAL;
+}
+
+static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page)
+{
+ void *data = page_address(page);
+ enum mlx5_sgmt_type sgmt_idx;
+ int num_of_items;
+ char *sgmt_name;
+ void *member;
+ void *menu;
+ int i;
+
+ menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
+ num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records);
+
+ for (i = 0; i < num_of_items; i++) {
+ member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]);
+ sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name);
+ sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name);
+ if (sgmt_idx == -EINVAL)
+ continue;
+ rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record,
+ member, segment_type);
+ }
+}
+
+static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page)
+{
+ struct mlx5_rsc_dump *rsc_dump = dev->rsc_dump;
+ struct device *ddev = &dev->pdev->dev;
+ u32 out_seq_num;
+ u32 in_seq_num;
+ dma_addr_t dma;
+ int err;
+
+ dma = dma_map_page(ddev, page, 0, cmd->mem_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ddev, dma)))
+ return -ENOMEM;
+
+ in_seq_num = MLX5_GET(resource_dump, cmd->cmd, seq_num);
+ MLX5_SET(resource_dump, cmd->cmd, mkey, rsc_dump->mkey.key);
+ MLX5_SET64(resource_dump, cmd->cmd, address, dma);
+
+ err = mlx5_core_access_reg(dev, cmd->cmd, sizeof(cmd->cmd), cmd->cmd,
+ sizeof(cmd->cmd), MLX5_REG_RESOURCE_DUMP, 0, 1);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to access err %d\n", err);
+ goto out;
+ }
+ out_seq_num = MLX5_GET(resource_dump, cmd->cmd, seq_num);
+ if (out_seq_num && (in_seq_num + 1 != out_seq_num))
+ err = -EIO;
+out:
+ dma_unmap_page(ddev, dma, cmd->mem_size, DMA_FROM_DEVICE);
+ return err;
+}
+
+struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
+ struct mlx5_rsc_key *key)
+{
+ struct mlx5_rsc_dump_cmd *cmd;
+ int sgmt_type;
+
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ sgmt_type = dev->rsc_dump->fw_segment_type[key->rsc];
+ if (!sgmt_type && key->rsc != MLX5_SGMT_TYPE_MENU)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ mlx5_core_err(dev, "Resource dump: Failed to allocate command\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ MLX5_SET(resource_dump, cmd->cmd, segment_type, sgmt_type);
+ MLX5_SET(resource_dump, cmd->cmd, index1, key->index1);
+ MLX5_SET(resource_dump, cmd->cmd, index2, key->index2);
+ MLX5_SET(resource_dump, cmd->cmd, num_of_obj1, key->num_of_obj1);
+ MLX5_SET(resource_dump, cmd->cmd, num_of_obj2, key->num_of_obj2);
+ MLX5_SET(resource_dump, cmd->cmd, size, key->size);
+ cmd->mem_size = key->size;
+ return cmd;
+}
+
+void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd)
+{
+ kfree(cmd);
+}
+
+int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page, int *size)
+{
+ bool more_dump;
+ int err;
+
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return -EOPNOTSUPP;
+
+ err = mlx5_rsc_dump_trigger(dev, cmd, page);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to trigger dump, %d\n", err);
+ return err;
+ }
+ *size = MLX5_GET(resource_dump, cmd->cmd, size);
+ more_dump = MLX5_GET(resource_dump, cmd->cmd, more_dump);
+
+ return more_dump;
+}
+
+#define MLX5_RSC_DUMP_MENU_SEGMENT 0xffff
+static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rsc_dump_cmd *cmd = NULL;
+ struct mlx5_rsc_key key = {};
+ struct page *page;
+ int size;
+ int err;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ key.rsc = MLX5_SGMT_TYPE_MENU;
+ key.size = PAGE_SIZE;
+ cmd = mlx5_rsc_dump_cmd_create(dev, &key);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto free_page;
+ }
+ MLX5_SET(resource_dump, cmd->cmd, segment_type, MLX5_RSC_DUMP_MENU_SEGMENT);
+
+ do {
+ err = mlx5_rsc_dump_next(dev, cmd, page, &size);
+ if (err < 0)
+ goto destroy_cmd;
+
+ mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page);
+
+ } while (err > 0);
+
+destroy_cmd:
+ mlx5_rsc_dump_cmd_destroy(cmd);
+free_page:
+ __free_page(page);
+
+ return err;
+}
+
+static int mlx5_rsc_dump_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_core_mkey *mkey)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
+struct mlx5_rsc_dump *mlx5_rsc_dump_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rsc_dump *rsc_dump;
+
+ if (!MLX5_CAP_DEBUG(dev, resource_dump)) {
+ mlx5_core_dbg(dev, "Resource dump: capability not present\n");
+ return NULL;
+ }
+ rsc_dump = kzalloc(sizeof(*rsc_dump), GFP_KERNEL);
+ if (!rsc_dump)
+ return ERR_PTR(-ENOMEM);
+
+ return rsc_dump;
+}
+
+void mlx5_rsc_dump_destroy(struct mlx5_core_dev *dev)
+{
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return;
+ kfree(dev->rsc_dump);
+}
+
+int mlx5_rsc_dump_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rsc_dump *rsc_dump = dev->rsc_dump;
+ int err;
+
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return 0;
+
+ err = mlx5_core_alloc_pd(dev, &rsc_dump->pdn);
+ if (err) {
+ mlx5_core_warn(dev, "Resource dump: Failed to allocate PD %d\n", err);
+ return err;
+ }
+ err = mlx5_rsc_dump_create_mkey(dev, rsc_dump->pdn, &rsc_dump->mkey);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to create mkey, %d\n", err);
+ goto free_pd;
+ }
+ err = mlx5_rsc_dump_menu(dev);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to read menu, %d\n", err);
+ goto destroy_mkey;
+ }
+ return err;
+
+destroy_mkey:
+ mlx5_core_destroy_mkey(dev, &rsc_dump->mkey);
+free_pd:
+ mlx5_core_dealloc_pd(dev, rsc_dump->pdn);
+ return err;
+}
+
+void mlx5_rsc_dump_cleanup(struct mlx5_core_dev *dev)
+{
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return;
+
+ mlx5_core_destroy_mkey(dev, &dev->rsc_dump->mkey);
+ mlx5_core_dealloc_pd(dev, dev->rsc_dump->pdn);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h
new file mode 100644
index 000000000000..3b7573461a45
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_RSC_DUMP_H
+#define __MLX5_RSC_DUMP_H
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+
+enum mlx5_sgmt_type {
+ MLX5_SGMT_TYPE_HW_CQPC,
+ MLX5_SGMT_TYPE_HW_SQPC,
+ MLX5_SGMT_TYPE_HW_RQPC,
+ MLX5_SGMT_TYPE_FULL_SRQC,
+ MLX5_SGMT_TYPE_FULL_CQC,
+ MLX5_SGMT_TYPE_FULL_EQC,
+ MLX5_SGMT_TYPE_FULL_QPC,
+ MLX5_SGMT_TYPE_SND_BUFF,
+ MLX5_SGMT_TYPE_RCV_BUFF,
+ MLX5_SGMT_TYPE_SRQ_BUFF,
+ MLX5_SGMT_TYPE_CQ_BUFF,
+ MLX5_SGMT_TYPE_EQ_BUFF,
+ MLX5_SGMT_TYPE_SX_SLICE,
+ MLX5_SGMT_TYPE_SX_SLICE_ALL,
+ MLX5_SGMT_TYPE_RDB,
+ MLX5_SGMT_TYPE_RX_SLICE_ALL,
+ MLX5_SGMT_TYPE_MENU,
+ MLX5_SGMT_TYPE_TERMINATE,
+
+ MLX5_SGMT_TYPE_NUM, /* Keep last */
+};
+
+struct mlx5_rsc_key {
+ enum mlx5_sgmt_type rsc;
+ int index1;
+ int index2;
+ int num_of_obj1;
+ int num_of_obj2;
+ int size;
+};
+
+#define MLX5_RSC_DUMP_ALL 0xFFFF
+struct mlx5_rsc_dump_cmd;
+struct mlx5_rsc_dump;
+
+struct mlx5_rsc_dump *mlx5_rsc_dump_create(struct mlx5_core_dev *dev);
+void mlx5_rsc_dump_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_rsc_dump_init(struct mlx5_core_dev *dev);
+void mlx5_rsc_dump_cleanup(struct mlx5_core_dev *dev);
+
+struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
+ struct mlx5_rsc_key *key);
+void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd);
+
+int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page, int *size);
+#endif
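
A condensed consumer-side sketch of this API (illustrative; example_dump_segment() is hypothetical — the real in-tree user is mlx5e_health_rsc_fmsg_dump() in the health.c hunk below):

/* Illustrative only: dump one resource segment page by page.
 * mlx5_rsc_dump_next() returns >0 while more data is pending, 0 on the
 * final chunk and <0 on error.
 */
static int example_dump_segment(struct mlx5_core_dev *dev,
				struct mlx5_rsc_key *key)
{
	struct mlx5_rsc_dump_cmd *cmd;
	struct page *page;
	int size, err;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	cmd = mlx5_rsc_dump_cmd_create(dev, key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto free_page;
	}

	do {
		err = mlx5_rsc_dump_next(dev, cmd, page, &size);
		if (err < 0)
			break;
		/* ... consume 'size' bytes at page_address(page) ... */
	} while (err > 0);

	mlx5_rsc_dump_cmd_destroy(cmd);
free_page:
	__free_page(page);
	return err < 0 ? err : 0;
}
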
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 20b907dc1e29..3a199a03d929 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -3,6 +3,7 @@
#include "health.h"
#include "lib/eq.h"
+#include "lib/mlx5.h"
int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
{
@@ -197,10 +198,114 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
struct devlink_health_reporter *reporter, char *err_str,
struct mlx5e_err_ctx *err_ctx)
{
- netdev_err(priv->netdev, err_str);
+ netdev_err(priv->netdev, "%s\n", err_str);
if (!reporter)
return err_ctx->recover(err_ctx->ctx);
return devlink_health_report(reporter, err_str, err_ctx);
}
+
+#define MLX5_HEALTH_DEVLINK_MAX_SIZE 1024
+static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
+ const void *value, u32 value_len)
+
+{
+ u32 data_size;
+ u32 offset;
+ int err;
+
+ for (offset = 0; offset < value_len; offset += data_size) {
+ data_size = value_len - offset;
+ if (data_size > MLX5_HEALTH_DEVLINK_MAX_SIZE)
+ data_size = MLX5_HEALTH_DEVLINK_MAX_SIZE;
+ err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_rsc_dump_cmd *cmd;
+ struct page *page;
+ int cmd_err, err;
+ int end_err;
+ int size;
+
+ if (IS_ERR_OR_NULL(mdev->rsc_dump))
+ return -EOPNOTSUPP;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
+ if (err)
+ return err;
+
+ cmd = mlx5_rsc_dump_cmd_create(mdev, key);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto free_page;
+ }
+
+ do {
+ cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
+ if (cmd_err < 0) {
+ err = cmd_err;
+ goto destroy_cmd;
+ }
+
+ err = mlx5e_health_rsc_fmsg_binary(fmsg, page_address(page), size);
+ if (err)
+ goto destroy_cmd;
+
+ } while (cmd_err > 0);
+
+destroy_cmd:
+ mlx5_rsc_dump_cmd_destroy(cmd);
+ end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
+ if (end_err)
+ err = end_err;
+free_page:
+ __free_page(page);
+ return err;
+}
+
+int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ int queue_idx, char *lbl)
+{
+ struct mlx5_rsc_key key = {};
+ int err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = queue_idx;
+ key.size = PAGE_SIZE;
+ key.num_of_obj1 = 1;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, lbl);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "index", queue_idx);
+ if (err)
+ return err;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_end(fmsg);
+}
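
A usage sketch (illustrative; example_reporter_dump() and the "SQ" label are assumptions) of how a devlink health reporter's dump callback can attach a queue's context with the new helper:

/* Illustrative only: emit one queue object into the reporter's fmsg. */
static int example_reporter_dump(struct mlx5e_priv *priv,
				 struct devlink_fmsg *fmsg, int sqn)
{
	return mlx5e_health_queue_dump(priv, fmsg, sqn, "SQ");
}
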
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index d3693fa547ac..e90e3aec422f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -5,6 +5,7 @@
#define __MLX5E_EN_HEALTH_H
#include "en.h"
+#include "diag/rsc_dump.h"
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
@@ -36,6 +37,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
struct mlx5e_err_ctx {
int (*recover)(void *ctx);
+ int (*dump)(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, void *ctx);
void *ctx;
};
@@ -48,6 +50,8 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
int mlx5e_health_create_reporters(struct mlx5e_priv *priv);
void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv);
void mlx5e_health_channels_update(struct mlx5e_priv *priv);
-
-
+int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key,
+ struct devlink_fmsg *fmsg);
+int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ int queue_idx, char *lbl);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index fce6eccdcf8b..2c4a670c8ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -343,64 +343,76 @@ out:
return err;
}
-static u32 fec_supported_speeds[] = {
- 10000,
- 40000,
- 25000,
- 50000,
- 56000,
- 100000
+enum mlx5e_fec_supported_link_mode {
+ MLX5E_FEC_SUPPORTED_LINK_MODES_10G_40G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_25G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_50G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_56G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_100G,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X,
+ MLX5E_MAX_FEC_SUPPORTED_LINK_MODE,
};
-#define MLX5E_FEC_SUPPORTED_SPEEDS ARRAY_SIZE(fec_supported_speeds)
+#define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X
+
+#define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \
+ do { \
+ u16 *_policy = &(policy); \
+ u32 *_buf = buf; \
+ \
+ if (write) \
+ MLX5_SET(pplm_reg, _buf, fec_override_admin_##link, *_policy); \
+ else \
+ *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
+ } while (0)
+
+#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
+ do { \
+ u16 *__policy = &(policy); \
+ bool _write = (write); \
+ \
+ if (_write && *__policy) \
+ *__policy = find_first_bit((u_long *)__policy, \
+ sizeof(u16) * BITS_PER_BYTE);\
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
+ if (!_write && *__policy) \
+ *__policy = 1 << *__policy; \
+ } while (0)
/* get/set FEC admin field for a given speed */
-static int mlx5e_fec_admin_field(u32 *pplm,
- u8 *fec_policy,
- bool write,
- u32 speed)
+static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
+ enum mlx5e_fec_supported_link_mode link_mode)
{
- switch (speed) {
- case 10000:
- case 40000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_10g_40g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_10g_40g, *fec_policy);
+ switch (link_mode) {
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_10G_40G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 10g_40g);
break;
- case 25000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_25g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_25g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_25G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 25g);
break;
- case 50000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_50g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_50g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_50G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g);
break;
- case 56000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_56g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_56g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_56G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 56g);
break;
- case 100000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_100g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_100g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_100G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
break;
default:
return -EINVAL;
@@ -408,32 +420,40 @@ static int mlx5e_fec_admin_field(u32 *pplm,
return 0;
}
+#define MLX5E_GET_FEC_OVERRIDE_CAP(buf, link) \
+ MLX5_GET(pplm_reg, buf, fec_override_cap_##link)
+
/* returns FEC capabilities for a given speed */
-static int mlx5e_get_fec_cap_field(u32 *pplm,
- u8 *fec_cap,
- u32 speed)
+static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
+ enum mlx5e_fec_supported_link_mode link_mode)
{
- switch (speed) {
- case 10000:
- case 40000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_10g_40g);
+ switch (link_mode) {
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_10G_40G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 10g_40g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_25G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 25g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_50G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 50g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_56G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 56g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_100G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 100g);
break;
- case 25000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_25g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 50g_1x);
break;
- case 50000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_50g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 100g_2x);
break;
- case 56000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_56g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_4x);
break;
- case 100000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_100g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_8x);
break;
default:
return -EINVAL;
@@ -441,13 +461,14 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
return 0;
}
-int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps)
+bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
{
+ bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
- u32 current_fec_speed;
int err;
+ int i;
if (!MLX5_CAP_GEN(dev, pcam_reg))
return -EOPNOTSUPP;
@@ -458,23 +479,30 @@ int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps)
MLX5_SET(pplm_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
if (err)
- return err;
+ return false;
- err = mlx5e_port_linkspeed(dev, &current_fec_speed);
- if (err)
- return err;
+ for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
+ u16 fec_caps;
- return mlx5e_get_fec_cap_field(out, fec_caps, current_fec_speed);
+ if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ break;
+
+ mlx5e_get_fec_cap_field(out, &fec_caps, i);
+ if (fec_caps & fec_policy)
+ return true;
+ }
+ return false;
}
int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
- u8 *fec_configured_mode)
+ u16 *fec_configured_mode)
{
+ bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
- u32 link_speed;
int err;
+ int i;
if (!MLX5_CAP_GEN(dev, pcam_reg))
return -EOPNOTSUPP;
@@ -490,24 +518,28 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
*fec_mode_active = MLX5_GET(pplm_reg, out, fec_mode_active);
if (!fec_configured_mode)
- return 0;
+ goto out;
- err = mlx5e_port_linkspeed(dev, &link_speed);
- if (err)
- return err;
+ *fec_configured_mode = 0;
+ for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
+ if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ break;
- return mlx5e_fec_admin_field(out, fec_configured_mode, 0, link_speed);
+ mlx5e_fec_admin_field(out, fec_configured_mode, 0, i);
+ if (*fec_configured_mode != 0)
+ goto out;
+ }
+out:
+ return 0;
}
-int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
+int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
{
- u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
- bool fec_mode_not_supp_in_speed = false;
+ bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
- u8 fec_policy_auto = 0;
- u8 fec_caps = 0;
+ u16 fec_policy_auto = 0;
int err;
int i;
@@ -517,6 +549,9 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
if (!MLX5_CAP_PCAM_REG(dev, pplm))
return -EOPNOTSUPP;
+ if (fec_policy >= (1 << MLX5E_FEC_LLRS_272_257_1) && !fec_50g_per_lane)
+ return -EOPNOTSUPP;
+
MLX5_SET(pplm_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
if (err)
@@ -524,25 +559,31 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
MLX5_SET(pplm_reg, out, local_port, 1);
- for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
- mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
- /* policy supported for link speed, or policy is auto */
- if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
- mlx5e_fec_admin_field(out, &fec_policy, 1,
- fec_supported_speeds[i]);
- } else {
- /* turn off FEC if supported. Else, leave it the same */
- if (fec_caps & fec_policy_nofec)
- mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
- fec_supported_speeds[i]);
- fec_mode_not_supp_in_speed = true;
- }
- }
+ for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
+ u16 conf_fec = fec_policy;
+ u16 fec_caps = 0;
+
+ if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ break;
- if (fec_mode_not_supp_in_speed)
- mlx5_core_dbg(dev,
- "FEC policy 0x%x is not supported for some speeds",
- fec_policy);
+ /* RS FEC in ethtool is mapped to MLX5E_FEC_RS_528_514
+ * for link modes up to 25G per lane and to
+ * MLX5E_FEC_RS_544_514 for the new link modes based on
+ * 50G per lane
+ */
+ if (conf_fec == (1 << MLX5E_FEC_RS_528_514) &&
+ i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
+ conf_fec = (1 << MLX5E_FEC_RS_544_514);
+
+ mlx5e_get_fec_cap_field(out, &fec_caps, i);
+
+ /* policy supported for link speed */
+ if (fec_caps & conf_fec)
+ mlx5e_fec_admin_field(out, &conf_fec, 1, i);
+ else
+ /* set FEC to auto */
+ mlx5e_fec_admin_field(out, &fec_policy_auto, 1, i);
+ }
return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
}
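
The only difference between MLX5E_FEC_OVERRIDE_ADMIN_POLICY and its _50G_ variant above is the field encoding: the PPLM override fields for the 50G-per-lane link modes hold a bit index, while the driver keeps the policy as a one-hot bitmask, so the macro converts mask to index on write (find_first_bit()) and index back to mask on read (1 << value). A rough, self-contained sketch of that conversion with illustrative names; the round trip is only lossless when exactly one policy bit is set, which is why the write path converts only a non-zero policy.

#include <stdio.h>
#include <stdint.h>

/* mask -> index: position of the lowest set bit (find_first_bit() in the driver). */
static unsigned int mask_to_index(uint16_t mask)
{
	unsigned int idx = 0;

	while (mask && !(mask & 1)) {
		mask >>= 1;
		idx++;
	}
	return idx;
}

/* index -> mask: the inverse, used on the read path. */
static uint16_t index_to_mask(unsigned int idx)
{
	return (uint16_t)(1u << idx);
}

int main(void)
{
	uint16_t policy = 1u << 7;	/* e.g. MLX5E_FEC_RS_544_514 as a bitmask */
	unsigned int idx = mask_to_index(policy);

	printf("index %u, mask 0x%x\n", idx, index_to_mask(idx));
	return 0;
}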
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index 4a7f4497692b..a2ddd446dd59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -60,15 +60,17 @@ int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
-int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps);
+bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy);
int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
- u8 *fec_configured_mode);
-int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy);
+ u16 *fec_configured_mode);
+int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy);
enum {
MLX5E_FEC_NOFEC,
MLX5E_FEC_FIRECODE,
MLX5E_FEC_RS_528_514,
+ MLX5E_FEC_RS_544_514 = 7,
+ MLX5E_FEC_LLRS_272_257_1 = 9,
};
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 6c72b592315b..ce2b41c61090 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -102,19 +102,6 @@ out:
return err;
}
-void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
-{
- struct mlx5e_priv *priv = icosq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {};
-
- err_ctx.ctx = icosq;
- err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover;
- sprintf(err_str, "ERR CQE on ICOSQ: 0x%x", icosq->sqn);
-
- mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
{
struct net_device *dev = rq->netdev;
@@ -171,19 +158,6 @@ out:
return err;
}
-void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
-{
- struct mlx5e_priv *priv = rq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {};
-
- err_ctx.ctx = rq;
- err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover;
- sprintf(err_str, "ERR CQE on RQ: 0x%x", rq->rqn);
-
- mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
{
struct mlx5e_icosq *icosq;
@@ -201,21 +175,6 @@ static int mlx5e_rx_reporter_timeout_recover(void *ctx)
return err;
}
-void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
-{
- struct mlx5e_icosq *icosq = &rq->channel->icosq;
- struct mlx5e_priv *priv = rq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {};
-
- err_ctx.ctx = rq;
- err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
- sprintf(err_str, "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x\n",
- icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
-
- mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
return err_ctx->recover(err_ctx->ctx);
@@ -371,10 +330,235 @@ unlock:
return err;
}
+static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5e_txqsq *icosq = ctx;
+ struct mlx5_rsc_key key = {};
+ int err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "ICOSQ");
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = icosq->sqn;
+ key.num_of_obj1 = 1;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "send_buff");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
+ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_reporter_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5_rsc_key key = {};
+ struct mlx5e_rq *rq = ctx;
+ int err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ");
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = rq->rqn;
+ key.num_of_obj1 = 1;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "receive_buff");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_RCV_BUFF;
+ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_reporter_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_rsc_key key = {};
+ int i, err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
+ if (err)
+ return err;
+
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
+
+ err = mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ");
+ if (err)
+ return err;
+ }
+
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
+ struct mlx5e_err_ctx *err_ctx,
+ struct devlink_fmsg *fmsg)
+{
+ return err_ctx->dump(priv, fmsg, err_ctx->ctx);
+}
+
+static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *context,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_err_ctx *err_ctx = context;
+
+ return err_ctx ? mlx5e_rx_reporter_dump_from_ctx(priv, err_ctx, fmsg) :
+ mlx5e_rx_reporter_dump_all_rqs(priv, fmsg);
+}
+
+void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+{
+ struct mlx5e_icosq *icosq = &rq->channel->icosq;
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
+ err_ctx.dump = mlx5e_rx_reporter_dump_rq;
+ snprintf(err_str, sizeof(err_str),
+ "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x",
+ icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover;
+ err_ctx.dump = mlx5e_rx_reporter_dump_rq;
+ snprintf(err_str, sizeof(err_str), "ERR CQE on RQ: 0x%x", rq->rqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
+{
+ struct mlx5e_priv *priv = icosq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = icosq;
+ err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover;
+ err_ctx.dump = mlx5e_rx_reporter_dump_icosq;
+ snprintf(err_str, sizeof(err_str), "ERR CQE on ICOSQ: 0x%x", icosq->sqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
.name = "rx",
.recover = mlx5e_rx_reporter_recover,
.diagnose = mlx5e_rx_reporter_diagnose,
+ .dump = mlx5e_rx_reporter_dump,
};
#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
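
Both the error-triggered reports above and a user-requested devlink health dump land in the same .dump callback; the only difference is whether an error context is passed in. A minimal, self-contained sketch of that dispatch follows; the struct and callbacks here are illustrative stand-ins, not the driver's types.

#include <stdio.h>
#include <stddef.h>

struct err_ctx {
	int (*dump)(void *obj);		/* per-object dump, e.g. one RQ or SQ */
	void *obj;
};

static int dump_one(void *obj)
{
	printf("dumping queue %d\n", *(int *)obj);
	return 0;
}

static int dump_all(void)
{
	printf("dumping all queues\n");
	return 0;
}

/* A NULL context means the user asked for a full dump via devlink. */
static int reporter_dump(struct err_ctx *ctx)
{
	return ctx ? ctx->dump(ctx->obj) : dump_all();
}

int main(void)
{
	int rqn = 3;
	struct err_ctx ctx = { .dump = dump_one, .obj = &rqn };

	reporter_dump(&ctx);	/* error-triggered dump */
	reporter_dump(NULL);	/* user-triggered full dump */
	return 0;
}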
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index b468549e96ff..2028ce9b151f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -82,19 +82,6 @@ out:
return err;
}
-void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
-{
- struct mlx5e_priv *priv = sq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {0};
-
- err_ctx.ctx = sq;
- err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
- sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn);
-
- mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_tx_reporter_timeout_recover(void *ctx)
{
struct mlx5_eq_comp *eq;
@@ -110,22 +97,6 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
return err;
}
-int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
-{
- struct mlx5e_priv *priv = sq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx;
-
- err_ctx.ctx = sq;
- err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
- sprintf(err_str,
- "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
- sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
- jiffies_to_usecs(jiffies - sq->txq->trans_start));
-
- return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
-}
-
/* state lock cannot be grabbed within this function.
* It can cause a deadlock or a read-after-free.
*/
@@ -275,10 +246,162 @@ unlock:
return err;
}
+static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5_rsc_key key = {};
+ struct mlx5e_txqsq *sq = ctx;
+ int err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ");
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = sq->sqn;
+ key.num_of_obj1 = 1;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "send_buff");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
+ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_reporter_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_rsc_key key = {};
+ int i, tc, err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
+ if (err)
+ return err;
+
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ struct mlx5e_txqsq *sq = &c->sq[tc];
+
+ err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
+ if (err)
+ return err;
+ }
+ }
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
+ struct mlx5e_err_ctx *err_ctx,
+ struct devlink_fmsg *fmsg)
+{
+ return err_ctx->dump(priv, fmsg, err_ctx->ctx);
+}
+
+static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *context,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_err_ctx *err_ctx = context;
+
+ return err_ctx ? mlx5e_tx_reporter_dump_from_ctx(priv, err_ctx, fmsg) :
+ mlx5e_tx_reporter_dump_all_sqs(priv, fmsg);
+}
+
+void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
+ err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+ snprintf(err_str, sizeof(err_str), "ERR CQE on SQ: 0x%x", sq->sqn);
+
+ mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
+}
+
+int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
+ err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+ snprintf(err_str, sizeof(err_str),
+ "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
+ sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+ jiffies_to_usecs(jiffies - sq->txq->trans_start));
+
+ return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
+}
+
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
.name = "tx",
.recover = mlx5e_tx_reporter_recover,
.diagnose = mlx5e_tx_reporter_diagnose,
+ .dump = mlx5e_tx_reporter_dump,
};
#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index cf58c9637904..29626c6c9c25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -433,7 +433,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
if (!ipsec)
return;
- drain_workqueue(ipsec->wq);
destroy_workqueue(ipsec->wq);
ida_destroy(&ipsec->halloc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d674cb679895..68b520df07e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -633,6 +633,8 @@ static const u32 pplm_fec_2_ethtool[] = {
[MLX5E_FEC_NOFEC] = ETHTOOL_FEC_OFF,
[MLX5E_FEC_FIRECODE] = ETHTOOL_FEC_BASER,
[MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
+ [MLX5E_FEC_RS_544_514] = ETHTOOL_FEC_RS,
+ [MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_FEC_LLRS,
};
static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
@@ -650,45 +652,48 @@ static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
return 0;
}
-/* we use ETHTOOL_FEC_* offset and apply it to ETHTOOL_LINK_MODE_FEC_*_BIT */
-static u32 ethtool_fec2ethtool_caps(u_long ethtool_fec_code)
-{
- u32 offset;
-
- offset = find_first_bit(&ethtool_fec_code, sizeof(u32));
- offset -= ETHTOOL_FEC_OFF_BIT;
- offset += ETHTOOL_LINK_MODE_FEC_NONE_BIT;
+#define MLX5E_ADVERTISE_SUPPORTED_FEC(mlx5_fec, ethtool_fec) \
+ do { \
+ if (mlx5e_fec_in_caps(dev, 1 << (mlx5_fec))) \
+ __set_bit(ethtool_fec, \
+ link_ksettings->link_modes.supported);\
+ } while (0)
- return offset;
-}
+static const u32 pplm_fec_2_ethtool_linkmodes[] = {
+ [MLX5E_FEC_NOFEC] = ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ [MLX5E_FEC_FIRECODE] = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ [MLX5E_FEC_RS_528_514] = ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ [MLX5E_FEC_RS_544_514] = ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ [MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+};
static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- u_long fec_caps = 0;
- u32 active_fec = 0;
- u32 offset;
+ u_long active_fec = 0;
u32 bitn;
int err;
- err = mlx5e_get_fec_caps(dev, (u8 *)&fec_caps);
+ err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL);
if (err)
return (err == -EOPNOTSUPP) ? 0 : err;
- err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
- if (err)
- return err;
-
- for_each_set_bit(bitn, &fec_caps, ARRAY_SIZE(pplm_fec_2_ethtool)) {
- u_long ethtool_bitmask = pplm_fec_2_ethtool[bitn];
-
- offset = ethtool_fec2ethtool_caps(ethtool_bitmask);
- __set_bit(offset, link_ksettings->link_modes.supported);
- }
-
- active_fec = pplm2ethtool_fec(active_fec, sizeof(u32) * BITS_PER_BYTE);
- offset = ethtool_fec2ethtool_caps(active_fec);
- __set_bit(offset, link_ksettings->link_modes.advertising);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_NOFEC,
+ ETHTOOL_LINK_MODE_FEC_NONE_BIT);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_FIRECODE,
+ ETHTOOL_LINK_MODE_FEC_BASER_BIT);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_RS_528_514,
+ ETHTOOL_LINK_MODE_FEC_RS_BIT);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1,
+ ETHTOOL_LINK_MODE_FEC_LLRS_BIT);
+
+ /* active fec is a single-bit mask; find which bit is set and
+ * advertise the corresponding ethtool link mode bit
+ */
+ bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE);
+ if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes))
+ __set_bit(pplm_fec_2_ethtool_linkmodes[bitn],
+ link_ksettings->link_modes.advertising);
return 0;
}
@@ -1511,7 +1516,7 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u8 fec_configured = 0;
+ u16 fec_configured = 0;
u32 fec_active = 0;
int err;
@@ -1527,7 +1532,7 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
return -EOPNOTSUPP;
fecparam->fec = pplm2ethtool_fec((u_long)fec_configured,
- sizeof(u8) * BITS_PER_BYTE);
+ sizeof(u16) * BITS_PER_BYTE);
return 0;
}
@@ -1537,10 +1542,14 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u8 fec_policy = 0;
+ u16 fec_policy = 0;
int mode;
int err;
+ if (bitmap_weight((unsigned long *)&fecparam->fec,
+ ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+ return -EOPNOTSUPP;
+
for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
if (!(pplm_fec_2_ethtool[mode] & fecparam->fec))
continue;
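
On the ethtool get path the active FEC mode arrives from PPLM as a one-hot value, so the code locates the set bit and indexes a table of ethtool link-mode bits; on the set path, requests with more than one FEC bit set are rejected up front via bitmap_weight(). A rough, self-contained sketch of the get-path mapping; the enum mirrors the PPLM encoding above, but the link-mode numbers are placeholders rather than the real ETHTOOL_LINK_MODE_FEC_*_BIT values.

#include <stdio.h>
#include <stdint.h>

enum { FEC_NOFEC, FEC_FIRECODE, FEC_RS_528_514, FEC_RS_544_514 = 7,
       FEC_LLRS_272_257_1 = 9, FEC_MODES = 10 };

/* Placeholder bit numbers standing in for ETHTOOL_LINK_MODE_FEC_*_BIT. */
static const unsigned int fec_to_linkmode[FEC_MODES] = {
	[FEC_NOFEC]		= 0,
	[FEC_FIRECODE]		= 1,
	[FEC_RS_528_514]	= 2,
	[FEC_RS_544_514]	= 2,	/* both RS variants advertise the same bit */
	[FEC_LLRS_272_257_1]	= 3,
};

int main(void)
{
	uint16_t active_fec = 1u << FEC_RS_544_514;	/* one-hot value from PPLM */
	unsigned int bit = 0;

	while (bit < FEC_MODES && !(active_fec & (1u << bit)))
		bit++;					/* find_first_bit() in the driver */

	if (bit < FEC_MODES)
		printf("advertise ethtool link-mode bit %u\n", fec_to_linkmode[bit]);
	return 0;
}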
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f554cfddcf4e..204a26bf0a5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -70,6 +70,7 @@
#include "diag/fw_tracer.h"
#include "ecpf.h"
#include "lib/hv_vhca.h"
+#include "diag/rsc_dump.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -880,6 +881,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
+ dev->rsc_dump = mlx5_rsc_dump_create(dev);
return 0;
@@ -909,6 +911,7 @@ err_devcom:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
+ mlx5_rsc_dump_destroy(dev);
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_dm_cleanup(dev);
@@ -1079,6 +1082,12 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_hv_vhca_init(dev->hv_vhca);
+ err = mlx5_rsc_dump_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init Resource dump\n");
+ goto err_rsc_dump;
+ }
+
err = mlx5_fpga_device_start(dev);
if (err) {
mlx5_core_err(dev, "fpga device start failed %d\n", err);
@@ -1134,6 +1143,8 @@ err_tls_start:
err_ipsec_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
+ mlx5_rsc_dump_cleanup(dev);
+err_rsc_dump:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
@@ -1155,6 +1166,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
+ mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_eq_table_destroy(dev);
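
The resource-dump step is slotted into mlx5_load() with the usual goto-unwind convention: each init step that can fail gets a matching error label, and the labels run in reverse initialization order so everything set up before the failing step is torn down. A minimal, self-contained sketch of that pattern with illustrative step names:

#include <stdio.h>

static int init_a(void) { return 0; }
static void cleanup_a(void) { }
static int init_b(void) { return 0; }	/* the newly inserted step */
static void cleanup_b(void) { }
static int init_c(void) { return -1; }	/* pretend a later step fails */

static int load(void)
{
	int err;

	err = init_a();
	if (err)
		return err;
	err = init_b();
	if (err)
		goto err_b;
	err = init_c();
	if (err)
		goto err_c;
	return 0;

err_c:
	cleanup_b();	/* unwind in reverse order of initialization */
err_b:
	cleanup_a();
	return err;
}

int main(void)
{
	printf("load() = %d\n", load());
	return 0;
}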
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e9f791c43f20..0d630096a28c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -82,7 +82,7 @@ struct mlxsw_core {
struct mlxsw_core_port *ports;
unsigned int max_ports;
bool fw_flash_in_progress;
- unsigned long driver_priv[0];
+ unsigned long driver_priv[];
/* driver_priv has to be always the last item */
};
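
Several of the mlxsw hunks in this series are the same mechanical conversion: an old-style zero-length array member (foo[0]) becomes a C99 flexible array member (foo[]), and allocations size the trailing array explicitly (the kernel helper for that is struct_size(), visible further down in spectrum_span.c). A small, self-contained sketch of the pattern:

#include <stdio.h>
#include <stdlib.h>

struct table {
	int entries_count;
	unsigned long entries[];	/* flexible array member, was entries[0] */
};

int main(void)
{
	int n = 8;
	/* userspace equivalent of kzalloc(struct_size(tbl, entries, n), GFP_KERNEL) */
	struct table *tbl = calloc(1, sizeof(*tbl) + n * sizeof(tbl->entries[0]));

	if (!tbl)
		return 1;
	tbl->entries_count = n;
	printf("%d entries allocated\n", tbl->entries_count);
	free(tbl);
	return 0;
}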
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index feb4672a5ac0..bd2207f60722 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -72,7 +72,7 @@ struct mlxsw_afk_key_info {
* is index inside "blocks"
*/
struct mlxsw_afk_element_usage elusage;
- const struct mlxsw_afk_block *blocks[0];
+ const struct mlxsw_afk_block *blocks[];
};
static bool
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 7358b5bc7eb6..d78e790ba94a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -6316,7 +6316,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
return -EINVAL;
}
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
@@ -6472,7 +6472,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
return -EINVAL;
}
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
@@ -6549,7 +6549,7 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
if (!info->linking)
break;
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
@@ -6609,7 +6609,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
if (!info->linking)
break;
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a0f1f9dceec5..bed86f4825a8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -168,12 +168,8 @@ struct mlxsw_sp {
struct notifier_block netdevice_nb;
struct mlxsw_sp_ptp_clock *clock;
struct mlxsw_sp_ptp_state *ptp_state;
-
struct mlxsw_sp_counter_pool *counter_pool;
- struct {
- struct mlxsw_sp_span_entry *entries;
- int entries_count;
- } span;
+ struct mlxsw_sp_span *span;
const struct mlxsw_fw_rev *req_rev;
const char *fw_filename;
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
@@ -468,10 +464,6 @@ int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
const struct net_device *vxlan_dev);
-struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- u16 vid,
- struct netlink_ext_ack *extack);
extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
@@ -571,10 +563,10 @@ void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
-struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
enum mlxsw_sp_l3proto ul_proto,
const union mlxsw_sp_l3addr *ul_sip,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
index 09ee0a807747..a9fff8adc75e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -60,7 +60,7 @@ static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
struct mlxsw_sp1_kvdl_part {
struct mlxsw_sp1_kvdl_part_info info;
- unsigned long usage[0]; /* Entries */
+ unsigned long usage[]; /* Entries */
};
struct mlxsw_sp1_kvdl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 8d14770766b4..3a73d654017f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -45,7 +45,7 @@ struct mlxsw_sp2_kvdl_part {
unsigned int usage_bit_count;
unsigned int indexes_per_usage_bit;
unsigned int last_allocated_bit;
- unsigned long usage[0]; /* Usage bits */
+ unsigned long usage[]; /* Usage bits */
};
struct mlxsw_sp2_kvdl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 3d3cca596116..9368b93dab38 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -58,7 +58,7 @@ struct mlxsw_sp_acl_ruleset {
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
unsigned int ref_count;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -71,7 +71,7 @@ struct mlxsw_sp_acl_rule {
u64 last_used;
u64 last_packets;
u64 last_bytes;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index 3a2de13fcb68..dbd3bebf11ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -13,7 +13,7 @@
struct mlxsw_sp_acl_bf {
struct mutex lock; /* Protects Bloom Filter updates. */
unsigned int bank_size;
- refcount_t refcnt[0];
+ refcount_t refcnt[];
};
/* Bloom filter uses a crc-16 hash over chunks of data which contain 4 key
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index e993159e8e4c..430da69003d8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -224,7 +224,7 @@ struct mlxsw_sp_acl_tcam_vchunk;
struct mlxsw_sp_acl_tcam_chunk {
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
struct mlxsw_sp_acl_tcam_region *region;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -243,7 +243,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
struct mlxsw_sp_acl_tcam_entry {
struct mlxsw_sp_acl_tcam_ventry *ventry;
struct mlxsw_sp_acl_tcam_chunk *chunk;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 5965913565a5..96437992b102 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -20,7 +20,7 @@ struct mlxsw_sp_acl_tcam {
struct mutex lock; /* guards vregion list */
struct list_head vregion_list;
u32 vregion_rehash_intrvl; /* ms */
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -86,7 +86,7 @@ struct mlxsw_sp_acl_tcam_region {
char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
struct mlxsw_afk_key_info *key_info;
struct mlxsw_sp *mlxsw_sp;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 83c2e1e5f216..6a02ef9ec00e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/bitops.h>
+#include <linux/spinlock.h>
#include "spectrum_cnt.h"
@@ -18,6 +19,7 @@ struct mlxsw_sp_counter_sub_pool {
struct mlxsw_sp_counter_pool {
unsigned int pool_size;
unsigned long *usage; /* Usage bitmap */
+ spinlock_t counter_pool_lock; /* Protects counter pool allocations */
struct mlxsw_sp_counter_sub_pool *sub_pools;
};
@@ -87,6 +89,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
return -ENOMEM;
+ spin_lock_init(&pool->counter_pool_lock);
pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
@@ -139,25 +142,35 @@ int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_counter_sub_pool *sub_pool;
unsigned int entry_index;
unsigned int stop_index;
- int i;
+ int i, err;
sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
stop_index = sub_pool->base_index + sub_pool->size;
entry_index = sub_pool->base_index;
+ spin_lock(&pool->counter_pool_lock);
entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
- if (entry_index == stop_index)
- return -ENOBUFS;
+ if (entry_index == stop_index) {
+ err = -ENOBUFS;
+ goto err_alloc;
+ }
/* The sub-pools can contain non-integer number of entries
* so we must check for overflow
*/
- if (entry_index + sub_pool->entry_size > stop_index)
- return -ENOBUFS;
+ if (entry_index + sub_pool->entry_size > stop_index) {
+ err = -ENOBUFS;
+ goto err_alloc;
+ }
for (i = 0; i < sub_pool->entry_size; i++)
__set_bit(entry_index + i, pool->usage);
+ spin_unlock(&pool->counter_pool_lock);
*p_counter_index = entry_index;
return 0;
+
+err_alloc:
+ spin_unlock(&pool->counter_pool_lock);
+ return err;
}
void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
@@ -171,6 +184,8 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(counter_index >= pool->pool_size))
return;
sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ spin_lock(&pool->counter_pool_lock);
for (i = 0; i < sub_pool->entry_size; i++)
__clear_bit(counter_index + i, pool->usage);
+ spin_unlock(&pool->counter_pool_lock);
}
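
Apart from the new spinlock, the counter allocator keeps its existing logic: scan the sub-pool's range for a free index, check that a whole entry of entry_size consecutive bits still fits before the range ends, then mark those bits used. A single-threaded, self-contained sketch of that allocation logic (locking omitted, names illustrative):

#include <stdio.h>
#include <stdbool.h>

#define POOL_SIZE 64

static bool usage[POOL_SIZE];	/* stands in for the driver's usage bitmap */

/* Allocate entry_size consecutive slots inside [base, base + size). */
static int pool_alloc(int base, int size, int entry_size, int *index)
{
	int stop = base + size;
	int i, j;

	for (i = base; i < stop && usage[i]; i++)
		;
	/* sub-pools need not be a multiple of entry_size, so check the tail */
	if (i == stop || i + entry_size > stop)
		return -1;	/* -ENOBUFS in the driver */

	for (j = 0; j < entry_size; j++)
		usage[i + j] = true;
	*index = i;
	return 0;
}

int main(void)
{
	int idx;

	if (!pool_alloc(0, 16, 2, &idx))
		printf("allocated counter index %d\n", idx);
	return 0;
}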
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 8df3cb21baa6..65486a90b526 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -8,6 +8,7 @@
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
+#include <linux/refcount.h>
#include "spectrum.h"
#include "reg.h"
@@ -24,7 +25,7 @@ struct mlxsw_sp_fid_core {
struct mlxsw_sp_fid {
struct list_head list;
struct mlxsw_sp_rif *rif;
- unsigned int ref_count;
+ refcount_t ref_count;
u16 fid_index;
struct mlxsw_sp_fid_family *fid_family;
struct rhash_head ht_node;
@@ -149,7 +150,7 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->fid_ht, &fid_index,
mlxsw_sp_fid_ht_params);
if (fid)
- fid->ref_count++;
+ refcount_inc(&fid->ref_count);
return fid;
}
@@ -183,7 +184,7 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->vni_ht, &vni,
mlxsw_sp_fid_vni_ht_params);
if (fid)
- fid->ref_count++;
+ refcount_inc(&fid->ref_count);
return fid;
}
@@ -1030,7 +1031,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(fid, &fid_family->fids_list, list) {
if (!fid->fid_family->ops->compare(fid, arg))
continue;
- fid->ref_count++;
+ refcount_inc(&fid->ref_count);
return fid;
}
@@ -1075,7 +1076,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_insert;
list_add(&fid->list, &fid_family->fids_list);
- fid->ref_count++;
+ refcount_set(&fid->ref_count, 1);
return fid;
err_rhashtable_insert:
@@ -1093,7 +1094,7 @@ void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- if (--fid->ref_count != 0)
+ if (!refcount_dec_and_test(&fid->ref_count))
return;
list_del(&fid->list);
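
The FID reference count moves from a plain integer to refcount_t, i.e. refcount_set()/refcount_inc()/refcount_dec_and_test(), which saturates instead of wrapping on overflow and frees the object only on the final put. A rough userspace analogue of the take/put shape using C11 atomics follows; it is only an illustration and does not reproduce refcount_t's saturation checks.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fid {
	atomic_int ref_count;
	int index;
};

static struct fid *fid_get(struct fid *fid)
{
	atomic_fetch_add(&fid->ref_count, 1);	/* refcount_inc() */
	return fid;
}

static void fid_put(struct fid *fid)
{
	/* refcount_dec_and_test(): free only when the last reference drops */
	if (atomic_fetch_sub(&fid->ref_count, 1) == 1)
		free(fid);
}

int main(void)
{
	struct fid *fid = calloc(1, sizeof(*fid));

	if (!fid)
		return 1;
	atomic_store(&fid->ref_count, 1);	/* refcount_set(..., 1) */
	fid_get(fid);
	fid_put(fid);
	fid_put(fid);				/* last put frees the object */
	printf("done\n");
	return 0;
}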
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 1e4cdee7bcd7..20d72f1c0cee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -2,13 +2,15 @@
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include "spectrum.h"
struct mlxsw_sp_kvdl {
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
- unsigned long priv[0];
+ struct mutex kvdl_lock; /* Protects kvdl allocations */
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -22,6 +24,7 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
GFP_KERNEL);
if (!kvdl)
return -ENOMEM;
+ mutex_init(&kvdl->kvdl_lock);
kvdl->kvdl_ops = kvdl_ops;
mlxsw_sp->kvdl = kvdl;
@@ -31,6 +34,7 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_init:
+ mutex_destroy(&kvdl->kvdl_lock);
kfree(kvdl);
return err;
}
@@ -40,6 +44,7 @@ void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
+ mutex_destroy(&kvdl->kvdl_lock);
kfree(kvdl);
}
@@ -48,9 +53,14 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count, u32 *p_entry_index)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+ int err;
+
+ mutex_lock(&kvdl->kvdl_lock);
+ err = kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_entry_index);
+ mutex_unlock(&kvdl->kvdl_lock);
- return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
- entry_count, p_entry_index);
+ return err;
}
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
@@ -59,8 +69,10 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+ mutex_lock(&kvdl->kvdl_lock);
kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
entry_count, entry_index);
+ mutex_unlock(&kvdl->kvdl_lock);
}
int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 54275624718b..423eedebcd22 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -68,7 +68,7 @@ struct mlxsw_sp_mr_table {
struct list_head route_list;
struct rhashtable route_ht;
const struct mlxsw_sp_mr_table_ops *ops;
- char catchall_route_priv[0];
+ char catchall_route_priv[];
/* catchall_route_priv has to be always the last item */
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 2153bcc4b585..eced553fd4ef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -67,7 +67,7 @@ struct mlxsw_sp_nve_mc_record {
struct mlxsw_sp_nve_mc_list *mc_list;
const struct mlxsw_sp_nve_mc_record_ops *ops;
u32 kvdl_index;
- struct mlxsw_sp_nve_mc_entry entries[0];
+ struct mlxsw_sp_nve_mc_entry entries[];
};
struct mlxsw_sp_nve_mc_list {
@@ -744,6 +744,8 @@ static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
if (nve->num_nve_tunnels++ != 0)
return 0;
+ nve->config = *config;
+
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
&nve->tunnel_index);
if (err)
@@ -760,6 +762,7 @@ err_ops_init:
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
nve->tunnel_index);
err_kvdl_alloc:
+ memset(&nve->config, 0, sizeof(nve->config));
nve->num_nve_tunnels--;
return err;
}
@@ -840,8 +843,6 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
goto err_fid_vni_set;
}
- nve->config = config;
-
err = ops->fdb_replay(params->dev, params->vni, extack);
if (err)
goto err_fdb_replay;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 4a77b511ead2..634a9a949777 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -145,6 +145,9 @@ struct mlxsw_sp_rif_ops {
void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
@@ -988,17 +991,23 @@ __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
struct ip_tunnel *tun = netdev_priv(ol_dev);
struct net *net = dev_net(ol_dev);
- return __dev_get_by_index(net, tun->parms.link);
+ return dev_get_by_index_rcu(net, tun->parms.link);
}
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
- struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ struct net_device *d;
+ u32 tb_id;
+ rcu_read_lock();
+ d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
if (d)
- return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
+ tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
else
- return RT_TABLE_MAIN;
+ tb_id = RT_TABLE_MAIN;
+ rcu_read_unlock();
+
+ return tb_id;
}
static struct mlxsw_sp_rif *
@@ -1355,8 +1364,12 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
ipip_list_node);
list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
ipip_list_node) {
- struct net_device *ipip_ul_dev =
- __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+ struct net_device *ol_dev = ipip_entry->ol_dev;
+ struct net_device *ipip_ul_dev;
+
+ rcu_read_lock();
+ ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
return ipip_entry;
@@ -1722,9 +1735,12 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
ipip_list_node) {
- struct net_device *ipip_ul_dev =
- __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+ struct net_device *ol_dev = ipip_entry->ol_dev;
+ struct net_device *ipip_ul_dev;
+ rcu_read_lock();
+ ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
}
@@ -3711,9 +3727,15 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
{
- struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ struct net_device *ul_dev;
+ bool is_up;
+
+ rcu_read_lock();
+ ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
+ rcu_read_unlock();
- return ul_dev ? (ul_dev->flags & IFF_UP) : true;
+ return is_up;
}
static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
@@ -3840,10 +3862,14 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
if (!dev)
return 0;
- in_dev = __in_dev_get_rtnl(dev);
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
- fib_nh->fib_nh_flags & RTNH_F_LINKDOWN)
+ fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
+ rcu_read_unlock();
return 0;
+ }
+ rcu_read_unlock();
err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
if (err)
@@ -6233,7 +6259,7 @@ err_fib_event:
return NOTIFY_BAD;
}
-struct mlxsw_sp_rif *
+static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
@@ -6247,6 +6273,33 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ return !!mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+}
+
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
+{
+ struct mlxsw_sp_rif *rif;
+ u16 vid = 0;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ goto out;
+
+ /* We only return the VID for VLAN RIFs. Otherwise we return an
+ * invalid value (0).
+ */
+ if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
+ goto out;
+
+ vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+
+out:
+ return vid;
+}
+
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -6281,7 +6334,8 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
case NETDEV_UP:
return rif == NULL;
case NETDEV_DOWN:
- idev = __in_dev_get_rtnl(dev);
+ rcu_read_lock();
+ idev = __in_dev_get_rcu(dev);
if (idev && idev->ifa_list)
addr_list_empty = false;
@@ -6289,6 +6343,7 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
if (addr_list_empty && inet6_dev &&
!list_empty(&inet6_dev->addr_list))
addr_list_empty = false;
+ rcu_read_unlock();
/* macvlans do not have a RIF, but rather piggy back on the
* RIF of their lower device.
@@ -6411,11 +6466,6 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
-{
- return rif->fid;
-}
-
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -6631,8 +6681,8 @@ err_fid_port_vid_map:
return err;
}
-void
-mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+static void
+__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
@@ -6650,6 +6700,12 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_rif_subport_put(rif);
}
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+}
+
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
struct net_device *port_dev,
unsigned long event, u16 vid,
@@ -6667,7 +6723,7 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
l3_dev, extack);
case NETDEV_DOWN:
- mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+ __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
break;
}
@@ -6848,8 +6904,8 @@ err_rif_vrrp_add:
return err;
}
-void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *macvlan_dev)
+static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
{
struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
struct mlxsw_sp_rif *rif;
@@ -6866,6 +6922,12 @@ void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_index(rif->fid), false);
}
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
+{
+ __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+}
+
static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *macvlan_dev,
unsigned long event,
@@ -6875,7 +6937,7 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
case NETDEV_UP:
return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
case NETDEV_DOWN:
- mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+ __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
break;
}
@@ -7428,7 +7490,7 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
}
}
- return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, br_dev, vid, extack);
+ return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}
static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
@@ -7519,7 +7581,7 @@ static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
- return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, rif->dev, 0, extack);
+ return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}
static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
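
[Note] The spectrum router changes above add two small query helpers (mlxsw_sp_rif_exists(), mlxsw_sp_rif_vid()) and switch the address-list check in mlxsw_sp_rif_should_config() from the RTNL-only __in_dev_get_rtnl() to an RCU read-side lookup, so the check no longer assumes RTNL is held. A minimal sketch of that RCU pattern, assuming <linux/inetdevice.h>; the helper name here is illustrative, not from the driver:

static bool netdev_has_ipv4_addr(const struct net_device *dev)
{
	struct in_device *idev;
	bool ret = false;

	rcu_read_lock();
	idev = __in_dev_get_rcu(dev);
	if (idev && idev->ifa_list)	/* at least one IPv4 address configured */
		ret = true;
	rcu_read_unlock();

	return ret;
}
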
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 0cdd7954a085..9fb2e9d93929 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -3,6 +3,8 @@
#include <linux/if_bridge.h>
#include <linux/list.h>
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
@@ -14,38 +16,43 @@
#include "spectrum_span.h"
#include "spectrum_switchdev.h"
+struct mlxsw_sp_span {
+ struct work_struct work;
+ struct mlxsw_sp *mlxsw_sp;
+ atomic_t active_entries_count;
+ int entries_count;
+ struct mlxsw_sp_span_entry entries[0];
+};
+
+static void mlxsw_sp_span_respin_work(struct work_struct *work);
+
static u64 mlxsw_sp_span_occ_get(void *priv)
{
const struct mlxsw_sp *mlxsw_sp = priv;
- u64 occ = 0;
- int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- if (mlxsw_sp->span.entries[i].ref_count)
- occ++;
- }
-
- return occ;
+ return atomic_read(&mlxsw_sp->span->active_entries_count);
}
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- int i;
+ struct mlxsw_sp_span *span;
+ int i, entries_count;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
return -EIO;
- mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_SPAN);
- mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
- sizeof(struct mlxsw_sp_span_entry),
- GFP_KERNEL);
- if (!mlxsw_sp->span.entries)
+ entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
+ span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
+ if (!span)
return -ENOMEM;
+ span->entries_count = entries_count;
+ atomic_set(&span->active_entries_count, 0);
+ span->mlxsw_sp = mlxsw_sp;
+ mlxsw_sp->span = span;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
INIT_LIST_HEAD(&curr->bound_ports_list);
curr->id = i;
@@ -53,6 +60,7 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
mlxsw_sp_span_occ_get, mlxsw_sp);
+ INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
return 0;
}
@@ -62,14 +70,15 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
+ cancel_work_sync(&mlxsw_sp->span->work);
devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
}
- kfree(mlxsw_sp->span.entries);
+ kfree(mlxsw_sp->span);
}
static int
@@ -645,15 +654,16 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
int i;
/* find a free entry to use */
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- if (!mlxsw_sp->span.entries[i].ref_count) {
- span_entry = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ if (!mlxsw_sp->span->entries[i].ref_count) {
+ span_entry = &mlxsw_sp->span->entries[i];
break;
}
}
if (!span_entry)
return NULL;
+ atomic_inc(&mlxsw_sp->span->active_entries_count);
span_entry->ops = ops;
span_entry->ref_count = 1;
span_entry->to_dev = to_dev;
@@ -662,9 +672,11 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
return span_entry;
}
-static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
{
mlxsw_sp_span_entry_deconfigure(span_entry);
+ atomic_dec(&mlxsw_sp->span->active_entries_count);
}
struct mlxsw_sp_span_entry *
@@ -673,8 +685,8 @@ mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
{
int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
if (curr->ref_count && curr->to_dev == to_dev)
return curr;
@@ -694,8 +706,8 @@ mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
if (curr->ref_count && curr->id == span_id)
return curr;
@@ -726,7 +738,7 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
{
WARN_ON(!span_entry->ref_count);
if (--span_entry->ref_count == 0)
- mlxsw_sp_span_entry_destroy(span_entry);
+ mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
return 0;
}
@@ -736,8 +748,8 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
struct mlxsw_sp_span_inspected_port *p;
int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
list_for_each_entry(p, &curr->bound_ports_list, list)
if (p->local_port == port->local_port &&
@@ -842,9 +854,9 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
* so if a binding is requested, check for conflicts.
*/
if (bind)
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr =
- &mlxsw_sp->span.entries[i];
+ &mlxsw_sp->span->entries[i];
if (mlxsw_sp_span_entry_bound_port_find(curr, type,
port, bind))
@@ -988,14 +1000,18 @@ void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
-void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
- int i;
- int err;
+ struct mlxsw_sp_span *span;
+ struct mlxsw_sp *mlxsw_sp;
+ int i, err;
- ASSERT_RTNL();
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ span = container_of(work, struct mlxsw_sp_span, work);
+ mlxsw_sp = span->mlxsw_sp;
+
+ rtnl_lock();
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
struct mlxsw_sp_span_parms sparms = {NULL};
if (!curr->ref_count)
@@ -1010,4 +1026,12 @@ void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
}
}
+ rtnl_unlock();
+}
+
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+{
+ if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+ return;
+ mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}
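
[Note] The SPAN rework replaces the entries array embedded in struct mlxsw_sp and the ad-hoc respin work allocated in spectrum_switchdev.c with a single struct mlxsw_sp_span that owns both the flexible entries array and a persistent work item, plus an atomic count of active entries so mlxsw_sp_span_respin() can return early without scheduling when nothing is mirrored. A self-contained sketch of the same allocate-with-struct_size()-and-defer pattern; all names below are illustrative:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct entry_set {
	struct work_struct work;
	int entries_count;
	int entries[];			/* the patch spells this entries[0] */
};

static void entry_set_work(struct work_struct *work)
{
	struct entry_set *set = container_of(work, struct entry_set, work);
	int i;

	for (i = 0; i < set->entries_count; i++)
		;	/* re-resolve / reconfigure set->entries[i] */
}

static struct entry_set *entry_set_alloc(int n)
{
	struct entry_set *set;

	set = kzalloc(struct_size(set, entries, n), GFP_KERNEL);
	if (!set)
		return NULL;
	set->entries_count = n;
	INIT_WORK(&set->work, entry_set_work);
	return set;
}
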
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a3af171c6358..339c69da83b2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -153,16 +153,64 @@ static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp);
}
+static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev, *stop_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev)) {
+ err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
+ br_dev, dev, 0,
+ extack);
+ if (err) {
+ stop_dev = dev;
+ goto err_vxlan_join;
+ }
+ }
+ }
+
+ return 0;
+
+err_vxlan_join:
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev)) {
+ if (stop_dev == dev)
+ break;
+ mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
+ }
+ }
+ return err;
+}
+
+static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev))
+ mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
+ }
+}
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
- struct net_device *br_dev)
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
{
struct device *dev = bridge->mlxsw_sp->bus_info->dev;
struct mlxsw_sp_bridge_device *bridge_device;
bool vlan_enabled = br_vlan_enabled(br_dev);
+ int err;
if (vlan_enabled && bridge->vlan_enabled_exists) {
dev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
return ERR_PTR(-EINVAL);
}
@@ -184,13 +232,29 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
INIT_LIST_HEAD(&bridge_device->mids_list);
list_add(&bridge_device->list, &bridge->bridges_list);
+ /* It is possible we already have VXLAN devices enslaved to the bridge.
+ * In which case, we need to replay their configuration as if they were
+ * just now enslaved to the bridge.
+ */
+ err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
+ if (err)
+ goto err_vxlan_init;
+
return bridge_device;
+
+err_vxlan_init:
+ list_del(&bridge_device->list);
+ if (bridge_device->vlan_enabled)
+ bridge->vlan_enabled_exists = false;
+ kfree(bridge_device);
+ return ERR_PTR(err);
}
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
struct mlxsw_sp_bridge_device *bridge_device)
{
+ mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
bridge_device->dev);
list_del(&bridge_device->list);
@@ -203,7 +267,8 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
- struct net_device *br_dev)
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_bridge_device *bridge_device;
@@ -211,7 +276,7 @@ mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
if (bridge_device)
return bridge_device;
- return mlxsw_sp_bridge_device_create(bridge, br_dev);
+ return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
}
static void
@@ -292,7 +357,8 @@ mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
- struct net_device *brport_dev)
+ struct net_device *brport_dev,
+ struct netlink_ext_ack *extack)
{
struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
struct mlxsw_sp_bridge_device *bridge_device;
@@ -305,7 +371,7 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
return bridge_port;
}
- bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
+ bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
if (IS_ERR(bridge_device))
return ERR_CAST(bridge_device);
@@ -1000,7 +1066,7 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
&bridge_vlan->port_vlan_list);
mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
- bridge_port->dev);
+ bridge_port->dev, extack);
mlxsw_sp_port_vlan->bridge_port = bridge_port;
return 0;
@@ -1107,16 +1173,12 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
const struct switchdev_obj_port_vlan *vlan)
{
- struct mlxsw_sp_rif *rif;
- struct mlxsw_sp_fid *fid;
u16 pvid;
u16 vid;
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
- if (!rif)
+ pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
+ if (!pvid)
return 0;
- fid = mlxsw_sp_rif_fid(rif);
- pvid = mlxsw_sp_fid_8021q_vid(fid);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
@@ -1712,36 +1774,6 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-struct mlxsw_sp_span_respin_work {
- struct work_struct work;
- struct mlxsw_sp *mlxsw_sp;
-};
-
-static void mlxsw_sp_span_respin_work(struct work_struct *work)
-{
- struct mlxsw_sp_span_respin_work *respin_work =
- container_of(work, struct mlxsw_sp_span_respin_work, work);
-
- rtnl_lock();
- mlxsw_sp_span_respin(respin_work->mlxsw_sp);
- rtnl_unlock();
- kfree(respin_work);
-}
-
-static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_span_respin_work *respin_work;
-
- respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
- if (!respin_work)
- return;
-
- INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
- respin_work->mlxsw_sp = mlxsw_sp;
-
- mlxsw_core_schedule_work(&respin_work->work);
-}
-
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct switchdev_trans *trans,
@@ -1763,7 +1795,7 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
* call for later, so that the respin logic sees the
* updated bridge state.
*/
- mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
}
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
@@ -1916,7 +1948,7 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
break;
}
- mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
return err;
}
@@ -1990,12 +2022,11 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
return err;
}
- /* If no other port is member in the VLAN, then the FID does not exist.
- * NVE will be enabled on the FID once a port joins the VLAN
- */
- fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
- if (!fid)
- return 0;
+ fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+ if (IS_ERR(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
+ return PTR_ERR(fid);
+ }
if (mlxsw_sp_fid_vni_is_set(fid)) {
NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
@@ -2007,11 +2038,6 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
if (err)
goto err_nve_fid_enable;
- /* The tunnel port does not hold a reference on the FID. Only
- * local ports and the router port
- */
- mlxsw_sp_fid_put(fid);
-
return 0;
err_nve_fid_enable:
@@ -2048,38 +2074,8 @@ mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
- struct net_device *vxlan_dev;
- struct mlxsw_sp_fid *fid;
- int err;
-
- fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
- if (IS_ERR(fid))
- return fid;
-
- if (mlxsw_sp_fid_vni_is_set(fid))
- return fid;
-
- /* Find the VxLAN device that has the specified VLAN configured as
- * PVID and egress untagged. There can be at most one such device
- */
- vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev,
- vid);
- if (!vxlan_dev)
- return fid;
-
- if (!netif_running(vxlan_dev))
- return fid;
-
- err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
- extack);
- if (err)
- goto err_vxlan_join;
- return fid;
-
-err_vxlan_join:
- mlxsw_sp_fid_put(fid);
- return ERR_PTR(err);
+ return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
static struct mlxsw_sp_fid *
@@ -2184,9 +2180,9 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_fid *fid;
int err;
- fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
- if (!fid) {
- NL_SET_ERR_MSG_MOD(extack, "Did not find a corresponding FID");
+ fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+ if (IS_ERR(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
return -EINVAL;
}
@@ -2200,11 +2196,6 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
if (err)
goto err_nve_fid_enable;
- /* The tunnel port does not hold a reference on the FID. Only
- * local ports and the router port
- */
- mlxsw_sp_fid_put(fid);
-
return 0;
err_nve_fid_enable:
@@ -2218,34 +2209,8 @@ mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
- struct net_device *vxlan_dev;
- struct mlxsw_sp_fid *fid;
- int err;
-
- fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
- if (IS_ERR(fid))
- return fid;
-
- if (mlxsw_sp_fid_vni_is_set(fid))
- return fid;
- vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
- if (!vxlan_dev)
- return fid;
-
- if (!netif_running(vxlan_dev))
- return fid;
-
- err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0,
- extack);
- if (err)
- goto err_vxlan_join;
-
- return fid;
-
-err_vxlan_join:
- mlxsw_sp_fid_put(fid);
- return ERR_PTR(err);
+ return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
}
static struct mlxsw_sp_fid *
@@ -2287,7 +2252,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
+ bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
+ extack);
if (IS_ERR(bridge_port))
return PTR_ERR(bridge_port);
bridge_device = bridge_port->bridge_device;
@@ -2351,21 +2317,11 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
+ /* Drop both the reference we just took during lookup and the reference
+ * the VXLAN device took.
+ */
+ mlxsw_sp_fid_put(fid);
mlxsw_sp_fid_put(fid);
-}
-
-struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- u16 vid,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_bridge_device *bridge_device;
-
- bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
- if (WARN_ON(!bridge_device))
- return ERR_PTR(-EINVAL);
-
- return bridge_device->ops->fid_get(bridge_device, vid, extack);
}
static void
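
[Note] A recurring theme in the switchdev hunks is threading the netlink extack from mlxsw_sp_port_bridge_join() through mlxsw_sp_bridge_port_get() and mlxsw_sp_bridge_device_get() into mlxsw_sp_bridge_device_create(), so failures such as the single VLAN-aware bridge limit are reported to user space rather than only via dev_err(). A rough sketch of the pattern with hypothetical names (struct bridge_obj, bridge_obj_exists(), bridge_obj_alloc()):

#include <linux/err.h>
#include <linux/netlink.h>	/* NL_SET_ERR_MSG_MOD() uses KBUILD_MODNAME */

static struct bridge_obj *bridge_obj_create(struct netlink_ext_ack *extack)
{
	struct bridge_obj *obj;

	if (bridge_obj_exists()) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one VLAN-aware bridge is supported");
		return ERR_PTR(-EINVAL);
	}

	obj = bridge_obj_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	return obj;
}
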
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index d1444ba36e10..4fe6aedca22f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5694,7 +5694,7 @@ static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
* from the bridge.
*/
if ((hw->features & STP_SUPPORT) && !promiscuous &&
- (dev->priv_flags & IFF_BRIDGE_PORT)) {
+ netif_is_bridge_port(dev)) {
struct ksz_switch *sw = hw->ksz_switch;
int port = priv->port.first_port;
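
[Note] The ksz884x hunk replaces the open-coded priv_flags test with the netif_is_bridge_port() helper from <linux/netdevice.h>, which, as far as I can tell, reduces to the same check:

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}
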
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 51fa82b429a3..bfa0c0d39600 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -147,39 +147,12 @@ static int sonic_probe1(struct net_device *dev)
dev->dev_addr[i*2+1] = val >> 8;
}
- err = -ENOMEM;
-
- /* Initialize the device structure. */
-
lp->dma_bitmode = SONIC_BITMODE32;
- /* Allocate the entire chunk of memory for the descriptors.
- Note that this cannot cross a 64K boundary. */
- lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC *
- SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr,
- GFP_KERNEL);
- if (lp->descriptors == NULL)
+ err = sonic_alloc_descriptors(dev);
+ if (err)
goto out;
- /* Now set up the pointers to point to the appropriate places */
- lp->cda = lp->descriptors;
- lp->tda = lp->cda + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
- lp->cda_laddr = lp->descriptors_laddr;
- lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
dev->netdev_ops = &sonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 0937fc2a928e..1b5559aacb38 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -114,17 +114,6 @@ static inline void bit_reverse_addr(unsigned char addr[6])
addr[i] = bitrev8(addr[i]);
}
-static irqreturn_t macsonic_interrupt(int irq, void *dev_id)
-{
- irqreturn_t result;
- unsigned long flags;
-
- local_irq_save(flags);
- result = sonic_interrupt(irq, dev_id);
- local_irq_restore(flags);
- return result;
-}
-
static int macsonic_open(struct net_device* dev)
{
int retval;
@@ -135,12 +124,12 @@ static int macsonic_open(struct net_device* dev)
dev->name, dev->irq);
goto err;
}
- /* Under the A/UX interrupt scheme, the onboard SONIC interrupt comes
- * in at priority level 3. However, we sometimes get the level 2 inter-
- * rupt as well, which must prevent re-entrance of the sonic handler.
+ /* Under the A/UX interrupt scheme, the onboard SONIC interrupt gets
+ * moved from level 2 to level 3. Unfortunately we still get some
+ * level 2 interrupts so register the handler for both.
*/
if (dev->irq == IRQ_AUTO_3) {
- retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, 0,
+ retval = request_irq(IRQ_NUBUS_9, sonic_interrupt, 0,
"sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
@@ -186,33 +175,10 @@ static const struct net_device_ops macsonic_netdev_ops = {
static int macsonic_init(struct net_device *dev)
{
struct sonic_local* lp = netdev_priv(dev);
+ int err = sonic_alloc_descriptors(dev);
- /* Allocate the entire chunk of memory for the descriptors.
- Note that this cannot cross a 64K boundary. */
- lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC *
- SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr,
- GFP_KERNEL);
- if (lp->descriptors == NULL)
- return -ENOMEM;
-
- /* Now set up the pointers to point to the appropriate places */
- lp->cda = lp->descriptors;
- lp->tda = lp->cda + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
- lp->cda_laddr = lp->descriptors_laddr;
- lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
+ if (err)
+ return err;
dev->netdev_ops = &macsonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 31be3ba66877..dd3605aa5f23 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -50,6 +50,42 @@ static void sonic_msg_init(struct net_device *dev)
netif_dbg(lp, drv, dev, "%s", version);
}
+static int sonic_alloc_descriptors(struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ /* Allocate a chunk of memory for the descriptors. Note that this
+ * must not cross a 64K boundary. It is smaller than one page which
+ * means that page alignment is a sufficient condition.
+ */
+ lp->descriptors =
+ dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr, GFP_KERNEL);
+
+ if (!lp->descriptors)
+ return -ENOMEM;
+
+ lp->cda = lp->descriptors;
+ lp->tda = lp->cda + SIZEOF_SONIC_CDA *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+
+ lp->cda_laddr = lp->descriptors_laddr;
+ lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+
+ return 0;
+}
+
/*
* Open/initialize the SONIC controller.
*
@@ -264,7 +300,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
- entry = lp->next_tx;
+ entry = (lp->eol_tx + 1) & SONIC_TDS_MASK;
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
@@ -275,27 +311,26 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
sonic_tda_put(dev, entry, SONIC_TD_LINK,
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
- wmb();
+ sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, ~SONIC_EOL &
+ sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK));
+
+ netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+
lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;
lp->tx_skb[entry] = skb;
- wmb();
- sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
- sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
lp->eol_tx = entry;
- lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
- if (lp->tx_skb[lp->next_tx] != NULL) {
+ entry = (entry + 1) & SONIC_TDS_MASK;
+ if (lp->tx_skb[entry]) {
/* The ring is full, the ISR has yet to process the next TD. */
netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
netif_stop_queue(dev);
/* after this packet, wait for ISR to free up some TDAs */
- } else netif_start_queue(dev);
-
- netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);
-
- SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ }
spin_unlock_irqrestore(&lp->lock, flags);
@@ -594,11 +629,6 @@ static void sonic_rx(struct net_device *dev)
if (rbe)
SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
- /*
- * If any worth-while packets have been received, netif_rx()
- * has done a mark_bh(NET_BH) for us and will work on them
- * when we get to the bottom-half routine.
- */
}
@@ -780,7 +810,7 @@ static int sonic_init(struct net_device *dev)
SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
- lp->cur_tx = lp->next_tx = 0;
+ lp->cur_tx = 0;
lp->eol_tx = SONIC_NUM_TDS - 1;
/*
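
[Note] The natsemi/sonic hunks consolidate the descriptor setup that jazzsonic, macsonic and xtsonic each open-coded into sonic_alloc_descriptors(); the 64K-boundary rule is satisfied because dma_alloc_coherent() hands back page-aligned memory and the whole descriptor block fits in one page. The transmit path also drops the separately tracked next_tx index and derives the next free descriptor from eol_tx. Minimal caller sketch, matching the shape of the three probe paths (lp is netdev_priv(dev)):

	int err;

	lp->dma_bitmode = SONIC_BITMODE32;

	err = sonic_alloc_descriptors(dev);	/* lays out CDA/TDA/RDA/RRA */
	if (err)
		goto out;
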
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index e0e4cba6f6f6..3cbb62c860c8 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -321,7 +321,6 @@ struct sonic_local {
unsigned int cur_tx; /* first unacked transmit packet */
unsigned int eol_rx;
unsigned int eol_tx; /* last unacked transmit packet */
- unsigned int next_tx; /* next free TD */
int msg_enable;
struct device *device; /* generic device */
struct net_device_stats stats;
@@ -342,6 +341,7 @@ static void sonic_multicast_list(struct net_device *dev);
static int sonic_init(struct net_device *dev);
static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void sonic_msg_init(struct net_device *dev);
+static int sonic_alloc_descriptors(struct net_device *dev);
/* Internal inlines for reading/writing DMA buffers. Note that bus
size and endianness matter here, whereas they don't for registers,
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index e1b886e87a76..dda9ec7d9cee 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -167,47 +167,11 @@ static int __init sonic_probe1(struct net_device *dev)
dev->dev_addr[i*2+1] = val >> 8;
}
- /* Initialize the device structure. */
-
lp->dma_bitmode = SONIC_BITMODE32;
- /*
- * Allocate local private descriptor areas in uncached space.
- * The entire structure must be located within the same 64kb segment.
- * A simple way to ensure this is to allocate twice the
- * size of the structure -- given that the structure is
- * much less than 64 kB, at least one of the halves of
- * the allocated area will be contained entirely in 64 kB.
- * We also allocate extra space for a pointer to allow freeing
- * this structure later on (in xtsonic_cleanup_module()).
- */
- lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC *
- SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr,
- GFP_KERNEL);
- if (lp->descriptors == NULL) {
- err = -ENOMEM;
+ err = sonic_alloc_descriptors(dev);
+ if (err)
goto out;
- }
-
- lp->cda = lp->descriptors;
- lp->tda = lp->cda + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
- /* get the virtual dma address */
-
- lp->cda_laddr = lp->descriptors_laddr;
- lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
dev->netdev_ops = &xtsonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index e452f4242ba0..020acc300d7e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -632,10 +632,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
return 0;
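
[Note] ionic is the first of several drivers in this series (emac, r8169 and netsec below) to drop the open-coded IPv6 TSO pseudo-header setup in favour of tcp_v6_gso_csum_prep(). As I read <net/tcp.h>, the helper is roughly equivalent to the code being removed:

static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	ipv6h->payload_len = 0;
	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
}
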
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index bebe38d74d66..251d4ac4af02 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1288,11 +1288,8 @@ static int emac_tso_csum(struct emac_adapter *adpt,
memset(tpd, 0, sizeof(*tpd));
memset(&extra_tpd, 0, sizeof(extra_tpd));
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
+
TPD_PKT_LEN_SET(&extra_tpd, skb->len);
TPD_LSO_SET(&extra_tpd, 1);
TPD_LSOV_SET(&extra_tpd, 1);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index a2168a14794c..267b7ae05e23 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -212,7 +212,6 @@ enum rtl_registers {
/* Unlimited maximum PCI burst. */
#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
- RxMissed = 0x4c,
Cfg9346 = 0x50,
Config0 = 0x51,
Config1 = 0x52,
@@ -576,6 +575,7 @@ struct rtl8169_tc_offsets {
__le64 tx_errors;
__le32 tx_multi_collision;
__le16 tx_aborted;
+ __le16 rx_missed;
};
enum rtl_flag {
@@ -689,6 +689,12 @@ static void rtl_unlock_config_regs(struct rtl8169_private *tp)
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
+static void rtl_pci_commit(struct rtl8169_private *tp)
+{
+ /* Read an arbitrary register to commit a preceding PCI write */
+ RTL_R8(tp, ChipCmd);
+}
+
static bool rtl_is_8125(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_60;
@@ -1319,18 +1325,13 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
rtl_irq_disable(tp);
rtl_ack_events(tp, 0xffffffff);
- /* PCI commit */
- RTL_R8(tp, ChipCmd);
+ rtl_pci_commit(tp);
}
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
- struct net_device *dev = tp->dev;
struct phy_device *phydev = tp->phydev;
- if (!netif_running(dev))
- return;
-
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
if (phydev->speed == SPEED_1000) {
@@ -1536,7 +1537,7 @@ static int rtl8169_set_features(struct net_device *dev,
}
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_R16(tp, CPlusCmd);
+ rtl_pci_commit(tp);
rtl_unlock_work(tp);
@@ -1622,7 +1623,7 @@ static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
u32 cmd;
RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32);
- RTL_R32(tp, CounterAddrHigh);
+ rtl_pci_commit(tp);
cmd = (u64)paddr & DMA_BIT_MASK(32);
RTL_W32(tp, CounterAddrLow, cmd);
RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
@@ -1689,6 +1690,7 @@ static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
tp->tc_offset.tx_errors = counters->tx_errors;
tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
tp->tc_offset.tx_aborted = counters->tx_aborted;
+ tp->tc_offset.rx_missed = counters->rx_missed;
tp->tc_offset.inited = true;
return ret;
@@ -1946,7 +1948,7 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_R16(tp, CPlusCmd);
+ rtl_pci_commit(tp);
rtl_unlock_work(tp);
@@ -2044,7 +2046,7 @@ static void rtl_enable_eee(struct rtl8169_private *tp)
phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
}
-static void rtl8169_get_mac_version(struct rtl8169_private *tp)
+static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{
/*
* The driver currently handles the 8168Bf and the 8168Be identically
@@ -2060,7 +2062,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
static const struct rtl_mac_info {
u16 mask;
u16 val;
- u16 mac_version;
+ enum mac_version ver;
} mac_info[] = {
/* 8125 family. */
{ 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
@@ -2147,22 +2149,22 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{ 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
const struct rtl_mac_info *p = mac_info;
- u16 reg = RTL_R32(tp, TxConfig) >> 20;
+ enum mac_version ver;
- while ((reg & p->mask) != p->val)
+ while ((xid & p->mask) != p->val)
p++;
- tp->mac_version = p->mac_version;
-
- if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- dev_err(tp_to_dev(tp), "unknown chip XID %03x\n", reg & 0xfcf);
- } else if (!tp->supports_gmii) {
- if (tp->mac_version == RTL_GIGA_MAC_VER_42)
- tp->mac_version = RTL_GIGA_MAC_VER_43;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_45)
- tp->mac_version = RTL_GIGA_MAC_VER_47;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_46)
- tp->mac_version = RTL_GIGA_MAC_VER_48;
+ ver = p->ver;
+
+ if (ver != RTL_GIGA_MAC_NONE && !gmii) {
+ if (ver == RTL_GIGA_MAC_VER_42)
+ ver = RTL_GIGA_MAC_VER_43;
+ else if (ver == RTL_GIGA_MAC_VER_45)
+ ver = RTL_GIGA_MAC_VER_47;
+ else if (ver == RTL_GIGA_MAC_VER_46)
+ ver = RTL_GIGA_MAC_VER_48;
}
+
+ return ver;
}
static void rtl_release_firmware(struct rtl8169_private *tp)
@@ -2264,10 +2266,10 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
rtl_unlock_config_regs(tp);
RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
- RTL_R32(tp, MAC4);
+ rtl_pci_commit(tp);
RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
- RTL_R32(tp, MAC0);
+ rtl_pci_commit(tp);
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
rtl_rar_exgmac_set(tp, addr);
@@ -2471,66 +2473,52 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
-static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
+static void rtl_jumbo_config(struct rtl8169_private *tp)
{
- rtl_unlock_config_regs(tp);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- pcie_set_readrq(tp->pci_dev, 512);
- r8168b_1_hw_jumbo_enable(tp);
- break;
- case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
- pcie_set_readrq(tp->pci_dev, 512);
- r8168c_hw_jumbo_enable(tp);
- break;
- case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
- r8168dp_hw_jumbo_enable(tp);
- break;
- case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- pcie_set_readrq(tp->pci_dev, 512);
- r8168e_hw_jumbo_enable(tp);
- break;
- default:
- break;
- }
- rtl_lock_config_regs(tp);
-}
+ bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
-static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
-{
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
- r8168b_1_hw_jumbo_disable(tp);
+ if (jumbo) {
+ pcie_set_readrq(tp->pci_dev, 512);
+ r8168b_1_hw_jumbo_enable(tp);
+ } else {
+ r8168b_1_hw_jumbo_disable(tp);
+ }
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
- r8168c_hw_jumbo_disable(tp);
+ if (jumbo) {
+ pcie_set_readrq(tp->pci_dev, 512);
+ r8168c_hw_jumbo_enable(tp);
+ } else {
+ r8168c_hw_jumbo_disable(tp);
+ }
break;
case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
- r8168dp_hw_jumbo_disable(tp);
+ if (jumbo)
+ r8168dp_hw_jumbo_enable(tp);
+ else
+ r8168dp_hw_jumbo_disable(tp);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- r8168e_hw_jumbo_disable(tp);
+ if (jumbo) {
+ pcie_set_readrq(tp->pci_dev, 512);
+ r8168e_hw_jumbo_enable(tp);
+ } else {
+ r8168e_hw_jumbo_disable(tp);
+ }
break;
default:
break;
}
rtl_lock_config_regs(tp);
- if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+ if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
pcie_set_readrq(tp->pci_dev, 4096);
}
-static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
-{
- if (mtu > ETH_DATA_LEN)
- rtl_hw_jumbo_enable(tp);
- else
- rtl_hw_jumbo_disable(tp);
-}
-
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
return RTL_R8(tp, ChipCmd) & CmdReset;
@@ -3838,9 +3826,6 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
static void rtl_hw_start_8169(struct rtl8169_private *tp)
{
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
-
RTL_W8(tp, EarlyTxThres, NoEarlyTx);
tp->cp_cmd |= PCIMulRW;
@@ -3853,8 +3838,6 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
rtl8169_set_magic_reg(tp, tp->mac_version);
- RTL_W32(tp, RxMissed, 0);
-
/* disable interrupt coalescing */
RTL_W16(tp, IntrMitigate, 0x0000);
}
@@ -3877,10 +3860,11 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_tx_desc_registers(tp);
rtl_lock_config_regs(tp);
- rtl_jumbo_config(tp, tp->dev->mtu);
+ rtl_jumbo_config(tp);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
- RTL_R16(tp, CPlusCmd);
+ rtl_pci_commit(tp);
+
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
rtl_set_tx_config_registers(tp);
@@ -3892,10 +3876,9 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl_jumbo_config(tp, new_mtu);
-
dev->mtu = new_mtu;
netdev_update_features(dev);
+ rtl_jumbo_config(tp);
return 0;
}
@@ -4125,29 +4108,6 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
}
-/* msdn_giant_send_check()
- * According to the document of microsoft, the TCP Pseudo Header excludes the
- * packet length for IPv6 TCP large packets.
- */
-static int msdn_giant_send_check(struct sk_buff *skb)
-{
- const struct ipv6hdr *ipv6h;
- struct tcphdr *th;
- int ret;
-
- ret = skb_cow_head(skb, 0);
- if (ret)
- return ret;
-
- ipv6h = ipv6_hdr(skb);
- th = tcp_hdr(skb);
-
- th->check = 0;
- th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
-
- return ret;
-}
-
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
{
u32 mss = skb_shinfo(skb)->gso_size;
@@ -4180,9 +4140,10 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
break;
case htons(ETH_P_IPV6):
- if (msdn_giant_send_check(skb))
+ if (skb_cow_head(skb, 0))
return false;
+ tcp_v6_gso_csum_prep(skb);
opts[0] |= TD1_GTSENV6;
break;
@@ -4697,17 +4658,6 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void rtl8169_rx_missed(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- if (tp->mac_version > RTL_GIGA_MAC_VER_06)
- return;
-
- dev->stats.rx_missed_errors += RTL_R32(tp, RxMissed) & 0xffffff;
- RTL_W32(tp, RxMissed, 0);
-}
-
static void r8169_phylink_handler(struct net_device *ndev)
{
struct rtl8169_private *tp = netdev_priv(ndev);
@@ -4757,12 +4707,6 @@ static void rtl8169_down(struct net_device *dev)
netif_stop_queue(dev);
rtl8169_hw_reset(tp);
- /*
- * At this point device interrupts can not be enabled in any function,
- * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
- * and napi is disabled (rtl8169_poll).
- */
- rtl8169_rx_missed(dev);
/* Give a racing hard_start_xmit a few cycles to complete. */
synchronize_rcu();
@@ -4907,9 +4851,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
pm_runtime_get_noresume(&pdev->dev);
- if (netif_running(dev) && pm_runtime_active(&pdev->dev))
- rtl8169_rx_missed(dev);
-
do {
start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
stats->rx_packets = tp->rx_stats.packets;
@@ -4928,7 +4869,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_errors = dev->stats.rx_errors;
stats->rx_crc_errors = dev->stats.rx_crc_errors;
stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
- stats->rx_missed_errors = dev->stats.rx_missed_errors;
stats->multicast = dev->stats.multicast;
/*
@@ -4948,6 +4888,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
le32_to_cpu(tp->tc_offset.tx_multi_collision);
stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
le16_to_cpu(tp->tc_offset.tx_aborted);
+ stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) -
+ le16_to_cpu(tp->tc_offset.rx_missed);
pm_runtime_put_noidle(&pdev->dev);
}
@@ -5033,7 +4975,6 @@ static int rtl8169_runtime_suspend(struct device *device)
rtl8169_net_suspend(dev);
/* Update counters before going runtime suspend */
- rtl8169_rx_missed(dev);
rtl8169_update_counters(tp);
return 0;
@@ -5098,8 +5039,7 @@ static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
pci_clear_master(tp->pci_dev);
RTL_W8(tp, ChipCmd, CmdRxEnb);
- /* PCI commit */
- RTL_R8(tp, ChipCmd);
+ rtl_pci_commit(tp);
break;
default:
break;
@@ -5442,9 +5382,10 @@ done:
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
+ int jumbo_max, region, rc;
+ enum mac_version chipset;
struct net_device *dev;
- int chipset, region;
- int jumbo_max, rc;
+ u16 xid;
/* Some tools for creating an initramfs don't consider softdeps, then
* r8169.ko may be in initramfs, but realtek.ko not. Then the generic
@@ -5511,10 +5452,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mmio_addr = pcim_iomap_table(pdev)[region];
+ xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf;
+
/* Identify chip attached to board */
- rtl8169_get_mac_version(tp);
- if (tp->mac_version == RTL_GIGA_MAC_NONE)
+ chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
+ if (chipset == RTL_GIGA_MAC_NONE) {
+ dev_err(&pdev->dev, "unknown chip XID %03x\n", xid);
return -ENODEV;
+ }
+
+ tp->mac_version = chipset;
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
@@ -5532,8 +5479,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- chipset = tp->mac_version;
-
rc = rtl_alloc_irq(tp);
if (rc < 0) {
dev_err(&pdev->dev, "Can't allocate interrupt\n");
@@ -5551,9 +5496,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
@@ -5575,7 +5517,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
} else {
@@ -5590,9 +5531,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mac_version == RTL_GIGA_MAC_VER_22) {
dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
}
+ dev->features |= dev->hw_features;
+
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
@@ -5624,8 +5566,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_mdio_unregister;
netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr,
- (RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
+ rtl_chip_infos[chipset].name, dev->dev_addr, xid,
pci_irq_vector(pdev, 0));
if (jumbo_max)
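
[Note] Besides folding the posted-write flushes into rtl_pci_commit(), turning rtl8169_get_mac_version() into a pure xid/gmii lookup and sourcing rx_missed_errors from the DMA'd tally counters (offset against the snapshot taken at init) instead of the legacy RxMissed register, the probe path stops maintaining dev->features as a second copy of the flag list. A condensed sketch of the resulting ordering, which keeps RXALL/RXFCS offered but off by default:

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
			   NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX;
	if (rtl_chip_supports_csum_v2(tp))
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->features |= dev->hw_features;	/* enable the whole set by default */

	dev->hw_features |= NETIF_F_RXALL;	/* offered via ethtool, default off */
	dev->hw_features |= NETIF_F_RXFCS;
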
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 58ca126518a2..8ed73f44405d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -142,69 +142,6 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWALCR1] = 0x00b4,
};
-static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
- SH_ETH_OFFSET_DEFAULTS,
-
- [EDSR] = 0x0000,
- [EDMR] = 0x0400,
- [EDTRR] = 0x0408,
- [EDRRR] = 0x0410,
- [EESR] = 0x0428,
- [EESIPR] = 0x0430,
- [TDLAR] = 0x0010,
- [TDFAR] = 0x0014,
- [TDFXR] = 0x0018,
- [TDFFR] = 0x001c,
- [RDLAR] = 0x0030,
- [RDFAR] = 0x0034,
- [RDFXR] = 0x0038,
- [RDFFR] = 0x003c,
- [TRSCER] = 0x0438,
- [RMFCR] = 0x0440,
- [TFTR] = 0x0448,
- [FDR] = 0x0450,
- [RMCR] = 0x0458,
- [RPADIR] = 0x0460,
- [FCFTR] = 0x0468,
- [CSMR] = 0x04E4,
-
- [ECMR] = 0x0500,
- [RFLR] = 0x0508,
- [ECSR] = 0x0510,
- [ECSIPR] = 0x0518,
- [PIR] = 0x0520,
- [APR] = 0x0554,
- [MPR] = 0x0558,
- [PFTCR] = 0x055c,
- [PFRCR] = 0x0560,
- [TPAUSER] = 0x0564,
- [MAHR] = 0x05c0,
- [MALR] = 0x05c8,
- [CEFCR] = 0x0740,
- [FRECR] = 0x0748,
- [TSFRCR] = 0x0750,
- [TLFRCR] = 0x0758,
- [RFCR] = 0x0760,
- [MAFCR] = 0x0778,
-
- [ARSTR] = 0x0000,
- [TSU_CTRST] = 0x0004,
- [TSU_FWSLC] = 0x0038,
- [TSU_VTAG0] = 0x0058,
- [TSU_ADSBSY] = 0x0060,
- [TSU_TEN] = 0x0064,
- [TSU_POST1] = 0x0070,
- [TSU_POST2] = 0x0074,
- [TSU_POST3] = 0x0078,
- [TSU_POST4] = 0x007c,
- [TSU_ADRH0] = 0x0100,
-
- [TXNLCR0] = 0x0080,
- [TXALCR0] = 0x0084,
- [RXNLCR0] = 0x0088,
- [RXALCR0] = 0x008C,
-};
-
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
@@ -569,6 +506,9 @@ static void sh_eth_set_rate_gether(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ if (WARN_ON(!mdp->cd->gecmr))
+ return;
+
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, GECMR_10, GECMR);
@@ -590,7 +530,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
.chip_reset = sh_eth_chip_reset,
.set_duplex = sh_eth_set_duplex,
- .register_type = SH_ETH_REG_FAST_RZ,
+ .register_type = SH_ETH_REG_GIGABIT,
.edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD,
@@ -663,6 +603,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
@@ -788,6 +729,7 @@ static struct sh_eth_cpu_data r8a77980_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.nbst = 1,
@@ -957,6 +899,9 @@ static void sh_eth_set_rate_giga(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ if (WARN_ON(!mdp->cd->gecmr))
+ return;
+
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, 0x00000000, GECMR);
@@ -1002,6 +947,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
@@ -1042,6 +988,7 @@ static struct sh_eth_cpu_data sh7734_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.no_trimd = 1,
@@ -1083,6 +1030,7 @@ static struct sh_eth_cpu_data sh7763_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.no_trimd = 1,
@@ -2140,11 +2088,13 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(EESR);
add_reg(EESIPR);
add_reg(TDLAR);
- add_reg(TDFAR);
+ if (!cd->no_xdfar)
+ add_reg(TDFAR);
add_reg(TDFXR);
add_reg(TDFFR);
add_reg(RDLAR);
- add_reg(RDFAR);
+ if (!cd->no_xdfar)
+ add_reg(RDFAR);
add_reg(RDFXR);
add_reg(RDFFR);
add_reg(TRSCER);
@@ -2179,21 +2129,26 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
if (cd->tpauser)
add_reg(TPAUSER);
add_reg(TPAUSECR);
- add_reg(GECMR);
+ if (cd->gecmr)
+ add_reg(GECMR);
if (cd->bculr)
add_reg(BCULR);
add_reg(MAHR);
add_reg(MALR);
- add_reg(TROCR);
- add_reg(CDCR);
- add_reg(LCCR);
- add_reg(CNDCR);
+ if (!cd->no_tx_cntrs) {
+ add_reg(TROCR);
+ add_reg(CDCR);
+ add_reg(LCCR);
+ add_reg(CNDCR);
+ }
add_reg(CEFCR);
add_reg(FRECR);
add_reg(TSFRCR);
add_reg(TLFRCR);
- add_reg(CERCR);
- add_reg(CEECR);
+ if (cd->cexcr) {
+ add_reg(CERCR);
+ add_reg(CEECR);
+ }
add_reg(MAFCR);
if (cd->rtrate)
add_reg(RTRATE);
@@ -3121,9 +3076,6 @@ static const u16 *sh_eth_get_register_offset(int register_type)
case SH_ETH_REG_GIGABIT:
reg_offset = sh_eth_offset_gigabit;
break;
- case SH_ETH_REG_FAST_RZ:
- reg_offset = sh_eth_offset_fast_rz;
- break;
case SH_ETH_REG_FAST_RCAR:
reg_offset = sh_eth_offset_fast_rcar;
break;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 850726301e1c..c1b3751b12c4 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -145,7 +145,6 @@ enum {
enum {
SH_ETH_REG_GIGABIT,
- SH_ETH_REG_FAST_RZ,
SH_ETH_REG_FAST_RCAR,
SH_ETH_REG_FAST_SH4,
SH_ETH_REG_FAST_SH3_SH2
@@ -490,6 +489,7 @@ struct sh_eth_cpu_data {
unsigned apr:1; /* EtherC has APR */
unsigned mpr:1; /* EtherC has MPR */
unsigned tpauser:1; /* EtherC has TPAUSER */
+ unsigned gecmr:1; /* EtherC has GECMR */
unsigned bculr:1; /* EtherC has BCULR */
unsigned tsu:1; /* EtherC has TSU */
unsigned hw_swap:1; /* E-DMAC has DE bit in EDMR */
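
[Note] sh_eth drops the dedicated SH_ETH_REG_FAST_RZ register map: r7s72100 (RZ/A1) now reuses the gigabit map, and the one meaningful difference, the GECMR rate register, becomes a per-chip capability bit that gates both the set_rate callbacks and the register dump. A hypothetical cpu_data entry showing only the fields relevant to the new flag:

static struct sh_eth_cpu_data example_gether_data = {
	.register_type	= SH_ETH_REG_GIGABIT,
	.gecmr		= 1,	/* EtherC has GECMR; set_rate may write it */
	/* ... remaining fields as for the real SoC ... */
};
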
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4481f21a1f43..256807c28ff7 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -113,7 +113,6 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
-static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index f1bdb04efbe4..da54afaa3c44 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -150,24 +150,6 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
-static inline void efx_filter_rfs_expire(struct work_struct *data)
-{
- struct delayed_work *dwork = to_delayed_work(data);
- struct efx_channel *channel;
- unsigned int time, quota;
-
- channel = container_of(dwork, struct efx_channel, filter_work);
- time = jiffies - channel->rfs_last_expiry;
- quota = channel->rfs_filter_count * time / (30 * HZ);
- if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
- channel->rfs_last_expiry += time;
- /* Ensure we do more work eventually even if NAPI poll is not happening */
- schedule_delayed_work(dwork, 30 * HZ);
-}
-#define efx_filter_rfs_enabled() 1
-#else
-static inline void efx_filter_rfs_expire(struct work_struct *data) {}
-#define efx_filter_rfs_enabled() 0
#endif
/* RSS contexts */
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index aeb5e8aa2f2a..d2d738314c50 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -485,6 +485,23 @@ void efx_remove_eventq(struct efx_channel *channel)
*
*************************************************************************/
+#ifdef CONFIG_RFS_ACCEL
+static void efx_filter_rfs_expire(struct work_struct *data)
+{
+ struct delayed_work *dwork = to_delayed_work(data);
+ struct efx_channel *channel;
+ unsigned int time, quota;
+
+ channel = container_of(dwork, struct efx_channel, filter_work);
+ time = jiffies - channel->rfs_last_expiry;
+ quota = channel->rfs_filter_count * time / (30 * HZ);
+ if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
+ channel->rfs_last_expiry += time;
+ /* Ensure we do more work eventually even if NAPI poll is not happening */
+ schedule_delayed_work(dwork, 30 * HZ);
+}
+#endif
+
/* Allocate and initialise a channel structure. */
struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
@@ -1166,6 +1183,9 @@ static int efx_poll(struct napi_struct *napi, int budget)
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
struct efx_nic *efx = channel->efx;
+#ifdef CONFIG_RFS_ACCEL
+ unsigned int time;
+#endif
int spent;
netif_vdbg(efx, intr, efx->net_dev,
@@ -1185,7 +1205,10 @@ static int efx_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_RFS_ACCEL
/* Perhaps expire some ARFS filters */
- mod_delayed_work(system_wq, &channel->filter_work, 0);
+ time = jiffies - channel->rfs_last_expiry;
+ /* Would our quota be >= 20? */
+ if (channel->rfs_filter_count * time >= 600 * HZ)
+ mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
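
[Note] The sfc changes move the ARFS expiry work out of the efx.h header into efx_channels.c and add a cheap precheck in efx_poll() so the delayed work is only kicked when it would actually expire something. The precheck is just the expiry quota rearranged to avoid a division, as sketched below:

	/* quota = rfs_filter_count * time / (30 * HZ)
	 * quota >= 20  <=>  rfs_filter_count * time >= 20 * 30 * HZ == 600 * HZ
	 * (the expiry test itself also changes from "> 20" to ">= 20" so the
	 * two conditions agree)
	 */
	if (channel->rfs_filter_count * time >= 600 * HZ)
		mod_delayed_work(system_wq, &channel->filter_work, 0);
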
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 04d7f41d7ed9..696a77c20cb7 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -287,9 +287,8 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
return PTR_ERR(segments);
dev_consume_skb_any(skb);
- skb = segments;
- skb_list_walk_safe(skb, skb, next) {
+ skb_list_walk_safe(segments, skb, next) {
skb_mark_not_on_list(skb);
efx_enqueue_skb(tx_queue, skb);
}
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index e8224b543dfc..58b9b7ce7195 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -896,9 +896,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
case XDP_TX:
ret = netsec_xdp_xmit_back(priv, xdp);
if (ret != NETSEC_XDP_TX)
- __page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -906,9 +906,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
ret = NETSEC_XDP_REDIR;
} else {
ret = NETSEC_XDP_CONSUMED;
- __page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
}
break;
default:
@@ -919,9 +919,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
/* fall through -- handle aborts by dropping packet */
case XDP_DROP:
ret = NETSEC_XDP_CONSUMED;
- __page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data), len, true);
break;
}
@@ -1020,8 +1019,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
* cache state. Since we paid the allocation cost if
* building an skb fails try to put the page into cache
*/
- __page_pool_put_page(dring->page_pool, page,
- pkt_len, true);
+ page_pool_put_page(dring->page_pool, page, pkt_len,
+ true);
netif_err(priv, drv, priv->ndev,
"rx failed to build skb\n");
break;
@@ -1148,11 +1147,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
~tcp_v4_check(0, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0);
} else {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
tx_ctrl.tcp_seg_offload_flag = true;
@@ -1199,7 +1194,7 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
if (id == NETSEC_RING_RX) {
struct page *page = virt_to_page(desc->addr);
- page_pool_put_page(dring->page_pool, page, false);
+ page_pool_put_full_page(dring->page_pool, page, false);
} else if (id == NETSEC_RING_TX) {
dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
DMA_TO_DEVICE);
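
The netsec hunks above, and the stmmac_main.c hunk that follows, track a page_pool API rename: callers now use page_pool_put_page(pool, page, dma_sync_size, allow_direct) rather than the old double-underscore internal helper, and page_pool_put_full_page(pool, page, allow_direct) on paths such as ring teardown where the received length is unknown and the whole page has to be treated as dirty. A kernel-style sketch of the two call shapes as used in this patch; example_recycle() is an invented helper for illustration only, not part of the API.

#include <net/page_pool.h>

/* Sketch only: shows the two variants used above; 'pool', 'page' and
 * 'len' are assumed to come from the caller. */
static void example_recycle(struct page_pool *pool, struct page *page,
			    unsigned int len, bool unknown_len)
{
	if (unknown_len)
		/* Teardown/unknown length: sync the whole page, no direct
		 * (in-softirq) recycling. */
		page_pool_put_full_page(pool, page, false);
	else
		/* Fast path: only 'len' bytes were written by the device,
		 * allow direct recycling from softirq context. */
		page_pool_put_page(pool, page, len, true);
}
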
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5836b21edd7e..37920b4da091 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1251,11 +1251,11 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page)
- page_pool_put_page(rx_q->page_pool, buf->page, false);
+ page_pool_put_full_page(rx_q->page_pool, buf->page, false);
buf->page = NULL;
if (buf->sec_page)
- page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
+ page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
buf->sec_page = NULL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index fe2c9fa6a71c..7acbac73c29c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -579,28 +579,23 @@ static int __maybe_unused stmmac_pci_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
/* synthetic ID, no official vendor */
-#define PCI_VENDOR_ID_STMMAC 0x700
-
-#define STMMAC_QUARK_ID 0x0937
-#define STMMAC_DEVICE_ID 0x1108
-#define STMMAC_EHL_RGMII1G_ID 0x4b30
-#define STMMAC_EHL_SGMII1G_ID 0x4b31
-#define STMMAC_TGL_SGMII1G_ID 0xa0ac
-#define STMMAC_GMAC5_ID 0x7102
-
-#define STMMAC_DEVICE(vendor_id, dev_id, info) { \
- PCI_VDEVICE(vendor_id, dev_id), \
- .driver_data = (kernel_ulong_t)&info \
- }
+#define PCI_VENDOR_ID_STMMAC 0x0700
+
+#define PCI_DEVICE_ID_STMMAC_STMMAC 0x1108
+#define PCI_DEVICE_ID_INTEL_QUARK_ID 0x0937
+#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G_ID 0x4b30
+#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G_ID 0x4b31
+#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
+#define PCI_DEVICE_ID_SYNOPSYS_GMAC5_ID 0x7102
static const struct pci_device_id stmmac_id_table[] = {
- STMMAC_DEVICE(STMMAC, STMMAC_DEVICE_ID, stmmac_pci_info),
- STMMAC_DEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_MAC, stmmac_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_EHL_RGMII1G_ID, ehl_rgmii1g_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_EHL_SGMII1G_ID, ehl_sgmii1g_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_TGL_SGMII1G_ID, tgl_sgmii1g_pci_info),
- STMMAC_DEVICE(SYNOPSYS, STMMAC_GMAC5_ID, snps_gmac5_pci_info),
+ { PCI_DEVICE_DATA(STMMAC, STMMAC, &stmmac_pci_info) },
+ { PCI_DEVICE_DATA(STMICRO, MAC, &stmmac_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(SYNOPSYS, GMAC5_ID, &snps_gmac5_pci_info) },
{}
};
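
The define renames in stmmac_pci.c above exist because PCI_DEVICE_DATA() builds both macro names by token pasting, so the device IDs have to be spelled PCI_DEVICE_ID_<vendor>_<name> to pair with the matching PCI_VENDOR_ID_<vendor>. The helper lives in <linux/pci.h>; its shape is approximately the following (quoted from memory, so treat the exact field list as an approximation rather than the authoritative definition).

/* Approximate shape of the helper used in the ID table above. */
#define PCI_DEVICE_DATA(vend, dev, data)				\
	.vendor = PCI_VENDOR_ID_##vend,					\
	.device = PCI_DEVICE_ID_##vend##_##dev,				\
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,		\
	.driver_data = (kernel_ulong_t)(data)

/* So the entry
 *	{ PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_pci_info) },
 * resolves to PCI_VENDOR_ID_INTEL / PCI_DEVICE_ID_INTEL_QUARK_ID,
 * which is why the local defines were renamed to that naming scheme. */
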