Diffstat (limited to 'drivers/net/ethernet/stmicro')
50 files changed, 3862 insertions, 710 deletions
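The bulk of the changes below fall into two themes: direction-aware DMA interrupt handling (the new "dir" argument and per-direction status masks threaded through dwmac_dma, dwmac4 and xgmac), and Intel mGbE (dwmac-intel) additions — per-queue MSI vectors, ART cross-timestamping, PSE SGMII Rx-clock gating, and EST/FPE interrupt plumbing.

A minimal standalone sketch of the MSI vector layout programmed for Intel mGbE in stmmac_config_multi_msi() further down: RX queue vectors sit on even offsets from msi_rx_base_vec = 0, TX queue vectors on odd offsets from msi_tx_base_vec = 1, and fixed vectors 26-29 carry safety UE/CE, LPI and MAC events. The queue counts used here are placeholders for illustration only; the real driver resolves each number through pci_irq_vector() and falls back to a single shared vector (stmmac_config_single_msi) when multi-vector allocation fails.

/* Illustrative only: prints the vector numbers set up by the dwmac-intel
 * platform data in this series.  Queue counts are made-up placeholders.
 */
#include <stdio.h>

#define STMMAC_MSI_VEC_MAX	32

enum {
	MSI_RX_BASE	= 0,	/* plat->msi_rx_base_vec: RX queues on even vectors */
	MSI_TX_BASE	= 1,	/* plat->msi_tx_base_vec: TX queues on odd vectors  */
	MSI_SFTY_UE	= 26,	/* plat->msi_sfty_ue_vec */
	MSI_SFTY_CE	= 27,	/* plat->msi_sfty_ce_vec */
	MSI_LPI		= 28,	/* plat->msi_lpi_vec */
	MSI_MAC		= 29,	/* plat->msi_mac_vec */
};

int main(void)
{
	int rx_queues = 4, tx_queues = 4;	/* placeholder queue counts */

	for (int i = 0; i < rx_queues; i++)
		printf("RX queue %d -> MSI vector %d\n", i, MSI_RX_BASE + i * 2);
	for (int i = 0; i < tx_queues; i++)
		printf("TX queue %d -> MSI vector %d\n", i, MSI_TX_BASE + i * 2);

	printf("sfty_ue=%d sfty_ce=%d lpi=%d mac=%d (all < STMMAC_MSI_VEC_MAX=%d)\n",
	       MSI_SFTY_UE, MSI_SFTY_CE, MSI_LPI, MSI_MAC, STMMAC_MSI_VEC_MAX);
	return 0;
}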
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index e675ba12fde2..7737e4d0bb9e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -140,8 +140,8 @@ config DWMAC_ROCKCHIP config DWMAC_SOCFPGA tristate "SOCFPGA dwmac support" - default (ARCH_SOCFPGA || ARCH_STRATIX10) - depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) + default ARCH_INTEL_SOCFPGA + depends on OF && (ARCH_INTEL_SOCFPGA || COMPILE_TEST) select MFD_SYSCON help Support for ethernet controller on Altera SOCFPGA diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 366740ab9c5a..f2e478b884b0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -6,6 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \ + stmmac_xdp.o \ $(stmmac-y) stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 6f271c46368d..619e3c0760d6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -33,6 +33,7 @@ #define DWMAC_CORE_4_10 0x41 #define DWMAC_CORE_5_00 0x50 #define DWMAC_CORE_5_10 0x51 +#define DWMAC_CORE_5_20 0x52 #define DWXGMAC_CORE_2_10 0x21 #define DWXLGMAC_CORE_2_00 0x20 @@ -182,6 +183,12 @@ struct stmmac_extra_stats { /* TSO */ unsigned long tx_tso_frames; unsigned long tx_tso_nfrags; + /* EST */ + unsigned long mtl_est_cgce; + unsigned long mtl_est_hlbs; + unsigned long mtl_est_hlbf; + unsigned long mtl_est_btre; + unsigned long mtl_est_btrlm; }; /* Safety Feature statistics exposed by ethtool */ @@ -253,6 +260,9 @@ struct stmmac_safety_stats { #define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */ #define DEFAULT_DMA_PBL 8 +/* MSI defines */ +#define STMMAC_MSI_VEC_MAX 32 + /* PCS status and mask defines */ #define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */ #define PCS_LINK_IRQ BIT(1) /* PCS Link */ @@ -303,12 +313,37 @@ enum dma_irq_status { handle_tx = 0x8, }; +enum dma_irq_dir { + DMA_DIR_RX = 0x1, + DMA_DIR_TX = 0x2, + DMA_DIR_RXTX = 0x3, +}; + +enum request_irq_err { + REQ_IRQ_ERR_ALL, + REQ_IRQ_ERR_TX, + REQ_IRQ_ERR_RX, + REQ_IRQ_ERR_SFTY_UE, + REQ_IRQ_ERR_SFTY_CE, + REQ_IRQ_ERR_LPI, + REQ_IRQ_ERR_WOL, + REQ_IRQ_ERR_MAC, + REQ_IRQ_ERR_NO, +}; + /* EEE and LPI defines */ #define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) #define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) #define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) #define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) +/* FPE defines */ +#define FPE_EVENT_UNKNOWN 0 +#define FPE_EVENT_TRSP BIT(0) +#define FPE_EVENT_TVER BIT(1) +#define FPE_EVENT_RRSP BIT(2) +#define FPE_EVENT_RVER BIT(3) + #define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) /* Physical Coding Sublayer */ @@ -382,6 +417,8 @@ struct dma_features { unsigned int estsel; unsigned int fpesel; unsigned int tbssel; + /* Numbers of Auxiliary Snapshot Inputs */ + unsigned int aux_snapshot_n; }; /* RX Buffer size must be multiple of 4/8/16 bytes */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index 08c76636c164..dfbaea06d108 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -115,7 +115,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev) if (IS_ERR(gmac)) return PTR_ERR(gmac); - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 27254b27d7ed..bc91fd867dcd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -438,7 +438,7 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) if (IS_ERR(stmmac_res.addr)) return PTR_ERR(stmmac_res.addr); - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index fad503820e04..fbfda55b4c52 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -27,7 +27,7 @@ static int dwmac_generic_probe(struct platform_device *pdev) return ret; if (pdev->dev.of_node) { - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 223f69da7e95..84651207a1de 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -90,6 +90,32 @@ imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat) return ret; } +static int imx_dwmac_clks_config(void *priv, bool enabled) +{ + struct imx_priv_data *dwmac = priv; + int ret = 0; + + if (enabled) { + ret = clk_prepare_enable(dwmac->clk_mem); + if (ret) { + dev_err(dwmac->dev, "mem clock enable failed\n"); + return ret; + } + + ret = clk_prepare_enable(dwmac->clk_tx); + if (ret) { + dev_err(dwmac->dev, "tx clock enable failed\n"); + clk_disable_unprepare(dwmac->clk_mem); + return ret; + } + } else { + clk_disable_unprepare(dwmac->clk_tx); + clk_disable_unprepare(dwmac->clk_mem); + } + + return ret; +} + static int imx_dwmac_init(struct platform_device *pdev, void *priv) { struct plat_stmmacenet_data *plat_dat; @@ -98,39 +124,18 @@ static int imx_dwmac_init(struct platform_device *pdev, void *priv) plat_dat = dwmac->plat_dat; - ret = clk_prepare_enable(dwmac->clk_mem); - if (ret) { - dev_err(&pdev->dev, "mem clock enable failed\n"); - return ret; - } - - ret = clk_prepare_enable(dwmac->clk_tx); - if (ret) { - dev_err(&pdev->dev, "tx clock enable failed\n"); - goto clk_tx_en_failed; - } - if (dwmac->ops->set_intf_mode) { ret = dwmac->ops->set_intf_mode(plat_dat); if (ret) - goto intf_mode_failed; + return ret; } return 0; - -intf_mode_failed: - clk_disable_unprepare(dwmac->clk_tx); -clk_tx_en_failed: - clk_disable_unprepare(dwmac->clk_mem); - return ret; } static void imx_dwmac_exit(struct platform_device *pdev, void *priv) { - struct imx_priv_data *dwmac = priv; - - clk_disable_unprepare(dwmac->clk_tx); - clk_disable_unprepare(dwmac->clk_mem); + /* nothing to do now */ } static void imx_dwmac_fix_speed(void *priv, unsigned int 
speed) @@ -226,7 +231,7 @@ static int imx_dwmac_probe(struct platform_device *pdev) if (!dwmac) return -ENOMEM; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -249,10 +254,15 @@ static int imx_dwmac_probe(struct platform_device *pdev) plat_dat->addr64 = dwmac->ops->addr_width; plat_dat->init = imx_dwmac_init; plat_dat->exit = imx_dwmac_exit; + plat_dat->clks_config = imx_dwmac_clks_config; plat_dat->fix_mac_speed = imx_dwmac_fix_speed; plat_dat->bsp_priv = dwmac; dwmac->plat_dat = plat_dat; + ret = imx_dwmac_clks_config(dwmac, true); + if (ret) + goto err_clks_config; + ret = imx_dwmac_init(pdev, dwmac); if (ret) goto err_dwmac_init; @@ -263,9 +273,11 @@ static int imx_dwmac_probe(struct platform_device *pdev) return 0; -err_dwmac_init: err_drv_probe: imx_dwmac_exit(pdev, plat_dat->bsp_priv); +err_dwmac_init: + imx_dwmac_clks_config(dwmac, false); +err_clks_config: err_parse_dt: err_match_data: stmmac_remove_config_dt(pdev, plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index 6c19fcc76c6f..06d287f104be 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -85,7 +85,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 0b64f7710d17..80728a4c0e3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -8,9 +8,28 @@ #include "dwmac-intel.h" #include "dwmac4.h" #include "stmmac.h" +#include "stmmac_ptp.h" + +#define INTEL_MGBE_ADHOC_ADDR 0x15 +#define INTEL_MGBE_XPCS_ADDR 0x16 + +/* Selection for PTP Clock Freq belongs to PSE & PCH GbE */ +#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3) +#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0) +#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3) +#define PSE_PTP_CLK_FREQ_256MHZ (0) +#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0) +#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0) +#define PCH_PTP_CLK_FREQ_200MHZ (0) + +/* Cross-timestamping defines */ +#define ART_CPUID_LEAF 0x15 +#define EHL_PSE_ART_MHZ 19200000 struct intel_priv_data { int mdio_adhoc_addr; /* mdio address for serdes & etc */ + unsigned long crossts_adj; + bool is_pse; }; /* This struct is used to associate PCI Function of MAC controller on a board, @@ -134,6 +153,11 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data) return data; } + /* PSE only - ungate SGMII PHY Rx Clock */ + if (intel_priv->is_pse) + mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0, + 0, SERDES_PHY_RX_CLK); + return 0; } @@ -149,6 +173,11 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data) serdes_phy_addr = intel_priv->mdio_adhoc_addr; + /* PSE only - gate SGMII PHY Rx Clock */ + if (intel_priv->is_pse) + mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0, + SERDES_PHY_RX_CLK, 0); + /* move power state to P3 */ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0); @@ -201,6 +230,161 @@ static void intel_serdes_powerdown(struct 
net_device *ndev, void *intel_data) } } +/* Program PTP Clock Frequency for different variant of + * Intel mGBE that has slightly different GPO mapping + */ +static void intel_mgbe_ptp_clk_freq_config(void *npriv) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)npriv; + struct intel_priv_data *intel_priv; + u32 gpio_value; + + intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv; + + gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS); + + if (intel_priv->is_pse) { + /* For PSE GbE, use 200MHz */ + gpio_value &= ~PSE_PTP_CLK_FREQ_MASK; + gpio_value |= PSE_PTP_CLK_FREQ_200MHZ; + } else { + /* For PCH GbE, use 200MHz */ + gpio_value &= ~PCH_PTP_CLK_FREQ_MASK; + gpio_value |= PCH_PTP_CLK_FREQ_200MHZ; + } + + writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS); +} + +static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr, + u64 *art_time) +{ + u64 ns; + + ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1); + ns <<= GMAC4_ART_TIME_SHIFT; + ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0); + + *art_time = ns; +} + +static int intel_crosststamp(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct intel_priv_data *intel_priv; + + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + void __iomem *ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; + unsigned long flags; + u64 art_time = 0; + u64 ptp_time = 0; + u32 num_snapshot; + u32 gpio_value; + u32 acr_value; + int ret; + u32 v; + int i; + + if (!boot_cpu_has(X86_FEATURE_ART)) + return -EOPNOTSUPP; + + intel_priv = priv->plat->bsp_priv; + + /* Both internal crosstimestamping and external triggered event + * timestamping cannot be run concurrently. + */ + if (priv->plat->ext_snapshot_en) + return -EBUSY; + + mutex_lock(&priv->aux_ts_lock); + /* Enable Internal snapshot trigger */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + switch (priv->plat->int_snapshot_num) { + case AUX_SNAPSHOT0: + acr_value |= PTP_ACR_ATSEN0; + break; + case AUX_SNAPSHOT1: + acr_value |= PTP_ACR_ATSEN1; + break; + case AUX_SNAPSHOT2: + acr_value |= PTP_ACR_ATSEN2; + break; + case AUX_SNAPSHOT3: + acr_value |= PTP_ACR_ATSEN3; + break; + default: + mutex_unlock(&priv->aux_ts_lock); + return -EINVAL; + } + writel(acr_value, ptpaddr + PTP_ACR); + + /* Clear FIFO */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value |= PTP_ACR_ATSFC; + writel(acr_value, ptpaddr + PTP_ACR); + /* Release the mutex */ + mutex_unlock(&priv->aux_ts_lock); + + /* Trigger Internal snapshot signal + * Create a rising edge by just toggle the GPO1 to low + * and back to high. 
+ */ + gpio_value = readl(ioaddr + GMAC_GPIO_STATUS); + gpio_value &= ~GMAC_GPO1; + writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); + gpio_value |= GMAC_GPO1; + writel(gpio_value, ioaddr + GMAC_GPIO_STATUS); + + /* Poll for time sync operation done */ + ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v, + (v & GMAC_INT_TSIE), 100, 10000); + + if (ret == -ETIMEDOUT) { + pr_err("%s: Wait for time sync operation timeout\n", __func__); + return ret; + } + + num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) & + GMAC_TIMESTAMP_ATSNS_MASK) >> + GMAC_TIMESTAMP_ATSNS_SHIFT; + + /* Repeat until the timestamps are from the FIFO last segment */ + for (i = 0; i < num_snapshot; i++) { + spin_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_ptptime(priv, ptpaddr, &ptp_time); + *device = ns_to_ktime(ptp_time); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time); + *system = convert_art_to_tsc(art_time); + } + + system->cycles *= intel_priv->crossts_adj; + + return 0; +} + +static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv, + int base) +{ + if (boot_cpu_has(X86_FEATURE_ART)) { + unsigned int art_freq; + + /* On systems that support ART, ART frequency can be obtained + * from ECX register of CPUID leaf (0x15). + */ + art_freq = cpuid_ecx(ART_CPUID_LEAF); + do_div(art_freq, base); + intel_priv->crossts_adj = art_freq; + } +} + static void common_default_data(struct plat_stmmacenet_data *plat) { plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ @@ -263,6 +447,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, /* Disable Priority config by default */ plat->tx_queues_cfg[i].use_prio = false; + /* Default TX Q0 to use TSO and rest TXQ for TBS */ + if (i > 0) + plat->tx_queues_cfg[i].tbs_en = 1; } /* FIFO size is 4096 bytes for 1 tx/rx queue */ @@ -284,6 +471,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->dma_cfg->fixed_burst = 0; plat->dma_cfg->mixed_burst = 0; plat->dma_cfg->aal = 0; + plat->dma_cfg->dche = true; plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), GFP_KERNEL); @@ -319,6 +507,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, return ret; } + plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config; + /* Set default value for multicast hash bins */ plat->multicast_filter_bins = HASH_TABLE_SIZE; @@ -333,6 +523,30 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, /* Use the last Rx queue */ plat->vlan_fail_q = plat->rx_queues_to_use - 1; + /* Intel mgbe SGMII interface uses pcs-xcps */ + if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) { + plat->mdio_bus_data->has_xpcs = true; + plat->mdio_bus_data->xpcs_an_inband = true; + } + + /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */ + plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR; + plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR; + + plat->int_snapshot_num = AUX_SNAPSHOT1; + plat->ext_snapshot_num = AUX_SNAPSHOT0; + + plat->has_crossts = true; + plat->crosststamp = intel_crosststamp; + + /* Setup MSI vector offset specific to Intel mGbE controller */ + plat->msi_mac_vec = 29; + plat->msi_lpi_vec = 28; + plat->msi_sfty_ce_vec = 27; + plat->msi_sfty_ue_vec = 26; + plat->msi_rx_base_vec = 0; + plat->msi_tx_base_vec = 1; + return 0; } @@ -378,8 +592,14 @@ static struct stmmac_pci_info ehl_rgmii1g_info = { static int ehl_pse0_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + struct intel_priv_data *intel_priv = plat->bsp_priv; 
+ + intel_priv->is_pse = true; plat->bus_id = 2; plat->addr64 = 32; + + intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ); + return ehl_common_data(pdev, plat); } @@ -410,8 +630,14 @@ static struct stmmac_pci_info ehl_pse0_sgmii1g_info = { static int ehl_pse1_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + struct intel_priv_data *intel_priv = plat->bsp_priv; + + intel_priv->is_pse = true; plat->bus_id = 3; plat->addr64 = 32; + + intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ); + return ehl_common_data(pdev, plat); } @@ -609,6 +835,79 @@ static const struct stmmac_pci_info quark_info = { .setup = quark_default_data, }; +static int stmmac_config_single_msi(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res) +{ + int ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) { + dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n", + __func__); + return ret; + } + + res->irq = pci_irq_vector(pdev, 0); + res->wol_irq = res->irq; + plat->multi_msi_en = 0; + dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n", + __func__); + + return 0; +} + +static int stmmac_config_multi_msi(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res) +{ + int ret; + int i; + + if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX || + plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) { + dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n", + __func__); + return -1; + } + + ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (ret < 0) { + dev_info(&pdev->dev, "%s: multi MSI enablement failed\n", + __func__); + return ret; + } + + /* For RX MSI */ + for (i = 0; i < plat->rx_queues_to_use; i++) { + res->rx_irq[i] = pci_irq_vector(pdev, + plat->msi_rx_base_vec + i * 2); + } + + /* For TX MSI */ + for (i = 0; i < plat->tx_queues_to_use; i++) { + res->tx_irq[i] = pci_irq_vector(pdev, + plat->msi_tx_base_vec + i * 2); + } + + if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX) + res->irq = pci_irq_vector(pdev, plat->msi_mac_vec); + if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX) + res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec); + if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX) + res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec); + if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX) + res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec); + if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX) + res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec); + + plat->multi_msi_en = 1; + dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__); + + return 0; +} + /** * intel_eth_pci_probe * @@ -650,7 +949,7 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, return -ENOMEM; /* Enable pci device */ - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__); @@ -664,20 +963,27 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, pci_set_master(pdev); plat->bsp_priv = intel_priv; - intel_priv->mdio_adhoc_addr = 0x15; + intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR; + intel_priv->crossts_adj = 1; + + /* Initialize all MSI vectors to invalid so that it can be set + * according to platform data settings below. 
+ * Note: MSI vector takes value from 0 upto 31 (STMMAC_MSI_VEC_MAX) + */ + plat->msi_mac_vec = STMMAC_MSI_VEC_MAX; + plat->msi_wol_vec = STMMAC_MSI_VEC_MAX; + plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX; + plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX; + plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX; + plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX; + plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX; ret = info->setup(pdev, plat); if (ret) return ret; - ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); - if (ret < 0) - return ret; - memset(&res, 0, sizeof(res)); res.addr = pcim_iomap_table(pdev)[0]; - res.wol_irq = pci_irq_vector(pdev, 0); - res.irq = pci_irq_vector(pdev, 0); if (plat->eee_usecs_rate > 0) { u32 tx_lpi_usec; @@ -686,13 +992,28 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER); } + ret = stmmac_config_multi_msi(pdev, plat, &res); + if (ret) { + ret = stmmac_config_single_msi(pdev, plat, &res); + if (ret) { + dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n", + __func__); + goto err_alloc_irq; + } + } + ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) { - pci_free_irq_vectors(pdev); - clk_disable_unprepare(plat->stmmac_clk); - clk_unregister_fixed_rate(plat->stmmac_clk); + goto err_dvr_probe; } + return 0; + +err_dvr_probe: + pci_free_irq_vectors(pdev); +err_alloc_irq: + clk_disable_unprepare(plat->stmmac_clk); + clk_unregister_fixed_rate(plat->stmmac_clk); return ret; } @@ -710,13 +1031,9 @@ static void intel_eth_pci_remove(struct pci_dev *pdev) stmmac_dvr_remove(&pdev->dev); - pci_free_irq_vectors(pdev); - clk_unregister_fixed_rate(priv->plat->stmmac_clk); pcim_iounmap_regions(pdev, BIT(0)); - - pci_disable_device(pdev); } static int __maybe_unused intel_eth_pci_suspend(struct device *dev) @@ -732,7 +1049,6 @@ static int __maybe_unused intel_eth_pci_suspend(struct device *dev) if (ret) return ret; - pci_disable_device(pdev); pci_wake_from_d3(pdev, true); return 0; } @@ -745,7 +1061,7 @@ static int __maybe_unused intel_eth_pci_resume(struct device *dev) pci_restore_state(pdev); pci_set_power_state(pdev, PCI_D0); - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) return ret; @@ -757,41 +1073,41 @@ static int __maybe_unused intel_eth_pci_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend, intel_eth_pci_resume); -#define PCI_DEVICE_ID_INTEL_QUARK_ID 0x0937 -#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G_ID 0x4b30 -#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G_ID 0x4b31 -#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5_ID 0x4b32 +#define PCI_DEVICE_ID_INTEL_QUARK 0x0937 +#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30 +#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31 +#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5 0x4b32 /* Intel(R) Programmable Services Engine (Intel(R) PSE) consist of 2 MAC * which are named PSE0 and PSE1 */ -#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G_ID 0x4ba0 -#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G_ID 0x4ba1 -#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5_ID 0x4ba2 -#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0 -#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1 -#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2 -#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID 0x43ac -#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID 0x43a2 -#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac -#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0_ID 0x7aac -#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1_ID 0x7aad +#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G 0x4ba0 +#define 
PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G 0x4ba1 +#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5 0x4ba2 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G 0x4bb0 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G 0x4bb1 +#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5 0x4bb2 +#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0 0x43ac +#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1 0x43a2 +#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac +#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac +#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad static const struct pci_device_id intel_eth_pci_id_table[] = { - { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID, &ehl_pse0_rgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID, &ehl_pse0_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID, &ehl_pse0_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID, &ehl_pse1_rgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) }, - { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_phy0_info) }, - { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_phy0_info) }, - { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_phy1_info) }, - { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0_ID, &adls_sgmii1g_phy0_info) }, - { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1_ID, &adls_sgmii1g_phy1_info) }, + { PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) }, + { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) }, + { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) }, {} }; MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h index e723096c0b15..542acb8ce467 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h @@ -14,6 +14,7 @@ /* SERDES defines */ #define SERDES_PLL_CLK BIT(0) /* PLL clk valid signal */ +#define SERDES_PHY_RX_CLK BIT(1) /* PSE SGMII PHY rx clk */ #define SERDES_RST BIT(2) /* Serdes Reset */ #define SERDES_PWR_ST_MASK GENMASK(6, 4) /* Serdes Power state*/ #define SERDES_PWR_ST_SHIFT 4 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index bf3250e0e59c..28dd0ed85a82 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c 
@@ -255,7 +255,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) if (val) return val; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -352,6 +352,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = gmac; plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; plat_dat->multicast_filter_bins = 0; + plat_dat->tx_fifo_size = 8192; + plat_dat->rx_fifo_size = 8192; err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (err) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c index 3d3f43d91b98..9d77c647badd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c @@ -37,7 +37,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index 9e4b83832938..58c0feaa8131 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -407,7 +407,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index bbc16b5a410a..16fb66a0ca72 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -52,7 +52,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 848e5c37746b..c7a6588d9398 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -398,7 +398,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c index 8551ea878ba5..adfeb8d3293d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c @@ -118,7 +118,7 @@ static int oxnas_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index bfc4a92f1d92..84382fc5cc4d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -461,7 +461,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); @@ -477,7 +477,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rgmii"); ethqos->rgmii_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ethqos->rgmii_base)) { - dev_err(&pdev->dev, "Can't get rgmii base\n"); ret = PTR_ERR(ethqos->rgmii_base); goto err_mem; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 6ef30252bfe0..8d28a536e1bb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1396,7 +1396,7 @@ static int rk_gmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 70d41783329d..85208128f135 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -398,7 +398,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index e1b63df6f96f..710d7435733e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -325,7 +325,7 @@ static int sti_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 5d4df4c5254e..2b38a499a404 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -371,7 +371,7 @@ static int stm32_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index e62efd166ec8..4422baeed3d8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -239,6 +239,22 @@ static const struct emac_variant emac_variant_h6 = { #define EMAC_RX_EARLY_INT BIT(13) #define EMAC_RGMII_STA_INT BIT(16) +#define EMAC_INT_MSK_COMMON EMAC_RGMII_STA_INT +#define EMAC_INT_MSK_TX (EMAC_TX_INT | \ + EMAC_TX_DMA_STOP_INT | \ + EMAC_TX_BUF_UA_INT | \ + EMAC_TX_TIMEOUT_INT | \ + EMAC_TX_UNDERFLOW_INT | \ + EMAC_TX_EARLY_INT |\ + EMAC_INT_MSK_COMMON) +#define EMAC_INT_MSK_RX (EMAC_RX_INT | \ + EMAC_RX_BUF_UA_INT 
| \ + EMAC_RX_DMA_STOP_INT | \ + EMAC_RX_TIMEOUT_INT | \ + EMAC_RX_OVERFLOW_INT | \ + EMAC_RX_EARLY_INT | \ + EMAC_INT_MSK_COMMON) + #define MAC_ADDR_TYPE_DST BIT(31) /* H3 specific bits for EPHY */ @@ -412,13 +428,19 @@ static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan) } static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan) + struct stmmac_extra_stats *x, u32 chan, + u32 dir) { u32 v; int ret = 0; v = readl(ioaddr + EMAC_INT_STA); + if (dir == DMA_DIR_RX) + v &= EMAC_INT_MSK_RX; + else if (dir == DMA_DIR_TX) + v &= EMAC_INT_MSK_TX; + if (v & EMAC_TX_INT) { ret |= handle_tx; x->tx_normal_irq_n++; @@ -1199,7 +1221,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) if (ret) return -EINVAL; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 0e1ca2cba3c7..527077c98ebc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -108,7 +108,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index d23be45a64e5..d046e33b8a29 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -208,7 +208,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 2bac49b49f73..90383abafa66 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -255,7 +255,7 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr, } static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt, - u32 number_chan) + u32 queue) { writel(riwt, ioaddr + DMA_RX_WATCHDOG); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 82df91c130f7..462ca7ed095a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -42,6 +42,7 @@ #define GMAC_HW_FEATURE3 0x00000128 #define GMAC_MDIO_ADDR 0x00000200 #define GMAC_MDIO_DATA 0x00000204 +#define GMAC_GPIO_STATUS 0x0000020C #define GMAC_ARP_ADDR 0x00000210 #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) @@ -49,6 +50,7 @@ #define GMAC_L4_ADDR(reg) (0x904 + (reg) * 0x30) #define GMAC_L3_ADDR0(reg) (0x910 + (reg) * 0x30) #define GMAC_L3_ADDR1(reg) (0x914 + (reg) * 0x30) +#define GMAC_TIMESTAMP_STATUS 0x00000b20 /* RX Queues Routing */ #define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0) @@ -143,6 +145,7 @@ #define GMAC_INT_PCS_PHYIS BIT(3) #define GMAC_INT_PMT_EN BIT(4) #define GMAC_INT_LPI_EN BIT(5) +#define GMAC_INT_TSIE BIT(12) #define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ 
GMAC_INT_PCS_ANE) @@ -259,6 +262,7 @@ enum power_event { #define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) /* MAC HW features2 bitmap */ +#define GMAC_HW_FEAT_AUXSNAPNUM GENMASK(30, 28) #define GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24) #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) #define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) @@ -278,6 +282,12 @@ enum power_event { #define GMAC_HW_FEAT_DVLAN BIT(5) #define GMAC_HW_FEAT_NRVF GENMASK(2, 0) +/* GMAC GPIO Status reg */ +#define GMAC_GPO0 BIT(16) +#define GMAC_GPO1 BIT(17) +#define GMAC_GPO2 BIT(18) +#define GMAC_GPO3 BIT(19) + /* MAC HW ADDR regs */ #define GMAC_HI_DCS GENMASK(18, 16) #define GMAC_HI_DCS_SHIFT 16 @@ -298,6 +308,11 @@ enum power_event { #define GMAC_L4DP0_SHIFT 16 #define GMAC_L4SP0 GENMASK(15, 0) +/* MAC Timestamp Status */ +#define GMAC_TIMESTAMP_AUXTSTRIG BIT(2) +#define GMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25) +#define GMAC_TIMESTAMP_ATSNS_SHIFT 25 + /* MTL registers */ #define MTL_OPERATION_MODE 0x00000c00 #define MTL_FRPE BIT(15) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 29f765a246a0..f35c03c9f91e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -53,6 +53,10 @@ static void dwmac4_core_init(struct mac_device_info *hw, if (hw->pcs) value |= GMAC_PCS_IRQ_DEFAULT; + /* Enable FPE interrupt */ + if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26) + value |= GMAC_INT_FPE_EN; + writel(value, ioaddr + GMAC_INT_EN); } @@ -638,6 +642,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw, value &= ~GMAC_PACKET_FILTER_PCF; value &= ~GMAC_PACKET_FILTER_PM; value &= ~GMAC_PACKET_FILTER_PR; + value &= ~GMAC_PACKET_FILTER_RA; if (dev->flags & IFF_PROMISC) { /* VLAN Tag Filter Fail Packets Queuing */ if (hw->vlan_fail_q_en) { @@ -1245,6 +1250,8 @@ const struct stmmac_ops dwmac410_ops = { .config_l4_filter = dwmac4_config_l4_filter, .est_configure = dwmac5_est_configure, .fpe_configure = dwmac5_fpe_configure, + .fpe_send_mpacket = dwmac5_fpe_send_mpacket, + .fpe_irq_status = dwmac5_fpe_irq_status, .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, @@ -1294,6 +1301,8 @@ const struct stmmac_ops dwmac510_ops = { .config_l4_filter = dwmac4_config_l4_filter, .est_configure = dwmac5_est_configure, .fpe_configure = dwmac5_fpe_configure, + .fpe_send_mpacket = dwmac5_fpe_send_mpacket, + .fpe_irq_status = dwmac5_fpe_irq_status, .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 62aa0e95beb7..5be8e6a631d9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -161,6 +161,19 @@ static void dwmac4_dma_init(void __iomem *ioaddr, value |= DMA_SYS_BUS_EAME; writel(value, ioaddr + DMA_SYS_BUS_MODE); + + value = readl(ioaddr + DMA_BUS_MODE); + + if (dma_cfg->multi_msi_en) { + value &= ~DMA_BUS_MODE_INTM_MASK; + value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT); + } + + if (dma_cfg->dche) + value |= DMA_BUS_MODE_DCHE; + + writel(value, ioaddr + DMA_BUS_MODE); + } static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, @@ -210,19 +223,16 @@ static void dwmac4_dump_dma_regs(void 
__iomem *ioaddr, u32 *reg_space) _dwmac4_dump_dma_regs(ioaddr, i, reg_space); } -static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan) +static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue) { - u32 chan; - - for (chan = 0; chan < number_chan; chan++) - writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan)); + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue)); } static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, u32 channel, int fifosz, u8 qmode) { unsigned int rqs = fifosz / 256 - 1; - u32 mtl_rx_op, mtl_rx_int; + u32 mtl_rx_op; mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); @@ -283,11 +293,6 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, } writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel)); - - /* Enable MTL RX overflow */ - mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel)); - writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN, - ioaddr + MTL_CHAN_INT_CTRL(channel)); } static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, @@ -415,6 +420,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, /* IEEE 1588-2002 */ dma_cap->time_stamp = 0; + /* Number of Auxiliary Snapshot Inputs */ + dma_cap->aux_snapshot_n = (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28; /* MAC HW feature3 */ hw_cap = readl(ioaddr + GMAC_HW_FEATURE3); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index 8391ca63d943..9321879b599c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -25,6 +25,10 @@ #define DMA_TBS_CTRL 0x00001050 /* DMA Bus Mode bitmap */ +#define DMA_BUS_MODE_DCHE BIT(19) +#define DMA_BUS_MODE_INTM_MASK GENMASK(17, 16) +#define DMA_BUS_MODE_INTM_SHIFT 16 +#define DMA_BUS_MODE_INTM_MODE1 0x1 #define DMA_BUS_MODE_SFT_RESET BIT(0) /* DMA SYS Bus Mode bitmap */ @@ -149,6 +153,25 @@ #define DMA_CHAN_STATUS_TPS BIT(1) #define DMA_CHAN_STATUS_TI BIT(0) +#define DMA_CHAN_STATUS_MSK_COMMON (DMA_CHAN_STATUS_NIS | \ + DMA_CHAN_STATUS_AIS | \ + DMA_CHAN_STATUS_CDE | \ + DMA_CHAN_STATUS_FBE) + +#define DMA_CHAN_STATUS_MSK_RX (DMA_CHAN_STATUS_REB | \ + DMA_CHAN_STATUS_ERI | \ + DMA_CHAN_STATUS_RWT | \ + DMA_CHAN_STATUS_RPS | \ + DMA_CHAN_STATUS_RBU | \ + DMA_CHAN_STATUS_RI | \ + DMA_CHAN_STATUS_MSK_COMMON) + +#define DMA_CHAN_STATUS_MSK_TX (DMA_CHAN_STATUS_ETI | \ + DMA_CHAN_STATUS_TBU | \ + DMA_CHAN_STATUS_TPS | \ + DMA_CHAN_STATUS_TI | \ + DMA_CHAN_STATUS_MSK_COMMON) + /* Interrupt enable bits per channel */ #define DMA_CHAN_INTR_ENA_NIE BIT(16) #define DMA_CHAN_INTR_ENA_AIE BIT(15) @@ -206,7 +229,7 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan); void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan); void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan); int dwmac4_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan); + struct stmmac_extra_stats *x, u32 chan, u32 dir); void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 71e50751ef2d..e63270267578 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -135,12 +135,17 @@ void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool 
rx, bool tx) } int dwmac4_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan) + struct stmmac_extra_stats *x, u32 chan, u32 dir) { u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); int ret = 0; + if (dir == DMA_DIR_RX) + intr_status &= DMA_CHAN_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= DMA_CHAN_STATUS_MSK_TX; + /* ABNORMAL interrupts */ if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { if (unlikely(intr_status & DMA_CHAN_STATUS_RBU)) @@ -161,20 +166,19 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, } } /* TX/RX NORMAL interrupts */ - if (likely(intr_status & DMA_CHAN_STATUS_NIS)) { + if (likely(intr_status & DMA_CHAN_STATUS_NIS)) x->normal_irq_n++; - if (likely(intr_status & DMA_CHAN_STATUS_RI)) { - x->rx_normal_irq_n++; - ret |= handle_rx; - } - if (likely(intr_status & (DMA_CHAN_STATUS_TI | - DMA_CHAN_STATUS_TBU))) { - x->tx_normal_irq_n++; - ret |= handle_tx; - } - if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) - x->rx_early_irq++; + if (likely(intr_status & DMA_CHAN_STATUS_RI)) { + x->rx_normal_irq_n++; + ret |= handle_rx; + } + if (likely(intr_status & (DMA_CHAN_STATUS_TI | + DMA_CHAN_STATUS_TBU))) { + x->tx_normal_irq_n++; + ret |= handle_tx; } + if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) + x->rx_early_irq++; writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan)); return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index 8f7ac24545ef..d8c6ff725237 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -192,6 +192,7 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp) /* 1. Enable Safety Features */ value = readl(ioaddr + MTL_ECC_CONTROL); + value |= MEEAO; /* MTL ECC Error Addr Status Override */ value |= TSOEE; /* TSO ECC */ value |= MRXPEE; /* MTL RX Parser ECC */ value |= MESTEE; /* MTL EST ECC */ @@ -595,9 +596,95 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, ctrl &= ~EEST; writel(ctrl, ioaddr + MTL_EST_CONTROL); + + /* Configure EST interrupt */ + if (cfg->enable) + ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC); + else + ctrl = 0; + + writel(ctrl, ioaddr + MTL_EST_INT_EN); + return 0; } +void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt) +{ + u32 status, value, feqn, hbfq, hbfs, btrl; + u32 txqcnt_mask = (1 << txqcnt) - 1; + + status = readl(ioaddr + MTL_EST_STATUS); + + value = (CGCE | HLBS | HLBF | BTRE | SWLC); + + /* Return if there is no error */ + if (!(status & value)) + return; + + if (status & CGCE) { + /* Clear Interrupt */ + writel(CGCE, ioaddr + MTL_EST_STATUS); + + x->mtl_est_cgce++; + } + + if (status & HLBS) { + value = readl(ioaddr + MTL_EST_SCH_ERR); + value &= txqcnt_mask; + + x->mtl_est_hlbs++; + + /* Clear Interrupt */ + writel(value, ioaddr + MTL_EST_SCH_ERR); + + /* Collecting info to shows all the queues that has HLBS + * issue. 
The only way to clear this is to clear the + * statistic + */ + if (net_ratelimit()) + netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value); + } + + if (status & HLBF) { + value = readl(ioaddr + MTL_EST_FRM_SZ_ERR); + feqn = value & txqcnt_mask; + + value = readl(ioaddr + MTL_EST_FRM_SZ_CAP); + hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT; + hbfs = value & SZ_CAP_HBFS_MASK; + + x->mtl_est_hlbf++; + + /* Clear Interrupt */ + writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR); + + if (net_ratelimit()) + netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n", + hbfq, hbfs); + } + + if (status & BTRE) { + if ((status & BTRL) == BTRL_MAX) + x->mtl_est_btrlm++; + else + x->mtl_est_btre++; + + btrl = (status & BTRL) >> BTRL_SHIFT; + + if (net_ratelimit()) + netdev_info(dev, "EST: BTR Error Loop Count %u\n", + btrl); + + writel(BTRE, ioaddr + MTL_EST_STATUS); + } + + if (status & SWLC) { + writel(SWLC, ioaddr + MTL_EST_STATUS); + netdev_info(dev, "EST: SWOL has been switched\n"); + } +} + void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, bool enable) { @@ -621,3 +708,52 @@ void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, value |= EFPE; writel(value, ioaddr + MAC_FPE_CTRL_STS); } + +int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) +{ + u32 value; + int status; + + status = FPE_EVENT_UNKNOWN; + + value = readl(ioaddr + MAC_FPE_CTRL_STS); + + if (value & TRSP) { + status |= FPE_EVENT_TRSP; + netdev_info(dev, "FPE: Respond mPacket is transmitted\n"); + } + + if (value & TVER) { + status |= FPE_EVENT_TVER; + netdev_info(dev, "FPE: Verify mPacket is transmitted\n"); + } + + if (value & RRSP) { + status |= FPE_EVENT_RRSP; + netdev_info(dev, "FPE: Respond mPacket is received\n"); + } + + if (value & RVER) { + status |= FPE_EVENT_RVER; + netdev_info(dev, "FPE: Verify mPacket is received\n"); + } + + return status; +} + +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type) +{ + u32 value; + + value = readl(ioaddr + MAC_FPE_CTRL_STS); + + if (type == MPACKET_VERIFY) { + value &= ~SRSP; + value |= SVER; + } else { + value &= ~SVER; + value |= SRSP; + } + + writel(value, ioaddr + MAC_FPE_CTRL_STS); +} diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index 56b0762c1276..6b2fd37b29ad 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -12,6 +12,12 @@ #define TMOUTEN BIT(0) #define MAC_FPE_CTRL_STS 0x00000234 +#define TRSP BIT(19) +#define TVER BIT(18) +#define RRSP BIT(17) +#define RVER BIT(16) +#define SRSP BIT(2) +#define SVER BIT(1) #define EFPE BIT(0) #define MAC_PPS_CONTROL 0x00000b70 @@ -38,6 +44,36 @@ #define PTOV_SHIFT 24 #define SSWL BIT(1) #define EEST BIT(0) + +#define MTL_EST_STATUS 0x00000c58 +#define BTRL GENMASK(11, 8) +#define BTRL_SHIFT 8 +#define BTRL_MAX (0xF << BTRL_SHIFT) +#define SWOL BIT(7) +#define SWOL_SHIFT 7 +#define CGCE BIT(4) +#define HLBS BIT(3) +#define HLBF BIT(2) +#define BTRE BIT(1) +#define SWLC BIT(0) + +#define MTL_EST_SCH_ERR 0x00000c60 +#define MTL_EST_FRM_SZ_ERR 0x00000c64 +#define MTL_EST_FRM_SZ_CAP 0x00000c68 +#define SZ_CAP_HBFS_MASK GENMASK(14, 0) +#define SZ_CAP_HBFQ_SHIFT 16 +#define SZ_CAP_HBFQ_MASK(_val) ({ typeof(_val) (val) = (_val); \ + ((val) > 4 ? GENMASK(18, 16) : \ + (val) > 2 ? 
GENMASK(17, 16) : \ + BIT(16)); }) + +#define MTL_EST_INT_EN 0x00000c70 +#define IECGCE CGCE +#define IEHS HLBS +#define IEHF HLBF +#define IEBE BTRE +#define IECC SWLC + #define MTL_EST_GCL_CONTROL 0x00000c80 #define BTR_LOW 0x0 #define BTR_HIGH 0x1 @@ -62,6 +98,7 @@ #define ADDR GENMASK(15, 0) #define MTL_RXP_IACC_DATA 0x00000cb4 #define MTL_ECC_CONTROL 0x00000cc0 +#define MEEAO BIT(8) #define TSOEE BIT(4) #define MRXPEE BIT(3) #define MESTEE BIT(2) @@ -98,6 +135,8 @@ #define GMAC_RXQCTRL_VFFQ_SHIFT 17 #define GMAC_RXQCTRL_VFFQE BIT(16) +#define GMAC_INT_FPE_EN BIT(17) + int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp); int dwmac5_safety_feat_irq_status(struct net_device *ndev, void __iomem *ioaddr, unsigned int asp, @@ -111,7 +150,12 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, u32 sub_second_inc, u32 systime_flags); int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, unsigned int ptp_rate); +void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt); void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, bool enable); +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, + enum stmmac_mpacket_type type); +int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev); #endif /* __DWMAC5_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index e5dbd0bc257e..1914ad698cab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -128,6 +128,26 @@ #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ +#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \ + DMA_STATUS_AIS | \ + DMA_STATUS_FBI) + +#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \ + DMA_STATUS_RWT | \ + DMA_STATUS_RPS | \ + DMA_STATUS_RU | \ + DMA_STATUS_RI | \ + DMA_STATUS_OVF | \ + DMA_STATUS_MSK_COMMON) + +#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \ + DMA_STATUS_UNF | \ + DMA_STATUS_TJT | \ + DMA_STATUS_TU | \ + DMA_STATUS_TPS | \ + DMA_STATUS_TI | \ + DMA_STATUS_MSK_COMMON) + #define NUM_DWMAC100_DMA_REGS 9 #define NUM_DWMAC1000_DMA_REGS 23 @@ -139,7 +159,7 @@ void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan); void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan); void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan); int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, - u32 chan); + u32 chan, u32 dir); int dwmac_dma_reset(void __iomem *ioaddr); #endif /* __DWMAC_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 57a53a600aa5..d1c31200bb91 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -155,7 +155,7 @@ static void show_rx_process_state(unsigned int status) #endif int dwmac_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan) + struct stmmac_extra_stats *x, u32 chan, u32 dir) { int ret = 0; /* read the status register (CSR5) */ @@ -167,6 +167,12 @@ int dwmac_dma_interrupt(void __iomem *ioaddr, show_tx_process_state(intr_status); show_rx_process_state(intr_status); #endif + + if (dir == DMA_DIR_RX) + intr_status &= DMA_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= DMA_STATUS_MSK_TX; + /* ABNORMAL interrupts */ if (unlikely(intr_status & DMA_STATUS_AIS)) { if (unlikely(intr_status & 
DMA_STATUS_UNF)) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 6c3b8a950f58..1913385df685 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -412,6 +412,12 @@ #define XGMAC_TI BIT(0) #define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4) +#define XGMAC_DMA_STATUS_MSK_COMMON (XGMAC_NIS | XGMAC_AIS | XGMAC_FBE) +#define XGMAC_DMA_STATUS_MSK_RX (XGMAC_RBU | XGMAC_RI | \ + XGMAC_DMA_STATUS_MSK_COMMON) +#define XGMAC_DMA_STATUS_MSK_TX (XGMAC_TBU | XGMAC_TPS | XGMAC_TI | \ + XGMAC_DMA_STATUS_MSK_COMMON) + /* Descriptors */ #define XGMAC_TDES0_LTV BIT(31) #define XGMAC_TDES0_LT GENMASK(7, 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 77308c5c5d29..906e985441a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -323,12 +323,18 @@ static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan) } static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan) + struct stmmac_extra_stats *x, u32 chan, + u32 dir) { u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); int ret = 0; + if (dir == DMA_DIR_RX) + intr_status &= XGMAC_DMA_STATUS_MSK_RX; + else if (dir == DMA_DIR_TX) + intr_status &= XGMAC_DMA_STATUS_MSK_TX; + /* ABNORMAL interrupts */ if (unlikely(intr_status & XGMAC_AIS)) { if (unlikely(intr_status & XGMAC_RBU)) { @@ -441,12 +447,9 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr, dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3; } -static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan) +static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue) { - u32 i; - - for (i = 0; i < nchan; i++) - writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i)); + writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue)); } static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 979ac9fca23c..6d5e0f2b03ce 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -201,12 +201,12 @@ struct stmmac_dma_ops { void (*start_rx)(void __iomem *ioaddr, u32 chan); void (*stop_rx)(void __iomem *ioaddr, u32 chan); int (*dma_interrupt) (void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan); + struct stmmac_extra_stats *x, u32 chan, u32 dir); /* If supported then get the optional core features */ void (*get_hw_feature)(void __iomem *ioaddr, struct dma_features *dma_cap); /* Program the HW RX Watchdog */ - void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue); void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); @@ -280,7 +280,6 @@ struct stmmac_dma_ops { struct mac_device_info; struct net_device; struct rgmii_adv; -struct stmmac_safety_stats; struct stmmac_tc_entry; struct stmmac_pps_cfg; struct stmmac_rss; @@ -393,8 +392,13 @@ struct stmmac_ops { void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr); int (*est_configure)(void __iomem *ioaddr, 
struct stmmac_est *cfg, unsigned int ptp_rate); + void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev, + struct stmmac_extra_stats *x, u32 txqcnt); void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, bool enable); + void (*fpe_send_mpacket)(void __iomem *ioaddr, + enum stmmac_mpacket_type type); + int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev); }; #define stmmac_core_init(__priv, __args...) \ @@ -491,8 +495,16 @@ struct stmmac_ops { stmmac_do_void_callback(__priv, mac, set_arp_offload, __args) #define stmmac_est_configure(__priv, __args...) \ stmmac_do_callback(__priv, mac, est_configure, __args) +#define stmmac_est_irq_status(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, est_irq_status, __args) #define stmmac_fpe_configure(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, fpe_configure, __args) +#define stmmac_fpe_send_mpacket(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args) +#define stmmac_fpe_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, fpe_irq_status, __args) + +struct stmmac_priv; /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { @@ -504,6 +516,8 @@ struct stmmac_hwtimestamp { int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, int add_sub, int gmac4); void (*get_systime) (void __iomem *ioaddr, u64 *systime); + void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time); + void (*timestamp_interrupt)(struct stmmac_priv *priv); }; #define stmmac_config_hw_tstamping(__priv, __args...) \ @@ -518,6 +532,10 @@ struct stmmac_hwtimestamp { stmmac_do_callback(__priv, ptp, adjust_systime, __args) #define stmmac_get_systime(__priv, __args...) \ stmmac_do_void_callback(__priv, ptp, get_systime, __args) +#define stmmac_get_ptptime(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, get_ptptime, __args) +#define stmmac_timestamp_interrupt(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args) /* Helpers to manage the descriptors for chain and ring modes */ struct stmmac_mode_ops { @@ -546,7 +564,6 @@ struct stmmac_mode_ops { #define stmmac_clean_desc3(__priv, __args...) 
\ stmmac_do_void_callback(__priv, mode, clean_desc3, __args) -struct stmmac_priv; struct tc_cls_u32_offload; struct tc_cbs_qopt_offload; struct flow_cls_offload; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index e553b9a1f785..b6cd43eda7ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -26,10 +26,21 @@ struct stmmac_resources { void __iomem *addr; - const char *mac; + u8 mac[ETH_ALEN]; int wol_irq; int lpi_irq; int irq; + int sfty_ce_irq; + int sfty_ue_irq; + int rx_irq[MTL_MAX_RX_QUEUES]; + int tx_irq[MTL_MAX_TX_QUEUES]; +}; + +enum stmmac_txbuf_type { + STMMAC_TXBUF_T_SKB, + STMMAC_TXBUF_T_XDP_TX, + STMMAC_TXBUF_T_XDP_NDO, + STMMAC_TXBUF_T_XSK_TX, }; struct stmmac_tx_info { @@ -38,6 +49,7 @@ struct stmmac_tx_info { unsigned len; bool last_segment; bool is_jumbo; + enum stmmac_txbuf_type buf_type; }; #define STMMAC_TBS_AVAIL BIT(0) @@ -53,8 +65,13 @@ struct stmmac_tx_queue { struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; struct dma_edesc *dma_entx; struct dma_desc *dma_tx; - struct sk_buff **tx_skbuff; + union { + struct sk_buff **tx_skbuff; + struct xdp_frame **xdpf; + }; struct stmmac_tx_info *tx_skbuff_dma; + struct xsk_buff_pool *xsk_pool; + u32 xsk_frames_done; unsigned int cur_tx; unsigned int dirty_tx; dma_addr_t dma_tx_phy; @@ -63,15 +80,23 @@ struct stmmac_tx_queue { }; struct stmmac_rx_buffer { - struct page *page; + union { + struct { + struct page *page; + dma_addr_t addr; + __u32 page_offset; + }; + struct xdp_buff *xdp; + }; struct page *sec_page; - dma_addr_t addr; dma_addr_t sec_addr; }; struct stmmac_rx_queue { u32 rx_count_frames; u32 queue_index; + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; struct page_pool *page_pool; struct stmmac_rx_buffer *buf_pool; struct stmmac_priv *priv_data; @@ -79,6 +104,7 @@ struct stmmac_rx_queue { struct dma_desc *dma_rx ____cacheline_aligned_in_smp; unsigned int cur_rx; unsigned int dirty_rx; + unsigned int buf_alloc_num; u32 rx_zeroc_thresh; dma_addr_t dma_rx_phy; u32 rx_tail_addr; @@ -93,6 +119,7 @@ struct stmmac_rx_queue { struct stmmac_channel { struct napi_struct rx_napi ____cacheline_aligned_in_smp; struct napi_struct tx_napi ____cacheline_aligned_in_smp; + struct napi_struct rxtx_napi ____cacheline_aligned_in_smp; struct stmmac_priv *priv_data; spinlock_t lock; u32 index; @@ -147,20 +174,21 @@ struct stmmac_flow_entry { struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ - u32 tx_coal_frames; - u32 tx_coal_timer; - u32 rx_coal_frames; + u32 tx_coal_frames[MTL_MAX_TX_QUEUES]; + u32 tx_coal_timer[MTL_MAX_TX_QUEUES]; + u32 rx_coal_frames[MTL_MAX_TX_QUEUES]; int tx_coalesce; int hwts_tx_en; bool tx_path_in_lpi_mode; bool tso; int sph; + int sph_cap; u32 sarc_type; unsigned int dma_buf_sz; unsigned int rx_copybreak; - u32 rx_riwt; + u32 rx_riwt[MTL_MAX_TX_QUEUES]; int hwts_rx_en; void __iomem *ioaddr; @@ -222,9 +250,24 @@ struct stmmac_priv { int use_riwt; int irq_wake; spinlock_t ptp_lock; + /* Protects auxiliary snapshot registers from concurrent access. 
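/*
 * Illustrative sketch (not part of the patch): why the per-descriptor
 * buf_type tag introduced above is needed once tx_skbuff and xdpf share
 * a union.  Cleanup must dispatch on the tag to pick the right release
 * path.  Types and callbacks here are simplified stand-ins, not driver
 * code.
 */
enum ex_txbuf_type { EX_T_SKB, EX_T_XDP_TX, EX_T_XDP_NDO, EX_T_XSK };

struct ex_tx_slot {
	enum ex_txbuf_type type;
	union {
		void *skb;	/* stands in for struct sk_buff *   */
		void *xdpf;	/* stands in for struct xdp_frame * */
	};
};

static void ex_free_tx_slot(struct ex_tx_slot *slot,
			    void (*free_skb)(void *),
			    void (*free_xdpf)(void *))
{
	switch (slot->type) {
	case EX_T_SKB:
		free_skb(slot->skb);
		break;
	case EX_T_XDP_TX:
	case EX_T_XDP_NDO:
		free_xdpf(slot->xdpf);
		break;
	case EX_T_XSK:
		/* nothing owned here; completion is reported to the XSK pool */
		break;
	}
	slot->skb = NULL;
}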
*/ + struct mutex aux_ts_lock; + void __iomem *mmcaddr; void __iomem *ptpaddr; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + int sfty_ce_irq; + int sfty_ue_irq; + int rx_irq[MTL_MAX_RX_QUEUES]; + int tx_irq[MTL_MAX_TX_QUEUES]; + /*irq name */ + char int_name_mac[IFNAMSIZ + 9]; + char int_name_wol[IFNAMSIZ + 9]; + char int_name_lpi[IFNAMSIZ + 9]; + char int_name_sfty_ce[IFNAMSIZ + 10]; + char int_name_sfty_ue[IFNAMSIZ + 10]; + char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14]; + char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18]; #ifdef CONFIG_DEBUG_FS struct dentry *dbgfs_dir; @@ -234,6 +277,12 @@ struct stmmac_priv { struct workqueue_struct *wq; struct work_struct service_task; + /* Workqueue for handling FPE hand-shaking */ + unsigned long fpe_task_state; + struct workqueue_struct *fpe_wq; + struct work_struct fpe_task; + char wq_name[IFNAMSIZ + 4]; + /* TC Handling */ unsigned int tc_entries_max; unsigned int tc_off_max; @@ -246,6 +295,10 @@ struct stmmac_priv { /* Receive Side Scaling */ struct stmmac_rss rss; + + /* XDP BPF Program */ + unsigned long *af_xdp_zc_qps; + struct bpf_prog *xdp_prog; }; enum stmmac_state { @@ -262,6 +315,8 @@ void stmmac_set_ethtool_ops(struct net_device *netdev); void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); +int stmmac_open(struct net_device *dev); +int stmmac_release(struct net_device *dev); int stmmac_resume(struct device *dev); int stmmac_suspend(struct device *dev); int stmmac_dvr_remove(struct device *dev); @@ -272,6 +327,27 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv); bool stmmac_eee_init(struct stmmac_priv *priv); int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt); int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size); +int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled); +void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable); + +static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv) +{ + return !!priv->xdp_prog; +} + +static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv) +{ + if (stmmac_xdp_is_enabled(priv)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue); +void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue); +int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags); #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS) void stmmac_selftest_run(struct net_device *dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index c5642985ef95..61b11639ee0c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -158,6 +158,12 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { /* TSO */ STMMAC_STAT(tx_tso_frames), STMMAC_STAT(tx_tso_nfrags), + /* EST */ + STMMAC_STAT(mtl_est_cgce), + STMMAC_STAT(mtl_est_hlbs), + STMMAC_STAT(mtl_est_hlbf), + STMMAC_STAT(mtl_est_btre), + STMMAC_STAT(mtl_est_btrlm), }; #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) @@ -756,28 +762,75 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) return (riwt * 256) / (clk / 1000000); } -static int stmmac_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) +static int 
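/*
 * Illustrative sketch (not part of the patch): the effect of the
 * stmmac_rx_offset() helper added above.  When an XDP program is
 * attached, each RX buffer starts XDP_PACKET_HEADROOM bytes into its
 * page so the program can grow headers with bpf_xdp_adjust_head().
 * The 256-byte value is only an example (XDP_PACKET_HEADROOM is 256 in
 * current kernels).
 */
static unsigned long ex_rx_buf_dma_addr(unsigned long page_dma_addr,
					int xdp_enabled)
{
	unsigned int headroom = xdp_enabled ? 256 : 0;

	return page_dma_addr + headroom;	/* DMA target for the frame data */
}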
__stmmac_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) { struct stmmac_priv *priv = netdev_priv(dev); + u32 max_cnt; + u32 rx_cnt; + u32 tx_cnt; - ec->tx_coalesce_usecs = priv->tx_coal_timer; - ec->tx_max_coalesced_frames = priv->tx_coal_frames; + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); - if (priv->use_riwt) { - ec->rx_max_coalesced_frames = priv->rx_coal_frames; - ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv); + if (queue < 0) + queue = 0; + else if (queue >= max_cnt) + return -EINVAL; + + if (queue < tx_cnt) { + ec->tx_coalesce_usecs = priv->tx_coal_timer[queue]; + ec->tx_max_coalesced_frames = priv->tx_coal_frames[queue]; + } else { + ec->tx_coalesce_usecs = 0; + ec->tx_max_coalesced_frames = 0; + } + + if (priv->use_riwt && queue < rx_cnt) { + ec->rx_max_coalesced_frames = priv->rx_coal_frames[queue]; + ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt[queue], + priv); + } else { + ec->rx_max_coalesced_frames = 0; + ec->rx_coalesce_usecs = 0; } return 0; } -static int stmmac_set_coalesce(struct net_device *dev, +static int stmmac_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { + return __stmmac_get_coalesce(dev, ec, -1); +} + +static int stmmac_get_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_get_coalesce(dev, ec, queue); +} + +static int __stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) +{ struct stmmac_priv *priv = netdev_priv(dev); - u32 rx_cnt = priv->plat->rx_queues_to_use; + bool all_queues = false; unsigned int rx_riwt; + u32 max_cnt; + u32 rx_cnt; + u32 tx_cnt; + + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); + + if (queue < 0) + all_queues = true; + else if (queue >= max_cnt) + return -EINVAL; if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) { rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); @@ -785,8 +838,23 @@ static int stmmac_set_coalesce(struct net_device *dev, if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) return -EINVAL; - priv->rx_riwt = rx_riwt; - stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); + if (all_queues) { + int i; + + for (i = 0; i < rx_cnt; i++) { + priv->rx_riwt[i] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, i); + priv->rx_coal_frames[i] = + ec->rx_max_coalesced_frames; + } + } else if (queue < rx_cnt) { + priv->rx_riwt[queue] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, queue); + priv->rx_coal_frames[queue] = + ec->rx_max_coalesced_frames; + } } if ((ec->tx_coalesce_usecs == 0) && @@ -797,13 +865,37 @@ static int stmmac_set_coalesce(struct net_device *dev, (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) return -EINVAL; - /* Only copy relevant parameters, ignore all others. 
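/*
 * Illustrative sketch (not part of the patch): the RX watchdog (RIWT)
 * conversion used by stmmac_riwt2usec() above.  RIWT counts in units of
 * 256 CSR clock cycles, so both directions of the conversion are simple
 * integer arithmetic.  The 250 MHz CSR clock is only an example value.
 */
static unsigned int ex_riwt_to_usec(unsigned int riwt, unsigned long csr_clk_hz)
{
	return (riwt * 256) / (csr_clk_hz / 1000000);	/* e.g. riwt=100 @ 250 MHz -> 102 us */
}

static unsigned int ex_usec_to_riwt(unsigned int usec, unsigned long csr_clk_hz)
{
	return (usec * (csr_clk_hz / 1000000)) / 256;	/* inverse, rounded down */
}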
*/ - priv->tx_coal_frames = ec->tx_max_coalesced_frames; - priv->tx_coal_timer = ec->tx_coalesce_usecs; - priv->rx_coal_frames = ec->rx_max_coalesced_frames; + if (all_queues) { + int i; + + for (i = 0; i < tx_cnt; i++) { + priv->tx_coal_frames[i] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[i] = + ec->tx_coalesce_usecs; + } + } else if (queue < tx_cnt) { + priv->tx_coal_frames[queue] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[queue] = + ec->tx_coalesce_usecs; + } + return 0; } +static int stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + return __stmmac_set_coalesce(dev, ec, -1); +} + +static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_set_coalesce(dev, ec, queue); +} + static int stmmac_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *rxnfc, u32 *rule_locs) { @@ -1001,6 +1093,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = { .get_ts_info = stmmac_get_ts_info, .get_coalesce = stmmac_get_coalesce, .set_coalesce = stmmac_set_coalesce, + .get_per_queue_coalesce = stmmac_get_per_queue_coalesce, + .set_per_queue_coalesce = stmmac_set_per_queue_coalesce, .get_channels = stmmac_get_channels, .set_channels = stmmac_set_channels, .get_tunable = stmmac_get_tunable, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index d291612eeafb..074e2cdfb0fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -12,8 +12,11 @@ #include <linux/io.h> #include <linux/iopoll.h> #include <linux/delay.h> +#include <linux/ptp_clock_kernel.h> #include "common.h" #include "stmmac_ptp.h" +#include "dwmac4.h" +#include "stmmac.h" static void config_hw_tstamping(void __iomem *ioaddr, u32 data) { @@ -153,6 +156,51 @@ static void get_systime(void __iomem *ioaddr, u64 *systime) *systime = ns; } +static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) +{ + u64 ns; + + ns = readl(ptpaddr + PTP_ATNR); + ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC; + + *ptp_time = ns; +} + +static void timestamp_interrupt(struct stmmac_priv *priv) +{ + u32 num_snapshot, ts_status, tsync_int; + struct ptp_clock_event event; + unsigned long flags; + u64 ptp_time; + int i; + + tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE; + + if (!tsync_int) + return; + + /* Read timestamp status to clear interrupt from either external + * timestamp or start/end of PPS. 
+ */ + ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS); + + if (!priv->plat->ext_snapshot_en) + return; + + num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >> + GMAC_TIMESTAMP_ATSNS_SHIFT; + + for (i = 0; i < num_snapshot; i++) { + spin_lock_irqsave(&priv->ptp_lock, flags); + get_ptptime(priv->ptpaddr, &ptp_time); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ptp_time; + ptp_clock_event(priv->ptp_clock, &event); + } +} + const struct stmmac_hwtimestamp stmmac_ptp = { .config_hw_tstamping = config_hw_tstamping, .init_systime = init_systime, @@ -160,4 +208,6 @@ const struct stmmac_hwtimestamp stmmac_ptp = { .config_addend = config_addend, .adjust_systime = adjust_systime, .get_systime = get_systime, + .get_ptptime = get_ptptime, + .timestamp_interrupt = timestamp_interrupt, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 208cae344ffa..345b4c6d1fd4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -28,6 +28,7 @@ #include <linux/if_vlan.h> #include <linux/dma-mapping.h> #include <linux/slab.h> +#include <linux/pm_runtime.h> #include <linux/prefetch.h> #include <linux/pinctrl/consumer.h> #ifdef CONFIG_DEBUG_FS @@ -37,9 +38,12 @@ #include <linux/net_tstamp.h> #include <linux/phylink.h> #include <linux/udp.h> +#include <linux/bpf_trace.h> #include <net/pkt_cls.h> +#include <net/xdp_sock_drv.h> #include "stmmac_ptp.h" #include "stmmac.h" +#include "stmmac_xdp.h" #include <linux/reset.h> #include <linux/of_mdio.h> #include "dwmac1000.h" @@ -66,6 +70,16 @@ MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4) #define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4) +/* Limit to make sure XDP TX and slow path can coexist */ +#define STMMAC_XSK_TX_BUDGET_MAX 256 +#define STMMAC_TX_XSK_AVAIL 16 +#define STMMAC_RX_FILL_BATCH 16 + +#define STMMAC_XDP_PASS 0 +#define STMMAC_XDP_CONSUMED BIT(0) +#define STMMAC_XDP_TX BIT(1) +#define STMMAC_XDP_REDIRECT BIT(2) + static int flow_ctrl = FLOW_AUTO; module_param(flow_ctrl, int, 0644); MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); @@ -104,6 +118,13 @@ module_param(chain_mode, int, 0444); MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); +/* For MSI interrupts handling */ +static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); +static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); +static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); +static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); +static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); +static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); #ifdef CONFIG_DEBUG_FS static const struct net_device_ops stmmac_netdev_ops; @@ -113,6 +134,38 @@ static void stmmac_exit_fs(struct net_device *dev); #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) +int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) +{ + int ret = 0; + + if (enabled) { + ret = clk_prepare_enable(priv->plat->stmmac_clk); + if (ret) + return ret; + ret = clk_prepare_enable(priv->plat->pclk); + if (ret) { + clk_disable_unprepare(priv->plat->stmmac_clk); + return ret; + } + if (priv->plat->clks_config) { + ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); + if (ret) { + 
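/*
 * Illustrative sketch (not part of the patch): how get_ptptime() above
 * rebuilds one auxiliary-snapshot timestamp from the two hardware
 * registers (nanoseconds plus whole seconds) before it is delivered to
 * the PTP core as a PTP_CLOCK_EXTTS event.  Register values are just an
 * example.
 */
static unsigned long long ex_aux_snapshot_ns(unsigned int atnr_ns,
					     unsigned int atsr_sec)
{
	/* e.g. atsr_sec = 5, atnr_ns = 123456 -> 5000123456 ns */
	return (unsigned long long)atsr_sec * 1000000000ULL + atnr_ns;
}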
clk_disable_unprepare(priv->plat->stmmac_clk); + clk_disable_unprepare(priv->plat->pclk); + return ret; + } + } + } else { + clk_disable_unprepare(priv->plat->stmmac_clk); + clk_disable_unprepare(priv->plat->pclk); + if (priv->plat->clks_config) + priv->plat->clks_config(priv->plat->bsp_priv, enabled); + } + + return ret; +} +EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); + /** * stmmac_verify_args - verify the driver parameters. * Description: it checks the driver parameters and set a default in case of @@ -134,11 +187,7 @@ static void stmmac_verify_args(void) eee_timer = STMMAC_DEFAULT_LPI_TIMER; } -/** - * stmmac_disable_all_queues - Disable all queues - * @priv: driver private structure - */ -static void stmmac_disable_all_queues(struct stmmac_priv *priv) +static void __stmmac_disable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; u32 tx_queues_cnt = priv->plat->tx_queues_to_use; @@ -148,6 +197,12 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) { + napi_disable(&ch->rxtx_napi); + continue; + } + if (queue < rx_queues_cnt) napi_disable(&ch->rx_napi); if (queue < tx_queues_cnt) @@ -156,6 +211,28 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) } /** + * stmmac_disable_all_queues - Disable all queues + * @priv: driver private structure + */ +static void stmmac_disable_all_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + struct stmmac_rx_queue *rx_q; + u32 queue; + + /* synchronize_rcu() needed for pending XDP buffers to drain */ + for (queue = 0; queue < rx_queues_cnt; queue++) { + rx_q = &priv->rx_queue[queue]; + if (rx_q->xsk_pool) { + synchronize_rcu(); + break; + } + } + + __stmmac_disable_all_queues(priv); +} + +/** * stmmac_enable_all_queues - Enable all queues * @priv: driver private structure */ @@ -169,6 +246,12 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv) for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) { + napi_enable(&ch->rxtx_napi); + continue; + } + if (queue < rx_queues_cnt) napi_enable(&ch->rx_napi); if (queue < tx_queues_cnt) @@ -433,6 +516,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, { struct skb_shared_hwtstamps shhwtstamp; bool found = false; + s64 adjust = 0; u64 ns = 0; if (!priv->hwts_tx_en) @@ -451,6 +535,13 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, } if (found) { + /* Correct the clk domain crossing(CDC) error */ + if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { + adjust += -(2 * (NSEC_PER_SEC / + priv->plat->clk_ptp_rate)); + ns += adjust; + } + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); @@ -474,6 +565,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, { struct skb_shared_hwtstamps *shhwtstamp = NULL; struct dma_desc *desc = p; + u64 adjust = 0; u64 ns = 0; if (!priv->hwts_rx_en) @@ -485,6 +577,13 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, /* Check if timestamp is available */ if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); + + /* Correct the clk domain crossing(CDC) error */ + if 
(priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { + adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate); + ns -= adjust; + } + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); @@ -922,6 +1021,21 @@ static void stmmac_mac_an_restart(struct phylink_config *config) /* Not Supported */ } +static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) +{ + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + + if (is_up && *hs_enable) { + stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY); + } else { + *lo_state = FPE_EVENT_UNKNOWN; + *lp_state = FPE_EVENT_UNKNOWN; + } +} + static void stmmac_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { @@ -932,6 +1046,9 @@ static void stmmac_mac_link_down(struct phylink_config *config, priv->tx_lpi_enabled = false; stmmac_eee_init(priv); stmmac_set_eee_pls(priv, priv->hw, false); + + if (priv->dma_cap.fpesel) + stmmac_fpe_link_state_handle(priv, false); } static void stmmac_mac_link_up(struct phylink_config *config, @@ -1030,6 +1147,9 @@ static void stmmac_mac_link_up(struct phylink_config *config, priv->tx_lpi_enabled = priv->eee_enabled; stmmac_set_eee_pls(priv, priv->hw, true); } + + if (priv->dma_cap.fpesel) + stmmac_fpe_link_state_handle(priv, true); } static const struct phylink_mac_ops stmmac_phylink_mac_ops = { @@ -1117,6 +1237,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) priv->phylink_config.dev = &priv->dev->dev; priv->phylink_config.type = PHYLINK_NETDEV; priv->phylink_config.pcs_poll = true; + priv->phylink_config.ovr_an_inband = + priv->plat->mdio_bus_data->xpcs_an_inband; if (!fwnode) fwnode = dev_fwnode(priv->device); @@ -1304,11 +1426,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); - if (!buf->page) - return -ENOMEM; + if (!buf->page) { + buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); + if (!buf->page) + return -ENOMEM; + buf->page_offset = stmmac_rx_offset(priv); + } - if (priv->sph) { + if (priv->sph && !buf->sec_page) { buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); if (!buf->sec_page) return -ENOMEM; @@ -1320,7 +1445,8 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); } - buf->addr = page_pool_get_dma_addr(buf->page); + buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; + stmmac_set_desc_addr(priv, p, buf->addr); if (priv->dma_buf_sz == BUF_SIZE_16KiB) stmmac_init_desc3(priv, p); @@ -1358,7 +1484,8 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - if (tx_q->tx_skbuff_dma[i].buf) { + if (tx_q->tx_skbuff_dma[i].buf && + tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { if (tx_q->tx_skbuff_dma[i].map_as_page) dma_unmap_page(priv->device, tx_q->tx_skbuff_dma[i].buf, @@ -1371,166 +1498,227 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) DMA_TO_DEVICE); } - if (tx_q->tx_skbuff[i]) { + if (tx_q->xdpf[i] && + 
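/*
 * Illustrative sketch (not part of the patch): the clock-domain-crossing
 * (CDC) compensation applied above.  Both TX and RX hardware timestamps
 * end up reduced by two PTP clock periods; with an example 50 MHz
 * clk_ptp_rate that is 2 * 20 ns = 40 ns.
 */
static unsigned long long ex_cdc_correct(unsigned long long ts_ns,
					 unsigned long clk_ptp_rate_hz)
{
	return ts_ns - 2ULL * (1000000000ULL / clk_ptp_rate_hz);
}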
(tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || + tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { + xdp_return_frame(tx_q->xdpf[i]); + tx_q->xdpf[i] = NULL; + } + + if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) + tx_q->xsk_frames_done++; + + if (tx_q->tx_skbuff[i] && + tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { dev_kfree_skb_any(tx_q->tx_skbuff[i]); tx_q->tx_skbuff[i] = NULL; - tx_q->tx_skbuff_dma[i].buf = 0; - tx_q->tx_skbuff_dma[i].map_as_page = false; } + + tx_q->tx_skbuff_dma[i].buf = 0; + tx_q->tx_skbuff_dma[i].map_as_page = false; } /** - * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer. - * @priv: driver private structure - * Description: this function is called to re-allocate a receive buffer, perform - * the DMA mapping and init the descriptor. + * dma_free_rx_skbufs - free RX dma buffers + * @priv: private structure + * @queue: RX queue index */ -static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv) +static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) { - u32 rx_count = priv->plat->rx_queues_to_use; - u32 queue; int i; - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (i = 0; i < priv->dma_rx_size; i++) + stmmac_free_rx_buffer(priv, queue, i); +} - for (i = 0; i < priv->dma_rx_size; i++) { - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; +static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, + gfp_t flags) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int i; - if (buf->page) { - page_pool_recycle_direct(rx_q->page_pool, buf->page); - buf->page = NULL; - } + for (i = 0; i < priv->dma_rx_size; i++) { + struct dma_desc *p; + int ret; - if (priv->sph && buf->sec_page) { - page_pool_recycle_direct(rx_q->page_pool, buf->sec_page); - buf->sec_page = NULL; - } - } + if (priv->extend_desc) + p = &((rx_q->dma_erx + i)->basic); + else + p = rx_q->dma_rx + i; + + ret = stmmac_init_rx_buffers(priv, p, i, flags, + queue); + if (ret) + return ret; + + rx_q->buf_alloc_num++; } - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + return 0; +} - for (i = 0; i < priv->dma_rx_size; i++) { - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - struct dma_desc *p; +/** + * dma_free_rx_xskbufs - free RX dma buffers from XSK pool + * @priv: private structure + * @queue: RX queue index + */ +static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int i; - if (priv->extend_desc) - p = &((rx_q->dma_erx + i)->basic); - else - p = rx_q->dma_rx + i; + for (i = 0; i < priv->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - if (!buf->page) { - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); - if (!buf->page) - goto err_reinit_rx_buffers; + if (!buf->xdp) + continue; - buf->addr = page_pool_get_dma_addr(buf->page); - } + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + } +} - if (priv->sph && !buf->sec_page) { - buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); - if (!buf->sec_page) - goto err_reinit_rx_buffers; +static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int i; - buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); - } + for (i = 0; i < priv->dma_rx_size; i++) { + struct stmmac_rx_buffer *buf; + dma_addr_t dma_addr; + struct dma_desc *p; - 
stmmac_set_desc_addr(priv, p, buf->addr); - if (priv->sph) - stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); - else - stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); - if (priv->dma_buf_sz == BUF_SIZE_16KiB) - stmmac_init_desc3(priv, p); - } - } + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + i); + else + p = rx_q->dma_rx + i; - return; + buf = &rx_q->buf_pool[i]; -err_reinit_rx_buffers: - do { - while (--i >= 0) - stmmac_free_rx_buffer(priv, queue, i); + buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); + if (!buf->xdp) + return -ENOMEM; - if (queue == 0) - break; + dma_addr = xsk_buff_xdp_get_dma(buf->xdp); + stmmac_set_desc_addr(priv, p, dma_addr); + rx_q->buf_alloc_num++; + } - i = priv->dma_rx_size; - } while (queue-- > 0); + return 0; +} + +static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) +{ + if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) + return NULL; + + return xsk_get_pool_from_qid(priv->dev, queue); } /** - * init_dma_rx_desc_rings - init the RX descriptor rings - * @dev: net device structure + * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) + * @priv: driver private structure + * @queue: RX queue index * @flags: gfp flag. * Description: this function initializes the DMA RX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. */ -static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) +static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags) { - struct stmmac_priv *priv = netdev_priv(dev); - u32 rx_count = priv->plat->rx_queues_to_use; - int ret = -ENOMEM; - int queue; - int i; + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int ret; - /* RX INITIALIZATION */ netif_dbg(priv, probe, priv->dev, - "SKB addresses:\nskb\t\tskb data\tdma data\n"); + "(%s) dma_rx_phy=0x%08x\n", __func__, + (u32)rx_q->dma_rx_phy); - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + stmmac_clear_rx_descriptors(priv, queue); - netif_dbg(priv, probe, priv->dev, - "(%s) dma_rx_phy=0x%08x\n", __func__, - (u32)rx_q->dma_rx_phy); + xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); - stmmac_clear_rx_descriptors(priv, queue); + rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); - for (i = 0; i < priv->dma_rx_size; i++) { - struct dma_desc *p; + if (rx_q->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + netdev_info(priv->dev, + "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", + rx_q->queue_index); + xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, + MEM_TYPE_PAGE_POOL, + rx_q->page_pool)); + netdev_info(priv->dev, + "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", + rx_q->queue_index); + } - if (priv->extend_desc) - p = &((rx_q->dma_erx + i)->basic); - else - p = rx_q->dma_rx + i; + if (rx_q->xsk_pool) { + /* RX XDP ZC buffer pool may not be populated, e.g. + * xdpsock TX-only. 
+ */ + stmmac_alloc_rx_buffers_zc(priv, queue); + } else { + ret = stmmac_alloc_rx_buffers(priv, queue, flags); + if (ret < 0) + return -ENOMEM; + } - ret = stmmac_init_rx_buffers(priv, p, i, flags, - queue); - if (ret) - goto err_init_rx_buffers; - } + rx_q->cur_rx = 0; + rx_q->dirty_rx = 0; - rx_q->cur_rx = 0; - rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size); - - /* Setup the chained descriptor addresses */ - if (priv->mode == STMMAC_CHAIN_MODE) { - if (priv->extend_desc) - stmmac_mode_init(priv, rx_q->dma_erx, - rx_q->dma_rx_phy, - priv->dma_rx_size, 1); - else - stmmac_mode_init(priv, rx_q->dma_rx, - rx_q->dma_rx_phy, - priv->dma_rx_size, 0); - } + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, rx_q->dma_erx, + rx_q->dma_rx_phy, + priv->dma_rx_size, 1); + else + stmmac_mode_init(priv, rx_q->dma_rx, + rx_q->dma_rx_phy, + priv->dma_rx_size, 0); + } + + return 0; +} + +static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_count = priv->plat->rx_queues_to_use; + u32 queue; + int ret; + + /* RX INITIALIZATION */ + netif_dbg(priv, probe, priv->dev, + "SKB addresses:\nskb\t\tskb data\tdma data\n"); + + for (queue = 0; queue < rx_count; queue++) { + ret = __init_dma_rx_desc_rings(priv, queue, flags); + if (ret) + goto err_init_rx_buffers; } return 0; err_init_rx_buffers: while (queue >= 0) { - while (--i >= 0) - stmmac_free_rx_buffer(priv, queue, i); + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + if (rx_q->xsk_pool) + dma_free_rx_xskbufs(priv, queue); + else + dma_free_rx_skbufs(priv, queue); + + rx_q->buf_alloc_num = 0; + rx_q->xsk_pool = NULL; if (queue == 0) break; - i = priv->dma_rx_size; queue--; } @@ -1538,63 +1726,75 @@ err_init_rx_buffers: } /** - * init_dma_tx_desc_rings - init the TX descriptor rings - * @dev: net device structure. + * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) + * @priv: driver private structure + * @queue : TX queue index * Description: this function initializes the DMA TX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. 
*/ -static int init_dma_tx_desc_rings(struct net_device *dev) +static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) { - struct stmmac_priv *priv = netdev_priv(dev); - u32 tx_queue_cnt = priv->plat->tx_queues_to_use; - u32 queue; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; int i; - for (queue = 0; queue < tx_queue_cnt; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + netif_dbg(priv, probe, priv->dev, + "(%s) dma_tx_phy=0x%08x\n", __func__, + (u32)tx_q->dma_tx_phy); - netif_dbg(priv, probe, priv->dev, - "(%s) dma_tx_phy=0x%08x\n", __func__, - (u32)tx_q->dma_tx_phy); - - /* Setup the chained descriptor addresses */ - if (priv->mode == STMMAC_CHAIN_MODE) { - if (priv->extend_desc) - stmmac_mode_init(priv, tx_q->dma_etx, - tx_q->dma_tx_phy, - priv->dma_tx_size, 1); - else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) - stmmac_mode_init(priv, tx_q->dma_tx, - tx_q->dma_tx_phy, - priv->dma_tx_size, 0); - } + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + stmmac_mode_init(priv, tx_q->dma_etx, + tx_q->dma_tx_phy, + priv->dma_tx_size, 1); + else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) + stmmac_mode_init(priv, tx_q->dma_tx, + tx_q->dma_tx_phy, + priv->dma_tx_size, 0); + } - for (i = 0; i < priv->dma_tx_size; i++) { - struct dma_desc *p; - if (priv->extend_desc) - p = &((tx_q->dma_etx + i)->basic); - else if (tx_q->tbs & STMMAC_TBS_AVAIL) - p = &((tx_q->dma_entx + i)->basic); - else - p = tx_q->dma_tx + i; + tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); - stmmac_clear_desc(priv, p); + for (i = 0; i < priv->dma_tx_size; i++) { + struct dma_desc *p; - tx_q->tx_skbuff_dma[i].buf = 0; - tx_q->tx_skbuff_dma[i].map_as_page = false; - tx_q->tx_skbuff_dma[i].len = 0; - tx_q->tx_skbuff_dma[i].last_segment = false; - tx_q->tx_skbuff[i] = NULL; - } + if (priv->extend_desc) + p = &((tx_q->dma_etx + i)->basic); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + p = &((tx_q->dma_entx + i)->basic); + else + p = tx_q->dma_tx + i; - tx_q->dirty_tx = 0; - tx_q->cur_tx = 0; - tx_q->mss = 0; + stmmac_clear_desc(priv, p); - netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); + tx_q->tx_skbuff_dma[i].buf = 0; + tx_q->tx_skbuff_dma[i].map_as_page = false; + tx_q->tx_skbuff_dma[i].len = 0; + tx_q->tx_skbuff_dma[i].last_segment = false; + tx_q->tx_skbuff[i] = NULL; } + tx_q->dirty_tx = 0; + tx_q->cur_tx = 0; + tx_q->mss = 0; + + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); + + return 0; +} + +static int init_dma_tx_desc_rings(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 tx_queue_cnt; + u32 queue; + + tx_queue_cnt = priv->plat->tx_queues_to_use; + + for (queue = 0; queue < tx_queue_cnt; queue++) + __init_dma_tx_desc_rings(priv, queue); + return 0; } @@ -1626,29 +1826,25 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) } /** - * dma_free_rx_skbufs - free RX dma buffers - * @priv: private structure - * @queue: RX queue index - */ -static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) -{ - int i; - - for (i = 0; i < priv->dma_rx_size; i++) - stmmac_free_rx_buffer(priv, queue, i); -} - -/** * dma_free_tx_skbufs - free TX dma buffers * @priv: private structure * @queue: TX queue index */ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; int i; + tx_q->xsk_frames_done = 0; + for (i = 0; i < priv->dma_tx_size; i++) stmmac_free_tx_buffer(priv, queue, i); + + if 
(tx_q->xsk_pool && tx_q->xsk_frames_done) { + xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); + tx_q->xsk_frames_done = 0; + tx_q->xsk_pool = NULL; + } } /** @@ -1665,137 +1861,186 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) } /** - * free_dma_rx_desc_resources - free RX dma desc resources + * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) * @priv: private structure + * @queue: RX queue index */ +static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + /* Release the DMA RX socket buffers */ + if (rx_q->xsk_pool) + dma_free_rx_xskbufs(priv, queue); + else + dma_free_rx_skbufs(priv, queue); + + rx_q->buf_alloc_num = 0; + rx_q->xsk_pool = NULL; + + /* Free DMA regions of consistent memory previously allocated */ + if (!priv->extend_desc) + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_desc), + rx_q->dma_rx, rx_q->dma_rx_phy); + else + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + rx_q->dma_erx, rx_q->dma_rx_phy); + + if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) + xdp_rxq_info_unreg(&rx_q->xdp_rxq); + + kfree(rx_q->buf_pool); + if (rx_q->page_pool) + page_pool_destroy(rx_q->page_pool); +} + static void free_dma_rx_desc_resources(struct stmmac_priv *priv) { u32 rx_count = priv->plat->rx_queues_to_use; u32 queue; /* Free RX queue resources */ - for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - - /* Release the DMA RX socket buffers */ - dma_free_rx_skbufs(priv, queue); - - /* Free DMA regions of consistent memory previously allocated */ - if (!priv->extend_desc) - dma_free_coherent(priv->device, priv->dma_rx_size * - sizeof(struct dma_desc), - rx_q->dma_rx, rx_q->dma_rx_phy); - else - dma_free_coherent(priv->device, priv->dma_rx_size * - sizeof(struct dma_extended_desc), - rx_q->dma_erx, rx_q->dma_rx_phy); - - kfree(rx_q->buf_pool); - if (rx_q->page_pool) - page_pool_destroy(rx_q->page_pool); - } + for (queue = 0; queue < rx_count; queue++) + __free_dma_rx_desc_resources(priv, queue); } /** - * free_dma_tx_desc_resources - free TX dma desc resources + * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) * @priv: private structure + * @queue: TX queue index */ -static void free_dma_tx_desc_resources(struct stmmac_priv *priv) +static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) { - u32 tx_count = priv->plat->tx_queues_to_use; - u32 queue; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + size_t size; + void *addr; - /* Free TX queue resources */ - for (queue = 0; queue < tx_count; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - size_t size; - void *addr; + /* Release the DMA TX socket buffers */ + dma_free_tx_skbufs(priv, queue); + + if (priv->extend_desc) { + size = sizeof(struct dma_extended_desc); + addr = tx_q->dma_etx; + } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { + size = sizeof(struct dma_edesc); + addr = tx_q->dma_entx; + } else { + size = sizeof(struct dma_desc); + addr = tx_q->dma_tx; + } - /* Release the DMA TX socket buffers */ - dma_free_tx_skbufs(priv, queue); + size *= priv->dma_tx_size; - if (priv->extend_desc) { - size = sizeof(struct dma_extended_desc); - addr = tx_q->dma_etx; - } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { - size = sizeof(struct dma_edesc); - addr = tx_q->dma_entx; - } else { - size = sizeof(struct dma_desc); - addr = tx_q->dma_tx; - } + 
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); - size *= priv->dma_tx_size; + kfree(tx_q->tx_skbuff_dma); + kfree(tx_q->tx_skbuff); +} - dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); +static void free_dma_tx_desc_resources(struct stmmac_priv *priv) +{ + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; - kfree(tx_q->tx_skbuff_dma); - kfree(tx_q->tx_skbuff); - } + /* Free TX queue resources */ + for (queue = 0; queue < tx_count; queue++) + __free_dma_tx_desc_resources(priv, queue); } /** - * alloc_dma_rx_desc_resources - alloc RX resources. + * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). * @priv: private structure + * @queue: RX queue index * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. */ +static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + bool xdp_prog = stmmac_xdp_is_enabled(priv); + struct page_pool_params pp_params = { 0 }; + unsigned int num_pages; + unsigned int napi_id; + int ret; + + rx_q->queue_index = queue; + rx_q->priv_data = priv; + + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.pool_size = priv->dma_rx_size; + num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); + pp_params.order = ilog2(num_pages); + pp_params.nid = dev_to_node(priv->device); + pp_params.dev = priv->device; + pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; + pp_params.offset = stmmac_rx_offset(priv); + pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); + + rx_q->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rx_q->page_pool)) { + ret = PTR_ERR(rx_q->page_pool); + rx_q->page_pool = NULL; + return ret; + } + + rx_q->buf_pool = kcalloc(priv->dma_rx_size, + sizeof(*rx_q->buf_pool), + GFP_KERNEL); + if (!rx_q->buf_pool) + return -ENOMEM; + + if (priv->extend_desc) { + rx_q->dma_erx = dma_alloc_coherent(priv->device, + priv->dma_rx_size * + sizeof(struct dma_extended_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); + if (!rx_q->dma_erx) + return -ENOMEM; + + } else { + rx_q->dma_rx = dma_alloc_coherent(priv->device, + priv->dma_rx_size * + sizeof(struct dma_desc), + &rx_q->dma_rx_phy, + GFP_KERNEL); + if (!rx_q->dma_rx) + return -ENOMEM; + } + + if (stmmac_xdp_is_enabled(priv) && + test_bit(queue, priv->af_xdp_zc_qps)) + napi_id = ch->rxtx_napi.napi_id; + else + napi_id = ch->rx_napi.napi_id; + + ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, + rx_q->queue_index, + napi_id); + if (ret) { + netdev_err(priv->dev, "Failed to register xdp rxq info\n"); + return -EINVAL; + } + + return 0; +} + static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) { u32 rx_count = priv->plat->rx_queues_to_use; - int ret = -ENOMEM; u32 queue; + int ret; /* RX queues buffers and DMA */ for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - struct page_pool_params pp_params = { 0 }; - unsigned int num_pages; - - rx_q->queue_index = queue; - rx_q->priv_data = priv; - - pp_params.flags = PP_FLAG_DMA_MAP; - pp_params.pool_size = priv->dma_rx_size; - num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); - pp_params.order = ilog2(num_pages); - pp_params.nid = dev_to_node(priv->device); - pp_params.dev = priv->device; - 
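/*
 * Illustrative sketch (not part of the patch): the DMA-direction choice
 * made in the page_pool setup below.  Without XDP, RX pages are only
 * written by the device (DMA_FROM_DEVICE); once an XDP program may
 * bounce a frame back out via XDP_TX, the same page is also read by the
 * device, so the mapping has to be bidirectional.  The enum is a
 * simplified stand-in for the kernel's dma_data_direction.
 */
enum ex_dma_dir { EX_DMA_FROM_DEVICE, EX_DMA_BIDIRECTIONAL };

static enum ex_dma_dir ex_rx_pool_dma_dir(int xdp_prog_attached)
{
	return xdp_prog_attached ? EX_DMA_BIDIRECTIONAL : EX_DMA_FROM_DEVICE;
}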
pp_params.dma_dir = DMA_FROM_DEVICE; - - rx_q->page_pool = page_pool_create(&pp_params); - if (IS_ERR(rx_q->page_pool)) { - ret = PTR_ERR(rx_q->page_pool); - rx_q->page_pool = NULL; - goto err_dma; - } - - rx_q->buf_pool = kcalloc(priv->dma_rx_size, - sizeof(*rx_q->buf_pool), - GFP_KERNEL); - if (!rx_q->buf_pool) + ret = __alloc_dma_rx_desc_resources(priv, queue); + if (ret) goto err_dma; - - if (priv->extend_desc) { - rx_q->dma_erx = dma_alloc_coherent(priv->device, - priv->dma_rx_size * - sizeof(struct dma_extended_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); - if (!rx_q->dma_erx) - goto err_dma; - - } else { - rx_q->dma_rx = dma_alloc_coherent(priv->device, - priv->dma_rx_size * - sizeof(struct dma_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); - if (!rx_q->dma_rx) - goto err_dma; - } } return 0; @@ -1807,60 +2052,70 @@ err_dma: } /** - * alloc_dma_tx_desc_resources - alloc TX resources. + * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). * @priv: private structure + * @queue: TX queue index * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. */ -static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) +static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) { - u32 tx_count = priv->plat->tx_queues_to_use; - int ret = -ENOMEM; - u32 queue; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + size_t size; + void *addr; - /* TX queues buffers and DMA */ - for (queue = 0; queue < tx_count; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - size_t size; - void *addr; + tx_q->queue_index = queue; + tx_q->priv_data = priv; - tx_q->queue_index = queue; - tx_q->priv_data = priv; + tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, + sizeof(*tx_q->tx_skbuff_dma), + GFP_KERNEL); + if (!tx_q->tx_skbuff_dma) + return -ENOMEM; - tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, - sizeof(*tx_q->tx_skbuff_dma), - GFP_KERNEL); - if (!tx_q->tx_skbuff_dma) - goto err_dma; + tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!tx_q->tx_skbuff) + return -ENOMEM; - tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, - sizeof(struct sk_buff *), - GFP_KERNEL); - if (!tx_q->tx_skbuff) - goto err_dma; + if (priv->extend_desc) + size = sizeof(struct dma_extended_desc); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + size = sizeof(struct dma_edesc); + else + size = sizeof(struct dma_desc); - if (priv->extend_desc) - size = sizeof(struct dma_extended_desc); - else if (tx_q->tbs & STMMAC_TBS_AVAIL) - size = sizeof(struct dma_edesc); - else - size = sizeof(struct dma_desc); + size *= priv->dma_tx_size; + + addr = dma_alloc_coherent(priv->device, size, + &tx_q->dma_tx_phy, GFP_KERNEL); + if (!addr) + return -ENOMEM; - size *= priv->dma_tx_size; + if (priv->extend_desc) + tx_q->dma_etx = addr; + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_q->dma_entx = addr; + else + tx_q->dma_tx = addr; - addr = dma_alloc_coherent(priv->device, size, - &tx_q->dma_tx_phy, GFP_KERNEL); - if (!addr) - goto err_dma; + return 0; +} - if (priv->extend_desc) - tx_q->dma_etx = addr; - else if (tx_q->tbs & STMMAC_TBS_AVAIL) - tx_q->dma_entx = addr; - else - tx_q->dma_tx = addr; +static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) +{ + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; + int ret; + + /* TX queues buffers and DMA */ + for 
(queue = 0; queue < tx_count; queue++) { + ret = __alloc_dma_tx_desc_resources(priv, queue); + if (ret) + goto err_dma; } return 0; @@ -1897,11 +2152,13 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) */ static void free_dma_desc_resources(struct stmmac_priv *priv) { - /* Release the DMA RX socket buffers */ - free_dma_rx_desc_resources(priv); - /* Release the DMA TX socket buffers */ free_dma_tx_desc_resources(priv); + + /* Release the DMA RX socket buffers later + * to ensure all pending XDP_TX buffers are returned. + */ + free_dma_rx_desc_resources(priv); } /** @@ -2058,12 +2315,24 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) /* configure all channels */ for (chan = 0; chan < rx_channels_count; chan++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; + u32 buf_size; + qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, qmode); - stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz, - chan); + + if (rx_q->xsk_pool) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + chan); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_buf_sz, + chan); + } } for (chan = 0; chan < tx_channels_count; chan++) { @@ -2074,6 +2343,101 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) } } +static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) +{ + struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct xsk_buff_pool *pool = tx_q->xsk_pool; + unsigned int entry = tx_q->cur_tx; + struct dma_desc *tx_desc = NULL; + struct xdp_desc xdp_desc; + bool work_done = true; + + /* Avoids TX time-out as we are sharing with slow path */ + nq->trans_start = jiffies; + + budget = min(budget, stmmac_tx_avail(priv, queue)); + + while (budget-- > 0) { + dma_addr_t dma_addr; + bool set_ic; + + /* We are sharing with slow path and stop XSK TX desc submission when + * available TX ring is less than threshold. + */ + if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || + !netif_carrier_ok(priv->dev)) { + work_done = false; + break; + } + + if (!xsk_tx_peek_desc(pool, &xdp_desc)) + break; + + if (likely(priv->extend_desc)) + tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_desc = &tx_q->dma_entx[entry].basic; + else + tx_desc = tx_q->dma_tx + entry; + + dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; + + /* To return XDP buffer to XSK pool, we simple call + * xsk_tx_completed(), so we don't need to fill up + * 'buf' and 'xdpf'. 
+ */ + tx_q->tx_skbuff_dma[entry].buf = 0; + tx_q->xdpf[entry] = NULL; + + tx_q->tx_skbuff_dma[entry].map_as_page = false; + tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; + tx_q->tx_skbuff_dma[entry].last_segment = true; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + stmmac_set_desc_addr(priv, tx_desc, dma_addr); + + tx_q->tx_count_frames++; + + if (!priv->tx_coal_frames[queue]) + set_ic = false; + else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, tx_desc); + priv->xstats.tx_set_ic_bit++; + } + + stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, + true, priv->mode, true, true, + xdp_desc.len); + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); + entry = tx_q->cur_tx; + } + + if (tx_desc) { + stmmac_flush_tx_descriptors(priv, queue); + xsk_tx_release(pool); + } + + /* Return true if all of the 3 conditions are met + * a) TX Budget is still available + * b) work_done = true when XSK TX desc peek is empty (no more + * pending XSK TX for transmission) + */ + return !!budget && work_done; +} + /** * stmmac_tx_clean - to manage the transmission completion * @priv: driver private structure @@ -2085,18 +2449,35 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; - unsigned int entry, count = 0; + unsigned int entry, xmits = 0, count = 0; __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); priv->xstats.tx_clean++; + tx_q->xsk_frames_done = 0; + entry = tx_q->dirty_tx; - while ((entry != tx_q->cur_tx) && (count < budget)) { - struct sk_buff *skb = tx_q->tx_skbuff[entry]; + + /* Try to clean all TX complete frame in 1 shot */ + while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) { + struct xdp_frame *xdpf; + struct sk_buff *skb; struct dma_desc *p; int status; + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { + xdpf = tx_q->xdpf[entry]; + skb = NULL; + } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { + xdpf = NULL; + skb = tx_q->tx_skbuff[entry]; + } else { + xdpf = NULL; + skb = NULL; + } + if (priv->extend_desc) p = (struct dma_desc *)(tx_q->dma_etx + entry); else if (tx_q->tbs & STMMAC_TBS_AVAIL) @@ -2126,10 +2507,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; } - stmmac_get_tx_hwtstamp(priv, p, skb); + if (skb) + stmmac_get_tx_hwtstamp(priv, p, skb); } - if (likely(tx_q->tx_skbuff_dma[entry].buf)) { + if (likely(tx_q->tx_skbuff_dma[entry].buf && + tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { if (tx_q->tx_skbuff_dma[entry].map_as_page) dma_unmap_page(priv->device, tx_q->tx_skbuff_dma[entry].buf, @@ -2150,11 +2533,28 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) tx_q->tx_skbuff_dma[entry].last_segment = false; tx_q->tx_skbuff_dma[entry].is_jumbo = false; - if (likely(skb != NULL)) { - pkts_compl++; - bytes_compl += skb->len; - dev_consume_skb_any(skb); - tx_q->tx_skbuff[entry] = NULL; + if (xdpf && + tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + tx_q->xdpf[entry] = NULL; + } + + if (xdpf && + tx_q->tx_skbuff_dma[entry].buf_type == 
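/*
 * Illustrative sketch (not part of the patch): the interrupt-coalescing
 * decision used in the zero-copy TX loop above.  The IC bit is set on
 * every Nth descriptor, where N is the per-queue tx_coal_frames value,
 * so only a fraction of completions raise an interrupt.
 */
static int ex_set_ic_bit(unsigned int tx_count_frames, unsigned int coal_frames)
{
	if (!coal_frames)
		return 0;			/* frame-based coalescing disabled */
	/* e.g. coal_frames = 25 -> IC on frames 25, 50, 75, ... */
	return (tx_count_frames % coal_frames) == 0;
}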
STMMAC_TXBUF_T_XDP_NDO) { + xdp_return_frame(xdpf); + tx_q->xdpf[entry] = NULL; + } + + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) + tx_q->xsk_frames_done++; + + if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { + if (likely(skb)) { + pkts_compl++; + bytes_compl += skb->len; + dev_consume_skb_any(skb); + tx_q->tx_skbuff[entry] = NULL; + } } stmmac_release_tx_desc(priv, p, priv->mode); @@ -2175,6 +2575,28 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); } + if (tx_q->xsk_pool) { + bool work_done; + + if (tx_q->xsk_frames_done) + xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); + + if (xsk_uses_need_wakeup(tx_q->xsk_pool)) + xsk_set_tx_need_wakeup(tx_q->xsk_pool); + + /* For XSK TX, we try to send as many as possible. + * If XSK work done (XSK TX desc empty and budget still + * available), return "budget - 1" to reenable TX IRQ. + * Else, return "budget" to make NAPI continue polling. + */ + work_done = stmmac_xdp_xmit_zc(priv, queue, + STMMAC_XSK_TX_BUDGET_MAX); + if (work_done) + xmits = budget - 1; + else + xmits = budget; + } + if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) { stmmac_enable_eee_mode(priv); @@ -2183,12 +2605,14 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) /* We still have pending packets, let's call for a new scheduling */ if (tx_q->dirty_tx != tx_q->cur_tx) - hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer), + hrtimer_start(&tx_q->txtimer, + STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), HRTIMER_MODE_REL); __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); - return count; + /* Combine decisions from TX clean and XSK TX */ + return max(count, xmits); } /** @@ -2266,28 +2690,35 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) return false; } -static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) +static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) { int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, - &priv->xstats, chan); + &priv->xstats, chan, dir); + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; struct stmmac_channel *ch = &priv->channel[chan]; + struct napi_struct *rx_napi; + struct napi_struct *tx_napi; unsigned long flags; + rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; + tx_napi = tx_q->xsk_pool ? 
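/*
 * Illustrative sketch (not part of the patch): how the "budget" vs
 * "budget - 1" return value above interacts with the usual NAPI poll
 * pattern.  Returning less than the budget lets the poll routine
 * complete NAPI and re-arm the interrupt; returning the full budget
 * keeps the queue in polling mode.  The callbacks are stand-ins for
 * napi_complete_done() and the driver's IRQ enable helper.
 */
static int ex_napi_poll(int work_done, int budget,
			int (*complete_done)(int), void (*irq_enable)(void))
{
	if (work_done < budget && complete_done(work_done))
		irq_enable();		/* done for now, re-enable the IRQ */

	return work_done;		/* == budget keeps NAPI scheduled */
}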
&ch->rxtx_napi : &ch->tx_napi; + if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { - if (napi_schedule_prep(&ch->rx_napi)) { + if (napi_schedule_prep(rx_napi)) { spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); spin_unlock_irqrestore(&ch->lock, flags); - __napi_schedule(&ch->rx_napi); + __napi_schedule(rx_napi); } } if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { - if (napi_schedule_prep(&ch->tx_napi)) { + if (napi_schedule_prep(tx_napi)) { spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); spin_unlock_irqrestore(&ch->lock, flags); - __napi_schedule(&ch->tx_napi); + __napi_schedule(tx_napi); } } @@ -2315,7 +2746,8 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) channels_to_check = ARRAY_SIZE(status); for (chan = 0; chan < channels_to_check; chan++) - status[chan] = stmmac_napi_check(priv, chan); + status[chan] = stmmac_napi_check(priv, chan, + DMA_DIR_RXTX); for (chan = 0; chan < tx_channel_count; chan++) { if (unlikely(status[chan] & tx_hard_error_bump_tc)) { @@ -2443,7 +2875,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) rx_q->dma_rx_phy, chan); rx_q->rx_tail_addr = rx_q->dma_rx_phy + - (priv->dma_rx_size * + (rx_q->buf_alloc_num * sizeof(struct dma_desc)); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, chan); @@ -2468,7 +2900,8 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer), + hrtimer_start(&tx_q->txtimer, + STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), HRTIMER_MODE_REL); } @@ -2483,16 +2916,18 @@ static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); struct stmmac_priv *priv = tx_q->priv_data; struct stmmac_channel *ch; + struct napi_struct *napi; ch = &priv->channel[tx_q->queue_index]; + napi = tx_q->xsk_pool ? 
&ch->rxtx_napi : &ch->tx_napi; - if (likely(napi_schedule_prep(&ch->tx_napi))) { + if (likely(napi_schedule_prep(napi))) { unsigned long flags; spin_lock_irqsave(&ch->lock, flags); stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); spin_unlock_irqrestore(&ch->lock, flags); - __napi_schedule(&ch->tx_napi); + __napi_schedule(napi); } return HRTIMER_NORESTART; @@ -2509,18 +2944,21 @@ static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) static void stmmac_init_coalesce(struct stmmac_priv *priv) { u32 tx_channel_count = priv->plat->tx_queues_to_use; + u32 rx_channel_count = priv->plat->rx_queues_to_use; u32 chan; - priv->tx_coal_frames = STMMAC_TX_FRAMES; - priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; - priv->rx_coal_frames = STMMAC_RX_FRAMES; - for (chan = 0; chan < tx_channel_count; chan++) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; + priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; + hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); tx_q->txtimer.function = stmmac_tx_timer; } + + for (chan = 0; chan < rx_channel_count; chan++) + priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; } static void stmmac_set_rings_length(struct stmmac_priv *priv) @@ -2737,6 +3175,27 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) } } +static int stmmac_fpe_start_wq(struct stmmac_priv *priv) +{ + char *name; + + clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); + clear_bit(__FPE_REMOVING, &priv->fpe_task_state); + + name = priv->wq_name; + sprintf(name, "%s-fpe", priv->dev->name); + + priv->fpe_wq = create_singlethread_workqueue(name); + if (!priv->fpe_wq) { + netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); + + return -ENOMEM; + } + netdev_info(priv->dev, "FPE workqueue start"); + + return 0; +} + /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. 
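/*
 * A condensed, illustrative sketch of the dedicated-workqueue lifecycle that
 * stmmac_fpe_start_wq()/stmmac_fpe_stop_wq() and the FPE IRQ path implement
 * above.  create_singlethread_workqueue(), queue_work(), destroy_workqueue()
 * and the bitops are the real kernel APIs; the my_* names are hypothetical
 * stand-ins for the driver's private state and are not part of this patch.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

struct my_fpe_ctx {
	struct workqueue_struct *wq;
	struct work_struct task;	/* runs the verify/response handshake */
	unsigned long state;		/* bit 0: task scheduled, bit 1: removing */
};

static int my_fpe_start_wq(struct my_fpe_ctx *ctx, const char *ifname)
{
	char name[32];

	clear_bit(0, &ctx->state);
	clear_bit(1, &ctx->state);

	snprintf(name, sizeof(name), "%s-fpe", ifname);
	ctx->wq = create_singlethread_workqueue(name);
	return ctx->wq ? 0 : -ENOMEM;
}

static void my_fpe_kick(struct my_fpe_ctx *ctx)
{
	/* Mirror the IRQ path: schedule at most one handshake task at a time
	 * and never after teardown has started.
	 */
	if (!test_bit(1, &ctx->state) && !test_and_set_bit(0, &ctx->state))
		queue_work(ctx->wq, &ctx->task);
}

static void my_fpe_stop_wq(struct my_fpe_ctx *ctx)
{
	set_bit(1, &ctx->state);		/* block further scheduling */
	if (ctx->wq)
		destroy_workqueue(ctx->wq);	/* flushes any pending task */
}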
@@ -2755,6 +3214,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) struct stmmac_priv *priv = netdev_priv(dev); u32 rx_cnt = priv->plat->rx_queues_to_use; u32 tx_cnt = priv->plat->tx_queues_to_use; + bool sph_en; u32 chan; int ret; @@ -2825,10 +3285,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) priv->tx_lpi_timer = eee_timer * 1000; if (priv->use_riwt) { - if (!priv->rx_riwt) - priv->rx_riwt = DEF_DMA_RIWT; + u32 queue; + + for (queue = 0; queue < rx_cnt; queue++) { + if (!priv->rx_riwt[queue]) + priv->rx_riwt[queue] = DEF_DMA_RIWT; - ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); + stmmac_rx_watchdog(priv, priv->ioaddr, + priv->rx_riwt[queue], queue); + } } if (priv->hw->pcs) @@ -2839,15 +3304,22 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Enable TSO */ if (priv->tso) { - for (chan = 0; chan < tx_cnt; chan++) + for (chan = 0; chan < tx_cnt; chan++) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + + /* TSO and TBS cannot co-exist */ + if (tx_q->tbs & STMMAC_TBS_AVAIL) + continue; + stmmac_enable_tso(priv, priv->ioaddr, 1, chan); + } } /* Enable Split Header */ - if (priv->sph && priv->hw->rx_csum) { - for (chan = 0; chan < rx_cnt; chan++) - stmmac_enable_sph(priv, priv->ioaddr, 1, chan); - } + sph_en = (priv->hw->rx_csum > 0) && priv->sph; + for (chan = 0; chan < rx_cnt; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + /* VLAN Tag Insertion */ if (priv->dma_cap.vlins) @@ -2868,6 +3340,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Start the ball rolling... */ stmmac_start_all_dma(priv); + if (priv->dma_cap.fpesel) { + stmmac_fpe_start_wq(priv); + + if (priv->plat->fpe_cfg->enable) + stmmac_fpe_handshake(priv, true); + } + return 0; } @@ -2878,6 +3357,271 @@ static void stmmac_hw_teardown(struct net_device *dev) clk_disable_unprepare(priv->plat->clk_ptp_ref); } +static void stmmac_free_irq(struct net_device *dev, + enum request_irq_err irq_err, int irq_idx) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int j; + + switch (irq_err) { + case REQ_IRQ_ERR_ALL: + irq_idx = priv->plat->tx_queues_to_use; + fallthrough; + case REQ_IRQ_ERR_TX: + for (j = irq_idx - 1; j >= 0; j--) { + if (priv->tx_irq[j] > 0) { + irq_set_affinity_hint(priv->tx_irq[j], NULL); + free_irq(priv->tx_irq[j], &priv->tx_queue[j]); + } + } + irq_idx = priv->plat->rx_queues_to_use; + fallthrough; + case REQ_IRQ_ERR_RX: + for (j = irq_idx - 1; j >= 0; j--) { + if (priv->rx_irq[j] > 0) { + irq_set_affinity_hint(priv->rx_irq[j], NULL); + free_irq(priv->rx_irq[j], &priv->rx_queue[j]); + } + } + + if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) + free_irq(priv->sfty_ue_irq, dev); + fallthrough; + case REQ_IRQ_ERR_SFTY_UE: + if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) + free_irq(priv->sfty_ce_irq, dev); + fallthrough; + case REQ_IRQ_ERR_SFTY_CE: + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) + free_irq(priv->lpi_irq, dev); + fallthrough; + case REQ_IRQ_ERR_LPI: + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) + free_irq(priv->wol_irq, dev); + fallthrough; + case REQ_IRQ_ERR_WOL: + free_irq(dev->irq, dev); + fallthrough; + case REQ_IRQ_ERR_MAC: + case REQ_IRQ_ERR_NO: + /* If MAC IRQ request error, no more IRQ to free */ + break; + } +} + +static int stmmac_request_irq_multi_msi(struct net_device *dev) +{ + enum request_irq_err irq_err = REQ_IRQ_ERR_NO; + struct stmmac_priv *priv = netdev_priv(dev); + cpumask_t cpu_mask; + int irq_idx = 0; + 
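/*
 * Illustrative sketch (not part of the patch) of the switch/fallthrough
 * unwind that stmmac_free_irq() uses above: the enum value records how far
 * the request sequence got, and each case releases one stage and falls
 * through to the next, so a single helper serves both the error paths and
 * the regular teardown.  free_irq() and fallthrough are the real kernel
 * interfaces; the my_* names are hypothetical.
 */
#include <linux/interrupt.h>

struct my_irqs {
	int tx[8], rx[8];
	int num_tx, num_rx;
};

enum my_req_err { MY_REQ_ERR_ALL, MY_REQ_ERR_TX, MY_REQ_ERR_RX, MY_REQ_ERR_NO };

static void my_free_irqs(struct my_irqs *irqs, enum my_req_err stage, int fail_idx)
{
	int i;

	switch (stage) {
	case MY_REQ_ERR_ALL:
		fail_idx = irqs->num_tx;		/* full teardown: free every TX vector */
		fallthrough;
	case MY_REQ_ERR_TX:
		for (i = fail_idx - 1; i >= 0; i--)	/* only vectors requested so far */
			free_irq(irqs->tx[i], irqs);
		fail_idx = irqs->num_rx;
		fallthrough;
	case MY_REQ_ERR_RX:
		for (i = fail_idx - 1; i >= 0; i--)
			free_irq(irqs->rx[i], irqs);
		fallthrough;
	case MY_REQ_ERR_NO:
		break;					/* nothing (else) was requested */
	}
}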
char *int_name; + int ret; + int i; + + /* For common interrupt */ + int_name = priv->int_name_mac; + sprintf(int_name, "%s:%s", dev->name, "mac"); + ret = request_irq(dev->irq, stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc mac MSI %d (error: %d)\n", + __func__, dev->irq, ret); + irq_err = REQ_IRQ_ERR_MAC; + goto irq_error; + } + + /* Request the Wake IRQ in case of another line + * is used for WoL + */ + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { + int_name = priv->int_name_wol; + sprintf(int_name, "%s:%s", dev->name, "wol"); + ret = request_irq(priv->wol_irq, + stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc wol MSI %d (error: %d)\n", + __func__, priv->wol_irq, ret); + irq_err = REQ_IRQ_ERR_WOL; + goto irq_error; + } + } + + /* Request the LPI IRQ in case of another line + * is used for LPI + */ + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { + int_name = priv->int_name_lpi; + sprintf(int_name, "%s:%s", dev->name, "lpi"); + ret = request_irq(priv->lpi_irq, + stmmac_mac_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc lpi MSI %d (error: %d)\n", + __func__, priv->lpi_irq, ret); + irq_err = REQ_IRQ_ERR_LPI; + goto irq_error; + } + } + + /* Request the Safety Feature Correctible Error line in + * case of another line is used + */ + if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { + int_name = priv->int_name_sfty_ce; + sprintf(int_name, "%s:%s", dev->name, "safety-ce"); + ret = request_irq(priv->sfty_ce_irq, + stmmac_safety_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc sfty ce MSI %d (error: %d)\n", + __func__, priv->sfty_ce_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY_CE; + goto irq_error; + } + } + + /* Request the Safety Feature Uncorrectible Error line in + * case of another line is used + */ + if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { + int_name = priv->int_name_sfty_ue; + sprintf(int_name, "%s:%s", dev->name, "safety-ue"); + ret = request_irq(priv->sfty_ue_irq, + stmmac_safety_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc sfty ue MSI %d (error: %d)\n", + __func__, priv->sfty_ue_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY_UE; + goto irq_error; + } + } + + /* Request Rx MSI irq */ + for (i = 0; i < priv->plat->rx_queues_to_use; i++) { + if (priv->rx_irq[i] == 0) + continue; + + int_name = priv->int_name_rx_irq[i]; + sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); + ret = request_irq(priv->rx_irq[i], + stmmac_msi_intr_rx, + 0, int_name, &priv->rx_queue[i]); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc rx-%d MSI %d (error: %d)\n", + __func__, i, priv->rx_irq[i], ret); + irq_err = REQ_IRQ_ERR_RX; + irq_idx = i; + goto irq_error; + } + cpumask_clear(&cpu_mask); + cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); + irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); + } + + /* Request Tx MSI irq */ + for (i = 0; i < priv->plat->tx_queues_to_use; i++) { + if (priv->tx_irq[i] == 0) + continue; + + int_name = priv->int_name_tx_irq[i]; + sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); + ret = request_irq(priv->tx_irq[i], + stmmac_msi_intr_tx, + 0, int_name, &priv->tx_queue[i]); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc tx-%d MSI %d (error: %d)\n", + __func__, i, priv->tx_irq[i], ret); + irq_err = REQ_IRQ_ERR_TX; + irq_idx = i; + goto irq_error; + 
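/*
 * Illustrative helper (not part of the patch) showing the per-vector naming
 * and CPU-affinity-hint pattern applied to the RX/TX MSI vectors above.
 * request_irq(), irq_set_affinity_hint(), the cpumask helpers and
 * num_online_cpus() are the real kernel APIs; my_request_queue_irq() and its
 * parameters are hypothetical.  Note that @name must point to storage that
 * outlives the IRQ (the driver keeps per-queue name buffers in its private
 * data), and spreading vector i onto CPU (i % num_online_cpus()) gives each
 * queue its own interrupt context without oversubscribing small systems.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

static int my_request_queue_irq(struct net_device *dev, int irq, int i,
				const char *dir, irq_handler_t handler,
				void *cookie, char *name, size_t len)
{
	cpumask_t mask;
	int ret;

	snprintf(name, len, "%s:%s-%d", dev->name, dir, i);	/* e.g. "eth0:rx-2" */
	ret = request_irq(irq, handler, 0, name, cookie);
	if (ret)
		return ret;

	cpumask_clear(&mask);
	cpumask_set_cpu(i % num_online_cpus(), &mask);
	irq_set_affinity_hint(irq, &mask);
	return 0;
}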
} + cpumask_clear(&cpu_mask); + cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); + irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); + } + + return 0; + +irq_error: + stmmac_free_irq(dev, irq_err, irq_idx); + return ret; +} + +static int stmmac_request_irq_single(struct net_device *dev) +{ + enum request_irq_err irq_err = REQ_IRQ_ERR_NO; + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + ret = request_irq(dev->irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the IRQ %d (error: %d)\n", + __func__, dev->irq, ret); + irq_err = REQ_IRQ_ERR_MAC; + return ret; + } + + /* Request the Wake IRQ in case of another line + * is used for WoL + */ + if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { + ret = request_irq(priv->wol_irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the WoL IRQ %d (%d)\n", + __func__, priv->wol_irq, ret); + irq_err = REQ_IRQ_ERR_WOL; + return ret; + } + } + + /* Request the IRQ lines */ + if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { + ret = request_irq(priv->lpi_irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the LPI IRQ %d (%d)\n", + __func__, priv->lpi_irq, ret); + irq_err = REQ_IRQ_ERR_LPI; + goto irq_error; + } + } + + return 0; + +irq_error: + stmmac_free_irq(dev, irq_err, 0); + return ret; +} + +static int stmmac_request_irq(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + /* Request the IRQ lines */ + if (priv->plat->multi_msi_en) + ret = stmmac_request_irq_multi_msi(dev); + else + ret = stmmac_request_irq_single(dev); + + return ret; +} + /** * stmmac_open - open entry point of the driver * @dev : pointer to the device structure. @@ -2887,22 +3631,28 @@ static void stmmac_hw_teardown(struct net_device *dev) * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. */ -static int stmmac_open(struct net_device *dev) +int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); int bfsize = 0; u32 chan; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI && - priv->hw->xpcs == NULL) { + priv->hw->xpcs_args.an_mode != DW_AN_C73) { ret = stmmac_init_phy(dev); if (ret) { netdev_err(priv->dev, "%s: Cannot attach to PHY (error: %d)\n", __func__, ret); - return ret; + goto init_phy_error; } } @@ -2932,9 +3682,8 @@ static int stmmac_open(struct net_device *dev) struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; + /* Setup per-TXQ tbs flag before TX descriptor alloc */ tx_q->tbs |= tbs_en ? 
STMMAC_TBS_AVAIL : 0; - if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan)) - tx_q->tbs &= ~STMMAC_TBS_AVAIL; } ret = alloc_dma_desc_resources(priv); @@ -2963,50 +3712,15 @@ static int stmmac_open(struct net_device *dev) /* We may have called phylink_speed_down before */ phylink_speed_up(priv->phylink); - /* Request the IRQ lines */ - ret = request_irq(dev->irq, stmmac_interrupt, - IRQF_SHARED, dev->name, dev); - if (unlikely(ret < 0)) { - netdev_err(priv->dev, - "%s: ERROR: allocating the IRQ %d (error: %d)\n", - __func__, dev->irq, ret); + ret = stmmac_request_irq(dev); + if (ret) goto irq_error; - } - - /* Request the Wake IRQ in case of another line is used for WoL */ - if (priv->wol_irq != dev->irq) { - ret = request_irq(priv->wol_irq, stmmac_interrupt, - IRQF_SHARED, dev->name, dev); - if (unlikely(ret < 0)) { - netdev_err(priv->dev, - "%s: ERROR: allocating the WoL IRQ %d (%d)\n", - __func__, priv->wol_irq, ret); - goto wolirq_error; - } - } - - /* Request the IRQ lines */ - if (priv->lpi_irq > 0) { - ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, - dev->name, dev); - if (unlikely(ret < 0)) { - netdev_err(priv->dev, - "%s: ERROR: allocating the LPI IRQ %d (%d)\n", - __func__, priv->lpi_irq, ret); - goto lpiirq_error; - } - } stmmac_enable_all_queues(priv); netif_tx_start_all_queues(priv->dev); return 0; -lpiirq_error: - if (priv->wol_irq != dev->irq) - free_irq(priv->wol_irq, dev); -wolirq_error: - free_irq(dev->irq, dev); irq_error: phylink_stop(priv->phylink); @@ -3018,16 +3732,28 @@ init_error: free_dma_desc_resources(priv); dma_desc_error: phylink_disconnect_phy(priv->phylink); +init_phy_error: + pm_runtime_put(priv->device); return ret; } +static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) +{ + set_bit(__FPE_REMOVING, &priv->fpe_task_state); + + if (priv->fpe_wq) + destroy_workqueue(priv->fpe_wq); + + netdev_info(priv->dev, "FPE workqueue stop"); +} + /** * stmmac_release - close entry point of the driver * @dev : device pointer. * Description: * This is the stop entry point of the driver. */ -static int stmmac_release(struct net_device *dev) +int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); u32 chan; @@ -3044,11 +3770,7 @@ static int stmmac_release(struct net_device *dev) hrtimer_cancel(&priv->tx_queue[chan].txtimer); /* Free the IRQ lines */ - free_irq(dev->irq, dev); - if (priv->wol_irq != dev->irq) - free_irq(priv->wol_irq, dev); - if (priv->lpi_irq > 0) - free_irq(priv->lpi_irq, dev); + stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); if (priv->eee_enabled) { priv->tx_path_in_lpi_mode = false; @@ -3068,6 +3790,11 @@ static int stmmac_release(struct net_device *dev) stmmac_release_ptp(priv); + pm_runtime_put(priv->device); + + if (priv->dma_cap.fpesel) + stmmac_fpe_stop_wq(priv); + return 0; } @@ -3153,6 +3880,28 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, } } +static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + int desc_size; + + if (likely(priv->extend_desc)) + desc_size = sizeof(struct dma_extended_desc); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + desc_size = sizeof(struct dma_edesc); + else + desc_size = sizeof(struct dma_desc); + + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. 
+ */ + wmb(); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); +} + /** * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) * @skb : the socket buffer @@ -3184,10 +3933,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) { struct dma_desc *desc, *first, *mss_desc = NULL; struct stmmac_priv *priv = netdev_priv(dev); - int desc_size, tmp_pay_len = 0, first_tx; int nfrags = skb_shinfo(skb)->nr_frags; u32 queue = skb_get_queue_mapping(skb); unsigned int first_entry, tx_packets; + int tmp_pay_len = 0, first_tx; struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; u8 proto_hdr_len, hdr; @@ -3269,6 +4018,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[first_entry].buf = des; tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); + tx_q->tx_skbuff_dma[first_entry].map_as_page = false; + tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; if (priv->dma_cap.addr64 <= 32) { first->des0 = cpu_to_le32(des); @@ -3304,12 +4055,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; } tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; /* Only the last descriptor gets to point to the skb. */ tx_q->tx_skbuff[tx_q->cur_tx] = skb; + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; /* Manage tx mitigation */ tx_packets = (tx_q->cur_tx + 1) - first_tx; @@ -3317,11 +4070,12 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) set_ic = true; - else if (!priv->tx_coal_frames) + else if (!priv->tx_coal_frames[queue]) set_ic = false; - else if (tx_packets > priv->tx_coal_frames) + else if (tx_packets > priv->tx_coal_frames[queue]) set_ic = true; - else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) + else if ((tx_q->tx_count_frames % + priv->tx_coal_frames[queue]) < tx_packets) set_ic = true; else set_ic = false; @@ -3384,12 +4138,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_set_tx_owner(priv, mss_desc); } - /* The own bit must be the latest setting done when prepare the - * descriptor and then barrier is needed to make sure that - * all is coherent before granting the DMA engine. 
- */ - wmb(); - if (netif_msg_pktdata(priv)) { pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, @@ -3400,13 +4148,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); - if (tx_q->tbs & STMMAC_TBS_AVAIL) - desc_size = sizeof(struct dma_edesc); - else - desc_size = sizeof(struct dma_desc); - - tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); - stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3436,10 +4178,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) int nfrags = skb_shinfo(skb)->nr_frags; int gso = skb_shinfo(skb)->gso_type; struct dma_edesc *tbs_desc = NULL; - int entry, desc_size, first_tx; struct dma_desc *desc, *first; struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; + int entry, first_tx; dma_addr_t des; tx_q = &priv->tx_queue[queue]; @@ -3527,6 +4269,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[entry].map_as_page = true; tx_q->tx_skbuff_dma[entry].len = len; tx_q->tx_skbuff_dma[entry].last_segment = last_segment; + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; /* Prepare the descriptor and set the own bit too */ stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, @@ -3535,6 +4278,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) /* Only the last descriptor gets to point to the skb. */ tx_q->tx_skbuff[entry] = skb; + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; /* According to the coalesce parameter the IC bit for the latest * segment is reset and the timer re-started to clean the tx status. @@ -3546,11 +4290,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) set_ic = true; - else if (!priv->tx_coal_frames) + else if (!priv->tx_coal_frames[queue]) set_ic = false; - else if (tx_packets > priv->tx_coal_frames) + else if (tx_packets > priv->tx_coal_frames[queue]) set_ic = true; - else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) + else if ((tx_q->tx_count_frames % + priv->tx_coal_frames[queue]) < tx_packets) set_ic = true; else set_ic = false; @@ -3612,6 +4357,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) goto dma_map_err; tx_q->tx_skbuff_dma[first_entry].buf = des; + tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; + tx_q->tx_skbuff_dma[first_entry].map_as_page = false; stmmac_set_desc_addr(priv, first, des); @@ -3640,25 +4387,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_set_tx_owner(priv, first); - /* The own bit must be the latest setting done when prepare the - * descriptor and then barrier is needed to make sure that - * all is coherent before granting the DMA engine. 
- */ - wmb(); - netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); stmmac_enable_dma_transmission(priv, priv->ioaddr); - if (likely(priv->extend_desc)) - desc_size = sizeof(struct dma_extended_desc); - else if (tx_q->tbs & STMMAC_TBS_AVAIL) - desc_size = sizeof(struct dma_edesc); - else - desc_size = sizeof(struct dma_desc); - - tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); - stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3701,11 +4434,9 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - int len, dirty = stmmac_rx_dirty(priv, queue); + int dirty = stmmac_rx_dirty(priv, queue); unsigned int entry = rx_q->dirty_rx; - len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; - while (dirty-- > 0) { struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; struct dma_desc *p; @@ -3728,18 +4459,9 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) break; buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); - - dma_sync_single_for_device(priv->device, buf->sec_addr, - len, DMA_FROM_DEVICE); } - buf->addr = page_pool_get_dma_addr(buf->page); - - /* Sync whole allocation to device. This will invalidate old - * data. - */ - dma_sync_single_for_device(priv->device, buf->addr, len, - DMA_FROM_DEVICE); + buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; stmmac_set_desc_addr(priv, p, buf->addr); if (priv->sph) @@ -3749,11 +4471,11 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) stmmac_refill_desc3(priv, rx_q, p); rx_q->rx_count_frames++; - rx_q->rx_count_frames += priv->rx_coal_frames; - if (rx_q->rx_count_frames > priv->rx_coal_frames) + rx_q->rx_count_frames += priv->rx_coal_frames[queue]; + if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) rx_q->rx_count_frames = 0; - use_rx_wd = !priv->rx_coal_frames; + use_rx_wd = !priv->rx_coal_frames[queue]; use_rx_wd |= rx_q->rx_count_frames > 0; if (!priv->use_riwt) use_rx_wd = false; @@ -3818,6 +4540,487 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, return plen - len; } +static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, + struct xdp_frame *xdpf, bool dma_map) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + unsigned int entry = tx_q->cur_tx; + struct dma_desc *tx_desc; + dma_addr_t dma_addr; + bool set_ic; + + if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) + return STMMAC_XDP_CONSUMED; + + if (likely(priv->extend_desc)) + tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); + else if (tx_q->tbs & STMMAC_TBS_AVAIL) + tx_desc = &tx_q->dma_entx[entry].basic; + else + tx_desc = tx_q->dma_tx + entry; + + if (dma_map) { + dma_addr = dma_map_single(priv->device, xdpf->data, + xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, dma_addr)) + return STMMAC_XDP_CONSUMED; + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; + } else { + struct page *page = virt_to_page(xdpf->data); + + dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + + xdpf->headroom; + dma_sync_single_for_device(priv->device, dma_addr, + xdpf->len, DMA_BIDIRECTIONAL); + + tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; + } + + tx_q->tx_skbuff_dma[entry].buf = dma_addr; + 
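/*
 * Minimal sketch (not part of the patch) of the two DMA paths that the
 * @dma_map flag selects in stmmac_xdp_xmit_xdpf() above: frames coming from
 * ndo_xdp_xmit() are foreign memory and need a fresh streaming mapping,
 * while XDP_TX frames still sit in page_pool pages that are already mapped,
 * so only a device sync of the bytes the CPU wrote is required.  The DMA and
 * page_pool calls are the real APIs; my_map_xdp_frame() is hypothetical.
 */
#include <linux/dma-mapping.h>
#include <net/xdp.h>
#include <net/page_pool.h>

static int my_map_xdp_frame(struct device *dev, struct xdp_frame *xdpf,
			    bool from_ndo_xmit, dma_addr_t *dma)
{
	if (from_ndo_xmit) {
		*dma = dma_map_single(dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma))
			return -ENOMEM;
	} else {
		/* xdp_convert_buff_to_frame() stores the xdp_frame struct at
		 * the start of the buffer, so the payload follows the struct
		 * plus the remaining headroom.
		 */
		struct page *page = virt_to_page(xdpf->data);

		*dma = page_pool_get_dma_addr(page) + sizeof(*xdpf) + xdpf->headroom;
		dma_sync_single_for_device(dev, *dma, xdpf->len, DMA_BIDIRECTIONAL);
	}
	return 0;
}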
tx_q->tx_skbuff_dma[entry].map_as_page = false; + tx_q->tx_skbuff_dma[entry].len = xdpf->len; + tx_q->tx_skbuff_dma[entry].last_segment = true; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; + + tx_q->xdpf[entry] = xdpf; + + stmmac_set_desc_addr(priv, tx_desc, dma_addr); + + stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, + true, priv->mode, true, true, + xdpf->len); + + tx_q->tx_count_frames++; + + if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, tx_desc); + priv->xstats.tx_set_ic_bit++; + } + + stmmac_enable_dma_transmission(priv, priv->ioaddr); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + tx_q->cur_tx = entry; + + return STMMAC_XDP_TX; +} + +static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= priv->plat->tx_queues_to_use) + index -= priv->plat->tx_queues_to_use; + + return index; +} + +static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, + struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + int queue; + int res; + + if (unlikely(!xdpf)) + return STMMAC_XDP_CONSUMED; + + queue = stmmac_xdp_get_tx_queue(priv, cpu); + nq = netdev_get_tx_queue(priv->dev, queue); + + __netif_tx_lock(nq, cpu); + /* Avoids TX time-out as we are sharing with slow path */ + nq->trans_start = jiffies; + + res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); + if (res == STMMAC_XDP_TX) + stmmac_flush_tx_descriptors(priv, queue); + + __netif_tx_unlock(nq); + + return res; +} + +/* This function assumes rcu_read_lock() is held by the caller. 
*/ +static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act; + int res; + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: + res = STMMAC_XDP_PASS; + break; + case XDP_TX: + res = stmmac_xdp_xmit_back(priv, xdp); + break; + case XDP_REDIRECT: + if (xdp_do_redirect(priv->dev, xdp, prog) < 0) + res = STMMAC_XDP_CONSUMED; + else + res = STMMAC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(priv->dev, prog, act); + fallthrough; + case XDP_DROP: + res = STMMAC_XDP_CONSUMED; + break; + } + + return res; +} + +static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + rcu_read_lock(); + + prog = READ_ONCE(priv->xdp_prog); + if (!prog) { + res = STMMAC_XDP_PASS; + goto unlock; + } + + res = __stmmac_xdp_run_prog(priv, prog, xdp); +unlock: + rcu_read_unlock(); + return ERR_PTR(-res); +} + +static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, + int xdp_status) +{ + int cpu = smp_processor_id(); + int queue; + + queue = stmmac_xdp_get_tx_queue(priv, cpu); + + if (xdp_status & STMMAC_XDP_TX) + stmmac_tx_timer_arm(priv, queue); + + if (xdp_status & STMMAC_XDP_REDIRECT) + xdp_do_flush(); +} + +static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + struct sk_buff *skb; + + skb = __napi_alloc_skb(&ch->rxtx_napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + + return skb; +} + +static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, + struct dma_desc *p, struct dma_desc *np, + struct xdp_buff *xdp) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned int len = xdp->data_end - xdp->data; + enum pkt_hash_types hash_type; + int coe = priv->hw->rx_csum; + struct sk_buff *skb; + u32 hash; + + skb = stmmac_construct_skb_zc(ch, xdp); + if (!skb) { + priv->dev->stats.rx_dropped++; + return; + } + + stmmac_get_rx_hwtstamp(priv, p, np, skb); + stmmac_rx_vlan(priv->dev, skb); + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(!coe)) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) + skb_set_hash(skb, hash, hash_type); + + skb_record_rx_queue(skb, queue); + napi_gro_receive(&ch->rxtx_napi, skb); + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += len; +} + +static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + unsigned int entry = rx_q->dirty_rx; + struct dma_desc *rx_desc = NULL; + bool ret = true; + + budget = min(budget, stmmac_rx_dirty(priv, queue)); + + while (budget-- > 0 && entry != rx_q->cur_rx) { + struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; + dma_addr_t dma_addr; + bool use_rx_wd; + + if (!buf->xdp) { + buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); + if (!buf->xdp) { + ret = false; + break; + } + } + + if (priv->extend_desc) + rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); + else + rx_desc = rx_q->dma_rx + entry; + + dma_addr = 
xsk_buff_xdp_get_dma(buf->xdp); + stmmac_set_desc_addr(priv, rx_desc, dma_addr); + stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); + stmmac_refill_desc3(priv, rx_q, rx_desc); + + rx_q->rx_count_frames++; + rx_q->rx_count_frames += priv->rx_coal_frames[queue]; + if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) + rx_q->rx_count_frames = 0; + + use_rx_wd = !priv->rx_coal_frames[queue]; + use_rx_wd |= rx_q->rx_count_frames > 0; + if (!priv->use_riwt) + use_rx_wd = false; + + dma_wmb(); + stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); + + entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); + } + + if (rx_desc) { + rx_q->dirty_rx = entry; + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->dirty_rx * sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); + } + + return ret; +} + +static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + unsigned int count = 0, error = 0, len = 0; + int dirty = stmmac_rx_dirty(priv, queue); + unsigned int next_entry = rx_q->cur_rx; + unsigned int desc_size; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + int status = 0; + + if (netif_msg_rx_status(priv)) { + void *rx_head; + + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); + if (priv->extend_desc) { + rx_head = (void *)rx_q->dma_erx; + desc_size = sizeof(struct dma_extended_desc); + } else { + rx_head = (void *)rx_q->dma_rx; + desc_size = sizeof(struct dma_desc); + } + + stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, + rx_q->dma_rx_phy, desc_size); + } + while (count < limit) { + struct stmmac_rx_buffer *buf; + unsigned int buf1_len = 0; + struct dma_desc *np, *p; + int entry; + int res; + + if (!count && rx_q->state_saved) { + error = rx_q->state.error; + len = rx_q->state.len; + } else { + rx_q->state_saved = false; + error = 0; + len = 0; + } + + if (count >= limit) + break; + +read_again: + buf1_len = 0; + entry = next_entry; + buf = &rx_q->buf_pool[entry]; + + if (dirty >= STMMAC_RX_FILL_BATCH) { + failure = failure || + !stmmac_rx_refill_zc(priv, queue, dirty); + dirty = 0; + } + + if (priv->extend_desc) + p = (struct dma_desc *)(rx_q->dma_erx + entry); + else + p = rx_q->dma_rx + entry; + + /* read the status of the incoming frame */ + status = stmmac_rx_status(priv, &priv->dev->stats, + &priv->xstats, p); + /* check if managed by the DMA otherwise go ahead */ + if (unlikely(status & dma_own)) + break; + + /* Prefetch the next RX descriptor */ + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, + priv->dma_rx_size); + next_entry = rx_q->cur_rx; + + if (priv->extend_desc) + np = (struct dma_desc *)(rx_q->dma_erx + next_entry); + else + np = rx_q->dma_rx + next_entry; + + prefetch(np); + + if (priv->extend_desc) + stmmac_rx_extended_status(priv, &priv->dev->stats, + &priv->xstats, + rx_q->dma_erx + entry); + if (unlikely(status == discard_frame)) { + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + dirty++; + error = 1; + if (!priv->hwts_rx_en) + priv->dev->stats.rx_errors++; + } + + if (unlikely(error && (status & rx_not_ls))) + goto read_again; + if (unlikely(error)) { + count++; + continue; + } + + /* Ensure a valid XSK buffer before proceed */ + if (!buf->xdp) + break; + + /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ + if (likely(status & rx_not_ls)) { + xsk_buff_free(buf->xdp); + buf->xdp = NULL; + dirty++; + count++; + goto read_again; + } + + /* XDP ZC Frame only support primary buffers for now */ + buf1_len = 
stmmac_rx_buf1_len(priv, p, status, len); + len += buf1_len; + + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 + * Type frames (LLC/LLC-SNAP) + * + * llc_snap is never checked in GMAC >= 4, so this ACS + * feature is always disabled and packets need to be + * stripped manually. + */ + if (likely(!(status & rx_not_ls)) && + (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || + unlikely(status != llc_snap))) { + buf1_len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } + + /* RX buffer is good and fit into a XSK pool buffer */ + buf->xdp->data_end = buf->xdp->data + buf1_len; + xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); + + rcu_read_lock(); + prog = READ_ONCE(priv->xdp_prog); + res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); + rcu_read_unlock(); + + switch (res) { + case STMMAC_XDP_PASS: + stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); + xsk_buff_free(buf->xdp); + break; + case STMMAC_XDP_CONSUMED: + xsk_buff_free(buf->xdp); + priv->dev->stats.rx_dropped++; + break; + case STMMAC_XDP_TX: + case STMMAC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + buf->xdp = NULL; + dirty++; + count++; + } + + if (status & rx_not_ls) { + rx_q->state_saved = true; + rx_q->state.error = error; + rx_q->state.len = len; + } + + stmmac_finalize_xdp_rx(priv, xdp_status); + + if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { + if (failure || stmmac_rx_dirty(priv, queue) > 0) + xsk_set_rx_need_wakeup(rx_q->xsk_pool); + else + xsk_clear_rx_need_wakeup(rx_q->xsk_pool); + + return (int)count; + } + + return failure ? limit : (int)count; +} + /** * stmmac_rx - manage the receive process * @priv: driver private structure @@ -3833,8 +5036,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) unsigned int count = 0, error = 0, len = 0; int status = 0, coe = priv->hw->rx_csum; unsigned int next_entry = rx_q->cur_rx; + enum dma_data_direction dma_dir; unsigned int desc_size; struct sk_buff *skb = NULL; + struct xdp_buff xdp; + int xdp_status = 0; + int buf_sz; + + dma_dir = page_pool_get_dma_dir(rx_q->page_pool); + buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; if (netif_msg_rx_status(priv)) { void *rx_head; @@ -3952,6 +5162,64 @@ read_again: } if (!skb) { + unsigned int pre_len, sync_len; + + dma_sync_single_for_cpu(priv->device, buf->addr, + buf1_len, dma_dir); + + xdp.data = page_address(buf->page) + buf->page_offset; + xdp.data_end = xdp.data + buf1_len; + xdp.data_hard_start = page_address(buf->page); + xdp_set_data_meta_invalid(&xdp); + xdp.frame_sz = buf_sz; + xdp.rxq = &rx_q->xdp_rxq; + + pre_len = xdp.data_end - xdp.data_hard_start - + buf->page_offset; + skb = stmmac_xdp_run_prog(priv, &xdp); + /* Due xdp_adjust_tail: DMA sync for_device + * cover max len CPU touch + */ + sync_len = xdp.data_end - xdp.data_hard_start - + buf->page_offset; + sync_len = max(sync_len, pre_len); + + /* For Not XDP_PASS verdict */ + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & STMMAC_XDP_CONSUMED) { + page_pool_put_page(rx_q->page_pool, + virt_to_head_page(xdp.data), + sync_len, true); + buf->page = NULL; + priv->dev->stats.rx_dropped++; + + /* Clear skb as it was set as + * status by XDP program. 
+ */ + skb = NULL; + + if (unlikely((status & rx_not_ls))) + goto read_again; + + count++; + continue; + } else if (xdp_res & (STMMAC_XDP_TX | + STMMAC_XDP_REDIRECT)) { + xdp_status |= xdp_res; + buf->page = NULL; + skb = NULL; + count++; + continue; + } + } + } + + if (!skb) { + /* XDP program may expand or reduce tail */ + buf1_len = xdp.data_end - xdp.data; + skb = napi_alloc_skb(&ch->rx_napi, buf1_len); if (!skb) { priv->dev->stats.rx_dropped++; @@ -3959,10 +5227,8 @@ read_again: goto drain_data; } - dma_sync_single_for_cpu(priv->device, buf->addr, - buf1_len, DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, page_address(buf->page), - buf1_len); + /* XDP program may adjust header */ + skb_copy_to_linear_data(skb, xdp.data, buf1_len); skb_put(skb, buf1_len); /* Data payload copied into SKB, page ready for recycle */ @@ -3970,9 +5236,9 @@ read_again: buf->page = NULL; } else if (buf1_len) { dma_sync_single_for_cpu(priv->device, buf->addr, - buf1_len, DMA_FROM_DEVICE); + buf1_len, dma_dir); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - buf->page, 0, buf1_len, + buf->page, buf->page_offset, buf1_len, priv->dma_buf_sz); /* Data payload appended into SKB */ @@ -3982,7 +5248,7 @@ read_again: if (buf2_len) { dma_sync_single_for_cpu(priv->device, buf->sec_addr, - buf2_len, DMA_FROM_DEVICE); + buf2_len, dma_dir); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->sec_page, 0, buf2_len, priv->dma_buf_sz); @@ -4028,6 +5294,8 @@ drain_data: rx_q->state.len = len; } + stmmac_finalize_xdp_rx(priv, xdp_status); + stmmac_rx_refill(priv, queue); priv->xstats.rx_pkt_n += count; @@ -4067,7 +5335,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) priv->xstats.napi_poll++; - work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan); + work_done = stmmac_tx_clean(priv, budget, chan); work_done = min(work_done, budget); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -4081,6 +5349,42 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) return work_done; } +static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) +{ + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, rxtx_napi); + struct stmmac_priv *priv = ch->priv_data; + int rx_done, tx_done; + u32 chan = ch->index; + + priv->xstats.napi_poll++; + + tx_done = stmmac_tx_clean(priv, budget, chan); + tx_done = min(tx_done, budget); + + rx_done = stmmac_rx_zc(priv, budget, chan); + + /* If either TX or RX work is not complete, return budget + * and keep pooling + */ + if (tx_done >= budget || rx_done >= budget) + return budget; + + /* all work done, exit the polling mode */ + if (napi_complete_done(napi, rx_done)) { + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + /* Both RX and TX work done are compelte, + * so enable both RX & TX IRQs. 
+ */ + stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); + spin_unlock_irqrestore(&ch->lock, flags); + } + + return min(rx_done, budget - 1); +} + /** * stmmac_tx_timeout * @dev : Pointer to net device structure @@ -4140,6 +5444,11 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) return -EBUSY; } + if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); + return -EINVAL; + } + new_mtu = STMMAC_ALIGN(new_mtu); /* If condition true, FIFO is too small or MTU too large */ @@ -4201,27 +5510,57 @@ static int stmmac_set_features(struct net_device *netdev, stmmac_rx_ipc(priv, priv->hw); sph_en = (priv->hw->rx_csum > 0) && priv->sph; + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); return 0; } -/** - * stmmac_interrupt - main ISR - * @irq: interrupt number. - * @dev_id: to pass the net device pointer (must be valid). - * Description: this is the main driver interrupt service routine. - * It can call: - * o DMA service routine (to manage incoming frame reception and transmission - * status) - * o Core interrupts to manage: remote wake-up, management counter, LPI - * interrupts. - */ -static irqreturn_t stmmac_interrupt(int irq, void *dev_id) +static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) +{ + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + + if (status == FPE_EVENT_UNKNOWN || !*hs_enable) + return; + + /* If LP has sent verify mPacket, LP is FPE capable */ + if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { + if (*lp_state < FPE_STATE_CAPABLE) + *lp_state = FPE_STATE_CAPABLE; + + /* If user has requested FPE enable, quickly response */ + if (*hs_enable) + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + MPACKET_RESPONSE); + } + + /* If Local has sent verify mPacket, Local is FPE capable */ + if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { + if (*lo_state < FPE_STATE_CAPABLE) + *lo_state = FPE_STATE_CAPABLE; + } + + /* If LP has sent response mPacket, LP is entering FPE ON */ + if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) + *lp_state = FPE_STATE_ENTERING_ON; + + /* If Local has sent response mPacket, Local is entering FPE ON */ + if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) + *lo_state = FPE_STATE_ENTERING_ON; + + if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && + !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && + priv->fpe_wq) { + queue_work(priv->fpe_wq, &priv->fpe_task); + } +} + +static void stmmac_common_interrupt(struct stmmac_priv *priv) { - struct net_device *dev = (struct net_device *)dev_id; - struct stmmac_priv *priv = netdev_priv(dev); u32 rx_cnt = priv->plat->rx_queues_to_use; u32 tx_cnt = priv->plat->tx_queues_to_use; u32 queues_count; @@ -4234,17 +5573,20 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) if (priv->irq_wake) pm_wakeup_event(priv->device, 0); - /* Check if adapter is up */ - if (test_bit(STMMAC_DOWN, &priv->state)) - return IRQ_HANDLED; - /* Check if a fatal error happened */ - if (stmmac_safety_feat_interrupt(priv)) - return IRQ_HANDLED; + if (priv->dma_cap.estsel) + stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, + &priv->xstats, tx_cnt); + + if (priv->dma_cap.fpesel) { + int status = stmmac_fpe_irq_status(priv, priv->ioaddr, + priv->dev); + + stmmac_fpe_event_status(priv, 
status); + } /* To handle GMAC own interrupts */ if ((priv->plat->has_gmac) || xmac) { int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); - int mtl_status; if (unlikely(status)) { /* For LPI we need to save the tx status */ @@ -4255,27 +5597,48 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) } for (queue = 0; queue < queues_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - - mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw, - queue); - if (mtl_status != -EINVAL) - status |= mtl_status; - - if (status & CORE_IRQ_MTL_RX_OVERFLOW) - stmmac_set_rx_tail_ptr(priv, priv->ioaddr, - rx_q->rx_tail_addr, - queue); + status = stmmac_host_mtl_irq_status(priv, priv->hw, + queue); } /* PCS link status */ if (priv->hw->pcs) { if (priv->xstats.pcs_link) - netif_carrier_on(dev); + netif_carrier_on(priv->dev); else - netif_carrier_off(dev); + netif_carrier_off(priv->dev); } + + stmmac_timestamp_interrupt(priv, priv); } +} + +/** + * stmmac_interrupt - main ISR + * @irq: interrupt number. + * @dev_id: to pass the net device pointer. + * Description: this is the main driver interrupt service routine. + * It can call: + * o DMA service routine (to manage incoming frame reception and transmission + * status) + * o Core interrupts to manage: remote wake-up, management counter, LPI + * interrupts. + */ +static irqreturn_t stmmac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* Check if a fatal error happened */ + if (stmmac_safety_feat_interrupt(priv)) + return IRQ_HANDLED; + + /* To handle Common interrupts */ + stmmac_common_interrupt(priv); /* To handle DMA interrupts */ stmmac_dma_interrupt(priv); @@ -4283,15 +5646,136 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + if (unlikely(!dev)) { + netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* To handle Common interrupts */ + stmmac_common_interrupt(priv); + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + if (unlikely(!dev)) { + netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + /* Check if a fatal error happened */ + stmmac_safety_feat_interrupt(priv); + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) +{ + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; + int chan = tx_q->queue_index; + struct stmmac_priv *priv; + int status; + + priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]); + + if (unlikely(!data)) { + netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + status = stmmac_napi_check(priv, chan, DMA_DIR_TX); + + if (unlikely(status & tx_hard_error_bump_tc)) { + /* Try to bump up 
the dma threshold on this failure */ + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && + tc <= 256) { + tc += 64; + if (priv->plat->force_thresh_dma_mode) + stmmac_set_dma_operation_mode(priv, + tc, + tc, + chan); + else + stmmac_set_dma_operation_mode(priv, + tc, + SF_DMA_MODE, + chan); + priv->xstats.threshold = tc; + } + } else if (unlikely(status == tx_hard_error)) { + stmmac_tx_err(priv, chan); + } + + return IRQ_HANDLED; +} + +static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) +{ + struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; + int chan = rx_q->queue_index; + struct stmmac_priv *priv; + + priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]); + + if (unlikely(!data)) { + netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; + + stmmac_napi_check(priv, chan, DMA_DIR_RX); + + return IRQ_HANDLED; +} + #ifdef CONFIG_NET_POLL_CONTROLLER /* Polling receive - used by NETCONSOLE and other diagnostic tools * to allow network I/O with interrupts disabled. */ static void stmmac_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); - stmmac_interrupt(dev->irq, dev); - enable_irq(dev->irq); + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + /* If adapter is down, do nothing */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return; + + if (priv->plat->multi_msi_en) { + for (i = 0; i < priv->plat->rx_queues_to_use; i++) + stmmac_msi_intr_rx(0, &priv->rx_queue[i]); + + for (i = 0; i < priv->plat->tx_queues_to_use; i++) + stmmac_msi_intr_tx(0, &priv->tx_queue[i]); + } else { + disable_irq(dev->irq); + stmmac_interrupt(dev->irq, dev); + enable_irq(dev->irq); + } } #endif @@ -4340,7 +5824,7 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) return ret; - stmmac_disable_all_queues(priv); + __stmmac_disable_all_queues(priv); switch (type) { case TC_SETUP_CLSU32: @@ -4704,6 +6188,12 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid bool is_double = false; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + if (be16_to_cpu(proto) == ETH_P_8021AD) is_double = true; @@ -4737,10 +6227,222 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi if (priv->hw->num_vlan) { ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); if (ret) - return ret; + goto del_vlan_error; + } + + ret = stmmac_vlan_update(priv, is_double); + +del_vlan_error: + pm_runtime_put(priv->device); + + return ret; +} + +static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + int i, nxmit = 0; + int queue; + + if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + queue = stmmac_xdp_get_tx_queue(priv, cpu); + 
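/*
 * Condensed sketch (not part of the patch) of the runtime-PM bracket that
 * this series adds around slow-path entry points such as the VLAN filter
 * updates and the MDIO accessors: take a usage reference and resume before
 * touching registers, balance it with pm_runtime_put_noidle() if the resume
 * itself failed, and drop it again when done.  The pm_runtime_*() calls are
 * the real APIs; my_guarded_op() and my_do_mmio_work() are hypothetical.
 */
#include <linux/pm_runtime.h>

static int my_do_mmio_work(struct device *dev);	/* hypothetical register access */

static int my_guarded_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resume and take a usage ref */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* resume failed: drop the ref only */
		return ret;
	}

	ret = my_do_mmio_work(dev);		/* clocks are guaranteed to be on here */

	pm_runtime_put(dev);			/* allow autosuspend again */
	return ret;
}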
nq = netdev_get_tx_queue(priv->dev, queue); + + __netif_tx_lock(nq, cpu); + /* Avoids TX time-out as we are sharing with slow path */ + nq->trans_start = jiffies; + + for (i = 0; i < num_frames; i++) { + int res; + + res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); + if (res == STMMAC_XDP_CONSUMED) + break; + + nxmit++; + } + + if (flags & XDP_XMIT_FLUSH) { + stmmac_flush_tx_descriptors(priv, queue); + stmmac_tx_timer_arm(priv, queue); } - return stmmac_vlan_update(priv, is_double); + __netif_tx_unlock(nq); + + return nxmit; +} + +void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); + + stmmac_stop_rx_dma(priv, queue); + __free_dma_rx_desc_resources(priv, queue); +} + +void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + u32 buf_size; + int ret; + + ret = __alloc_dma_rx_desc_resources(priv, queue); + if (ret) { + netdev_err(priv->dev, "Failed to alloc RX desc.\n"); + return; + } + + ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL); + if (ret) { + __free_dma_rx_desc_resources(priv, queue); + netdev_err(priv->dev, "Failed to init RX desc.\n"); + return; + } + + stmmac_clear_rx_descriptors(priv, queue); + + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, rx_q->queue_index); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * + sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, rx_q->queue_index); + + if (rx_q->xsk_pool && rx_q->buf_alloc_num) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + rx_q->queue_index); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_buf_sz, + rx_q->queue_index); + } + + stmmac_start_rx_dma(priv, queue); + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); + spin_unlock_irqrestore(&ch->lock, flags); +} + +void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + + spin_lock_irqsave(&ch->lock, flags); + stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); + + stmmac_stop_tx_dma(priv, queue); + __free_dma_tx_desc_resources(priv, queue); +} + +void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; + unsigned long flags; + int ret; + + ret = __alloc_dma_tx_desc_resources(priv, queue); + if (ret) { + netdev_err(priv->dev, "Failed to alloc TX desc.\n"); + return; + } + + ret = __init_dma_tx_desc_rings(priv, queue); + if (ret) { + __free_dma_tx_desc_resources(priv, queue); + netdev_err(priv->dev, "Failed to init TX desc.\n"); + return; + } + + stmmac_clear_tx_descriptors(priv, queue); + + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, tx_q->queue_index); + + if (tx_q->tbs & STMMAC_TBS_AVAIL) + stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy; + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, 
tx_q->queue_index); + + stmmac_start_tx_dma(priv, queue); + + spin_lock_irqsave(&ch->lock, flags); + stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); + spin_unlock_irqrestore(&ch->lock, flags); +} + +int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + struct stmmac_channel *ch; + + if (test_bit(STMMAC_DOWN, &priv->state) || + !netif_carrier_ok(priv->dev)) + return -ENETDOWN; + + if (!stmmac_xdp_is_enabled(priv)) + return -ENXIO; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + rx_q = &priv->rx_queue[queue]; + tx_q = &priv->tx_queue[queue]; + ch = &priv->channel[queue]; + + if (!rx_q->xsk_pool && !tx_q->xsk_pool) + return -ENXIO; + + if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { + /* EQoS does not have per-DMA channel SW interrupt, + * so we schedule RX Napi straight-away. + */ + if (likely(napi_schedule_prep(&ch->rxtx_napi))) + __napi_schedule(&ch->rxtx_napi); + } + + return 0; } static const struct net_device_ops stmmac_netdev_ops = { @@ -4761,6 +6463,9 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_set_mac_address = stmmac_set_mac_address, .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, + .ndo_bpf = stmmac_bpf, + .ndo_xdp_xmit = stmmac_xdp_xmit, + .ndo_xsk_wakeup = stmmac_xsk_wakeup, }; static void stmmac_reset_subtask(struct stmmac_priv *priv) @@ -4919,6 +6624,12 @@ static void stmmac_napi_add(struct net_device *dev) stmmac_napi_poll_tx, NAPI_POLL_WEIGHT); } + if (queue < priv->plat->rx_queues_to_use && + queue < priv->plat->tx_queues_to_use) { + netif_napi_add(dev, &ch->rxtx_napi, + stmmac_napi_poll_rxtx, + NAPI_POLL_WEIGHT); + } } } @@ -4936,6 +6647,10 @@ static void stmmac_napi_del(struct net_device *dev) netif_napi_del(&ch->rx_napi); if (queue < priv->plat->tx_queues_to_use) netif_napi_del(&ch->tx_napi); + if (queue < priv->plat->rx_queues_to_use && + queue < priv->plat->tx_queues_to_use) { + netif_napi_del(&ch->rxtx_napi); + } } } @@ -4977,6 +6692,68 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) return ret; } +#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" +static void stmmac_fpe_lp_task(struct work_struct *work) +{ + struct stmmac_priv *priv = container_of(work, struct stmmac_priv, + fpe_task); + struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; + enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; + enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; + bool *hs_enable = &fpe_cfg->hs_enable; + bool *enable = &fpe_cfg->enable; + int retries = 20; + + while (retries-- > 0) { + /* Bail out immediately if FPE handshake is OFF */ + if (*lo_state == FPE_STATE_OFF || !*hs_enable) + break; + + if (*lo_state == FPE_STATE_ENTERING_ON && + *lp_state == FPE_STATE_ENTERING_ON) { + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + *enable); + + netdev_info(priv->dev, "configured FPE\n"); + + *lo_state = FPE_STATE_ON; + *lp_state = FPE_STATE_ON; + netdev_info(priv->dev, "!!! 
BOTH FPE stations ON\n"); + break; + } + + if ((*lo_state == FPE_STATE_CAPABLE || + *lo_state == FPE_STATE_ENTERING_ON) && + *lp_state != FPE_STATE_ON) { + netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, + *lo_state, *lp_state); + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + MPACKET_VERIFY); + } + /* Sleep then retry */ + msleep(500); + } + + clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); +} + +void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) +{ + if (priv->plat->fpe_cfg->hs_enable != enable) { + if (enable) { + stmmac_fpe_send_mpacket(priv, priv->ioaddr, + MPACKET_VERIFY); + } else { + priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; + priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; + } + + priv->plat->fpe_cfg->hs_enable = enable; + } +} + /** * stmmac_dvr_probe * @device: device pointer @@ -5012,12 +6789,19 @@ int stmmac_dvr_probe(struct device *device, priv->plat = plat_dat; priv->ioaddr = res->addr; priv->dev->base_addr = (unsigned long)res->addr; + priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; priv->dev->irq = res->irq; priv->wol_irq = res->wol_irq; priv->lpi_irq = res->lpi_irq; - - if (!IS_ERR_OR_NULL(res->mac)) + priv->sfty_ce_irq = res->sfty_ce_irq; + priv->sfty_ue_irq = res->sfty_ue_irq; + for (i = 0; i < MTL_MAX_RX_QUEUES; i++) + priv->rx_irq[i] = res->rx_irq[i]; + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) + priv->tx_irq[i] = res->tx_irq[i]; + + if (!is_zero_ether_addr(res->mac)) memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); dev_set_drvdata(device, priv->dev); @@ -5025,6 +6809,10 @@ int stmmac_dvr_probe(struct device *device, /* Verify driver arguments */ stmmac_verify_args(); + priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); + if (!priv->af_xdp_zc_qps) + return -ENOMEM; + /* Allocate workqueue */ priv->wq = create_singlethread_workqueue("stmmac_wq"); if (!priv->wq) { @@ -5034,6 +6822,9 @@ int stmmac_dvr_probe(struct device *device, INIT_WORK(&priv->service_task, stmmac_service_task); + /* Initialize Link Partner FPE workqueue */ + INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); + /* Override with kernel parameters if supplied XXX CRS XXX * this needs to have multiple instances */ @@ -5055,6 +6846,11 @@ int stmmac_dvr_probe(struct device *device, if (ret) goto error_hw_init; + /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. + */ + if (priv->synopsys_id < DWMAC_CORE_5_20) + priv->plat->dma_cfg->dche = false; + stmmac_check_ether_addr(priv); ndev->netdev_ops = &stmmac_netdev_ops; @@ -5077,7 +6873,8 @@ int stmmac_dvr_probe(struct device *device, if (priv->dma_cap.sphen) { ndev->hw_features |= NETIF_F_GRO; - priv->sph = true; + priv->sph_cap = true; + priv->sph = priv->sph_cap; dev_info(priv->device, "SPH feature enabled\n"); } @@ -5179,6 +6976,10 @@ int stmmac_dvr_probe(struct device *device, stmmac_check_pcs_mode(priv); + pm_runtime_get_noresume(device); + pm_runtime_set_active(device); + pm_runtime_enable(device); + if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) { /* MDIO bus Registration */ @@ -5216,6 +7017,11 @@ int stmmac_dvr_probe(struct device *device, stmmac_init_fs(ndev); #endif + /* Let pm_runtime_put() disable the clocks. + * If CONFIG_PM is not enabled, the clocks will stay powered. 
+ */ + pm_runtime_put(device); + return ret; error_serdes_powerup: @@ -5230,6 +7036,8 @@ error_mdio_register: stmmac_napi_del(ndev); error_hw_init: destroy_workqueue(priv->wq); + stmmac_bus_clks_config(priv, false); + bitmap_free(priv->af_xdp_zc_qps); return ret; } @@ -5265,13 +7073,14 @@ int stmmac_dvr_remove(struct device *dev) phylink_destroy(priv->phylink); if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); - clk_disable_unprepare(priv->plat->pclk); - clk_disable_unprepare(priv->plat->stmmac_clk); + pm_runtime_put(dev); + pm_runtime_disable(dev); if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); destroy_workqueue(priv->wq); mutex_destroy(&priv->lock); + bitmap_free(priv->af_xdp_zc_qps); return 0; } @@ -5289,6 +7098,7 @@ int stmmac_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); u32 chan; + int ret; if (!ndev || !netif_running(ndev)) return 0; @@ -5332,11 +7142,24 @@ int stmmac_suspend(struct device *dev) pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ clk_disable_unprepare(priv->plat->clk_ptp_ref); - clk_disable_unprepare(priv->plat->pclk); - clk_disable_unprepare(priv->plat->stmmac_clk); + ret = pm_runtime_force_suspend(dev); + if (ret) { + mutex_unlock(&priv->lock); + return ret; + } } + mutex_unlock(&priv->lock); + if (priv->dma_cap.fpesel) { + /* Disable FPE */ + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, false); + + stmmac_fpe_handshake(priv, false); + } + priv->speed = SPEED_UNKNOWN; return 0; } @@ -5399,8 +7222,9 @@ int stmmac_resume(struct device *dev) } else { pinctrl_pm_select_default_state(priv->device); /* enable the clk previously disabled */ - clk_prepare_enable(priv->plat->stmmac_clk); - clk_prepare_enable(priv->plat->pclk); + ret = pm_runtime_force_resume(dev); + if (ret) + return ret; if (priv->plat->clk_ptp_ref) clk_prepare_enable(priv->plat->clk_ptp_ref); /* reset the phy so that it's ready */ @@ -5428,7 +7252,7 @@ int stmmac_resume(struct device *dev) mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); - stmmac_reinit_rx_buffers(priv); + stmmac_free_tx_skbufs(priv); stmmac_clear_descriptors(priv); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index d64116e0543e..b750074f8f9c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -15,6 +15,7 @@ #include <linux/iopoll.h> #include <linux/mii.h> #include <linux/of_mdio.h> +#include <linux/pm_runtime.h> #include <linux/phy.h> #include <linux/property.h> #include <linux/slab.h> @@ -87,21 +88,29 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) u32 tmp, addr, value = MII_XGMAC_BUSY; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } if (phyreg & MII_ADDR_C45) { phyreg &= ~MII_ADDR_C45; ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); if (ret) - return ret; + goto err_disable_clks; } else { ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); if (ret) - return 
ret; + goto err_disable_clks; value |= MII_XGMAC_SADDR; } @@ -112,8 +121,10 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } /* Set the MII address register to read */ writel(addr, priv->ioaddr + mii_address); @@ -121,11 +132,18 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } /* Read the data from the MII data register */ - return readl(priv->ioaddr + mii_data) & GENMASK(15, 0); + ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; } static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, @@ -138,21 +156,29 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, u32 addr, tmp, value = MII_XGMAC_BUSY; int ret; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } if (phyreg & MII_ADDR_C45) { phyreg &= ~MII_ADDR_C45; ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); if (ret) - return ret; + goto err_disable_clks; } else { ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); if (ret) - return ret; + goto err_disable_clks; value |= MII_XGMAC_SADDR; } @@ -164,16 +190,23 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; + !(tmp & MII_XGMAC_BUSY), 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } /* Set the MII address register to write */ writel(addr, priv->ioaddr + mii_address); writel(value, priv->ioaddr + mii_data); /* Wait until any existing MII operation is complete */ - return readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000); + ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; } /** @@ -196,6 +229,12 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) int data = 0; u32 v; + data = pm_runtime_get_sync(priv->device); + if (data < 0) { + pm_runtime_put_noidle(priv->device); + return data; + } + value |= (phyaddr << priv->hw->mii.addr_shift) & priv->hw->mii.addr_mask; value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; @@ -216,19 +255,26 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) } if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) - return -EBUSY; + 100, 10000)) { + data = -EBUSY; + goto err_disable_clks; + } writel(data, priv->ioaddr + mii_data); writel(value, priv->ioaddr + mii_address); if (readl_poll_timeout(priv->ioaddr + 
mii_address, v, !(v & MII_BUSY), - 100, 10000)) - return -EBUSY; + 100, 10000)) { + data = -EBUSY; + goto err_disable_clks; + } /* Read the data from the MII data register */ data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK; +err_disable_clks: + pm_runtime_put(priv->device); + return data; } @@ -247,10 +293,16 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; + int ret, data = phydata; u32 value = MII_BUSY; - int data = phydata; u32 v; + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; + } + value |= (phyaddr << priv->hw->mii.addr_shift) & priv->hw->mii.addr_mask; value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; @@ -275,16 +327,23 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) - return -EBUSY; + 100, 10000)) { + ret = -EBUSY; + goto err_disable_clks; + } /* Set the MII address register to write */ writel(data, priv->ioaddr + mii_data); writel(value, priv->ioaddr + mii_address); /* Wait until any existing MII operation is complete */ - return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000); + ret = readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000); + +err_disable_clks: + pm_runtime_put(priv->device); + + return ret; } /** diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 272cb47af9f2..95e0e4d6f74d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -198,8 +198,6 @@ static int stmmac_pci_probe(struct pci_dev *pdev, if (ret) return ret; - pci_enable_msi(pdev); - memset(&res, 0, sizeof(res)); res.addr = pcim_iomap_table(pdev)[i]; res.wol_irq = pdev->irq; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 6dc9f10414e4..1e17a23d9118 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -394,7 +394,7 @@ static int stmmac_of_get_mac_mode(struct device_node *np) * set some private fields that will be used by the main at runtime. 
*/ struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) +stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) { struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; @@ -406,12 +406,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) if (!plat) return ERR_PTR(-ENOMEM); - *mac = of_get_mac_address(np); - if (IS_ERR(*mac)) { - if (PTR_ERR(*mac) == -EPROBE_DEFER) - return ERR_CAST(*mac); + rc = of_get_mac_address(np, mac); + if (rc) { + if (rc == -EPROBE_DEFER) + return ERR_PTR(rc); - *mac = NULL; + eth_zero_addr(mac); } plat->phy_interface = device_get_phy_mode(&pdev->dev); @@ -627,7 +627,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev, } #else struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) +stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) { return ERR_PTR(-EINVAL); } @@ -704,7 +704,6 @@ int stmmac_pltfr_remove(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(stmmac_pltfr_remove); -#ifdef CONFIG_PM_SLEEP /** * stmmac_pltfr_suspend * @dev: device pointer @@ -712,7 +711,7 @@ EXPORT_SYMBOL_GPL(stmmac_pltfr_remove); * call the main suspend function and then, if required, on some platform, it * can call an exit helper. */ -static int stmmac_pltfr_suspend(struct device *dev) +static int __maybe_unused stmmac_pltfr_suspend(struct device *dev) { int ret; struct net_device *ndev = dev_get_drvdata(dev); @@ -733,7 +732,7 @@ static int stmmac_pltfr_suspend(struct device *dev) * the main resume function, on some platforms, it can call own init helper * if required. */ -static int stmmac_pltfr_resume(struct device *dev) +static int __maybe_unused stmmac_pltfr_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); @@ -744,10 +743,29 @@ static int stmmac_pltfr_resume(struct device *dev) return stmmac_resume(dev); } -#endif /* CONFIG_PM_SLEEP */ -SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend, - stmmac_pltfr_resume); +static int __maybe_unused stmmac_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + stmmac_bus_clks_config(priv, false); + + return 0; +} + +static int __maybe_unused stmmac_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + return stmmac_bus_clks_config(priv, true); +} + +const struct dev_pm_ops stmmac_pltfr_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume) + SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL) +}; EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index 3a4663b7b460..3fff3f59d73d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -12,7 +12,7 @@ #include "stmmac.h" struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); +stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); void stmmac_remove_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 0989e2bb6ee3..4e86cdf2bc9f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -9,6 +9,7 @@ *******************************************************************************/ #include "stmmac.h" #include "stmmac_ptp.h" +#include "dwmac4.h" /** * stmmac_adjust_freq @@ -134,7 +135,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp, { struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); + void __iomem *ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; struct stmmac_pps_cfg *cfg; + u32 intr_value, acr_value; int ret = -EOPNOTSUPP; unsigned long flags; @@ -158,6 +162,37 @@ static int stmmac_enable(struct ptp_clock_info *ptp, priv->systime_flags); spin_unlock_irqrestore(&priv->ptp_lock, flags); break; + case PTP_CLK_REQ_EXTTS: + priv->plat->ext_snapshot_en = on; + mutex_lock(&priv->aux_ts_lock); + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + if (on) { + /* Enable External snapshot trigger */ + acr_value |= priv->plat->ext_snapshot_num; + acr_value |= PTP_ACR_ATSFC; + netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + /* Enable Timestamp Interrupt */ + intr_value = readl(ioaddr + GMAC_INT_EN); + intr_value |= GMAC_INT_TSIE; + writel(intr_value, ioaddr + GMAC_INT_EN); + + } else { + netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n", + priv->plat->ext_snapshot_num >> + PTP_ACR_ATSEN_SHIFT); + /* Disable Timestamp Interrupt */ + intr_value = readl(ioaddr + GMAC_INT_EN); + intr_value &= ~GMAC_INT_TSIE; + writel(intr_value, ioaddr + GMAC_INT_EN); + } + writel(acr_value, ptpaddr + PTP_ACR); + mutex_unlock(&priv->aux_ts_lock); + ret = 0; + break; + default: break; } @@ -165,13 +200,43 @@ static int stmmac_enable(struct ptp_clock_info *ptp, return ret; } +/** + * stmmac_get_syncdevicetime + * @device: current device time + * @system: system counter value read synchronously with device time + * @ctx: context provided by timekeeping code + * Description: Read device and system clock simultaneously and return the + * corrected clock values in ns. 
+ **/ +static int stmmac_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + + if (priv->plat->crosststamp) + return priv->plat->crosststamp(device, system, ctx); + else + return -EOPNOTSUPP; +} + +static int stmmac_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + + return get_device_system_crosststamp(stmmac_get_syncdevicetime, + priv, NULL, xtstamp); +} + /* structure describing a PTP hardware clock */ static struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, .name = "stmmac ptp", .max_adj = 62500000, .n_alarm = 0, - .n_ext_ts = 0, + .n_ext_ts = 0, /* will be overwritten in stmmac_ptp_register */ .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */ .n_pins = 0, .pps = 0, @@ -180,6 +245,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { .gettime64 = stmmac_get_time, .settime64 = stmmac_set_time, .enable = stmmac_enable, + .getcrosststamp = stmmac_getcrosststamp, }; /** @@ -192,6 +258,9 @@ void stmmac_ptp_register(struct stmmac_priv *priv) { int i; + if (priv->plat->ptp_clk_freq_config) + priv->plat->ptp_clk_freq_config(priv); + for (i = 0; i < priv->dma_cap.pps_out_num; i++) { if (i >= STMMAC_PPS_MAX) break; @@ -202,8 +271,10 @@ void stmmac_ptp_register(struct stmmac_priv *priv) stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; + stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; spin_lock_init(&priv->ptp_lock); + mutex_init(&priv->aux_ts_lock); priv->ptp_clock_ops = stmmac_ptp_clock_ops; priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, @@ -229,4 +300,6 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv) pr_debug("Removed PTP HW clock successfully on %s\n", priv->dev->name); } + + mutex_destroy(&priv->aux_ts_lock); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 7abb1d47e7da..53172a439810 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -23,6 +23,9 @@ #define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ #define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ #define PTP_TAR 0x18 /* Timestamp Addend Reg */ +#define PTP_ACR 0x40 /* Auxiliary Control Reg */ +#define PTP_ATNR 0x48 /* Auxiliary Timestamp - Nanoseconds Reg */ +#define PTP_ATSR 0x4c /* Auxiliary Timestamp - Seconds Reg */ #define PTP_STNSUR_ADDSUB_SHIFT 31 #define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ @@ -64,4 +67,25 @@ #define PTP_SSIR_SSINC_MASK 0xff #define GMAC4_PTP_SSIR_SSINC_SHIFT 16 +/* Auxiliary Control defines */ +#define PTP_ACR_ATSFC BIT(0) /* Auxiliary Snapshot FIFO Clear */ +#define PTP_ACR_ATSEN0 BIT(4) /* Auxiliary Snapshot 0 Enable */ +#define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */ +#define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */ +#define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */ +#define PTP_ACR_ATSEN_SHIFT 5 /* Auxiliary Snapshot shift */ +#define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */ +#define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */ +#define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */ +#define PMC_ART_VALUE2 0x03 /* PMC_ART[47:32] timer value */ +#define PMC_ART_VALUE3 0x04 /* PMC_ART[63:48] timer value */ 
+#define GMAC4_ART_TIME_SHIFT 16 /* ART TIME 16-bits shift */ + +enum aux_snapshot { + AUX_SNAPSHOT0 = 0x10, + AUX_SNAPSHOT1 = 0x20, + AUX_SNAPSHOT2 = 0x40, + AUX_SNAPSHOT3 = 0x80, +}; + #endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 44bb133c3000..4e70efc45458 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -254,6 +254,16 @@ static int tc_init(struct stmmac_priv *priv) priv->flow_entries_max); } + if (!priv->plat->fpe_cfg) { + priv->plat->fpe_cfg = devm_kzalloc(priv->device, + sizeof(*priv->plat->fpe_cfg), + GFP_KERNEL); + if (!priv->plat->fpe_cfg) + return -ENOMEM; + } else { + memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg)); + } + /* Fail silently as we can still use remaining features, e.g. CBS */ if (!dma_cap->frpsel) return 0; @@ -297,6 +307,7 @@ static int tc_init(struct stmmac_priv *priv) dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n", priv->tc_entries_max, priv->tc_off_max); + return 0; } @@ -598,6 +609,87 @@ static int tc_del_flow(struct stmmac_priv *priv, return ret; } +#define VLAN_PRIO_FULL_MASK (0x07) + +static int tc_add_vlan_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + int tc = tc_classid_to_hwtc(priv->dev, cls->classid); + struct flow_match_vlan match; + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) + return -EINVAL; + + if (tc < 0) { + netdev_err(priv->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + flow_rule_match_vlan(rule, &match); + + if (match.mask->vlan_priority) { + u32 prio; + + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + netdev_err(priv->dev, "Only full mask is supported for VLAN priority"); + return -EINVAL; + } + + prio = BIT(match.key->vlan_priority); + stmmac_rx_queue_prio(priv, priv->hw, prio, tc); + } + + return 0; +} + +static int tc_del_vlan_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + int tc = tc_classid_to_hwtc(priv->dev, cls->classid); + + /* Nothing to do here */ + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) + return -EINVAL; + + if (tc < 0) { + netdev_err(priv->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + stmmac_rx_queue_prio(priv, priv->hw, 0, tc); + + return 0; +} + +static int tc_add_flow_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret; + + ret = tc_add_flow(priv, cls); + if (!ret) + return ret; + + return tc_add_vlan_flow(priv, cls); +} + +static int tc_del_flow_cls(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + int ret; + + ret = tc_del_flow(priv, cls); + if (!ret) + return ret; + + return tc_del_vlan_flow(priv, cls); +} + static int tc_setup_cls(struct stmmac_priv *priv, struct flow_cls_offload *cls) { @@ -609,10 +701,10 @@ static int tc_setup_cls(struct stmmac_priv *priv, switch (cls->command) { case FLOW_CLS_REPLACE: - ret = tc_add_flow(priv, cls); + ret = tc_add_flow_cls(priv, cls); break; case FLOW_CLS_DESTROY: - ret = tc_del_flow(priv, cls); + ret = tc_del_flow_cls(priv, cls); break; default: return -EOPNOTSUPP; @@ -748,13 +840,10 @@ static int tc_setup_taprio(struct stmmac_priv *priv, if (fpe && !priv->dma_cap.fpesel) 
return -EOPNOTSUPP; - ret = stmmac_fpe_configure(priv, priv->ioaddr, - priv->plat->tx_queues_to_use, - priv->plat->rx_queues_to_use, fpe); - if (ret && fpe) { - netdev_err(priv->dev, "failed to enable Frame Preemption\n"); - return ret; - } + /* Actual FPE register configuration will be done after the FPE + * handshake succeeds. + */ + priv->plat->fpe_cfg->enable = fpe; ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); @@ -764,12 +853,29 @@ static int tc_setup_taprio(struct stmmac_priv *priv, } netdev_info(priv->dev, "configured EST\n"); + + if (fpe) { + stmmac_fpe_handshake(priv, true); + netdev_info(priv->dev, "start FPE handshake\n"); + } + return 0; disable: priv->plat->est->enable = false; stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); + + priv->plat->fpe_cfg->enable = false; + stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->tx_queues_to_use, + priv->plat->rx_queues_to_use, + false); + netdev_info(priv->dev, "disabled FPE\n"); + + stmmac_fpe_handshake(priv, false); + netdev_info(priv->dev, "stop FPE handshake\n"); + return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c new file mode 100644 index 000000000000..105821b53020 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021, Intel Corporation. */ + +#include <net/xdp_sock_drv.h> + +#include "stmmac.h" +#include "stmmac_xdp.h" + +static int stmmac_xdp_enable_pool(struct stmmac_priv *priv, + struct xsk_buff_pool *pool, u16 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + bool need_update; + u32 frame_size; + int err; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + /* XDP ZC does not span multiple frames, so make sure the XSK pool + * buffer size can at least store a Q-in-Q frame. 
+ */ + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) + return -EOPNOTSUPP; + + err = xsk_pool_dma_map(pool, priv->device, STMMAC_RX_DMA_ATTR); + if (err) { + netdev_err(priv->dev, "Failed to map xsk pool\n"); + return err; + } + + need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); + + if (need_update) { + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); + napi_disable(&ch->rx_napi); + napi_disable(&ch->tx_napi); + } + + set_bit(queue, priv->af_xdp_zc_qps); + + if (need_update) { + napi_enable(&ch->rxtx_napi); + stmmac_enable_rx_queue(priv, queue); + stmmac_enable_tx_queue(priv, queue); + + err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX); + if (err) + return err; + } + + return 0; +} + +static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) +{ + struct stmmac_channel *ch = &priv->channel[queue]; + struct xsk_buff_pool *pool; + bool need_update; + + if (queue >= priv->plat->rx_queues_to_use || + queue >= priv->plat->tx_queues_to_use) + return -EINVAL; + + pool = xsk_get_pool_from_qid(priv->dev, queue); + if (!pool) + return -EINVAL; + + need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); + + if (need_update) { + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); + synchronize_rcu(); + napi_disable(&ch->rxtx_napi); + } + + xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR); + + clear_bit(queue, priv->af_xdp_zc_qps); + + if (need_update) { + napi_enable(&ch->rx_napi); + napi_enable(&ch->tx_napi); + stmmac_enable_rx_queue(priv, queue); + stmmac_enable_tx_queue(priv, queue); + } + + return 0; +} + +int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, + u16 queue) +{ + return pool ? stmmac_xdp_enable_pool(priv, pool, queue) : + stmmac_xdp_disable_pool(priv, queue); +} + +int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = priv->dev; + struct bpf_prog *old_prog; + bool need_update; + bool if_running; + + if_running = netif_running(dev); + + if (prog && dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames, so we return an error. + */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + need_update = !!priv->xdp_prog != !!prog; + if (if_running && need_update) + stmmac_release(dev); + + old_prog = xchg(&priv->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + /* Disable RX SPH for XDP operation */ + priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv); + + if (if_running && need_update) + stmmac_open(dev); + + return 0; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h new file mode 100644 index 000000000000..896dc987d4ef --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021, Intel Corporation. */ + +#ifndef _STMMAC_XDP_H_ +#define _STMMAC_XDP_H_ + +#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM) +#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool, + u16 queue); +int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, + struct netlink_ext_ack *extack); + +#endif /* _STMMAC_XDP_H_ */